Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml
c25504a0ba36 ("dt-bindings: net: fsl,qoriq-mc-dpmac: add missed property phys")
be034ee6c33d ("dt-bindings: net: fsl,qoriq-mc-dpmac: using unevaluatedProperties")
https://lore.kernel.org/20240815110934.56ae623a@canb.auug.org.au

drivers/net/dsa/vitesse-vsc73xx-core.c
5b9eebc2c7a5 ("net: dsa: vsc73xx: pass value in phy_write operation")
fa63c6434b6f ("net: dsa: vsc73xx: check busy flag in MDIO operations")
2524d6c28bdc ("net: dsa: vsc73xx: use defined values in phy operations")
https://lore.kernel.org/20240813104039.429b9fe6@canb.auug.org.au
Resolve by using FIELD_PREP(); Stephen's resolution is simpler.
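For reference, a minimal sketch of the FIELD_PREP() pattern the resolution settles on. The register layout below is invented for illustration; it is not the actual vsc73xx MDIO command format:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical MDIO command register fields, for illustration only. */
#define MII_CMD_PHY_ADDR	GENMASK(25, 21)
#define MII_CMD_PHY_REG		GENMASK(20, 16)
#define MII_CMD_DATA		GENMASK(15, 0)

static u32 build_mii_write_cmd(u8 phy, u8 reg, u16 val)
{
	/* FIELD_PREP() masks each value and shifts it into position,
	 * replacing hand-written "(val & 0x1f) << 21" style code. */
	return FIELD_PREP(MII_CMD_PHY_ADDR, phy) |
	       FIELD_PREP(MII_CMD_PHY_REG, reg) |
	       FIELD_PREP(MII_CMD_DATA, val);
}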

Adjacent changes:

net/vmw_vsock/af_vsock.c
69139d2919dd ("vsock: fix recursive ->recvmsg calls")
744500d81f81 ("vsock: add support for SIOCOUTQ ioctl")

Link: https://patch.msgid.link/20240815141149.33862-1-pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+3314 -1269
+3 -3
Documentation/ABI/testing/sysfs-bus-i2c-devices-turris-omnia-mcu
··· 32 32 interrupt. 33 33 34 34 This file switches between these two modes: 35 - - "mcu" makes the button press event be handled by the MCU to 36 - change the LEDs panel intensity. 37 - - "cpu" makes the button press event be handled by the CPU. 35 + - ``mcu`` makes the button press event be handled by the MCU to 36 + change the LEDs panel intensity. 37 + - ``cpu`` makes the button press event be handled by the CPU. 38 38 39 39 Format: %s. 40 40
+1 -1
Documentation/admin-guide/cifs/usage.rst
··· 742 742 may use NTLMSSP 0x00080 743 743 must use NTLMSSP 0x80080 744 744 seal (packet encryption) 0x00040 745 - must seal (not implemented yet) 0x40040 745 + must seal 0x40040 746 746 747 747 cifsFYI If set to non-zero value, additional debug information 748 748 will be logged to the system error log. This field
+6 -3
Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
··· 17 17 oneOf: 18 18 # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel 19 19 - const: samsung,atna33xc20 20 - # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel 21 20 - items: 22 - - const: samsung,atna45af01 23 - - const: samsung,atna33xc20 21 + - enum: 22 + # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel 23 + - samsung,atna45af01 24 + # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel 25 + - samsung,atna45dc02 26 + - const: samsung,atna33xc20 24 27 25 28 enable-gpios: true 26 29 port: true
+4
Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml
··· 30 30 A reference to a node representing a PCS PHY device found on 31 31 the internal MDIO bus. 32 32 33 + phys: 34 + description: A reference to the SerDes lane(s) 35 + maxItems: 1 36 + 33 37 required: 34 38 - reg 35 39
+2 -1
Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
··· 199 199 200 200 examples: 201 201 - | 202 + #include <dt-bindings/gpio/gpio.h> 202 203 codec@1,0{ 203 204 compatible = "slim217,250"; 204 205 reg = <1 0>; 205 - reset-gpios = <&tlmm 64 0>; 206 + reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>; 206 207 slim-ifc-dev = <&wcd9340_ifd>; 207 208 #sound-dai-cells = <1>; 208 209 interrupt-parent = <&tlmm>;
+1 -1
Documentation/devicetree/bindings/sound/qcom,wcd937x.yaml
··· 42 42 pinctrl-names = "default", "sleep"; 43 43 pinctrl-0 = <&wcd_reset_n>; 44 44 pinctrl-1 = <&wcd_reset_n_sleep>; 45 - reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>; 45 + reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>; 46 46 vdd-buck-supply = <&vreg_l17b_1p8>; 47 47 vdd-rxtx-supply = <&vreg_l18b_1p8>; 48 48 vdd-px-supply = <&vreg_l18b_1p8>;
+2 -1
Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml
··· 34 34 35 35 examples: 36 36 - | 37 + #include <dt-bindings/gpio/gpio.h> 37 38 codec { 38 39 compatible = "qcom,wcd9380-codec"; 39 - reset-gpios = <&tlmm 32 0>; 40 + reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>; 40 41 #sound-dai-cells = <1>; 41 42 qcom,tx-device = <&wcd938x_tx>; 42 43 qcom,rx-device = <&wcd938x_rx>;
+2 -2
Documentation/devicetree/bindings/sound/qcom,wcd939x.yaml
··· 52 52 53 53 examples: 54 54 - | 55 - #include <dt-bindings/interrupt-controller/irq.h> 55 + #include <dt-bindings/gpio/gpio.h> 56 56 codec { 57 57 compatible = "qcom,wcd9390-codec"; 58 - reset-gpios = <&tlmm 32 IRQ_TYPE_NONE>; 58 + reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>; 59 59 #sound-dai-cells = <1>; 60 60 qcom,tx-device = <&wcd939x_tx>; 61 61 qcom,rx-device = <&wcd939x_rx>;
+1
Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
··· 18 18 - usb424,2412 19 19 - usb424,2417 20 20 - usb424,2514 21 + - usb424,2517 21 22 22 23 reg: true 23 24
+4 -4
Documentation/filesystems/caching/fscache.rst
··· 318 318 Debugging 319 319 ========= 320 320 321 - If CONFIG_FSCACHE_DEBUG is enabled, the FS-Cache facility can have runtime 322 - debugging enabled by adjusting the value in:: 321 + If CONFIG_NETFS_DEBUG is enabled, the FS-Cache facility and NETFS support can 322 + have runtime debugging enabled by adjusting the value in:: 323 323 324 - /sys/module/fscache/parameters/debug 324 + /sys/module/netfs/parameters/debug 325 325 326 326 This is a bitmask of debugging streams to enable: 327 327 ··· 343 343 The appropriate set of values should be OR'd together and the result written to 344 344 the control file. For example:: 345 345 346 - echo $((1|8|512)) >/sys/module/fscache/parameters/debug 346 + echo $((1|8|512)) >/sys/module/netfs/parameters/debug 347 347 348 348 will turn on all function entry debugging.
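The sysfs knob above is an ordinary module parameter. As a rough sketch of how such a bitmask parameter typically gates debug output on the kernel side (the stream names and bit values here are assumptions for illustration, not the real netfs definitions):

#include <linux/moduleparam.h>
#include <linux/printk.h>

/* Illustrative stream bits; the real values live in the netfs code. */
#define DBG_ENTER	0x001	/* function entry */
#define DBG_CACHE	0x008
#define DBG_OPS		0x200

/* Shows up as /sys/module/<module>/parameters/debug, mode 0644. */
static unsigned int debug;
module_param(debug, uint, 0644);

#define _enter(fmt, ...)						\
	do {								\
		if (debug & DBG_ENTER)					\
			pr_info("==> %s(" fmt ")\n", __func__,		\
				##__VA_ARGS__);				\
	} while (0)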
+96 -57
Documentation/process/embargoed-hardware-issues.rst
··· 13 13 Hardware issues like Meltdown, Spectre, L1TF etc. must be treated 14 14 differently because they usually affect all Operating Systems ("OS") and 15 15 therefore need coordination across different OS vendors, distributions, 16 - hardware vendors and other parties. For some of the issues, software 17 - mitigations can depend on microcode or firmware updates, which need further 18 - coordination. 16 + silicon vendors, hardware integrators, and other parties. For some of the 17 + issues, software mitigations can depend on microcode or firmware updates, 18 + which need further coordination. 19 19 20 20 .. _Contact: 21 21 ··· 32 32 <securitybugs>`) instead. 33 33 34 34 The team can be contacted by email at <hardware-security@kernel.org>. This 35 - is a private list of security officers who will help you to coordinate a 36 - fix according to our documented process. 35 + is a private list of security officers who will help you coordinate a fix 36 + according to our documented process. 37 37 38 38 The list is encrypted and email to the list can be sent by either PGP or 39 39 S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME ··· 43 43 - PGP: https://www.kernel.org/static/files/hardware-security.asc 44 44 - S/MIME: https://www.kernel.org/static/files/hardware-security.crt 45 45 46 - While hardware security issues are often handled by the affected hardware 46 + While hardware security issues are often handled by the affected silicon 47 47 vendor, we welcome contact from researchers or individuals who have 48 48 identified a potential hardware flaw. 49 49 ··· 65 65 ability to access the embargoed information, but are obliged to 66 66 confidentiality by their employment contract. Linux Foundation IT 67 67 personnel are also responsible for operating and managing the rest of 68 - kernel.org infrastructure. 68 + kernel.org's infrastructure. 69 69 70 70 The Linux Foundation's current director of IT Project infrastructure is 71 71 Konstantin Ryabitsev. ··· 85 85 86 86 The Linux kernel community has a deep understanding of the requirement to 87 87 keep hardware security issues under embargo for coordination between 88 - different OS vendors, distributors, hardware vendors and other parties. 88 + different OS vendors, distributors, silicon vendors, and other parties. 89 89 90 90 The Linux kernel community has successfully handled hardware security 91 91 issues in the past and has the necessary mechanisms in place to allow ··· 103 103 All involved developers pledge to adhere to the embargo rules and to keep 104 104 the received information confidential. Violation of the pledge will lead to 105 105 immediate exclusion from the current issue and removal from all related 106 - mailing-lists. In addition, the hardware security team will also exclude 106 + mailing lists. In addition, the hardware security team will also exclude 107 107 the offender from future issues. The impact of this consequence is a highly 108 108 effective deterrent in our community. In case a violation happens the 109 109 hardware security team will inform the involved parties immediately. If you 110 - or anyone becomes aware of a potential violation, please report it 110 + or anyone else becomes aware of a potential violation, please report it 111 111 immediately to the Hardware security officers. 112 112 ··· 124 124 Start of Disclosure 125 125 """"""""""""""""""" 126 126 127 - Disclosure starts by contacting the Linux kernel hardware security team by 128 - email.
This initial contact should contain a description of the problem and 129 - a list of any known affected hardware. If your organization builds or 130 - distributes the affected hardware, we encourage you to also consider what 131 - other hardware could be affected. 127 + Disclosure starts by emailing the Linux kernel hardware security team per 128 + the Contact section above. This initial contact should contain a 129 + description of the problem and a list of any known affected silicon. If 130 + your organization builds or distributes the affected hardware, we encourage 131 + you to also consider what other hardware could be affected. The disclosing 132 + party is responsible for contacting the affected silicon vendors in a 133 + timely manner. 132 134 133 135 The hardware security team will provide an incident-specific encrypted 134 - mailing-list which will be used for initial discussion with the reporter, 136 + mailing list which will be used for initial discussion with the reporter, 135 137 further disclosure, and coordination of fixes. 136 138 137 139 The hardware security team will provide the disclosing party a list of ··· 160 158 - The disclosed entities can be contacted to name experts who should 161 159 participate in the mitigation development. 162 160 163 - - If an expert which is required to handle an issue is employed by an 164 - listed entity or member of an listed entity, then the response teams can 161 - + If an expert who is required to handle an issue is employed by a listed 162 + entity or member of a listed entity, then the response teams can 165 163 request the disclosure of that expert from that entity. This ensures 166 164 that the expert is also part of the entity's response team. 167 165 ··· 171 169 The disclosing party provides detailed information to the initial response 172 170 team via the specific encrypted mailing-list. 173 171 174 - From our experience the technical documentation of these issues is usually 175 - a sufficient starting point and further technical clarification is best 172 + From our experience, the technical documentation of these issues is usually 173 + a sufficient starting point, and further technical clarification is best 176 174 done via email. 177 175 178 176 Mitigation development ··· 181 179 The initial response team sets up an encrypted mailing-list or repurposes 182 180 an existing one if appropriate. 183 181 184 - Using a mailing-list is close to the normal Linux development process and 185 - has been successfully used in developing mitigations for various hardware 182 + Using a mailing list is close to the normal Linux development process and 183 + has been successfully used to develop mitigations for various hardware 186 184 security issues in the past. 187 185 188 - The mailing-list operates in the same way as normal Linux development. 189 - Patches are posted, discussed and reviewed and if agreed on applied to a 190 - non-public git repository which is only accessible to the participating 186 + The mailing list operates in the same way as normal Linux development. 187 + Patches are posted, discussed, and reviewed and if agreed upon, applied to 188 + a non-public git repository which is only accessible to the participating 191 189 developers via a secure connection. The repository contains the main 192 190 development branch against the mainline kernel and backport branches for 193 191 stable kernel versions as necessary.
194 192 195 193 The initial response team will identify further experts from the Linux 196 - kernel developer community as needed. Bringing in experts can happen at any 197 - time of the development process and needs to be handled in a timely manner. 194 + kernel developer community as needed. Any involved party can suggest 195 + further experts to be included, each of which will be subject to the same 196 + requirements outlined above. 198 197 199 - If an expert is employed by or member of an entity on the disclosure list 198 + Bringing in experts can happen at any time in the development process and 199 + needs to be handled in a timely manner. 200 + 201 + If an expert is employed by or a member of an entity on the disclosure list 200 202 provided by the disclosing party, then participation will be requested from 201 203 the relevant entity. 202 204 203 - If not, then the disclosing party will be informed about the experts 205 - participation. The experts are covered by the Memorandum of Understanding 204 206 participation. The experts are covered by the Memorandum of Understanding 205 - and the disclosing party is requested to acknowledge the participation. In 206 - case that the disclosing party has a compelling reason to object, then this 207 - objection has to be raised within five work days and resolved with the 208 - incident team immediately. If the disclosing party does not react within 209 - five work days this is taken as silent acknowledgement. 207 + and the disclosing party is requested to acknowledge their participation. 208 + In the case where the disclosing party has a compelling reason to object, 209 + any objection must be raised within five working days and resolved with 210 + the incident team immediately. If the disclosing party does not react 211 + within five working days this is taken as silent acknowledgment. 210 212 211 - After acknowledgement or resolution of an objection the expert is disclosed 212 - by the incident team and brought into the development process. 213 + After the incident team acknowledges or resolves an objection, the expert 214 + is disclosed and brought into the development process. 213 215 214 216 List participants may not communicate about the issue outside of the 215 217 private mailing list. List participants may not use any shared resources 216 218 (e.g. employer build farms, CI systems, etc) when working on patches. 217 219 220 + Early access 221 + """""""""""" 222 + 223 + The patches discussed and developed on the list can neither be distributed 224 + to any individual who is not a member of the response team nor to any other 225 + organization. 226 + 227 + To allow the affected silicon vendors to work with their internal teams and 228 + industry partners on testing, validation, and logistics, the following 229 + exception is provided: 230 + 231 + Designated representatives of the affected silicon vendors are 232 + allowed to hand over the patches at any time to the silicon 233 + vendor’s response team. The representative must notify the kernel 234 + response team about the handover. The affected silicon vendor must 235 + have and maintain their own documented security process for any 236 + patches shared with their response team that is consistent with 237 + this policy. 238 + 239 + The silicon vendor’s response team can distribute these patches to 240 + their industry partners and to their internal teams under the 241 + silicon vendor’s documented security process.
Feedback from the 242 + industry partners goes back to the silicon vendor and is 243 + communicated by the silicon vendor to the kernel response team. 244 + 245 + The handover to the silicon vendor’s response team removes any 246 + responsibility or liability from the kernel response team regarding 247 + premature disclosure, which happens due to the involvement of the 248 + silicon vendor’s internal teams or industry partners. The silicon 249 + vendor guarantees this release of liability by agreeing to this 250 + process. 218 251 219 252 Coordinated release 220 253 """"""""""""""""""" 221 254 222 - The involved parties will negotiate the date and time where the embargo 223 - ends. At that point the prepared mitigations are integrated into the 224 - relevant kernel trees and published. There is no pre-notification process: 225 - fixes are published in public and available to everyone at the same time. 255 + The involved parties will negotiate the date and time when the embargo 256 + ends. At that point, the prepared mitigations are published into the 257 + relevant kernel trees. There is no pre-notification process: the 258 + mitigations are published in public and available to everyone at the same 259 + time. 226 260 227 261 While we understand that hardware security issues need coordinated embargo 228 - time, the embargo time should be constrained to the minimum time which is 229 - required for all involved parties to develop, test and prepare the 262 + time, the embargo time should be constrained to the minimum time that is 263 + required for all involved parties to develop, test, and prepare their 230 264 mitigations. Extending embargo time artificially to meet conference talk 231 - dates or other non-technical reasons is creating more work and burden for 232 - the involved developers and response teams as the patches need to be kept 233 - up to date in order to follow the ongoing upstream kernel development, 234 - which might create conflicting changes. 265 + dates or other non-technical reasons creates more work and burden for the 266 + involved developers and response teams as the patches need to be kept up to 267 + date in order to follow the ongoing upstream kernel development, which 268 + might create conflicting changes. 235 269 236 270 CVE assignment 237 271 """""""""""""" ··· 313 275 314 276 If you want your organization to be added to the ambassadors list, please 315 277 contact the hardware security team. The nominated ambassador has to 316 - understand and support our process fully and is ideally well connected in 278 + understand and support our process fully and is ideally well-connected in 317 279 the Linux kernel community. 318 280 319 281 Encrypted mailing-lists 320 282 ----------------------- 321 283 322 - We use encrypted mailing-lists for communication. The operating principle 284 + We use encrypted mailing lists for communication. The operating principle 323 285 of these lists is that email sent to the list is encrypted either with the 324 286 list's PGP key or with the list's S/MIME certificate. The mailing-list 325 287 software decrypts the email and re-encrypts it individually for each 326 288 subscriber with the subscriber's PGP key or S/MIME certificate.
Details 327 - about the mailing-list software and the setup which is used to ensure the 289 + about the mailing list software and the setup that is used to ensure the 328 290 security of the lists and protection of the data can be found here: 329 291 https://korg.wiki.kernel.org/userdoc/remail. 330 292 331 293 List keys 332 294 ^^^^^^^^^ 333 295 334 - For initial contact see :ref:`Contact`. For incident specific mailing-lists 335 - the key and S/MIME certificate are conveyed to the subscribers by email 336 - sent from the specific list. 296 + For initial contact see the :ref:`Contact` section above. For incident 297 + specific mailing lists, the key and S/MIME certificate are conveyed to the 298 + subscribers by email sent from the specific list. 337 299 338 - Subscription to incident specific lists 300 + Subscription to incident-specific lists 339 301 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 340 302 341 - Subscription is handled by the response teams. Disclosed parties who want 342 - to participate in the communication send a list of potential subscribers to 343 - the response team so the response team can validate subscription requests. 303 + Subscription to incident-specific lists is handled by the response teams. 304 + Disclosed parties who want to participate in the communication send a list 305 + of potential experts to the response team so the response team can validate 306 + subscription requests. 344 307 345 308 Each subscriber needs to send a subscription request to the response team 346 309 by email. The email must be signed with the subscriber's PGP key or S/MIME
+1 -1
Documentation/virt/kvm/api.rst
··· 2592 2592 0x6030 0000 0010 004a SPSR_ABT 64 spsr[KVM_SPSR_ABT] 2593 2593 0x6030 0000 0010 004c SPSR_UND 64 spsr[KVM_SPSR_UND] 2594 2594 0x6030 0000 0010 004e SPSR_IRQ 64 spsr[KVM_SPSR_IRQ] 2595 - 0x6060 0000 0010 0050 SPSR_FIQ 64 spsr[KVM_SPSR_FIQ] 2595 + 0x6030 0000 0010 0050 SPSR_FIQ 64 spsr[KVM_SPSR_FIQ] 2596 2596 0x6040 0000 0010 0054 V0 128 fp_regs.vregs[0] [1]_ 2597 2597 0x6040 0000 0010 0058 V1 128 fp_regs.vregs[1] [1]_ 2598 2598 ...
+2 -2
MAINTAINERS
··· 5306 5306 CIRRUS LOGIC AUDIO CODEC DRIVERS 5307 5307 M: David Rhodes <david.rhodes@cirrus.com> 5308 5308 M: Richard Fitzgerald <rf@opensource.cirrus.com> 5309 - L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5309 + L: linux-sound@vger.kernel.org 5310 5310 L: patches@opensource.cirrus.com 5311 5311 S: Maintained 5312 5312 F: Documentation/devicetree/bindings/sound/cirrus,cs* ··· 5375 5375 CIRRUS LOGIC MADERA CODEC DRIVERS 5376 5376 M: Charles Keepax <ckeepax@opensource.cirrus.com> 5377 5377 M: Richard Fitzgerald <rf@opensource.cirrus.com> 5378 - L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5378 + L: linux-sound@vger.kernel.org 5379 5379 L: patches@opensource.cirrus.com 5380 5380 S: Supported 5381 5381 W: https://github.com/CirrusLogic/linux-drivers/wiki
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 11 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc2 5 + EXTRAVERSION = -rc3 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+7 -4
arch/arm/mach-pxa/gumstix.c
··· 21 21 #include <linux/mtd/mtd.h> 22 22 #include <linux/mtd/partitions.h> 23 23 #include <linux/gpio/machine.h> 24 + #include <linux/gpio/property.h> 24 25 #include <linux/gpio.h> 25 26 #include <linux/err.h> 26 27 #include <linux/clk.h> ··· 41 40 #include <linux/platform_data/mmc-pxamci.h> 42 41 #include "udc.h" 43 42 #include "gumstix.h" 43 + #include "devices.h" 44 44 45 45 #include "generic.h" 46 46 ··· 101 99 } 102 100 #endif 103 101 104 - #ifdef CONFIG_USB_PXA25X 105 - static const struct property_entry spitz_mci_props[] __initconst = { 102 + #if IS_ENABLED(CONFIG_USB_PXA25X) 103 + static const struct property_entry gumstix_vbus_props[] __initconst = { 106 104 PROPERTY_ENTRY_GPIO("vbus-gpios", &pxa2xx_gpiochip_node, 107 105 GPIO_GUMSTIX_USB_GPIOn, GPIO_ACTIVE_HIGH), 108 106 PROPERTY_ENTRY_GPIO("pullup-gpios", &pxa2xx_gpiochip_node, ··· 111 109 }; 112 110 113 111 static const struct platform_device_info gumstix_gpio_vbus_info __initconst = { 114 - .name = "gpio-vbus", 115 - .id = PLATFORM_DEVID_NONE, 112 + .name = "gpio-vbus", 113 + .id = PLATFORM_DEVID_NONE, 114 + .properties = gumstix_vbus_props, 116 115 }; 117 116 118 117 static void __init gumstix_udc_init(void)
-22
arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
··· 43 43 sound-dai = <&mcasp0>; 44 44 }; 45 45 }; 46 - 47 - reg_usb_hub: regulator-usb-hub { 48 - compatible = "regulator-fixed"; 49 - enable-active-high; 50 - /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */ 51 - gpio = <&main_gpio0 31 GPIO_ACTIVE_HIGH>; 52 - regulator-boot-on; 53 - regulator-name = "HUB_PWR_EN"; 54 - }; 55 46 }; 56 47 57 48 /* Verdin ETHs */ ··· 184 193 status = "okay"; 185 194 }; 186 195 187 - /* Do not force CTRL_SLEEP_MOCI# always enabled */ 188 - &reg_force_sleep_moci { 189 - status = "disabled"; 190 - }; 191 - 192 196 /* Verdin SD_1 */ 193 197 &sdhci1 { 194 198 status = "okay"; ··· 204 218 }; 205 219 206 220 &usb1 { 207 - #address-cells = <1>; 208 - #size-cells = <0>; 209 221 status = "okay"; 210 - 211 - usb-hub@1 { 212 - compatible = "usb424,2744"; 213 - reg = <1>; 214 - vdd-supply = <&reg_usb_hub>; 215 - }; 216 222 }; 217 223 218 224 /* Verdin CTRL_WAKE1_MICO# */
-6
arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
··· 138 138 vin-supply = <&reg_1v8>; 139 139 }; 140 140 141 - /* 142 - * By default we enable CTRL_SLEEP_MOCI#, this is required to have 143 - * peripherals on the carrier board powered. 144 - * If more granularity or power saving is required this can be disabled 145 - * in the carrier board device tree files. 146 - */ 147 141 reg_force_sleep_moci: regulator-force-sleep-moci { 148 142 compatible = "regulator-fixed"; 149 143 enable-active-high;
+2
arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
··· 146 146 power-domains = <&k3_pds 79 TI_SCI_PD_EXCLUSIVE>; 147 147 clocks = <&k3_clks 79 0>; 148 148 clock-names = "gpio"; 149 + gpio-ranges = <&mcu_pmx0 0 0 21>, <&mcu_pmx0 21 23 1>, 150 + <&mcu_pmx0 22 32 2>; 149 151 }; 150 152 151 153 mcu_rti0: watchdog@4880000 {
+2 -1
arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
··· 45 45 &main_pmx0 { 46 46 pinctrl-single,gpio-range = 47 47 <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>, 48 - <&main_pmx0_range 33 92 PIN_GPIO_RANGE_IOPAD>, 48 + <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>, 49 + <&main_pmx0_range 72 22 PIN_GPIO_RANGE_IOPAD>, 49 50 <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>, 50 51 <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>, 51 52 <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>;
+2 -1
arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
··· 193 193 &main_pmx0 { 194 194 pinctrl-single,gpio-range = 195 195 <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>, 196 - <&main_pmx0_range 33 55 PIN_GPIO_RANGE_IOPAD>, 196 + <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>, 197 + <&main_pmx0_range 72 17 PIN_GPIO_RANGE_IOPAD>, 197 198 <&main_pmx0_range 101 25 PIN_GPIO_RANGE_IOPAD>, 198 199 <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>, 199 200 <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,
+8 -17
arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
··· 1262 1262 &serdes0 { 1263 1263 status = "okay"; 1264 1264 1265 + serdes0_pcie1_link: phy@0 { 1266 + reg = <0>; 1267 + cdns,num-lanes = <2>; 1268 + #phy-cells = <0>; 1269 + cdns,phy-type = <PHY_TYPE_PCIE>; 1270 + resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>; 1271 + }; 1272 + 1265 1273 serdes0_usb_link: phy@3 { 1266 1274 reg = <3>; 1267 1275 cdns,num-lanes = <1>; ··· 1392 1384 pinctrl-names = "default"; 1393 1385 pinctrl-0 = <&main_mcan4_pins_default>; 1394 1386 phys = <&transceiver3>; 1395 - }; 1396 - 1397 - &serdes0 { 1398 - status = "okay"; 1399 - 1400 - serdes0_pcie1_link: phy@0 { 1401 - reg = <0>; 1402 - cdns,num-lanes = <4>; 1403 - #phy-cells = <0>; 1404 - cdns,phy-type = <PHY_TYPE_PCIE>; 1405 - resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>, 1406 - <&serdes_wiz0 3>, <&serdes_wiz0 4>; 1407 - }; 1408 - }; 1409 - 1410 - &serdes_wiz0 { 1411 - status = "okay"; 1412 1387 }; 1413 1388 1414 1389 &pcie1_rc {
+2 -2
arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
··· 2755 2755 interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>, 2756 2756 <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>; 2757 2757 interrupt-names = "tx", "rx"; 2758 - dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>; 2758 + dmas = <&main_udmap 0xc403>, <&main_udmap 0x4403>; 2759 2759 dma-names = "tx", "rx"; 2760 2760 clocks = <&k3_clks 268 0>; 2761 2761 clock-names = "fck"; ··· 2773 2773 interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>, 2774 2774 <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>; 2775 2775 interrupt-names = "tx", "rx"; 2776 - dmas = <&main_udmap 0xc501>, <&main_udmap 0x4501>; 2776 + dmas = <&main_udmap 0xc404>, <&main_udmap 0x4404>; 2777 2777 dma-names = "tx", "rx"; 2778 2778 clocks = <&k3_clks 269 0>; 2779 2779 clock-names = "fck";
+1 -1
arch/arm64/include/asm/kvm_ptrauth.h
··· 104 104 105 105 #define __ptrauth_save_key(ctxt, key) \ 106 106 do { \ 107 - u64 __val; \ 107 + u64 __val; \ 108 108 __val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ 109 109 ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val; \ 110 110 __val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+1
arch/arm64/kvm/Kconfig
··· 19 19 20 20 menuconfig KVM 21 21 bool "Kernel-based Virtual Machine (KVM) support" 22 + depends on AS_HAS_ARMV8_4 22 23 select KVM_COMMON 23 24 select KVM_GENERIC_HARDWARE_ENABLING 24 25 select KVM_GENERIC_MMU_NOTIFIER
+3
arch/arm64/kvm/Makefile
··· 10 10 obj-$(CONFIG_KVM) += kvm.o 11 11 obj-$(CONFIG_KVM) += hyp/ 12 12 13 + CFLAGS_sys_regs.o += -Wno-override-init 14 + CFLAGS_handle_exit.o += -Wno-override-init 15 + 13 16 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ 14 17 inject_fault.o va_layout.o handle_exit.o \ 15 18 guest.o debug.o reset.o sys_regs.o stacktrace.o \
+5 -10
arch/arm64/kvm/arm.c
··· 164 164 /** 165 165 * kvm_arch_init_vm - initializes a VM data structure 166 166 * @kvm: pointer to the KVM struct 167 + * @type: kvm device type 167 168 */ 168 169 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 169 170 { ··· 522 521 523 522 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu) 524 523 { 525 - if (vcpu_has_ptrauth(vcpu)) { 524 + if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) { 526 525 /* 527 - * Either we're running running an L2 guest, and the API/APK 528 - * bits come from L1's HCR_EL2, or API/APK are both set. 526 + * Either we're running an L2 guest, and the API/APK bits come 527 + * from L1's HCR_EL2, or API/APK are both set. 529 528 */ 530 529 if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) { 531 530 u64 val; ··· 542 541 * Save the host keys if there is any chance for the guest 543 542 * to use pauth, as the entry code will reload the guest 544 543 * keys in that case. 545 - * Protected mode is the exception to that rule, as the 546 - * entry into the EL2 code eagerly switch back and forth 547 - * between host and hyp keys (and kvm_hyp_ctxt is out of 548 - * reach anyway). 549 544 */ 550 - if (is_protected_kvm_enabled()) 551 - return; 552 - 553 545 if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) { 554 546 struct kvm_cpu_context *ctxt; 547 + 555 548 ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt); 556 549 ptrauth_save_keys(ctxt); 557 550 }
-1
arch/arm64/kvm/hyp/include/hyp/switch.h
··· 27 27 #include <asm/kvm_hyp.h> 28 28 #include <asm/kvm_mmu.h> 29 29 #include <asm/kvm_nested.h> 30 - #include <asm/kvm_ptrauth.h> 31 30 #include <asm/fpsimd.h> 32 31 #include <asm/debug-monitors.h> 33 32 #include <asm/processor.h>
+2
arch/arm64/kvm/hyp/nvhe/Makefile
··· 20 20 lib-objs := clear_page.o copy_page.o memcpy.o memset.o 21 21 lib-objs := $(addprefix ../../../lib/, $(lib-objs)) 22 22 23 + CFLAGS_switch.nvhe.o += -Wno-override-init 24 + 23 25 hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ 24 26 hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \ 25 27 cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
+2 -3
arch/arm64/kvm/hyp/nvhe/switch.c
··· 173 173 static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code) 174 174 { 175 175 /* 176 - * Make sure we handle the exit for workarounds and ptrauth 177 - * before the pKVM handling, as the latter could decide to 178 - * UNDEF. 176 + * Make sure we handle the exit for workarounds before the pKVM 177 + * handling, as the latter could decide to UNDEF. 179 178 */ 180 179 return (kvm_hyp_handle_sysreg(vcpu, exit_code) || 181 180 kvm_handle_pvm_sysreg(vcpu, exit_code));
+2
arch/arm64/kvm/hyp/vhe/Makefile
··· 6 6 asflags-y := -D__KVM_VHE_HYPERVISOR__ 7 7 ccflags-y := -D__KVM_VHE_HYPERVISOR__ 8 8 9 + CFLAGS_switch.o += -Wno-override-init 10 + 9 11 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o 10 12 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ 11 13 ../fpsimd.o ../hyp-entry.o ../exception.o
+1 -1
arch/arm64/kvm/nested.c
··· 786 786 if (!WARN_ON(atomic_read(&mmu->refcnt))) 787 787 kvm_free_stage2_pgd(mmu); 788 788 } 789 - kfree(kvm->arch.nested_mmus); 789 + kvfree(kvm->arch.nested_mmus); 790 790 kvm->arch.nested_mmus = NULL; 791 791 kvm->arch.nested_mmus_size = 0; 792 792 kvm_uninit_stage2_mmu(kvm);
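The nested.c hunk pairs the free with the allocator: the nested_mmus array comes from a kvmalloc-family call, which may fall back to vmalloc, so it must be released with kvfree(), not kfree(). The general rule, in a sketch with invented names:

#include <linux/slab.h>
#include <linux/mm.h>

static int *make_table(size_t n)
{
	/* May be physically contiguous (kmalloc) or vmapped (vmalloc). */
	return kvcalloc(n, sizeof(int), GFP_KERNEL);
}

static void destroy_table(int *t)
{
	kvfree(t);	/* copes with either backing store; kfree() would not */
}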
+3 -2
arch/arm64/kvm/vgic/vgic-debug.c
··· 45 45 * Let the xarray drive the iterator after the last SPI, as the iterator 46 46 * has exhausted the sequentially-allocated INTID space. 47 47 */ 48 - if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1)) { 48 + if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1) && 49 + iter->nr_lpis) { 49 50 if (iter->lpi_idx < iter->nr_lpis) 50 51 xa_find_after(&dist->lpi_xa, &iter->intid, 51 52 VGIC_LPI_MAX_INTID, ··· 113 112 return iter->dist_id > 0 && 114 113 iter->vcpu_id == iter->nr_cpus && 115 114 iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) && 116 - iter->lpi_idx > iter->nr_lpis; 115 + (!iter->nr_lpis || iter->lpi_idx > iter->nr_lpis); 117 116 } 118 117 119 118 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
+1 -2
arch/arm64/kvm/vgic/vgic-init.c
··· 438 438 unsigned long i; 439 439 440 440 mutex_lock(&kvm->slots_lock); 441 + mutex_lock(&kvm->arch.config_lock); 441 442 442 443 vgic_debug_destroy(kvm); 443 444 444 445 kvm_for_each_vcpu(i, vcpu, kvm) 445 446 __kvm_vgic_vcpu_destroy(vcpu); 446 - 447 - mutex_lock(&kvm->arch.config_lock); 448 447 449 448 kvm_vgic_dist_destroy(kvm); 450 449
+4 -3
arch/arm64/kvm/vgic/vgic-irqfd.c
··· 9 9 #include <kvm/arm_vgic.h> 10 10 #include "vgic.h" 11 11 12 - /** 12 + /* 13 13 * vgic_irqfd_set_irq: inject the IRQ corresponding to the 14 14 * irqchip routing entry 15 15 * ··· 75 75 msi->flags = e->msi.flags; 76 76 msi->devid = e->msi.devid; 77 77 } 78 - /** 78 + 79 + /* 79 80 * kvm_set_msi: inject the MSI corresponding to the 80 81 * MSI routing entry 81 82 * ··· 99 98 return vgic_its_inject_msi(kvm, &msi); 100 99 } 101 100 102 - /** 101 + /* 103 102 * kvm_arch_set_irq_inatomic: fast-path for irqfd injection 104 103 */ 105 104 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+11 -7
arch/arm64/kvm/vgic/vgic-its.c
··· 2040 2040 * @start_id: the ID of the first entry in the table 2041 2041 * (non zero for 2d level tables) 2042 2042 * @fn: function to apply on each entry 2043 + * @opaque: pointer to opaque data 2043 2044 * 2044 2045 * Return: < 0 on error, 0 if last element was identified, 1 otherwise 2045 2046 * (the last element may not be found on second level tables) ··· 2080 2079 return 1; 2081 2080 } 2082 2081 2083 - /** 2082 + /* 2084 2083 * vgic_its_save_ite - Save an interrupt translation entry at @gpa 2085 2084 */ 2086 2085 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, ··· 2100 2099 2101 2100 /** 2102 2101 * vgic_its_restore_ite - restore an interrupt translation entry 2102 + * 2103 + * @its: its handle 2103 2104 * @event_id: id used for indexing 2104 2105 * @ptr: pointer to the ITE entry 2105 2106 * @opaque: pointer to the its_device ··· 2234 2231 * @its: ITS handle 2235 2232 * @dev: ITS device 2236 2233 * @ptr: GPA 2234 + * @dte_esz: device table entry size 2237 2235 */ 2238 2236 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, 2239 2237 gpa_t ptr, int dte_esz) ··· 2317 2313 return 1; 2318 2314 } 2319 2315 2320 - /** 2316 + /* 2321 2317 * vgic_its_save_device_tables - Save the device table and all ITT 2322 2318 * into guest RAM 2323 2319 * ··· 2390 2386 return ret; 2391 2387 } 2392 2388 2393 - /** 2389 + /* 2394 2390 * vgic_its_restore_device_tables - Restore the device table and all ITT 2395 2391 * from guest RAM to internal data structs 2396 2392 */ ··· 2482 2478 return 1; 2483 2479 } 2484 2480 2485 - /** 2481 + /* 2486 2482 * vgic_its_save_collection_table - Save the collection table into 2487 2483 * guest RAM 2488 2484 */ ··· 2522 2518 return ret; 2523 2519 } 2524 2520 2525 - /** 2521 + /* 2526 2522 * vgic_its_restore_collection_table - reads the collection table 2527 2523 * in guest memory and restores the ITS internal state. Requires the 2528 2524 * BASER registers to be restored before. ··· 2560 2556 return ret; 2561 2557 } 2562 2558 2563 - /** 2559 + /* 2564 2560 * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM 2565 2561 * according to v0 ABI 2566 2562 */ ··· 2575 2571 return vgic_its_save_collection_table(its); 2576 2572 } 2577 2573 2578 - /** 2574 + /* 2579 2575 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM 2580 2576 * to internal data structs according to V0 ABI 2581 2577 *
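The comment churn above is kernel-doc hygiene: a comment opened with /** is parsed by kernel-doc and must then document every parameter, while plain /* comments are ignored. A minimal well-formed header, using an invented function:

/**
 * example_sum() - add two values
 * @a: first addend
 * @b: second addend
 *
 * Return: the sum of @a and @b.
 */
static int example_sum(int a, int b)
{
	return a + b;
}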
+1 -1
arch/arm64/kvm/vgic/vgic-v3.c
··· 370 370 dist->its_vm.vpes[i]->irq)); 371 371 } 372 372 373 - /** 373 + /* 374 374 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM 375 375 * kvm lock and all vcpu lock must be held 376 376 */
+1 -1
arch/arm64/kvm/vgic/vgic.c
··· 313 313 * with all locks dropped. 314 314 */ 315 315 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, 316 - unsigned long flags) 316 + unsigned long flags) __releases(&irq->irq_lock) 317 317 { 318 318 struct kvm_vcpu *vcpu; 319 319
+1 -1
arch/arm64/kvm/vgic/vgic.h
··· 186 186 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending); 187 187 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active); 188 188 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, 189 - unsigned long flags); 189 + unsigned long flags) __releases(&irq->irq_lock); 190 190 void vgic_kick_vcpus(struct kvm *kvm); 191 191 void vgic_irq_handle_resampling(struct vgic_irq *irq, 192 192 bool lr_deactivated, bool lr_pending);
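The __releases() annotations in the two vgic hunks are for sparse: the function is entered with the lock held and returns with it dropped, so the checker must be told the imbalance is intentional. A generic sketch of the idiom, with invented names:

#include <linux/spinlock.h>

/* Called with *lock held; always returns with it released.
 * __releases() keeps sparse's lock-context tracking balanced. */
static void finish_and_unlock(spinlock_t *lock, bool *done)
	__releases(lock)
{
	*done = true;
	spin_unlock(lock);
}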
+4 -1
arch/s390/include/asm/uv.h
··· 441 441 442 442 if (!uv_call(0, (u64)&uvcb)) 443 443 return 0; 444 - return -EINVAL; 444 + pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n", 445 + uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare", 446 + uvcb.header.rc, uvcb.header.rrc); 447 + panic("System security cannot be guaranteed unless the system panics now.\n"); 445 448 } 446 449 447 450 /*
+6 -1
arch/s390/kvm/kvm-s390.h
··· 267 267 268 268 static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm) 269 269 { 270 - u32 gd = virt_to_phys(kvm->arch.gisa_int.origin); 270 + u32 gd; 271 + 272 + if (!kvm->arch.gisa_int.origin) 273 + return 0; 274 + 275 + gd = virt_to_phys(kvm->arch.gisa_int.origin); 271 276 272 277 if (gd && sclp.has_gisaf) 273 278 gd |= GISA_FORMAT1;
+2
arch/x86/include/asm/kvm_host.h
··· 2192 2192 #define kvm_arch_has_private_mem(kvm) false 2193 2193 #endif 2194 2194 2195 + #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state) 2196 + 2195 2197 static inline u16 kvm_read_ldt(void) 2196 2198 { 2197 2199 u16 ldt;
+7 -5
arch/x86/include/asm/qspinlock.h
··· 66 66 67 67 #ifdef CONFIG_PARAVIRT 68 68 /* 69 - * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack. 69 + * virt_spin_lock_key - disables by default the virt_spin_lock() hijack. 70 70 * 71 - * Native (and PV wanting native due to vCPU pinning) should disable this key. 72 - * It is done in this backwards fashion to only have a single direction change, 73 - * which removes ordering between native_pv_spin_init() and HV setup. 71 + * Native (and PV wanting native due to vCPU pinning) should keep this key 72 + * disabled. Native does not touch the key. 73 + * 74 + * When in a guest then native_pv_lock_init() enables the key first and 75 + * KVM/XEN might conditionally disable it later in the boot process again. 74 76 */ 75 - DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key); 77 + DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key); 76 78 77 79 /* 78 80 * Shortcut for the queued_spin_lock_slowpath() function that allows
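A condensed sketch of the static-key direction described in that comment; the names mirror the diff, but this is a simplification under assumed wiring, not a drop-in copy of the real qspinlock code:

#include <linux/jump_label.h>
#include <linux/init.h>
#include <asm/cpufeature.h>

/* Off by default: bare-metal boots never touch the key at all. */
static DEFINE_STATIC_KEY_FALSE(example_virt_spin_lock_key);

static void __init example_pv_lock_init(void)
{
	/* Running under a hypervisor: turn the slow-path hijack on.
	 * KVM/Xen may later static_branch_disable() it again. */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&example_virt_spin_lock_key);
}

static bool example_virt_spin_lock(void)
{
	/* Compiles to a patched no-op branch while the key is disabled. */
	if (!static_branch_likely(&example_virt_spin_lock_key))
		return false;
	/* ... simple test-and-set fallback would go here ... */
	return true;
}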
+1 -1
arch/x86/kernel/acpi/madt_wakeup.c
··· 19 19 static u64 acpi_mp_wake_mailbox_paddr __ro_after_init; 20 20 21 21 /* Virtual address of the Multiprocessor Wakeup Structure mailbox */ 22 - static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init; 22 + static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox; 23 23 24 24 static u64 acpi_mp_pgd __ro_after_init; 25 25 static u64 acpi_mp_reset_vector_paddr __ro_after_init;
+1 -1
arch/x86/kernel/cpu/mtrr/mtrr.c
··· 609 609 { 610 610 int first_cpu; 611 611 612 - if (!mtrr_enabled()) 612 + if (!mtrr_enabled() || !mtrr_state.have_fixed) 613 613 return; 614 614 615 615 first_cpu = cpumask_first(cpu_online_mask);
+3 -4
arch/x86/kernel/paravirt.c
··· 51 51 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text); 52 52 #endif 53 53 54 - DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); 54 + DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); 55 55 56 56 void __init native_pv_lock_init(void) 57 57 { 58 - if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && 59 - !boot_cpu_has(X86_FEATURE_HYPERVISOR)) 60 - static_branch_disable(&virt_spin_lock_key); 58 + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 59 + static_branch_enable(&virt_spin_lock_key); 61 60 } 62 61 63 62 static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
-1
arch/x86/kvm/hyperv.h
··· 286 286 return HV_STATUS_ACCESS_DENIED; 287 287 } 288 288 static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {} 289 - static inline void kvm_hv_free_pa_page(struct kvm *kvm) {} 290 289 static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector) 291 290 { 292 291 return false;
+15 -7
arch/x86/kvm/lapic.c
··· 351 351 * reversing the LDR calculation to get cluster of APICs, i.e. no 352 352 * additional work is required. 353 353 */ 354 - if (apic_x2apic_mode(apic)) { 355 - WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic))); 354 + if (apic_x2apic_mode(apic)) 356 355 return; 357 - } 358 356 359 357 if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr, 360 358 &cluster, &mask))) { ··· 2964 2966 struct kvm_lapic_state *s, bool set) 2965 2967 { 2966 2968 if (apic_x2apic_mode(vcpu->arch.apic)) { 2969 + u32 x2apic_id = kvm_x2apic_id(vcpu->arch.apic); 2967 2970 u32 *id = (u32 *)(s->regs + APIC_ID); 2968 2971 u32 *ldr = (u32 *)(s->regs + APIC_LDR); 2969 2972 u64 icr; 2970 2973 2971 2974 if (vcpu->kvm->arch.x2apic_format) { 2972 - if (*id != vcpu->vcpu_id) 2975 + if (*id != x2apic_id) 2973 2976 return -EINVAL; 2974 2977 } else { 2978 + /* 2979 + * Ignore the userspace value when setting APIC state. 2980 + * KVM's model is that the x2APIC ID is readonly, e.g. 2981 + * KVM only supports delivering interrupts to KVM's 2982 + * version of the x2APIC ID. However, for backwards 2983 + * compatibility, don't reject attempts to set a 2984 + * mismatched ID for userspace that hasn't opted into 2985 + * x2apic_format. 2986 + */ 2975 2987 if (set) 2976 - *id >>= 24; 2988 + *id = x2apic_id; 2977 2989 else 2978 - *id <<= 24; 2990 + *id = x2apic_id << 24; 2979 2991 } 2980 2992 2981 2993 /* ··· 2994 2986 * split to ICR+ICR2 in userspace for backwards compatibility. 2995 2987 */ 2996 2988 if (set) { 2997 - *ldr = kvm_apic_calc_x2apic_ldr(*id); 2989 + *ldr = kvm_apic_calc_x2apic_ldr(x2apic_id); 2998 2990 2999 2991 icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) | 3000 2992 (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
+4 -3
arch/x86/kvm/svm/sev.c
··· 2276 2276 2277 2277 for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) { 2278 2278 struct sev_data_snp_launch_update fw_args = {0}; 2279 - bool assigned; 2279 + bool assigned = false; 2280 2280 int level; 2281 2281 2282 2282 ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level); ··· 2290 2290 if (src) { 2291 2291 void *vaddr = kmap_local_pfn(pfn + i); 2292 2292 2293 - ret = copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE); 2294 - if (ret) 2293 + if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) { 2294 + ret = -EFAULT; 2295 2295 goto err; 2296 + } 2296 2297 kunmap_local(vaddr); 2297 2298 } 2298 2299
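The sev.c hunk fixes a classic copy_from_user() misuse: it returns the number of bytes left uncopied, not a negative errno, so the result must be converted rather than forwarded. The canonical shape:

#include <linux/uaccess.h>
#include <linux/errno.h>

static int fetch_from_user(void *dst, const void __user *src, size_t len)
{
	/* A nonzero return means bytes were NOT copied, not an errno. */
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}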
+2 -4
arch/x86/kvm/x86.c
··· 427 427 428 428 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) 429 429 { 430 - unsigned int cpu = smp_processor_id(); 431 - struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 430 + struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs); 432 431 int err; 433 432 434 433 value = (value & mask) | (msrs->values[slot].host & ~mask); ··· 449 450 450 451 static void drop_user_return_notifiers(void) 451 452 { 452 - unsigned int cpu = smp_processor_id(); 453 - struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 453 + struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs); 454 454 455 455 if (msrs->registered) 456 456 kvm_on_user_return(&msrs->urn);
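The x86.c change is a pure simplification: this_cpu_ptr(p) is the idiomatic spelling of per_cpu_ptr(p, smp_processor_id()). For example:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, hit_count);

static void bump(void)
{
	/* Equivalent to per_cpu_ptr(&hit_count, smp_processor_id()),
	 * assuming the caller already runs with preemption disabled. */
	int *cnt = this_cpu_ptr(&hit_count);

	(*cnt)++;
}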
+29 -16
arch/x86/mm/pti.c
··· 241 241 * 242 242 * Returns a pointer to a PTE on success, or NULL on failure. 243 243 */ 244 - static pte_t *pti_user_pagetable_walk_pte(unsigned long address) 244 + static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text) 245 245 { 246 246 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); 247 247 pmd_t *pmd; ··· 251 251 if (!pmd) 252 252 return NULL; 253 253 254 - /* We can't do anything sensible if we hit a large mapping. */ 254 + /* Large PMD mapping found */ 255 255 if (pmd_leaf(*pmd)) { 256 - WARN_ON(1); 257 - return NULL; 256 + /* Clear the PMD if we hit a large mapping from the first round */ 257 + if (late_text) { 258 + set_pmd(pmd, __pmd(0)); 259 + } else { 260 + WARN_ON_ONCE(1); 261 + return NULL; 262 + } 258 263 } 259 264 260 265 if (pmd_none(*pmd)) { ··· 288 283 if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) 289 284 return; 290 285 291 - target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); 286 + target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false); 292 287 if (WARN_ON(!target_pte)) 293 288 return; 294 289 ··· 306 301 307 302 static void 308 303 pti_clone_pgtable(unsigned long start, unsigned long end, 309 - enum pti_clone_level level) 304 + enum pti_clone_level level, bool late_text) 310 305 { 311 306 unsigned long addr; 312 307 ··· 395 390 return; 396 391 397 392 /* Allocate PTE in the user page-table */ 398 - target_pte = pti_user_pagetable_walk_pte(addr); 393 + target_pte = pti_user_pagetable_walk_pte(addr, late_text); 399 394 if (WARN_ON(!target_pte)) 400 395 return; 401 396 ··· 457 452 phys_addr_t pa = per_cpu_ptr_to_phys((void *)va); 458 453 pte_t *target_pte; 459 454 460 - target_pte = pti_user_pagetable_walk_pte(va); 455 + target_pte = pti_user_pagetable_walk_pte(va, false); 461 456 if (WARN_ON(!target_pte)) 462 457 return; 463 458 ··· 480 475 start = CPU_ENTRY_AREA_BASE; 481 476 end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES); 482 477 483 - pti_clone_pgtable(start, end, PTI_CLONE_PMD); 478 + pti_clone_pgtable(start, end, PTI_CLONE_PMD, false); 484 479 } 485 480 #endif /* CONFIG_X86_64 */ 486 481 ··· 497 492 /* 498 493 * Clone the populated PMDs of the entry text and force it RO. 499 494 */ 500 - static void pti_clone_entry_text(void) 495 + static void pti_clone_entry_text(bool late) 501 496 { 502 497 pti_clone_pgtable((unsigned long) __entry_text_start, 503 498 (unsigned long) __entry_text_end, 504 - PTI_LEVEL_KERNEL_IMAGE); 499 + PTI_LEVEL_KERNEL_IMAGE, late); 505 500 } 506 501 507 502 /* ··· 576 571 * pti_set_kernel_image_nonglobal() did to clear the 577 572 * global bit. 578 573 */ 579 - pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE); 574 + pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false); 580 575 581 576 /* 582 577 * pti_clone_pgtable() will set the global bit in any PMDs ··· 643 638 644 639 /* Undo all global bits from the init pagetables in head_64.S: */ 645 640 pti_set_kernel_image_nonglobal(); 646 + 647 641 /* Replace some of the global bits just for shared entry text: */ 642 + /* 643 + * This is very early in boot. Device and Late initcalls can do 644 + * modprobe before free_initmem() and mark_readonly(). This 645 + * pti_clone_entry_text() allows those user-mode-helpers to function, 646 + * but notably the text is still RW.
648 + */ 649 + pti_clone_entry_text(false); 648 650 pti_setup_espfix64(); 649 651 pti_setup_vsyscall(); 650 652 } ··· 668 656 if (!boot_cpu_has(X86_FEATURE_PTI)) 669 657 return; 670 658 /* 671 - * We need to clone everything (again) that maps parts of the 672 - * kernel image. 659 + * This is after free_initmem() (all initcalls are done) and we've done 660 + * mark_readonly(). Text is now NX which might've split some PMDs 661 + * relative to the early clone. 673 662 */ 674 - pti_clone_entry_text(); 663 + pti_clone_entry_text(true); 675 664 pti_clone_kernel_text(); 676 665 677 666 debug_checkwx_user();
-11
block/blk-throttle.c
··· 31 31 32 32 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) 33 33 34 - /* We measure latency for request size from <= 4k to >= 1M */ 35 - #define LATENCY_BUCKET_SIZE 9 36 - 37 - struct latency_bucket { 38 - unsigned long total_latency; /* ns / 1024 */ 39 - int samples; 40 - }; 41 - 42 34 struct throtl_data 43 35 { 44 36 /* service tree for active throtl groups */ ··· 107 115 108 116 return tg->iops[rw]; 109 117 } 110 - 111 - #define request_bucket_index(sectors) \ 112 - clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1) 113 118 114 119 /** 115 120 * throtl_log - log debug message via blktrace
+6 -9
drivers/android/binder.c
··· 1044 1044 } 1045 1045 1046 1046 /* Find the smallest unused descriptor the "slow way" */ 1047 - static u32 slow_desc_lookup_olocked(struct binder_proc *proc) 1047 + static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset) 1048 1048 { 1049 1049 struct binder_ref *ref; 1050 1050 struct rb_node *n; 1051 1051 u32 desc; 1052 1052 1053 - desc = 1; 1053 + desc = offset; 1054 1054 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) { 1055 1055 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1056 1056 if (ref->data.desc > desc) ··· 1071 1071 u32 *desc) 1072 1072 { 1073 1073 struct dbitmap *dmap = &proc->dmap; 1074 + unsigned int nbits, offset; 1074 1075 unsigned long *new, bit; 1075 - unsigned int nbits; 1076 1076 1077 1077 /* 0 is reserved for the context manager */ 1078 - if (node == proc->context->binder_context_mgr_node) { 1079 - *desc = 0; 1080 - return 0; 1081 - } 1078 + offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1; 1082 1079 1083 1080 if (!dbitmap_enabled(dmap)) { 1084 - *desc = slow_desc_lookup_olocked(proc); 1081 + *desc = slow_desc_lookup_olocked(proc, offset); 1085 1082 return 0; 1086 1083 } 1087 1084 1088 - if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) { 1085 + if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) { 1089 1086 *desc = bit; 1090 1087 return 0; 1091 1088 }
+1 -1
drivers/android/binder_alloc.c
··· 939 939 __free_page(alloc->pages[i].page_ptr); 940 940 page_count++; 941 941 } 942 - kvfree(alloc->pages); 943 942 } 944 943 spin_unlock(&alloc->lock); 944 + kvfree(alloc->pages); 945 945 if (alloc->mm) 946 946 mmdrop(alloc->mm); 947 947
+7 -15
drivers/android/dbitmap.h
··· 6 6 * 7 7 * Used by the binder driver to optimize the allocation of the smallest 8 8 * available descriptor ID. Each bit in the bitmap represents the state 9 - * of an ID, with the exception of BIT(0) which is used exclusively to 10 - * reference binder's context manager. 9 + * of an ID. 11 10 * 12 11 * A dbitmap can grow or shrink as needed. This part has been designed 13 12 * considering that users might need to briefly release their locks in ··· 57 58 if (bit < (dmap->nbits >> 2)) 58 59 return dmap->nbits >> 1; 59 60 60 - /* 61 - * Note that find_last_bit() returns dmap->nbits when no bits 62 - * are set. While this is technically not possible here since 63 - * BIT(0) is always set, this check is left for extra safety. 64 - */ 61 + /* find_last_bit() returns dmap->nbits when no bits are set. */ 65 62 if (bit == dmap->nbits) 66 63 return NBITS_MIN; 67 64 ··· 127 132 } 128 133 129 134 /* 130 - * Finds and sets the first zero bit in the bitmap. Upon success @bit 135 + * Finds and sets the next zero bit in the bitmap. Upon success @bit 131 136 * is populated with the index and 0 is returned. Otherwise, -ENOSPC 132 137 * is returned to indicate that a dbitmap_grow() is needed. 133 138 */ 134 139 static inline int 135 - dbitmap_acquire_first_zero_bit(struct dbitmap *dmap, unsigned long *bit) 140 + dbitmap_acquire_next_zero_bit(struct dbitmap *dmap, unsigned long offset, 141 + unsigned long *bit) 136 142 { 137 143 unsigned long n; 138 144 139 - n = find_first_zero_bit(dmap->map, dmap->nbits); 145 + n = find_next_zero_bit(dmap->map, dmap->nbits, offset); 140 146 if (n == dmap->nbits) 141 147 return -ENOSPC; 142 148 ··· 150 154 static inline void 151 155 dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit) 152 156 { 153 - /* BIT(0) should always set for the context manager */ 154 - if (bit) 155 - clear_bit(bit, dmap->map); 157 + clear_bit(bit, dmap->map); 156 158 } 157 159 158 160 static inline int dbitmap_init(struct dbitmap *dmap) ··· 162 168 } 163 169 164 170 dmap->nbits = NBITS_MIN; 165 - /* BIT(0) is reserved for the context manager */ 166 - set_bit(0, dmap->map); 167 171 168 172 return 0; 169 173 }
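The effect of the new offset argument, in a self-contained sketch using the plain bitmap helpers rather than binder's dbitmap wrapper: starting the scan at 1 is what keeps descriptor 0 reserved for the context manager.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

static DECLARE_BITMAP(descs, 64);

/* Find and claim the lowest free descriptor at or above @offset. */
static int acquire_desc(unsigned long offset, unsigned long *out)
{
	unsigned long bit = find_next_zero_bit(descs, 64, offset);

	if (bit == 64)
		return -ENOSPC;	/* full: caller would grow the bitmap */
	set_bit(bit, descs);
	*out = bit;
	return 0;
}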
+13 -2
drivers/ata/libata-scsi.c
··· 951 951 &sense_key, &asc, &ascq); 952 952 ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); 953 953 } else { 954 - /* ATA PASS-THROUGH INFORMATION AVAILABLE */ 955 - ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D); 954 + /* 955 + * ATA PASS-THROUGH INFORMATION AVAILABLE 956 + * 957 + * Note: we are supposed to call ata_scsi_set_sense(), which 958 + * respects the D_SENSE bit, instead of unconditionally 959 + * generating the sense data in descriptor format. However, 960 + * because hdparm, hddtemp, and udisks incorrectly assume sense 961 + * data in descriptor format, without even looking at the 962 + * RESPONSE CODE field in the returned sense data (to see which 963 + * format the returned sense data is in), we are stuck with 964 + * being bug compatible with older kernels. 965 + */ 966 + scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D); 956 967 } 957 968 } 958 969
+5 -4
drivers/atm/idt77252.c
··· 1118 1118 rpp->len += skb->len; 1119 1119 1120 1120 if (stat & SAR_RSQE_EPDU) { 1121 + unsigned int len, truesize; 1121 1122 unsigned char *l1l2; 1122 - unsigned int len; 1123 1123 1124 1124 l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6); 1125 1125 ··· 1189 1189 ATM_SKB(skb)->vcc = vcc; 1190 1190 __net_timestamp(skb); 1191 1191 1192 + truesize = skb->truesize; 1192 1193 vcc->push(vcc, skb); 1193 1194 atomic_inc(&vcc->stats->rx); 1194 1195 1195 - if (skb->truesize > SAR_FB_SIZE_3) 1196 + if (truesize > SAR_FB_SIZE_3) 1196 1197 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); 1197 - else if (skb->truesize > SAR_FB_SIZE_2) 1198 + else if (truesize > SAR_FB_SIZE_2) 1198 1199 add_rx_skb(card, 2, SAR_FB_SIZE_2, 1); 1199 - else if (skb->truesize > SAR_FB_SIZE_1) 1200 + else if (truesize > SAR_FB_SIZE_1) 1200 1201 add_rx_skb(card, 1, SAR_FB_SIZE_1, 1); 1201 1202 else 1202 1203 add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
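The idt77252 hunk is a use-after-free fix: vcc->push() hands the skb on and may free it, so any field consulted afterwards has to be copied out first. The shape of the fix, reduced to a sketch (the threshold and names are arbitrary here):

#include <linux/skbuff.h>
#include <linux/sizes.h>
#include <linux/printk.h>

static void deliver_and_refill(struct sk_buff *skb,
			       void (*push)(struct sk_buff *))
{
	/* Save what we need before ownership transfers. */
	unsigned int truesize = skb->truesize;

	push(skb);		/* skb must not be touched after this */

	if (truesize > SZ_4K)	/* decide using the saved copy */
		pr_debug("large buffer delivered, refilling pool\n");
}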
+8 -5
drivers/base/core.c
··· 25 25 #include <linux/mutex.h> 26 26 #include <linux/pm_runtime.h> 27 27 #include <linux/netdevice.h> 28 + #include <linux/rcupdate.h> 28 29 #include <linux/sched/signal.h> 29 30 #include <linux/sched/mm.h> 30 31 #include <linux/string_helpers.h> ··· 2641 2640 static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 2642 2641 { 2643 2642 const struct device *dev = kobj_to_dev(kobj); 2643 + struct device_driver *driver; 2644 2644 int retval = 0; 2645 2645 2646 2646 /* add device node properties if present */ ··· 2670 2668 if (dev->type && dev->type->name) 2671 2669 add_uevent_var(env, "DEVTYPE=%s", dev->type->name); 2672 2670 2673 - if (dev->driver) 2674 - add_uevent_var(env, "DRIVER=%s", dev->driver->name); 2671 + /* Synchronize with module_remove_driver() */ 2672 + rcu_read_lock(); 2673 + driver = READ_ONCE(dev->driver); 2674 + if (driver) 2675 + add_uevent_var(env, "DRIVER=%s", driver->name); 2676 + rcu_read_unlock(); 2675 2677 2676 2678 /* Add common DT information about the device */ 2677 2679 of_device_uevent(dev, env); ··· 2745 2739 if (!env) 2746 2740 return -ENOMEM; 2747 2741 2748 - /* Synchronize with really_probe() */ 2749 - device_lock(dev); 2750 2742 /* let the kset specific function add its keys */ 2751 2743 retval = kset->uevent_ops->uevent(&dev->kobj, env); 2752 - device_unlock(dev); 2753 2744 if (retval) 2754 2745 goto out; 2755 2746
+4
drivers/base/module.c
··· 7 7 #include <linux/errno.h> 8 8 #include <linux/slab.h> 9 9 #include <linux/string.h> 10 + #include <linux/rcupdate.h> 10 11 #include "base.h" 11 12 12 13 static char *make_driver_name(const struct device_driver *drv) ··· 97 96 98 97 if (!drv) 99 98 return; 99 + 100 + /* Synchronize with dev_uevent() */ 101 + synchronize_rcu(); 100 102 101 103 sysfs_remove_link(&drv->p->kobj, "module"); 102 104
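The core.c and module.c hunks form one RCU pattern: the reader samples the pointer inside a read-side critical section, and the updater waits out all such readers before tearing the driver link down. A stand-alone sketch of the pairing, with invented names:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct drv { const char *name; };
static struct drv *active_drv;

/* Reader (cf. dev_uevent()): the borrowed pointer is only valid
 * inside the rcu_read_lock()/rcu_read_unlock() window. */
static void reader(void)
{
	struct drv *d;

	rcu_read_lock();
	d = READ_ONCE(active_drv);
	if (d)
		pr_info("DRIVER=%s\n", d->name);
	rcu_read_unlock();
}

/* Updater (cf. module_remove_driver()): unpublish, wait for all
 * pre-existing readers, and only then free. */
static void updater(struct drv *d)
{
	WRITE_ONCE(active_drv, NULL);
	synchronize_rcu();
	kfree(d);
}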
+1
drivers/char/ds1620.c
··· 421 421 module_init(ds1620_init); 422 422 module_exit(ds1620_exit); 423 423 424 + MODULE_DESCRIPTION("Dallas Semiconductor DS1620 thermometer driver"); 424 425 MODULE_LICENSE("GPL");
+1
drivers/char/nwbutton.c
··· 241 241 242 242 243 243 MODULE_AUTHOR("Alex Holden"); 244 + MODULE_DESCRIPTION("NetWinder button driver"); 244 245 MODULE_LICENSE("GPL"); 245 246 246 247 module_init(nwbutton_init);
+1
drivers/char/nwflash.c
··· 618 618 iounmap((void *)FLASH_BASE); 619 619 } 620 620 621 + MODULE_DESCRIPTION("NetWinder flash memory driver"); 621 622 MODULE_LICENSE("GPL"); 622 623 623 624 module_param(flashdebug, bool, 0644);
+1
drivers/cpufreq/intel_pstate.c
··· 3405 3405 */ 3406 3406 X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), 3407 3407 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), 3408 + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), 3408 3409 X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, 3409 3410 179, 64, 16)), 3410 3411 X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+1
drivers/fsi/fsi-core.c
··· 1444 1444 } 1445 1445 module_exit(fsi_exit); 1446 1446 module_param(discard_errors, int, 0664); 1447 + MODULE_DESCRIPTION("FSI core driver"); 1447 1448 MODULE_LICENSE("GPL"); 1448 1449 MODULE_PARM_DESC(discard_errors, "Don't invoke error handling on bus accesses");
+1
drivers/fsi/fsi-master-aspeed.c
··· 670 670 }; 671 671 672 672 module_platform_driver(fsi_master_aspeed_driver); 673 + MODULE_DESCRIPTION("FSI master driver for AST2600"); 673 674 MODULE_LICENSE("GPL");
+2 -1
drivers/fsi/fsi-master-ast-cf.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 // Copyright 2018 IBM Corp 3 3 /* 4 - * A FSI master controller, using a simple GPIO bit-banging interface 4 + * A FSI master based on Aspeed ColdFire coprocessor 5 5 */ 6 6 7 7 #include <linux/crc4.h> ··· 1438 1438 }; 1439 1439 1440 1440 module_platform_driver(fsi_master_acf); 1441 + MODULE_DESCRIPTION("A FSI master based on Aspeed ColdFire coprocessor"); 1441 1442 MODULE_LICENSE("GPL"); 1442 1443 MODULE_FIRMWARE(FW_FILE_NAME);
+1
drivers/fsi/fsi-master-gpio.c
··· 892 892 }; 893 893 894 894 module_platform_driver(fsi_master_gpio_driver); 895 + MODULE_DESCRIPTION("A FSI master controller, using a simple GPIO bit-banging interface"); 895 896 MODULE_LICENSE("GPL");
+1
drivers/fsi/fsi-master-hub.c
··· 295 295 }; 296 296 297 297 module_fsi_driver(hub_master_driver); 298 + MODULE_DESCRIPTION("FSI hub master driver"); 298 299 MODULE_LICENSE("GPL");
+1
drivers/fsi/fsi-scom.c
··· 625 625 626 626 module_init(scom_init); 627 627 module_exit(scom_exit); 628 + MODULE_DESCRIPTION("SCOM FSI Client device driver"); 628 629 MODULE_LICENSE("GPL");
+6
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 156 156 uint64_t addr, uint64_t *flags); 157 157 /* get the amount of memory used by the vbios for pre-OS console */ 158 158 unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); 159 + /* get the DCC buffer alignment */ 160 + unsigned int (*get_dcc_alignment)(struct amdgpu_device *adev); 159 161 160 162 enum amdgpu_memory_partition (*query_mem_partition_mode)( 161 163 struct amdgpu_device *adev); ··· 365 363 (adev)->gmc.gmc_funcs->override_vm_pte_flags \ 366 364 ((adev), (vm), (addr), (pte_flags)) 367 365 #define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev)) 366 + #define amdgpu_gmc_get_dcc_alignment(adev) ({ \ 367 + typeof(adev) _adev = (adev); \ 368 + _adev->gmc.gmc_funcs->get_dcc_alignment(_adev); \ 369 + }) 368 370 369 371 /** 370 372 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
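The new amdgpu_gmc_get_dcc_alignment() macro uses a GNU C statement expression so its argument is evaluated exactly once: adev is copied into the local _adev, and every subsequent use goes through the copy. That matters whenever the argument has side effects. A toy illustration with hypothetical macros:

    #define BAD_SQUARE(x)  ((x) * (x))                         /* BAD_SQUARE(i++) bumps i twice   */
    #define GOOD_SQUARE(x) ({ typeof(x) _x = (x); _x * _x; })  /* argument evaluated exactly once */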
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 264 264 struct dma_fence *fence = NULL; 265 265 int r; 266 266 267 - /* Ignore soft recovered fences here */ 268 267 r = drm_sched_entity_error(s_entity); 269 - if (r && r != -ENODATA) 268 + if (r) 270 269 goto error; 271 270 272 271 if (!fence && job->gang_submit)
+34 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 456 456 u64 vis_usage = 0, max_bytes, min_block_size; 457 457 struct amdgpu_vram_mgr_resource *vres; 458 458 u64 size, remaining_size, lpfn, fpfn; 459 + unsigned int adjust_dcc_size = 0; 459 460 struct drm_buddy *mm = &mgr->mm; 460 461 struct drm_buddy_block *block; 461 462 unsigned long pages_per_block; ··· 512 511 /* Allocate blocks in desired range */ 513 512 vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; 514 513 514 + if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC && 515 + adev->gmc.gmc_funcs->get_dcc_alignment) 516 + adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev); 517 + 515 518 remaining_size = (u64)vres->base.size; 519 + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { 520 + unsigned int dcc_size; 521 + 522 + dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size); 523 + remaining_size = (u64)dcc_size; 524 + 525 + vres->flags |= DRM_BUDDY_TRIM_DISABLE; 526 + } 516 527 517 528 mutex_lock(&mgr->lock); 518 529 while (remaining_size) { ··· 534 521 min_block_size = mgr->default_page_size; 535 522 536 523 size = remaining_size; 537 - if ((size >= (u64)pages_per_block << PAGE_SHIFT) && 538 - !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1))) 524 + 525 + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) 526 + min_block_size = size; 527 + else if ((size >= (u64)pages_per_block << PAGE_SHIFT) && 528 + !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1))) 539 529 min_block_size = (u64)pages_per_block << PAGE_SHIFT; 540 530 541 531 BUG_ON(min_block_size < mm->chunk_size); ··· 568 552 remaining_size -= size; 569 553 } 570 554 mutex_unlock(&mgr->lock); 555 + 556 + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { 557 + struct drm_buddy_block *dcc_block; 558 + unsigned long dcc_start; 559 + u64 trim_start; 560 + 561 + dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks); 562 + /* Adjust the start address for DCC buffers only */ 563 + dcc_start = 564 + roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block), 565 + adjust_dcc_size); 566 + trim_start = (u64)dcc_start; 567 + drm_buddy_block_trim(mm, &trim_start, 568 + (u64)vres->base.size, 569 + &vres->blocks); 570 + } 571 571 572 572 vres->base.start = 0; 573 573 size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
+27
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 202 202 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) 203 203 }; 204 204 205 + static const struct soc15_reg_golden golden_settings_gc_12_0[] = { 206 + SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f), 207 + SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000), 208 + SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020) 209 + }; 210 + 205 211 #define DEFAULT_SH_MEM_CONFIG \ 206 212 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 207 213 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ ··· 3438 3432 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 3439 3433 } 3440 3434 3435 + static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) 3436 + { 3437 + if (amdgpu_sriov_vf(adev)) 3438 + return; 3439 + 3440 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3441 + case IP_VERSION(12, 0, 0): 3442 + case IP_VERSION(12, 0, 1): 3443 + if (adev->rev_id == 0) 3444 + soc15_program_register_sequence(adev, 3445 + golden_settings_gc_12_0, 3446 + (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); 3447 + break; 3448 + default: 3449 + break; 3450 + } 3451 + } 3452 + 3441 3453 static int gfx_v12_0_hw_init(void *handle) 3442 3454 { 3443 3455 int r; ··· 3495 3471 return r; 3496 3472 } 3497 3473 } 3474 + 3475 + if (!amdgpu_emu_mode) 3476 + gfx_v12_0_init_golden_registers(adev); 3498 3477 3499 3478 adev->gfx.is_poweron = true; 3500 3479
+18
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 542 542 return 0; 543 543 } 544 544 545 + static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev) 546 + { 547 + unsigned int max_tex_channel_caches, alignment; 548 + 549 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) && 550 + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1)) 551 + return 0; 552 + 553 + max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches; 554 + if (is_power_of_2(max_tex_channel_caches)) 555 + alignment = (unsigned int)(max_tex_channel_caches / SZ_4); 556 + else 557 + alignment = roundup_pow_of_two(max_tex_channel_caches); 558 + 559 + return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K); 560 + } 561 + 545 562 static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = { 546 563 .flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb, 547 564 .flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid, ··· 568 551 .get_vm_pde = gmc_v12_0_get_vm_pde, 569 552 .get_vm_pte = gmc_v12_0_get_vm_pte, 570 553 .get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size, 554 + .get_dcc_alignment = gmc_v12_0_get_dcc_alignment, 571 555 }; 572 556 573 557 static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
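Working the gmc_v12_0_get_dcc_alignment() arithmetic through with sample numbers (mine, not from the patch): with max_texture_channel_caches = 16, a power of two, alignment = 16 / SZ_4 = 4 and the function returns 4 * 16 * SZ_1K = 64 KiB; with a hypothetical non-power-of-two count of 24, alignment = roundup_pow_of_two(24) = 32 and the result is 32 * 24 * SZ_1K = 768 KiB.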
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
··· 80 80 /* invalidate using legacy mode on vmid*/ 81 81 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, 82 82 PER_VMID_INVALIDATE_REQ, 1 << vmid); 83 - req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); 83 + /* Only use legacy inv on mmhub side */ 84 + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); 84 85 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); 85 86 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); 86 87 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+4 -3
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
··· 1575 1575 ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | 1576 1576 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | 1577 1577 SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0) | 1578 - SDMA_PKT_COPY_LINEAR_HEADER_CPV((copy_flags & 1579 - (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)) ? 1 : 0); 1578 + SDMA_PKT_COPY_LINEAR_HEADER_CPV(1); 1580 1579 1581 1580 ib->ptr[ib->length_dw++] = byte_count - 1; 1582 1581 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ··· 1589 1590 ((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) | 1590 1591 ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) | 1591 1592 SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1); 1593 + else 1594 + ib->ptr[ib->length_dw++] = 0; 1592 1595 } 1593 1596 1594 1597 /** ··· 1617 1616 1618 1617 static const struct amdgpu_buffer_funcs sdma_v7_0_buffer_funcs = { 1619 1618 .copy_max_bytes = 0x400000, 1620 - .copy_num_dw = 7, 1619 + .copy_num_dw = 8, 1621 1620 .emit_copy_buffer = sdma_v7_0_emit_copy_buffer, 1622 1621 .fill_max_bytes = 0x400000, 1623 1622 .fill_num_dw = 5,
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 1270 1270 } 1271 1271 } 1272 1272 1273 + if (new_stream_on_link_num == 0) 1274 + return false; 1275 + 1273 1276 /* check current_state if there stream on link but it is not in 1274 1277 * new request state 1275 1278 */
+1 -2
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
··· 185 185 else 186 186 copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0; 187 187 188 - 189 - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 188 + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 190 189 191 190 return true; 192 191 }
+2
drivers/gpu/drm/amd/display/dc/dml/Makefile
··· 83 83 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_rcflags) 84 84 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_rcflags) 85 85 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_rcflags) 86 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_rcflags) 87 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_rcflags) 86 88 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags) 87 89 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags) 88 90 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
+2
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 1402 1402 if (hubbub && hubp) { 1403 1403 if (hubbub->funcs->program_det_size) 1404 1404 hubbub->funcs->program_det_size(hubbub, hubp->inst, 0); 1405 + if (hubbub->funcs->program_det_segments) 1406 + hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0); 1405 1407 } 1406 1408 } 1407 1409
+2
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 771 771 if (hubbub && hubp) { 772 772 if (hubbub->funcs->program_det_size) 773 773 hubbub->funcs->program_det_size(hubbub, hubp->inst, 0); 774 + if (hubbub->funcs->program_det_segments) 775 + hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0); 774 776 } 775 777 } 776 778
+1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 723 723 .min_prefetch_in_strobe_ns = 60000, // 60us 724 724 .disable_unbounded_requesting = false, 725 725 .enable_legacy_fast_update = false, 726 + .dcc_meta_propagation_delay_us = 10, 726 727 .fams2_config = { 727 728 .bits = { 728 729 .enable = true,
+3 -1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
··· 138 138 SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \ 139 139 SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \ 140 140 SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \ 141 - HUBP_3DLUT_FL_REG_LIST_DCN401(id) 141 + HUBP_3DLUT_FL_REG_LIST_DCN401(id), \ 142 + SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \ 143 + SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id) 142 144 143 145 /* ABM */ 144 146 #define ABM_DCN401_REG_LIST_RI(id) \
+46 -6
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h
··· 27 27 28 28 #pragma pack(push, 1) 29 29 30 - #define SMU_14_0_2_TABLE_FORMAT_REVISION 3 30 + #define SMU_14_0_2_TABLE_FORMAT_REVISION 23 31 + #define SMU_14_0_2_CUSTOM_TABLE_FORMAT_REVISION 1 31 32 32 33 // POWERPLAYTABLE::ulPlatformCaps 33 34 #define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page. ··· 44 43 #define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 45 44 46 45 #define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD 46 + #define SMU_14_0_2_PP_CUSTOM_OVERDRIVE_VERSION 0x1 47 47 #define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 48 48 49 49 enum SMU_14_0_2_OD_SW_FEATURE_CAP ··· 109 107 SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, 110 108 SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, 111 109 SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, 110 + SMU_14_0_2_PMSETTING_COUNT 112 111 }; 113 112 #define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings 114 113 ··· 130 127 int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings 131 128 }; 132 129 130 + enum smu_14_0_3_pptable_source { 131 + PPTABLE_SOURCE_IFWI = 0, 132 + PPTABLE_SOURCE_DRIVER_HARDCODED = 1, 133 + PPTABLE_SOURCE_PPGEN_REGISTRY = 2, 134 + PPTABLE_SOURCE_MAX = PPTABLE_SOURCE_PPGEN_REGISTRY, 135 + }; 136 + 133 137 struct smu_14_0_2_powerplay_table 134 138 { 135 139 struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen. 136 140 uint8_t table_revision; // PPGen use only: table_revision = 3 137 - uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t). 141 + uint8_t pptable_source; // PPGen UI dropdown box 138 142 uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t) 139 143 uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t. 140 - uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable. 141 - uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t. 142 - uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable. 143 - uint16_t pmfw_board_table_size; // The size of BoardTable_t. 144 + uint16_t pmfw_sku_table_start_offset; // DO NOT CHANGE ORDER; The absolute start offset of the SkuTable_t (within smu_14_0_3_powerplay_table). 145 + uint16_t pmfw_sku_table_size; // DO NOT CHANGE ORDER; The size of SkuTable_t. 146 + uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t 147 + uint16_t pmfw_board_table_size; // The size of BoardTable_t. 144 148 uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable. 145 149 uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t. 
146 150 uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base ··· 167 157 struct smu_14_0_2_overdrive_table overdrive_table; 168 158 169 159 PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes 160 + }; 161 + 162 + enum SMU_14_0_2_CUSTOM_OD_SW_FEATURE_CAP { 163 + SMU_14_0_2_CUSTOM_ODCAP_POWER_MODE = 0, 164 + SMU_14_0_2_CUSTOM_ODCAP_COUNT 165 + }; 166 + 167 + enum SMU_14_0_2_CUSTOM_OD_FEATURE_SETTING_ID { 168 + SMU_14_0_2_CUSTOM_ODSETTING_POWER_MODE = 0, 169 + SMU_14_0_2_CUSTOM_ODSETTING_COUNT, 170 + }; 171 + 172 + struct smu_14_0_2_custom_overdrive_table { 173 + uint8_t revision; 174 + uint8_t reserve[3]; 175 + uint8_t cap[SMU_14_0_2_CUSTOM_ODCAP_COUNT]; 176 + int32_t max[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; 177 + int32_t min[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; 178 + int16_t pm_setting[SMU_14_0_2_PMSETTING_COUNT]; 179 + }; 180 + 181 + struct smu_14_0_3_custom_powerplay_table { 182 + uint8_t custom_table_revision; 183 + uint16_t custom_table_size; 184 + uint16_t custom_sku_table_offset; 185 + uint32_t custom_platform_caps; 186 + uint16_t software_shutdown_temp; 187 + struct smu_14_0_2_custom_overdrive_table custom_overdrive_table; 188 + uint32_t reserve[8]; 189 + CustomSkuTable_t custom_sku_table_pmfw; 170 190 }; 171 191 172 192 #pragma pack(pop)
+4 -11
drivers/gpu/drm/drm_atomic_uapi.c
··· 1071 1071 } 1072 1072 1073 1073 if (async_flip && 1074 - prop != config->prop_fb_id && 1075 - prop != config->prop_in_fence_fd && 1076 - prop != config->prop_fb_damage_clips) { 1074 + (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY || 1075 + (prop != config->prop_fb_id && 1076 + prop != config->prop_in_fence_fd && 1077 + prop != config->prop_fb_damage_clips))) { 1077 1078 ret = drm_atomic_plane_get_property(plane, plane_state, 1078 1079 prop, &old_val); 1079 1080 ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); 1080 - break; 1081 - } 1082 - 1083 - if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) { 1084 - drm_dbg_atomic(prop->dev, 1085 - "[OBJECT:%d] Only primary planes can be changed during async flip\n", 1086 - obj->id); 1087 - ret = -EINVAL; 1088 1081 break; 1089 1082 } 1090 1083
+2 -6
drivers/gpu/drm/drm_bridge_connector.c
··· 443 443 panel_bridge = bridge; 444 444 } 445 445 446 - if (connector_type == DRM_MODE_CONNECTOR_Unknown) { 447 - kfree(bridge_connector); 446 + if (connector_type == DRM_MODE_CONNECTOR_Unknown) 448 447 return ERR_PTR(-EINVAL); 449 - } 450 448 451 449 if (bridge_connector->bridge_hdmi) 452 450 ret = drmm_connector_hdmi_init(drm, connector, ··· 459 461 ret = drmm_connector_init(drm, connector, 460 462 &drm_bridge_connector_funcs, 461 463 connector_type, ddc); 462 - if (ret) { 463 - kfree(bridge_connector); 464 + if (ret) 464 465 return ERR_PTR(ret); 465 - } 466 466 467 467 drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs); 468 468
+23 -2
drivers/gpu/drm/drm_buddy.c
··· 851 851 * drm_buddy_block_trim - free unused pages 852 852 * 853 853 * @mm: DRM buddy manager 854 + * @start: start address to begin the trimming. 854 855 * @new_size: original size requested 855 856 * @blocks: Input and output list of allocated blocks. 856 857 * MUST contain single block as input to be trimmed. ··· 867 866 * 0 on success, error code on failure. 868 867 */ 869 868 int drm_buddy_block_trim(struct drm_buddy *mm, 869 + u64 *start, 870 870 u64 new_size, 871 871 struct list_head *blocks) 872 872 { 873 873 struct drm_buddy_block *parent; 874 874 struct drm_buddy_block *block; 875 + u64 block_start, block_end; 875 876 LIST_HEAD(dfs); 876 877 u64 new_start; 877 878 int err; ··· 884 881 block = list_first_entry(blocks, 885 882 struct drm_buddy_block, 886 883 link); 884 + 885 + block_start = drm_buddy_block_offset(block); 886 + block_end = block_start + drm_buddy_block_size(mm, block); 887 887 888 888 if (WARN_ON(!drm_buddy_block_is_allocated(block))) 889 889 return -EINVAL; ··· 900 894 if (new_size == drm_buddy_block_size(mm, block)) 901 895 return 0; 902 896 897 + new_start = block_start; 898 + if (start) { 899 + new_start = *start; 900 + 901 + if (new_start < block_start) 902 + return -EINVAL; 903 + 904 + if (!IS_ALIGNED(new_start, mm->chunk_size)) 905 + return -EINVAL; 906 + 907 + if (range_overflows(new_start, new_size, block_end)) 908 + return -EINVAL; 909 + } 910 + 903 911 list_del(&block->link); 904 912 mark_free(mm, block); 905 913 mm->avail += drm_buddy_block_size(mm, block); ··· 924 904 parent = block->parent; 925 905 block->parent = NULL; 926 906 927 - new_start = drm_buddy_block_offset(block); 928 907 list_add(&block->tmp_link, &dfs); 929 908 err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); 930 909 if (err) { ··· 1085 1066 } while (1); 1086 1067 1087 1068 /* Trim the allocated block to the required size */ 1088 - if (original_size != size) { 1069 + if (!(flags & DRM_BUDDY_TRIM_DISABLE) && 1070 + original_size != size) { 1089 1071 struct list_head *trim_list; 1090 1072 LIST_HEAD(temp); 1091 1073 u64 trim_size; ··· 1103 1083 } 1104 1084 1105 1085 drm_buddy_block_trim(mm, 1086 + NULL, 1106 1087 trim_size, 1107 1088 trim_list); 1108 1089
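With the new @start parameter, a caller can keep a sub-range that begins at an aligned offset inside an over-sized block rather than at the block's own start; passing NULL preserves the old behaviour (every existing caller was converted that way). A minimal usage sketch modelled on the amdgpu_vram_mgr hunk earlier in this series, error handling elided:

    u64 trim_start = roundup(amdgpu_vram_mgr_block_start(block), alignment);

    /* Keep [trim_start, trim_start + new_size), return the rest to the pool.
     * -EINVAL if trim_start precedes the block, is not chunk-aligned, or the
     * trimmed range would run past the block's end. */
    err = drm_buddy_block_trim(mm, &trim_start, new_size, &blocks);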
+5
drivers/gpu/drm/drm_client_modeset.c
··· 880 880 881 881 kfree(modeset->mode); 882 882 modeset->mode = drm_mode_duplicate(dev, mode); 883 + if (!modeset->mode) { 884 + ret = -ENOMEM; 885 + break; 886 + } 887 + 883 888 drm_connector_get(connector); 884 889 modeset->connectors[modeset->num_connectors++] = connector; 885 890 modeset->x = offset->x;
+3
drivers/gpu/drm/i915/display/intel_backlight.c
··· 1449 1449 1450 1450 static int cnp_num_backlight_controllers(struct drm_i915_private *i915) 1451 1451 { 1452 + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) 1453 + return 2; 1454 + 1452 1455 if (INTEL_PCH_TYPE(i915) >= PCH_DG1) 1453 1456 return 1; 1454 1457
+3
drivers/gpu/drm/i915/display/intel_pps.c
··· 351 351 if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) 352 352 return 2; 353 353 354 + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) 355 + return 2; 356 + 354 357 if (INTEL_PCH_TYPE(i915) >= PCH_DG1) 355 358 return 1; 356 359
+49 -6
drivers/gpu/drm/i915/gem/i915_gem_mman.c
··· 290 290 return i915_error_to_vmf_fault(err); 291 291 } 292 292 293 + static void set_address_limits(struct vm_area_struct *area, 294 + struct i915_vma *vma, 295 + unsigned long obj_offset, 296 + unsigned long *start_vaddr, 297 + unsigned long *end_vaddr) 298 + { 299 + unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ 300 + long start, end; /* memory boundaries */ 301 + 302 + /* 303 + * Let's move into the ">> PAGE_SHIFT" 304 + * domain to be sure not to lose bits 305 + */ 306 + vm_start = area->vm_start >> PAGE_SHIFT; 307 + vm_end = area->vm_end >> PAGE_SHIFT; 308 + vma_size = vma->size >> PAGE_SHIFT; 309 + 310 + /* 311 + * Calculate the memory boundaries by considering the offset 312 + * provided by the user during memory mapping and the offset 313 + * provided for the partial mapping. 314 + */ 315 + start = vm_start; 316 + start -= obj_offset; 317 + start += vma->gtt_view.partial.offset; 318 + end = start + vma_size; 319 + 320 + start = max_t(long, start, vm_start); 321 + end = min_t(long, end, vm_end); 322 + 323 + /* Let's move back into the "<< PAGE_SHIFT" domain */ 324 + *start_vaddr = (unsigned long)start << PAGE_SHIFT; 325 + *end_vaddr = (unsigned long)end << PAGE_SHIFT; 326 + } 327 + 293 328 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) 294 329 { 295 330 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) ··· 337 302 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 338 303 bool write = area->vm_flags & VM_WRITE; 339 304 struct i915_gem_ww_ctx ww; 305 + unsigned long obj_offset; 306 + unsigned long start, end; /* memory boundaries */ 340 307 intel_wakeref_t wakeref; 341 308 struct i915_vma *vma; 342 309 pgoff_t page_offset; 310 + unsigned long pfn; 343 311 int srcu; 344 312 int ret; 345 313 346 - /* We don't use vmf->pgoff since that has the fake offset */ 314 + obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node); 347 315 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; 316 + page_offset += obj_offset; 348 317 349 318 trace_i915_gem_object_fault(obj, page_offset, true, write); 350 319 ··· 441 402 if (ret) 442 403 goto err_unpin; 443 404 405 + set_address_limits(area, vma, obj_offset, &start, &end); 406 + 407 + pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; 408 + pfn += (start - area->vm_start) >> PAGE_SHIFT; 409 + pfn += obj_offset - vma->gtt_view.partial.offset; 410 + 444 411 /* Finally, remap it using the new GTT offset */ 445 - ret = remap_io_mapping(area, 446 - area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT), 447 - (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT, 448 - min_t(u64, vma->size, area->vm_end - area->vm_start), 449 - &ggtt->iomap); 412 + ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap); 450 413 if (ret) 451 414 goto err_fence; 452 415 ··· 1125 1084 mmo = mmap_offset_attach(obj, mmap_type, NULL); 1126 1085 if (IS_ERR(mmo)) 1127 1086 return PTR_ERR(mmo); 1087 + 1088 + vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node); 1128 1089 } 1129 1090 1130 1091 /*
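To make set_address_limits() concrete, a walk-through with illustrative values (hypothetical numbers, all already shifted into the page domain): vm_start = 0x100, vm_end = 0x140, obj_offset = 0x10, partial.offset = 0x18, vma_size = 0x20. Then start = 0x100 - 0x10 + 0x18 = 0x108 and end = 0x108 + 0x20 = 0x128, both inside [vm_start, vm_end], so only the user pages actually backed by the bound partial view get remapped; the clamping against vm_start/vm_end handles views that extend past either edge of the user mapping.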
+7 -6
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 165 165 i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] : 166 166 obj->mm.region, &places[0], obj->bo_offset, 167 167 obj->base.size, flags); 168 - places[0].flags |= TTM_PL_FLAG_DESIRED; 169 168 170 169 /* Cache this on object? */ 171 170 for (i = 0; i < num_allowed; ++i) { ··· 778 779 .interruptible = true, 779 780 .no_wait_gpu = false, 780 781 }; 781 - int real_num_busy; 782 + struct ttm_placement initial_placement; 783 + struct ttm_place initial_place; 782 784 int ret; 783 785 784 786 /* First try only the requested placement. No eviction. */ 785 - real_num_busy = placement->num_placement; 786 - placement->num_placement = 1; 787 - ret = ttm_bo_validate(bo, placement, &ctx); 787 + initial_placement.num_placement = 1; 788 + memcpy(&initial_place, placement->placement, sizeof(struct ttm_place)); 789 + initial_place.flags |= TTM_PL_FLAG_DESIRED; 790 + initial_placement.placement = &initial_place; 791 + ret = ttm_bo_validate(bo, &initial_placement, &ctx); 788 792 if (ret) { 789 793 ret = i915_ttm_err_to_gem(ret); 790 794 /* ··· 802 800 * If the initial attempt fails, allow all accepted placements, 803 801 * evicting if necessary. 804 802 */ 805 - placement->num_placement = real_num_busy; 806 803 ret = ttm_bo_validate(bo, placement, &ctx); 807 804 if (ret) 808 805 return i915_ttm_err_to_gem(ret);
+1
drivers/gpu/drm/omapdrm/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config DRM_OMAP 3 3 tristate "OMAP DRM" 4 + depends on MMU 4 5 depends on DRM && OF 5 6 depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB) 6 7 select DRM_KMS_HELPER
+11
drivers/gpu/drm/tests/drm_gem_shmem_test.c
··· 102 102 103 103 sg_init_one(sgt->sgl, buf, TEST_SIZE); 104 104 105 + /* 106 + * Set the DMA mask to 64-bits and map the sgtables 107 + * otherwise drm_gem_shmem_free will cause a warning 108 + * on debug kernels. 109 + */ 110 + ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64)); 111 + KUNIT_ASSERT_EQ(test, ret, 0); 112 + 113 + ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0); 114 + KUNIT_ASSERT_EQ(test, ret, 0); 115 + 105 116 /* Init a mock DMA-BUF */ 106 117 buf_mock.size = TEST_SIZE; 107 118 attach_mock.dmabuf = &buf_mock;
+2 -1
drivers/gpu/drm/xe/xe_hwmon.c
··· 203 203 reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0); 204 204 reg_val = xe_mmio_read32(hwmon->gt, rapl_limit); 205 205 if (reg_val & PKG_PWR_LIM_1_EN) { 206 + drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n"); 206 207 ret = -EOPNOTSUPP; 207 - goto unlock; 208 208 } 209 + goto unlock; 209 210 } 210 211 211 212 /* Computation in 64-bits to avoid overflow. Round to nearest. */
+14 -1
drivers/gpu/drm/xe/xe_lrc.c
··· 1634 1634 if (!snapshot) 1635 1635 return NULL; 1636 1636 1637 + if (lrc->bo && lrc->bo->vm) 1638 + xe_vm_get(lrc->bo->vm); 1639 + 1637 1640 snapshot->context_desc = xe_lrc_ggtt_addr(lrc); 1638 1641 snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc); 1639 1642 snapshot->head = xe_lrc_ring_head(lrc); ··· 1656 1653 void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot) 1657 1654 { 1658 1655 struct xe_bo *bo; 1656 + struct xe_vm *vm; 1659 1657 struct iosys_map src; 1660 1658 1661 1659 if (!snapshot) 1662 1660 return; 1663 1661 1664 1662 bo = snapshot->lrc_bo; 1663 + vm = bo->vm; 1665 1664 snapshot->lrc_bo = NULL; 1666 1665 1667 1666 snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL); ··· 1683 1678 xe_bo_unlock(bo); 1684 1679 put_bo: 1685 1680 xe_bo_put(bo); 1681 + if (vm) 1682 + xe_vm_put(vm); 1686 1683 } 1687 1684 1688 1685 void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p) ··· 1734 1727 return; 1735 1728 1736 1729 kvfree(snapshot->lrc_snapshot); 1737 - if (snapshot->lrc_bo) 1730 + if (snapshot->lrc_bo) { 1731 + struct xe_vm *vm; 1732 + 1733 + vm = snapshot->lrc_bo->vm; 1738 1734 xe_bo_put(snapshot->lrc_bo); 1735 + if (vm) 1736 + xe_vm_put(vm); 1737 + } 1739 1738 kfree(snapshot); 1740 1739 } 1741 1740
+1 -1
drivers/gpu/drm/xe/xe_rtp.c
··· 231 231 if (first == last) 232 232 bitmap_set(ctx->active_entries, first, 1); 233 233 else 234 - bitmap_set(ctx->active_entries, first, last - first + 2); 234 + bitmap_set(ctx->active_entries, first, last - first + 1); 235 235 } 236 236 237 237 /**
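The xe_rtp change is a classic inclusive-range off-by-one: bitmap_set(map, start, nbits) takes a count, and for the inclusive range [first, last] that count is last - first + 1. With first = 3 and last = 5 the fixed code sets exactly bits 3, 4 and 5, where the old "+ 2" also set bit 6.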
+1 -1
drivers/gpu/drm/xe/xe_sync.c
··· 263 263 if (sync->fence) 264 264 dma_fence_put(sync->fence); 265 265 if (sync->chain_fence) 266 - dma_fence_put(&sync->chain_fence->base); 266 + dma_fence_chain_free(sync->chain_fence); 267 267 if (sync->ufence) 268 268 user_fence_put(sync->ufence); 269 269 }
+1 -1
drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
··· 150 150 } while (remaining_size); 151 151 152 152 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { 153 - if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks)) 153 + if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) 154 154 size = vres->base.size; 155 155 } 156 156
+4 -1
drivers/i2c/busses/i2c-qcom-geni.c
··· 990 990 return ret; 991 991 992 992 ret = geni_se_resources_on(&gi2c->se); 993 - if (ret) 993 + if (ret) { 994 + clk_disable_unprepare(gi2c->core_clk); 995 + geni_icc_disable(&gi2c->se); 994 996 return ret; 997 + } 995 998 996 999 enable_irq(gi2c->irq); 997 1000 gi2c->suspended = 0;
+2 -2
drivers/i2c/i2c-slave-testunit.c
··· 18 18 19 19 enum testunit_cmds { 20 20 TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */ 21 - TU_CMD_HOST_NOTIFY, 21 + TU_CMD_SMBUS_HOST_NOTIFY, 22 22 TU_CMD_SMBUS_BLOCK_PROC_CALL, 23 23 TU_NUM_CMDS 24 24 }; ··· 60 60 msg.len = tu->regs[TU_REG_DATAH]; 61 61 break; 62 62 63 - case TU_CMD_HOST_NOTIFY: 63 + case TU_CMD_SMBUS_HOST_NOTIFY: 64 64 msg.addr = 0x08; 65 65 msg.flags = 0; 66 66 msg.len = 3;
+57 -7
drivers/i2c/i2c-smbus.c
··· 34 34 struct i2c_client *client = i2c_verify_client(dev); 35 35 struct alert_data *data = addrp; 36 36 struct i2c_driver *driver; 37 + int ret; 37 38 38 39 if (!client || client->addr != data->addr) 39 40 return 0; ··· 48 47 device_lock(dev); 49 48 if (client->dev.driver) { 50 49 driver = to_i2c_driver(client->dev.driver); 51 - if (driver->alert) 50 + if (driver->alert) { 51 + /* Stop iterating after we find the device */ 52 52 driver->alert(client, data->type, data->data); 53 - else 53 + ret = -EBUSY; 54 + } else { 54 55 dev_warn(&client->dev, "no driver alert()!\n"); 55 - } else 56 + ret = -EOPNOTSUPP; 57 + } 58 + } else { 56 59 dev_dbg(&client->dev, "alert with no driver\n"); 60 + ret = -ENODEV; 61 + } 57 62 device_unlock(dev); 58 63 59 - /* Stop iterating after we find the device */ 60 - return -EBUSY; 64 + return ret; 65 + } 66 + 67 + /* Same as above, but call back all drivers with alert handler */ 68 + 69 + static int smbus_do_alert_force(struct device *dev, void *addrp) 70 + { 71 + struct i2c_client *client = i2c_verify_client(dev); 72 + struct alert_data *data = addrp; 73 + struct i2c_driver *driver; 74 + 75 + if (!client || (client->flags & I2C_CLIENT_TEN)) 76 + return 0; 77 + 78 + /* 79 + * Drivers should either disable alerts, or provide at least 80 + * a minimal handler. Lock so the driver won't change. 81 + */ 82 + device_lock(dev); 83 + if (client->dev.driver) { 84 + driver = to_i2c_driver(client->dev.driver); 85 + if (driver->alert) 86 + driver->alert(client, data->type, data->data); 87 + } 88 + device_unlock(dev); 89 + 90 + return 0; 61 91 } 62 92 63 93 /* ··· 99 67 { 100 68 struct i2c_smbus_alert *alert = d; 101 69 struct i2c_client *ara; 70 + unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */ 102 71 103 72 ara = alert->ara; 104 73 ··· 127 94 data.addr, data.data); 128 95 129 96 /* Notify driver for the device which issued the alert */ 130 - device_for_each_child(&ara->adapter->dev, &data, 131 - smbus_do_alert); 97 + status = device_for_each_child(&ara->adapter->dev, &data, 98 + smbus_do_alert); 99 + /* 100 + * If we read the same address more than once, and the alert 101 + * was not handled by a driver, it won't do any good to repeat 102 + * the loop because it will never terminate. Try again, this 103 + * time calling the alert handlers of all devices connected to 104 + * the bus, and abort the loop afterwards. If this helps, we 105 + * are all set. If it doesn't, there is nothing else we can do, 106 + * so we might as well abort the loop. 107 + * Note: This assumes that a driver with alert handler handles 108 + * the alert properly and clears it if necessary. 109 + */ 110 + if (data.addr == prev_addr && status != -EBUSY) { 111 + device_for_each_child(&ara->adapter->dev, &data, 112 + smbus_do_alert_force); 113 + break; 114 + } 115 + prev_addr = data.addr; 132 116 } 133 117 134 118 return IRQ_HANDLED;
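A concrete failure the i2c-smbus change guards against (hypothetical address): a device at 0x1a keeps asserting SMBALERT# but has no driver bound. The first ARA read yields 0x1a, and smbus_do_alert() now reports -ENODEV instead of the old unconditional -EBUSY; the second ARA read yields 0x1a again, so data.addr == prev_addr with status != -EBUSY, and the handler escalates to smbus_do_alert_force(), which offers the event to every bound client with an alert() callback, then breaks out rather than spinning forever on the same unhandled address.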
+25 -7
drivers/irqchip/irq-riscv-aplic-msi.c
··· 32 32 aplic_irq_unmask(d); 33 33 } 34 34 35 - static void aplic_msi_irq_eoi(struct irq_data *d) 35 + static void aplic_msi_irq_retrigger_level(struct irq_data *d) 36 36 { 37 37 struct aplic_priv *priv = irq_data_get_irq_chip_data(d); 38 - 39 - /* 40 - * EOI handling is required only for level-triggered interrupts 41 - * when APLIC is in MSI mode. 42 - */ 43 38 44 39 switch (irqd_get_trigger_type(d)) { 45 40 case IRQ_TYPE_LEVEL_LOW: ··· 52 57 writel(d->hwirq, priv->regs + APLIC_SETIPNUM_LE); 53 58 break; 54 59 } 60 + } 61 + 62 + static void aplic_msi_irq_eoi(struct irq_data *d) 63 + { 64 + /* 65 + * EOI handling is required only for level-triggered interrupts 66 + * when APLIC is in MSI mode. 67 + */ 68 + aplic_msi_irq_retrigger_level(d); 69 + } 70 + 71 + static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type) 72 + { 73 + int rc = aplic_irq_set_type(d, type); 74 + 75 + if (rc) 76 + return rc; 77 + /* 78 + * Updating sourcecfg register for level-triggered interrupts 79 + * requires interrupt retriggering when APLIC is in MSI mode. 80 + */ 81 + aplic_msi_irq_retrigger_level(d); 82 + return 0; 55 83 } 56 84 57 85 static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg) ··· 148 130 .name = "APLIC-MSI", 149 131 .irq_mask = aplic_msi_irq_mask, 150 132 .irq_unmask = aplic_msi_irq_unmask, 151 - .irq_set_type = aplic_irq_set_type, 133 + .irq_set_type = aplic_msi_irq_set_type, 152 134 .irq_eoi = aplic_msi_irq_eoi, 153 135 #ifdef CONFIG_SMP 154 136 .irq_set_affinity = irq_chip_set_affinity_parent,
+1 -1
drivers/irqchip/irq-xilinx-intc.c
··· 189 189 irqc->intr_mask = 0; 190 190 } 191 191 192 - if (irqc->intr_mask >> irqc->nr_irq) 192 + if ((u64)irqc->intr_mask >> irqc->nr_irq) 193 193 pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); 194 194 195 195 pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
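The one-character xilinx fix is about C shift semantics: intr_mask is a u32, and when nr_irq is 32 the expression intr_mask >> nr_irq shifts a 32-bit value by its full width, which is undefined behaviour (on x86 the shift count is masked, typically returning the value unshifted and making the warning fire spuriously). Widening first makes the shift well defined:

    u32 mask = 0xffffffff;
    /* mask >> 32        : undefined, shift count == type width */
    /* (u64)mask >> 32   : well defined, yields 0 as intended   */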
+4 -31
drivers/media/usb/dvb-usb/dvb-usb-init.c
··· 23 23 module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage, int, 0444); 24 24 MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID filter, if any (default: 0)."); 25 25 26 - static int dvb_usb_check_bulk_endpoint(struct dvb_usb_device *d, u8 endpoint) 27 - { 28 - if (endpoint) { 29 - int ret; 30 - 31 - ret = usb_pipe_type_check(d->udev, usb_sndbulkpipe(d->udev, endpoint)); 32 - if (ret) 33 - return ret; 34 - ret = usb_pipe_type_check(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); 35 - if (ret) 36 - return ret; 37 - } 38 - return 0; 39 - } 40 - 41 - static void dvb_usb_clear_halt(struct dvb_usb_device *d, u8 endpoint) 42 - { 43 - if (endpoint) { 44 - usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, endpoint)); 45 - usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); 46 - } 47 - } 48 - 49 26 static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) 50 27 { 51 28 struct dvb_usb_adapter *adap; 52 29 int ret, n, o; 53 30 54 - ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint); 55 - if (ret) 56 - return ret; 57 - ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint_response); 58 - if (ret) 59 - return ret; 60 31 for (n = 0; n < d->props.num_adapters; n++) { 61 32 adap = &d->adapter[n]; 62 33 adap->dev = d; ··· 103 132 * when reloading the driver w/o replugging the device 104 133 * sometimes a timeout occurs, this helps 105 134 */ 106 - dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint); 107 - dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint_response); 135 + if (d->props.generic_bulk_ctrl_endpoint != 0) { 136 + usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); 137 + usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); 138 + } 108 139 109 140 return 0; 110 141
+1 -1
drivers/misc/Kconfig
··· 587 587 588 588 config MARVELL_CN10K_DPI 589 589 tristate "Octeon CN10K DPI driver" 590 - depends on PCI 590 + depends on PCI && PCI_IOV 591 591 depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT) 592 592 help 593 593 Enables Octeon CN10K DMA packet interface (DPI) driver which
+51 -34
drivers/misc/eeprom/ee1004.c
··· 233 233 mutex_unlock(&ee1004_bus_lock); 234 234 } 235 235 236 + static int ee1004_init_bus_data(struct i2c_client *client) 237 + { 238 + struct ee1004_bus_data *bd; 239 + int err, cnr = 0; 240 + 241 + bd = ee1004_get_bus_data(client->adapter); 242 + if (!bd) 243 + return dev_err_probe(&client->dev, -ENOSPC, "Only %d busses supported", 244 + EE1004_MAX_BUSSES); 245 + 246 + i2c_set_clientdata(client, bd); 247 + 248 + if (++bd->dev_count == 1) { 249 + /* Use 2 dummy devices for page select command */ 250 + for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) { 251 + struct i2c_client *cl; 252 + 253 + cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr); 254 + if (IS_ERR(cl)) { 255 + err = PTR_ERR(cl); 256 + goto err_out; 257 + } 258 + 259 + bd->set_page[cnr] = cl; 260 + } 261 + 262 + /* Remember current page to avoid unneeded page select */ 263 + err = ee1004_get_current_page(bd); 264 + if (err < 0) 265 + goto err_out; 266 + 267 + dev_dbg(&client->dev, "Currently selected page: %d\n", err); 268 + bd->current_page = err; 269 + } 270 + 271 + return 0; 272 + 273 + err_out: 274 + ee1004_cleanup(cnr, bd); 275 + 276 + return err; 277 + } 278 + 236 279 static int ee1004_probe(struct i2c_client *client) 237 280 { 238 281 struct nvmem_config config = { ··· 294 251 .compat = true, 295 252 .base_dev = &client->dev, 296 253 }; 297 - struct ee1004_bus_data *bd; 298 254 struct nvmem_device *ndev; 299 - int err, cnr = 0; 255 + int err; 300 256 301 257 /* Make sure we can operate on this adapter */ 302 258 if (!i2c_check_functionality(client->adapter, ··· 306 264 307 265 mutex_lock(&ee1004_bus_lock); 308 266 309 - bd = ee1004_get_bus_data(client->adapter); 310 - if (!bd) { 267 + err = ee1004_init_bus_data(client); 268 + if (err < 0) { 311 269 mutex_unlock(&ee1004_bus_lock); 312 - return dev_err_probe(&client->dev, -ENOSPC, 313 - "Only %d busses supported", EE1004_MAX_BUSSES); 314 - } 315 - 316 - err = devm_add_action_or_reset(&client->dev, ee1004_cleanup_bus_data, bd); 317 - if (err < 0) 318 270 return err; 319 - 320 - i2c_set_clientdata(client, bd); 321 - 322 - if (++bd->dev_count == 1) { 323 - /* Use 2 dummy devices for page select command */ 324 - for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) { 325 - struct i2c_client *cl; 326 - 327 - cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr); 328 - if (IS_ERR(cl)) { 329 - mutex_unlock(&ee1004_bus_lock); 330 - return PTR_ERR(cl); 331 - } 332 - bd->set_page[cnr] = cl; 333 - } 334 - 335 - /* Remember current page to avoid unneeded page select */ 336 - err = ee1004_get_current_page(bd); 337 - if (err < 0) { 338 - mutex_unlock(&ee1004_bus_lock); 339 - return err; 340 - } 341 - dev_dbg(&client->dev, "Currently selected page: %d\n", err); 342 - bd->current_page = err; 343 271 } 344 272 345 273 ee1004_probe_temp_sensor(client); 346 274 347 275 mutex_unlock(&ee1004_bus_lock); 276 + 277 + err = devm_add_action_or_reset(&client->dev, ee1004_cleanup_bus_data, 278 + i2c_get_clientdata(client)); 279 + if (err < 0) 280 + return err; 348 281 349 282 ndev = devm_nvmem_register(&client->dev, &config); 350 283 if (IS_ERR(ndev))
+38 -13
drivers/net/dsa/vitesse-vsc73xx-core.c
··· 248 248 #define VSC73XX_MII_MPRES_PRESCALEVAL GENMASK(5, 0) 249 249 #define VSC73XX_MII_PRESCALEVAL_MIN 3 /* min allowed mdio clock prescaler */ 250 250 251 + #define VSC73XX_MII_STAT_BUSY BIT(3) 252 + 251 253 /* Arbiter block 5 registers */ 252 254 #define VSC73XX_ARBEMPTY 0x0c 253 255 #define VSC73XX_ARBDISC 0x0e ··· 324 322 #define IS_739X(a) (IS_7395(a) || IS_7398(a)) 325 323 326 324 #define VSC73XX_POLL_SLEEP_US 1000 325 + #define VSC73XX_MDIO_POLL_SLEEP_US 5 327 326 #define VSC73XX_POLL_TIMEOUT_US 10000 328 327 329 328 struct vsc73xx_counter { ··· 553 550 return 0; 554 551 } 555 552 553 + static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc) 554 + { 555 + int ret, err; 556 + u32 val; 557 + 558 + ret = read_poll_timeout(vsc73xx_read, err, 559 + err < 0 || !(val & VSC73XX_MII_STAT_BUSY), 560 + VSC73XX_MDIO_POLL_SLEEP_US, 561 + VSC73XX_POLL_TIMEOUT_US, false, vsc, 562 + VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, 563 + VSC73XX_MII_STAT, &val); 564 + if (ret) 565 + return ret; 566 + return err; 567 + } 568 + 556 569 static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) 557 570 { 558 571 struct vsc73xx *vsc = ds->priv; 559 572 u32 cmd; 560 573 u32 val; 561 574 int ret; 575 + 576 + ret = vsc73xx_mdio_busy_check(vsc); 577 + if (ret) 578 + return ret; 562 579 563 580 /* Setting bit 26 means "read" */ 564 581 cmd = VSC73XX_MII_CMD_OPERATION | ··· 588 565 VSC73XX_MII_CMD, cmd); 589 566 if (ret) 590 567 return ret; 591 - msleep(2); 568 + 569 + ret = vsc73xx_mdio_busy_check(vsc); 570 + if (ret) 571 + return ret; 572 + 592 573 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, 593 574 VSC73XX_MII_DATA, &val); 594 575 if (ret) ··· 617 590 u32 cmd; 618 591 int ret; 619 592 620 - /* It was found through tedious experiments that this router 621 - * chip really hates to have it's PHYs reset. They 622 - * never recover if that happens: autonegotiation stops 623 - * working after a reset. Just filter out this command. 624 - * (Resetting the whole chip is OK.) 625 - */ 626 - if (regnum == 0 && (val & BIT(15))) { 627 - dev_info(vsc->dev, "reset PHY - disallowed\n"); 628 - return 0; 629 - } 593 + ret = vsc73xx_mdio_busy_check(vsc); 594 + if (ret) 595 + return ret; 630 596 631 597 cmd = FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) | 632 - FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum); 598 + FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum) | 599 + FIELD_PREP(VSC73XX_MII_CMD_WRITE_DATA, val); 633 600 ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, 634 601 VSC73XX_MII_CMD, cmd); 635 602 if (ret) ··· 1078 1057 1079 1058 if (duplex == DUPLEX_FULL) 1080 1059 val |= VSC73XX_MAC_CFG_FDX; 1060 + else 1061 + /* In datasheet description ("Port Mode Procedure" in 5.6.2) 1062 + * this bit is configured only for half duplex. 1063 + */ 1064 + val |= VSC73XX_MAC_CFG_WEXC_DIS; 1081 1065 1082 1066 /* This routine is described in the datasheet (below ARBDISC register 1083 1067 * description) ··· 1093 1067 get_random_bytes(&seed, 1); 1094 1068 val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET; 1095 1069 val |= VSC73XX_MAC_CFG_SEED_LOAD; 1096 - val |= VSC73XX_MAC_CFG_WEXC_DIS; 1097 1070 1098 1071 /* Those bits are responsible for MTU only. Kernel takes care about MTU, 1099 1072 * let's enable +8 bytes frame length unconditionally.
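vsc73xx_mdio_busy_check() replaces the old fixed msleep(2) with read_poll_timeout() from <linux/iopoll.h>, which re-runs an accessor until a condition holds or a timeout expires. Note the two error paths: the macro's own return (ret) is -ETIMEDOUT on expiry, while the accessor's return value (err) is folded into the condition so a failed register read aborts the poll immediately. Schematically:

    /* read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before, args...)
     * repeatedly does: val = op(args...); stop once cond is true.
     * Above: op = vsc73xx_read, val = err, and cond bails on err < 0
     * as well as on the BUSY bit clearing in the register value.
     */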
+2 -2
drivers/net/ethernet/cadence/macb_main.c
··· 5250 5250 if (bp->wol & MACB_WOL_ENABLED) { 5251 5251 /* Check for IP address in WOL ARP mode */ 5252 5252 idev = __in_dev_get_rcu(bp->dev); 5253 - if (idev && idev->ifa_list) 5254 - ifa = rcu_access_pointer(idev->ifa_list); 5253 + if (idev) 5254 + ifa = rcu_dereference(idev->ifa_list); 5255 5255 if ((bp->wolopts & WAKE_ARP) && !ifa) { 5256 5256 netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n"); 5257 5257 return -EOPNOTSUPP;
+21 -9
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 1054 1054 1055 1055 static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) 1056 1056 { 1057 - struct lmac *lmac, **priv; 1057 + struct lmac *lmac; 1058 1058 u64 cfg; 1059 1059 1060 1060 lmac = &bgx->lmac[lmacid]; 1061 1061 lmac->bgx = bgx; 1062 - 1063 - lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *)); 1064 - if (!lmac->netdev) 1065 - return -ENOMEM; 1066 - priv = netdev_priv(lmac->netdev); 1067 - *priv = lmac; 1068 1062 1069 1063 if ((lmac->lmac_type == BGX_MODE_SGMII) || 1070 1064 (lmac->lmac_type == BGX_MODE_QSGMII) || ··· 1185 1191 (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) 1186 1192 phy_disconnect(lmac->phydev); 1187 1193 1188 - free_netdev(lmac->netdev); 1189 1194 lmac->phydev = NULL; 1190 1195 } 1191 1196 ··· 1646 1653 1647 1654 bgx_get_qlm_mode(bgx); 1648 1655 1656 + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { 1657 + struct lmac *lmacp, **priv; 1658 + 1659 + lmacp = &bgx->lmac[lmac]; 1660 + lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *)); 1661 + 1662 + if (!lmacp->netdev) { 1663 + for (int i = 0; i < lmac; i++) 1664 + free_netdev(bgx->lmac[i].netdev); 1665 + err = -ENOMEM; 1666 + goto err_enable; 1667 + } 1668 + 1669 + priv = netdev_priv(lmacp->netdev); 1670 + *priv = lmacp; 1671 + } 1672 + 1649 1673 err = bgx_init_phy(bgx); 1650 1674 if (err) 1651 1675 goto err_enable; ··· 1702 1692 u8 lmac; 1703 1693 1704 1694 /* Disable all LMACs */ 1705 - for (lmac = 0; lmac < bgx->lmac_count; lmac++) 1695 + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { 1706 1696 bgx_lmac_disable(bgx, lmac); 1697 + free_netdev(bgx->lmac[lmac].netdev); 1698 + } 1707 1699 1708 1700 pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); 1709 1701
+3
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 5724 5724 struct net_device *netdev = handle->kinfo.netdev; 5725 5725 struct hns3_nic_priv *priv = netdev_priv(netdev); 5726 5726 5727 + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 5728 + hns3_nic_net_stop(netdev); 5729 + 5727 5730 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 5728 5731 netdev_warn(netdev, "already uninitialized\n"); 5729 5732 return 0;
+3 -3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
··· 1598 1598 { 1599 1599 u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0}; 1600 1600 struct hclge_mod_reg_common_msg msg; 1601 - u8 i, j, num; 1602 - u32 loop_time; 1601 + u8 i, j, num, loop_time; 1603 1602 1604 1603 num = ARRAY_SIZE(hclge_ssu_reg_common_msg); 1605 1604 for (i = 0; i < num; i++) { ··· 1608 1609 loop_time = 1; 1609 1610 loop_para[0] = 0; 1610 1611 if (msg.need_para) { 1611 - loop_time = hdev->ae_dev->dev_specs.tnl_num; 1612 + loop_time = min(hdev->ae_dev->dev_specs.tnl_num, 1613 + HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE); 1612 1614 for (j = 0; j < loop_time; j++) 1613 1615 loop_para[j] = j + 1; 1614 1616 }
+21 -9
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 2653 2653 { 2654 2654 struct hclge_vport *vport = hclge_get_vport(handle); 2655 2655 struct hclge_dev *hdev = vport->back; 2656 + int ret; 2656 2657 2657 - return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); 2658 + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); 2659 + 2660 + if (ret) 2661 + return ret; 2662 + 2663 + hdev->hw.mac.req_speed = speed; 2664 + hdev->hw.mac.req_duplex = duplex; 2665 + 2666 + return 0; 2658 2667 } 2659 2668 2660 2669 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) ··· 2965 2956 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2966 2957 hdev->hw.mac.duplex = HCLGE_MAC_FULL; 2967 2958 2968 - ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, 2969 - hdev->hw.mac.duplex, hdev->hw.mac.lane_num); 2970 - if (ret) 2971 - return ret; 2972 - 2973 2959 if (hdev->hw.mac.support_autoneg) { 2974 2960 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); 2961 + if (ret) 2962 + return ret; 2963 + } 2964 + 2965 + if (!hdev->hw.mac.autoneg) { 2966 + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed, 2967 + hdev->hw.mac.req_duplex, 2968 + hdev->hw.mac.lane_num); 2975 2969 if (ret) 2976 2970 return ret; 2977 2971 } ··· 11456 11444 11457 11445 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11458 11446 pci_free_irq_vectors(pdev); 11459 - pci_release_mem_regions(pdev); 11447 + pci_release_regions(pdev); 11460 11448 pci_disable_device(pdev); 11461 11449 } 11462 11450 ··· 11528 11516 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); 11529 11517 11530 11518 hdev->reset_type = HNAE3_NONE_RESET; 11531 - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11532 - up(&hdev->reset_sem); 11519 + if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11520 + up(&hdev->reset_sem); 11533 11521 } 11534 11522 11535 11523 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
··· 191 191 if (ret) 192 192 netdev_err(netdev, "failed to adjust link.\n"); 193 193 194 + hdev->hw.mac.req_speed = (u32)speed; 195 + hdev->hw.mac.req_duplex = (u8)duplex; 196 + 194 197 ret = hclge_cfg_flowctrl(hdev); 195 198 if (ret) 196 199 netdev_err(netdev, "failed to configure flow control.\n");
+2 -2
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 1747 1747 ret); 1748 1748 1749 1749 hdev->reset_type = HNAE3_NONE_RESET; 1750 - clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1751 - up(&hdev->reset_sem); 1750 + if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1751 + up(&hdev->reset_sem); 1752 1752 } 1753 1753 1754 1754 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+6
drivers/net/ethernet/intel/igc/igc_defines.h
··· 404 404 #define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ 405 405 #define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ 406 406 407 + /* Retry Buffer Control */ 408 + #define IGC_RETX_CTL 0x041C 409 + #define IGC_RETX_CTL_WATERMARK_MASK 0xF 410 + #define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */ 411 + #define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */ 412 + 407 413 /* Transmit Scheduling Latency */ 408 414 /* Latency between transmission scheduling (LaunchTime) and the time 409 415 * the packet is transmitted to the network in nanosecond.
+6 -2
drivers/net/ethernet/intel/igc/igc_main.c
··· 6315 6315 if (!validate_schedule(adapter, qopt)) 6316 6316 return -EINVAL; 6317 6317 6318 + igc_ptp_read(adapter, &now); 6319 + 6320 + if (igc_tsn_is_taprio_activated_by_user(adapter) && 6321 + is_base_time_past(qopt->base_time, &now)) 6322 + adapter->qbv_config_change_errors++; 6323 + 6318 6324 adapter->cycle_time = qopt->cycle_time; 6319 6325 adapter->base_time = qopt->base_time; 6320 6326 adapter->taprio_offload_enable = true; 6321 - 6322 - igc_ptp_read(adapter, &now); 6323 6327 6324 6328 for (n = 0; n < qopt->num_entries; n++) { 6325 6329 struct tc_taprio_sched_entry *e = &qopt->entries[n];
+62 -14
drivers/net/ethernet/intel/igc/igc_tsn.c
··· 49 49 return new_flags; 50 50 } 51 51 52 + static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter) 53 + { 54 + struct igc_hw *hw = &adapter->hw; 55 + 56 + return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN); 57 + } 58 + 52 59 void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) 53 60 { 54 61 struct igc_hw *hw = &adapter->hw; 55 62 u16 txoffset; 56 63 57 - if (!is_any_launchtime(adapter)) 64 + if (!igc_tsn_is_tx_mode_in_tsn(adapter)) 58 65 return; 59 66 60 67 switch (adapter->link_speed) { ··· 85 78 wr32(IGC_GTXOFFSET, txoffset); 86 79 } 87 80 81 + static void igc_tsn_restore_retx_default(struct igc_adapter *adapter) 82 + { 83 + struct igc_hw *hw = &adapter->hw; 84 + u32 retxctl; 85 + 86 + retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK; 87 + wr32(IGC_RETX_CTL, retxctl); 88 + } 89 + 90 + bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter) 91 + { 92 + struct igc_hw *hw = &adapter->hw; 93 + 94 + return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && 95 + adapter->taprio_offload_enable; 96 + } 97 + 88 98 /* Returns the TSN specific registers to their default values after 89 99 * the adapter is reset. 90 100 */ ··· 114 90 wr32(IGC_GTXOFFSET, 0); 115 91 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); 116 92 wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); 93 + 94 + if (igc_is_device_id_i226(hw)) 95 + igc_tsn_restore_retx_default(adapter); 117 96 118 97 tqavctrl = rd32(IGC_TQAVCTRL); 119 98 tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | ··· 138 111 return 0; 139 112 } 140 113 114 + /* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes 115 + * to 88 Bytes by setting RETX_CTL register using the recommendation from: 116 + * a) Ethernet Controller I225/I226 Specification Update Rev 2.1 117 + * Item 9: TSN: Packet Transmission Might Cross the Qbv Window 118 + * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control 119 + */ 120 + static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter) 121 + { 122 + struct igc_hw *hw = &adapter->hw; 123 + u32 retxctl, watermark; 124 + 125 + retxctl = rd32(IGC_RETX_CTL); 126 + watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK; 127 + /* Set QBVFULLTH value using watermark and set QBVFULLEN */ 128 + retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) | 129 + IGC_RETX_CTL_QBVFULLEN; 130 + wr32(IGC_RETX_CTL, retxctl); 131 + } 132 + 141 133 static int igc_tsn_enable_offload(struct igc_adapter *adapter) 142 134 { 143 135 struct igc_hw *hw = &adapter->hw; ··· 168 122 wr32(IGC_TSAUXC, 0); 169 123 wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); 170 124 wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); 125 + 126 + if (igc_is_device_id_i226(hw)) 127 + igc_tsn_set_retx_qbvfullthreshold(adapter); 171 128 172 129 for (i = 0; i < adapter->num_tx_queues; i++) { 173 130 struct igc_ring *ring = adapter->tx_ring[i]; ··· 311 262 s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); 312 263 313 264 base_time = ktime_add_ns(base_time, (n + 1) * cycle); 314 - 315 - /* Increase the counter if scheduling into the past while 316 - * Gate Control List (GCL) is running. 
317 - */ 318 - if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && 319 - (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) && 320 - (adapter->qbv_count > 1)) 321 - adapter->qbv_config_change_errors++; 322 265 } else { 323 266 if (igc_is_device_id_i226(hw)) { 324 267 ktime_t adjust_time, expires_time; ··· 372 331 return err; 373 332 } 374 333 334 + static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter) 335 + { 336 + bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) & 337 + IGC_FLAG_TSN_ANY_ENABLED); 338 + 339 + return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) || 340 + (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter)); 341 + } 342 + 375 343 int igc_tsn_offload_apply(struct igc_adapter *adapter) 376 344 { 377 - struct igc_hw *hw = &adapter->hw; 378 - 379 - /* Per I225/6 HW Design Section 7.5.2.1, transmit mode 380 - * cannot be changed dynamically. Require reset the adapter. 345 + /* Per I225/6 HW Design Section 7.5.2.1 guideline, if tx mode change 346 + * from legacy->tsn or tsn->legacy, then reset adapter is needed. 381 347 */ 382 348 if (netif_running(adapter->netdev) && 383 - (igc_is_device_id_i225(hw) || !adapter->qbv_count)) { 349 + igc_tsn_will_tx_mode_change(adapter)) { 384 350 schedule_work(&adapter->reset_task); 385 351 return 0; 386 352 }
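The RETX_CTL workaround is a read-modify-write: keep the hardware's watermark (bits 3:0, IGC_RETX_CTL_WATERMARK_MASK), replicate it into the QBVFULLTH field at bit 8, and set QBVFULLEN (bit 12). With a hypothetical readback watermark of 0x6, the register gains (0x6 << 8) | 0x1000. The restore path in igc_tsn_restore_retx_default() masks everything back down to the watermark bits, clearing both the threshold and the enable.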
+1
drivers/net/ethernet/intel/igc/igc_tsn.h
··· 7 7 int igc_tsn_offload_apply(struct igc_adapter *adapter); 8 8 int igc_tsn_reset(struct igc_adapter *adapter); 9 9 void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter); 10 + bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter); 10 11 11 12 #endif /* _IGC_BASE_H */
+4 -6
drivers/net/ethernet/jme.c
··· 946 946 if (skb->protocol != htons(ETH_P_IP)) 947 947 return csum; 948 948 skb_set_network_header(skb, ETH_HLEN); 949 - if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || 950 - (skb->len < (ETH_HLEN + 951 - (ip_hdr(skb)->ihl << 2) + 952 - sizeof(struct udphdr)))) { 949 + 950 + if (ip_hdr(skb)->protocol != IPPROTO_UDP || 951 + skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) { 953 952 skb_reset_network_header(skb); 954 953 return csum; 955 954 } 956 - skb_set_transport_header(skb, 957 - ETH_HLEN + (ip_hdr(skb)->ihl << 2)); 955 + skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb)); 958 956 csum = udp_hdr(skb)->check; 959 957 skb_reset_transport_header(skb); 960 958 skb_reset_network_header(skb);
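
The jme.c rewrite relies on ip_hdrlen() being exactly the IHL words-to-bytes conversion the old code open-coded. A standalone check of that equivalence (plain C model, not the kernel helper):

#include <stdio.h>

/* IHL counts the IPv4 header length in 32-bit words, so the byte
 * length is ihl * 4 -- the same value as the removed "ihl << 2". */
static unsigned int ip_hdrlen_model(unsigned int ihl)
{
	return ihl << 2;
}

int main(void)
{
	printf("%u\n", ip_hdrlen_model(5));  /* 20: minimal IPv4 header  */
	printf("%u\n", ip_hdrlen_model(15)); /* 60: maximum with options */
	return 0;
}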
+4 -2
drivers/net/ethernet/mediatek/mtk_wed.c
··· 2666 2666 { 2667 2667 struct mtk_wed_flow_block_priv *priv = cb_priv; 2668 2668 struct flow_cls_offload *cls = type_data; 2669 - struct mtk_wed_hw *hw = priv->hw; 2669 + struct mtk_wed_hw *hw = NULL; 2670 2670 2671 - if (!tc_can_offload(priv->dev)) 2671 + if (!priv || !tc_can_offload(priv->dev)) 2672 2672 return -EOPNOTSUPP; 2673 2673 2674 2674 if (type != TC_SETUP_CLSFLOWER) 2675 2675 return -EOPNOTSUPP; 2676 2676 2677 + hw = priv->hw; 2677 2678 return mtk_flow_offload_cmd(hw->eth, cls, hw->index); 2678 2679 } 2679 2680 ··· 2730 2729 flow_block_cb_remove(block_cb, f); 2731 2730 list_del(&block_cb->driver_list); 2732 2731 kfree(block_cb->cb_priv); 2732 + block_cb->cb_priv = NULL; 2733 2733 } 2734 2734 return 0; 2735 2735 default:
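
The mtk_wed fix pairs a NULL check on cb_priv with clearing the pointer after kfree(), so a callback racing with teardown sees NULL instead of freed memory. A sketch of the pattern in plain C (illustrative types, simplified to a single thread):

#include <stdio.h>
#include <stdlib.h>

struct cb_state { int hw_index; };
struct block { struct cb_state *cb_priv; };

/* Callback side: bail out when teardown already ran. */
static int cb(struct block *b)
{
	if (!b->cb_priv)
		return -1; /* stands in for -EOPNOTSUPP */
	return b->cb_priv->hw_index;
}

/* Teardown side: free, then clear, so a late callback fails safely
 * instead of dereferencing freed memory. */
static void teardown(struct block *b)
{
	free(b->cb_priv);
	b->cb_priv = NULL;
}

int main(void)
{
	struct block b = { .cb_priv = calloc(1, sizeof(*b.cb_priv)) };

	b.cb_priv->hw_index = 3;
	printf("%d\n", cb(&b)); /* 3 */
	teardown(&b);
	printf("%d\n", cb(&b)); /* -1, no use-after-free */
	return 0;
}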
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 130 130 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 131 131 132 132 #define MLX5E_DEFAULT_LRO_TIMEOUT 32 133 - #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 133 + #define MLX5E_DEFAULT_SHAMPO_TIMEOUT 1024 134 134 135 135 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 136 136 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
+15 -1
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 928 928 MLX5_SET(wq, wq, log_headers_entry_size, 929 929 mlx5e_shampo_get_log_hd_entry_size(mdev, params)); 930 930 MLX5_SET(rqc, rqc, reservation_timeout, 931 - params->packet_merge.timeout); 931 + mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT)); 932 932 MLX5_SET(rqc, rqc, shampo_match_criteria_type, 933 933 params->packet_merge.shampo.match_criteria_type); 934 934 MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity, ··· 1085 1085 wqebbs += MLX5E_KSM_UMR_WQEBBS(rest); 1086 1086 wqebbs *= wq_size; 1087 1087 return wqebbs; 1088 + } 1089 + 1090 + #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 1091 + 1092 + u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) 1093 + { 1094 + int i; 1095 + 1096 + /* The supported periods are organized in ascending order */ 1097 + for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++) 1098 + if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout) 1099 + break; 1100 + 1101 + return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); 1088 1102 } 1089 1103 1090 1104 static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
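
The moved mlx5e_choose_lro_timeout() scans the firmware's ascending list of supported periods and returns the first one at least as large as the request, falling back to the largest entry. A userspace model under that assumption (illustrative period values):

#include <stdio.h>

#define NUM_PERIODS 4

/* Pick the smallest supported period >= wanted; if none is large
 * enough, the loop stops on the last (largest) entry. */
static unsigned int choose_timeout(const unsigned int periods[NUM_PERIODS],
				   unsigned int wanted)
{
	int i;

	for (i = 0; i < NUM_PERIODS - 1; i++)
		if (periods[i] >= wanted)
			break;
	return periods[i];
}

int main(void)
{
	const unsigned int periods[NUM_PERIODS] = { 8, 64, 512, 1024 };

	printf("%u\n", choose_timeout(periods, 32));   /* 64            */
	printf("%u\n", choose_timeout(periods, 1024)); /* 1024          */
	printf("%u\n", choose_timeout(periods, 4096)); /* 1024, clamped */
	return 0;
}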
+1
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
··· 108 108 u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev, 109 109 struct mlx5e_params *params, 110 110 struct mlx5e_rq_param *rq_param); 111 + u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); 111 112 u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, 112 113 struct mlx5e_params *params, 113 114 struct mlx5e_xsk_param *xsk);
+2
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
··· 146 146 return err; 147 147 } 148 148 149 + mutex_lock(&priv->state_lock); 149 150 err = mlx5e_safe_reopen_channels(priv); 151 + mutex_unlock(&priv->state_lock); 150 152 if (!err) { 151 153 to_ctx->status = 1; /* all channels recovered */ 152 154 return err;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
··· 734 734 if (num_tuples <= 0) { 735 735 netdev_warn(priv->netdev, "%s: flow is not valid %d\n", 736 736 __func__, num_tuples); 737 - return num_tuples; 737 + return num_tuples < 0 ? num_tuples : -EINVAL; 738 738 } 739 739 740 740 eth_ft = get_flow_table(priv, fs, num_tuples);
+4 -13
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 5167 5167 #endif 5168 5168 }; 5169 5169 5170 - static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) 5171 - { 5172 - int i; 5173 - 5174 - /* The supported periods are organized in ascending order */ 5175 - for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++) 5176 - if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout) 5177 - break; 5178 - 5179 - return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); 5180 - } 5181 - 5182 5170 void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu) 5183 5171 { 5184 5172 struct mlx5e_params *params = &priv->channels.params; ··· 5296 5308 struct mlx5e_rq_stats *rq_stats; 5297 5309 5298 5310 ASSERT_RTNL(); 5299 - if (mlx5e_is_uplink_rep(priv)) 5311 + if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch) 5300 5312 return; 5301 5313 5302 5314 channel_stats = priv->channel_stats[i]; ··· 5316 5328 struct mlx5e_sq_stats *sq_stats; 5317 5329 5318 5330 ASSERT_RTNL(); 5331 + if (!priv->stats_nch) 5332 + return; 5333 + 5319 5334 /* no special case needed for ptp htb etc since txq2sq_stats is kept up 5320 5335 * to date for active sq_stats, otherwise get_base_stats takes care of 5321 5336 * inactive sqs.
+9 -9
drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
··· 126 126 } 127 127 128 128 static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm, 129 - u8 *host_buses, u8 *sd_group) 129 + u8 *host_buses) 130 130 { 131 131 u32 out[MLX5_ST_SZ_DW(mpir_reg)]; 132 132 int err; 133 133 134 134 err = mlx5_query_mpir_reg(dev, out); 135 - if (err) 136 - return err; 137 - 138 - err = mlx5_query_nic_vport_sd_group(dev, sd_group); 139 135 if (err) 140 136 return err; 141 137 ··· 162 166 if (mlx5_core_is_ecpf(dev)) 163 167 return 0; 164 168 169 + err = mlx5_query_nic_vport_sd_group(dev, &sd_group); 170 + if (err) 171 + return err; 172 + 173 + if (!sd_group) 174 + return 0; 175 + 165 176 if (!MLX5_CAP_MCAM_REG(dev, mpir)) 166 177 return 0; 167 178 168 - err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group); 179 + err = mlx5_query_sd(dev, &sdm, &host_buses); 169 180 if (err) 170 181 return err; 171 182 172 183 if (!sdm) 173 - return 0; 174 - 175 - if (!sd_group) 176 184 return 0; 177 185 178 186 group_id = mlx5_sd_group_id(dev, sd_group);
+8
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
··· 40 40 */ 41 41 #define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0 42 42 #define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1 43 + #define MLXBF_GIGE_MAX_FILTER_IDX 3 43 44 44 45 /* Define for broadcast MAC literal */ 45 46 #define BCAST_MAC_ADDR 0xFFFFFFFFFFFF ··· 176 175 int mlxbf_gige_mdio_probe(struct platform_device *pdev, 177 176 struct mlxbf_gige *priv); 178 177 void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv); 178 + 179 + void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv); 180 + void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv); 181 + void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv, 182 + unsigned int index); 183 + void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv, 184 + unsigned int index); 179 185 void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, 180 186 unsigned int index, u64 dmac); 181 187 void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+10
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
··· 168 168 if (err) 169 169 goto napi_deinit; 170 170 171 + mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX); 172 + mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX); 173 + mlxbf_gige_enable_multicast_rx(priv); 174 + 171 175 /* Set bits in INT_EN that we care about */ 172 176 int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR | 173 177 MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS | ··· 383 379 void __iomem *plu_base; 384 380 void __iomem *base; 385 381 int addr, phy_irq; 382 + unsigned int i; 386 383 int err; 387 384 388 385 base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC); ··· 427 422 428 423 priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ; 429 424 priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ; 425 + 426 + for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++) 427 + mlxbf_gige_disable_mac_rx_filter(priv, i); 428 + mlxbf_gige_disable_multicast_rx(priv); 429 + mlxbf_gige_disable_promisc(priv); 430 430 431 431 /* Write initial MAC address to hardware */ 432 432 mlxbf_gige_initial_mac(priv);
+2
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
··· 62 62 #define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1) 63 63 #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520 64 64 #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528 65 + #define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530 66 + #define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1) 65 67 #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540 66 68 #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0) 67 69 #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548
+44 -6
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
··· 11 11 #include "mlxbf_gige.h" 12 12 #include "mlxbf_gige_regs.h" 13 13 14 - void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, 15 - unsigned int index, u64 dmac) 14 + void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv) 15 + { 16 + void __iomem *base = priv->base; 17 + u64 data; 18 + 19 + data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); 20 + data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST; 21 + writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); 22 + } 23 + 24 + void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv) 25 + { 26 + void __iomem *base = priv->base; 27 + u64 data; 28 + 29 + data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); 30 + data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST; 31 + writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); 32 + } 33 + 34 + void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv, 35 + unsigned int index) 16 36 { 17 37 void __iomem *base = priv->base; 18 38 u64 control; 19 - 20 - /* Write destination MAC to specified MAC RX filter */ 21 - writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER + 22 - (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE)); 23 39 24 40 /* Enable MAC receive filter mask for specified index */ 25 41 control = readq(base + MLXBF_GIGE_CONTROL); 26 42 control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index); 27 43 writeq(control, base + MLXBF_GIGE_CONTROL); 44 + } 45 + 46 + void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv, 47 + unsigned int index) 48 + { 49 + void __iomem *base = priv->base; 50 + u64 control; 51 + 52 + /* Disable MAC receive filter mask for specified index */ 53 + control = readq(base + MLXBF_GIGE_CONTROL); 54 + control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index); 55 + writeq(control, base + MLXBF_GIGE_CONTROL); 56 + } 57 + 58 + void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, 59 + unsigned int index, u64 dmac) 60 + { 61 + void __iomem *base = priv->base; 62 + 63 + /* Write destination MAC to specified MAC RX filter */ 64 + writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER + 65 + (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE)); 28 66 } 29 67 30 68 void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+19 -9
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 599 599 else 600 600 *headroom = XDP_PACKET_HEADROOM; 601 601 602 - *alloc_size = mtu + MANA_RXBUF_PAD + *headroom; 602 + *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom); 603 + 604 + /* Using page pool in this case, so alloc_size is PAGE_SIZE */ 605 + if (*alloc_size < PAGE_SIZE) 606 + *alloc_size = PAGE_SIZE; 603 607 604 608 *datasize = mtu + ETH_HLEN; 605 609 } ··· 1792 1788 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) 1793 1789 { 1794 1790 struct mana_cq *cq = context; 1795 - u8 arm_bit; 1796 1791 int w; 1797 1792 1798 1793 WARN_ON_ONCE(cq->gdma_cq != gdma_queue); ··· 1802 1799 mana_poll_tx_cq(cq); 1803 1800 1804 1801 w = cq->work_done; 1802 + cq->work_done_since_doorbell += w; 1805 1803 1806 - if (w < cq->budget && 1807 - napi_complete_done(&cq->napi, w)) { 1808 - arm_bit = SET_ARM_BIT; 1809 - } else { 1810 - arm_bit = 0; 1804 + if (w < cq->budget) { 1805 + mana_gd_ring_cq(gdma_queue, SET_ARM_BIT); 1806 + cq->work_done_since_doorbell = 0; 1807 + napi_complete_done(&cq->napi, w); 1808 + } else if (cq->work_done_since_doorbell > 1809 + cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) { 1810 + /* MANA hardware requires at least one doorbell ring every 8 1811 + * wraparounds of CQ even if there is no need to arm the CQ. 1812 + * This driver rings the doorbell as soon as we have exceeded 1813 + * 4 wraparounds. 1814 + */ 1815 + mana_gd_ring_cq(gdma_queue, 0); 1816 + cq->work_done_since_doorbell = 0; 1811 1817 } 1812 - 1813 - mana_gd_ring_cq(gdma_queue, arm_bit); 1814 1818 1815 1819 return w; 1816 1820 }
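
The mana_en.c completion handler now tracks work done since the last doorbell and forces a ring after roughly 4 CQ wraparounds even when the CQ is not re-armed, staying well within the hardware's 8-wrap requirement. A model of the accounting (illustrative queue size, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

#define CQ_ENTRIES 64 /* models queue_size / COMP_ENTRY_SIZE */

struct cq_model {
	unsigned int work_done_since_doorbell;
};

/* Returns true when the doorbell must be rung on this pass. */
static bool must_ring(struct cq_model *cq, unsigned int work, bool armed)
{
	cq->work_done_since_doorbell += work;

	/* Arming always rings; otherwise ring after ~4 wraparounds so
	 * the hardware's 8-wrap limit is never approached. */
	if (armed || cq->work_done_since_doorbell > CQ_ENTRIES * 4) {
		cq->work_done_since_doorbell = 0;
		return true;
	}
	return false;
}

int main(void)
{
	struct cq_model cq = { 0 };
	unsigned int pass, rings = 0;

	/* Busy polling: budget always exhausted, CQ never re-armed. */
	for (pass = 0; pass < 20; pass++)
		rings += must_ring(&cq, CQ_ENTRIES, false);
	printf("doorbells rung while unarmed: %u\n", rings); /* 4 */
	return 0;
}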
+8 -8
drivers/net/ethernet/xilinx/xilinx_axienet.h
··· 160 160 #define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */ 161 161 #define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */ 162 162 #define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */ 163 - #define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */ 164 - #define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */ 163 + #define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */ 164 + #define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */ 165 165 #define XAE_ID_OFFSET 0x000004F8 /* Identification register */ 166 - #define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */ 167 - #define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */ 168 - #define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */ 169 - #define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */ 166 + #define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */ 167 + #define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */ 168 + #define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */ 169 + #define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */ 170 170 #define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */ 171 171 #define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */ 172 - #define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */ 172 + #define XAE_FMI_OFFSET 0x00000708 /* Frame Filter Control */ 173 173 #define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */ 174 174 #define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */ 175 175 ··· 308 308 */ 309 309 #define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF 310 310 311 - /* Bit masks for Axi Ethernet FMI register */ 311 + /* Bit masks for Axi Ethernet FMC register */ 312 312 #define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */ 313 313 #define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */ 314 314
+3
drivers/net/gtp.c
··· 1269 1269 if (skb_cow_head(skb, dev->needed_headroom)) 1270 1270 goto tx_err; 1271 1271 1272 + if (!pskb_inet_may_pull(skb)) 1273 + goto tx_err; 1274 + 1272 1275 skb_reset_inner_headers(skb); 1273 1276 1274 1277 /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
-14
drivers/net/phy/vitesse.c
··· 319 319 return 0; 320 320 } 321 321 322 - static int vsc73xx_config_aneg(struct phy_device *phydev) 323 - { 324 - /* The VSC73xx switches does not like to be instructed to 325 - * do autonegotiation in any way, it prefers that you just go 326 - * with the power-on/reset defaults. Writing some registers will 327 - * just make autonegotiation permanently fail. 328 - */ 329 - return 0; 330 - } 331 - 332 322 /* This adds a skew for both TX and RX clocks, so the skew should only be 333 323 * applied to "rgmii-id" interfaces. It may not work as expected 334 324 * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. ··· 516 526 .phy_id_mask = 0x000ffff0, 517 527 /* PHY_GBIT_FEATURES */ 518 528 .config_init = vsc738x_config_init, 519 - .config_aneg = vsc73xx_config_aneg, 520 529 .read_page = vsc73xx_read_page, 521 530 .write_page = vsc73xx_write_page, 522 531 .get_tunable = vsc73xx_get_tunable, ··· 526 537 .phy_id_mask = 0x000ffff0, 527 538 /* PHY_GBIT_FEATURES */ 528 539 .config_init = vsc738x_config_init, 529 - .config_aneg = vsc73xx_config_aneg, 530 540 .read_page = vsc73xx_read_page, 531 541 .write_page = vsc73xx_write_page, 532 542 .get_tunable = vsc73xx_get_tunable, ··· 536 548 .phy_id_mask = 0x000ffff0, 537 549 /* PHY_GBIT_FEATURES */ 538 550 .config_init = vsc739x_config_init, 539 - .config_aneg = vsc73xx_config_aneg, 540 551 .read_page = vsc73xx_read_page, 541 552 .write_page = vsc73xx_write_page, 542 553 .get_tunable = vsc73xx_get_tunable, ··· 546 559 .phy_id_mask = 0x000ffff0, 547 560 /* PHY_GBIT_FEATURES */ 548 561 .config_init = vsc739x_config_init, 549 - .config_aneg = vsc73xx_config_aneg, 550 562 .read_page = vsc73xx_read_page, 551 563 .write_page = vsc73xx_write_page, 552 564 .get_tunable = vsc73xx_get_tunable,
+8 -3
drivers/net/pse-pd/pse_core.c
··· 401 401 rdesc->ops = &pse_pi_ops; 402 402 rdesc->owner = pcdev->owner; 403 403 404 - rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS | 405 - REGULATOR_CHANGE_CURRENT; 406 - rinit_data->constraints.max_uA = MAX_PI_CURRENT; 404 + rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; 405 + 406 + if (pcdev->ops->pi_set_current_limit) { 407 + rinit_data->constraints.valid_ops_mask |= 408 + REGULATOR_CHANGE_CURRENT; 409 + rinit_data->constraints.max_uA = MAX_PI_CURRENT; 410 + } 411 + 407 412 rinit_data->supply_regulator = "vpwr"; 408 413 409 414 rconfig.dev = pcdev->dev;
+11 -9
drivers/net/usb/ipheth.c
··· 286 286 return; 287 287 } 288 288 289 - if (urb->actual_length <= IPHETH_IP_ALIGN) { 290 - dev->net->stats.rx_length_errors++; 291 - return; 292 - } 289 + /* iPhone may periodically send URBs with no payload 290 + * on the "bulk in" endpoint. It is safe to ignore them. 291 + */ 292 + if (urb->actual_length == 0) 293 + goto rx_submit; 293 294 294 295 /* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames, 295 296 * but rather are control frames. Their purpose is not documented, and ··· 299 298 * URB received from the bulk IN endpoint. 300 299 */ 301 300 if (unlikely 302 - (((char *)urb->transfer_buffer)[0] == 0 && 301 + (urb->actual_length == 4 && 302 + ((char *)urb->transfer_buffer)[0] == 0 && 303 303 ((char *)urb->transfer_buffer)[1] == 1)) 304 304 goto rx_submit; 305 305 ··· 308 306 if (retval != 0) { 309 307 dev_err(&dev->intf->dev, "%s: callback retval: %d\n", 310 308 __func__, retval); 311 - return; 312 309 } 313 310 314 311 rx_submit: ··· 355 354 0x02, /* index */ 356 355 dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE, 357 356 IPHETH_CTRL_TIMEOUT); 358 - if (retval < 0) { 357 + if (retval <= 0) { 359 358 dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n", 360 359 __func__, retval); 361 360 return retval; 362 361 } 363 362 364 - if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) { 363 + if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) || 364 + (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) { 365 365 netif_carrier_on(dev->net); 366 366 if (dev->tx_urb->status != -EINPROGRESS) 367 367 netif_wake_queue(dev->net); ··· 477 475 { 478 476 struct ipheth_device *dev = netdev_priv(net); 479 477 480 - cancel_delayed_work_sync(&dev->carrier_work); 481 478 netif_stop_queue(net); 479 + cancel_delayed_work_sync(&dev->carrier_work); 482 480 return 0; 483 481 } 484 482
+72
drivers/net/wireless/ath/ath12k/dp_tx.c
··· 162 162 return 0; 163 163 } 164 164 165 + static void ath12k_dp_tx_move_payload(struct sk_buff *skb, 166 + unsigned long delta, 167 + bool head) 168 + { 169 + unsigned long len = skb->len; 170 + 171 + if (head) { 172 + skb_push(skb, delta); 173 + memmove(skb->data, skb->data + delta, len); 174 + skb_trim(skb, len); 175 + } else { 176 + skb_put(skb, delta); 177 + memmove(skb->data + delta, skb->data, len); 178 + skb_pull(skb, delta); 179 + } 180 + } 181 + 182 + static int ath12k_dp_tx_align_payload(struct ath12k_base *ab, 183 + struct sk_buff **pskb) 184 + { 185 + u32 iova_mask = ab->hw_params->iova_mask; 186 + unsigned long offset, delta1, delta2; 187 + struct sk_buff *skb2, *skb = *pskb; 188 + unsigned int headroom = skb_headroom(skb); 189 + int tailroom = skb_tailroom(skb); 190 + int ret = 0; 191 + 192 + offset = (unsigned long)skb->data & iova_mask; 193 + delta1 = offset; 194 + delta2 = iova_mask - offset + 1; 195 + 196 + if (headroom >= delta1) { 197 + ath12k_dp_tx_move_payload(skb, delta1, true); 198 + } else if (tailroom >= delta2) { 199 + ath12k_dp_tx_move_payload(skb, delta2, false); 200 + } else { 201 + skb2 = skb_realloc_headroom(skb, iova_mask); 202 + if (!skb2) { 203 + ret = -ENOMEM; 204 + goto out; 205 + } 206 + 207 + dev_kfree_skb_any(skb); 208 + 209 + offset = (unsigned long)skb2->data & iova_mask; 210 + if (offset) 211 + ath12k_dp_tx_move_payload(skb2, offset, true); 212 + *pskb = skb2; 213 + } 214 + 215 + out: 216 + return ret; 217 + } 218 + 165 219 int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, 166 220 struct sk_buff *skb) 167 221 { ··· 238 184 bool tcl_ring_retry; 239 185 bool msdu_ext_desc = false; 240 186 bool add_htt_metadata = false; 187 + u32 iova_mask = ab->hw_params->iova_mask; 241 188 242 189 if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) 243 190 return -ESHUTDOWN; ··· 334 279 goto fail_remove_tx_buf; 335 280 } 336 281 282 + if (iova_mask && 283 + (unsigned long)skb->data & iova_mask) { 284 + ret = ath12k_dp_tx_align_payload(ab, &skb); 285 + if (ret) { 286 + ath12k_warn(ab, "failed to align TX buffer %d\n", ret); 287 + /* don't bail out, give original buffer 288 + * a chance even unaligned. 289 + */ 290 + goto map; 291 + } 292 + 293 + /* hdr is pointing to a wrong place after alignment, 294 + * so refresh it for later use. 295 + */ 296 + hdr = (void *)skb->data; 297 + } 298 + map: 337 299 ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); 338 300 if (dma_mapping_error(ab->dev, ti.paddr)) { 339 301 atomic_inc(&ab->soc_stats.tx_err.misc_fail);
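
The ath12k alignment helper computes how far the payload sits past an IOVA boundary and the two candidate shifts: delta1 moves data toward the headroom, delta2 toward the tailroom. A standalone model of the arithmetic, assuming the WCN7850 mask of PCIe max payload size 128 minus 1:

#include <stdio.h>

#define IOVA_MASK (128 - 1) /* ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1 */

int main(void)
{
	unsigned long addr = 0x1000 + 40; /* example buffer address */
	unsigned long offset = addr & IOVA_MASK;
	unsigned long delta1 = offset;                 /* shift toward headroom */
	unsigned long delta2 = IOVA_MASK - offset + 1; /* shift toward tailroom */

	/* Either shift lands the payload on a 128-byte boundary. */
	printf("offset=%lu delta1=%lu delta2=%lu\n", offset, delta1, delta2);
	printf("aligned: %lu %lu\n",
	       (addr - delta1) & IOVA_MASK,  /* 0 */
	       (addr + delta2) & IOVA_MASK); /* 0 */
	return 0;
}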
+6
drivers/net/wireless/ath/ath12k/hw.c
··· 924 924 925 925 .acpi_guid = NULL, 926 926 .supports_dynamic_smps_6ghz = true, 927 + 928 + .iova_mask = 0, 927 929 }, 928 930 { 929 931 .name = "wcn7850 hw2.0", ··· 1002 1000 1003 1001 .acpi_guid = &wcn7850_uuid, 1004 1002 .supports_dynamic_smps_6ghz = false, 1003 + 1004 + .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1, 1005 1005 }, 1006 1006 { 1007 1007 .name = "qcn9274 hw2.0", ··· 1076 1072 1077 1073 .acpi_guid = NULL, 1078 1074 .supports_dynamic_smps_6ghz = true, 1075 + 1076 + .iova_mask = 0, 1079 1077 }, 1080 1078 }; 1081 1079
+4
drivers/net/wireless/ath/ath12k/hw.h
··· 96 96 #define ATH12K_M3_FILE "m3.bin" 97 97 #define ATH12K_REGDB_FILE_NAME "regdb.bin" 98 98 99 + #define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128 100 + 99 101 enum ath12k_hw_rate_cck { 100 102 ATH12K_HW_RATE_CCK_LP_11M = 0, 101 103 ATH12K_HW_RATE_CCK_LP_5_5M, ··· 217 215 218 216 const guid_t *acpi_guid; 219 217 bool supports_dynamic_smps_6ghz; 218 + 219 + u32 iova_mask; 220 220 }; 221 221 222 222 struct ath12k_hw_ops {
+1
drivers/net/wireless/ath/ath12k/mac.c
··· 9193 9193 9194 9194 hw->vif_data_size = sizeof(struct ath12k_vif); 9195 9195 hw->sta_data_size = sizeof(struct ath12k_sta); 9196 + hw->extra_tx_headroom = ab->hw_params->iova_mask; 9196 9197 9197 9198 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); 9198 9199 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+10 -3
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 4320 4320 /* Single PMK operation */ 4321 4321 pmk_op->count = cpu_to_le16(1); 4322 4322 length += sizeof(struct brcmf_pmksa_v3); 4323 - memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN); 4324 - memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN); 4325 - pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN; 4323 + if (pmksa->bssid) 4324 + memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN); 4325 + if (pmksa->pmkid) { 4326 + memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN); 4327 + pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN; 4328 + } 4329 + if (pmksa->ssid && pmksa->ssid_len) { 4330 + memcpy(pmk_op->pmk[0].ssid.SSID, pmksa->ssid, pmksa->ssid_len); 4331 + pmk_op->pmk[0].ssid.SSID_len = pmksa->ssid_len; 4332 + } 4326 4333 pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0); 4327 4334 } 4328 4335
+2 -1
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
··· 639 639 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, 640 640 int slots_num, bool cmd_queue); 641 641 642 - dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr); 642 + dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, 643 + unsigned int len); 643 644 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, 644 645 struct iwl_cmd_meta *cmd_meta, 645 646 u8 **hdr, unsigned int hdr_room);
+4 -1
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
··· 168 168 struct ieee80211_hdr *hdr = (void *)skb->data; 169 169 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 170 170 unsigned int mss = skb_shinfo(skb)->gso_size; 171 + unsigned int data_offset = 0; 171 172 dma_addr_t start_hdr_phys; 172 173 u16 length, amsdu_pad; 173 174 u8 *start_hdr; ··· 261 260 int ret; 262 261 263 262 tb_len = min_t(unsigned int, tso.size, data_left); 264 - tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data); 263 + tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, 264 + tb_len); 265 265 /* Not a real mapping error, use direct comparison */ 266 266 if (unlikely(tb_phys == DMA_MAPPING_ERROR)) 267 267 goto out_err; ··· 274 272 goto out_err; 275 273 276 274 data_left -= tb_len; 275 + data_offset += tb_len; 277 276 tso_build_data(skb, &tso, tb_len); 278 277 } 279 278 }
+22 -10
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 1814 1814 /** 1815 1815 * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list 1816 1816 * @sgt: scatter gather table 1817 - * @addr: Virtual address 1817 + * @offset: Offset into the mapped memory (i.e. SKB payload data) 1818 + * @len: Length of the area 1818 1819 * 1819 - * Find the entry that includes the address for the given address and return 1820 - * correct physical address for the TB entry. 1820 + * Find the DMA address that corresponds to the SKB payload data at the 1821 + * position given by @offset. 1821 1822 * 1822 1823 * Returns: Address for TB entry 1823 1824 */ 1824 - dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr) 1825 + dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, 1826 + unsigned int len) 1825 1827 { 1826 1828 struct scatterlist *sg; 1829 + unsigned int sg_offset = 0; 1827 1830 int i; 1828 1831 1832 + /* 1833 + * Search the mapped DMA areas in the SG for the area that contains the 1834 + * data at offset with the given length. 1835 + */ 1829 1836 for_each_sgtable_dma_sg(sgt, sg, i) { 1830 - if (addr >= sg_virt(sg) && 1831 - (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg)) 1832 - return sg_dma_address(sg) + 1833 - ((unsigned long)addr - (unsigned long)sg_virt(sg)); 1837 + if (offset >= sg_offset && 1838 + offset + len <= sg_offset + sg_dma_len(sg)) 1839 + return sg_dma_address(sg) + offset - sg_offset; 1840 + 1841 + sg_offset += sg_dma_len(sg); 1834 1842 } 1835 1843 1836 1844 WARN_ON_ONCE(1); ··· 1883 1875 1884 1876 sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); 1885 1877 1886 - sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); 1878 + /* Only map the data, not the header (it is copied to the TSO page) */ 1879 + sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb), 1880 + skb->data_len); 1887 1881 if (WARN_ON_ONCE(sgt->orig_nents <= 0)) 1888 1882 return NULL; 1889 1883 ··· 1910 1900 struct ieee80211_hdr *hdr = (void *)skb->data; 1911 1901 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 1912 1902 unsigned int mss = skb_shinfo(skb)->gso_size; 1903 + unsigned int data_offset = 0; 1913 1904 u16 length, iv_len, amsdu_pad; 1914 1905 dma_addr_t start_hdr_phys; 1915 1906 u8 *start_hdr, *pos_hdr; ··· 2011 2000 data_left); 2012 2001 dma_addr_t tb_phys; 2013 2002 2014 - tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data); 2003 + tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size); 2015 2004 /* Not a real mapping error, use direct comparison */ 2016 2005 if (unlikely(tb_phys == DMA_MAPPING_ERROR)) 2017 2006 return -EINVAL; ··· 2022 2011 tb_phys, size); 2023 2012 2024 2013 data_left -= size; 2014 + data_offset += size; 2025 2015 tso_build_data(skb, &tso, size); 2026 2016 } 2027 2017 }
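
The reworked iwl_pcie_get_sgt_tb_phys() walks the mapped scatterlist by cumulative offset rather than by virtual address, and returns a DMA address only when the whole [offset, offset+len) span fits inside one entry. A userspace model over plain arrays (illustrative values):

#include <stdio.h>

struct sg_ent {
	unsigned long long dma_addr;
	unsigned int dma_len;
};

#define BAD_ADDR (~0ULL) /* stands in for DMA_MAPPING_ERROR */

/* Find the DMA address for payload [offset, offset+len) by summing
 * entry lengths; fail if the span straddles two entries. */
static unsigned long long
sgt_tb_phys(const struct sg_ent *sg, int n, unsigned int offset,
	    unsigned int len)
{
	unsigned int sg_offset = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (offset >= sg_offset &&
		    offset + len <= sg_offset + sg[i].dma_len)
			return sg[i].dma_addr + offset - sg_offset;
		sg_offset += sg[i].dma_len;
	}
	return BAD_ADDR;
}

int main(void)
{
	const struct sg_ent sg[] = {
		{ 0x10000, 4096 },
		{ 0x80000, 2048 },
	};

	printf("%llx\n", sgt_tb_phys(sg, 2, 100, 200));  /* 10064          */
	printf("%llx\n", sgt_tb_phys(sg, 2, 4100, 100)); /* 80004          */
	printf("%llx\n", sgt_tb_phys(sg, 2, 4000, 200)); /* error: spans 2 */
	return 0;
}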
+1 -1
drivers/net/wireless/mediatek/mt76/mt7921/main.c
··· 1183 1183 struct inet6_dev *idev) 1184 1184 { 1185 1185 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 1186 - struct mt792x_dev *dev = mvif->phy->dev; 1186 + struct mt792x_dev *dev = mt792x_hw_dev(hw); 1187 1187 struct inet6_ifaddr *ifa; 1188 1188 struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; 1189 1189 struct sk_buff *skb;
+1 -1
drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
··· 181 181 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 182 182 u32 txqpagenum, txqpageunit; 183 183 u32 txqremainingpage; 184 + u32 value32 = 0; 184 185 u32 numhq = 0; 185 186 u32 numlq = 0; 186 187 u32 numnq = 0; 187 188 u32 numpubq; 188 - u32 value32; 189 189 190 190 if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY) { 191 191 numpubq = NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC;
+9 -9
drivers/nvme/host/core.c
··· 36 36 struct nvme_ns_ids ids; 37 37 u32 nsid; 38 38 __le32 anagrpid; 39 + u8 pi_offset; 39 40 bool is_shared; 40 41 bool is_readonly; 41 42 bool is_ready; ··· 1758 1757 return 0; 1759 1758 } 1760 1759 1761 - static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head, 1762 - struct queue_limits *lim) 1760 + static bool nvme_init_integrity(struct nvme_ns_head *head, 1761 + struct queue_limits *lim, struct nvme_ns_info *info) 1763 1762 { 1764 1763 struct blk_integrity *bi = &lim->integrity; 1765 1764 ··· 1817 1816 } 1818 1817 1819 1818 bi->tuple_size = head->ms; 1820 - bi->pi_offset = head->pi_offset; 1819 + bi->pi_offset = info->pi_offset; 1821 1820 return true; 1822 1821 } 1823 1822 ··· 1903 1902 1904 1903 static void nvme_configure_metadata(struct nvme_ctrl *ctrl, 1905 1904 struct nvme_ns_head *head, struct nvme_id_ns *id, 1906 - struct nvme_id_ns_nvm *nvm) 1905 + struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info) 1907 1906 { 1908 1907 head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1909 1908 head->pi_type = 0; 1910 1909 head->pi_size = 0; 1911 - head->pi_offset = 0; 1912 1910 head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms); 1913 1911 if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1914 1912 return; ··· 1922 1922 if (head->pi_size && head->ms >= head->pi_size) 1923 1923 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1924 1924 if (!(id->dps & NVME_NS_DPS_PI_FIRST)) 1925 - head->pi_offset = head->ms - head->pi_size; 1925 + info->pi_offset = head->ms - head->pi_size; 1926 1926 1927 1927 if (ctrl->ops->flags & NVME_F_FABRICS) { 1928 1928 /* ··· 2156 2156 2157 2157 lim = queue_limits_start_update(ns->disk->queue); 2158 2158 nvme_set_ctrl_limits(ns->ctrl, &lim); 2159 - nvme_configure_metadata(ns->ctrl, ns->head, id, nvm); 2159 + nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info); 2160 2160 nvme_set_chunk_sectors(ns, id, &lim); 2161 2161 if (!nvme_update_disk_info(ns, id, &lim)) 2162 2162 capacity = 0; ··· 2176 2176 * I/O to namespaces with metadata except when the namespace supports 2177 2177 * PI, as it can strip/insert in that case. 2178 2178 */ 2179 - if (!nvme_init_integrity(ns->disk, ns->head, &lim)) 2179 + if (!nvme_init_integrity(ns->head, &lim, info)) 2180 2180 capacity = 0; 2181 2181 2182 2182 ret = queue_limits_commit_update(ns->disk->queue, &lim); ··· 2280 2280 if (unsupported) 2281 2281 ns->head->disk->flags |= GENHD_FL_HIDDEN; 2282 2282 else 2283 - nvme_init_integrity(ns->head->disk, ns->head, &lim); 2283 + nvme_init_integrity(ns->head, &lim, info); 2284 2284 ret = queue_limits_commit_update(ns->head->disk->queue, &lim); 2285 2285 2286 2286 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
+6 -7
drivers/nvme/host/nvme.h
··· 462 462 struct srcu_struct srcu; 463 463 struct nvme_subsystem *subsys; 464 464 struct nvme_ns_ids ids; 465 + u8 lba_shift; 466 + u16 ms; 467 + u16 pi_size; 468 + u8 pi_type; 469 + u8 guard_type; 465 470 struct list_head entry; 466 471 struct kref ref; 467 472 bool shared; 468 473 bool passthru_err_log_enabled; 469 - int instance; 470 474 struct nvme_effects_log *effects; 471 475 u64 nuse; 472 476 unsigned ns_id; 473 - int lba_shift; 474 - u16 ms; 475 - u16 pi_size; 476 - u8 pi_type; 477 - u8 pi_offset; 478 - u8 guard_type; 477 + int instance; 479 478 #ifdef CONFIG_BLK_DEV_ZONED 480 479 u64 zsze; 481 480 #endif
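
The nvme_ns_head reshuffle groups the small fields (lba_shift, ms, pi_size, pi_type, guard_type) together and drops pi_offset; ordering members this way also avoids padding holes between narrow and wide fields. A compile-and-print illustration of the padding effect (generic types, not the nvme struct):

#include <stdio.h>
#include <stdint.h>

/* Mixed ordering: each u8 before a u64 forces 7 bytes of padding. */
struct loose {
	uint8_t  a;
	uint64_t x;
	uint8_t  b;
	uint64_t y;
};

/* Grouping the small members lets them share one padded slot. */
struct packed_by_hand {
	uint8_t  a;
	uint8_t  b;
	uint64_t x;
	uint64_t y;
};

int main(void)
{
	printf("%zu %zu\n", sizeof(struct loose),
	       sizeof(struct packed_by_hand)); /* typically 32 24 */
	return 0;
}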
+55 -25
drivers/platform/cznic/Kconfig
··· 16 16 tristate "Turris Omnia MCU driver" 17 17 depends on MACH_ARMADA_38X || COMPILE_TEST 18 18 depends on I2C 19 - depends on OF 20 - depends on WATCHDOG 21 - depends on GPIOLIB 22 - depends on HW_RANDOM 23 - depends on RTC_CLASS 24 - depends on WATCHDOG_CORE 25 - select GPIOLIB_IRQCHIP 26 19 help 27 20 Say Y here to add support for the features implemented by the 28 21 microcontroller on the CZ.NIC's Turris Omnia SOHO router. 29 - The features include: 30 - - board poweroff into true low power mode (with voltage regulators 31 - disabled) and the ability to configure wake up from this mode (via 32 - rtcwake) 33 - - true random number generator (if available on the MCU) 34 - - MCU watchdog 35 - - GPIO pins 36 - - to get front button press events (the front button can be 37 - configured either to generate press events to the CPU or to change 38 - front LEDs panel brightness) 39 - - to enable / disable USB port voltage regulators and to detect 40 - USB overcurrent 41 - - to detect MiniPCIe / mSATA card presence in MiniPCIe port 0 42 - - to configure resets of various peripherals on board revisions 32+ 43 - - to enable / disable the VHV voltage regulator to the SOC in order 44 - to be able to program SOC's OTP on board revisions 32+ 45 - - to get input from the LED output pins of the WAN ethernet PHY, LAN 46 - switch and MiniPCIe ports 22 + This option only enables the core part of the driver. Specific 23 + features can be enabled by subsequent config options. 47 24 To compile this driver as a module, choose M here; the module will be 48 25 called turris-omnia-mcu. 26 + 27 + if TURRIS_OMNIA_MCU 28 + 29 + config TURRIS_OMNIA_MCU_GPIO 30 + bool "Turris Omnia MCU GPIOs" 31 + default y 32 + depends on GPIOLIB 33 + depends on OF 34 + select GPIOLIB_IRQCHIP 35 + help 36 + Say Y here to add support for controlling MCU GPIO pins and receiving 37 + MCU interrupts on CZ.NIC's Turris Omnia. 38 + This enables you to 39 + - get front button press events (the front button can be configured 40 + either to generate press events to the CPU or to change front LEDs 41 + panel brightness), 42 + - enable / disable USB port voltage regulators and to detect USB 43 + overcurrent, 44 + - detect MiniPCIe / mSATA card presence in MiniPCIe port 0, 45 + - configure resets of various peripherals on board revisions 32+, 46 + - enable / disable the VHV voltage regulator to the SOC in order to be 47 + able to program SOC's OTP on board revisions 32+, 48 + - get input from the LED output pins of the WAN ethernet PHY, LAN 49 + switch and MiniPCIe ports. 50 + 51 + config TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 52 + bool "Turris Omnia MCU system off and RTC wakeup" 53 + default y 54 + depends on RTC_CLASS 55 + help 56 + Say Y here to add support for CZ.NIC's Turris Omnia board poweroff 57 + into true low power mode (with voltage regulators disabled) and the 58 + ability to configure wake up from this mode (via rtcwake). 59 + 60 + config TURRIS_OMNIA_MCU_WATCHDOG 61 + bool "Turris Omnia MCU watchdog" 62 + default y 63 + depends on WATCHDOG 64 + select WATCHDOG_CORE 65 + help 66 + Say Y here to add support for watchdog provided by CZ.NIC's Turris 67 + Omnia MCU. 68 + 69 + config TURRIS_OMNIA_MCU_TRNG 70 + bool "Turris Omnia MCU true random number generator" 71 + default y 72 + depends on TURRIS_OMNIA_MCU_GPIO 73 + depends on HW_RANDOM 74 + help 75 + Say Y here to add support for the true random number generator 76 + provided by CZ.NIC's Turris Omnia MCU. 77 + 78 + endif # TURRIS_OMNIA_MCU 49 79 50 80 endif # CZNIC_PLATFORMS
+4 -4
drivers/platform/cznic/Makefile
··· 2 2 3 3 obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o 4 4 turris-omnia-mcu-y := turris-omnia-mcu-base.o 5 - turris-omnia-mcu-y += turris-omnia-mcu-gpio.o 6 - turris-omnia-mcu-y += turris-omnia-mcu-sys-off-wakeup.o 7 - turris-omnia-mcu-y += turris-omnia-mcu-trng.o 8 - turris-omnia-mcu-y += turris-omnia-mcu-watchdog.o 5 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_GPIO) += turris-omnia-mcu-gpio.o 6 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP) += turris-omnia-mcu-sys-off-wakeup.o 7 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_TRNG) += turris-omnia-mcu-trng.o 8 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_WATCHDOG) += turris-omnia-mcu-watchdog.o
+4
drivers/platform/cznic/turris-omnia-mcu-base.c
··· 197 197 198 198 static const struct attribute_group *omnia_mcu_groups[] = { 199 199 &omnia_mcu_base_group, 200 + #ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO 200 201 &omnia_mcu_gpio_group, 202 + #endif 203 + #ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 201 204 &omnia_mcu_poweroff_group, 205 + #endif 202 206 NULL 203 207 }; 204 208
+40 -2
drivers/platform/cznic/turris-omnia-mcu.h
··· 33 33 u8 board_first_mac[ETH_ALEN]; 34 34 u8 board_revision; 35 35 36 + #ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO 36 37 /* GPIO chip */ 37 38 struct gpio_chip gc; 38 39 struct mutex lock; ··· 42 41 struct delayed_work button_release_emul_work; 43 42 unsigned long last_status; 44 43 bool button_pressed_emul; 44 + #endif 45 45 46 + #ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 46 47 /* RTC device for configuring wake-up */ 47 48 struct rtc_device *rtcdev; 48 49 u32 rtc_alarm; 49 50 bool front_button_poweron; 51 + #endif 50 52 53 + #ifdef CONFIG_TURRIS_OMNIA_MCU_WATCHDOG 51 54 /* MCU watchdog */ 52 55 struct watchdog_device wdt; 56 + #endif 53 57 58 + #ifdef CONFIG_TURRIS_OMNIA_MCU_TRNG 54 59 /* true random number generator */ 55 60 struct hwrng trng; 56 61 struct completion trng_entropy_ready; 62 + #endif 57 63 }; 58 64 59 65 int omnia_cmd_write_read(const struct i2c_client *client, ··· 190 182 return omnia_cmd_read(client, cmd, reply, sizeof(*reply)); 191 183 } 192 184 185 + #ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO 193 186 extern const u8 omnia_int_to_gpio_idx[32]; 194 187 extern const struct attribute_group omnia_mcu_gpio_group; 195 - extern const struct attribute_group omnia_mcu_poweroff_group; 196 - 197 188 int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu); 189 + #else 190 + static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) 191 + { 192 + return 0; 193 + } 194 + #endif 195 + 196 + #ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 197 + extern const struct attribute_group omnia_mcu_poweroff_group; 198 198 int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu); 199 + #else 200 + static inline int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu) 201 + { 202 + return 0; 203 + } 204 + #endif 205 + 206 + #ifdef CONFIG_TURRIS_OMNIA_MCU_TRNG 199 207 int omnia_mcu_register_trng(struct omnia_mcu *mcu); 208 + #else 209 + static inline int omnia_mcu_register_trng(struct omnia_mcu *mcu) 210 + { 211 + return 0; 212 + } 213 + #endif 214 + 215 + #ifdef CONFIG_TURRIS_OMNIA_MCU_WATCHDOG 200 216 int omnia_mcu_register_watchdog(struct omnia_mcu *mcu); 217 + #else 218 + static inline int omnia_mcu_register_watchdog(struct omnia_mcu *mcu) 219 + { 220 + return 0; 221 + } 222 + #endif 201 223 202 224 #endif /* __TURRIS_OMNIA_MCU_H */
+1
drivers/platform/x86/Kconfig
··· 477 477 tristate "Lenovo Yoga Tablet Mode Control" 478 478 depends on ACPI_WMI 479 479 depends on INPUT 480 + depends on IDEAPAD_LAPTOP 480 481 select INPUT_SPARSEKMAP 481 482 help 482 483 This driver maps the Tablet Mode Control switch to SW_TABLET_MODE input
+11 -21
drivers/platform/x86/amd/pmf/spc.c
··· 150 150 return 0; 151 151 } 152 152 153 - static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) 153 + static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) 154 154 { 155 155 struct amd_sfh_info sfh_info; 156 - int ret; 156 + 157 + /* Get the latest information from SFH */ 158 + in->ev_info.user_present = false; 157 159 158 160 /* Get ALS data */ 159 - ret = amd_get_sfh_info(&sfh_info, MT_ALS); 160 - if (!ret) 161 + if (!amd_get_sfh_info(&sfh_info, MT_ALS)) 161 162 in->ev_info.ambient_light = sfh_info.ambient_light; 162 163 else 163 - return ret; 164 + dev_dbg(dev->dev, "ALS is not enabled/detected\n"); 164 165 165 166 /* get HPD data */ 166 - ret = amd_get_sfh_info(&sfh_info, MT_HPD); 167 - if (ret) 168 - return ret; 169 - 170 - switch (sfh_info.user_present) { 171 - case SFH_NOT_DETECTED: 172 - in->ev_info.user_present = 0xff; /* assume no sensors connected */ 173 - break; 174 - case SFH_USER_PRESENT: 175 - in->ev_info.user_present = 1; 176 - break; 177 - case SFH_USER_AWAY: 178 - in->ev_info.user_present = 0; 179 - break; 167 + if (!amd_get_sfh_info(&sfh_info, MT_HPD)) { 168 + if (sfh_info.user_present == SFH_USER_PRESENT) 169 + in->ev_info.user_present = true; 170 + } else { 171 + dev_dbg(dev->dev, "HPD is not enabled/detected\n"); 180 172 } 181 - 182 - return 0; 183 173 } 184 174 185 175 void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+132 -16
drivers/platform/x86/ideapad-laptop.c
··· 126 126 127 127 struct ideapad_private { 128 128 struct acpi_device *adev; 129 + struct mutex vpc_mutex; /* protects the VPC calls */ 129 130 struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; 130 131 struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM]; 131 132 struct platform_device *platform_device; ··· 147 146 bool touchpad_ctrl_via_ec : 1; 148 147 bool ctrl_ps2_aux_port : 1; 149 148 bool usb_charging : 1; 149 + bool ymc_ec_trigger : 1; 150 150 } features; 151 151 struct { 152 152 bool initialized; ··· 195 193 MODULE_PARM_DESC(touchpad_ctrl_via_ec, 196 194 "Enable registering a 'touchpad' sysfs-attribute which can be used to manually " 197 195 "tell the EC to enable/disable the touchpad. This may not work on all models."); 196 + 197 + static bool ymc_ec_trigger __read_mostly; 198 + module_param(ymc_ec_trigger, bool, 0444); 199 + MODULE_PARM_DESC(ymc_ec_trigger, 200 + "Enable EC triggering work-around to force emitting tablet mode events. " 201 + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); 198 202 199 203 /* 200 204 * shared data ··· 301 293 { 302 294 struct ideapad_private *priv = s->private; 303 295 unsigned long value; 296 + 297 + guard(mutex)(&priv->vpc_mutex); 304 298 305 299 if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value)) 306 300 seq_printf(s, "Backlight max: %lu\n", value); ··· 422 412 unsigned long result; 423 413 int err; 424 414 425 - err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result); 415 + scoped_guard(mutex, &priv->vpc_mutex) 416 + err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result); 426 417 if (err) 427 418 return err; 428 419 ··· 442 431 if (err) 443 432 return err; 444 433 445 - err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); 434 + scoped_guard(mutex, &priv->vpc_mutex) 435 + err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); 446 436 if (err) 447 437 return err; 448 438 ··· 496 484 unsigned long result; 497 485 int err; 498 486 499 - err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result); 487 + scoped_guard(mutex, &priv->vpc_mutex) 488 + err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result); 500 489 if (err) 501 490 return err; 502 491 ··· 519 506 if (state > 4 || state == 3) 520 507 return -EINVAL; 521 508 522 - err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); 509 + scoped_guard(mutex, &priv->vpc_mutex) 510 + err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); 523 511 if (err) 524 512 return err; 525 513 ··· 605 591 unsigned long result; 606 592 int err; 607 593 608 - err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result); 594 + scoped_guard(mutex, &priv->vpc_mutex) 595 + err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result); 609 596 if (err) 610 597 return err; 611 598 ··· 627 612 if (err) 628 613 return err; 629 614 630 - err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state); 615 + scoped_guard(mutex, &priv->vpc_mutex) 616 + err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state); 631 617 if (err) 632 618 return err; 633 619 ··· 1021 1005 struct ideapad_rfk_priv *priv = data; 1022 1006 int opcode = ideapad_rfk_data[priv->dev].opcode; 1023 1007 1008 + guard(mutex)(&priv->priv->vpc_mutex); 1009 + 1024 1010 return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked); 1025 1011 } 1026 1012 ··· 1036 1018 int i; 1037 1019 1038 1020 if (priv->features.hw_rfkill_switch) { 1021 + guard(mutex)(&priv->vpc_mutex); 1022 + 1039 1023 if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, 
&hw_blocked)) 1040 1024 return; 1041 1025 hw_blocked = !hw_blocked; ··· 1211 1191 { 1212 1192 unsigned long long_pressed; 1213 1193 1214 - if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed)) 1215 - return; 1194 + scoped_guard(mutex, &priv->vpc_mutex) 1195 + if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed)) 1196 + return; 1216 1197 1217 1198 if (long_pressed) 1218 1199 ideapad_input_report(priv, 17); ··· 1225 1204 { 1226 1205 unsigned long bit, value; 1227 1206 1228 - if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value)) 1229 - return; 1207 + scoped_guard(mutex, &priv->vpc_mutex) 1208 + if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value)) 1209 + return; 1230 1210 1231 1211 for_each_set_bit (bit, &value, 16) { 1232 1212 switch (bit) { ··· 1260 1238 unsigned long now; 1261 1239 int err; 1262 1240 1241 + guard(mutex)(&priv->vpc_mutex); 1242 + 1263 1243 err = read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); 1264 1244 if (err) 1265 1245 return err; ··· 1273 1249 { 1274 1250 struct ideapad_private *priv = bl_get_data(blightdev); 1275 1251 int err; 1252 + 1253 + guard(mutex)(&priv->vpc_mutex); 1276 1254 1277 1255 err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL, 1278 1256 blightdev->props.brightness); ··· 1353 1327 if (!blightdev) 1354 1328 return; 1355 1329 1330 + guard(mutex)(&priv->vpc_mutex); 1331 + 1356 1332 if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power)) 1357 1333 return; 1358 1334 ··· 1367 1339 1368 1340 /* if we control brightness via acpi video driver */ 1369 1341 if (!priv->blightdev) 1370 - read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); 1342 + scoped_guard(mutex, &priv->vpc_mutex) 1343 + read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); 1371 1344 else 1372 1345 backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY); 1373 1346 } ··· 1593 1564 int ret; 1594 1565 1595 1566 /* Without reading from EC touchpad LED doesn't switch state */ 1596 - ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value); 1567 + scoped_guard(mutex, &priv->vpc_mutex) 1568 + ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value); 1597 1569 if (ret) 1598 1570 return; 1599 1571 ··· 1622 1592 priv->r_touchpad_val = value; 1623 1593 } 1624 1594 1595 + static const struct dmi_system_id ymc_ec_trigger_quirk_dmi_table[] = { 1596 + { 1597 + /* Lenovo Yoga 7 14ARB7 */ 1598 + .matches = { 1599 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1600 + DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), 1601 + }, 1602 + }, 1603 + { 1604 + /* Lenovo Yoga 7 14ACN6 */ 1605 + .matches = { 1606 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1607 + DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), 1608 + }, 1609 + }, 1610 + { } 1611 + }; 1612 + 1613 + static void ideapad_laptop_trigger_ec(void) 1614 + { 1615 + struct ideapad_private *priv; 1616 + int ret; 1617 + 1618 + guard(mutex)(&ideapad_shared_mutex); 1619 + 1620 + priv = ideapad_shared; 1621 + if (!priv) 1622 + return; 1623 + 1624 + if (!priv->features.ymc_ec_trigger) 1625 + return; 1626 + 1627 + scoped_guard(mutex, &priv->vpc_mutex) 1628 + ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_YMC, 1); 1629 + if (ret) 1630 + dev_warn(&priv->platform_device->dev, "Could not write YMC: %d\n", ret); 1631 + } 1632 + 1633 + static int ideapad_laptop_nb_notify(struct notifier_block *nb, 1634 + unsigned long action, void *data) 1635 + { 1636 + switch (action) { 1637 + case IDEAPAD_LAPTOP_YMC_EVENT: 1638 + ideapad_laptop_trigger_ec(); 1639 + break; 1640 + } 1641 + 1642 + return 0; 1643 + } 1644 + 1645 + 
static struct notifier_block ideapad_laptop_notifier = { 1646 + .notifier_call = ideapad_laptop_nb_notify, 1647 + }; 1648 + 1649 + static BLOCKING_NOTIFIER_HEAD(ideapad_laptop_chain_head); 1650 + 1651 + int ideapad_laptop_register_notifier(struct notifier_block *nb) 1652 + { 1653 + return blocking_notifier_chain_register(&ideapad_laptop_chain_head, nb); 1654 + } 1655 + EXPORT_SYMBOL_NS_GPL(ideapad_laptop_register_notifier, IDEAPAD_LAPTOP); 1656 + 1657 + int ideapad_laptop_unregister_notifier(struct notifier_block *nb) 1658 + { 1659 + return blocking_notifier_chain_unregister(&ideapad_laptop_chain_head, nb); 1660 + } 1661 + EXPORT_SYMBOL_NS_GPL(ideapad_laptop_unregister_notifier, IDEAPAD_LAPTOP); 1662 + 1663 + void ideapad_laptop_call_notifier(unsigned long action, void *data) 1664 + { 1665 + blocking_notifier_call_chain(&ideapad_laptop_chain_head, action, data); 1666 + } 1667 + EXPORT_SYMBOL_NS_GPL(ideapad_laptop_call_notifier, IDEAPAD_LAPTOP); 1668 + 1625 1669 static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) 1626 1670 { 1627 1671 struct ideapad_private *priv = data; 1628 1672 unsigned long vpc1, vpc2, bit; 1629 1673 1630 - if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1)) 1631 - return; 1674 + scoped_guard(mutex, &priv->vpc_mutex) { 1675 + if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1)) 1676 + return; 1632 1677 1633 - if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2)) 1634 - return; 1678 + if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2)) 1679 + return; 1680 + } 1635 1681 1636 1682 vpc1 = (vpc2 << 8) | vpc1; 1637 1683 ··· 1834 1728 priv->features.ctrl_ps2_aux_port = 1835 1729 ctrl_ps2_aux_port || dmi_check_system(ctrl_ps2_aux_port_list); 1836 1730 priv->features.touchpad_ctrl_via_ec = touchpad_ctrl_via_ec; 1731 + priv->features.ymc_ec_trigger = 1732 + ymc_ec_trigger || dmi_check_system(ymc_ec_trigger_quirk_dmi_table); 1837 1733 1838 1734 if (!read_ec_data(handle, VPCCMD_R_FAN, &val)) 1839 1735 priv->features.fan_mode = true; ··· 2014 1906 priv->adev = adev; 2015 1907 priv->platform_device = pdev; 2016 1908 1909 + err = devm_mutex_init(&pdev->dev, &priv->vpc_mutex); 1910 + if (err) 1911 + return err; 1912 + 2017 1913 ideapad_check_features(priv); 2018 1914 2019 1915 err = ideapad_sysfs_init(priv); ··· 2086 1974 if (err) 2087 1975 goto shared_init_failed; 2088 1976 1977 + ideapad_laptop_register_notifier(&ideapad_laptop_notifier); 1978 + 2089 1979 return 0; 2090 1980 2091 1981 shared_init_failed: ··· 2119 2005 { 2120 2006 struct ideapad_private *priv = dev_get_drvdata(&pdev->dev); 2121 2007 int i; 2008 + 2009 + ideapad_laptop_unregister_notifier(&ideapad_laptop_notifier); 2122 2010 2123 2011 ideapad_shared_exit(priv); 2124 2012
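
The ideapad conversions above use the kernel's scope-based locking from <linux/cleanup.h>: guard(mutex)(&m) holds the mutex until the enclosing function returns, while scoped_guard(mutex, &m) limits it to the next statement or block. A rough sketch of both forms (demonstration only, not the driver code):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int whole_function_locked(void)
{
	guard(mutex)(&demo_lock);	/* unlocked automatically on return */

	return 0;			/* every return path releases the lock */
}

static void statement_locked(void)
{
	scoped_guard(mutex, &demo_lock) {
		/* demo_lock held only inside this block */
	}
	/* demo_lock already released here */
}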
+9
drivers/platform/x86/ideapad-laptop.h
··· 12 12 #include <linux/acpi.h> 13 13 #include <linux/jiffies.h> 14 14 #include <linux/errno.h> 15 + #include <linux/notifier.h> 16 + 17 + enum ideapad_laptop_notifier_actions { 18 + IDEAPAD_LAPTOP_YMC_EVENT, 19 + }; 20 + 21 + int ideapad_laptop_register_notifier(struct notifier_block *nb); 22 + int ideapad_laptop_unregister_notifier(struct notifier_block *nb); 23 + void ideapad_laptop_call_notifier(unsigned long action, void *data); 15 24 16 25 enum { 17 26 VPCCMD_R_VPC1 = 0x10,
+2 -58
drivers/platform/x86/lenovo-ymc.c
··· 20 20 #define LENOVO_YMC_QUERY_INSTANCE 0 21 21 #define LENOVO_YMC_QUERY_METHOD 0x01 22 22 23 - static bool ec_trigger __read_mostly; 24 - module_param(ec_trigger, bool, 0444); 25 - MODULE_PARM_DESC(ec_trigger, "Enable EC triggering work-around to force emitting tablet mode events"); 26 - 27 23 static bool force; 28 24 module_param(force, bool, 0444); 29 25 MODULE_PARM_DESC(force, "Force loading on boards without a convertible DMI chassis-type"); 30 - 31 - static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = { 32 - { 33 - /* Lenovo Yoga 7 14ARB7 */ 34 - .matches = { 35 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 36 - DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), 37 - }, 38 - }, 39 - { 40 - /* Lenovo Yoga 7 14ACN6 */ 41 - .matches = { 42 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 43 - DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), 44 - }, 45 - }, 46 - { } 47 - }; 48 26 49 27 static const struct dmi_system_id allowed_chasis_types_dmi_table[] = { 50 28 { ··· 40 62 41 63 struct lenovo_ymc_private { 42 64 struct input_dev *input_dev; 43 - struct acpi_device *ec_acpi_dev; 44 65 }; 45 - 46 - static void lenovo_ymc_trigger_ec(struct wmi_device *wdev, struct lenovo_ymc_private *priv) 47 - { 48 - int err; 49 - 50 - if (!priv->ec_acpi_dev) 51 - return; 52 - 53 - err = write_ec_cmd(priv->ec_acpi_dev->handle, VPCCMD_W_YMC, 1); 54 - if (err) 55 - dev_warn(&wdev->dev, "Could not write YMC: %d\n", err); 56 - } 57 66 58 67 static const struct key_entry lenovo_ymc_keymap[] = { 59 68 /* Laptop */ ··· 90 125 91 126 free_obj: 92 127 kfree(obj); 93 - lenovo_ymc_trigger_ec(wdev, priv); 128 + ideapad_laptop_call_notifier(IDEAPAD_LAPTOP_YMC_EVENT, &code); 94 129 } 95 - 96 - static void acpi_dev_put_helper(void *p) { acpi_dev_put(p); } 97 130 98 131 static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) 99 132 { ··· 106 143 return -ENODEV; 107 144 } 108 145 109 - ec_trigger |= dmi_check_system(ec_trigger_quirk_dmi_table); 110 - 111 146 priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL); 112 147 if (!priv) 113 148 return -ENOMEM; 114 - 115 - if (ec_trigger) { 116 - pr_debug("Lenovo YMC enable EC triggering.\n"); 117 - priv->ec_acpi_dev = acpi_dev_get_first_match_dev("VPC2004", NULL, -1); 118 - 119 - if (!priv->ec_acpi_dev) { 120 - dev_err(&wdev->dev, "Could not find EC ACPI device.\n"); 121 - return -ENODEV; 122 - } 123 - err = devm_add_action_or_reset(&wdev->dev, 124 - acpi_dev_put_helper, priv->ec_acpi_dev); 125 - if (err) { 126 - dev_err(&wdev->dev, 127 - "Could not clean up EC ACPI device: %d\n", err); 128 - return err; 129 - } 130 - } 131 149 132 150 input_dev = devm_input_allocate_device(&wdev->dev); 133 151 if (!input_dev) ··· 136 192 dev_set_drvdata(&wdev->dev, priv); 137 193 138 194 /* Report the state for the first time on probe */ 139 - lenovo_ymc_trigger_ec(wdev, priv); 140 195 lenovo_ymc_notify(wdev, NULL); 141 196 return 0; 142 197 } ··· 160 217 MODULE_AUTHOR("Gergo Koteles <soyer@irl.hu>"); 161 218 MODULE_DESCRIPTION("Lenovo Yoga Mode Control driver"); 162 219 MODULE_LICENSE("GPL"); 220 + MODULE_IMPORT_NS(IDEAPAD_LAPTOP);
+10 -5
drivers/scsi/sd.c
··· 2711 2711 2712 2712 if (buffer[14] & 0x40) /* LBPRZ */ 2713 2713 sdkp->lbprz = 1; 2714 - 2715 - sd_config_discard(sdkp, lim, SD_LBP_WS16); 2716 2714 } 2717 2715 2718 2716 sdkp->capacity = lba + 1; ··· 3363 3365 sdkp->unmap_alignment = 3364 3366 get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); 3365 3367 3366 - sd_config_discard(sdkp, lim, sd_discard_mode(sdkp)); 3367 - 3368 3368 config_atomic: 3369 3369 sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]); 3370 3370 sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]); ··· 3749 3753 sd_read_block_limits_ext(sdkp); 3750 3754 sd_read_block_characteristics(sdkp, &lim); 3751 3755 sd_zbc_read_zones(sdkp, &lim, buffer); 3752 - sd_read_cpr(sdkp); 3753 3756 } 3757 + 3758 + sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp)); 3754 3759 3755 3760 sd_print_capacity(sdkp, old_capacity); 3756 3761 ··· 3804 3807 blk_mq_unfreeze_queue(sdkp->disk->queue); 3805 3808 if (err) 3806 3809 return err; 3810 + 3811 + /* 3812 + * Query concurrent positioning ranges after 3813 + * queue_limits_commit_update() unlocked q->limits_lock to avoid 3814 + * deadlock with q->sysfs_dir_lock and q->sysfs_lock. 3815 + */ 3816 + if (sdkp->media_present && scsi_device_supports_vpd(sdp)) 3817 + sd_read_cpr(sdkp); 3807 3818 3808 3819 /* 3809 3820 * For a zoned drive, revalidating the zones can be done only once
+4 -2
drivers/spi/spi-fsl-lpspi.c
··· 296 296 static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) 297 297 { 298 298 struct lpspi_config config = fsl_lpspi->config; 299 - unsigned int perclk_rate, scldiv; 299 + unsigned int perclk_rate, scldiv, div; 300 300 u8 prescale; 301 301 302 302 perclk_rate = clk_get_rate(fsl_lpspi->clk_per); ··· 313 313 return -EINVAL; 314 314 } 315 315 316 + div = DIV_ROUND_UP(perclk_rate, config.speed_hz); 317 + 316 318 for (prescale = 0; prescale < 8; prescale++) { 317 - scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2; 319 + scldiv = div / (1 << prescale) - 2; 318 320 if (scldiv < 256) { 319 321 fsl_lpspi->config.prescale = prescale; 320 322 break;
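The fsl-lpspi fix above replaces truncating division with DIV_ROUND_UP() when deriving the SCK divider, so rounding can only push the output clock below the requested rate, never above it. A minimal userspace sketch of the difference, using invented clock numbers (the macro matches the kernel's <linux/math.h> definition):

/* Illustrative only: old vs. new divider math in fsl_lpspi_set_bitrate(). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in <linux/math.h> */

int main(void)
{
	unsigned int perclk_rate = 24000000;	/* assumed peripheral clock */
	unsigned int speed_hz = 7000000;	/* assumed requested SCK rate */
	unsigned int prescale = 0;

	/* Old: truncating division can pick a divider that is too small,
	 * producing an SCK faster than what was requested. */
	unsigned int old_scldiv = perclk_rate / speed_hz / (1 << prescale) - 2;

	/* New: round the divider up so SCK never exceeds the request. */
	unsigned int new_scldiv =
		DIV_ROUND_UP(perclk_rate, speed_hz) / (1 << prescale) - 2;

	printf("old scldiv=%u -> %u Hz\n", old_scldiv,
	       perclk_rate / ((old_scldiv + 2) << prescale));
	printf("new scldiv=%u -> %u Hz\n", new_scldiv,
	       perclk_rate / ((new_scldiv + 2) << prescale));
	return 0;
}

With these numbers the old math lands at 8 MHz, above the 7 MHz request; the rounded-up divider lands at 6 MHz.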
+4
drivers/spi/spi-hisi-kunpeng.c
··· 481 481 return -EINVAL; 482 482 } 483 483 484 + if (host->max_speed_hz == 0) 485 + return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n"); 486 + 484 487 ret = device_property_read_u16(dev, "num-cs", 485 488 &host->num_chipselect); 486 489 if (ret) ··· 498 495 host->transfer_one = hisi_spi_transfer_one; 499 496 host->handle_err = hisi_spi_handle_err; 500 497 host->dev.fwnode = dev->fwnode; 498 + host->min_speed_hz = DIV_ROUND_UP(host->max_speed_hz, CLK_DIV_MAX); 501 499 502 500 hisi_spi_hw_init(hs); 503 501
+1
drivers/spi/spidev.c
··· 700 700 }; 701 701 702 702 static const struct spi_device_id spidev_spi_ids[] = { 703 + { .name = "bh2228fv" }, 703 704 { .name = "dh2228fv" }, 704 705 { .name = "ltc2488" }, 705 706 { .name = "sx1301" },
+5 -6
drivers/spmi/spmi-pmic-arb.c
··· 398 398 399 399 *offset = rc; 400 400 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { 401 - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", 401 + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", 402 402 PMIC_ARB_MAX_TRANS_BYTES, len); 403 403 return -EINVAL; 404 404 } ··· 477 477 478 478 *offset = rc; 479 479 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { 480 - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", 480 + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", 481 481 PMIC_ARB_MAX_TRANS_BYTES, len); 482 482 return -EINVAL; 483 483 } ··· 1702 1702 1703 1703 index = of_property_match_string(node, "reg-names", "cnfg"); 1704 1704 if (index < 0) { 1705 - dev_err(dev, "cnfg reg region missing"); 1705 + dev_err(dev, "cnfg reg region missing\n"); 1706 1706 return -EINVAL; 1707 1707 } 1708 1708 ··· 1712 1712 1713 1713 index = of_property_match_string(node, "reg-names", "intr"); 1714 1714 if (index < 0) { 1715 - dev_err(dev, "intr reg region missing"); 1715 + dev_err(dev, "intr reg region missing\n"); 1716 1716 return -EINVAL; 1717 1717 } 1718 1718 ··· 1737 1737 1738 1738 dev_dbg(&pdev->dev, "adding irq domain for bus %d\n", bus_index); 1739 1739 1740 - bus->domain = irq_domain_add_tree(dev->of_node, 1741 - &pmic_arb_irq_domain_ops, bus); 1740 + bus->domain = irq_domain_add_tree(node, &pmic_arb_irq_domain_ops, bus); 1742 1741 if (!bus->domain) { 1743 1742 dev_err(&pdev->dev, "unable to create irq_domain\n"); 1744 1743 return -ENOMEM;
+6 -2
drivers/staging/media/atomisp/pci/ia_css_stream_public.h
··· 27 27 #include "ia_css_prbs.h" 28 28 #include "ia_css_input_port.h" 29 29 30 - /* Input modes, these enumerate all supported input modes. 31 - * Note that not all ISP modes support all input modes. 30 + /* 31 + * Input modes, these enumerate all supported input modes. 32 + * This enum is part of the atomisp firmware ABI and must 33 + * NOT be changed! 34 + * Note that not all ISP modes support all input modes. 32 35 */ 33 36 enum ia_css_input_mode { 34 37 IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */ 35 38 IA_CSS_INPUT_MODE_FIFO, /** data from input-fifo */ 39 + IA_CSS_INPUT_MODE_TPG, /** data from test-pattern generator */ 36 40 IA_CSS_INPUT_MODE_PRBS, /** data from pseudo-random bit stream */ 37 41 IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */ 38 42 IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
+16 -3
drivers/staging/media/atomisp/pci/sh_css_internal.h
··· 344 344 
345 345 #define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3) 
346 346 
347 - /* SP configuration information */ 
347 + /* 
348 + * SP configuration information 
349 + * 
350 + * This struct is part of the atomisp firmware ABI and is directly copied 
351 + * to ISP DRAM by sh_css_store_sp_group_to_ddr(). 
352 + * 
353 + * Do NOT change this struct's layout or remove seemingly unused fields! 
354 + */ 
348 355 struct sh_css_sp_config { 
349 356 u8 no_isp_sync; /* Signal host immediately after start */ 
350 357 u8 enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */ 
··· 361 354 host (true) or when they are passed to the preview/video pipe 
362 355 (false). */ 
363 356 
357 + /* 
358 + * Note the fields below are only used on the ISP2400, not on the ISP2401; 
359 + * sh_css_store_sp_group_to_ddr() skips copying these when run on the ISP2401. 
360 + */ 
364 361 struct { 
365 362 u8 a_changed; 
366 363 u8 b_changed; 
··· 374 363 } input_formatter; 
375 364 
376 365 sync_generator_cfg_t sync_gen; 
366 + tpg_cfg_t tpg; 
377 367 prbs_cfg_t prbs; 
378 368 input_system_cfg_t input_circuit; 
379 369 u8 input_circuit_cfg_changed; 
380 - u32 mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT]; 
381 - u8 enable_isys_event_queue; 
370 + u32 mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT]; 
371 + /* These last 2 fields are used on both the ISP2400 and the ISP2401 */ 
372 + u8 enable_isys_event_queue; 
382 373 u8 disable_cont_vf; 
383 374 }; 
384 375 
+15 -10
drivers/tty/serial/sc16is7xx.c
··· 327 327 struct kthread_work reg_work; 328 328 struct kthread_delayed_work ms_work; 329 329 struct sc16is7xx_one_config config; 330 + unsigned char buf[SC16IS7XX_FIFO_SIZE]; /* Rx buffer. */ 330 331 unsigned int old_mctrl; 331 332 u8 old_lcr; /* Value before EFR access. */ 332 333 bool irda_mode; ··· 341 340 unsigned long gpio_valid_mask; 342 341 #endif 343 342 u8 mctrl_mask; 344 - unsigned char buf[SC16IS7XX_FIFO_SIZE]; 345 343 struct kthread_worker kworker; 346 344 struct task_struct *kworker_task; 347 345 struct sc16is7xx_one p[]; ··· 592 592 SC16IS7XX_MCR_CLKSEL_BIT, 593 593 prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT); 594 594 595 + mutex_lock(&one->efr_lock); 596 + 595 597 /* Backup LCR and access special register set (DLL/DLH) */ 596 598 lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); 597 599 sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, ··· 608 606 /* Restore LCR and access to general register set */ 609 607 sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); 610 608 609 + mutex_unlock(&one->efr_lock); 610 + 611 611 return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div); 612 612 } 613 613 614 614 static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, 615 615 unsigned int iir) 616 616 { 617 - struct sc16is7xx_port *s = dev_get_drvdata(port->dev); 617 + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); 618 618 unsigned int lsr = 0, bytes_read, i; 619 619 bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC) ? true : false; 620 620 u8 ch, flag; 621 621 622 - if (unlikely(rxlen >= sizeof(s->buf))) { 622 + if (unlikely(rxlen >= sizeof(one->buf))) { 623 623 dev_warn_ratelimited(port->dev, 624 624 "ttySC%i: Possible RX FIFO overrun: %d\n", 625 625 port->line, rxlen); 626 626 port->icount.buf_overrun++; 627 627 /* Ensure sanity of RX level */ 628 - rxlen = sizeof(s->buf); 628 + rxlen = sizeof(one->buf); 629 629 } 630 630 631 631 while (rxlen) { ··· 640 636 lsr = 0; 641 637 642 638 if (read_lsr) { 643 - s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 639 + one->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 644 640 bytes_read = 1; 645 641 } else { 646 - sc16is7xx_fifo_read(port, s->buf, rxlen); 642 + sc16is7xx_fifo_read(port, one->buf, rxlen); 647 643 bytes_read = rxlen; 648 644 } 649 645 ··· 676 672 } 677 673 678 674 for (i = 0; i < bytes_read; ++i) { 679 - ch = s->buf[i]; 675 + ch = one->buf[i]; 680 676 if (uart_handle_sysrq_char(port, ch)) 681 677 continue; 682 678 ··· 694 690 695 691 static void sc16is7xx_handle_tx(struct uart_port *port) 696 692 { 697 - struct sc16is7xx_port *s = dev_get_drvdata(port->dev); 698 693 struct tty_port *tport = &port->state->port; 699 694 unsigned long flags; 700 695 unsigned int txlen; 696 + unsigned char *tail; 701 697 702 698 if (unlikely(port->x_char)) { 703 699 sc16is7xx_port_write(port, SC16IS7XX_THR_REG, port->x_char); ··· 722 718 txlen = 0; 723 719 } 724 720 725 - txlen = uart_fifo_out(port, s->buf, txlen); 726 - sc16is7xx_fifo_write(port, s->buf, txlen); 721 + txlen = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, txlen); 722 + sc16is7xx_fifo_write(port, tail, txlen); 723 + uart_xmit_advance(port, txlen); 727 724 728 725 uart_port_lock_irqsave(port, &flags); 729 726 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
+8
drivers/tty/serial/serial_core.c
··· 881 881 new_flags = (__force upf_t)new_info->flags; 
882 882 old_custom_divisor = uport->custom_divisor; 
883 883 
884 + if (!(uport->flags & UPF_FIXED_PORT)) { 
885 + unsigned int uartclk = new_info->baud_base * 16; 
886 + /* this check must be done here, before any other settings are changed */ 
887 + if (uartclk == 0) { 
888 + retval = -EINVAL; 
889 + goto exit; 
890 + } 
891 + } 
884 892 if (!capable(CAP_SYS_ADMIN)) { 
885 893 retval = -EPERM; 
886 894 if (change_irq || change_port || 
+7 -13
drivers/tty/vt/conmakehash.c
··· 11 11 * Copyright (C) 1995-1997 H. Peter Anvin 12 12 */ 13 13 14 + #include <libgen.h> 15 + #include <linux/limits.h> 14 16 #include <stdio.h> 15 17 #include <stdlib.h> 16 18 #include <sysexits.h> ··· 78 76 int main(int argc, char *argv[]) 79 77 { 80 78 FILE *ctbl; 81 - const char *tblname, *rel_tblname; 82 - const char *abs_srctree; 79 + const char *tblname; 80 + char base_tblname[PATH_MAX]; 83 81 char buffer[65536]; 84 82 int fontlen; 85 83 int i, nuni, nent; ··· 103 101 exit(EX_NOINPUT); 104 102 } 105 103 } 106 - 107 - abs_srctree = getenv("abs_srctree"); 108 - if (abs_srctree && !strncmp(abs_srctree, tblname, strlen(abs_srctree))) 109 - { 110 - rel_tblname = tblname + strlen(abs_srctree); 111 - while (*rel_tblname == '/') 112 - ++rel_tblname; 113 - } 114 - else 115 - rel_tblname = tblname; 116 104 117 105 /* For now we assume the default font is always 256 characters. */ 118 106 fontlen = 256; ··· 245 253 for ( i = 0 ; i < fontlen ; i++ ) 246 254 nuni += unicount[i]; 247 255 256 + strncpy(base_tblname, tblname, PATH_MAX); 257 + base_tblname[PATH_MAX - 1] = 0; 248 258 printf("\ 249 259 /*\n\ 250 260 * Do not edit this file; it was automatically generated by\n\ ··· 258 264 #include <linux/types.h>\n\ 259 265 \n\ 260 266 u8 dfont_unicount[%d] = \n\ 261 - {\n\t", rel_tblname, fontlen); 267 + {\n\t", basename(base_tblname), fontlen); 262 268 263 269 for ( i = 0 ; i < fontlen ; i++ ) 264 270 {
+8 -3
drivers/ufs/core/ufshcd.c
··· 4100 4100 min_sleep_time_us = 4101 4101 MIN_DELAY_BEFORE_DME_CMDS_US - delta; 4102 4102 else 4103 - return; /* no more delay required */ 4103 + min_sleep_time_us = 0; /* no more delay required */ 4104 4104 } 4105 4105 4106 - /* allow sleep for extra 50us if needed */ 4107 - usleep_range(min_sleep_time_us, min_sleep_time_us + 50); 4106 + if (min_sleep_time_us > 0) { 4107 + /* allow sleep for extra 50us if needed */ 4108 + usleep_range(min_sleep_time_us, min_sleep_time_us + 50); 4109 + } 4110 + 4111 + /* update the last_dme_cmd_tstamp */ 4112 + hba->last_dme_cmd_tstamp = ktime_get(); 4108 4113 } 4109 4114 4110 4115 /**
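The ufshcd change above makes the inter-DME-command pacing unconditional: the early return is gone, the sleep happens only when part of the gap remains, and last_dme_cmd_tstamp is refreshed on every call. A rough userspace model of that rule, with an invented 1 ms minimum gap and CLOCK_MONOTONIC standing in for ktime_get():

#include <stdio.h>
#include <time.h>

#define MIN_DELAY_US 1000LL	/* invented minimum gap */

static long long last_cmd_us;	/* plays hba->last_dme_cmd_tstamp */

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static void wait_before_cmd(void)
{
	long long delta = now_us() - last_cmd_us;
	long long remaining = delta < MIN_DELAY_US ? MIN_DELAY_US - delta : 0;

	if (remaining > 0) {
		struct timespec ts = { 0, remaining * 1000 };

		nanosleep(&ts, NULL);
	}
	last_cmd_us = now_us();	/* always refresh, as the fix now does */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		wait_before_cmd();
		printf("cmd %d issued at %lld us\n", i, now_us());
	}
	return 0;
}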
+20 -12
drivers/usb/gadget/function/f_fs.c
··· 3734 3734 if (alt > MAX_ALT_SETTINGS) 3735 3735 return -EINVAL; 3736 3736 3737 - if (alt != (unsigned)-1) { 3738 - intf = ffs_func_revmap_intf(func, interface); 3739 - if (intf < 0) 3740 - return intf; 3741 - } 3737 + intf = ffs_func_revmap_intf(func, interface); 3738 + if (intf < 0) 3739 + return intf; 3742 3740 3743 3741 if (ffs->func) 3744 3742 ffs_func_eps_disable(ffs->func); ··· 3751 3753 if (ffs->state != FFS_ACTIVE) 3752 3754 return -ENODEV; 3753 3755 3754 - if (alt == (unsigned)-1) { 3755 - ffs->func = NULL; 3756 - ffs_event_add(ffs, FUNCTIONFS_DISABLE); 3757 - return 0; 3758 - } 3759 - 3760 3756 ffs->func = func; 3761 3757 ret = ffs_func_eps_enable(func); 3762 3758 if (ret >= 0) { ··· 3762 3770 3763 3771 static void ffs_func_disable(struct usb_function *f) 3764 3772 { 3765 - ffs_func_set_alt(f, 0, (unsigned)-1); 3773 + struct ffs_function *func = ffs_func_from_usb(f); 3774 + struct ffs_data *ffs = func->ffs; 3775 + 3776 + if (ffs->func) 3777 + ffs_func_eps_disable(ffs->func); 3778 + 3779 + if (ffs->state == FFS_DEACTIVATED) { 3780 + ffs->state = FFS_CLOSING; 3781 + INIT_WORK(&ffs->reset_work, ffs_reset_work); 3782 + schedule_work(&ffs->reset_work); 3783 + return; 3784 + } 3785 + 3786 + if (ffs->state == FFS_ACTIVE) { 3787 + ffs->func = NULL; 3788 + ffs_event_add(ffs, FUNCTIONFS_DISABLE); 3789 + } 3766 3790 } 3767 3791 3768 3792 static int ffs_func_setup(struct usb_function *f,
+15 -6
drivers/usb/gadget/function/f_midi2.c
··· 642 642 if (format) 643 643 return; // invalid 644 644 blk = (*data >> 8) & 0xff; 645 - if (blk >= ep->num_blks) 646 - return; 647 - if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) 648 - reply_ump_stream_fb_info(ep, blk); 649 - if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) 650 - reply_ump_stream_fb_name(ep, blk); 645 + if (blk == 0xff) { 646 + /* inquiry for all blocks */ 647 + for (blk = 0; blk < ep->num_blks; blk++) { 648 + if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) 649 + reply_ump_stream_fb_info(ep, blk); 650 + if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) 651 + reply_ump_stream_fb_name(ep, blk); 652 + } 653 + } else if (blk < ep->num_blks) { 654 + /* only the specified block */ 655 + if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) 656 + reply_ump_stream_fb_info(ep, blk); 657 + if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) 658 + reply_ump_stream_fb_name(ep, blk); 659 + } 651 660 return; 652 661 } 653 662 }
+34 -8
drivers/usb/gadget/function/u_audio.c
··· 592 592 struct usb_ep *ep, *ep_fback; 593 593 struct uac_rtd_params *prm; 594 594 struct uac_params *params = &audio_dev->params; 595 - int req_len, i; 595 + int req_len, i, ret; 596 596 597 597 prm = &uac->c_prm; 598 598 dev_dbg(dev, "start capture with rate %d\n", prm->srate); 599 599 ep = audio_dev->out_ep; 600 - config_ep_by_speed(gadget, &audio_dev->func, ep); 600 + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); 601 + if (ret < 0) { 602 + dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret); 603 + return ret; 604 + } 605 + 601 606 req_len = ep->maxpacket; 602 607 603 608 prm->ep_enabled = true; 604 - usb_ep_enable(ep); 609 + ret = usb_ep_enable(ep); 610 + if (ret < 0) { 611 + dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret); 612 + return ret; 613 + } 605 614 606 615 for (i = 0; i < params->req_number; i++) { 607 616 if (!prm->reqs[i]) { ··· 638 629 return 0; 639 630 640 631 /* Setup feedback endpoint */ 641 - config_ep_by_speed(gadget, &audio_dev->func, ep_fback); 632 + ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback); 633 + if (ret < 0) { 634 + dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret); 635 + return ret; // TODO: Clean up out_ep 636 + } 637 + 642 638 prm->fb_ep_enabled = true; 643 - usb_ep_enable(ep_fback); 639 + ret = usb_ep_enable(ep_fback); 640 + if (ret < 0) { 641 + dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret); 642 + return ret; // TODO: Clean up out_ep 643 + } 644 644 req_len = ep_fback->maxpacket; 645 645 646 646 req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC); ··· 705 687 struct uac_params *params = &audio_dev->params; 706 688 unsigned int factor; 707 689 const struct usb_endpoint_descriptor *ep_desc; 708 - int req_len, i; 690 + int req_len, i, ret; 709 691 unsigned int p_pktsize; 710 692 711 693 prm = &uac->p_prm; 712 694 dev_dbg(dev, "start playback with rate %d\n", prm->srate); 713 695 ep = audio_dev->in_ep; 714 - config_ep_by_speed(gadget, &audio_dev->func, ep); 696 + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); 697 + if (ret < 0) { 698 + dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret); 699 + return ret; 700 + } 715 701 716 702 ep_desc = ep->desc; 717 703 /* ··· 742 720 uac->p_residue_mil = 0; 743 721 744 722 prm->ep_enabled = true; 745 - usb_ep_enable(ep); 723 + ret = usb_ep_enable(ep); 724 + if (ret < 0) { 725 + dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret); 726 + return ret; 727 + } 746 728 747 729 for (i = 0; i < params->req_number; i++) { 748 730 if (!prm->reqs[i]) {
+1
drivers/usb/gadget/function/u_serial.c
··· 1441 1441 spin_lock(&port->port_lock); 1442 1442 spin_unlock(&serial_port_lock); 1443 1443 port->suspended = true; 1444 + port->start_delayed = true; 1444 1445 spin_unlock_irqrestore(&port->port_lock, flags); 1445 1446 } 1446 1447 EXPORT_SYMBOL_GPL(gserial_suspend);
+4 -6
drivers/usb/gadget/udc/core.c
··· 118 118 goto out; 119 119 120 120 /* UDC drivers can't handle endpoints with maxpacket size 0 */ 121 - if (usb_endpoint_maxp(ep->desc) == 0) { 122 - /* 123 - * We should log an error message here, but we can't call 124 - * dev_err() because there's no way to find the gadget 125 - * given only ep. 126 - */ 121 + if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) { 122 + WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name, 123 + (!ep->desc) ? "NULL descriptor" : "maxpacket 0"); 124 + 127 125 ret = -EINVAL; 128 126 goto out; 129 127 }
+1
drivers/usb/serial/ch341.c
··· 863 863 864 864 module_usb_serial_driver(serial_drivers, id_table); 865 865 866 + MODULE_DESCRIPTION("Winchiphead CH341 USB Serial driver"); 866 867 MODULE_LICENSE("GPL v2");
+2 -3
drivers/usb/serial/garmin_gps.c
··· 104 104 int seq; 105 105 /* the real size of the data array, always > 0 */ 106 106 int size; 107 - __u8 data[]; 107 + __u8 data[] __counted_by(size); 108 108 }; 109 109 110 110 /* structure used to keep the current state of the driver */ ··· 267 267 268 268 /* process only packets containing data ... */ 269 269 if (data_length) { 270 - pkt = kmalloc(sizeof(struct garmin_packet)+data_length, 271 - GFP_ATOMIC); 270 + pkt = kmalloc(struct_size(pkt, data, data_length), GFP_ATOMIC); 272 271 if (!pkt) 273 272 return 0; 274 273
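The garmin_gps hunk is the standard flexible-array hardening pattern: annotate data[] with __counted_by(size) and replace the open-coded sizeof-plus-length allocation with struct_size(). A simplified userspace model of the allocation side; the real struct_size() from <linux/overflow.h> also saturates on arithmetic overflow, which this toy macro does not, and __counted_by() is a compiler annotation omitted here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct packet_model {
	int seq;
	int size;		/* __counted_by(size) in the kernel version */
	unsigned char data[];	/* flexible array member */
};

/* Toy struct_size(): header plus n array elements. */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

int main(void)
{
	struct packet_model *pkt;
	size_t data_length = 32;	/* invented payload size */

	pkt = malloc(struct_size(pkt, data, data_length));
	if (!pkt)
		return 1;
	pkt->size = (int)data_length;
	memset(pkt->data, 0, data_length);
	printf("allocated %zu bytes\n", struct_size(pkt, data, data_length));
	free(pkt);
	return 0;
}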
+1
drivers/usb/serial/mxuport.c
··· 1315 1315 1316 1316 MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>"); 1317 1317 MODULE_AUTHOR("<support@moxa.com>"); 1318 + MODULE_DESCRIPTION("Moxa UPORT USB Serial driver"); 1318 1319 MODULE_LICENSE("GPL");
+1
drivers/usb/serial/navman.c
··· 112 112 113 113 module_usb_serial_driver(serial_drivers, id_table); 114 114 115 + MODULE_DESCRIPTION("Navman USB Serial driver"); 115 116 MODULE_LICENSE("GPL v2");
+1
drivers/usb/serial/qcaux.c
··· 84 84 }; 85 85 86 86 module_usb_serial_driver(serial_drivers, id_table); 87 + MODULE_DESCRIPTION("Qualcomm USB Auxiliary Serial Port driver"); 87 88 MODULE_LICENSE("GPL v2");
-10
drivers/usb/serial/spcp8x5.c
··· 49 49 }; 50 50 MODULE_DEVICE_TABLE(usb, id_table); 51 51 52 - struct spcp8x5_usb_ctrl_arg { 53 - u8 type; 54 - u8 cmd; 55 - u8 cmd_type; 56 - u16 value; 57 - u16 index; 58 - u16 length; 59 - }; 60 - 61 - 62 52 /* spcp8x5 spec register define */ 63 53 #define MCR_CONTROL_LINE_RTS 0x02 64 54 #define MCR_CONTROL_LINE_DTR 0x01
+1
drivers/usb/serial/symbolserial.c
··· 190 190 191 191 module_usb_serial_driver(serial_drivers, id_table); 192 192 193 + MODULE_DESCRIPTION("Symbol USB barcode to serial driver"); 193 194 MODULE_LICENSE("GPL v2");
+1
drivers/usb/serial/usb-serial-simple.c
··· 163 163 MODULE_DEVICE_TABLE(usb, id_table); 164 164 165 165 module_usb_serial_driver(serial_drivers, id_table); 166 + MODULE_DESCRIPTION("USB Serial 'Simple' driver"); 166 167 MODULE_LICENSE("GPL v2");
+8
drivers/usb/serial/usb_debug.c
··· 76 76 usb_serial_generic_process_read_urb(urb); 77 77 } 78 78 79 + static void usb_debug_init_termios(struct tty_struct *tty) 80 + { 81 + tty->termios.c_lflag &= ~(ECHO | ECHONL); 82 + } 83 + 79 84 static struct usb_serial_driver debug_device = { 80 85 .driver = { 81 86 .owner = THIS_MODULE, ··· 90 85 .num_ports = 1, 91 86 .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE, 92 87 .break_ctl = usb_debug_break_ctl, 88 + .init_termios = usb_debug_init_termios, 93 89 .process_read_urb = usb_debug_process_read_urb, 94 90 }; 95 91 ··· 102 96 .id_table = dbc_id_table, 103 97 .num_ports = 1, 104 98 .break_ctl = usb_debug_break_ctl, 99 + .init_termios = usb_debug_init_termios, 105 100 .process_read_urb = usb_debug_process_read_urb, 106 101 }; 107 102 ··· 111 104 }; 112 105 113 106 module_usb_serial_driver(serial_drivers, id_table_combined); 107 + MODULE_DESCRIPTION("USB Debug cable driver"); 114 108 MODULE_LICENSE("GPL v2");
+14
drivers/usb/typec/mux/fsa4480.c
··· 13 13 #include <linux/usb/typec_dp.h> 14 14 #include <linux/usb/typec_mux.h> 15 15 16 + #define FSA4480_DEVICE_ID 0x00 17 + #define FSA4480_DEVICE_ID_VENDOR_ID GENMASK(7, 6) 18 + #define FSA4480_DEVICE_ID_VERSION_ID GENMASK(5, 3) 19 + #define FSA4480_DEVICE_ID_REV_ID GENMASK(2, 0) 16 20 #define FSA4480_SWITCH_ENABLE 0x04 17 21 #define FSA4480_SWITCH_SELECT 0x05 18 22 #define FSA4480_SWITCH_STATUS1 0x07 ··· 255 251 struct typec_switch_desc sw_desc = { }; 256 252 struct typec_mux_desc mux_desc = { }; 257 253 struct fsa4480 *fsa; 254 + int val = 0; 258 255 int ret; 259 256 260 257 fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL); ··· 272 267 fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config); 273 268 if (IS_ERR(fsa->regmap)) 274 269 return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n"); 270 + 271 + ret = regmap_read(fsa->regmap, FSA4480_DEVICE_ID, &val); 272 + if (ret || !val) 273 + return dev_err_probe(dev, -ENODEV, "FSA4480 not found\n"); 274 + 275 + dev_dbg(dev, "Found FSA4480 v%lu.%lu (Vendor ID = %lu)\n", 276 + FIELD_GET(FSA4480_DEVICE_ID_VERSION_ID, val), 277 + FIELD_GET(FSA4480_DEVICE_ID_REV_ID, val), 278 + FIELD_GET(FSA4480_DEVICE_ID_VENDOR_ID, val)); 275 279 276 280 /* Safe mode */ 277 281 fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
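The added fsa4480 ID probe is plain GENMASK()/FIELD_GET() bitfield extraction. A tiny userspace equivalent with a made-up register value; __builtin_ctz() stands in for the shift computation that <linux/bitfield.h> derives from the mask:

#include <stdio.h>

/* Userspace stand-ins for <linux/bits.h> and <linux/bitfield.h>. */
#define GENMASK(h, l)		((((1u << ((h) - (l) + 1)) - 1)) << (l))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz((mask)))

int main(void)
{
	unsigned int id = 0x49;	/* hypothetical FSA4480_DEVICE_ID readout */

	printf("vendor=%u version=%u rev=%u\n",
	       FIELD_GET(GENMASK(7, 6), id),	/* bits 7:6 -> 1 */
	       FIELD_GET(GENMASK(5, 3), id),	/* bits 5:3 -> 1 */
	       FIELD_GET(GENMASK(2, 0), id));	/* bits 2:0 -> 1 */
	return 0;
}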
+1 -1
drivers/usb/typec/tcpm/tcpci.c
··· 67 67 return regmap_raw_write(tcpci->regmap, reg, &val, sizeof(u16)); 68 68 } 69 69 70 - static bool tcpci_check_std_output_cap(struct regmap *regmap, u8 mask) 70 + static int tcpci_check_std_output_cap(struct regmap *regmap, u8 mask) 71 71 { 72 72 unsigned int reg; 73 73 int ret;
+1 -1
drivers/usb/typec/tcpm/tcpm.c
··· 4515 4515 return ERROR_RECOVERY; 4516 4516 if (port->pwr_role == TYPEC_SOURCE) 4517 4517 return SRC_UNATTACHED; 4518 - if (port->state == SNK_WAIT_CAPABILITIES) 4518 + if (port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) 4519 4519 return SNK_READY; 4520 4520 return SNK_UNATTACHED; 4521 4521 }
+2 -2
drivers/usb/typec/tipd/core.c
··· 1191 1191 dev_info(tps->dev, "Firmware update succeeded\n"); 1192 1192 1193 1193 release_fw: 1194 - release_firmware(fw); 1195 1194 if (ret) { 1196 1195 dev_err(tps->dev, "Failed to write patch %s of %zu bytes\n", 1197 1196 firmware_name, fw->size); 1198 1197 } 1198 + release_firmware(fw); 1199 1199 1200 1200 return ret; 1201 - }; 1201 + } 1202 1202 1203 1203 static int cd321x_init(struct tps6598x *tps) 1204 1204 {
+4 -7
drivers/usb/typec/ucsi/ucsi.c
··· 238 238 mutex_lock(&ucsi->ppm_lock); 239 239 240 240 ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack); 241 - if (cci & UCSI_CCI_BUSY) { 242 - ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false); 243 - return ret ? ret : -EBUSY; 244 - } 245 - 246 - if (cci & UCSI_CCI_ERROR) 247 - return ucsi_read_error(ucsi, connector_num); 241 + if (cci & UCSI_CCI_BUSY) 242 + ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false) ?: -EBUSY; 243 + else if (cci & UCSI_CCI_ERROR) 244 + ret = ucsi_read_error(ucsi, connector_num); 248 245 249 246 mutex_unlock(&ucsi->ppm_lock); 250 247 return ret;
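The reworked ucsi error path leans on the GNU ?: ("elvis") extension: x ?: y evaluates x once, yields it when non-zero, and falls back to y otherwise. Quick demo with errno-style values:

#include <stdio.h>

int main(void)
{
	int err = 0;

	/* "ret = f() ?: -EBUSY" keeps f()'s error when there is one and
	 * substitutes -EBUSY only on success (0). */
	printf("%d\n", err ?: -16);	/* err == 0    -> -16 (-EBUSY) */
	err = -5;
	printf("%d\n", err ?: -16);	/* err == -EIO -> -5 */
	return 0;
}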
+6 -3
drivers/usb/usbip/vhci_hcd.c
··· 745 745 * 746 746 */ 747 747 if (usb_pipedevice(urb->pipe) == 0) { 748 + struct usb_device *old; 748 749 __u8 type = usb_pipetype(urb->pipe); 749 750 struct usb_ctrlrequest *ctrlreq = 750 751 (struct usb_ctrlrequest *) urb->setup_packet; ··· 756 755 goto no_need_xmit; 757 756 } 758 757 758 + old = vdev->udev; 759 759 switch (ctrlreq->bRequest) { 760 760 case USB_REQ_SET_ADDRESS: 761 761 /* set_address may come when a device is reset */ 762 762 dev_info(dev, "SetAddress Request (%d) to port %d\n", 763 763 ctrlreq->wValue, vdev->rhport); 764 764 765 - usb_put_dev(vdev->udev); 766 765 vdev->udev = usb_get_dev(urb->dev); 766 + usb_put_dev(old); 767 767 768 768 spin_lock(&vdev->ud.lock); 769 769 vdev->ud.status = VDEV_ST_USED; ··· 783 781 usbip_dbg_vhci_hc( 784 782 "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n"); 785 783 786 - usb_put_dev(vdev->udev); 787 784 vdev->udev = usb_get_dev(urb->dev); 785 + usb_put_dev(old); 788 786 goto out; 789 787 790 788 default: ··· 1069 1067 static void vhci_device_reset(struct usbip_device *ud) 1070 1068 { 1071 1069 struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); 1070 + struct usb_device *old = vdev->udev; 1072 1071 unsigned long flags; 1073 1072 1074 1073 spin_lock_irqsave(&ud->lock, flags); ··· 1077 1074 vdev->speed = 0; 1078 1075 vdev->devid = 0; 1079 1076 1080 - usb_put_dev(vdev->udev); 1081 1077 vdev->udev = NULL; 1078 + usb_put_dev(old); 1082 1079 1083 1080 if (ud->tcp_socket) { 1084 1081 sockfd_put(ud->tcp_socket);
+2 -1
fs/9p/vfs_addr.c
··· 75 75 76 76 /* if we just extended the file size, any portion not in 77 77 * cache won't be on server and is zeroes */ 78 - __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 78 + if (subreq->rreq->origin != NETFS_DIO_READ) 79 + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 79 80 80 81 netfs_subreq_terminated(subreq, err ?: total, false); 81 82 }
+2 -1
fs/afs/file.c
··· 242 242 243 243 req->error = error; 244 244 if (subreq) { 245 - __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 245 + if (subreq->rreq->origin != NETFS_DIO_READ) 246 + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 246 247 netfs_subreq_terminated(subreq, error ?: req->actual_len, false); 247 248 req->subreq = NULL; 248 249 } else if (req->done) {
+7 -4
fs/bcachefs/acl.c
··· 272 272 return xattr; 273 273 } 274 274 275 - struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap, 276 - struct dentry *dentry, int type) 275 + struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu) 277 276 { 278 - struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); 277 + struct bch_inode_info *inode = to_bch_ei(vinode); 279 278 struct bch_fs *c = inode->v.i_sb->s_fs_info; 280 279 struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode); 281 280 struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0); 282 - struct btree_trans *trans = bch2_trans_get(c); 283 281 struct btree_iter iter = { NULL }; 284 282 struct posix_acl *acl = NULL; 283 + 284 + if (rcu) 285 + return ERR_PTR(-ECHILD); 286 + 287 + struct btree_trans *trans = bch2_trans_get(c); 285 288 retry: 286 289 bch2_trans_begin(trans); 287 290
+1 -1
fs/bcachefs/acl.h
··· 28 28 29 29 #ifdef CONFIG_BCACHEFS_POSIX_ACL 30 30 31 - struct posix_acl *bch2_get_acl(struct mnt_idmap *, struct dentry *, int); 31 + struct posix_acl *bch2_get_acl(struct inode *, int, bool); 32 32 33 33 int bch2_set_acl_trans(struct btree_trans *, subvol_inum, 34 34 struct bch_inode_unpacked *,
+1 -1
fs/bcachefs/alloc_foreground.c
··· 1740 1740 printbuf_tabstop_push(out, 16); 1741 1741 printbuf_tabstop_push(out, 16); 1742 1742 1743 - bch2_dev_usage_to_text(out, &stats); 1743 + bch2_dev_usage_to_text(out, ca, &stats); 1744 1744 1745 1745 prt_newline(out); 1746 1746
+2 -1
fs/bcachefs/bcachefs_format.h
··· 675 675 x(btree_subvolume_children, BCH_VERSION(1, 6)) \ 676 676 x(mi_btree_bitmap, BCH_VERSION(1, 7)) \ 677 677 x(bucket_stripe_sectors, BCH_VERSION(1, 8)) \ 678 - x(disk_accounting_v2, BCH_VERSION(1, 9)) 678 + x(disk_accounting_v2, BCH_VERSION(1, 9)) \ 679 + x(disk_accounting_v3, BCH_VERSION(1, 10)) 679 680 680 681 enum bcachefs_metadata_version { 681 682 bcachefs_metadata_version_min = 9,
+8 -4
fs/bcachefs/buckets.c
··· 71 71 return ret; 72 72 } 73 73 74 - void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage) 74 + void bch2_dev_usage_to_text(struct printbuf *out, 75 + struct bch_dev *ca, 76 + struct bch_dev_usage *usage) 75 77 { 76 78 prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n"); 77 79 78 80 for (unsigned i = 0; i < BCH_DATA_NR; i++) { 79 81 bch2_prt_data_type(out, i); 80 82 prt_printf(out, "\t%llu\r%llu\r%llu\r\n", 81 - usage->d[i].buckets, 82 - usage->d[i].sectors, 83 - usage->d[i].fragmented); 83 + usage->d[i].buckets, 84 + usage->d[i].sectors, 85 + usage->d[i].fragmented); 84 86 } 87 + 88 + prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets); 85 89 } 86 90 87 91 static int bch2_check_fix_ptr(struct btree_trans *trans,
+1 -1
fs/bcachefs/buckets.h
··· 212 212 return ret; 213 213 } 214 214 215 - void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *); 215 + void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *); 216 216 217 217 static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark) 218 218 {
+64 -1
fs/bcachefs/disk_accounting.c
··· 114 114 return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc); 115 115 } 116 116 117 + static inline bool is_zero(char *start, char *end) 118 + { 119 + BUG_ON(start > end); 120 + 121 + for (; start < end; start++) 122 + if (*start) 123 + return false; 124 + return true; 125 + } 126 + 127 + #define field_end(p, member) (((void *) (&p.member)) + sizeof(p.member)) 128 + 117 129 int bch2_accounting_invalid(struct bch_fs *c, struct bkey_s_c k, 118 130 enum bch_validate_flags flags, 119 131 struct printbuf *err) 120 132 { 121 - return 0; 133 + struct disk_accounting_pos acc_k; 134 + bpos_to_disk_accounting_pos(&acc_k, k.k->p); 135 + void *end = &acc_k + 1; 136 + int ret = 0; 137 + 138 + switch (acc_k.type) { 139 + case BCH_DISK_ACCOUNTING_nr_inodes: 140 + end = field_end(acc_k, nr_inodes); 141 + break; 142 + case BCH_DISK_ACCOUNTING_persistent_reserved: 143 + end = field_end(acc_k, persistent_reserved); 144 + break; 145 + case BCH_DISK_ACCOUNTING_replicas: 146 + bkey_fsck_err_on(!acc_k.replicas.nr_devs, 147 + c, err, accounting_key_replicas_nr_devs_0, 148 + "accounting key replicas entry with nr_devs=0"); 149 + 150 + bkey_fsck_err_on(acc_k.replicas.nr_required > acc_k.replicas.nr_devs || 151 + (acc_k.replicas.nr_required > 1 && 152 + acc_k.replicas.nr_required == acc_k.replicas.nr_devs), 153 + c, err, accounting_key_replicas_nr_required_bad, 154 + "accounting key replicas entry with bad nr_required"); 155 + 156 + for (unsigned i = 0; i + 1 < acc_k.replicas.nr_devs; i++) 157 + bkey_fsck_err_on(acc_k.replicas.devs[i] > acc_k.replicas.devs[i + 1], 158 + c, err, accounting_key_replicas_devs_unsorted, 159 + "accounting key replicas entry with unsorted devs"); 160 + 161 + end = (void *) &acc_k.replicas + replicas_entry_bytes(&acc_k.replicas); 162 + break; 163 + case BCH_DISK_ACCOUNTING_dev_data_type: 164 + end = field_end(acc_k, dev_data_type); 165 + break; 166 + case BCH_DISK_ACCOUNTING_compression: 167 + end = field_end(acc_k, compression); 168 + break; 169 + case BCH_DISK_ACCOUNTING_snapshot: 170 + end = field_end(acc_k, snapshot); 171 + break; 172 + case BCH_DISK_ACCOUNTING_btree: 173 + end = field_end(acc_k, btree); 174 + break; 175 + case BCH_DISK_ACCOUNTING_rebalance_work: 176 + end = field_end(acc_k, rebalance_work); 177 + break; 178 + } 179 + 180 + bkey_fsck_err_on(!is_zero(end, (void *) (&acc_k + 1)), 181 + c, err, accounting_key_junk_at_end, 182 + "junk at end of accounting key"); 183 + fsck_err: 184 + return ret; 122 185 } 123 186 124 187 void bch2_accounting_key_to_text(struct printbuf *out, struct disk_accounting_pos *k)
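The new bch2_accounting_invalid() validation follows a reusable pattern: locate the end of the last meaningful member for the key's type with field_end(), then demand that every remaining byte up to the end of the fixed-size key is zero. A standalone toy version with an invented struct; char pointers replace the kernel's void-pointer arithmetic:

#include <stdio.h>
#include <string.h>

/* Invented key layout; only "id" is meaningful for this key type. */
struct key_model {
	unsigned char type;
	unsigned int id;
	unsigned char pad[16];
};

#define field_end(p, member) (((char *)&(p).member) + sizeof((p).member))

static int is_zero(const char *start, const char *end)
{
	for (; start < end; start++)
		if (*start)
			return 0;
	return 1;
}

int main(void)
{
	struct key_model k;

	memset(&k, 0, sizeof(k));
	k.type = 1;
	k.id = 42;
	printf("clean key valid: %d\n",
	       is_zero(field_end(k, id), (char *)(&k + 1)));	/* 1 */
	k.pad[3] = 0xff;	/* simulate junk after the last field */
	printf("dirty key valid: %d\n",
	       is_zero(field_end(k, id), (char *)(&k + 1)));	/* 0 */
	return 0;
}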
+7 -8
fs/bcachefs/disk_accounting_format.h
··· 124 124 __u8 data_type; 125 125 }; 126 126 127 - struct bch_dev_stripe_buckets { 128 - __u8 dev; 129 - }; 130 - 131 127 struct bch_acct_compression { 132 128 __u8 type; 133 129 }; 134 130 135 131 struct bch_acct_snapshot { 136 132 __u32 id; 137 - }; 133 + } __packed; 138 134 139 135 struct bch_acct_btree { 140 136 __u32 id; 137 + } __packed; 138 + 139 + struct bch_acct_rebalance_work { 141 140 }; 142 141 143 142 struct disk_accounting_pos { ··· 148 149 struct bch_persistent_reserved persistent_reserved; 149 150 struct bch_replicas_entry_v1 replicas; 150 151 struct bch_dev_data_type dev_data_type; 151 - struct bch_dev_stripe_buckets dev_stripe_buckets; 152 152 struct bch_acct_compression compression; 153 153 struct bch_acct_snapshot snapshot; 154 154 struct bch_acct_btree btree; 155 - }; 156 - }; 155 + struct bch_acct_rebalance_work rebalance_work; 156 + } __packed; 157 + } __packed; 157 158 struct bpos _pad; 158 159 }; 159 160 };
+4 -4
fs/bcachefs/fs.c
··· 1199 1199 .fiemap = bch2_fiemap, 1200 1200 .listxattr = bch2_xattr_list, 1201 1201 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1202 - .get_acl = bch2_get_acl, 1202 + .get_inode_acl = bch2_get_acl, 1203 1203 .set_acl = bch2_set_acl, 1204 1204 #endif 1205 1205 }; ··· 1219 1219 .tmpfile = bch2_tmpfile, 1220 1220 .listxattr = bch2_xattr_list, 1221 1221 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1222 - .get_acl = bch2_get_acl, 1222 + .get_inode_acl = bch2_get_acl, 1223 1223 .set_acl = bch2_set_acl, 1224 1224 #endif 1225 1225 }; ··· 1241 1241 .setattr = bch2_setattr, 1242 1242 .listxattr = bch2_xattr_list, 1243 1243 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1244 - .get_acl = bch2_get_acl, 1244 + .get_inode_acl = bch2_get_acl, 1245 1245 .set_acl = bch2_set_acl, 1246 1246 #endif 1247 1247 }; ··· 1251 1251 .setattr = bch2_setattr, 1252 1252 .listxattr = bch2_xattr_list, 1253 1253 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1254 - .get_acl = bch2_get_acl, 1254 + .get_inode_acl = bch2_get_acl, 1255 1255 .set_acl = bch2_set_acl, 1256 1256 #endif 1257 1257 };
-1
fs/bcachefs/replicas.c
··· 24 24 static void verify_replicas_entry(struct bch_replicas_entry_v1 *e) 25 25 { 26 26 #ifdef CONFIG_BCACHEFS_DEBUG 27 - BUG_ON(e->data_type >= BCH_DATA_NR); 28 27 BUG_ON(!e->nr_devs); 29 28 BUG_ON(e->nr_required > 1 && 30 29 e->nr_required >= e->nr_devs);
+26 -1
fs/bcachefs/sb-downgrade.c
··· 61 61 BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 62 62 BCH_FSCK_ERR_dev_usage_sectors_wrong, \ 63 63 BCH_FSCK_ERR_dev_usage_fragmented_wrong, \ 64 - BCH_FSCK_ERR_accounting_mismatch) 64 + BCH_FSCK_ERR_accounting_mismatch) \ 65 + x(disk_accounting_v3, \ 66 + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 67 + BCH_FSCK_ERR_bkey_version_in_future, \ 68 + BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 69 + BCH_FSCK_ERR_dev_usage_sectors_wrong, \ 70 + BCH_FSCK_ERR_dev_usage_fragmented_wrong, \ 71 + BCH_FSCK_ERR_accounting_mismatch, \ 72 + BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \ 73 + BCH_FSCK_ERR_accounting_key_replicas_nr_required_bad, \ 74 + BCH_FSCK_ERR_accounting_key_replicas_devs_unsorted, \ 75 + BCH_FSCK_ERR_accounting_key_junk_at_end) 65 76 66 77 #define DOWNGRADE_TABLE() \ 67 78 x(bucket_stripe_sectors, \ 68 79 0) \ 69 80 x(disk_accounting_v2, \ 81 + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 82 + BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 83 + BCH_FSCK_ERR_dev_usage_sectors_wrong, \ 84 + BCH_FSCK_ERR_dev_usage_fragmented_wrong, \ 85 + BCH_FSCK_ERR_fs_usage_hidden_wrong, \ 86 + BCH_FSCK_ERR_fs_usage_btree_wrong, \ 87 + BCH_FSCK_ERR_fs_usage_data_wrong, \ 88 + BCH_FSCK_ERR_fs_usage_cached_wrong, \ 89 + BCH_FSCK_ERR_fs_usage_reserved_wrong, \ 90 + BCH_FSCK_ERR_fs_usage_nr_inodes_wrong, \ 91 + BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, \ 92 + BCH_FSCK_ERR_fs_usage_replicas_wrong, \ 93 + BCH_FSCK_ERR_bkey_version_in_future) \ 94 + x(disk_accounting_v3, \ 70 95 BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 71 96 BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 72 97 BCH_FSCK_ERR_dev_usage_sectors_wrong, \
+5 -1
fs/bcachefs/sb-errors_format.h
··· 287 287 x(accounting_replicas_not_marked, 273, 0) \ 288 288 x(invalid_btree_id, 274, 0) \ 289 289 x(alloc_key_io_time_bad, 275, 0) \ 290 - x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX) 290 + x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX) \ 291 + x(accounting_key_junk_at_end, 277, 0) \ 292 + x(accounting_key_replicas_nr_devs_0, 278, 0) \ 293 + x(accounting_key_replicas_nr_required_bad, 279, 0) \ 294 + x(accounting_key_replicas_devs_unsorted, 280, 0) \ 291 295 292 296 enum bch_sb_error_id { 293 297 #define x(t, n, ...) BCH_FSCK_ERR_##t = n,
+3 -1
fs/binfmt_flat.c
··· 72 72 73 73 #ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET 74 74 #define DATA_START_OFFSET_WORDS (0) 75 + #define MAX_SHARED_LIBS_UPDATE (0) 75 76 #else 76 77 #define DATA_START_OFFSET_WORDS (MAX_SHARED_LIBS) 78 + #define MAX_SHARED_LIBS_UPDATE (MAX_SHARED_LIBS) 77 79 #endif 78 80 79 81 struct lib_info { ··· 882 880 return res; 883 881 884 882 /* Update data segment pointers for all libraries */ 885 - for (i = 0; i < MAX_SHARED_LIBS; i++) { 883 + for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) { 886 884 if (!libinfo.lib_list[i].loaded) 887 885 continue; 888 886 for (j = 0; j < MAX_SHARED_LIBS; j++) {
+67
fs/btrfs/delayed-ref.c
··· 1134 1134 return find_ref_head(delayed_refs, bytenr, false); 1135 1135 } 1136 1136 1137 + static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent) 1138 + { 1139 + int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY; 1140 + 1141 + if (type < entry->type) 1142 + return -1; 1143 + if (type > entry->type) 1144 + return 1; 1145 + 1146 + if (type == BTRFS_TREE_BLOCK_REF_KEY) { 1147 + if (root < entry->ref_root) 1148 + return -1; 1149 + if (root > entry->ref_root) 1150 + return 1; 1151 + } else { 1152 + if (parent < entry->parent) 1153 + return -1; 1154 + if (parent > entry->parent) 1155 + return 1; 1156 + } 1157 + return 0; 1158 + } 1159 + 1160 + /* 1161 + * Check to see if a given root/parent reference is attached to the head. This 1162 + * only checks for BTRFS_ADD_DELAYED_REF references that match, as that 1163 + * indicates the reference exists for the given root or parent. This is for 1164 + * tree blocks only. 1165 + * 1166 + * @head: the head of the bytenr we're searching. 1167 + * @root: the root objectid of the reference if it is a normal reference. 1168 + * @parent: the parent if this is a shared backref. 1169 + */ 1170 + bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head, 1171 + u64 root, u64 parent) 1172 + { 1173 + struct rb_node *node; 1174 + bool found = false; 1175 + 1176 + lockdep_assert_held(&head->mutex); 1177 + 1178 + spin_lock(&head->lock); 1179 + node = head->ref_tree.rb_root.rb_node; 1180 + while (node) { 1181 + struct btrfs_delayed_ref_node *entry; 1182 + int ret; 1183 + 1184 + entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); 1185 + ret = find_comp(entry, root, parent); 1186 + if (ret < 0) { 1187 + node = node->rb_left; 1188 + } else if (ret > 0) { 1189 + node = node->rb_right; 1190 + } else { 1191 + /* 1192 + * We only want to count ADD actions, as drops mean the 1193 + * ref doesn't exist. 1194 + */ 1195 + if (entry->action == BTRFS_ADD_DELAYED_REF) 1196 + found = true; 1197 + break; 1198 + } 1199 + } 1200 + spin_unlock(&head->lock); 1201 + return found; 1202 + } 1203 + 1137 1204 void __cold btrfs_delayed_ref_exit(void) 1138 1205 { 1139 1206 kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
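find_comp() above defines the search order within a ref head's rbtree: nodes compare first by back-reference type, then by root objectid for keyed tree refs or by parent bytenr for shared refs. A compact userspace rendering of the same ordering; the key constants match their ctree.h values, the sample numbers are invented:

#include <stdio.h>

#define TREE_BLOCK_REF_KEY	176	/* BTRFS_TREE_BLOCK_REF_KEY */
#define SHARED_BLOCK_REF_KEY	182	/* BTRFS_SHARED_BLOCK_REF_KEY */

struct ref_node {
	int type;
	unsigned long long ref_root;
	unsigned long long parent;
};

static int find_comp(const struct ref_node *entry, unsigned long long root,
		     unsigned long long parent)
{
	int type = parent ? SHARED_BLOCK_REF_KEY : TREE_BLOCK_REF_KEY;

	if (type != entry->type)
		return type < entry->type ? -1 : 1;
	if (type == TREE_BLOCK_REF_KEY)
		return root < entry->ref_root ? -1 : root > entry->ref_root;
	return parent < entry->parent ? -1 : parent > entry->parent;
}

int main(void)
{
	struct ref_node n = { TREE_BLOCK_REF_KEY, 5, 0 };

	/* Walk left, match, walk right respectively: prints "-1 0 1". */
	printf("%d %d %d\n", find_comp(&n, 4, 0), find_comp(&n, 5, 0),
	       find_comp(&n, 6, 0));
	return 0;
}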
+2
fs/btrfs/delayed-ref.h
··· 389 389 int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, 390 390 enum btrfs_reserve_flush_enum flush); 391 391 bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info); 392 + bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head, 393 + u64 root, u64 parent); 392 394 393 395 static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node) 394 396 {
+45 -6
fs/btrfs/extent-tree.c
··· 5472 5472 struct btrfs_root *root, u64 bytenr, u64 parent, 
5473 5473 int level) 
5474 5474 { 
5475 + struct btrfs_delayed_ref_root *delayed_refs; 
5476 + struct btrfs_delayed_ref_head *head; 
5475 5477 struct btrfs_path *path; 
5476 5478 struct btrfs_extent_inline_ref *iref; 
5477 5479 int ret; 
5480 + bool exists = false; 
5478 5481 
5479 5482 path = btrfs_alloc_path(); 
5480 5483 if (!path) 
5481 5484 return -ENOMEM; 
5482 - 
5485 + again: 
5483 5486 ret = lookup_extent_backref(trans, path, &iref, bytenr, 
5484 5487 root->fs_info->nodesize, parent, 
5485 5488 btrfs_root_id(root), level, 0); 
5489 + if (ret != -ENOENT) { 
5490 + /* 
5491 + * If we get 0 we found our reference and return 1; otherwise 
5492 + * ret is an error other than -ENOENT, so return it. 
5493 + */ 
5494 + btrfs_free_path(path); 
5495 + return (ret < 0) ? ret : 1; 
5496 + } 
5497 + 
5498 + /* 
5499 + * We could have a delayed ref with this reference, so look it up while 
5500 + * we're holding the path open to make sure we don't race with the 
5501 + * delayed ref running. 
5502 + */ 
5503 + delayed_refs = &trans->transaction->delayed_refs; 
5504 + spin_lock(&delayed_refs->lock); 
5505 + head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 
5506 + if (!head) 
5507 + goto out; 
5508 + if (!mutex_trylock(&head->mutex)) { 
5509 + /* 
5510 + * We're contended, meaning the delayed ref is running; get a 
5511 + * reference and wait for the ref head to be complete and then 
5512 + * try again. 
5513 + */ 
5514 + refcount_inc(&head->refs); 
5515 + spin_unlock(&delayed_refs->lock); 
5516 + 
5517 + btrfs_release_path(path); 
5518 + 
5519 + mutex_lock(&head->mutex); 
5520 + mutex_unlock(&head->mutex); 
5521 + btrfs_put_delayed_ref_head(head); 
5522 + goto again; 
5523 + } 
5524 + 
5525 + exists = btrfs_find_delayed_tree_ref(head, root->root_key.objectid, parent); 
5526 + mutex_unlock(&head->mutex); 
5527 + out: 
5528 + spin_unlock(&delayed_refs->lock); 
5486 5529 btrfs_free_path(path); 
5487 - if (ret == -ENOENT) 
5488 - return 0; 
5489 - if (ret < 0) 
5490 - return ret; 
5491 - return 1; 
5530 + return exists ? 1 : 0; 
5492 5531 } 
5493 5532 
5494 5533 /*
+7 -7
fs/btrfs/extent_io.c
··· 1496 1496 free_extent_map(em); 1497 1497 em = NULL; 1498 1498 1499 + /* 1500 + * Although the PageDirty bit might be cleared before entering 1501 + * this function, subpage dirty bit is not cleared. 1502 + * So clear subpage dirty bit here so next time we won't submit 1503 + * page for range already written to disk. 1504 + */ 1505 + btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize); 1499 1506 btrfs_set_range_writeback(inode, cur, cur + iosize - 1); 1500 1507 if (!PageWriteback(page)) { 1501 1508 btrfs_err(inode->root->fs_info, ··· 1510 1503 page->index, cur, end); 1511 1504 } 1512 1505 1513 - /* 1514 - * Although the PageDirty bit is cleared before entering this 1515 - * function, subpage dirty bit is not cleared. 1516 - * So clear subpage dirty bit here so next time we won't submit 1517 - * page for range already written to disk. 1518 - */ 1519 - btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize); 1520 1506 1521 1507 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize, 1522 1508 cur - page_offset(page));
+6 -16
fs/btrfs/extent_map.c
··· 1147 1147 return 0; 1148 1148 1149 1149 /* 1150 - * We want to be fast because we can be called from any path trying to 1151 - * allocate memory, so if the lock is busy we don't want to spend time 1150 + * We want to be fast so if the lock is busy we don't want to spend time 1152 1151 * waiting for it - either some task is about to do IO for the inode or 1153 1152 * we may have another task shrinking extent maps, here in this code, so 1154 1153 * skip this inode. ··· 1190 1191 /* 1191 1192 * Stop if we need to reschedule or there's contention on the 1192 1193 * lock. This is to avoid slowing other tasks trying to take the 1193 - * lock and because the shrinker might be called during a memory 1194 - * allocation path and we want to avoid taking a very long time 1195 - * and slowing down all sorts of tasks. 1194 + * lock. 1196 1195 */ 1197 1196 if (need_resched() || rwlock_needbreak(&tree->lock)) 1198 1197 break; ··· 1219 1222 if (ctx->scanned >= ctx->nr_to_scan) 1220 1223 break; 1221 1224 1222 - /* 1223 - * We may be called from memory allocation paths, so we don't 1224 - * want to take too much time and slowdown tasks. 1225 - */ 1226 - if (need_resched()) 1227 - break; 1225 + cond_resched(); 1228 1226 1229 1227 inode = btrfs_find_first_inode(root, min_ino); 1230 1228 } ··· 1277 1285 ctx.last_ino); 1278 1286 } 1279 1287 1280 - /* 1281 - * We may be called from memory allocation paths, so we don't want to 1282 - * take too much time and slowdown tasks, so stop if we need reschedule. 1283 - */ 1284 - while (ctx.scanned < ctx.nr_to_scan && !need_resched()) { 1288 + while (ctx.scanned < ctx.nr_to_scan) { 1285 1289 struct btrfs_root *root; 1286 1290 unsigned long count; 1291 + 1292 + cond_resched(); 1287 1293 1288 1294 spin_lock(&fs_info->fs_roots_radix_lock); 1289 1295 count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+39 -13
fs/btrfs/send.c
··· 6157 6157 u64 offset = key->offset; 6158 6158 u64 end; 6159 6159 u64 bs = sctx->send_root->fs_info->sectorsize; 6160 + struct btrfs_file_extent_item *ei; 6161 + u64 disk_byte; 6162 + u64 data_offset; 6163 + u64 num_bytes; 6164 + struct btrfs_inode_info info = { 0 }; 6160 6165 6161 6166 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size); 6162 6167 if (offset >= end) 6163 6168 return 0; 6164 6169 6165 - if (clone_root && IS_ALIGNED(end, bs)) { 6166 - struct btrfs_file_extent_item *ei; 6167 - u64 disk_byte; 6168 - u64 data_offset; 6170 + num_bytes = end - offset; 6169 6171 6170 - ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 6171 - struct btrfs_file_extent_item); 6172 - disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); 6173 - data_offset = btrfs_file_extent_offset(path->nodes[0], ei); 6174 - ret = clone_range(sctx, path, clone_root, disk_byte, 6175 - data_offset, offset, end - offset); 6176 - } else { 6177 - ret = send_extent_data(sctx, path, offset, end - offset); 6178 - } 6172 + if (!clone_root) 6173 + goto write_data; 6174 + 6175 + if (IS_ALIGNED(end, bs)) 6176 + goto clone_data; 6177 + 6178 + /* 6179 + * If the extent end is not aligned, we can clone if the extent ends at 6180 + * the i_size of the inode and the clone range ends at the i_size of the 6181 + * source inode, otherwise the clone operation fails with -EINVAL. 6182 + */ 6183 + if (end != sctx->cur_inode_size) 6184 + goto write_data; 6185 + 6186 + ret = get_inode_info(clone_root->root, clone_root->ino, &info); 6187 + if (ret < 0) 6188 + return ret; 6189 + 6190 + if (clone_root->offset + num_bytes == info.size) 6191 + goto clone_data; 6192 + 6193 + write_data: 6194 + ret = send_extent_data(sctx, path, offset, num_bytes); 6195 + sctx->cur_inode_next_write_offset = end; 6196 + return ret; 6197 + 6198 + clone_data: 6199 + ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 6200 + struct btrfs_file_extent_item); 6201 + disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); 6202 + data_offset = btrfs_file_extent_offset(path->nodes[0], ei); 6203 + ret = clone_range(sctx, path, clone_root, disk_byte, data_offset, offset, 6204 + num_bytes); 6179 6205 sctx->cur_inode_next_write_offset = end; 6180 6206 return ret; 6181 6207 }
+10
fs/btrfs/super.c
··· 28 28 #include <linux/btrfs.h> 29 29 #include <linux/security.h> 30 30 #include <linux/fs_parser.h> 31 + #include <linux/swap.h> 31 32 #include "messages.h" 32 33 #include "delayed-inode.h" 33 34 #include "ctree.h" ··· 2409 2408 { 2410 2409 const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan); 2411 2410 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2411 + 2412 + /* 2413 + * We may be called from any task trying to allocate memory and we don't 2414 + * want to slow it down with scanning and dropping extent maps. It would 2415 + * also cause heavy lock contention if many tasks concurrently enter 2416 + * here. Therefore only allow kswapd tasks to scan and drop extent maps. 2417 + */ 2418 + if (!current_is_kswapd()) 2419 + return 0; 2412 2420 2413 2421 return btrfs_free_extent_maps(fs_info, nr_to_scan); 2414 2422 }
+3 -2
fs/btrfs/tree-checker.c
··· 569 569 570 570 /* dir type check */ 571 571 dir_type = btrfs_dir_ftype(leaf, di); 572 - if (unlikely(dir_type >= BTRFS_FT_MAX)) { 572 + if (unlikely(dir_type <= BTRFS_FT_UNKNOWN || 573 + dir_type >= BTRFS_FT_MAX)) { 573 574 dir_item_err(leaf, slot, 574 - "invalid dir item type, have %u expect [0, %u)", 575 + "invalid dir item type, have %u expect (0, %u)", 575 576 dir_type, BTRFS_FT_MAX); 576 577 return -EUCLEAN; 577 578 }
+25 -3
fs/ceph/addr.c
··· 246 246 if (err >= 0) { 247 247 if (sparse && err > 0) 248 248 err = ceph_sparse_ext_map_end(op); 249 - if (err < subreq->len) 249 + if (err < subreq->len && 250 + subreq->rreq->origin != NETFS_DIO_READ) 250 251 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 251 252 if (IS_ENCRYPTED(inode) && err > 0) { 252 253 err = ceph_fscrypt_decrypt_extents(inode, ··· 283 282 size_t len; 284 283 int mode; 285 284 286 - __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 285 + if (rreq->origin != NETFS_DIO_READ) 286 + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 287 287 __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); 288 288 289 289 if (subreq->start >= inode->i_size) ··· 426 424 struct ceph_netfs_request_data *priv; 427 425 int ret = 0; 428 426 427 + /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ 428 + __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags); 429 + 429 430 if (rreq->origin != NETFS_READAHEAD) 430 431 return 0; 431 432 ··· 503 498 }; 504 499 505 500 #ifdef CONFIG_CEPH_FSCACHE 501 + static void ceph_set_page_fscache(struct page *page) 502 + { 503 + folio_start_private_2(page_folio(page)); /* [DEPRECATED] */ 504 + } 505 + 506 506 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async) 507 507 { 508 508 struct inode *inode = priv; ··· 525 515 ceph_fscache_write_terminated, inode, true, caching); 526 516 } 527 517 #else 518 + static inline void ceph_set_page_fscache(struct page *page) 519 + { 520 + } 521 + 528 522 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching) 529 523 { 530 524 } ··· 720 706 len = wlen; 721 707 722 708 set_page_writeback(page); 709 + if (caching) 710 + ceph_set_page_fscache(page); 723 711 ceph_fscache_write_to_cache(inode, page_off, len, caching); 724 712 725 713 if (IS_ENCRYPTED(inode)) { ··· 804 788 redirty_page_for_writepage(wbc, page); 805 789 return AOP_WRITEPAGE_ACTIVATE; 806 790 } 791 + 792 + folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */ 807 793 808 794 err = writepage_nounlock(page, wbc); 809 795 if (err == -ERESTARTSYS) { ··· 1080 1062 unlock_page(page); 1081 1063 break; 1082 1064 } 1083 - if (PageWriteback(page)) { 1065 + if (PageWriteback(page) || 1066 + PagePrivate2(page) /* [DEPRECATED] */) { 1084 1067 if (wbc->sync_mode == WB_SYNC_NONE) { 1085 1068 doutc(cl, "%p under writeback\n", page); 1086 1069 unlock_page(page); ··· 1089 1070 } 1090 1071 doutc(cl, "waiting on writeback %p\n", page); 1091 1072 wait_on_page_writeback(page); 1073 + folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */ 1092 1074 } 1093 1075 1094 1076 if (!clear_page_dirty_for_io(page)) { ··· 1274 1254 } 1275 1255 1276 1256 set_page_writeback(page); 1257 + if (caching) 1258 + ceph_set_page_fscache(page); 1277 1259 len += thp_size(page); 1278 1260 } 1279 1261 ceph_fscache_write_to_cache(inode, offset, len, caching);
-2
fs/ceph/inode.c
··· 577 577 578 578 /* Set parameters for the netfs library */ 579 579 netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false); 580 - /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ 581 - __set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags); 582 580 583 581 spin_lock_init(&ci->i_ceph_lock); 584 582
+7 -1
fs/exec.c
··· 1692 1692 unsigned int mode; 1693 1693 vfsuid_t vfsuid; 1694 1694 vfsgid_t vfsgid; 1695 + int err; 1695 1696 1696 1697 if (!mnt_may_suid(file->f_path.mnt)) 1697 1698 return; ··· 1709 1708 /* Be careful if suid/sgid is set */ 1710 1709 inode_lock(inode); 1711 1710 1712 - /* reload atomically mode/uid/gid now that lock held */ 1711 + /* Atomically reload and check mode/uid/gid now that lock held. */ 1713 1712 mode = inode->i_mode; 1714 1713 vfsuid = i_uid_into_vfsuid(idmap, inode); 1715 1714 vfsgid = i_gid_into_vfsgid(idmap, inode); 1715 + err = inode_permission(idmap, inode, MAY_EXEC); 1716 1716 inode_unlock(inode); 1717 + 1718 + /* Did the exec bit vanish out from under us? Give up. */ 1719 + if (err) 1720 + return; 1717 1721 1718 1722 /* We ignore suid/sgid if there are no mappings for them in the ns */ 1719 1723 if (!vfsuid_has_mapping(bprm->cred->user_ns, vfsuid) ||
+12 -16
fs/file.c
··· 46 46 #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) 47 47 #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) 48 48 49 + #define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds 49 50 /* 50 51 * Copy 'count' fd bits from the old table to the new table and clear the extra 51 52 * space if any. This does not copy the file pointers. Called with the files 52 53 * spinlock held for write. 53 54 */ 54 - static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, 55 - unsigned int count) 55 + static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, 56 + unsigned int copy_words) 56 57 { 57 - unsigned int cpy, set; 58 + unsigned int nwords = fdt_words(nfdt); 58 59 59 - cpy = count / BITS_PER_BYTE; 60 - set = (nfdt->max_fds - count) / BITS_PER_BYTE; 61 - memcpy(nfdt->open_fds, ofdt->open_fds, cpy); 62 - memset((char *)nfdt->open_fds + cpy, 0, set); 63 - memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy); 64 - memset((char *)nfdt->close_on_exec + cpy, 0, set); 65 - 66 - cpy = BITBIT_SIZE(count); 67 - set = BITBIT_SIZE(nfdt->max_fds) - cpy; 68 - memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy); 69 - memset((char *)nfdt->full_fds_bits + cpy, 0, set); 60 + bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds, 61 + copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); 62 + bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec, 63 + copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); 64 + bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits, 65 + copy_words, nwords); 70 66 } 71 67 72 68 /* ··· 80 84 memcpy(nfdt->fd, ofdt->fd, cpy); 81 85 memset((char *)nfdt->fd + cpy, 0, set); 82 86 83 - copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds); 87 + copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt)); 84 88 } 85 89 86 90 /* ··· 375 379 open_files = sane_fdtable_size(old_fdt, max_fds); 376 380 } 377 381 378 - copy_fd_bitmaps(new_fdt, old_fdt, open_files); 382 + copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG); 379 383 380 384 old_fds = old_fdt->fd; 381 385 new_fds = new_fdt->fd;
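copy_fd_bitmaps() now delegates to bitmap_copy_and_extend(), which copies the low bits and zero-fills the remainder; this is exactly what the old memcpy/memset pairs computed by hand in bytes. A word-granular userspace sketch of the helper's contract (the kernel version also handles counts that are not whole words):

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Copy the low @count bits of @src to @dst, then clear the rest of
 * @dst up to @size bits. Whole-word sizes only in this toy version. */
static void bitmap_copy_and_extend_model(unsigned long *dst,
					 const unsigned long *src,
					 unsigned int count, unsigned int size)
{
	unsigned int copy_bytes = count / 8;

	memcpy(dst, src, copy_bytes);
	memset((char *)dst + copy_bytes, 0, size / 8 - copy_bytes);
}

int main(void)
{
	unsigned long old_fds[1] = { 0x2a };	/* fds 1, 3 and 5 open */
	unsigned long new_fds[2] = { ~0UL, ~0UL };	/* stale garbage */

	bitmap_copy_and_extend_model(new_fds, old_fds,
				     BITS_PER_LONG, 2 * BITS_PER_LONG);
	printf("%#lx %#lx\n", new_fds[0], new_fds[1]);	/* 0x2a 0 */
	return 0;
}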
+37 -2
fs/inode.c
··· 488 488 this_cpu_dec(nr_unused); 489 489 } 490 490 491 + static void inode_pin_lru_isolating(struct inode *inode) 492 + { 493 + lockdep_assert_held(&inode->i_lock); 494 + WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE)); 495 + inode->i_state |= I_LRU_ISOLATING; 496 + } 497 + 498 + static void inode_unpin_lru_isolating(struct inode *inode) 499 + { 500 + spin_lock(&inode->i_lock); 501 + WARN_ON(!(inode->i_state & I_LRU_ISOLATING)); 502 + inode->i_state &= ~I_LRU_ISOLATING; 503 + smp_mb(); 504 + wake_up_bit(&inode->i_state, __I_LRU_ISOLATING); 505 + spin_unlock(&inode->i_lock); 506 + } 507 + 508 + static void inode_wait_for_lru_isolating(struct inode *inode) 509 + { 510 + spin_lock(&inode->i_lock); 511 + if (inode->i_state & I_LRU_ISOLATING) { 512 + DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING); 513 + wait_queue_head_t *wqh; 514 + 515 + wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING); 516 + spin_unlock(&inode->i_lock); 517 + __wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE); 518 + spin_lock(&inode->i_lock); 519 + WARN_ON(inode->i_state & I_LRU_ISOLATING); 520 + } 521 + spin_unlock(&inode->i_lock); 522 + } 523 + 491 524 /** 492 525 * inode_sb_list_add - add inode to the superblock list of inodes 493 526 * @inode: inode to add ··· 689 656 inode_io_list_del(inode); 690 657 691 658 inode_sb_list_del(inode); 659 + 660 + inode_wait_for_lru_isolating(inode); 692 661 693 662 /* 694 663 * Wait for flusher thread to be done with the inode so that filesystem ··· 890 855 * be under pressure before the cache inside the highmem zone. 891 856 */ 892 857 if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) { 893 - __iget(inode); 858 + inode_pin_lru_isolating(inode); 894 859 spin_unlock(&inode->i_lock); 895 860 spin_unlock(lru_lock); 896 861 if (remove_inode_buffers(inode)) { ··· 902 867 __count_vm_events(PGINODESTEAL, reap); 903 868 mm_account_reclaimed_pages(reap); 904 869 } 905 - iput(inode); 870 + inode_unpin_lru_isolating(inode); 906 871 spin_lock(lru_lock); 907 872 return LRU_RETRY; 908 873 }
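The I_LRU_ISOLATING machinery swaps the old __iget()/iput() pinning for a flag-based handshake: the LRU isolator sets the flag, drops the locks, reclaims buffers, then clears the flag and wakes any evict() caller blocked on it. A loose pthread analogue of that handshake (the names mirror the kernel helpers; nothing here is kernel API):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int lru_isolating;	/* plays the I_LRU_ISOLATING bit */

static void *shrinker(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&lock);
	lru_isolating = 1;	/* inode_pin_lru_isolating() */
	pthread_mutex_unlock(&lock);

	usleep(1000);		/* drop buffers, invalidate pages */

	pthread_mutex_lock(&lock);
	lru_isolating = 0;	/* inode_unpin_lru_isolating() */
	pthread_cond_broadcast(&cond);	/* wake_up_bit() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, shrinker, NULL);
	usleep(100);

	/* inode_wait_for_lru_isolating(), as called from evict() */
	pthread_mutex_lock(&lock);
	while (lru_isolating)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("pin released; safe to tear the inode down");

	pthread_join(t, NULL);
	return 0;
}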
+24 -11
fs/libfs.c
··· 450 450 mtree_destroy(&octx->mt); 451 451 } 452 452 453 + static int offset_dir_open(struct inode *inode, struct file *file) 454 + { 455 + struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode); 456 + 457 + file->private_data = (void *)ctx->next_offset; 458 + return 0; 459 + } 460 + 453 461 /** 454 462 * offset_dir_llseek - Advance the read position of a directory descriptor 455 463 * @file: an open directory whose position is to be updated ··· 471 463 */ 472 464 static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence) 473 465 { 466 + struct inode *inode = file->f_inode; 467 + struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode); 468 + 474 469 switch (whence) { 475 470 case SEEK_CUR: 476 471 offset += file->f_pos; ··· 487 476 } 488 477 489 478 /* In this case, ->private_data is protected by f_pos_lock */ 490 - file->private_data = NULL; 479 + if (!offset) 480 + file->private_data = (void *)ctx->next_offset; 491 481 return vfs_setpos(file, offset, LONG_MAX); 492 482 } 493 483 ··· 519 507 inode->i_ino, fs_umode_to_dtype(inode->i_mode)); 520 508 } 521 509 522 - static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx) 510 + static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index) 523 511 { 524 512 struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode); 525 513 struct dentry *dentry; ··· 527 515 while (true) { 528 516 dentry = offset_find_next(octx, ctx->pos); 529 517 if (!dentry) 530 - return ERR_PTR(-ENOENT); 518 + return; 519 + 520 + if (dentry2offset(dentry) >= last_index) { 521 + dput(dentry); 522 + return; 523 + } 531 524 532 525 if (!offset_dir_emit(ctx, dentry)) { 533 526 dput(dentry); 534 - break; 527 + return; 535 528 } 536 529 537 530 ctx->pos = dentry2offset(dentry) + 1; 538 531 dput(dentry); 539 532 } 540 - return NULL; 541 533 } 542 534 543 535 /** ··· 568 552 static int offset_readdir(struct file *file, struct dir_context *ctx) 569 553 { 570 554 struct dentry *dir = file->f_path.dentry; 555 + long last_index = (long)file->private_data; 571 556 572 557 lockdep_assert_held(&d_inode(dir)->i_rwsem); 573 558 574 559 if (!dir_emit_dots(file, ctx)) 575 560 return 0; 576 561 577 - /* In this case, ->private_data is protected by f_pos_lock */ 578 - if (ctx->pos == DIR_OFFSET_MIN) 579 - file->private_data = NULL; 580 - else if (file->private_data == ERR_PTR(-ENOENT)) 581 - return 0; 582 - file->private_data = offset_iterate_dir(d_inode(dir), ctx); 562 + offset_iterate_dir(d_inode(dir), ctx, last_index); 583 563 return 0; 584 564 } 585 565 586 566 const struct file_operations simple_offset_dir_operations = { 567 + .open = offset_dir_open, 587 568 .llseek = offset_dir_llseek, 588 569 .iterate_shared = offset_readdir, 589 570 .read = generic_read_dir,
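The libfs change latches the directory's next_offset into file->private_data at open time, so offset_readdir() never walks past entries created after the descriptor was opened; that bound is what prevents an unbounded readdir loop against a growing directory. A sketch of the idea against an invented in-memory directory, with offsets starting at 2 as simple_offset directories do:

#include <stdio.h>

struct dirent_model {
	long offset;
	const char *name;
};

/* Emit entries in [*pos, last_index); advance *pos as we go. */
static void offset_readdir_model(const struct dirent_model *ents, int n,
				 long *pos, long last_index)
{
	for (int i = 0; i < n; i++) {
		if (ents[i].offset < *pos || ents[i].offset >= last_index)
			continue;
		printf("%s\n", ents[i].name);
		*pos = ents[i].offset + 1;
	}
}

int main(void)
{
	struct dirent_model ents[] = {
		{ 2, "a" }, { 3, "b" }, { 5, "created-after-open" },
	};
	long pos = 2;		/* first offset past "." and ".." */
	long last_index = 5;	/* ctx->next_offset latched at open() */

	offset_readdir_model(ents, 3, &pos, last_index);	/* prints a, b */
	return 0;
}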
+1 -1
fs/locks.c
··· 2984 2984 filelock_cache = kmem_cache_create("file_lock_cache", 2985 2985 sizeof(struct file_lock), 0, SLAB_PANIC, NULL); 2986 2986 2987 - filelease_cache = kmem_cache_create("file_lock_cache", 2987 + filelease_cache = kmem_cache_create("file_lease_cache", 2988 2988 sizeof(struct file_lease), 0, SLAB_PANIC, NULL); 2989 2989 2990 2990 for_each_possible_cpu(i) {
+1 -1
fs/netfs/Kconfig
··· 24 24 25 25 config NETFS_DEBUG 26 26 bool "Enable dynamic debugging netfslib and FS-Cache" 27 - depends on NETFS 27 + depends on NETFS_SUPPORT 28 28 help 29 29 This permits debugging to be dynamically enabled in the local caching 30 30 management module. If this is set, the debugging output may be
+109 -14
fs/netfs/buffered_read.c
··· 10 10 #include "internal.h"
11 11
12 12 /*
13 + * [DEPRECATED] Unlock the folios in a read operation for when the filesystem
14 + * is using PG_private_2 and direct writing to the cache from here rather than
15 + * marking the page for writeback.
16 + *
17 + * Note that we don't touch folio->private in this code.
18 + */
19 + static void netfs_rreq_unlock_folios_pgpriv2(struct netfs_io_request *rreq,
20 + size_t *account)
21 + {
22 + struct netfs_io_subrequest *subreq;
23 + struct folio *folio;
24 + pgoff_t start_page = rreq->start / PAGE_SIZE;
25 + pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
26 + bool subreq_failed = false;
27 +
28 + XA_STATE(xas, &rreq->mapping->i_pages, start_page);
29 +
30 + /* Walk through the pagecache and the I/O request lists simultaneously.
31 + * We may have a mixture of cached and uncached sections and we only
32 + * really want to write out the uncached sections. This is slightly
33 + * complicated by the possibility that we might have huge pages with a
34 + * mixture inside.
35 + */
36 + subreq = list_first_entry(&rreq->subrequests,
37 + struct netfs_io_subrequest, rreq_link);
38 + subreq_failed = (subreq->error < 0);
39 +
40 + trace_netfs_rreq(rreq, netfs_rreq_trace_unlock_pgpriv2);
41 +
42 + rcu_read_lock();
43 + xas_for_each(&xas, folio, last_page) {
44 + loff_t pg_end;
45 + bool pg_failed = false;
46 + bool folio_started = false;
47 +
48 + if (xas_retry(&xas, folio))
49 + continue;
50 +
51 + pg_end = folio_pos(folio) + folio_size(folio) - 1;
52 +
53 + for (;;) {
54 + loff_t sreq_end;
55 +
56 + if (!subreq) {
57 + pg_failed = true;
58 + break;
59 + }
60 +
61 + if (!folio_started &&
62 + test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags) &&
63 + fscache_operation_valid(&rreq->cache_resources)) {
64 + trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
65 + folio_start_private_2(folio);
66 + folio_started = true;
67 + }
68 +
69 + pg_failed |= subreq_failed;
70 + sreq_end = subreq->start + subreq->len - 1;
71 + if (pg_end < sreq_end)
72 + break;
73 +
74 + *account += subreq->transferred;
75 + if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
76 + subreq = list_next_entry(subreq, rreq_link);
77 + subreq_failed = (subreq->error < 0);
78 + } else {
79 + subreq = NULL;
80 + subreq_failed = false;
81 + }
82 +
83 + if (pg_end == sreq_end)
84 + break;
85 + }
86 +
87 + if (!pg_failed) {
88 + flush_dcache_folio(folio);
89 + folio_mark_uptodate(folio);
90 + }
91 +
92 + if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
93 + if (folio->index == rreq->no_unlock_folio &&
94 + test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
95 + _debug("no unlock");
96 + else
97 + folio_unlock(folio);
98 + }
99 + }
100 + rcu_read_unlock();
101 + }
102 +
103 + /*
13 104 * Unlock the folios in a read operation. We need to set PG_writeback on any
14 105 * folios we're going to write back before we unlock them.
15 106 *
··· 126 35 }
127 36 }
128 37
38 + /* Handle deprecated PG_private_2 case. */
39 + if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
40 + netfs_rreq_unlock_folios_pgpriv2(rreq, &account);
41 + goto out;
42 + }
43 +
129 44 /* Walk through the pagecache and the I/O request lists simultaneously.
130 45 * We may have a mixture of cached and uncached sections and we only
131 46 * really want to write out the uncached sections. This is slightly
··· 149 52 loff_t pg_end;
150 53 bool pg_failed = false;
151 54 bool wback_to_cache = false;
152 - bool folio_started = false;
153 55
154 56 if (xas_retry(&xas, folio))
155 57 continue;
··· 162 66 pg_failed = true;
163 67 break;
164 68 }
165 - if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
166 - if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE,
167 - &subreq->flags)) {
168 - trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
169 - folio_start_private_2(folio);
170 - folio_started = true;
171 - }
172 - } else {
173 - wback_to_cache |=
174 - test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
175 - }
69 +
70 + wback_to_cache |= test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
176 71 pg_failed |= subreq_failed;
177 72 sreq_end = subreq->start + subreq->len - 1;
178 73 if (pg_end < sreq_end)
··· 211 124 }
212 125 rcu_read_unlock();
213 126
127 + out:
214 128 task_io_account_read(account);
215 129 if (rreq->netfs_ops->done)
216 130 rreq->netfs_ops->done(rreq);
··· 483 395 }
484 396
485 397 /**
486 - * netfs_write_begin - Helper to prepare for writing
398 + * netfs_write_begin - Helper to prepare for writing [DEPRECATED]
487 399 * @ctx: The netfs context
488 400 * @file: The file to read from
489 401 * @mapping: The mapping to read from
··· 514 426 * inode before calling this.
515 427 *
516 428 * This is usable whether or not caching is enabled.
429 + *
430 + * Note that this should be considered deprecated and netfs_perform_write()
431 + * used instead.
517 432 */
518 433 int netfs_write_begin(struct netfs_inode *ctx,
519 434 struct file *file, struct address_space *mapping,
··· 557 466 if (!netfs_is_cache_enabled(ctx) &&
558 467 netfs_skip_folio_read(folio, pos, len, false)) {
559 468 netfs_stat(&netfs_n_rh_write_zskip);
560 - goto have_folio;
469 + goto have_folio_no_wait;
561 470 }
562 471
563 472 rreq = netfs_alloc_request(mapping, file,
··· 598 507 netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
599 508
600 509 have_folio:
510 + ret = folio_wait_private_2_killable(folio);
511 + if (ret < 0)
512 + goto error;
513 + have_folio_no_wait:
601 514 *_folio = folio;
602 515 _leave(" = 0");
603 516 return 0;
+1 -1
fs/netfs/buffered_write.c
··· 184 184 unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0; 185 185 ssize_t written = 0, ret, ret2; 186 186 loff_t i_size, pos = iocb->ki_pos, from, to; 187 - size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER; 187 + size_t max_chunk = mapping_max_folio_size(mapping); 188 188 bool maybe_trouble = false; 189 189 190 190 if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
+4
fs/netfs/fscache_cookie.c
··· 741 741 spin_lock(&cookie->lock); 742 742 } 743 743 if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) { 744 + if (atomic_read(&cookie->n_accesses) != 0) 745 + /* still being accessed: postpone it */ 746 + break; 747 + 744 748 __fscache_set_cookie_state(cookie, 745 749 FSCACHE_COOKIE_STATE_LRU_DISCARDING); 746 750 wake = true;
+155 -6
fs/netfs/io.c
··· 99 99 } 100 100 101 101 /* 102 + * [DEPRECATED] Deal with the completion of writing the data to the cache. We 103 + * have to clear the PG_fscache bits on the folios involved and release the 104 + * caller's ref. 105 + * 106 + * May be called in softirq mode and we inherit a ref from the caller. 107 + */ 108 + static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq, 109 + bool was_async) 110 + { 111 + struct netfs_io_subrequest *subreq; 112 + struct folio *folio; 113 + pgoff_t unlocked = 0; 114 + bool have_unlocked = false; 115 + 116 + rcu_read_lock(); 117 + 118 + list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { 119 + XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE); 120 + 121 + xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) { 122 + if (xas_retry(&xas, folio)) 123 + continue; 124 + 125 + /* We might have multiple writes from the same huge 126 + * folio, but we mustn't unlock a folio more than once. 127 + */ 128 + if (have_unlocked && folio->index <= unlocked) 129 + continue; 130 + unlocked = folio_next_index(folio) - 1; 131 + trace_netfs_folio(folio, netfs_folio_trace_end_copy); 132 + folio_end_private_2(folio); 133 + have_unlocked = true; 134 + } 135 + } 136 + 137 + rcu_read_unlock(); 138 + netfs_rreq_completed(rreq, was_async); 139 + } 140 + 141 + static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, 142 + bool was_async) /* [DEPRECATED] */ 143 + { 144 + struct netfs_io_subrequest *subreq = priv; 145 + struct netfs_io_request *rreq = subreq->rreq; 146 + 147 + if (IS_ERR_VALUE(transferred_or_error)) { 148 + netfs_stat(&netfs_n_rh_write_failed); 149 + trace_netfs_failure(rreq, subreq, transferred_or_error, 150 + netfs_fail_copy_to_cache); 151 + } else { 152 + netfs_stat(&netfs_n_rh_write_done); 153 + } 154 + 155 + trace_netfs_sreq(subreq, netfs_sreq_trace_write_term); 156 + 157 + /* If we decrement nr_copy_ops to 0, the ref belongs to us. */ 158 + if (atomic_dec_and_test(&rreq->nr_copy_ops)) 159 + netfs_rreq_unmark_after_write(rreq, was_async); 160 + 161 + netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated); 162 + } 163 + 164 + /* 165 + * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref 166 + * from the caller. 167 + */ 168 + static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) 169 + { 170 + struct netfs_cache_resources *cres = &rreq->cache_resources; 171 + struct netfs_io_subrequest *subreq, *next, *p; 172 + struct iov_iter iter; 173 + int ret; 174 + 175 + trace_netfs_rreq(rreq, netfs_rreq_trace_copy); 176 + 177 + /* We don't want terminating writes trying to wake us up whilst we're 178 + * still going through the list. 
179 + */ 180 + atomic_inc(&rreq->nr_copy_ops); 181 + 182 + list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) { 183 + if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) { 184 + list_del_init(&subreq->rreq_link); 185 + netfs_put_subrequest(subreq, false, 186 + netfs_sreq_trace_put_no_copy); 187 + } 188 + } 189 + 190 + list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { 191 + /* Amalgamate adjacent writes */ 192 + while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) { 193 + next = list_next_entry(subreq, rreq_link); 194 + if (next->start != subreq->start + subreq->len) 195 + break; 196 + subreq->len += next->len; 197 + list_del_init(&next->rreq_link); 198 + netfs_put_subrequest(next, false, 199 + netfs_sreq_trace_put_merged); 200 + } 201 + 202 + ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len, 203 + subreq->len, rreq->i_size, true); 204 + if (ret < 0) { 205 + trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write); 206 + trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip); 207 + continue; 208 + } 209 + 210 + iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages, 211 + subreq->start, subreq->len); 212 + 213 + atomic_inc(&rreq->nr_copy_ops); 214 + netfs_stat(&netfs_n_rh_write); 215 + netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache); 216 + trace_netfs_sreq(subreq, netfs_sreq_trace_write); 217 + cres->ops->write(cres, subreq->start, &iter, 218 + netfs_rreq_copy_terminated, subreq); 219 + } 220 + 221 + /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */ 222 + if (atomic_dec_and_test(&rreq->nr_copy_ops)) 223 + netfs_rreq_unmark_after_write(rreq, false); 224 + } 225 + 226 + static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */ 227 + { 228 + struct netfs_io_request *rreq = 229 + container_of(work, struct netfs_io_request, work); 230 + 231 + netfs_rreq_do_write_to_cache(rreq); 232 + } 233 + 234 + static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */ 235 + { 236 + rreq->work.func = netfs_rreq_write_to_cache_work; 237 + if (!queue_work(system_unbound_wq, &rreq->work)) 238 + BUG(); 239 + } 240 + 241 + /* 102 242 * Handle a short read. 103 243 */ 104 244 static void netfs_rreq_short_read(struct netfs_io_request *rreq, ··· 415 275 clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags); 416 276 wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); 417 277 278 + if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) && 279 + test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) 280 + return netfs_rreq_write_to_cache(rreq); 281 + 418 282 netfs_rreq_completed(rreq, was_async); 419 283 } 420 284 ··· 530 386 531 387 if (transferred_or_error == 0) { 532 388 if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) { 533 - subreq->error = -ENODATA; 389 + if (rreq->origin != NETFS_DIO_READ) 390 + subreq->error = -ENODATA; 534 391 goto failed; 535 392 } 536 393 } else { ··· 602 457 } 603 458 if (subreq->len > ictx->zero_point - subreq->start) 604 459 subreq->len = ictx->zero_point - subreq->start; 460 + 461 + /* We limit buffered reads to the EOF, but let the 462 + * server deal with larger-than-EOF DIO/unbuffered 463 + * reads. 
464 + */ 465 + if (subreq->len > rreq->i_size - subreq->start) 466 + subreq->len = rreq->i_size - subreq->start; 605 467 } 606 - if (subreq->len > rreq->i_size - subreq->start) 607 - subreq->len = rreq->i_size - subreq->start; 608 468 if (rreq->rsize && subreq->len > rreq->rsize) 609 469 subreq->len = rreq->rsize; 610 470 ··· 745 595 do { 746 596 _debug("submit %llx + %llx >= %llx", 747 597 rreq->start, rreq->submitted, rreq->i_size); 748 - if (rreq->origin == NETFS_DIO_READ && 749 - rreq->start + rreq->submitted >= rreq->i_size) 750 - break; 751 598 if (!netfs_rreq_submit_slice(rreq, &io_iter)) 599 + break; 600 + if (test_bit(NETFS_SREQ_NO_PROGRESS, &rreq->flags)) 752 601 break; 753 602 if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) && 754 603 test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
-10
fs/netfs/objects.c
··· 24 24 struct netfs_io_request *rreq; 25 25 mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool; 26 26 struct kmem_cache *cache = mempool->pool_data; 27 - bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE || 28 - origin == NETFS_DIO_READ || 29 - origin == NETFS_DIO_WRITE); 30 - bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx); 31 27 int ret; 32 28 33 29 for (;;) { ··· 52 56 refcount_set(&rreq->ref, 1); 53 57 54 58 __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); 55 - if (cached) { 56 - __set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); 57 - if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) 58 - /* Filesystem uses deprecated PG_private_2 marking. */ 59 - __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags); 60 - } 61 59 if (file && file->f_flags & O_NONBLOCK) 62 60 __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags); 63 61 if (rreq->netfs_ops->init_request) {
+3 -1
fs/netfs/write_issue.c
··· 94 94 { 95 95 struct netfs_io_request *wreq; 96 96 struct netfs_inode *ictx; 97 + bool is_buffered = (origin == NETFS_WRITEBACK || 98 + origin == NETFS_WRITETHROUGH); 97 99 98 100 wreq = netfs_alloc_request(mapping, file, start, 0, origin); 99 101 if (IS_ERR(wreq)) ··· 104 102 _enter("R=%x", wreq->debug_id); 105 103 106 104 ictx = netfs_inode(wreq->inode); 107 - if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags)) 105 + if (is_buffered && netfs_is_cache_enabled(ictx)) 108 106 fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx)); 109 107 110 108 wreq->contiguity = wreq->start;
+4 -1
fs/nfs/fscache.c
··· 265 265 { 266 266 rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file)); 267 267 rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id); 268 + /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ 269 + __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags); 268 270 269 271 return 0; 270 272 } ··· 363 361 return; 364 362 365 363 sreq = netfs->sreq; 366 - if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) 364 + if (test_bit(NFS_IOHDR_EOF, &hdr->flags) && 365 + sreq->rreq->origin != NETFS_DIO_READ) 367 366 __set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags); 368 367 369 368 if (hdr->error)
-2
fs/nfs/fscache.h
··· 81 81 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) 82 82 { 83 83 netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false); 84 - /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ 85 - __set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags); 86 84 } 87 85 extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr); 88 86 extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
+1 -2
fs/nfsd/nfsctl.c
··· 2069 2069 continue; 2070 2070 } 2071 2071 2072 - ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 2073 - SVC_SOCK_ANONYMOUS, 2072 + ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0, 2074 2073 get_current_cred()); 2075 2074 /* always save the latest error */ 2076 2075 if (ret < 0)
+1 -1
fs/smb/client/cifs_debug.c
··· 1072 1072 static void 1073 1073 cifs_security_flags_handle_must_flags(unsigned int *flags) 1074 1074 { 1075 - unsigned int signflags = *flags & CIFSSEC_MUST_SIGN; 1075 + unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL); 1076 1076 1077 1077 if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) 1078 1078 *flags = CIFSSEC_MUST_KRB5;
+6 -6
fs/smb/client/cifsglob.h
··· 345 345 /* connect to a server share */ 346 346 int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *, 347 347 struct cifs_tcon *, const struct nls_table *); 348 - /* close tree connecion */ 348 + /* close tree connection */ 349 349 int (*tree_disconnect)(const unsigned int, struct cifs_tcon *); 350 350 /* get DFS referrals */ 351 351 int (*get_dfs_refer)(const unsigned int, struct cifs_ses *, ··· 816 816 * Protected by @refpath_lock and @srv_lock. The @refpath_lock is 817 817 * mostly used for not requiring a copy of @leaf_fullpath when getting 818 818 * cached or new DFS referrals (which might also sleep during I/O). 819 - * While @srv_lock is held for making string and NULL comparions against 819 + * While @srv_lock is held for making string and NULL comparisons against 820 820 * both fields as in mount(2) and cache refresh. 821 821 * 822 822 * format: \\HOST\SHARE[\OPTIONAL PATH] ··· 1881 1881 #define CIFSSEC_MAY_SIGN 0x00001 1882 1882 #define CIFSSEC_MAY_NTLMV2 0x00004 1883 1883 #define CIFSSEC_MAY_KRB5 0x00008 1884 - #define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */ 1884 + #define CIFSSEC_MAY_SEAL 0x00040 1885 1885 #define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */ 1886 1886 1887 1887 #define CIFSSEC_MUST_SIGN 0x01001 ··· 1891 1891 #define CIFSSEC_MUST_NTLMV2 0x04004 1892 1892 #define CIFSSEC_MUST_KRB5 0x08008 1893 1893 #ifdef CONFIG_CIFS_UPCALL 1894 - #define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ 1894 + #define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */ 1895 1895 #else 1896 - #define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */ 1896 + #define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */ 1897 1897 #endif /* UPCALL */ 1898 - #define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ 1898 + #define CIFSSEC_MUST_SEAL 0x40040 1899 1899 #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */ 1900 1900 1901 1901 #define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
+2 -1
fs/smb/client/file.c
··· 217 217 goto out; 218 218 } 219 219 220 - __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 220 + if (subreq->rreq->origin != NETFS_DIO_READ) 221 + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 221 222 222 223 rc = rdata->server->ops->async_readv(rdata); 223 224 out:
+6 -5
fs/smb/client/misc.c
··· 352 352 * on simple responses (wct, bcc both zero) 353 353 * in particular have seen this on 354 354 * ulogoffX and FindClose. This leaves 355 - * one byte of bcc potentially unitialized 355 + * one byte of bcc potentially uninitialized 356 356 */ 357 357 /* zero rest of bcc */ 358 358 tmp[sizeof(struct smb_hdr)+1] = 0; ··· 1234 1234 const char *full_path, 1235 1235 bool *islink) 1236 1236 { 1237 + struct TCP_Server_Info *server = tcon->ses->server; 1237 1238 struct cifs_ses *ses = tcon->ses; 1238 1239 size_t len; 1239 1240 char *path; ··· 1251 1250 !is_tcon_dfs(tcon)) 1252 1251 return 0; 1253 1252 1254 - spin_lock(&tcon->tc_lock); 1255 - if (!tcon->origin_fullpath) { 1256 - spin_unlock(&tcon->tc_lock); 1253 + spin_lock(&server->srv_lock); 1254 + if (!server->leaf_fullpath) { 1255 + spin_unlock(&server->srv_lock); 1257 1256 return 0; 1258 1257 } 1259 - spin_unlock(&tcon->tc_lock); 1258 + spin_unlock(&server->srv_lock); 1260 1259 1261 1260 /* 1262 1261 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
+3
fs/smb/client/smb2pdu.c
··· 82 82 if (tcon->seal && 83 83 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 84 84 return 1; 85 + if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) && 86 + (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 87 + return 1; 85 88 return 0; 86 89 } 87 90
+4 -4
fs/smb/client/smbdirect.c
··· 406 406 else 407 407 response = get_empty_queue_buffer(info); 408 408 if (!response) { 409 - /* now switch to emtpy packet queue */ 409 + /* now switch to empty packet queue */ 410 410 if (use_receive_queue) { 411 411 use_receive_queue = 0; 412 412 continue; ··· 618 618 619 619 /* 620 620 * Test if FRWR (Fast Registration Work Requests) is supported on the device 621 - * This implementation requries FRWR on RDMA read/write 621 + * This implementation requires FRWR on RDMA read/write 622 622 * return value: true if it is supported 623 623 */ 624 624 static bool frwr_is_supported(struct ib_device_attr *attrs) ··· 2177 2177 * MR available in the list. It may access the list while the 2178 2178 * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock 2179 2179 * as they never modify the same places. However, there may be several CPUs 2180 - * issueing I/O trying to get MR at the same time, mr_list_lock is used to 2180 + * issuing I/O trying to get MR at the same time, mr_list_lock is used to 2181 2181 * protect this situation. 2182 2182 */ 2183 2183 static struct smbd_mr *get_mr(struct smbd_connection *info) ··· 2311 2311 /* 2312 2312 * There is no need for waiting for complemtion on ib_post_send 2313 2313 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution 2314 - * on the next ib_post_send when we actaully send I/O to remote peer 2314 + * on the next ib_post_send when we actually send I/O to remote peer 2315 2315 */ 2316 2316 rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL); 2317 2317 if (!rc)
+1 -1
fs/smb/client/transport.c
··· 1289 1289 out: 1290 1290 /* 1291 1291 * This will dequeue all mids. After this it is important that the 1292 - * demultiplex_thread will not process any of these mids any futher. 1292 + * demultiplex_thread will not process any of these mids any further. 1293 1293 * This is prevented above by using a noop callback that will not 1294 1294 * wake this thread except for the very last PDU. 1295 1295 */
+12 -3
fs/smb/server/mgmt/share_config.c
··· 15 15 #include "share_config.h" 16 16 #include "user_config.h" 17 17 #include "user_session.h" 18 + #include "../connection.h" 18 19 #include "../transport_ipc.h" 19 20 #include "../misc.h" 20 21 ··· 121 120 return 0; 122 121 } 123 122 124 - static struct ksmbd_share_config *share_config_request(struct unicode_map *um, 123 + static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work, 125 124 const char *name) 126 125 { 127 126 struct ksmbd_share_config_response *resp; 128 127 struct ksmbd_share_config *share = NULL; 129 128 struct ksmbd_share_config *lookup; 129 + struct unicode_map *um = work->conn->um; 130 130 int ret; 131 131 132 132 resp = ksmbd_ipc_share_config_request(name); ··· 183 181 KSMBD_SHARE_CONFIG_VETO_LIST(resp), 184 182 resp->veto_list_sz); 185 183 if (!ret && share->path) { 184 + if (__ksmbd_override_fsids(work, share)) { 185 + kill_share(share); 186 + share = NULL; 187 + goto out; 188 + } 189 + 186 190 ret = kern_path(share->path, 0, &share->vfs_path); 191 + ksmbd_revert_fsids(work); 187 192 if (ret) { 188 193 ksmbd_debug(SMB, "failed to access '%s'\n", 189 194 share->path); ··· 223 214 return share; 224 215 } 225 216 226 - struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um, 217 + struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work, 227 218 const char *name) 228 219 { 229 220 struct ksmbd_share_config *share; ··· 236 227 237 228 if (share) 238 229 return share; 239 - return share_config_request(um, name); 230 + return share_config_request(work, name); 240 231 } 241 232 242 233 bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+3 -1
fs/smb/server/mgmt/share_config.h
··· 11 11 #include <linux/path.h> 12 12 #include <linux/unicode.h> 13 13 14 + struct ksmbd_work; 15 + 14 16 struct ksmbd_share_config { 15 17 char *name; 16 18 char *path; ··· 70 68 __ksmbd_share_config_put(share); 71 69 } 72 70 73 - struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um, 71 + struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work, 74 72 const char *name); 75 73 bool ksmbd_share_veto_filename(struct ksmbd_share_config *share, 76 74 const char *filename);
+5 -4
fs/smb/server/mgmt/tree_connect.c
··· 16 16 #include "user_session.h" 17 17 18 18 struct ksmbd_tree_conn_status 19 - ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess, 20 - const char *share_name) 19 + ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name) 21 20 { 22 21 struct ksmbd_tree_conn_status status = {-ENOENT, NULL}; 23 22 struct ksmbd_tree_connect_response *resp = NULL; 24 23 struct ksmbd_share_config *sc; 25 24 struct ksmbd_tree_connect *tree_conn = NULL; 26 25 struct sockaddr *peer_addr; 26 + struct ksmbd_conn *conn = work->conn; 27 + struct ksmbd_session *sess = work->sess; 27 28 int ret; 28 29 29 - sc = ksmbd_share_config_get(conn->um, share_name); 30 + sc = ksmbd_share_config_get(work, share_name); 30 31 if (!sc) 31 32 return status; 32 33 ··· 62 61 struct ksmbd_share_config *new_sc; 63 62 64 63 ksmbd_share_config_del(sc); 65 - new_sc = ksmbd_share_config_get(conn->um, share_name); 64 + new_sc = ksmbd_share_config_get(work, share_name); 66 65 if (!new_sc) { 67 66 pr_err("Failed to update stale share config\n"); 68 67 status.ret = -ESTALE;
+2 -2
fs/smb/server/mgmt/tree_connect.h
··· 13 13 struct ksmbd_share_config; 14 14 struct ksmbd_user; 15 15 struct ksmbd_conn; 16 + struct ksmbd_work; 16 17 17 18 enum { 18 19 TREE_NEW = 0, ··· 51 50 struct ksmbd_session; 52 51 53 52 struct ksmbd_tree_conn_status 54 - ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess, 55 - const char *share_name); 53 + ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name); 56 54 void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon); 57 55 58 56 int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+8 -1
fs/smb/server/smb2pdu.c
··· 1955 1955 ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n", 1956 1956 name, treename); 1957 1957 1958 - status = ksmbd_tree_conn_connect(conn, sess, name); 1958 + status = ksmbd_tree_conn_connect(work, name); 1959 1959 if (status.ret == KSMBD_TREE_CONN_STATUS_OK) 1960 1960 rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id); 1961 1961 else ··· 5596 5596 5597 5597 ksmbd_debug(SMB, "GOT query info request\n"); 5598 5598 5599 + if (ksmbd_override_fsids(work)) { 5600 + rc = -ENOMEM; 5601 + goto err_out; 5602 + } 5603 + 5599 5604 switch (req->InfoType) { 5600 5605 case SMB2_O_INFO_FILE: 5601 5606 ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n"); ··· 5619 5614 req->InfoType); 5620 5615 rc = -EOPNOTSUPP; 5621 5616 } 5617 + ksmbd_revert_fsids(work); 5622 5618 5623 5619 if (!rc) { 5624 5620 rsp->StructureSize = cpu_to_le16(9); ··· 5629 5623 le32_to_cpu(rsp->OutputBufferLength)); 5630 5624 } 5631 5625 5626 + err_out: 5632 5627 if (rc < 0) { 5633 5628 if (rc == -EACCES) 5634 5629 rsp->hdr.Status = STATUS_ACCESS_DENIED;
+7 -2
fs/smb/server/smb_common.c
··· 732 732 return p && p[0] == '*'; 733 733 } 734 734 735 - int ksmbd_override_fsids(struct ksmbd_work *work) 735 + int __ksmbd_override_fsids(struct ksmbd_work *work, 736 + struct ksmbd_share_config *share) 736 737 { 737 738 struct ksmbd_session *sess = work->sess; 738 - struct ksmbd_share_config *share = work->tcon->share_conf; 739 739 struct cred *cred; 740 740 struct group_info *gi; 741 741 unsigned int uid; ··· 773 773 return -EINVAL; 774 774 } 775 775 return 0; 776 + } 777 + 778 + int ksmbd_override_fsids(struct ksmbd_work *work) 779 + { 780 + return __ksmbd_override_fsids(work, work->tcon->share_conf); 776 781 } 777 782 778 783 void ksmbd_revert_fsids(struct ksmbd_work *work)
+2
fs/smb/server/smb_common.h
··· 447 447 int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command); 448 448 449 449 int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp); 450 + int __ksmbd_override_fsids(struct ksmbd_work *work, 451 + struct ksmbd_share_config *share); 450 452 int ksmbd_override_fsids(struct ksmbd_work *work); 451 453 void ksmbd_revert_fsids(struct ksmbd_work *work); 452 454
+6 -1
fs/squashfs/inode.c
··· 279 279 if (err < 0) 280 280 goto failed_read; 281 281 282 - set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); 283 282 inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); 283 + if (inode->i_size > PAGE_SIZE) { 284 + ERROR("Corrupted symlink\n"); 285 + return -EINVAL; 286 + } 287 + 288 + set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); 284 289 inode->i_op = &squashfs_symlink_inode_ops; 285 290 inode_nohighmem(inode); 286 291 inode->i_data.a_ops = &squashfs_symlink_aops;
+2
include/drm/drm_buddy.h
··· 27 27 #define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) 28 28 #define DRM_BUDDY_CLEAR_ALLOCATION BIT(3) 29 29 #define DRM_BUDDY_CLEARED BIT(4) 30 + #define DRM_BUDDY_TRIM_DISABLE BIT(5) 30 31 31 32 struct drm_buddy_block { 32 33 #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) ··· 156 155 unsigned long flags); 157 156 158 157 int drm_buddy_block_trim(struct drm_buddy *mm, 158 + u64 *start, 159 159 u64 new_size, 160 160 struct list_head *blocks); 161 161
+12
include/linux/bitmap.h
··· 270 270 dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); 271 271 } 272 272 273 + static inline void bitmap_copy_and_extend(unsigned long *to, 274 + const unsigned long *from, 275 + unsigned int count, unsigned int size) 276 + { 277 + unsigned int copy = BITS_TO_LONGS(count); 278 + 279 + memcpy(to, from, copy * sizeof(long)); 280 + if (count % BITS_PER_LONG) 281 + to[copy - 1] &= BITMAP_LAST_WORD_MASK(count); 282 + memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long)); 283 + } 284 + 273 285 /* 274 286 * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64 275 287 * machines the order of hi and lo parts of numbers match the bitmap structure.
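The new bitmap_copy_and_extend() above copies count bits and zero-fills the destination out to size bits, masking the tail of the last copied word so stale bits past count cannot leak through. A standalone userspace rendering of the same arithmetic (the kernel macros are open-coded here with their usual definitions; treat it as a sketch, not the header itself):

    #include <stdio.h>
    #include <string.h>

    #define BITS_PER_LONG (8 * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define BITMAP_LAST_WORD_MASK(n) (~0UL >> (-(n) & (BITS_PER_LONG - 1)))

    static void copy_and_extend(unsigned long *to, const unsigned long *from,
                                unsigned int count, unsigned int size)
    {
        unsigned int copy = BITS_TO_LONGS(count);

        memcpy(to, from, copy * sizeof(long));
        if (count % BITS_PER_LONG)
            to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);  /* clear tail bits */
        memset(to + copy, 0, BITS_TO_LONGS(size) * sizeof(long) - copy * sizeof(long));
    }

    int main(void)
    {
        unsigned long src[1] = { ~0UL };  /* 64 set bits on LP64 */
        unsigned long dst[2];

        copy_and_extend(dst, src, 10, 128);
        printf("%#lx %#lx\n", dst[0], dst[1]);  /* 0x3ff 0 */
        return 0;
    }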
+2 -2
include/linux/bpf_verifier.h
··· 856 856 /* only use after check_attach_btf_id() */ 857 857 static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog) 858 858 { 859 - return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ? 860 - prog->aux->dst_prog->type : prog->type; 859 + return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ? 860 + prog->aux->saved_dst_prog_type : prog->type; 861 861 } 862 862 863 863 static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+1 -1
include/linux/cpumask.h
··· 1037 1037 assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val)) 1038 1038 1039 1039 #define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible)) 1040 - #define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_possible_mask, (enabled)) 1040 + #define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled)) 1041 1041 #define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present)) 1042 1042 #define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active)) 1043 1043 #define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
+1 -1
include/linux/file.h
··· 110 110 * 111 111 * f = dentry_open(&path, O_RDONLY, current_cred()); 112 112 * if (IS_ERR(f)) 113 - * return PTR_ERR(fd); 113 + * return PTR_ERR(f); 114 114 * 115 115 * fd_install(fd, f); 116 116 * return take_fd(fd);
+5
include/linux/fs.h
··· 2392 2392 * 2393 2393 * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback. 2394 2394 * 2395 + * I_LRU_ISOLATING Inode is pinned being isolated from LRU without holding 2396 + * i_count. 2397 + * 2395 2398 * Q: What is the difference between I_WILL_FREE and I_FREEING? 2396 2399 */ 2397 2400 #define I_DIRTY_SYNC (1 << 0) ··· 2418 2415 #define I_DONTCACHE (1 << 16) 2419 2416 #define I_SYNC_QUEUED (1 << 17) 2420 2417 #define I_PINNING_NETFS_WB (1 << 18) 2418 + #define __I_LRU_ISOLATING 19 2419 + #define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING) 2421 2420 2422 2421 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) 2423 2422 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+1 -1
include/linux/i2c.h
··· 1066 1066 struct acpi_resource; 1067 1067 struct acpi_resource_i2c_serialbus; 1068 1068 1069 - #if IS_ENABLED(CONFIG_ACPI) 1069 + #if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C) 1070 1070 bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, 1071 1071 struct acpi_resource_i2c_serialbus **i2c); 1072 1072 int i2c_acpi_client_count(struct acpi_device *adev);
+7
include/linux/kvm_host.h
··· 715 715 } 716 716 #endif 717 717 718 + #ifndef kvm_arch_has_readonly_mem 719 + static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) 720 + { 721 + return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM); 722 + } 723 + #endif 724 + 718 725 struct kvm_memslots { 719 726 u64 generation; 720 727 atomic_long_t last_used_slot;
-3
include/linux/netfs.h
··· 73 73 #define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */ 74 74 #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ 75 75 #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ 76 - #define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark 77 - * write to cache on read */ 78 76 }; 79 77 80 78 /* ··· 267 269 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ 268 270 #define NETFS_RREQ_FAILED 4 /* The request failed */ 269 271 #define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ 270 - #define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */ 271 272 #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ 272 273 #define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ 273 274 #define NETFS_RREQ_BLOCKED 10 /* We blocked */
-1
include/linux/trace_events.h
··· 880 880 struct perf_event; 881 881 882 882 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); 883 - DECLARE_PER_CPU(int, bpf_kprobe_override); 884 883 885 884 extern int perf_trace_init(struct perf_event *event); 886 885 extern void perf_trace_destroy(struct perf_event *event);
+4
include/net/af_vsock.h
··· 233 233 int vsock_add_tap(struct vsock_tap *vt); 234 234 int vsock_remove_tap(struct vsock_tap *vt); 235 235 void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque); 236 + int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 237 + int flags); 236 238 int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 237 239 int flags); 240 + int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, 241 + size_t len, int flags); 238 242 int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, 239 243 size_t len, int flags); 240 244
+1
include/net/mana/mana.h
··· 275 275 /* NAPI data */ 276 276 struct napi_struct napi; 277 277 int work_done; 278 + int work_done_since_doorbell; 278 279 int budget; 279 280 }; 280 281
+5
include/sound/cs35l56.h
··· 277 277 return 0; 278 278 } 279 279 280 + static inline bool cs35l56_is_otp_register(unsigned int reg) 281 + { 282 + return (reg >> 16) == 3; 283 + } 284 + 280 285 extern struct regmap_config cs35l56_regmap_i2c; 281 286 extern struct regmap_config cs35l56_regmap_spi; 282 287 extern struct regmap_config cs35l56_regmap_sdw;
+5
include/sound/soc-component.h
··· 462 462 const char *pin); 463 463 464 464 /* component controls */ 465 + struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component, 466 + const char * const ctl); 467 + struct snd_kcontrol * 468 + snd_soc_component_get_kcontrol_locked(struct snd_soc_component *component, 469 + const char * const ctl); 465 470 int snd_soc_component_notify_control(struct snd_soc_component *component, 466 471 const char * const ctl); 467 472
+2
include/trace/events/netfs.h
··· 51 51 EM(netfs_rreq_trace_resubmit, "RESUBMT") \ 52 52 EM(netfs_rreq_trace_set_pause, "PAUSE ") \ 53 53 EM(netfs_rreq_trace_unlock, "UNLOCK ") \ 54 + EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \ 54 55 EM(netfs_rreq_trace_unmark, "UNMARK ") \ 55 56 EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \ 56 57 EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \ ··· 146 145 EM(netfs_folio_trace_clear_g, "clear-g") \ 147 146 EM(netfs_folio_trace_clear_s, "clear-s") \ 148 147 EM(netfs_folio_trace_copy_to_cache, "mark-copy") \ 148 + EM(netfs_folio_trace_end_copy, "end-copy") \ 149 149 EM(netfs_folio_trace_filled_gaps, "filled-gaps") \ 150 150 EM(netfs_folio_trace_kill, "kill") \ 151 151 EM(netfs_folio_trace_kill_cc, "kill-cc") \
+2 -1
include/uapi/linux/nsfs.h
··· 3 3 #define __LINUX_NSFS_H 4 4 5 5 #include <linux/ioctl.h> 6 + #include <linux/types.h> 6 7 7 8 #define NSIO 0xb7 8 9 ··· 17 16 /* Get owner UID (in the caller's user namespace) for a user namespace */ 18 17 #define NS_GET_OWNER_UID _IO(NSIO, 0x4) 19 18 /* Get the id for a mount namespace */ 20 - #define NS_GET_MNTNS_ID _IO(NSIO, 0x5) 19 + #define NS_GET_MNTNS_ID _IOR(NSIO, 0x5, __u64) 21 20 /* Translate pid from target pid namespace into the caller's pid namespace. */ 22 21 #define NS_GET_PID_FROM_PIDNS _IOR(NSIO, 0x6, int) 23 22 /* Return thread-group leader id of pid in the callers pid namespace. */
+1
include/uapi/linux/psp-sev.h
··· 51 51 SEV_RET_INVALID_PLATFORM_STATE, 52 52 SEV_RET_INVALID_GUEST_STATE, 53 53 SEV_RET_INAVLID_CONFIG, 54 + SEV_RET_INVALID_CONFIG = SEV_RET_INAVLID_CONFIG, 54 55 SEV_RET_INVALID_LEN, 55 56 SEV_RET_ALREADY_OWNED, 56 57 SEV_RET_INVALID_CERTIFICATE,
+5 -2
io_uring/net.c
··· 601 601 .iovs = &kmsg->fast_iov, 602 602 .max_len = INT_MAX, 603 603 .nr_iovs = 1, 604 - .mode = KBUF_MODE_EXPAND, 605 604 }; 606 605 607 606 if (kmsg->free_iov) { 608 607 arg.nr_iovs = kmsg->free_iov_nr; 609 608 arg.iovs = kmsg->free_iov; 610 - arg.mode |= KBUF_MODE_FREE; 609 + arg.mode = KBUF_MODE_FREE; 611 610 } 612 611 613 612 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) 614 613 arg.nr_iovs = 1; 614 + else 615 + arg.mode |= KBUF_MODE_EXPAND; 615 616 616 617 ret = io_buffers_select(req, &arg, issue_flags); 617 618 if (unlikely(ret < 0)) ··· 624 623 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { 625 624 kmsg->free_iov_nr = ret; 626 625 kmsg->free_iov = arg.iovs; 626 + req->flags |= REQ_F_NEED_CLEANUP; 627 627 } 628 628 } 629 629 ··· 1096 1094 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { 1097 1095 kmsg->free_iov_nr = ret; 1098 1096 kmsg->free_iov = arg.iovs; 1097 + req->flags |= REQ_F_NEED_CLEANUP; 1099 1098 } 1100 1099 } else { 1101 1100 void __user *buf;
+3 -2
kernel/bpf/verifier.c
··· 16884 16884 spi = i / BPF_REG_SIZE; 16885 16885 16886 16886 if (exact != NOT_EXACT && 16887 - old->stack[spi].slot_type[i % BPF_REG_SIZE] != 16888 - cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 16887 + (i >= cur->allocated_stack || 16888 + old->stack[spi].slot_type[i % BPF_REG_SIZE] != 16889 + cur->stack[spi].slot_type[i % BPF_REG_SIZE])) 16889 16890 return false; 16890 16891 16891 16892 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)
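The verifier fix above adds a bounds check before the exact slot-type comparison: the old state can have more allocated stack than the current one, and indexing cur->stack[spi] past cur->allocated_stack read out of bounds. Reduced to a standalone sketch (not verifier code), the pattern is:

    #include <stdbool.h>
    #include <stddef.h>

    bool slots_equal(const char *old_slots, size_t old_len,
                     const char *cur_slots, size_t cur_len)
    {
        for (size_t i = 0; i < old_len; i++) {
            if (i >= cur_len ||                /* the added guard */
                old_slots[i] != cur_slots[i])  /* only then compare */
                return false;
        }
        return true;
    }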
+4 -1
kernel/dma/debug.c
··· 416 416 * dma_active_cacheline entry to track per event. dma_map_sg(), on the 417 417 * other hand, consumes a single dma_debug_entry, but inserts 'nents' 418 418 * entries into the tree. 419 + * 420 + * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end 421 + * up right back in the DMA debugging code, leading to a deadlock. 419 422 */ 420 - static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC); 423 + static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN); 421 424 static DEFINE_SPINLOCK(radix_lock); 422 425 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) 423 426 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+2 -1
kernel/events/core.c
··· 9706 9706 9707 9707 ret = __perf_event_account_interrupt(event, throttle); 9708 9708 9709 - if (event->prog && !bpf_overflow_handler(event, data, regs)) 9709 + if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && 9710 + !bpf_overflow_handler(event, data, regs)) 9710 9711 return ret; 9711 9712 9712 9713 /*
+22 -3
kernel/fork.c
··· 2053 2053 */ 2054 2054 int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) 2055 2055 { 2056 - bool thread = flags & PIDFD_THREAD; 2057 - 2058 - if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID)) 2056 + if (!pid) 2059 2057 return -EINVAL; 2058 + 2059 + scoped_guard(rcu) { 2060 + struct task_struct *tsk; 2061 + 2062 + if (flags & PIDFD_THREAD) 2063 + tsk = pid_task(pid, PIDTYPE_PID); 2064 + else 2065 + tsk = pid_task(pid, PIDTYPE_TGID); 2066 + if (!tsk) 2067 + return -EINVAL; 2068 + 2069 + /* Don't create pidfds for kernel threads for now. */ 2070 + if (tsk->flags & PF_KTHREAD) 2071 + return -EINVAL; 2072 + } 2060 2073 2061 2074 return __pidfd_prepare(pid, flags, ret); 2062 2075 } ··· 2415 2402 */ 2416 2403 if (clone_flags & CLONE_PIDFD) { 2417 2404 int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0; 2405 + 2406 + /* Don't create pidfds for kernel threads for now. */ 2407 + if (args->kthread) { 2408 + retval = -EINVAL; 2409 + goto bad_fork_free_pid; 2410 + } 2418 2411 2419 2412 /* Note that no task has been attached to @pid yet. */ 2420 2413 retval = __pidfd_prepare(pid, flags, &pidfile);
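With the fork.c change above, pidfd creation is refused for kernel threads, both on the clone(CLONE_PIDFD) path and, via pidfd_prepare(), for pidfd_open(). A quick userspace probe of the behaviour; the assumption here is that PID 2 is kthreadd (a kernel thread) on the running system, which is the usual convention:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        long fd = syscall(SYS_pidfd_open, 2, 0);  /* kthreadd, by convention */

        if (fd < 0)
            printf("pidfd_open(2): %s\n", strerror(errno));  /* expect EINVAL */
        else
            printf("got pidfd %ld\n", fd);
        return 0;
    }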
+1
kernel/irq/irqdesc.c
··· 530 530 flags = IRQD_AFFINITY_MANAGED | 531 531 IRQD_MANAGED_SHUTDOWN; 532 532 } 533 + flags |= IRQD_AFFINITY_SET; 533 534 mask = &affinity->mask; 534 535 node = cpu_to_node(cpumask_first(mask)); 535 536 affinity++;
+2 -2
kernel/kprobes.c
··· 1557 1557 if (lookup_symbol_name(addr, symbuf)) 1558 1558 return false; 1559 1559 1560 - return str_has_prefix("__cfi_", symbuf) || 1561 - str_has_prefix("__pfx_", symbuf); 1560 + return str_has_prefix(symbuf, "__cfi_") || 1561 + str_has_prefix(symbuf, "__pfx_"); 1562 1562 } 1563 1563 1564 1564 static int check_kprobe_address_safe(struct kprobe *p,
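The kprobes fix above corrects a swapped argument order: str_has_prefix(str, prefix) reports whether str starts with prefix, returning the prefix length on a match and 0 otherwise, so with the arguments reversed the check could never match a real symbol name. A userspace copy of the helper showing both orders:

    #include <stdio.h>
    #include <string.h>

    static size_t str_has_prefix(const char *str, const char *prefix)
    {
        size_t len = strlen(prefix);

        return strncmp(str, prefix, len) == 0 ? len : 0;
    }

    int main(void)
    {
        /* correct order: symbol first, prefix second -> 6 */
        printf("%zu\n", str_has_prefix("__cfi_foo", "__cfi_"));
        /* the old, swapped order -> always 0 for real symbols */
        printf("%zu\n", str_has_prefix("__cfi_", "__cfi_foo"));
        return 0;
    }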
+25 -15
kernel/module/main.c
··· 3104 3104 struct idempotent *existing; 3105 3105 bool first; 3106 3106 3107 - u->ret = 0; 3107 + u->ret = -EINTR; 3108 3108 u->cookie = cookie; 3109 3109 init_completion(&u->complete); 3110 3110 ··· 3140 3140 hlist_for_each_entry_safe(pos, next, head, entry) { 3141 3141 if (pos->cookie != cookie) 3142 3142 continue; 3143 - hlist_del(&pos->entry); 3143 + hlist_del_init(&pos->entry); 3144 3144 pos->ret = ret; 3145 3145 complete(&pos->complete); 3146 3146 } 3147 3147 spin_unlock(&idem_lock); 3148 3148 return ret; 3149 + } 3150 + 3151 + /* 3152 + * Wait for the idempotent worker. 3153 + * 3154 + * If we get interrupted, we need to remove ourselves from the 3155 + * the idempotent list, and the completion may still come in. 3156 + * 3157 + * The 'idem_lock' protects against the race, and 'idem.ret' was 3158 + * initialized to -EINTR and is thus always the right return 3159 + * value even if the idempotent work then completes between 3160 + * the wait_for_completion and the cleanup. 3161 + */ 3162 + static int idempotent_wait_for_completion(struct idempotent *u) 3163 + { 3164 + if (wait_for_completion_interruptible(&u->complete)) { 3165 + spin_lock(&idem_lock); 3166 + if (!hlist_unhashed(&u->entry)) 3167 + hlist_del(&u->entry); 3168 + spin_unlock(&idem_lock); 3169 + } 3170 + return u->ret; 3149 3171 } 3150 3172 3151 3173 static int init_module_from_file(struct file *f, const char __user * uargs, int flags) ··· 3213 3191 3214 3192 /* 3215 3193 * Somebody else won the race and is loading the module. 3216 - * 3217 - * We have to wait for it forever, since our 'idem' is 3218 - * on the stack and the list entry stays there until 3219 - * completed (but we could fix it under the idem_lock) 3220 - * 3221 - * It's also unclear what a real timeout might be, 3222 - * but we could maybe at least make this killable 3223 - * and remove the idem entry in that case? 3224 3194 */ 3225 - for (;;) { 3226 - if (wait_for_completion_timeout(&idem.complete, 10*HZ)) 3227 - return idem.ret; 3228 - pr_warn_once("module '%pD' taking a long time to load", f); 3229 - } 3195 + return idempotent_wait_for_completion(&idem); 3230 3196 } 3231 3197 3232 3198 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
+4 -5
kernel/time/ntp.c
··· 727 727 } 728 728 729 729 if (txc->modes & ADJ_MAXERROR) 730 - time_maxerror = txc->maxerror; 730 + time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT); 731 731 732 732 if (txc->modes & ADJ_ESTERROR) 733 - time_esterror = txc->esterror; 733 + time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT); 734 734 735 735 if (txc->modes & ADJ_TIMECONST) { 736 - time_constant = txc->constant; 736 + time_constant = clamp(txc->constant, 0, MAXTC); 737 737 if (!(time_status & STA_NANO)) 738 738 time_constant += 4; 739 - time_constant = min(time_constant, (long)MAXTC); 740 - time_constant = max(time_constant, 0l); 739 + time_constant = clamp(time_constant, 0, MAXTC); 741 740 } 742 741 743 742 if (txc->modes & ADJ_TAI &&
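The ntp.c change above clamps maxerror and esterror, which were previously stored unchecked, and rewrites the time_constant bounds with clamp(). The double clamp on time_constant is deliberate: the +4 applied in the !STA_NANO case can push an already-clamped value back over MAXTC. In miniature (MAXTC is 10 in the kernel; taken as given here, with a simplified clamp macro):

    #include <stdio.h>

    #define MAXTC 10
    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

    int main(void)
    {
        long constant = 1000;                         /* hostile txc->constant */

        constant = clamp(constant, 0L, (long)MAXTC);  /* -> 10 */
        constant += 4;                                /* -> 14, over MAXTC again */
        constant = clamp(constant, 0L, (long)MAXTC);  /* -> 10 */
        printf("%ld\n", constant);
        return 0;
    }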
+1 -1
kernel/time/timekeeping.c
··· 2606 2606 clock_set |= timekeeping_advance(TK_ADV_FREQ); 2607 2607 2608 2608 if (clock_set) 2609 - clock_was_set(CLOCK_REALTIME); 2609 + clock_was_set(CLOCK_SET_WALL); 2610 2610 2611 2611 ntp_notify_cmos_timer(); 2612 2612
+5 -1
net/bridge/br_netfilter_hooks.c
··· 622 622 if (likely(nf_ct_is_confirmed(ct))) 623 623 return NF_ACCEPT; 624 624 625 + if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) { 626 + nf_reset_ct(skb); 627 + return NF_ACCEPT; 628 + } 629 + 625 630 WARN_ON_ONCE(skb_shared(skb)); 626 - WARN_ON_ONCE(refcount_read(&nfct->use) != 1); 627 631 628 632 /* We can't call nf_confirm here, it would create a dependency 629 633 * on nf_conntrack module.
+17 -9
net/core/dev.c
··· 9912 9912 } 9913 9913 } 9914 9914 9915 + static bool netdev_has_ip_or_hw_csum(netdev_features_t features) 9916 + { 9917 + netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 9918 + bool ip_csum = (features & ip_csum_mask) == ip_csum_mask; 9919 + bool hw_csum = features & NETIF_F_HW_CSUM; 9920 + 9921 + return ip_csum || hw_csum; 9922 + } 9923 + 9915 9924 static netdev_features_t netdev_fix_features(struct net_device *dev, 9916 9925 netdev_features_t features) 9917 9926 { ··· 10002 9993 features &= ~NETIF_F_LRO; 10003 9994 } 10004 9995 10005 - if (features & NETIF_F_HW_TLS_TX) { 10006 - bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 10007 - (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 10008 - bool hw_csum = features & NETIF_F_HW_CSUM; 10009 - 10010 - if (!ip_csum && !hw_csum) { 10011 - netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 10012 - features &= ~NETIF_F_HW_TLS_TX; 10013 - } 9996 + if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) { 9997 + netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9998 + features &= ~NETIF_F_HW_TLS_TX; 10014 9999 } 10015 10000 10016 10001 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 10017 10002 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 10018 10003 features &= ~NETIF_F_HW_TLS_RX; 10004 + } 10005 + 10006 + if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) { 10007 + netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n"); 10008 + features &= ~NETIF_F_GSO_UDP_L4; 10019 10009 } 10020 10010 10021 10011 return features;
+6 -2
net/ethtool/cmis_fw_update.c
··· 35 35 __be16 resv7; 36 36 }; 37 37 38 - #define CMIS_CDB_FW_WRITE_MECHANISM_LPL 0x01 38 + enum cmis_cdb_fw_write_mechanism { 39 + CMIS_CDB_FW_WRITE_MECHANISM_LPL = 0x01, 40 + CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 0x11, 41 + }; 39 42 40 43 static int 41 44 cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb, ··· 67 64 } 68 65 69 66 rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload; 70 - if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL)) { 67 + if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL || 68 + rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_BOTH)) { 71 69 ethnl_module_fw_flash_ntf_err(dev, ntf_params, 72 70 "Write LPL is not supported", 73 71 NULL);
+12 -16
net/ipv4/tcp_input.c
··· 238 238 */ 239 239 if (unlikely(len != icsk->icsk_ack.rcv_mss)) { 240 240 u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE; 241 + u8 old_ratio = tcp_sk(sk)->scaling_ratio; 241 242 242 243 do_div(val, skb->truesize); 243 244 tcp_sk(sk)->scaling_ratio = val ? val : 1; 245 + 246 + if (old_ratio != tcp_sk(sk)->scaling_ratio) 247 + WRITE_ONCE(tcp_sk(sk)->window_clamp, 248 + tcp_win_from_space(sk, sk->sk_rcvbuf)); 244 249 } 245 250 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, 246 251 tcp_sk(sk)->advmss); ··· 759 754 * <prev RTT . ><current RTT .. ><next RTT .... > 760 755 */ 761 756 762 - if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) { 757 + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && 758 + !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 763 759 u64 rcvwin, grow; 764 760 int rcvbuf; 765 761 ··· 776 770 777 771 rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin), 778 772 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); 779 - if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 780 - if (rcvbuf > sk->sk_rcvbuf) { 781 - WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); 773 + if (rcvbuf > sk->sk_rcvbuf) { 774 + WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); 782 775 783 - /* Make the window clamp follow along. */ 784 - WRITE_ONCE(tp->window_clamp, 785 - tcp_win_from_space(sk, rcvbuf)); 786 - } 787 - } else { 788 - /* Make the window clamp follow along while being bounded 789 - * by SO_RCVBUF. 790 - */ 791 - int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf)); 792 - 793 - if (clamp > tp->window_clamp) 794 - WRITE_ONCE(tp->window_clamp, clamp); 776 + /* Make the window clamp follow along. */ 777 + WRITE_ONCE(tp->window_clamp, 778 + tcp_win_from_space(sk, rcvbuf)); 795 779 } 796 780 } 797 781 tp->rcvq_space.space = copied;
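In the tcp_input.c change above, the first hunk recomputes window_clamp whenever the measured scaling ratio changes, keeping the rcvbuf-to-window conversion in step, and the second hunk stops receive-buffer auto-tuning from fighting an application-set SO_RCVBUF. The ratio itself is a fixed-point estimate of payload/truesize; roughly like this (assuming TCP_RMEM_TO_WIN_SCALE is 8, as in current kernels):

    #include <stdio.h>

    #define TCP_RMEM_TO_WIN_SCALE 8

    static unsigned int scaling_ratio(unsigned long long len,
                                      unsigned long long truesize)
    {
        unsigned long long val = (len << TCP_RMEM_TO_WIN_SCALE) / truesize;

        return val ? val : 1;  /* never zero */
    }

    static unsigned long win_from_space(unsigned long space, unsigned int ratio)
    {
        return (space * ratio) >> TCP_RMEM_TO_WIN_SCALE;
    }

    int main(void)
    {
        unsigned int ratio = scaling_ratio(1448, 2048);  /* ~0.7 of truesize */

        printf("ratio=%u window=%lu\n", ratio, win_from_space(131072, ratio));
        return 0;
    }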
+6
net/ipv4/udp_offload.c
··· 282 282 skb_transport_header(gso_skb))) 283 283 return ERR_PTR(-EINVAL); 284 284 285 + /* We don't know if egress device can segment and checksum the packet 286 + * when IPv6 extension headers are present. Fall back to software GSO. 287 + */ 288 + if (gso_skb->ip_summed != CHECKSUM_PARTIAL) 289 + features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK); 290 + 285 291 if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) { 286 292 /* Packet is from an untrusted source, reset gso_segs. */ 287 293 skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
+4
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 154 154 }; 155 155 struct inet_frag_queue *q; 156 156 157 + if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST | 158 + IPV6_ADDR_LINKLOCAL))) 159 + key.iif = 0; 160 + 157 161 q = inet_frag_find(nf_frag->fqdir, &key); 158 162 if (!q) 159 163 return NULL;
+1 -1
net/mptcp/diag.c
··· 94 94 nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */ 95 95 nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */ 96 96 nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */ 97 - nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */ 97 + nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */ 98 98 nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */ 99 99 nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */ 100 100 nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */
+1 -1
net/netfilter/nf_flow_table_offload.c
··· 841 841 struct list_head *block_cb_list) 842 842 { 843 843 struct flow_cls_offload cls_flow = {}; 844 + struct netlink_ext_ack extack = {}; 844 845 struct flow_block_cb *block_cb; 845 - struct netlink_ext_ack extack; 846 846 __be16 proto = ETH_P_ALL; 847 847 int err, i = 0; 848 848
+110 -53
net/netfilter/nf_tables_api.c
··· 8020 8020 return skb->len;
8021 8021 }
8022 8022
8023 + static int nf_tables_dumpreset_obj(struct sk_buff *skb,
8024 + struct netlink_callback *cb)
8025 + {
8026 + struct nftables_pernet *nft_net = nft_pernet(sock_net(skb->sk));
8027 + int ret;
8028 +
8029 + mutex_lock(&nft_net->commit_mutex);
8030 + ret = nf_tables_dump_obj(skb, cb);
8031 + mutex_unlock(&nft_net->commit_mutex);
8032 +
8033 + return ret;
8034 + }
8035 +
8023 8036 static int nf_tables_dump_obj_start(struct netlink_callback *cb)
8024 8037 {
8025 8038 struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
··· 8049 8036 if (nla[NFTA_OBJ_TYPE])
8050 8037 ctx->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
8051 8038
8052 - if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
8053 - ctx->reset = true;
8054 -
8055 8039 return 0;
8040 + }
8041 +
8042 + static int nf_tables_dumpreset_obj_start(struct netlink_callback *cb)
8043 + {
8044 + struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
8045 +
8046 + ctx->reset = true;
8047 +
8048 + return nf_tables_dump_obj_start(cb);
8056 8049 }
8057 8050
8058 8051 static int nf_tables_dump_obj_done(struct netlink_callback *cb)
··· 8071 8052 }
8072 8053
8073 8054 /* called with rcu_read_lock held */
8074 - static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
8075 - const struct nlattr * const nla[])
8055 + static struct sk_buff *
8056 + nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
8057 + const struct nlattr * const nla[], bool reset)
8076 8058 {
8077 8059 struct netlink_ext_ack *extack = info->extack;
8078 8060 u8 genmask = nft_genmask_cur(info->net);
··· 8082 8062 struct net *net = info->net;
8083 8063 struct nft_object *obj;
8084 8064 struct sk_buff *skb2;
8085 - bool reset = false;
8086 8065 u32 objtype;
8087 8066 int err;
8067 +
8068 + if (!nla[NFTA_OBJ_NAME] ||
8069 + !nla[NFTA_OBJ_TYPE])
8070 + return ERR_PTR(-EINVAL);
8071 +
8072 + table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
8073 + if (IS_ERR(table)) {
8074 + NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
8075 + return ERR_CAST(table);
8076 + }
8077 +
8078 + objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
8079 + obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
8080 + if (IS_ERR(obj)) {
8081 + NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
8082 + return ERR_CAST(obj);
8083 + }
8084 +
8085 + skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
8086 + if (!skb2)
8087 + return ERR_PTR(-ENOMEM);
8088 +
8089 + err = nf_tables_fill_obj_info(skb2, net, portid,
8090 + info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
8091 + family, table, obj, reset);
8092 + if (err < 0) {
8093 + kfree_skb(skb2);
8094 + return ERR_PTR(err);
8095 + }
8096 +
8097 + return skb2;
8098 + }
8099 +
8100 + static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
8101 + const struct nlattr * const nla[])
8102 + {
8103 + u32 portid = NETLINK_CB(skb).portid;
8104 + struct sk_buff *skb2;
8088 8105
8089 8106 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
8090 8107 struct netlink_dump_control c = {
··· 8135 8078 return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
8136 8079 }
8137 8080
8138 - if (!nla[NFTA_OBJ_NAME] ||
8139 - !nla[NFTA_OBJ_TYPE])
8081 + skb2 = nf_tables_getobj_single(portid, info, nla, false);
8082 + if (IS_ERR(skb2))
8083 + return PTR_ERR(skb2);
8084 +
8085 + return nfnetlink_unicast(skb2, info->net, portid);
8086 + }
8087 +
8088 + static int nf_tables_getobj_reset(struct sk_buff *skb,
8089 + const struct nfnl_info *info,
8090 + const struct nlattr * const nla[])
8091 + {
8092 + struct nftables_pernet *nft_net = nft_pernet(info->net);
8093 + u32 portid = NETLINK_CB(skb).portid;
8094 + struct net *net = info->net;
8095 + struct sk_buff *skb2;
8096 + char *buf;
8097 +
8098 + if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
8099 + struct netlink_dump_control c = {
8100 + .start = nf_tables_dumpreset_obj_start,
8101 + .dump = nf_tables_dumpreset_obj,
8102 + .done = nf_tables_dump_obj_done,
8103 + .module = THIS_MODULE,
8104 + .data = (void *)nla,
8105 + };
8106 +
8107 + return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
8108 + }
8109 +
8110 + if (!try_module_get(THIS_MODULE))
8140 8111 return -EINVAL;
8112 + rcu_read_unlock();
8113 + mutex_lock(&nft_net->commit_mutex);
8114 + skb2 = nf_tables_getobj_single(portid, info, nla, true);
8115 + mutex_unlock(&nft_net->commit_mutex);
8116 + rcu_read_lock();
8117 + module_put(THIS_MODULE);
8141 8118
8142 - table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
8143 - if (IS_ERR(table)) {
8144 - NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
8145 - return PTR_ERR(table);
8146 - }
8119 + if (IS_ERR(skb2))
8120 + return PTR_ERR(skb2);
8147 8121
8148 - objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
8149 - obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
8150 - if (IS_ERR(obj)) {
8151 - NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
8152 - return PTR_ERR(obj);
8153 - }
8122 + buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
8123 + nla_len(nla[NFTA_OBJ_TABLE]),
8124 + (char *)nla_data(nla[NFTA_OBJ_TABLE]),
8125 + nft_net->base_seq);
8126 + audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
8127 + AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
8128 + kfree(buf);
8154 8129
8155 - skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
8156 - if (!skb2)
8157 - return -ENOMEM;
8158 -
8159 - if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
8160 - reset = true;
8161 -
8162 - if (reset) {
8163 - const struct nftables_pernet *nft_net;
8164 - char *buf;
8165 -
8166 - nft_net = nft_pernet(net);
8167 - buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq);
8168 -
8169 - audit_log_nfcfg(buf,
8170 - family,
8171 - 1,
8172 - AUDIT_NFT_OP_OBJ_RESET,
8173 - GFP_ATOMIC);
8174 - kfree(buf);
8175 - }
8176 -
8177 - err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
8178 - info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
8179 - family, table, obj, reset);
8180 - if (err < 0)
8181 - goto err_fill_obj_info;
8182 -
8183 - return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
8184 -
8185 - err_fill_obj_info:
8186 - kfree_skb(skb2);
8187 - return err;
8130 + return nfnetlink_unicast(skb2, net, portid);
8188 8131 }
8189 8132
8190 8133 static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
··· 9467 9410 .policy = nft_obj_policy,
9468 9411 },
9469 9412 [NFT_MSG_GETOBJ_RESET] = {
9470 - .call = nf_tables_getobj,
9413 + .call = nf_tables_getobj_reset,
9471 9414 .type = NFNL_CB_RCU,
9472 9415 .attr_count = NFTA_OBJ_MAX,
9473 9416 .policy = nft_obj_policy,
+4 -1
net/netfilter/nfnetlink.c
··· 427 427 428 428 nfnl_unlock(subsys_id); 429 429 430 - if (nlh->nlmsg_flags & NLM_F_ACK) 430 + if (nlh->nlmsg_flags & NLM_F_ACK) { 431 + memset(&extack, 0, sizeof(extack)); 431 432 nfnl_err_add(&err_list, nlh, 0, &extack); 433 + } 432 434 433 435 while (skb->len >= nlmsg_total_size(0)) { 434 436 int msglen, type; ··· 579 577 ss->abort(net, oskb, NFNL_ABORT_NONE); 580 578 netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL); 581 579 } else if (nlh->nlmsg_flags & NLM_F_ACK) { 580 + memset(&extack, 0, sizeof(extack)); 582 581 nfnl_err_add(&err_list, nlh, 0, &extack); 583 582 } 584 583 } else {
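Both hunks apply the same one-line fix: the batch loop reuses a single on-stack extack across many messages, so without a memset() the ACK for one message can carry cookie data left over from an earlier one. A runnable userspace illustration of that stale-reuse pattern (struct and function names are made up):

#include <stdio.h>
#include <string.h>

struct ack_info { int cookie_len; char cookie[8]; };

static void handle_msg(struct ack_info *info, int msg, int set_cookie)
{
        if (set_cookie) {
                info->cookie_len = 4;
                memcpy(info->cookie, "\xde\xad\xbe\xef", 4);
        }
        printf("ack for msg %d carries cookie_len=%d\n", msg, info->cookie_len);
}

int main(void)
{
        struct ack_info info = { 0 };

        handle_msg(&info, 1, 1);        /* this message sets a cookie */
        handle_msg(&info, 2, 0);        /* bug: stale cookie_len=4 reported */
        memset(&info, 0, sizeof(info)); /* the fix: re-init before reuse */
        handle_msg(&info, 3, 0);        /* correct: cookie_len=0 */
        return 0;
}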
+1 -1
net/sunrpc/svc.c
··· 161 161 str[len] = '\n'; 162 162 str[len + 1] = '\0'; 163 163 164 - return sysfs_emit(buf, str); 164 + return sysfs_emit(buf, "%s", str); 165 165 } 166 166 167 167 module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
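sysfs_emit() takes a printf-style format string, so passing the assembled buffer directly would let any '%' inside it be interpreted as a conversion specifier; "%s" makes the buffer inert data. The same class of bug shown with plain printf(), as a runnable userspace analogue:

#include <stdio.h>

int main(void)
{
        const char *str = "percpu 100%s\n";     /* data that contains '%' */

        /* printf(str); would consume a nonexistent char * argument (UB) */
        printf("%s", str);                      /* '%' in str stays literal */
        return 0;
}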
+29 -21
net/vmw_vsock/af_vsock.c
··· 1271 1271 return err; 1272 1272 } 1273 1273 1274 + int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, 1275 + size_t len, int flags) 1276 + { 1277 + struct sock *sk = sock->sk; 1278 + struct vsock_sock *vsk = vsock_sk(sk); 1279 + 1280 + return vsk->transport->dgram_dequeue(vsk, msg, len, flags); 1281 + } 1282 + 1274 1283 int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, 1275 1284 size_t len, int flags) 1276 1285 { 1277 1286 #ifdef CONFIG_BPF_SYSCALL 1287 + struct sock *sk = sock->sk; 1278 1288 const struct proto *prot; 1279 - #endif 1280 - struct vsock_sock *vsk; 1281 - struct sock *sk; 1282 1289 1283 - sk = sock->sk; 1284 - vsk = vsock_sk(sk); 1285 - 1286 - #ifdef CONFIG_BPF_SYSCALL 1287 1290 prot = READ_ONCE(sk->sk_prot); 1288 1291 if (prot != &vsock_proto) 1289 1292 return prot->recvmsg(sk, msg, len, flags, NULL); 1290 1293 #endif 1291 1294 1292 - return vsk->transport->dgram_dequeue(vsk, msg, len, flags); 1295 + return __vsock_dgram_recvmsg(sock, msg, len, flags); 1293 1296 } 1294 1297 EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg); 1295 1298 ··· 2229 2226 } 2230 2227 2231 2228 int 2232 - vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 2233 - int flags) 2229 + __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 2230 + int flags) 2234 2231 { 2235 2232 struct sock *sk; 2236 2233 struct vsock_sock *vsk; 2237 2234 const struct vsock_transport *transport; 2238 - #ifdef CONFIG_BPF_SYSCALL 2239 - const struct proto *prot; 2240 - #endif 2241 2235 int err; 2242 2236 2243 2237 sk = sock->sk; ··· 2285 2285 goto out; 2286 2286 } 2287 2287 2288 - #ifdef CONFIG_BPF_SYSCALL 2289 - prot = READ_ONCE(sk->sk_prot); 2290 - if (prot != &vsock_proto) { 2291 - release_sock(sk); 2292 - return prot->recvmsg(sk, msg, len, flags, NULL); 2293 - } 2294 - #endif 2295 - 2296 2288 if (sk->sk_type == SOCK_STREAM) 2297 2289 err = __vsock_stream_recvmsg(sk, msg, len, flags); 2298 2290 else ··· 2293 2301 out: 2294 2302 release_sock(sk); 2295 2303 return err; 2304 + } 2305 + 2306 + int 2307 + vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 2308 + int flags) 2309 + { 2310 + #ifdef CONFIG_BPF_SYSCALL 2311 + struct sock *sk = sock->sk; 2312 + const struct proto *prot; 2313 + 2314 + prot = READ_ONCE(sk->sk_prot); 2315 + if (prot != &vsock_proto) 2316 + return prot->recvmsg(sk, msg, len, flags, NULL); 2317 + #endif 2318 + 2319 + return __vsock_connectible_recvmsg(sock, msg, len, flags); 2296 2320 } 2297 2321 EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg); 2298 2322
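The vsock hunk breaks a recursion: the public recvmsg entry points redirect to a substituted sk_prot (the BPF sockmap path), and that substituted recvmsg used to call back into the public entry point. Splitting out a double-underscore core that never redirects lets the override call the transport directly, as the vsock_bpf.c hunk below does. The shape of the fix as a self-contained mock-up, with all names invented:

#include <stdio.h>

static int override_installed = 1;      /* models a BPF prot being attached */

static int recvmsg_core(void)           /* plays the __vsock_*_recvmsg role */
{
        puts("transport dequeue");
        return 0;
}

static int bpf_override_recvmsg(void)
{
        /* calling recvmsg_entry() here would recurse forever;
         * the override must call the core directly */
        return recvmsg_core();
}

static int recvmsg_entry(void)          /* plays the public vsock_*_recvmsg role */
{
        if (override_installed)
                return bpf_override_recvmsg();
        return recvmsg_core();
}

int main(void)
{
        return recvmsg_entry();
}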
+2 -2
net/vmw_vsock/vsock_bpf.c
··· 64 64 int err; 65 65 66 66 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) 67 - err = vsock_connectible_recvmsg(sock, msg, len, flags); 67 + err = __vsock_connectible_recvmsg(sock, msg, len, flags); 68 68 else if (sk->sk_type == SOCK_DGRAM) 69 - err = vsock_dgram_recvmsg(sock, msg, len, flags); 69 + err = __vsock_dgram_recvmsg(sock, msg, len, flags); 70 70 else 71 71 err = -EPROTOTYPE; 72 72
+1 -1
rust/kernel/firmware.rs
··· 2 2 3 3 //! Firmware abstraction 4 4 //! 5 - //! C header: [`include/linux/firmware.h`](srctree/include/linux/firmware.h") 5 + //! C header: [`include/linux/firmware.h`](srctree/include/linux/firmware.h) 6 6 7 7 use crate::{bindings, device::Device, error::Error, error::Result, str::CStr}; 8 8 use core::ptr::NonNull;
+2 -1
scripts/syscall.tbl
··· 53 53 39 common umount2 sys_umount 54 54 40 common mount sys_mount 55 55 41 common pivot_root sys_pivot_root 56 + 42 common nfsservctl sys_ni_syscall 56 57 43 32 statfs64 sys_statfs64 compat_sys_statfs64 57 58 43 64 statfs sys_statfs 58 59 44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 ··· 101 100 79 stat64 fstatat64 sys_fstatat64 102 101 79 64 newfstatat sys_newfstatat 103 102 80 stat64 fstat64 sys_fstat64 104 - 80 64 newfstat sys_newfstat 103 + 80 64 fstat sys_newfstat 105 104 81 common sync sys_sync 106 105 82 common fsync sys_fsync 107 106 83 common fdatasync sys_fdatasync
+6 -2
security/selinux/avc.c
··· 330 330 { 331 331 struct avc_xperms_decision_node *dest_xpd; 332 332 333 - node->ae.xp_node->xp.len++; 334 333 dest_xpd = avc_xperms_decision_alloc(src->used); 335 334 if (!dest_xpd) 336 335 return -ENOMEM; 337 336 avc_copy_xperms_decision(&dest_xpd->xpd, src); 338 337 list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head); 338 + node->ae.xp_node->xp.len++; 339 339 return 0; 340 340 } 341 341 ··· 907 907 node->ae.avd.auditdeny &= ~perms; 908 908 break; 909 909 case AVC_CALLBACK_ADD_XPERMS: 910 - avc_add_xperms_decision(node, xpd); 910 + rc = avc_add_xperms_decision(node, xpd); 911 + if (rc) { 912 + avc_node_kill(node); 913 + goto out_unlock; 914 + } 911 915 break; 912 916 } 913 917 avc_node_replace(node, orig);
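Two fixes in one hunk: xp.len is now incremented only after the allocation and list insertion have succeeded, and the caller checks avc_add_xperms_decision()'s return value instead of ignoring it. The generic shape of the ordering fix, as a small compilable example with invented names:

#include <stdlib.h>

struct node { struct node *next; };
struct counted_list { struct node *head; int len; };

static int add_counted(struct counted_list *l)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return -1;      /* len untouched: list stays consistent */
        n->next = l->head;
        l->head = n;
        l->len++;               /* bump only after nothing can fail */
        return 0;
}

int main(void)
{
        struct counted_list l = { 0 };

        return add_counted(&l);
}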
+11 -1
security/selinux/hooks.c
··· 3852 3852 if (default_noexec && 3853 3853 (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { 3854 3854 int rc = 0; 3855 - if (vma_is_initial_heap(vma)) { 3855 + /* 3856 + * We don't use the vma_is_initial_heap() helper as it has 3857 + * a history of problems and is currently broken on systems 3858 + * where there is no heap, e.g. brk == start_brk. Before 3859 + * replacing the conditional below with vma_is_initial_heap(), 3860 + * or something similar, please ensure that the logic is the 3861 + * same as what we have below or you have tested every possible 3862 + * corner case you can think to test. 3863 + */ 3864 + if (vma->vm_start >= vma->vm_mm->start_brk && 3865 + vma->vm_end <= vma->vm_mm->brk) { 3856 3866 rc = avc_has_perm(sid, sid, SECCLASS_PROCESS, 3857 3867 PROCESS__EXECHEAP, NULL); 3858 3868 } else if (!vma->vm_file && (vma_is_initial_stack(vma) ||
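The comment explains why the helper is avoided; concretely, with an empty heap (start_brk == brk) the containment test kept here can never match, while an overlap-style test, which is roughly what vma_is_initial_heap() did at the time of this merge (paraphrased here, not quoted), matches almost any mapping around the break. A runnable comparison of the two predicates:

#include <stdio.h>

int main(void)
{
        unsigned long start_brk = 0x5000, brk = 0x5000; /* empty heap */
        unsigned long vm_start = 0x4000, vm_end = 0x6000; /* some vma */

        int contained = vm_start >= start_brk && vm_end <= brk;
        int overlaps  = vm_start <= brk && vm_end >= start_brk;

        printf("containment: %d, overlap-style helper: %d\n",
               contained, overlaps);
        return 0;
}

This prints "containment: 0, overlap-style helper: 1": the overlap form would wrongly grant the vma heap semantics.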
-13
sound/pci/hda/cs35l41_hda.c
··· 133 133 { CS35L41_AMP_DIG_VOL_CTRL, 0x0000A678 }, // AMP_HPF_PCM_EN = 1, AMP_VOL_PCM Mute 134 134 }; 135 135 136 - static void cs35l41_add_controls(struct cs35l41_hda *cs35l41) 137 - { 138 - struct hda_cs_dsp_ctl_info info; 139 - 140 - info.device_name = cs35l41->amp_name; 141 - info.fw_type = cs35l41->firmware_type; 142 - info.card = cs35l41->codec->card; 143 - 144 - hda_cs_dsp_add_controls(&cs35l41->cs_dsp, &info); 145 - } 146 - 147 136 static const struct cs_dsp_client_ops client_ops = { 148 137 .control_remove = hda_cs_dsp_control_remove, 149 138 }; ··· 591 602 hda_cs_dsp_fw_ids[cs35l41->firmware_type]); 592 603 if (ret) 593 604 goto err; 594 - 595 - cs35l41_add_controls(cs35l41); 596 605 597 606 cs35l41_hda_apply_calibration(cs35l41); 598 607
+1 -37
sound/pci/hda/cs35l56_hda.c
··· 559 559 kfree(coeff_filename); 560 560 } 561 561 562 - static void cs35l56_hda_create_dsp_controls_work(struct work_struct *work) 563 - { 564 - struct cs35l56_hda *cs35l56 = container_of(work, struct cs35l56_hda, control_work); 565 - struct hda_cs_dsp_ctl_info info; 566 - 567 - info.device_name = cs35l56->amp_name; 568 - info.fw_type = HDA_CS_DSP_FW_MISC; 569 - info.card = cs35l56->codec->card; 570 - 571 - hda_cs_dsp_add_controls(&cs35l56->cs_dsp, &info); 572 - } 573 - 574 562 static void cs35l56_hda_apply_calibration(struct cs35l56_hda *cs35l56) 575 563 { 576 564 int ret; ··· 583 595 char *wmfw_filename = NULL; 584 596 unsigned int preloaded_fw_ver; 585 597 bool firmware_missing; 586 - bool add_dsp_controls_required = false; 587 598 int ret; 588 - 589 - /* 590 - * control_work must be flushed before proceeding, but we can't do that 591 - * here as it would create a deadlock on controls_rwsem so it must be 592 - * performed before queuing dsp_work. 593 - */ 594 - WARN_ON_ONCE(work_busy(&cs35l56->control_work)); 595 599 596 600 /* 597 601 * Prepare for a new DSP power-up. If the DSP has had firmware 598 602 * downloaded previously then it needs to be powered down so that it 599 - * can be updated and if hadn't been patched before then the controls 600 - * will need to be added once firmware download succeeds. 603 + * can be updated. 601 604 */ 602 605 if (cs35l56->base.fw_patched) 603 606 cs_dsp_power_down(&cs35l56->cs_dsp); 604 - else 605 - add_dsp_controls_required = true; 606 607 607 608 cs35l56->base.fw_patched = false; 608 609 ··· 675 698 CS35L56_FIRMWARE_MISSING); 676 699 cs35l56->base.fw_patched = true; 677 700 678 - /* 679 - * Adding controls is deferred to prevent a lock inversion - ALSA takes 680 - * the controls_rwsem when adding a control, the get() / put() 681 - * functions of a control are called holding controls_rwsem and those 682 - * that depend on running firmware wait for dsp_work() to complete. 683 - */ 684 - if (add_dsp_controls_required) 685 - queue_work(system_long_wq, &cs35l56->control_work); 686 - 687 701 ret = cs_dsp_run(&cs35l56->cs_dsp); 688 702 if (ret) 689 703 dev_dbg(cs35l56->base.dev, "%s: cs_dsp_run ret %d\n", __func__, ret); ··· 721 753 strscpy(comp->name, dev_name(dev), sizeof(comp->name)); 722 754 comp->playback_hook = cs35l56_hda_playback_hook; 723 755 724 - flush_work(&cs35l56->control_work); 725 756 queue_work(system_long_wq, &cs35l56->dsp_work); 726 757 727 758 cs35l56_hda_create_controls(cs35l56); ··· 742 775 struct hda_component *comp; 743 776 744 777 cancel_work_sync(&cs35l56->dsp_work); 745 - cancel_work_sync(&cs35l56->control_work); 746 778 747 779 cs35l56_hda_remove_controls(cs35l56); 748 780 ··· 772 806 struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev); 773 807 774 808 cs35l56_hda_wait_dsp_ready(cs35l56); 775 - flush_work(&cs35l56->control_work); 776 809 777 810 if (cs35l56->playing) 778 811 cs35l56_hda_pause(cs35l56); ··· 991 1026 dev_set_drvdata(cs35l56->base.dev, cs35l56); 992 1027 993 1028 INIT_WORK(&cs35l56->dsp_work, cs35l56_hda_dsp_work); 994 - INIT_WORK(&cs35l56->control_work, cs35l56_hda_create_dsp_controls_work); 995 1029 996 1030 ret = cs35l56_hda_read_acpi(cs35l56, hid, id); 997 1031 if (ret)
-1
sound/pci/hda/cs35l56_hda.h
··· 23 23 struct cs35l56_base base; 24 24 struct hda_codec *codec; 25 25 struct work_struct dsp_work; 26 - struct work_struct control_work; 27 26 28 27 int index; 29 28 const char *system_name;
+2
sound/pci/hda/patch_hdmi.c
··· 1989 1989 } 1990 1990 1991 1991 static const struct snd_pci_quirk force_connect_list[] = { 1992 + SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1), 1993 + SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1), 1992 1994 SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), 1993 1995 SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), 1994 1996 SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+1
sound/pci/hda/patch_realtek.c
··· 10678 10678 SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK), 10679 10679 SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10680 10680 SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10681 + SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10681 10682 10682 10683 #if 0 10683 10684 /* Below is a quirk table taken from the old code.
+21
sound/soc/amd/yc/acp6x-mach.c
··· 224 224 .driver_data = &acp6x_card, 225 225 .matches = { 226 226 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 227 + DMI_MATCH(DMI_PRODUCT_NAME, "21M3"), 228 + } 229 + }, 230 + { 231 + .driver_data = &acp6x_card, 232 + .matches = { 233 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 227 234 DMI_MATCH(DMI_PRODUCT_NAME, "21M5"), 228 235 } 229 236 }, ··· 420 413 .driver_data = &acp6x_card, 421 414 .matches = { 422 415 DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 416 + DMI_MATCH(DMI_BOARD_NAME, "8A44"), 417 + } 418 + }, 419 + { 420 + .driver_data = &acp6x_card, 421 + .matches = { 422 + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 423 423 DMI_MATCH(DMI_BOARD_NAME, "8A22"), 424 424 } 425 425 }, ··· 435 421 .matches = { 436 422 DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 437 423 DMI_MATCH(DMI_BOARD_NAME, "8A3E"), 424 + } 425 + }, 426 + { 427 + .driver_data = &acp6x_card, 428 + .matches = { 429 + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 430 + DMI_MATCH(DMI_BOARD_NAME, "8B27"), 438 431 } 439 432 }, 440 433 {
+1 -1
sound/soc/codecs/cs-amp-lib.c
··· 108 108 109 109 KUNIT_STATIC_STUB_REDIRECT(cs_amp_get_efi_variable, name, guid, size, buf); 110 110 111 - if (IS_ENABLED(CONFIG_EFI)) 111 + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) 112 112 return efi.get_variable(name, guid, &attr, size, buf); 113 113 114 114 return EFI_NOT_FOUND;
+2 -9
sound/soc/codecs/cs35l45.c
··· 176 176 struct snd_kcontrol *kcontrol; 177 177 struct snd_kcontrol_volatile *vd; 178 178 unsigned int index_offset; 179 - char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 180 179 181 - if (component->name_prefix) 182 - snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s", 183 - component->name_prefix, ctl_name); 184 - else 185 - snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s", ctl_name); 186 - 187 - kcontrol = snd_soc_card_get_kcontrol_locked(component->card, name); 180 + kcontrol = snd_soc_component_get_kcontrol_locked(component, ctl_name); 188 181 if (!kcontrol) { 189 - dev_err(component->dev, "Can't find kcontrol %s\n", name); 182 + dev_err(component->dev, "Can't find kcontrol %s\n", ctl_name); 190 183 return -EINVAL; 191 184 } 192 185
+77
sound/soc/codecs/cs35l56-sdw.c
··· 23 23 /* Register addresses are offset when sent over SoundWire */ 24 24 #define CS35L56_SDW_ADDR_OFFSET 0x8000 25 25 26 + /* Cirrus bus bridge registers */ 27 + #define CS35L56_SDW_MEM_ACCESS_STATUS 0xd0 28 + #define CS35L56_SDW_MEM_READ_DATA 0xd8 29 + 30 + #define CS35L56_SDW_LAST_LATE BIT(3) 31 + #define CS35L56_SDW_CMD_IN_PROGRESS BIT(2) 32 + #define CS35L56_SDW_RDATA_RDY BIT(0) 33 + 34 + #define CS35L56_LATE_READ_POLL_US 10 35 + #define CS35L56_LATE_READ_TIMEOUT_US 1000 36 + 37 + static int cs35l56_sdw_poll_mem_status(struct sdw_slave *peripheral, 38 + unsigned int mask, 39 + unsigned int match) 40 + { 41 + int ret, val; 42 + 43 + ret = read_poll_timeout(sdw_read_no_pm, val, 44 + (val < 0) || ((val & mask) == match), 45 + CS35L56_LATE_READ_POLL_US, CS35L56_LATE_READ_TIMEOUT_US, 46 + false, peripheral, CS35L56_SDW_MEM_ACCESS_STATUS); 47 + if (ret < 0) 48 + return ret; 49 + 50 + if (val < 0) 51 + return val; 52 + 53 + return 0; 54 + } 55 + 56 + static int cs35l56_sdw_slow_read(struct sdw_slave *peripheral, unsigned int reg, 57 + u8 *buf, size_t val_size) 58 + { 59 + int ret, i; 60 + 61 + reg += CS35L56_SDW_ADDR_OFFSET; 62 + 63 + for (i = 0; i < val_size; i += sizeof(u32)) { 64 + /* Poll for bus bridge idle */ 65 + ret = cs35l56_sdw_poll_mem_status(peripheral, 66 + CS35L56_SDW_CMD_IN_PROGRESS, 67 + 0); 68 + if (ret < 0) { 69 + dev_err(&peripheral->dev, "!CMD_IN_PROGRESS fail: %d\n", ret); 70 + return ret; 71 + } 72 + 73 + /* Reading LSByte triggers read of register to holding buffer */ 74 + sdw_read_no_pm(peripheral, reg + i); 75 + 76 + /* Wait for data available */ 77 + ret = cs35l56_sdw_poll_mem_status(peripheral, 78 + CS35L56_SDW_RDATA_RDY, 79 + CS35L56_SDW_RDATA_RDY); 80 + if (ret < 0) { 81 + dev_err(&peripheral->dev, "RDATA_RDY fail: %d\n", ret); 82 + return ret; 83 + } 84 + 85 + /* Read data from buffer */ 86 + ret = sdw_nread_no_pm(peripheral, CS35L56_SDW_MEM_READ_DATA, 87 + sizeof(u32), &buf[i]); 88 + if (ret) { 89 + dev_err(&peripheral->dev, "Late read @%#x failed: %d\n", reg + i, ret); 90 + return ret; 91 + } 92 + 93 + swab32s((u32 *)&buf[i]); 94 + } 95 + 96 + return 0; 97 + } 98 + 26 99 static int cs35l56_sdw_read_one(struct sdw_slave *peripheral, unsigned int reg, void *buf) 27 100 { 28 101 int ret; ··· 121 48 int ret; 122 49 123 50 reg = le32_to_cpu(*(const __le32 *)reg_buf); 51 + 52 + if (cs35l56_is_otp_register(reg)) 53 + return cs35l56_sdw_slow_read(peripheral, reg, buf8, val_size); 54 + 124 55 reg += CS35L56_SDW_ADDR_OFFSET; 125 56 126 57 if (val_size == 4)
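The slow-read path above is a poll/trigger/poll/read sequence built on read_poll_timeout(). A userspace sketch of what each of those poll steps amounts to, with an invented read_status callback standing in for sdw_read_no_pm():

#include <errno.h>
#include <unistd.h>

static int poll_status(int (*read_status)(void), unsigned int mask,
                       unsigned int match, unsigned int sleep_us,
                       unsigned int timeout_us)
{
        unsigned int waited = 0;

        for (;;) {
                int val = read_status();

                if (val < 0)
                        return val;                     /* bus error wins */
                if (((unsigned int)val & mask) == match)
                        return 0;                       /* condition met */
                if (waited >= timeout_us)
                        return -ETIMEDOUT;
                usleep(sleep_us);
                waited += sleep_us;
        }
}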
+1
sound/soc/codecs/cs35l56-shared.c
··· 36 36 { CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 }, 37 37 { CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 }, 38 38 { CS35L56_SWIRE_DP3_CH4_INPUT, 0x00000028 }, 39 + { CS35L56_IRQ1_MASK_18, 0x1f7df0ff }, 39 40 40 41 /* These are not reset by a soft-reset, so patch to defaults. */ 41 42 { CS35L56_MAIN_RENDER_USER_MUTE, 0x00000000 },
+11
sound/soc/codecs/cs35l56.c
··· 1095 1095 } 1096 1096 EXPORT_SYMBOL_GPL(cs35l56_system_resume); 1097 1097 1098 + static int cs35l56_control_add_nop(struct wm_adsp *dsp, struct cs_dsp_coeff_ctl *cs_ctl) 1099 + { 1100 + return 0; 1101 + } 1102 + 1098 1103 static int cs35l56_dsp_init(struct cs35l56_private *cs35l56) 1099 1104 { 1100 1105 struct wm_adsp *dsp; ··· 1121 1116 */ 1122 1117 dsp->fw = 12; 1123 1118 dsp->wmfw_optional = true; 1119 + 1120 + /* 1121 + * None of the firmware controls need to be exported so add a no-op 1122 + * callback that suppresses creating an ALSA control. 1123 + */ 1124 + dsp->control_add = &cs35l56_control_add_nop; 1124 1125 1125 1126 dev_dbg(cs35l56->base.dev, "DSP system name: '%s'\n", dsp->system_name); 1126 1127
+57 -18
sound/soc/codecs/cs42l43.c
··· 7 7 8 8 #include <linux/bitops.h> 9 9 #include <linux/bits.h> 10 + #include <linux/build_bug.h> 10 11 #include <linux/clk.h> 11 12 #include <linux/device.h> 12 13 #include <linux/err.h> ··· 253 252 static irqreturn_t cs42l43_mic_shutter(int irq, void *data) 254 253 { 255 254 struct cs42l43_codec *priv = data; 256 - static const char * const controls[] = { 257 - "Decimator 1 Switch", 258 - "Decimator 2 Switch", 259 - "Decimator 3 Switch", 260 - "Decimator 4 Switch", 261 - }; 262 - int i, ret; 255 + struct snd_soc_component *component = priv->component; 256 + int i; 263 257 264 258 dev_dbg(priv->dev, "Microphone shutter changed\n"); 265 259 266 - if (!priv->component) 260 + if (!component) 267 261 return IRQ_NONE; 268 262 269 - for (i = 0; i < ARRAY_SIZE(controls); i++) { 270 - ret = snd_soc_component_notify_control(priv->component, 271 - controls[i]); 272 - if (ret) 263 + for (i = 1; i < ARRAY_SIZE(priv->kctl); i++) { 264 + if (!priv->kctl[i]) 273 265 return IRQ_NONE; 266 + 267 + snd_ctl_notify(component->card->snd_card, 268 + SNDRV_CTL_EVENT_MASK_VALUE, &priv->kctl[i]->id); 274 269 } 275 270 276 271 return IRQ_HANDLED; ··· 275 278 static irqreturn_t cs42l43_spk_shutter(int irq, void *data) 276 279 { 277 280 struct cs42l43_codec *priv = data; 278 - int ret; 281 + struct snd_soc_component *component = priv->component; 279 282 280 283 dev_dbg(priv->dev, "Speaker shutter changed\n"); 281 284 282 - if (!priv->component) 285 + if (!component) 283 286 return IRQ_NONE; 284 287 285 - ret = snd_soc_component_notify_control(priv->component, 286 - "Speaker Digital Switch"); 287 - if (ret) 288 + if (!priv->kctl[0]) 288 289 return IRQ_NONE; 290 + 291 + snd_ctl_notify(component->card->snd_card, 292 + SNDRV_CTL_EVENT_MASK_VALUE, &priv->kctl[0]->id); 289 293 290 294 return IRQ_HANDLED; 291 295 } ··· 588 590 return 0; 589 591 } 590 592 593 + static int cs42l43_dai_probe(struct snd_soc_dai *dai) 594 + { 595 + struct snd_soc_component *component = dai->component; 596 + struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component); 597 + static const char * const controls[] = { 598 + "Speaker Digital Switch", 599 + "Decimator 1 Switch", 600 + "Decimator 2 Switch", 601 + "Decimator 3 Switch", 602 + "Decimator 4 Switch", 603 + }; 604 + int i; 605 + 606 + static_assert(ARRAY_SIZE(controls) == ARRAY_SIZE(priv->kctl)); 607 + 608 + for (i = 0; i < ARRAY_SIZE(controls); i++) { 609 + if (priv->kctl[i]) 610 + continue; 611 + 612 + priv->kctl[i] = snd_soc_component_get_kcontrol(component, controls[i]); 613 + } 614 + 615 + return 0; 616 + } 617 + 618 + static int cs42l43_dai_remove(struct snd_soc_dai *dai) 619 + { 620 + struct snd_soc_component *component = dai->component; 621 + struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component); 622 + int i; 623 + 624 + for (i = 0; i < ARRAY_SIZE(priv->kctl); i++) 625 + priv->kctl[i] = NULL; 626 + 627 + return 0; 628 + } 629 + 591 630 static const struct snd_soc_dai_ops cs42l43_asp_ops = { 631 + .probe = cs42l43_dai_probe, 632 + .remove = cs42l43_dai_remove, 592 633 .startup = cs42l43_startup, 593 634 .hw_params = cs42l43_asp_hw_params, 594 635 .set_fmt = cs42l43_asp_set_fmt, ··· 645 608 return ret; 646 609 647 610 return cs42l43_set_sample_rate(substream, params, dai); 648 - }; 611 + } 649 612 650 613 static const struct snd_soc_dai_ops cs42l43_sdw_ops = { 614 + .probe = cs42l43_dai_probe, 615 + .remove = cs42l43_dai_remove, 651 616 .startup = cs42l43_startup, 652 617 .set_stream = cs42l43_sdw_set_stream, 653 618 .hw_params = cs42l43_sdw_hw_params,
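Rather than looking controls up by name from the shutter interrupt handlers, the driver now resolves them once at DAI probe time and keeps the pointers in priv->kctl[], so the IRQ path only dereferences cached pointers. A reduced mock-up of that split; the control names are copied from the hunk, everything else is invented:

#include <string.h>

struct kctl { const char *name; };

static struct kctl all_ctls[] = {
        { "Speaker Digital Switch" }, { "Decimator 1 Switch" },
};
static struct kctl *cached[2];

static struct kctl *slow_lookup(const char *name)
{
        for (size_t i = 0; i < sizeof(all_ctls) / sizeof(all_ctls[0]); i++)
                if (!strcmp(all_ctls[i].name, name))
                        return &all_ctls[i];
        return NULL;
}

static void dai_probe(void)             /* process context: lookups OK */
{
        cached[0] = slow_lookup("Speaker Digital Switch");
        cached[1] = slow_lookup("Decimator 1 Switch");
}

static void shutter_irq(void)           /* IRQ context: pointers only */
{
        if (cached[0]) {
                /* notify via cached[0] with no name lookup */
        }
}

int main(void)
{
        dai_probe();
        shutter_irq();
        return 0;
}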
+2
sound/soc/codecs/cs42l43.h
··· 100 100 struct delayed_work hp_ilimit_clear_work; 101 101 bool hp_ilimited; 102 102 int hp_ilimit_count; 103 + 104 + struct snd_kcontrol *kctl[5]; 103 105 }; 104 106 105 107 #if IS_REACHABLE(CONFIG_SND_SOC_CS42L43_SDW)
+4 -4
sound/soc/codecs/cs530x.c
··· 129 129 130 130 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -1270, 50, 0); 131 131 132 - static const char * const cs530x_in_hpf_text[] = { 132 + static const char * const cs530x_in_filter_text[] = { 133 133 "Min Phase Slow Roll-off", 134 134 "Min Phase Fast Roll-off", 135 135 "Linear Phase Slow Roll-off", 136 136 "Linear Phase Fast Roll-off", 137 137 }; 138 138 139 - static SOC_ENUM_SINGLE_DECL(cs530x_in_hpf_enum, CS530X_IN_FILTER, 139 + static SOC_ENUM_SINGLE_DECL(cs530x_in_filter_enum, CS530X_IN_FILTER, 140 140 CS530X_IN_FILTER_SHIFT, 141 - cs530x_in_hpf_text); 141 + cs530x_in_filter_text); 142 142 143 143 static const char * const cs530x_in_4ch_sum_text[] = { 144 144 "None", ··· 189 189 SOC_SINGLE_EXT_TLV("IN2 Volume", CS530X_IN_VOL_CTRL1_1, 0, 255, 1, 190 190 snd_soc_get_volsw, cs530x_put_volsw_vu, in_vol_tlv), 191 191 192 - SOC_ENUM("IN HPF Select", cs530x_in_hpf_enum), 192 + SOC_ENUM("IN DEC Filter Select", cs530x_in_filter_enum), 193 193 SOC_ENUM("Input Ramp Up", cs530x_ramp_inc_enum), 194 194 SOC_ENUM("Input Ramp Down", cs530x_ramp_dec_enum), 195 195
+2
sound/soc/codecs/es8326.c
··· 843 843 es8326_disable_micbias(es8326->component); 844 844 if (es8326->jack->status & SND_JACK_HEADPHONE) { 845 845 dev_dbg(comp->dev, "Report hp remove event\n"); 846 + snd_soc_jack_report(es8326->jack, 0, 847 + SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2); 846 848 snd_soc_jack_report(es8326->jack, 0, SND_JACK_HEADSET); 847 849 /* mute adc when mic path switch */ 848 850 regmap_write(es8326->regmap, ES8326_ADC1_SRC, 0x44);
+2
sound/soc/codecs/lpass-va-macro.c
··· 1472 1472 1473 1473 if ((core_id_0 == 0x01) && (core_id_1 == 0x0F)) 1474 1474 version = LPASS_CODEC_VERSION_2_0; 1475 + if ((core_id_0 == 0x02) && (core_id_1 == 0x0F) && core_id_2 == 0x01) 1476 + version = LPASS_CODEC_VERSION_2_0; 1475 1477 if ((core_id_0 == 0x02) && (core_id_1 == 0x0E)) 1476 1478 version = LPASS_CODEC_VERSION_2_1; 1477 1479 if ((core_id_0 == 0x02) && (core_id_1 == 0x0F) && (core_id_2 == 0x50 || core_id_2 == 0x51))
+1 -1
sound/soc/codecs/nau8822.c
··· 736 736 return ret; 737 737 } 738 738 739 - dev_info(component->dev, 739 + dev_dbg(component->dev, 740 740 "pll_int=%x pll_frac=%x mclk_scaler=%x pre_factor=%x\n", 741 741 pll_param->pll_int, pll_param->pll_frac, 742 742 pll_param->mclk_scaler, pll_param->pre_factor);
+2 -2
sound/soc/codecs/wcd937x-sdw.c
··· 1049 1049 pdev->prop.lane_control_support = true; 1050 1050 pdev->prop.simple_clk_stop_capable = true; 1051 1051 if (wcd->is_tx) { 1052 - pdev->prop.source_ports = GENMASK(WCD937X_MAX_TX_SWR_PORTS, 0); 1052 + pdev->prop.source_ports = GENMASK(WCD937X_MAX_TX_SWR_PORTS - 1, 0); 1053 1053 pdev->prop.src_dpn_prop = wcd937x_dpn_prop; 1054 1054 wcd->ch_info = &wcd937x_sdw_tx_ch_info[0]; 1055 1055 pdev->prop.wake_capable = true; ··· 1062 1062 /* Start in cache-only until device is enumerated */ 1063 1063 regcache_cache_only(wcd->regmap, true); 1064 1064 } else { 1065 - pdev->prop.sink_ports = GENMASK(WCD937X_MAX_SWR_PORTS, 0); 1065 + pdev->prop.sink_ports = GENMASK(WCD937X_MAX_SWR_PORTS - 1, 0); 1066 1066 pdev->prop.sink_dpn_prop = wcd937x_dpn_prop; 1067 1067 wcd->ch_info = &wcd937x_sdw_rx_ch_info[0]; 1068 1068 }
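This and the wcd938x, wcd939x, wsa881x, wsa883x and wsa884x hunks that follow are the same off-by-one fix: GENMASK(h, l) sets bits l..h inclusive, that is h - l + 1 bits, so a mask covering N ports must be GENMASK(N - 1, 0). A runnable demonstration, with a macro of the same shape as the kernel's:

#include <stdio.h>

#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (8 * sizeof(long) - 1 - (h))))

int main(void)
{
        int max_ports = 5;

        printf("GENMASK(%d, 0)     = %#lx (%d bits set, one too many)\n",
               max_ports, GENMASK(max_ports, 0), max_ports + 1);
        printf("GENMASK(%d - 1, 0) = %#lx (%d bits set, correct)\n",
               max_ports, GENMASK(max_ports - 1, 0), max_ports);
        return 0;
}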
+2 -2
sound/soc/codecs/wcd938x-sdw.c
··· 1252 1252 pdev->prop.lane_control_support = true; 1253 1253 pdev->prop.simple_clk_stop_capable = true; 1254 1254 if (wcd->is_tx) { 1255 - pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); 1255 + pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); 1256 1256 pdev->prop.src_dpn_prop = wcd938x_dpn_prop; 1257 1257 wcd->ch_info = &wcd938x_sdw_tx_ch_info[0]; 1258 1258 pdev->prop.wake_capable = true; 1259 1259 } else { 1260 - pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); 1260 + pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); 1261 1261 pdev->prop.sink_dpn_prop = wcd938x_dpn_prop; 1262 1262 wcd->ch_info = &wcd938x_sdw_rx_ch_info[0]; 1263 1263 }
+2 -2
sound/soc/codecs/wcd939x-sdw.c
··· 1453 1453 pdev->prop.lane_control_support = true; 1454 1454 pdev->prop.simple_clk_stop_capable = true; 1455 1455 if (wcd->is_tx) { 1456 - pdev->prop.source_ports = GENMASK(WCD939X_MAX_TX_SWR_PORTS, 0); 1456 + pdev->prop.source_ports = GENMASK(WCD939X_MAX_TX_SWR_PORTS - 1, 0); 1457 1457 pdev->prop.src_dpn_prop = wcd939x_tx_dpn_prop; 1458 1458 wcd->ch_info = &wcd939x_sdw_tx_ch_info[0]; 1459 1459 pdev->prop.wake_capable = true; 1460 1460 } else { 1461 - pdev->prop.sink_ports = GENMASK(WCD939X_MAX_RX_SWR_PORTS, 0); 1461 + pdev->prop.sink_ports = GENMASK(WCD939X_MAX_RX_SWR_PORTS - 1, 0); 1462 1462 pdev->prop.sink_dpn_prop = wcd939x_rx_dpn_prop; 1463 1463 wcd->ch_info = &wcd939x_sdw_rx_ch_info[0]; 1464 1464 }
+14 -3
sound/soc/codecs/wm_adsp.c
··· 583 583 kfree(kcontrol); 584 584 } 585 585 586 - static int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl) 586 + int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl) 587 587 { 588 588 struct wm_adsp *dsp = container_of(cs_ctl->dsp, struct wm_adsp, cs_dsp); 589 589 struct cs_dsp *cs_dsp = &dsp->cs_dsp; ··· 657 657 kfree(ctl); 658 658 659 659 return ret; 660 + } 661 + EXPORT_SYMBOL_GPL(wm_adsp_control_add); 662 + 663 + static int wm_adsp_control_add_cb(struct cs_dsp_coeff_ctl *cs_ctl) 664 + { 665 + struct wm_adsp *dsp = container_of(cs_ctl->dsp, struct wm_adsp, cs_dsp); 666 + 667 + if (dsp->control_add) 668 + return (dsp->control_add)(dsp, cs_ctl); 669 + else 670 + return wm_adsp_control_add(cs_ctl); 660 671 } 661 672 662 673 static void wm_adsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl) ··· 2083 2072 EXPORT_SYMBOL_GPL(wm_halo_wdt_expire); 2084 2073 2085 2074 static const struct cs_dsp_client_ops wm_adsp1_client_ops = { 2086 - .control_add = wm_adsp_control_add, 2075 + .control_add = wm_adsp_control_add_cb, 2087 2076 .control_remove = wm_adsp_control_remove, 2088 2077 }; 2089 2078 2090 2079 static const struct cs_dsp_client_ops wm_adsp2_client_ops = { 2091 - .control_add = wm_adsp_control_add, 2080 + .control_add = wm_adsp_control_add_cb, 2092 2081 .control_remove = wm_adsp_control_remove, 2093 2082 .pre_run = wm_adsp_pre_run, 2094 2083 .post_run = wm_adsp_event_post_run,
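wm_adsp gains an optional control_add hook, with NULL meaning "keep the old behaviour"; the cs35l56.c hunk earlier plugs in a no-op to suppress ALSA control creation. The dispatch reduced to its shape, with stand-in names:

struct dsp {
        int (*control_add)(struct dsp *d);      /* NULL means "use default" */
};

static int default_control_add(struct dsp *d)
{
        return 1;       /* stands in for creating the ALSA control */
}

static int control_add_cb(struct dsp *d)
{
        if (d->control_add)
                return d->control_add(d);       /* driver override wins */
        return default_control_add(d);          /* legacy behaviour */
}

int main(void)
{
        struct dsp d = { 0 };

        return control_add_cb(&d) == 1 ? 0 : 1;
}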
+3
sound/soc/codecs/wm_adsp.h
··· 37 37 bool wmfw_optional; 38 38 39 39 struct work_struct boot_work; 40 + int (*control_add)(struct wm_adsp *dsp, struct cs_dsp_coeff_ctl *cs_ctl); 40 41 int (*pre_run)(struct wm_adsp *dsp); 41 42 42 43 bool preloaded; ··· 133 132 int wm_adsp_compr_copy(struct snd_soc_component *component, 134 133 struct snd_compr_stream *stream, 135 134 char __user *buf, size_t count); 135 + 136 + int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl); 136 137 int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type, 137 138 unsigned int alg, void *buf, size_t len); 138 139 int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+1 -1
sound/soc/codecs/wsa881x.c
··· 1152 1152 wsa881x->sconfig.frame_rate = 48000; 1153 1153 wsa881x->sconfig.direction = SDW_DATA_DIR_RX; 1154 1154 wsa881x->sconfig.type = SDW_STREAM_PDM; 1155 - pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0); 1155 + pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS - 1, 0); 1156 1156 pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; 1157 1157 pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; 1158 1158 pdev->prop.clk_stop_mode1 = true;
+1 -1
sound/soc/codecs/wsa883x.c
··· 1406 1406 WSA883X_MAX_SWR_PORTS)) 1407 1407 dev_dbg(dev, "Static Port mapping not specified\n"); 1408 1408 1409 - pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS, 0); 1409 + pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS - 1, 0); 1410 1410 pdev->prop.simple_clk_stop_capable = true; 1411 1411 pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; 1412 1412 pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+1 -1
sound/soc/codecs/wsa884x.c
··· 1895 1895 WSA884X_MAX_SWR_PORTS)) 1896 1896 dev_dbg(dev, "Static Port mapping not specified\n"); 1897 1897 1898 - pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS, 0); 1898 + pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS - 1, 0); 1899 1899 pdev->prop.simple_clk_stop_capable = true; 1900 1900 pdev->prop.sink_dpn_prop = wsa884x_sink_dpn_prop; 1901 1901 pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+15 -5
sound/soc/fsl/fsl_micfil.c
··· 831 831 {REG_MICFIL_CTRL1, 0x00000000}, 832 832 {REG_MICFIL_CTRL2, 0x00000000}, 833 833 {REG_MICFIL_STAT, 0x00000000}, 834 - {REG_MICFIL_FIFO_CTRL, 0x00000007}, 834 + {REG_MICFIL_FIFO_CTRL, 0x0000001F}, 835 835 {REG_MICFIL_FIFO_STAT, 0x00000000}, 836 836 {REG_MICFIL_DATACH0, 0x00000000}, 837 837 {REG_MICFIL_DATACH1, 0x00000000}, ··· 855 855 856 856 static bool fsl_micfil_readable_reg(struct device *dev, unsigned int reg) 857 857 { 858 + struct fsl_micfil *micfil = dev_get_drvdata(dev); 859 + 858 860 switch (reg) { 859 861 case REG_MICFIL_CTRL1: 860 862 case REG_MICFIL_CTRL2: ··· 874 872 case REG_MICFIL_DC_CTRL: 875 873 case REG_MICFIL_OUT_CTRL: 876 874 case REG_MICFIL_OUT_STAT: 877 - case REG_MICFIL_FSYNC_CTRL: 878 - case REG_MICFIL_VERID: 879 - case REG_MICFIL_PARAM: 880 875 case REG_MICFIL_VAD0_CTRL1: 881 876 case REG_MICFIL_VAD0_CTRL2: 882 877 case REG_MICFIL_VAD0_STAT: ··· 882 883 case REG_MICFIL_VAD0_NDATA: 883 884 case REG_MICFIL_VAD0_ZCD: 884 885 return true; 886 + case REG_MICFIL_FSYNC_CTRL: 887 + case REG_MICFIL_VERID: 888 + case REG_MICFIL_PARAM: 889 + if (micfil->soc->use_verid) 890 + return true; 891 + fallthrough; 885 892 default: 886 893 return false; 887 894 } ··· 895 890 896 891 static bool fsl_micfil_writeable_reg(struct device *dev, unsigned int reg) 897 892 { 893 + struct fsl_micfil *micfil = dev_get_drvdata(dev); 894 + 898 895 switch (reg) { 899 896 case REG_MICFIL_CTRL1: 900 897 case REG_MICFIL_CTRL2: ··· 906 899 case REG_MICFIL_DC_CTRL: 907 900 case REG_MICFIL_OUT_CTRL: 908 901 case REG_MICFIL_OUT_STAT: /* Write 1 to Clear */ 909 - case REG_MICFIL_FSYNC_CTRL: 910 902 case REG_MICFIL_VAD0_CTRL1: 911 903 case REG_MICFIL_VAD0_CTRL2: 912 904 case REG_MICFIL_VAD0_STAT: /* Write 1 to Clear */ ··· 913 907 case REG_MICFIL_VAD0_NCONFIG: 914 908 case REG_MICFIL_VAD0_ZCD: 915 909 return true; 910 + case REG_MICFIL_FSYNC_CTRL: 911 + if (micfil->soc->use_verid) 912 + return true; 913 + fallthrough; 916 914 default: 917 915 return false; 918 916 }
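The register-access predicates now depend on per-SoC data: FSYNC_CTRL, VERID and PARAM are only accessible where use_verid is set, and otherwise fall through to "not accessible". A compact stand-alone version of that switch shape, with register names abbreviated and use_verid passed as a plain flag:

#include <stdbool.h>

enum reg { REG_CTRL1, REG_FSYNC_CTRL, REG_VERID, REG_PARAM };

static bool readable_reg(enum reg r, bool use_verid)
{
        switch (r) {
        case REG_CTRL1:
                return true;
        case REG_FSYNC_CTRL:
        case REG_VERID:
        case REG_PARAM:
                if (use_verid)
                        return true;
                /* falls through (the kernel spells this "fallthrough;") */
        default:
                return false;
        }
}

int main(void)
{
        return readable_reg(REG_VERID, true) ? 0 : 1;
}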
+1 -1
sound/soc/fsl/fsl_micfil.h
··· 72 72 #define MICFIL_STAT_CHXF(ch) BIT(ch) 73 73 74 74 /* MICFIL FIFO Control Register -- REG_MICFIL_FIFO_CTRL 0x10 */ 75 - #define MICFIL_FIFO_CTRL_FIFOWMK GENMASK(2, 0) 75 + #define MICFIL_FIFO_CTRL_FIFOWMK GENMASK(4, 0) 76 76 77 77 /* MICFIL FIFO Status Register -- REG_MICFIL_FIFO_STAT 0x14 */ 78 78 #define MICFIL_FIFO_STAT_FIFOX_OVER(ch) BIT(ch)
+10 -16
sound/soc/meson/axg-fifo.c
··· 207 207 status = FIELD_GET(STATUS1_INT_STS, status); 208 208 axg_fifo_ack_irq(fifo, status); 209 209 210 - /* Use the thread to call period elapsed on nonatomic links */ 211 - if (status & FIFO_INT_COUNT_REPEAT) 212 - return IRQ_WAKE_THREAD; 210 + if (status & ~FIFO_INT_COUNT_REPEAT) 211 + dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", 212 + status); 213 213 214 - dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", 215 - status); 214 + if (status & FIFO_INT_COUNT_REPEAT) { 215 + snd_pcm_period_elapsed(ss); 216 + return IRQ_HANDLED; 217 + } 216 218 217 219 return IRQ_NONE; 218 - } 219 - 220 - static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id) 221 - { 222 - struct snd_pcm_substream *ss = dev_id; 223 - 224 - snd_pcm_period_elapsed(ss); 225 - 226 - return IRQ_HANDLED; 227 220 } 228 221 229 222 int axg_fifo_pcm_open(struct snd_soc_component *component, ··· 244 251 if (ret) 245 252 return ret; 246 253 247 - ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block, 248 - axg_fifo_pcm_irq_block_thread, 254 + /* Use the threaded irq handler only with non-atomic links */ 255 + ret = request_threaded_irq(fifo->irq, NULL, 256 + axg_fifo_pcm_irq_block, 249 257 IRQF_ONESHOT, dev_name(dev), ss); 250 258 if (ret) 251 259 return ret;
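The rework above relies on a request_threaded_irq() property: with a NULL primary handler and IRQF_ONESHOT, the supplied handler runs entirely in a kernel thread, where nonatomic work such as calling snd_pcm_period_elapsed() for a nonatomic link is allowed. A hedged sketch of that registration, with invented names:

#include <linux/interrupt.h>

static irqreturn_t my_fifo_irq_thread(int irq, void *dev_id)
{
        /* runs in a kernel thread: nonatomic calls are safe here */
        return IRQ_HANDLED;
}

static int my_fifo_request_irq(unsigned int irq, void *dev_id)
{
        /* a NULL primary handler requires IRQF_ONESHOT: the line stays
         * masked until the thread handler finishes */
        return request_threaded_irq(irq, NULL, my_fifo_irq_thread,
                                    IRQF_ONESHOT, "my-fifo", dev_id);
}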
+34 -8
sound/soc/soc-component.c
··· 236 236 } 237 237 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked); 238 238 239 + static void soc_get_kcontrol_name(struct snd_soc_component *component, 240 + char *buf, int size, const char * const ctl) 241 + { 242 + /* When updating, change also snd_soc_dapm_widget_name_cmp() */ 243 + if (component->name_prefix) 244 + snprintf(buf, size, "%s %s", component->name_prefix, ctl); 245 + else 246 + snprintf(buf, size, "%s", ctl); 247 + } 248 + 249 + struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component, 250 + const char * const ctl) 251 + { 252 + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 253 + 254 + soc_get_kcontrol_name(component, name, ARRAY_SIZE(name), ctl); 255 + 256 + return snd_soc_card_get_kcontrol(component->card, name); 257 + } 258 + EXPORT_SYMBOL_GPL(snd_soc_component_get_kcontrol); 259 + 260 + struct snd_kcontrol * 261 + snd_soc_component_get_kcontrol_locked(struct snd_soc_component *component, 262 + const char * const ctl) 263 + { 264 + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 265 + 266 + soc_get_kcontrol_name(component, name, ARRAY_SIZE(name), ctl); 267 + 268 + return snd_soc_card_get_kcontrol_locked(component->card, name); 269 + } 270 + EXPORT_SYMBOL_GPL(snd_soc_component_get_kcontrol_locked); 271 + 239 272 int snd_soc_component_notify_control(struct snd_soc_component *component, 240 273 const char * const ctl) 241 274 { 242 - char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 243 275 struct snd_kcontrol *kctl; 244 276 245 - /* When updating, change also snd_soc_dapm_widget_name_cmp() */ 246 - if (component->name_prefix) 247 - snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl); 248 - else 249 - snprintf(name, ARRAY_SIZE(name), "%s", ctl); 250 - 251 - kctl = snd_soc_card_get_kcontrol(component->card, name); 277 + kctl = snd_soc_component_get_kcontrol(component, ctl); 252 278 if (!kctl) 253 279 return soc_component_ret(component, -EINVAL); 254 280
+1 -1
sound/soc/sof/mediatek/mt8195/mt8195.c
··· 573 573 static struct snd_sof_of_mach sof_mt8195_machs[] = { 574 574 { 575 575 .compatible = "google,tomato", 576 - .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg" 576 + .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg" 577 577 }, { 578 578 .compatible = "mediatek,mt8195", 579 579 .sof_tplg_filename = "sof-mt8195.tplg"
+1 -1
sound/soc/sti/sti_uniperif.c
··· 352 352 return ret; 353 353 } 354 354 355 - static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) 355 + int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) 356 356 { 357 357 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); 358 358 struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+1
sound/soc/sti/uniperif.h
··· 1380 1380 struct uniperif *reader); 1381 1381 1382 1382 /* common */ 1383 + int sti_uniperiph_dai_probe(struct snd_soc_dai *dai); 1383 1384 int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, 1384 1385 unsigned int fmt); 1385 1386
+1
sound/soc/sti/uniperif_player.c
··· 1038 1038 .startup = uni_player_startup, 1039 1039 .shutdown = uni_player_shutdown, 1040 1040 .prepare = uni_player_prepare, 1041 + .probe = sti_uniperiph_dai_probe, 1041 1042 .trigger = uni_player_trigger, 1042 1043 .hw_params = sti_uniperiph_dai_hw_params, 1043 1044 .set_fmt = sti_uniperiph_dai_set_fmt,
+1
sound/soc/sti/uniperif_reader.c
··· 401 401 .startup = uni_reader_startup, 402 402 .shutdown = uni_reader_shutdown, 403 403 .prepare = uni_reader_prepare, 404 + .probe = sti_uniperiph_dai_probe, 404 405 .trigger = uni_reader_trigger, 405 406 .hw_params = sti_uniperiph_dai_hw_params, 406 407 .set_fmt = sti_uniperiph_dai_set_fmt,
+5
sound/usb/line6/driver.c
··· 286 286 { 287 287 struct usb_line6 *line6 = (struct usb_line6 *)urb->context; 288 288 struct midi_buffer *mb = &line6->line6midi->midibuf_in; 289 + unsigned long flags; 289 290 int done; 290 291 291 292 if (urb->status == -ESHUTDOWN) 292 293 return; 293 294 294 295 if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { 296 + spin_lock_irqsave(&line6->line6midi->lock, flags); 295 297 done = 296 298 line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length); 297 299 ··· 302 300 dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n", 303 301 done, urb->actual_length); 304 302 } 303 + spin_unlock_irqrestore(&line6->line6midi->lock, flags); 305 304 306 305 for (;;) { 306 + spin_lock_irqsave(&line6->line6midi->lock, flags); 307 307 done = 308 308 line6_midibuf_read(mb, line6->buffer_message, 309 309 LINE6_MIDI_MESSAGE_MAXLEN, 310 310 LINE6_MIDIBUF_READ_RX); 311 + spin_unlock_irqrestore(&line6->line6midi->lock, flags); 311 312 312 313 if (done <= 0) 313 314 break;
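The fix wraps every midibuf access in the URB completion path with the MIDI spinlock, using the _irqsave variants because completion handlers can run with interrupts disabled. The locking shape as a kernel-style sketch, with an invented read callback standing in for line6_midibuf_read():

#include <linux/spinlock.h>

static void drain_midibuf(spinlock_t *lock, int (*read_one)(void))
{
        unsigned long flags;
        int done;

        for (;;) {
                spin_lock_irqsave(lock, flags);
                done = read_one();      /* e.g. line6_midibuf_read() */
                spin_unlock_irqrestore(lock, flags);
                if (done <= 0)
                        break;
        }
}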
+4
sound/usb/quirks-table.h
··· 2594 2594 } 2595 2595 }, 2596 2596 2597 + /* Stanton ScratchAmp */ 2598 + { USB_DEVICE(0x103d, 0x0100) }, 2599 + { USB_DEVICE(0x103d, 0x0101) }, 2600 + 2597 2601 /* Novation EMS devices */ 2598 2602 { 2599 2603 USB_DEVICE_VENDOR_SPEC(0x1235, 0x0001),
+54
tools/testing/selftests/bpf/progs/iters.c
··· 1432 1432 return sum; 1433 1433 } 1434 1434 1435 + __u32 upper, select_n, result; 1436 + __u64 global; 1437 + 1438 + static __noinline bool nest_2(char *str) 1439 + { 1440 + /* some insns (including branch insns) to ensure stacksafe() is triggered 1441 + * in nest_2(). This way, stacksafe() can compare frame associated with nest_1(). 1442 + */ 1443 + if (str[0] == 't') 1444 + return true; 1445 + if (str[1] == 'e') 1446 + return true; 1447 + if (str[2] == 's') 1448 + return true; 1449 + if (str[3] == 't') 1450 + return true; 1451 + return false; 1452 + } 1453 + 1454 + static __noinline bool nest_1(int n) 1455 + { 1456 + /* case 0: allocate stack, case 1: no allocate stack */ 1457 + switch (n) { 1458 + case 0: { 1459 + char comm[16]; 1460 + 1461 + if (bpf_get_current_comm(comm, 16)) 1462 + return false; 1463 + return nest_2(comm); 1464 + } 1465 + case 1: 1466 + return nest_2((char *)&global); 1467 + default: 1468 + return false; 1469 + } 1470 + } 1471 + 1472 + SEC("raw_tp") 1473 + __success 1474 + int iter_subprog_check_stacksafe(const void *ctx) 1475 + { 1476 + long i; 1477 + 1478 + bpf_for(i, 0, upper) { 1479 + if (!nest_1(select_n)) { 1480 + result = 1; 1481 + return 0; 1482 + } 1483 + } 1484 + 1485 + result = 2; 1486 + return 0; 1487 + } 1488 + 1435 1489 char _license[] SEC("license") = "GPL";
+35
tools/testing/selftests/core/close_range_test.c
··· 589 589 EXPECT_EQ(close(fd3), 0); 590 590 } 591 591 592 + TEST(close_range_bitmap_corruption) 593 + { 594 + pid_t pid; 595 + int status; 596 + struct __clone_args args = { 597 + .flags = CLONE_FILES, 598 + .exit_signal = SIGCHLD, 599 + }; 600 + 601 + /* get the first 128 descriptors open */ 602 + for (int i = 2; i < 128; i++) 603 + EXPECT_GE(dup2(0, i), 0); 604 + 605 + /* get descriptor table shared */ 606 + pid = sys_clone3(&args, sizeof(args)); 607 + ASSERT_GE(pid, 0); 608 + 609 + if (pid == 0) { 610 + /* unshare and truncate descriptor table down to 64 */ 611 + if (sys_close_range(64, ~0U, CLOSE_RANGE_UNSHARE)) 612 + exit(EXIT_FAILURE); 613 + 614 + ASSERT_EQ(fcntl(64, F_GETFD), -1); 615 + /* ... and verify that the range 64..127 is not 616 + stuck "fully used" according to secondary bitmap */ 617 + EXPECT_EQ(dup(0), 64) 618 + exit(EXIT_FAILURE); 619 + exit(EXIT_SUCCESS); 620 + } 621 + 622 + EXPECT_EQ(waitpid(pid, &status, 0), pid); 623 + EXPECT_EQ(true, WIFEXITED(status)); 624 + EXPECT_EQ(0, WEXITSTATUS(status)); 625 + } 626 + 592 627 TEST_HARNESS_MAIN
+2 -2
tools/testing/selftests/kvm/aarch64/get-reg-list.c
··· 32 32 { 33 33 ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */ 34 34 ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */ 35 - 4, 35 + 8, 36 36 1 37 37 }, 38 38 { 39 39 ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */ 40 40 ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */ 41 - 4, 41 + 8, 42 42 1 43 43 } 44 44 };
+28
tools/testing/selftests/kvm/x86_64/xapic_state_test.c
··· 184 184 kvm_vm_free(vm); 185 185 } 186 186 187 + static void test_x2apic_id(void) 188 + { 189 + struct kvm_lapic_state lapic = {}; 190 + struct kvm_vcpu *vcpu; 191 + struct kvm_vm *vm; 192 + int i; 193 + 194 + vm = vm_create_with_one_vcpu(&vcpu, NULL); 195 + vcpu_set_msr(vcpu, MSR_IA32_APICBASE, MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); 196 + 197 + /* 198 + * Try stuffing a modified x2APIC ID, KVM should ignore the value and 199 + * always return the vCPU's default/readonly x2APIC ID. 200 + */ 201 + for (i = 0; i <= 0xff; i++) { 202 + *(u32 *)(lapic.regs + APIC_ID) = i << 24; 203 + *(u32 *)(lapic.regs + APIC_SPIV) = APIC_SPIV_APIC_ENABLED; 204 + vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic); 205 + 206 + vcpu_ioctl(vcpu, KVM_GET_LAPIC, &lapic); 207 + TEST_ASSERT(*((u32 *)&lapic.regs[APIC_ID]) == vcpu->id << 24, 208 + "x2APIC ID should be fully readonly"); 209 + } 210 + 211 + kvm_vm_free(vm); 212 + } 213 + 187 214 int main(int argc, char *argv[]) 188 215 { 189 216 struct xapic_vcpu x = { ··· 238 211 kvm_vm_free(vm); 239 212 240 213 test_apic_id(); 214 + test_x2apic_id(); 241 215 }
+1 -1
tools/testing/selftests/net/af_unix/msg_oob.c
··· 209 209 210 210 static void __recvpair(struct __test_metadata *_metadata, 211 211 FIXTURE_DATA(msg_oob) *self, 212 - const void *expected_buf, int expected_len, 212 + const char *expected_buf, int expected_len, 213 213 int buf_len, int flags) 214 214 { 215 215 int i, ret[2], recv_errno[2], expected_errno = 0;
+1
tools/testing/selftests/net/lib.sh
··· 146 146 147 147 for ns in "$@"; do 148 148 [ -z "${ns}" ] && continue 149 + ip netns pids "${ns}" 2> /dev/null | xargs -r kill || true 149 150 ip netns delete "${ns}" &> /dev/null || true 150 151 if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then 151 152 echo "Warn: Failed to remove namespace $ns"
+1
tools/testing/selftests/net/netfilter/Makefile
··· 7 7 MNL_LDLIBS := $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl) 8 8 9 9 TEST_PROGS := br_netfilter.sh bridge_brouter.sh 10 + TEST_PROGS += br_netfilter_queue.sh 10 11 TEST_PROGS += conntrack_icmp_related.sh 11 12 TEST_PROGS += conntrack_ipip_mtu.sh 12 13 TEST_PROGS += conntrack_tcp_unreplied.sh
+78
tools/testing/selftests/net/netfilter/br_netfilter_queue.sh
··· 1 + #!/bin/bash 2 + 3 + source lib.sh 4 + 5 + checktool "nft --version" "run test without nft tool" 6 + 7 + cleanup() { 8 + cleanup_all_ns 9 + } 10 + 11 + setup_ns c1 c2 c3 sender 12 + 13 + trap cleanup EXIT 14 + 15 + nf_queue_wait() 16 + { 17 + grep -q "^ *$1 " "/proc/self/net/netfilter/nfnetlink_queue" 18 + } 19 + 20 + port_add() { 21 + ns="$1" 22 + dev="$2" 23 + a="$3" 24 + 25 + ip link add name "$dev" type veth peer name "$dev" netns "$ns" 26 + 27 + ip -net "$ns" addr add 192.168.1."$a"/24 dev "$dev" 28 + ip -net "$ns" link set "$dev" up 29 + 30 + ip link set "$dev" master br0 31 + ip link set "$dev" up 32 + } 33 + 34 + [ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; } 35 + 36 + ip link add br0 type bridge 37 + ip addr add 192.168.1.254/24 dev br0 38 + 39 + port_add "$c1" "c1" 1 40 + port_add "$c2" "c2" 2 41 + port_add "$c3" "c3" 3 42 + port_add "$sender" "sender" 253 43 + 44 + ip link set br0 up 45 + 46 + modprobe -q br_netfilter 47 + 48 + sysctl net.bridge.bridge-nf-call-iptables=1 || exit 1 49 + 50 + ip netns exec "$sender" ping -I sender -c1 192.168.1.1 || exit 1 51 + ip netns exec "$sender" ping -I sender -c1 192.168.1.2 || exit 2 52 + ip netns exec "$sender" ping -I sender -c1 192.168.1.3 || exit 3 53 + 54 + nft -f /dev/stdin <<EOF 55 + table ip filter { 56 + chain forward { 57 + type filter hook forward priority 0; policy accept; 58 + ct state new counter 59 + ip protocol icmp counter queue num 0 bypass 60 + } 61 + } 62 + EOF 63 + ./nf_queue -t 5 > /dev/null & 64 + 65 + busywait 5000 nf_queue_wait 66 + 67 + for i in $(seq 1 5); do conntrack -F > /dev/null 2> /dev/null; sleep 0.1 ; done & 68 + ip netns exec "$sender" ping -I sender -f -c 50 -b 192.168.1.255 69 + 70 + read t < /proc/sys/kernel/tainted 71 + if [ "$t" -eq 0 ];then 72 + echo PASS: kernel not tainted 73 + else 74 + echo ERROR: kernel is tainted 75 + exit 1 76 + fi 77 + 78 + exit 0
+24 -1
tools/testing/selftests/net/udpgso.c
··· 67 67 int gso_len; /* mss after applying gso */ 68 68 int r_num_mss; /* recv(): number of calls of full mss */ 69 69 int r_len_last; /* recv(): size of last non-mss dgram, if any */ 70 + bool v6_ext_hdr; /* send() dgrams with IPv6 extension headers */ 70 71 }; 71 72 72 73 const struct in6_addr addr6 = { ··· 77 76 const struct in_addr addr4 = { 78 77 __constant_htonl(0x0a000001), /* 10.0.0.1 */ 79 78 }; 79 + 80 + static const char ipv6_hopopts_pad1[8] = { 0 }; 80 81 81 82 struct testcase testcases_v4[] = { 82 83 { ··· 259 256 .r_num_mss = 2, 260 257 }, 261 258 { 259 + /* send 2 1B segments with extension headers */ 260 + .tlen = 2, 261 + .gso_len = 1, 262 + .r_num_mss = 2, 263 + .v6_ext_hdr = true, 264 + }, 265 + { 262 266 /* send 2B + 2B + 1B segments */ 263 267 .tlen = 5, 264 268 .gso_len = 2, ··· 406 396 int i, ret, val, mss; 407 397 bool sent; 408 398 409 - fprintf(stderr, "ipv%d tx:%d gso:%d %s\n", 399 + fprintf(stderr, "ipv%d tx:%d gso:%d %s%s\n", 410 400 addr->sa_family == AF_INET ? 4 : 6, 411 401 test->tlen, test->gso_len, 402 + test->v6_ext_hdr ? "ext-hdr " : "", 412 403 test->tfail ? "(fail)" : ""); 404 + 405 + if (test->v6_ext_hdr) { 406 + if (setsockopt(fdt, IPPROTO_IPV6, IPV6_HOPOPTS, 407 + ipv6_hopopts_pad1, sizeof(ipv6_hopopts_pad1))) 408 + error(1, errno, "setsockopt ipv6 hopopts"); 409 + } 413 410 414 411 val = test->gso_len; 415 412 if (cfg_do_setsockopt) { ··· 429 412 error(1, 0, "send succeeded while expecting failure"); 430 413 if (!sent && !test->tfail) 431 414 error(1, 0, "send failed while expecting success"); 415 + 416 + if (test->v6_ext_hdr) { 417 + if (setsockopt(fdt, IPPROTO_IPV6, IPV6_HOPOPTS, NULL, 0)) 418 + error(1, errno, "setsockopt ipv6 hopopts clear"); 419 + } 420 + 432 421 if (!sent) 433 422 return; 434 423
+7 -6
virt/kvm/eventfd.c
··· 97 97 mutex_lock(&kvm->irqfds.resampler_lock); 98 98 99 99 list_del_rcu(&irqfd->resampler_link); 100 - synchronize_srcu(&kvm->irq_srcu); 101 100 102 101 if (list_empty(&resampler->list)) { 103 102 list_del_rcu(&resampler->link); 104 103 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); 105 104 /* 106 - * synchronize_srcu(&kvm->irq_srcu) already called 105 + * synchronize_srcu_expedited(&kvm->irq_srcu) already called 107 106 * in kvm_unregister_irq_ack_notifier(). 108 107 */ 109 108 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 110 109 resampler->notifier.gsi, 0, false); 111 110 kfree(resampler); 111 + } else { 112 + synchronize_srcu_expedited(&kvm->irq_srcu); 112 113 } 113 114 114 115 mutex_unlock(&kvm->irqfds.resampler_lock); ··· 127 126 u64 cnt; 128 127 129 128 /* Make sure irqfd has been initialized in assign path. */ 130 - synchronize_srcu(&kvm->irq_srcu); 129 + synchronize_srcu_expedited(&kvm->irq_srcu); 131 130 132 131 /* 133 132 * Synchronize with the wait-queue and unhook ourselves to prevent ··· 385 384 } 386 385 387 386 list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list); 388 - synchronize_srcu(&kvm->irq_srcu); 387 + synchronize_srcu_expedited(&kvm->irq_srcu); 389 388 390 389 mutex_unlock(&kvm->irqfds.resampler_lock); 391 390 } ··· 524 523 mutex_lock(&kvm->irq_lock); 525 524 hlist_del_init_rcu(&kian->link); 526 525 mutex_unlock(&kvm->irq_lock); 527 - synchronize_srcu(&kvm->irq_srcu); 526 + synchronize_srcu_expedited(&kvm->irq_srcu); 528 527 kvm_arch_post_irq_ack_notifier_list_update(kvm); 529 528 } 530 529 ··· 609 608 610 609 /* 611 610 * Take note of a change in irq routing. 612 - * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards. 611 + * Caller must invoke synchronize_srcu_expedited(&kvm->irq_srcu) afterwards. 613 612 */ 614 613 void kvm_irq_routing_update(struct kvm *kvm) 615 614 {
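Besides switching to synchronize_srcu_expedited(), the shutdown path above now skips its own grace-period wait when the resampler list became empty, because kvm_unregister_irq_ack_notifier() already performs one. A sketch of that reordering; the helper name is invented and its body is a stub:

#include <linux/srcu.h>

static void unregister_and_sync(void)
{
        /* stand-in for kvm_unregister_irq_ack_notifier(), which already
         * ends with synchronize_srcu_expedited(&kvm->irq_srcu) */
}

static void resampler_entry_remove(struct srcu_struct *srcu, bool was_last)
{
        /* the entry was list_del_rcu()'d by the caller */
        if (was_last)
                unregister_and_sync();  /* grace period implied: no extra wait */
        else
                synchronize_srcu_expedited(srcu); /* wait out current readers */
}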
+2 -3
virt/kvm/kvm_main.c
··· 1578 1578 if (mem->flags & KVM_MEM_GUEST_MEMFD) 1579 1579 valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES; 1580 1580 1581 - #ifdef CONFIG_HAVE_KVM_READONLY_MEM 1582 1581 /* 1583 1582 * GUEST_MEMFD is incompatible with read-only memslots, as writes to 1584 1583 * read-only memslots have emulated MMIO, not page fault, semantics, 1585 1584 * and KVM doesn't allow emulated MMIO for private memory. 1586 1585 */ 1587 - if (!(mem->flags & KVM_MEM_GUEST_MEMFD)) 1586 + if (kvm_arch_has_readonly_mem(kvm) && 1587 + !(mem->flags & KVM_MEM_GUEST_MEMFD)) 1588 1588 valid_flags |= KVM_MEM_READONLY; 1589 - #endif 1590 1589 1591 1590 if (mem->flags & ~valid_flags) 1592 1591 return -EINVAL;