Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 6.11-rc3 into char-misc-next

We need the char/misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+5993 -3326
+1
.mailmap
··· 166 166 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com> 167 167 David Brownell <david-b@pacbell.net> 168 168 David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org> 169 + David Heidelberg <david@ixit.cz> <d.okias@gmail.com> 169 170 David Rheinsberg <david@readahead.eu> <dh.herrmann@gmail.com> 170 171 David Rheinsberg <david@readahead.eu> <dh.herrmann@googlemail.com> 171 172 David Rheinsberg <david@readahead.eu> <david.rheinsberg@gmail.com>
+3 -3
Documentation/ABI/testing/sysfs-bus-i2c-devices-turris-omnia-mcu
··· 32 32 interrupt. 33 33 34 34 This file switches between these two modes: 35 - - "mcu" makes the button press event be handled by the MCU to 36 - change the LEDs panel intensity. 37 - - "cpu" makes the button press event be handled by the CPU. 35 + - ``mcu`` makes the button press event be handled by the MCU to 36 + change the LEDs panel intensity. 37 + - ``cpu`` makes the button press event be handled by the CPU. 38 38 39 39 Format: %s. 40 40
+1 -1
Documentation/admin-guide/cifs/usage.rst
··· 742 742 may use NTLMSSP 0x00080 743 743 must use NTLMSSP 0x80080 744 744 seal (packet encryption) 0x00040 745 - must seal (not implemented yet) 0x40040 745 + must seal 0x40040 746 746 747 747 cifsFYI If set to non-zero value, additional debug information 748 748 will be logged to the system error log. This field
+1 -3
Documentation/admin-guide/kernel-parameters.txt
··· 4798 4798 4799 4799 profile= [KNL] Enable kernel profiling via /proc/profile 4800 4800 Format: [<profiletype>,]<number> 4801 - Param: <profiletype>: "schedule", "sleep", or "kvm" 4801 + Param: <profiletype>: "schedule" or "kvm" 4802 4802 [defaults to kernel profiling] 4803 4803 Param: "schedule" - profile schedule points. 4804 - Param: "sleep" - profile D-state sleeping (millisecs). 4805 - Requires CONFIG_SCHEDSTATS 4806 4804 Param: "kvm" - profile VM exits. 4807 4805 Param: <number> - step/bucket size as a power of 2 for 4808 4806 statistical time based profiling.
+18
Documentation/arch/arm64/silicon-errata.rst
··· 122 122 +----------------+-----------------+-----------------+-----------------------------+ 123 123 | ARM | Cortex-A76 | #1490853 | N/A | 124 124 +----------------+-----------------+-----------------+-----------------------------+ 125 + | ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 | 126 + +----------------+-----------------+-----------------+-----------------------------+ 125 127 | ARM | Cortex-A77 | #1491015 | N/A | 126 128 +----------------+-----------------+-----------------+-----------------------------+ 127 129 | ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 | 130 + +----------------+-----------------+-----------------+-----------------------------+ 131 + | ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 | 132 + +----------------+-----------------+-----------------+-----------------------------+ 133 + | ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 | 134 + +----------------+-----------------+-----------------+-----------------------------+ 135 + | ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 | 128 136 +----------------+-----------------+-----------------+-----------------------------+ 129 137 | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | 130 138 +----------------+-----------------+-----------------+-----------------------------+ ··· 146 138 +----------------+-----------------+-----------------+-----------------------------+ 147 139 | ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 | 148 140 +----------------+-----------------+-----------------+-----------------------------+ 141 + | ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 | 142 + +----------------+-----------------+-----------------+-----------------------------+ 149 143 | ARM | Cortex-X1 | #1502854 | N/A | 144 + +----------------+-----------------+-----------------+-----------------------------+ 145 + | ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 | 146 + 
+----------------+-----------------+-----------------+-----------------------------+ 147 + | ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 | 150 148 +----------------+-----------------+-----------------+-----------------------------+ 151 149 | ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 | 152 150 +----------------+-----------------+-----------------+-----------------------------+ ··· 174 160 +----------------+-----------------+-----------------+-----------------------------+ 175 161 | ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 | 176 162 +----------------+-----------------+-----------------+-----------------------------+ 163 + | ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 | 164 + +----------------+-----------------+-----------------+-----------------------------+ 177 165 | ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 | 178 166 +----------------+-----------------+-----------------+-----------------------------+ 179 167 | ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 | ··· 185 169 | ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 | 186 170 +----------------+-----------------+-----------------+-----------------------------+ 187 171 | ARM | Neoverse-V1 | #1619801 | N/A | 172 + +----------------+-----------------+-----------------+-----------------------------+ 173 + | ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 | 188 174 +----------------+-----------------+-----------------+-----------------------------+ 189 175 | ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 | 190 176 +----------------+-----------------+-----------------+-----------------------------+
+3
Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml
··· 35 35 ports-implemented: 36 36 const: 1 37 37 38 + power-domains: 39 + maxItems: 1 40 + 38 41 sata-port@0: 39 42 $ref: /schemas/ata/snps,dwc-ahci-common.yaml#/$defs/dwc-ahci-port 40 43
+6 -3
Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
··· 17 17 oneOf: 18 18 # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel 19 19 - const: samsung,atna33xc20 20 - # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel 21 20 - items: 22 - - const: samsung,atna45af01 23 - - const: samsung,atna33xc20 21 + - enum: 22 + # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel 23 + - samsung,atna45af01 24 + # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel 25 + - samsung,atna45dc02 26 + - const: samsung,atna33xc20 24 27 25 28 enable-gpios: true 26 29 port: true
+2 -1
Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
··· 199 199 200 200 examples: 201 201 - | 202 + #include <dt-bindings/gpio/gpio.h> 202 203 codec@1,0{ 203 204 compatible = "slim217,250"; 204 205 reg = <1 0>; 205 - reset-gpios = <&tlmm 64 0>; 206 + reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>; 206 207 slim-ifc-dev = <&wcd9340_ifd>; 207 208 #sound-dai-cells = <1>; 208 209 interrupt-parent = <&tlmm>;
+1 -1
Documentation/devicetree/bindings/sound/qcom,wcd937x.yaml
··· 42 42 pinctrl-names = "default", "sleep"; 43 43 pinctrl-0 = <&wcd_reset_n>; 44 44 pinctrl-1 = <&wcd_reset_n_sleep>; 45 - reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>; 45 + reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>; 46 46 vdd-buck-supply = <&vreg_l17b_1p8>; 47 47 vdd-rxtx-supply = <&vreg_l18b_1p8>; 48 48 vdd-px-supply = <&vreg_l18b_1p8>;
+2 -1
Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml
··· 34 34 35 35 examples: 36 36 - | 37 + #include <dt-bindings/gpio/gpio.h> 37 38 codec { 38 39 compatible = "qcom,wcd9380-codec"; 39 - reset-gpios = <&tlmm 32 0>; 40 + reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>; 40 41 #sound-dai-cells = <1>; 41 42 qcom,tx-device = <&wcd938x_tx>; 42 43 qcom,rx-device = <&wcd938x_rx>;
+2 -2
Documentation/devicetree/bindings/sound/qcom,wcd939x.yaml
··· 52 52 53 53 examples: 54 54 - | 55 - #include <dt-bindings/interrupt-controller/irq.h> 55 + #include <dt-bindings/gpio/gpio.h> 56 56 codec { 57 57 compatible = "qcom,wcd9390-codec"; 58 - reset-gpios = <&tlmm 32 IRQ_TYPE_NONE>; 58 + reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>; 59 59 #sound-dai-cells = <1>; 60 60 qcom,tx-device = <&wcd939x_tx>; 61 61 qcom,rx-device = <&wcd939x_rx>;
+1
Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
··· 18 18 - usb424,2412 19 19 - usb424,2417 20 20 - usb424,2514 21 + - usb424,2517 21 22 22 23 reg: true 23 24
+30 -35
Documentation/driver-api/thermal/sysfs-api.rst
··· 4 4 5 5 Written by Sujith Thomas <sujith.thomas@intel.com>, Zhang Rui <rui.zhang@intel.com> 6 6 7 - Updated: 2 January 2008 8 - 9 7 Copyright (c) 2008 Intel Corporation 10 8 11 9 ··· 36 38 37 39 :: 38 40 39 - struct thermal_zone_device 40 - *thermal_zone_device_register(char *type, 41 - int trips, int mask, void *devdata, 42 - struct thermal_zone_device_ops *ops, 43 - const struct thermal_zone_params *tzp, 44 - int passive_delay, int polling_delay)) 41 + struct thermal_zone_device * 42 + thermal_zone_device_register_with_trips(const char *type, 43 + const struct thermal_trip *trips, 44 + int num_trips, void *devdata, 45 + const struct thermal_zone_device_ops *ops, 46 + const struct thermal_zone_params *tzp, 47 + unsigned int passive_delay, 48 + unsigned int polling_delay) 45 49 46 - This interface function adds a new thermal zone device (sensor) to 50 + This interface function adds a new thermal zone device (sensor) to the 47 51 /sys/class/thermal folder as `thermal_zone[0-*]`. It tries to bind all the 48 - thermal cooling devices registered at the same time. 52 + thermal cooling devices registered to it at the same time. 49 53 50 54 type: 51 55 the thermal zone type. 52 56 trips: 53 - the total number of trip points this thermal zone supports. 54 - mask: 55 - Bit string: If 'n'th bit is set, then trip point 'n' is writable. 57 + the table of trip points for this thermal zone. 56 58 devdata: 57 59 device private data 58 60 ops: ··· 65 67 .get_temp: 66 68 get the current temperature of the thermal zone. 67 69 .set_trips: 68 - set the trip points window. Whenever the current temperature 69 - is updated, the trip points immediately below and above the 70 - current temperature are found. 71 - .get_mode: 72 - get the current mode (enabled/disabled) of the thermal zone. 73 - 74 - - "enabled" means the kernel thermal management is 75 - enabled. 
76 - - "disabled" will prevent kernel thermal driver action 77 - upon trip points so that user applications can take 78 - charge of thermal management. 79 - .set_mode: 80 - set the mode (enabled/disabled) of the thermal zone. 81 - .get_trip_type: 82 - get the type of certain trip point. 83 - .get_trip_temp: 84 - get the temperature above which the certain trip point 85 - will be fired. 70 + set the trip points window. Whenever the current temperature 71 + is updated, the trip points immediately below and above the 72 + current temperature are found. 73 + .change_mode: 74 + change the mode (enabled/disabled) of the thermal zone. 75 + .set_trip_temp: 76 + set the temperature of a given trip point. 77 + .get_crit_temp: 78 + get the critical temperature for this thermal zone. 86 79 .set_emul_temp: 87 - set the emulation temperature which helps in debugging 88 - different threshold temperature points. 80 + set the emulation temperature which helps in debugging 81 + different threshold temperature points. 82 + .get_trend: 83 + get the trend of most recent zone temperature changes. 84 + .hot: 85 + hot trip point crossing handler. 86 + .critical: 87 + critical trip point crossing handler. 89 88 tzp: 90 89 thermal zone platform parameters. 91 90 passive_delay: 92 - number of milliseconds to wait between polls when 93 - performing passive cooling. 91 + number of milliseconds to wait between polls when performing passive 92 + cooling. 94 93 polling_delay: 95 94 number of milliseconds to wait between polls when checking 96 95 whether trip points have been crossed (0 for interrupt driven systems).
+1 -1
Documentation/netlink/specs/ethtool.yaml
··· 1753 1753 request: 1754 1754 attributes: 1755 1755 - header 1756 + - context 1756 1757 reply: 1757 1758 attributes: 1758 1759 - header ··· 1762 1761 - indir 1763 1762 - hkey 1764 1763 - input_xfrm 1765 - dump: *rss-get-op 1766 1764 - 1767 1765 name: plca-get-cfg 1768 1766 doc: Get PLCA params.
+1
Documentation/networking/ethtool-netlink.rst
··· 1875 1875 1876 1876 ===================================== ====== ========================== 1877 1877 ``ETHTOOL_A_RSS_HEADER`` nested reply header 1878 + ``ETHTOOL_A_RSS_CONTEXT`` u32 context number 1878 1879 ``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func 1879 1880 ``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes 1880 1881 ``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
+96 -57
Documentation/process/embargoed-hardware-issues.rst
··· 13 13 Hardware issues like Meltdown, Spectre, L1TF etc. must be treated 14 14 differently because they usually affect all Operating Systems ("OS") and 15 15 therefore need coordination across different OS vendors, distributions, 16 - hardware vendors and other parties. For some of the issues, software 17 - mitigations can depend on microcode or firmware updates, which need further 18 - coordination. 16 + silicon vendors, hardware integrators, and other parties. For some of the 17 + issues, software mitigations can depend on microcode or firmware updates, 18 + which need further coordination. 19 19 20 20 .. _Contact: 21 21 ··· 32 32 <securitybugs>`) instead. 33 33 34 34 The team can be contacted by email at <hardware-security@kernel.org>. This 35 - is a private list of security officers who will help you to coordinate a 36 - fix according to our documented process. 35 + is a private list of security officers who will help you coordinate a fix 36 + according to our documented process. 37 37 38 38 The list is encrypted and email to the list can be sent by either PGP or 39 39 S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME ··· 43 43 - PGP: https://www.kernel.org/static/files/hardware-security.asc 44 44 - S/MIME: https://www.kernel.org/static/files/hardware-security.crt 45 45 46 - While hardware security issues are often handled by the affected hardware 46 + While hardware security issues are often handled by the affected silicon 47 47 vendor, we welcome contact from researchers or individuals who have 48 48 identified a potential hardware flaw. 49 49 ··· 65 65 ability to access the embargoed information, but are obliged to 66 66 confidentiality by their employment contract. Linux Foundation IT 67 67 personnel are also responsible for operating and managing the rest of 68 - kernel.org infrastructure. 68 + kernel.org's infrastructure. 
69 69 70 70 The Linux Foundation's current director of IT Project infrastructure is 71 71 Konstantin Ryabitsev. ··· 85 85 86 86 The Linux kernel community has a deep understanding of the requirement to 87 87 keep hardware security issues under embargo for coordination between 88 - different OS vendors, distributors, hardware vendors and other parties. 88 + different OS vendors, distributors, silicon vendors, and other parties. 89 89 90 90 The Linux kernel community has successfully handled hardware security 91 91 issues in the past and has the necessary mechanisms in place to allow ··· 103 103 All involved developers pledge to adhere to the embargo rules and to keep 104 104 the received information confidential. Violation of the pledge will lead to 105 105 immediate exclusion from the current issue and removal from all related 106 - mailing-lists. In addition, the hardware security team will also exclude 106 + mailing lists. In addition, the hardware security team will also exclude 107 107 the offender from future issues. The impact of this consequence is a highly 108 108 effective deterrent in our community. In case a violation happens the 109 109 hardware security team will inform the involved parties immediately. If you 110 - or anyone becomes aware of a potential violation, please report it 110 + or anyone else becomes aware of a potential violation, please report it 111 111 immediately to the Hardware security officers. 112 112 113 113 ··· 124 124 Start of Disclosure 125 125 """"""""""""""""""" 126 126 127 - Disclosure starts by contacting the Linux kernel hardware security team by 128 - email. This initial contact should contain a description of the problem and 129 - a list of any known affected hardware. If your organization builds or 130 - distributes the affected hardware, we encourage you to also consider what 131 - other hardware could be affected. 
127 + Disclosure starts by emailing the Linux kernel hardware security team per 128 + the Contact section above. This initial contact should contain a 129 + description of the problem and a list of any known affected silicon. If 130 + your organization builds or distributes the affected hardware, we encourage 131 + you to also consider what other hardware could be affected. The disclosing 132 + party is responsible for contacting the affected silicon vendors in a 133 + timely manner. 132 134 133 135 The hardware security team will provide an incident-specific encrypted 134 - mailing-list which will be used for initial discussion with the reporter, 136 + mailing list which will be used for initial discussion with the reporter, 135 137 further disclosure, and coordination of fixes. 136 138 137 139 The hardware security team will provide the disclosing party a list of ··· 160 158 - The disclosed entities can be contacted to name experts who should 161 159 participate in the mitigation development. 162 160 163 - - If an expert which is required to handle an issue is employed by an 164 - listed entity or member of an listed entity, then the response teams can 161 + - If an expert who is required to handle an issue is employed by a listed 162 + entity or member of an listed entity, then the response teams can 165 163 request the disclosure of that expert from that entity. This ensures 166 164 that the expert is also part of the entity's response team. 167 165 ··· 171 169 The disclosing party provides detailed information to the initial response 172 170 team via the specific encrypted mailing-list. 173 171 174 - From our experience the technical documentation of these issues is usually 175 - a sufficient starting point and further technical clarification is best 172 + From our experience, the technical documentation of these issues is usually 173 + a sufficient starting point, and further technical clarification is best 176 174 done via email. 
177 175 178 176 Mitigation development ··· 181 179 The initial response team sets up an encrypted mailing-list or repurposes 182 180 an existing one if appropriate. 183 181 184 - Using a mailing-list is close to the normal Linux development process and 185 - has been successfully used in developing mitigations for various hardware 182 + Using a mailing list is close to the normal Linux development process and 183 + has been successfully used to develop mitigations for various hardware 186 184 security issues in the past. 187 185 188 - The mailing-list operates in the same way as normal Linux development. 189 - Patches are posted, discussed and reviewed and if agreed on applied to a 190 - non-public git repository which is only accessible to the participating 186 + The mailing list operates in the same way as normal Linux development. 187 + Patches are posted, discussed, and reviewed and if agreed upon, applied to 188 + a non-public git repository which is only accessible to the participating 191 189 developers via a secure connection. The repository contains the main 192 190 development branch against the mainline kernel and backport branches for 193 191 stable kernel versions as necessary. 194 192 195 193 The initial response team will identify further experts from the Linux 196 - kernel developer community as needed. Bringing in experts can happen at any 197 - time of the development process and needs to be handled in a timely manner. 194 + kernel developer community as needed. Any involved party can suggest 195 + further experts to be included, each of which will be subject to the same 196 + requirements outlined above. 198 197 199 - If an expert is employed by or member of an entity on the disclosure list 198 + Bringing in experts can happen at any time in the development process and 199 + needs to be handled in a timely manner. 
200 + 201 + If an expert is employed by or a member of an entity on the disclosure list 200 202 provided by the disclosing party, then participation will be requested from 201 203 the relevant entity. 202 204 203 - If not, then the disclosing party will be informed about the experts 205 + If not, then the disclosing party will be informed about the experts' 204 206 participation. The experts are covered by the Memorandum of Understanding 205 - and the disclosing party is requested to acknowledge the participation. In 206 - case that the disclosing party has a compelling reason to object, then this 207 - objection has to be raised within five work days and resolved with the 208 - incident team immediately. If the disclosing party does not react within 209 - five work days this is taken as silent acknowledgement. 207 + and the disclosing party is requested to acknowledge their participation. 208 + In the case where the disclosing party has a compelling reason to object, 209 + any objection must to be raised within five working days and resolved with 210 + the incident team immediately. If the disclosing party does not react 211 + within five working days this is taken as silent acknowledgment. 210 212 211 - After acknowledgement or resolution of an objection the expert is disclosed 212 - by the incident team and brought into the development process. 213 + After the incident team acknowledges or resolves an objection, the expert 214 + is disclosed and brought into the development process. 213 215 214 216 List participants may not communicate about the issue outside of the 215 217 private mailing list. List participants may not use any shared resources 216 218 (e.g. employer build farms, CI systems, etc) when working on patches. 217 219 220 + Early access 221 + """""""""""" 222 + 223 + The patches discussed and developed on the list can neither be distributed 224 + to any individual who is not a member of the response team nor to any other 225 + organization. 
226 + 227 + To allow the affected silicon vendors to work with their internal teams and 228 + industry partners on testing, validation, and logistics, the following 229 + exception is provided: 230 + 231 + Designated representatives of the affected silicon vendors are 232 + allowed to hand over the patches at any time to the silicon 233 + vendor’s response team. The representative must notify the kernel 234 + response team about the handover. The affected silicon vendor must 235 + have and maintain their own documented security process for any 236 + patches shared with their response team that is consistent with 237 + this policy. 238 + 239 + The silicon vendor’s response team can distribute these patches to 240 + their industry partners and to their internal teams under the 241 + silicon vendor’s documented security process. Feedback from the 242 + industry partners goes back to the silicon vendor and is 243 + communicated by the silicon vendor to the kernel response team. 244 + 245 + The handover to the silicon vendor’s response team removes any 246 + responsibility or liability from the kernel response team regarding 247 + premature disclosure, which happens due to the involvement of the 248 + silicon vendor’s internal teams or industry partners. The silicon 249 + vendor guarantees this release of liability by agreeing to this 250 + process. 218 251 219 252 Coordinated release 220 253 """"""""""""""""""" 221 254 222 - The involved parties will negotiate the date and time where the embargo 223 - ends. At that point the prepared mitigations are integrated into the 224 - relevant kernel trees and published. There is no pre-notification process: 225 - fixes are published in public and available to everyone at the same time. 255 + The involved parties will negotiate the date and time when the embargo 256 + ends. At that point, the prepared mitigations are published into the 257 + relevant kernel trees. 
There is no pre-notification process: the 258 + mitigations are published in public and available to everyone at the same 259 + time. 226 260 227 261 While we understand that hardware security issues need coordinated embargo 228 - time, the embargo time should be constrained to the minimum time which is 229 - required for all involved parties to develop, test and prepare the 262 + time, the embargo time should be constrained to the minimum time that is 263 + required for all involved parties to develop, test, and prepare their 230 264 mitigations. Extending embargo time artificially to meet conference talk 231 - dates or other non-technical reasons is creating more work and burden for 232 - the involved developers and response teams as the patches need to be kept 233 - up to date in order to follow the ongoing upstream kernel development, 234 - which might create conflicting changes. 265 + dates or other non-technical reasons creates more work and burden for the 266 + involved developers and response teams as the patches need to be kept up to 267 + date in order to follow the ongoing upstream kernel development, which 268 + might create conflicting changes. 235 269 236 270 CVE assignment 237 271 """""""""""""" ··· 313 275 314 276 If you want your organization to be added to the ambassadors list, please 315 277 contact the hardware security team. The nominated ambassador has to 316 - understand and support our process fully and is ideally well connected in 278 + understand and support our process fully and is ideally well-connected in 317 279 the Linux kernel community. 318 280 319 281 Encrypted mailing-lists 320 282 ----------------------- 321 283 322 - We use encrypted mailing-lists for communication. The operating principle 284 + We use encrypted mailing lists for communication. The operating principle 323 285 of these lists is that email sent to the list is encrypted either with the 324 - list's PGP key or with the list's S/MIME certificate. 
The mailing-list 286 + list's PGP key or with the list's S/MIME certificate. The mailing list 325 287 software decrypts the email and re-encrypts it individually for each 326 288 subscriber with the subscriber's PGP key or S/MIME certificate. Details 327 - about the mailing-list software and the setup which is used to ensure the 289 + about the mailing list software and the setup that is used to ensure the 328 290 security of the lists and protection of the data can be found here: 329 291 https://korg.wiki.kernel.org/userdoc/remail. 330 292 331 293 List keys 332 294 ^^^^^^^^^ 333 295 334 - For initial contact see :ref:`Contact`. For incident specific mailing-lists 335 - the key and S/MIME certificate are conveyed to the subscribers by email 336 - sent from the specific list. 296 + For initial contact see the :ref:`Contact` section above. For incident 297 + specific mailing lists, the key and S/MIME certificate are conveyed to the 298 + subscribers by email sent from the specific list. 337 299 338 - Subscription to incident specific lists 300 + Subscription to incident-specific lists 339 301 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 340 302 341 - Subscription is handled by the response teams. Disclosed parties who want 342 - to participate in the communication send a list of potential subscribers to 343 - the response team so the response team can validate subscription requests. 303 + Subscription to incident-specific lists is handled by the response teams. 304 + Disclosed parties who want to participate in the communication send a list 305 + of potential experts to the response team so the response team can validate 306 + subscription requests. 344 307 345 308 Each subscriber needs to send a subscription request to the response team 346 309 by email. The email must be signed with the subscriber's PGP key or S/MIME
+2 -2
Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
··· 21 21 22 22 .. raw:: latex 23 23 24 - \scriptsize 24 + \tiny 25 25 26 - .. tabularcolumns:: |p{3.6cm}|p{3.0cm}|p{1.3cm}|p{2.6cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}| 26 + .. tabularcolumns:: |p{3.6cm}|p{2.4cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}| 27 27 28 28 .. flat-table:: Luma-Only Image Formats 29 29 :header-rows: 1
+7 -1
Documentation/virt/kvm/api.rst
··· 6368 6368 See KVM_SET_USER_MEMORY_REGION2 for additional details. 6369 6369 6370 6370 4.143 KVM_PRE_FAULT_MEMORY 6371 - ------------------------ 6371 + --------------------------- 6372 6372 6373 6373 :Capability: KVM_CAP_PRE_FAULT_MEMORY 6374 6374 :Architectures: none ··· 6404 6404 for the current vCPU state. KVM maps memory as if the vCPU generated a 6405 6405 stage-2 read page fault, e.g. faults in memory as needed, but doesn't break 6406 6406 CoW. However, KVM does not mark any newly created stage-2 PTE as Accessed. 6407 + 6408 + In the case of confidential VM types where there is an initial set up of 6409 + private guest memory before the guest is 'finalized'/measured, this ioctl 6410 + should only be issued after completing all the necessary setup to put the 6411 + guest into a 'finalized' state so that the above semantics can be reliably 6412 + ensured. 6407 6413 6408 6414 In some cases, multiple vCPUs might share the page tables. In this 6409 6415 case, the ioctl can be called in parallel.
+3 -3
Documentation/wmi/devices/msi-wmi-platform.rst
··· 130 130 131 131 Due to a peculiarity in how Windows handles the ``CreateByteField()`` ACPI operator (errors only 132 132 happen when a invalid byte field is ultimately accessed), all methods require a 32 byte input 133 - buffer, even if the Binay MOF says otherwise. 133 + buffer, even if the Binary MOF says otherwise. 134 134 135 135 The input buffer contains a single byte to select the subfeature to be accessed and 31 bytes of 136 136 input data, the meaning of which depends on the subfeature being accessed. 137 137 138 - The output buffer contains a singe byte which signals success or failure (``0x00`` on failure) 138 + The output buffer contains a single byte which signals success or failure (``0x00`` on failure) 139 139 and 31 bytes of output data, the meaning if which depends on the subfeature being accessed. 140 140 141 141 WMI method Get_EC() ··· 147 147 The first 4 bits of the flag byte contain the minor version of the embedded controller interface, 148 148 with the next 2 bits containing the major version of the embedded controller interface. 149 149 150 - The 7th bit signals if the embedded controller page chaged (exact meaning is unknown), and the 150 + The 7th bit signals if the embedded controller page changed (exact meaning is unknown), and the 151 151 last bit signals if the platform is a Tigerlake platform. 152 152 153 153 The MSI software seems to only use this interface when the last bit is set.
+8 -5
MAINTAINERS
··· 5306 5306 CIRRUS LOGIC AUDIO CODEC DRIVERS 5307 5307 M: David Rhodes <david.rhodes@cirrus.com> 5308 5308 M: Richard Fitzgerald <rf@opensource.cirrus.com> 5309 - L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5309 + L: linux-sound@vger.kernel.org 5310 5310 L: patches@opensource.cirrus.com 5311 5311 S: Maintained 5312 5312 F: Documentation/devicetree/bindings/sound/cirrus,cs* ··· 5375 5375 CIRRUS LOGIC MADERA CODEC DRIVERS 5376 5376 M: Charles Keepax <ckeepax@opensource.cirrus.com> 5377 5377 M: Richard Fitzgerald <rf@opensource.cirrus.com> 5378 - L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5378 + L: linux-sound@vger.kernel.org 5379 5379 L: patches@opensource.cirrus.com 5380 5380 S: Supported 5381 5381 W: https://github.com/CirrusLogic/linux-drivers/wiki ··· 13324 13324 F: drivers/i2c/muxes/i2c-mux-ltc4306.c 13325 13325 13326 13326 LTP (Linux Test Project) 13327 + M: Andrea Cervesato <andrea.cervesato@suse.com> 13327 13328 M: Cyril Hrubis <chrubis@suse.cz> 13328 13329 M: Jan Stancek <jstancek@redhat.com> 13329 13330 M: Petr Vorel <pvorel@suse.cz> 13330 13331 M: Li Wang <liwang@redhat.com> 13331 13332 M: Yang Xu <xuyang2018.jy@fujitsu.com> 13333 + M: Xiao Yang <yangx.jy@fujitsu.com> 13332 13334 L: ltp@lists.linux.it (subscribers-only) 13333 13335 S: Maintained 13334 - W: http://linux-test-project.github.io/ 13336 + W: https://linux-test-project.readthedocs.io/ 13335 13337 T: git https://github.com/linux-test-project/ltp.git 13336 13338 13337 13339 LTR390 AMBIENT/UV LIGHT SENSOR DRIVER ··· 13541 13539 M: Mirko Lindner <mlindner@marvell.com> 13542 13540 M: Stephen Hemminger <stephen@networkplumber.org> 13543 13541 L: netdev@vger.kernel.org 13544 - S: Maintained 13542 + S: Odd fixes 13545 13543 F: drivers/net/ethernet/marvell/sk* 13546 13544 13547 13545 MARVELL LIBERTAS WIRELESS DRIVER ··· 15938 15936 F: include/linux/indirect_call_wrapper.h 15939 15937 F: include/linux/net.h 15940 15938 F: include/linux/netdevice.h 15939 + F: 
include/linux/skbuff.h 15941 15940 F: include/net/ 15942 15941 F: include/uapi/linux/in.h 15943 15942 F: include/uapi/linux/net.h ··· 18559 18556 QCOM IPA DRIVER 18560 18557 M: Alex Elder <elder@kernel.org> 18561 18558 L: netdev@vger.kernel.org 18562 - S: Supported 18559 + S: Maintained 18563 18560 F: drivers/net/ipa/ 18564 18561 18565 18562 QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 11 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc1 5 + EXTRAVERSION = -rc3 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+2 -2
arch/alpha/include/asm/io.h
··· 534 534 535 535 #define ioread16be(p) swab16(ioread16(p)) 536 536 #define ioread32be(p) swab32(ioread32(p)) 537 + #define ioread64be(p) swab64(ioread64(p)) 537 538 #define iowrite16be(v,p) iowrite16(swab16(v), (p)) 538 539 #define iowrite32be(v,p) iowrite32(swab32(v), (p)) 540 + #define iowrite64be(v,p) iowrite64(swab64(v), (p)) 539 541 540 542 #define inb_p inb 541 543 #define inw_p inw ··· 636 634 */ 637 635 #define ioread64 ioread64 638 636 #define iowrite64 iowrite64 639 - #define ioread64be ioread64be 640 - #define iowrite64be iowrite64be 641 637 #define ioread8_rep ioread8_rep 642 638 #define ioread16_rep ioread16_rep 643 639 #define ioread32_rep ioread32_rep
+3 -1
arch/arm/Kconfig
··· 87 87 select HAVE_ARCH_PFN_VALID 88 88 select HAVE_ARCH_SECCOMP 89 89 select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT 90 + select HAVE_ARCH_STACKLEAK 90 91 select HAVE_ARCH_THREAD_STRUCT_WHITELIST 91 92 select HAVE_ARCH_TRACEHOOK 92 93 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE ··· 117 116 select HAVE_KERNEL_XZ 118 117 select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M 119 118 select HAVE_KRETPROBES if HAVE_KPROBES 119 + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION 120 120 select HAVE_MOD_ARCH_SPECIFIC 121 121 select HAVE_NMI 122 122 select HAVE_OPTPROBES if !THUMB2_KERNEL ··· 738 736 bool "ARM errata: Read to DBGPRSR and DBGOSLSR may generate Undefined instruction" 739 737 depends on CPU_V7 740 738 help 741 - This option enables the workaround for the 764319 Cortex A-9 erratum. 739 + This option enables the workaround for the 764319 Cortex-A9 erratum. 742 740 CP14 read accesses to the DBGPRSR and DBGOSLSR registers generate an 743 741 unexpected Undefined Instruction exception when the DBGSWENABLE 744 742 external pin is set to 0, even when the CP14 accesses are performed
+1
arch/arm/boot/compressed/Makefile
··· 9 9 10 10 HEAD = head.o 11 11 OBJS += misc.o decompress.o 12 + CFLAGS_decompress.o += $(DISABLE_STACKLEAK_PLUGIN) 12 13 ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y) 13 14 OBJS += debug.o 14 15 AFLAGS_head.o += -DDEBUG
+1 -1
arch/arm/boot/compressed/vmlinux.lds.S
··· 125 125 126 126 . = BSS_START; 127 127 __bss_start = .; 128 - .bss : { *(.bss) } 128 + .bss : { *(.bss .bss.*) } 129 129 _end = .; 130 130 131 131 . = ALIGN(8); /* the stack must be 64-bit aligned */
+1 -1
arch/arm/boot/dts/arm/versatile-ab.dts
··· 157 157 clocks = <&xtal24mhz>; 158 158 }; 159 159 160 - pclk: clock-24000000 { 160 + pclk: clock-pclk { 161 161 #clock-cells = <0>; 162 162 compatible = "fixed-factor-clock"; 163 163 clock-div = <1>;
+7
arch/arm/include/asm/stacktrace.h
··· 26 26 #endif 27 27 }; 28 28 29 + static inline bool on_thread_stack(void) 30 + { 31 + unsigned long delta = current_stack_pointer ^ (unsigned long)current->stack; 32 + 33 + return delta < THREAD_SIZE; 34 + } 35 + 29 36 static __always_inline 30 37 void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame) 31 38 {
+1 -1
arch/arm/include/asm/vmlinux.lds.h
··· 42 42 #define PROC_INFO \ 43 43 . = ALIGN(4); \ 44 44 __proc_info_begin = .; \ 45 - *(.proc.info.init) \ 45 + KEEP(*(.proc.info.init)) \ 46 46 __proc_info_end = .; 47 47 48 48 #define IDMAP_TEXT \
+3
arch/arm/kernel/entry-armv.S
··· 1065 1065 .globl vector_fiq 1066 1066 1067 1067 .section .vectors, "ax", %progbits 1068 + .reloc .text, R_ARM_NONE, . 1068 1069 W(b) vector_rst 1069 1070 W(b) vector_und 1070 1071 ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_swi ) ··· 1079 1078 1080 1079 #ifdef CONFIG_HARDEN_BRANCH_HISTORY 1081 1080 .section .vectors.bhb.loop8, "ax", %progbits 1081 + .reloc .text, R_ARM_NONE, . 1082 1082 W(b) vector_rst 1083 1083 W(b) vector_bhb_loop8_und 1084 1084 ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi ) ··· 1092 1090 W(b) vector_bhb_loop8_fiq 1093 1091 1094 1092 .section .vectors.bhb.bpiall, "ax", %progbits 1093 + .reloc .text, R_ARM_NONE, . 1095 1094 W(b) vector_rst 1096 1095 W(b) vector_bhb_bpiall_und 1097 1096 ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi )
+3
arch/arm/kernel/entry-common.S
··· 119 119 120 120 ct_user_enter save = 0 121 121 122 + #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 123 + bl stackleak_erase_on_task_stack 124 + #endif 122 125 restore_user_regs fast = 0, offset = 0 123 126 ENDPROC(ret_to_user_from_irq) 124 127 ENDPROC(ret_to_user)
-5
arch/arm/kernel/module.c
··· 395 395 return 0; 396 396 } 397 397 398 - struct mod_unwind_map { 399 - const Elf_Shdr *unw_sec; 400 - const Elf_Shdr *txt_sec; 401 - }; 402 - 403 398 static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, 404 399 const Elf_Shdr *sechdrs, const char *name) 405 400 {
+1 -2
arch/arm/kernel/perf_callchain.c
··· 85 85 callchain_trace(void *data, unsigned long pc) 86 86 { 87 87 struct perf_callchain_entry_ctx *entry = data; 88 - perf_callchain_store(entry, pc); 89 - return true; 88 + return perf_callchain_store(entry, pc) == 0; 90 89 } 91 90 92 91 void
+2 -2
arch/arm/kernel/vmlinux-xip.lds.S
··· 63 63 . = ALIGN(4); 64 64 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { 65 65 __start___ex_table = .; 66 - ARM_MMU_KEEP(*(__ex_table)) 66 + ARM_MMU_KEEP(KEEP(*(__ex_table))) 67 67 __stop___ex_table = .; 68 68 } 69 69 ··· 83 83 } 84 84 .init.arch.info : { 85 85 __arch_info_begin = .; 86 - *(.arch.info.init) 86 + KEEP(*(.arch.info.init)) 87 87 __arch_info_end = .; 88 88 } 89 89 .init.tagtable : {
+3 -3
arch/arm/kernel/vmlinux.lds.S
··· 74 74 . = ALIGN(4); 75 75 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { 76 76 __start___ex_table = .; 77 - ARM_MMU_KEEP(*(__ex_table)) 77 + ARM_MMU_KEEP(KEEP(*(__ex_table))) 78 78 __stop___ex_table = .; 79 79 } 80 80 ··· 99 99 } 100 100 .init.arch.info : { 101 101 __arch_info_begin = .; 102 - *(.arch.info.init) 102 + KEEP(*(.arch.info.init)) 103 103 __arch_info_end = .; 104 104 } 105 105 .init.tagtable : { ··· 116 116 #endif 117 117 .init.pv_table : { 118 118 __pv_table_begin = .; 119 - *(.pv_table) 119 + KEEP(*(.pv_table)) 120 120 __pv_table_end = .; 121 121 } 122 122
+1 -1
arch/arm/mach-alpine/alpine_cpu_pm.c
··· 29 29 /* 30 30 * Set CPU resume address - 31 31 * secure firmware running on boot will jump to this address 32 - * after setting proper CPU mode, and initialiing e.g. secure 32 + * after setting proper CPU mode, and initializing e.g. secure 33 33 * regs (the same mode all CPUs are booted to - usually HYP) 34 34 */ 35 35 writel(phys_resume_addr,
+7 -4
arch/arm/mach-pxa/gumstix.c
··· 21 21 #include <linux/mtd/mtd.h> 22 22 #include <linux/mtd/partitions.h> 23 23 #include <linux/gpio/machine.h> 24 + #include <linux/gpio/property.h> 24 25 #include <linux/gpio.h> 25 26 #include <linux/err.h> 26 27 #include <linux/clk.h> ··· 41 40 #include <linux/platform_data/mmc-pxamci.h> 42 41 #include "udc.h" 43 42 #include "gumstix.h" 43 + #include "devices.h" 44 44 45 45 #include "generic.h" 46 46 ··· 101 99 } 102 100 #endif 103 101 104 - #ifdef CONFIG_USB_PXA25X 105 - static const struct property_entry spitz_mci_props[] __initconst = { 102 + #if IS_ENABLED(CONFIG_USB_PXA25X) 103 + static const struct property_entry gumstix_vbus_props[] __initconst = { 106 104 PROPERTY_ENTRY_GPIO("vbus-gpios", &pxa2xx_gpiochip_node, 107 105 GPIO_GUMSTIX_USB_GPIOn, GPIO_ACTIVE_HIGH), 108 106 PROPERTY_ENTRY_GPIO("pullup-gpios", &pxa2xx_gpiochip_node, ··· 111 109 }; 112 110 113 111 static const struct platform_device_info gumstix_gpio_vbus_info __initconst = { 114 - .name = "gpio-vbus", 115 - .id = PLATFORM_DEVID_NONE, 112 + .name = "gpio-vbus", 113 + .id = PLATFORM_DEVID_NONE, 114 + .properties = gumstix_vbus_props, 116 115 }; 117 116 118 117 static void __init gumstix_udc_init(void)
+10 -10
arch/arm/mm/proc.c
··· 17 17 __ADDRESSABLE(cpu_arm7tdmi_proc_init); 18 18 void cpu_arm7tdmi_proc_fin(void); 19 19 __ADDRESSABLE(cpu_arm7tdmi_proc_fin); 20 - void cpu_arm7tdmi_reset(void); 20 + void cpu_arm7tdmi_reset(unsigned long addr, bool hvc); 21 21 __ADDRESSABLE(cpu_arm7tdmi_reset); 22 22 int cpu_arm7tdmi_do_idle(void); 23 23 __ADDRESSABLE(cpu_arm7tdmi_do_idle); ··· 32 32 __ADDRESSABLE(cpu_arm720_proc_init); 33 33 void cpu_arm720_proc_fin(void); 34 34 __ADDRESSABLE(cpu_arm720_proc_fin); 35 - void cpu_arm720_reset(void); 35 + void cpu_arm720_reset(unsigned long addr, bool hvc); 36 36 __ADDRESSABLE(cpu_arm720_reset); 37 37 int cpu_arm720_do_idle(void); 38 38 __ADDRESSABLE(cpu_arm720_do_idle); ··· 49 49 __ADDRESSABLE(cpu_arm740_proc_init); 50 50 void cpu_arm740_proc_fin(void); 51 51 __ADDRESSABLE(cpu_arm740_proc_fin); 52 - void cpu_arm740_reset(void); 52 + void cpu_arm740_reset(unsigned long addr, bool hvc); 53 53 __ADDRESSABLE(cpu_arm740_reset); 54 54 int cpu_arm740_do_idle(void); 55 55 __ADDRESSABLE(cpu_arm740_do_idle); ··· 64 64 __ADDRESSABLE(cpu_arm9tdmi_proc_init); 65 65 void cpu_arm9tdmi_proc_fin(void); 66 66 __ADDRESSABLE(cpu_arm9tdmi_proc_fin); 67 - void cpu_arm9tdmi_reset(void); 67 + void cpu_arm9tdmi_reset(unsigned long addr, bool hvc); 68 68 __ADDRESSABLE(cpu_arm9tdmi_reset); 69 69 int cpu_arm9tdmi_do_idle(void); 70 70 __ADDRESSABLE(cpu_arm9tdmi_do_idle); ··· 79 79 __ADDRESSABLE(cpu_arm920_proc_init); 80 80 void cpu_arm920_proc_fin(void); 81 81 __ADDRESSABLE(cpu_arm920_proc_fin); 82 - void cpu_arm920_reset(void); 82 + void cpu_arm920_reset(unsigned long addr, bool hvc); 83 83 __ADDRESSABLE(cpu_arm920_reset); 84 84 int cpu_arm920_do_idle(void); 85 85 __ADDRESSABLE(cpu_arm920_do_idle); ··· 102 102 __ADDRESSABLE(cpu_arm922_proc_init); 103 103 void cpu_arm922_proc_fin(void); 104 104 __ADDRESSABLE(cpu_arm922_proc_fin); 105 - void cpu_arm922_reset(void); 105 + void cpu_arm922_reset(unsigned long addr, bool hvc); 106 106 __ADDRESSABLE(cpu_arm922_reset); 107 107 int 
cpu_arm922_do_idle(void); 108 108 __ADDRESSABLE(cpu_arm922_do_idle); ··· 119 119 __ADDRESSABLE(cpu_arm925_proc_init); 120 120 void cpu_arm925_proc_fin(void); 121 121 __ADDRESSABLE(cpu_arm925_proc_fin); 122 - void cpu_arm925_reset(void); 122 + void cpu_arm925_reset(unsigned long addr, bool hvc); 123 123 __ADDRESSABLE(cpu_arm925_reset); 124 124 int cpu_arm925_do_idle(void); 125 125 __ADDRESSABLE(cpu_arm925_do_idle); ··· 159 159 __ADDRESSABLE(cpu_arm940_proc_init); 160 160 void cpu_arm940_proc_fin(void); 161 161 __ADDRESSABLE(cpu_arm940_proc_fin); 162 - void cpu_arm940_reset(void); 162 + void cpu_arm940_reset(unsigned long addr, bool hvc); 163 163 __ADDRESSABLE(cpu_arm940_reset); 164 164 int cpu_arm940_do_idle(void); 165 165 __ADDRESSABLE(cpu_arm940_do_idle); ··· 174 174 __ADDRESSABLE(cpu_arm946_proc_init); 175 175 void cpu_arm946_proc_fin(void); 176 176 __ADDRESSABLE(cpu_arm946_proc_fin); 177 - void cpu_arm946_reset(void); 177 + void cpu_arm946_reset(unsigned long addr, bool hvc); 178 178 __ADDRESSABLE(cpu_arm946_reset); 179 179 int cpu_arm946_do_idle(void); 180 180 __ADDRESSABLE(cpu_arm946_do_idle); ··· 429 429 __ADDRESSABLE(cpu_v7_proc_init); 430 430 void cpu_v7_proc_fin(void); 431 431 __ADDRESSABLE(cpu_v7_proc_fin); 432 - void cpu_v7_reset(void); 432 + void cpu_v7_reset(unsigned long addr, bool hvc); 433 433 __ADDRESSABLE(cpu_v7_reset); 434 434 int cpu_v7_do_idle(void); 435 435 __ADDRESSABLE(cpu_v7_do_idle);
+16 -6
arch/arm64/Kconfig
··· 1069 1069 If unsure, say Y. 1070 1070 1071 1071 config ARM64_ERRATUM_3194386 1072 - bool "Cortex-{A720,X4,X925}/Neoverse-V3: workaround for MSR SSBS not self-synchronizing" 1072 + bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing" 1073 1073 default y 1074 1074 help 1075 1075 This option adds the workaround for the following errata: 1076 1076 1077 + * ARM Cortex-A76 erratum 3324349 1078 + * ARM Cortex-A77 erratum 3324348 1079 + * ARM Cortex-A78 erratum 3324344 1080 + * ARM Cortex-A78C erratum 3324346 1081 + * ARM Cortex-A78C erratum 3324347 1077 1082 * ARM Cortex-A710 erratam 3324338 1078 1083 * ARM Cortex-A720 erratum 3456091 1084 + * ARM Cortex-A725 erratum 3456106 1085 + * ARM Cortex-X1 erratum 3324344 1086 + * ARM Cortex-X1C erratum 3324346 1079 1087 * ARM Cortex-X2 erratum 3324338 1080 1088 * ARM Cortex-X3 erratum 3324335 1081 1089 * ARM Cortex-X4 erratum 3194386 1082 1090 * ARM Cortex-X925 erratum 3324334 1091 + * ARM Neoverse-N1 erratum 3324349 1083 1092 * ARM Neoverse N2 erratum 3324339 1093 + * ARM Neoverse-V1 erratum 3324341 1084 1094 * ARM Neoverse V2 erratum 3324336 1085 1095 * ARM Neoverse-V3 erratum 3312417 1086 1096 ··· 1098 1088 subsequent speculative instructions, which may permit unexepected 1099 1089 speculative store bypassing. 1100 1090 1101 - Work around this problem by placing a speculation barrier after 1102 - kernel changes to SSBS. The presence of the SSBS special-purpose 1103 - register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such 1104 - that userspace will use the PR_SPEC_STORE_BYPASS prctl to change 1105 - SSBS. 1091 + Work around this problem by placing a Speculation Barrier (SB) or 1092 + Instruction Synchronization Barrier (ISB) after kernel changes to 1093 + SSBS. The presence of the SSBS special-purpose register is hidden 1094 + from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace 1095 + will use the PR_SPEC_STORE_BYPASS prctl to change SSBS. 
1106 1096 1107 1097 If unsure, say Y. 1108 1098
-22
arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
··· 43 43 sound-dai = <&mcasp0>; 44 44 }; 45 45 }; 46 - 47 - reg_usb_hub: regulator-usb-hub { 48 - compatible = "regulator-fixed"; 49 - enable-active-high; 50 - /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */ 51 - gpio = <&main_gpio0 31 GPIO_ACTIVE_HIGH>; 52 - regulator-boot-on; 53 - regulator-name = "HUB_PWR_EN"; 54 - }; 55 46 }; 56 47 57 48 /* Verdin ETHs */ ··· 184 193 status = "okay"; 185 194 }; 186 195 187 - /* Do not force CTRL_SLEEP_MOCI# always enabled */ 188 - &reg_force_sleep_moci { 189 - status = "disabled"; 190 - }; 191 - 192 196 /* Verdin SD_1 */ 193 197 &sdhci1 { 194 198 status = "okay"; ··· 204 218 }; 205 219 206 220 &usb1 { 207 - #address-cells = <1>; 208 - #size-cells = <0>; 209 221 status = "okay"; 210 - 211 - usb-hub@1 { 212 - compatible = "usb424,2744"; 213 - reg = <1>; 214 - vdd-supply = <&reg_usb_hub>; 215 - }; 216 222 }; 217 223 218 224 /* Verdin CTRL_WAKE1_MICO# */
-6
arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
··· 138 138 vin-supply = <&reg_1v8>; 139 139 }; 140 140 141 - /* 142 - * By default we enable CTRL_SLEEP_MOCI#, this is required to have 143 - * peripherals on the carrier board powered. 144 - * If more granularity or power saving is required this can be disabled 145 - * in the carrier board device tree files. 146 - */ 147 141 reg_force_sleep_moci: regulator-force-sleep-moci { 148 142 compatible = "regulator-fixed"; 149 143 enable-active-high;
+2
arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
··· 146 146 power-domains = <&k3_pds 79 TI_SCI_PD_EXCLUSIVE>; 147 147 clocks = <&k3_clks 79 0>; 148 148 clock-names = "gpio"; 149 + gpio-ranges = <&mcu_pmx0 0 0 21>, <&mcu_pmx0 21 23 1>, 150 + <&mcu_pmx0 22 32 2>; 149 151 }; 150 152 151 153 mcu_rti0: watchdog@4880000 {
+2 -1
arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
··· 45 45 &main_pmx0 { 46 46 pinctrl-single,gpio-range = 47 47 <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>, 48 - <&main_pmx0_range 33 92 PIN_GPIO_RANGE_IOPAD>, 48 + <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>, 49 + <&main_pmx0_range 72 22 PIN_GPIO_RANGE_IOPAD>, 49 50 <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>, 50 51 <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>, 51 52 <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>;
+2 -1
arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
··· 193 193 &main_pmx0 { 194 194 pinctrl-single,gpio-range = 195 195 <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>, 196 - <&main_pmx0_range 33 55 PIN_GPIO_RANGE_IOPAD>, 196 + <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>, 197 + <&main_pmx0_range 72 17 PIN_GPIO_RANGE_IOPAD>, 197 198 <&main_pmx0_range 101 25 PIN_GPIO_RANGE_IOPAD>, 198 199 <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>, 199 200 <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,
+8 -17
arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
··· 1262 1262 &serdes0 { 1263 1263 status = "okay"; 1264 1264 1265 + serdes0_pcie1_link: phy@0 { 1266 + reg = <0>; 1267 + cdns,num-lanes = <2>; 1268 + #phy-cells = <0>; 1269 + cdns,phy-type = <PHY_TYPE_PCIE>; 1270 + resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>; 1271 + }; 1272 + 1265 1273 serdes0_usb_link: phy@3 { 1266 1274 reg = <3>; 1267 1275 cdns,num-lanes = <1>; ··· 1392 1384 pinctrl-names = "default"; 1393 1385 pinctrl-0 = <&main_mcan4_pins_default>; 1394 1386 phys = <&transceiver3>; 1395 - }; 1396 - 1397 - &serdes0 { 1398 - status = "okay"; 1399 - 1400 - serdes0_pcie1_link: phy@0 { 1401 - reg = <0>; 1402 - cdns,num-lanes = <4>; 1403 - #phy-cells = <0>; 1404 - cdns,phy-type = <PHY_TYPE_PCIE>; 1405 - resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>, 1406 - <&serdes_wiz0 3>, <&serdes_wiz0 4>; 1407 - }; 1408 - }; 1409 - 1410 - &serdes_wiz0 { 1411 - status = "okay"; 1412 1387 }; 1413 1388 1414 1389 &pcie1_rc {
+2 -2
arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
··· 2755 2755 interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>, 2756 2756 <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>; 2757 2757 interrupt-names = "tx", "rx"; 2758 - dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>; 2758 + dmas = <&main_udmap 0xc403>, <&main_udmap 0x4403>; 2759 2759 dma-names = "tx", "rx"; 2760 2760 clocks = <&k3_clks 268 0>; 2761 2761 clock-names = "fck"; ··· 2773 2773 interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>, 2774 2774 <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>; 2775 2775 interrupt-names = "tx", "rx"; 2776 - dmas = <&main_udmap 0xc501>, <&main_udmap 0x4501>; 2776 + dmas = <&main_udmap 0xc404>, <&main_udmap 0x4404>; 2777 2777 dma-names = "tx", "rx"; 2778 2778 clocks = <&k3_clks 269 0>; 2779 2779 clock-names = "fck";
+4
arch/arm64/include/asm/cputype.h
··· 86 86 #define ARM_CPU_PART_CORTEX_X2 0xD48 87 87 #define ARM_CPU_PART_NEOVERSE_N2 0xD49 88 88 #define ARM_CPU_PART_CORTEX_A78C 0xD4B 89 + #define ARM_CPU_PART_CORTEX_X1C 0xD4C 89 90 #define ARM_CPU_PART_CORTEX_X3 0xD4E 90 91 #define ARM_CPU_PART_NEOVERSE_V2 0xD4F 91 92 #define ARM_CPU_PART_CORTEX_A720 0xD81 92 93 #define ARM_CPU_PART_CORTEX_X4 0xD82 93 94 #define ARM_CPU_PART_NEOVERSE_V3 0xD84 94 95 #define ARM_CPU_PART_CORTEX_X925 0xD85 96 + #define ARM_CPU_PART_CORTEX_A725 0xD87 95 97 96 98 #define APM_CPU_PART_XGENE 0x000 97 99 #define APM_CPU_VAR_POTENZA 0x00 ··· 167 165 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) 168 166 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) 169 167 #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) 168 + #define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C) 170 169 #define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3) 171 170 #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) 172 171 #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720) 173 172 #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) 174 173 #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) 175 174 #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925) 175 + #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725) 176 176 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) 177 177 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) 178 178 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+1
arch/arm64/include/asm/jump_label.h
··· 13 13 #include <linux/types.h> 14 14 #include <asm/insn.h> 15 15 16 + #define HAVE_JUMP_LABEL_BATCH 16 17 #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE 17 18 18 19 #define JUMP_TABLE_ENTRY(key, label) \
+1 -1
arch/arm64/kernel/Makefile.syscalls
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 3 syscall_abis_32 += 4 - syscall_abis_64 += renameat newstat rlimit memfd_secret 4 + syscall_abis_64 += renameat rlimit memfd_secret 5 5 6 6 syscalltbl = arch/arm64/tools/syscall_%.tbl
+10 -1
arch/arm64/kernel/cpu_errata.c
··· 434 434 435 435 #ifdef CONFIG_ARM64_ERRATUM_3194386 436 436 static const struct midr_range erratum_spec_ssbs_list[] = { 437 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), 438 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), 439 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), 440 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), 437 441 MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), 438 442 MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), 443 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A725), 444 + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), 445 + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C), 439 446 MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), 440 447 MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), 441 448 MIDR_ALL_VERSIONS(MIDR_CORTEX_X4), 442 449 MIDR_ALL_VERSIONS(MIDR_CORTEX_X925), 450 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), 443 451 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), 444 - MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), 452 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), 445 453 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), 454 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), 446 455 {} 447 456 }; 448 457 #endif
+9 -2
arch/arm64/kernel/jump_label.c
··· 7 7 */ 8 8 #include <linux/kernel.h> 9 9 #include <linux/jump_label.h> 10 + #include <linux/smp.h> 10 11 #include <asm/insn.h> 11 12 #include <asm/patching.h> 12 13 13 - void arch_jump_label_transform(struct jump_entry *entry, 14 - enum jump_label_type type) 14 + bool arch_jump_label_transform_queue(struct jump_entry *entry, 15 + enum jump_label_type type) 15 16 { 16 17 void *addr = (void *)jump_entry_code(entry); 17 18 u32 insn; ··· 26 25 } 27 26 28 27 aarch64_insn_patch_text_nosync(addr, insn); 28 + return true; 29 + } 30 + 31 + void arch_jump_label_transform_apply(void) 32 + { 33 + kick_all_cpus_sync(); 29 34 }
+2 -2
arch/loongarch/include/asm/hugetlb.h
··· 34 34 unsigned long addr, pte_t *ptep) 35 35 { 36 36 pte_t clear; 37 - pte_t pte = *ptep; 37 + pte_t pte = ptep_get(ptep); 38 38 39 39 pte_val(clear) = (unsigned long)invalid_pte_table; 40 40 set_pte_at(mm, addr, ptep, clear); ··· 65 65 pte_t *ptep, pte_t pte, 66 66 int dirty) 67 67 { 68 - int changed = !pte_same(*ptep, pte); 68 + int changed = !pte_same(ptep_get(ptep), pte); 69 69 70 70 if (changed) { 71 71 set_pte_at(vma->vm_mm, addr, ptep, pte);
+3 -3
arch/loongarch/include/asm/kfence.h
··· 53 53 { 54 54 pte_t *pte = virt_to_kpte(addr); 55 55 56 - if (WARN_ON(!pte) || pte_none(*pte)) 56 + if (WARN_ON(!pte) || pte_none(ptep_get(pte))) 57 57 return false; 58 58 59 59 if (protect) 60 - set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT))); 60 + set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT))); 61 61 else 62 - set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT))); 62 + set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT))); 63 63 64 64 preempt_disable(); 65 65 local_flush_tlb_one(addr);
-2
arch/loongarch/include/asm/kvm_host.h
··· 26 26 27 27 #define KVM_MAX_VCPUS 256 28 28 #define KVM_MAX_CPUCFG_REGS 21 29 - /* memory slots that does not exposed to userspace */ 30 - #define KVM_PRIVATE_MEM_SLOTS 0 31 29 32 30 #define KVM_HALT_POLL_NS_DEFAULT 500000 33 31 #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
+2 -2
arch/loongarch/include/asm/kvm_para.h
··· 39 39 * Hypercall interface for KVM hypervisor 40 40 * 41 41 * a0: function identifier 42 - * a1-a6: args 42 + * a1-a5: args 43 43 * Return value will be placed in a0. 44 - * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6. 44 + * Up to 5 arguments are passed in a1, a2, a3, a4, a5. 45 45 */ 46 46 static __always_inline long kvm_hypercall0(u64 fid) 47 47 {
+30 -20
arch/loongarch/include/asm/pgtable.h
··· 106 106 #define KFENCE_AREA_START (VMEMMAP_END + 1) 107 107 #define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1) 108 108 109 + #define ptep_get(ptep) READ_ONCE(*(ptep)) 110 + #define pmdp_get(pmdp) READ_ONCE(*(pmdp)) 111 + 109 112 #define pte_ERROR(e) \ 110 113 pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) 111 114 #ifndef __PAGETABLE_PMD_FOLDED ··· 150 147 return p4d_val(p4d) != (unsigned long)invalid_pud_table; 151 148 } 152 149 153 - static inline void p4d_clear(p4d_t *p4dp) 154 - { 155 - p4d_val(*p4dp) = (unsigned long)invalid_pud_table; 156 - } 157 - 158 150 static inline pud_t *p4d_pgtable(p4d_t p4d) 159 151 { 160 152 return (pud_t *)p4d_val(p4d); ··· 157 159 158 160 static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) 159 161 { 160 - *p4d = p4dval; 162 + WRITE_ONCE(*p4d, p4dval); 163 + } 164 + 165 + static inline void p4d_clear(p4d_t *p4dp) 166 + { 167 + set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table)); 161 168 } 162 169 163 170 #define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) ··· 196 193 return pud_val(pud) != (unsigned long)invalid_pmd_table; 197 194 } 198 195 199 - static inline void pud_clear(pud_t *pudp) 200 - { 201 - pud_val(*pudp) = ((unsigned long)invalid_pmd_table); 202 - } 203 - 204 196 static inline pmd_t *pud_pgtable(pud_t pud) 205 197 { 206 198 return (pmd_t *)pud_val(pud); 207 199 } 208 200 209 - #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0) 201 + static inline void set_pud(pud_t *pud, pud_t pudval) 202 + { 203 + WRITE_ONCE(*pud, pudval); 204 + } 205 + 206 + static inline void pud_clear(pud_t *pudp) 207 + { 208 + set_pud(pudp, __pud((unsigned long)invalid_pmd_table)); 209 + } 210 210 211 211 #define pud_phys(pud) PHYSADDR(pud_val(pud)) 212 212 #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) ··· 237 231 return pmd_val(pmd) != (unsigned long)invalid_pte_table; 238 232 } 239 233 240 - static inline void pmd_clear(pmd_t *pmdp) 234 + static inline void set_pmd(pmd_t 
*pmd, pmd_t pmdval) 241 235 { 242 - pmd_val(*pmdp) = ((unsigned long)invalid_pte_table); 236 + WRITE_ONCE(*pmd, pmdval); 243 237 } 244 238 245 - #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0) 239 + static inline void pmd_clear(pmd_t *pmdp) 240 + { 241 + set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table)); 242 + } 246 243 247 244 #define pmd_phys(pmd) PHYSADDR(pmd_val(pmd)) 248 245 ··· 323 314 324 315 static inline void set_pte(pte_t *ptep, pte_t pteval) 325 316 { 326 - *ptep = pteval; 317 + WRITE_ONCE(*ptep, pteval); 318 + 327 319 if (pte_val(pteval) & _PAGE_GLOBAL) { 328 320 pte_t *buddy = ptep_buddy(ptep); 329 321 /* ··· 351 341 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 352 342 : [global] "r" (page_global)); 353 343 #else /* !CONFIG_SMP */ 354 - if (pte_none(*buddy)) 355 - pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; 344 + if (pte_none(ptep_get(buddy))) 345 + WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); 356 346 #endif /* CONFIG_SMP */ 357 347 } 358 348 } ··· 360 350 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 361 351 { 362 352 /* Preserve global status for the pair */ 363 - if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) 353 + if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL) 364 354 set_pte(ptep, __pte(_PAGE_GLOBAL)); 365 355 else 366 356 set_pte(ptep, __pte(0)); ··· 613 603 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 614 604 unsigned long address, pmd_t *pmdp) 615 605 { 616 - pmd_t old = *pmdp; 606 + pmd_t old = pmdp_get(pmdp); 617 607 618 608 pmd_clear(pmdp); 619 609
+2 -1
arch/loongarch/kernel/Makefile.syscalls
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 - syscall_abis_64 += newstat 3 + # No special ABIs on loongarch so far 4 + syscall_abis_64 +=
+6
arch/loongarch/kernel/efi.c
··· 66 66 set_bit(EFI_RUNTIME_SERVICES, &efi.flags); 67 67 } 68 68 69 + bool efi_poweroff_required(void) 70 + { 71 + return efi_enabled(EFI_RUNTIME_SERVICES) && 72 + (acpi_gbl_reduced_hardware || acpi_no_s5); 73 + } 74 + 69 75 unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR; 70 76 71 77 #if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
+4 -4
arch/loongarch/kvm/mmu.c
··· 714 714 * value) and then p*d_offset() walks into the target huge page instead 715 715 * of the old page table (sees the new value). 716 716 */ 717 - pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); 717 + pgd = pgdp_get(pgd_offset(kvm->mm, hva)); 718 718 if (pgd_none(pgd)) 719 719 goto out; 720 720 721 - p4d = READ_ONCE(*p4d_offset(&pgd, hva)); 721 + p4d = p4dp_get(p4d_offset(&pgd, hva)); 722 722 if (p4d_none(p4d) || !p4d_present(p4d)) 723 723 goto out; 724 724 725 - pud = READ_ONCE(*pud_offset(&p4d, hva)); 725 + pud = pudp_get(pud_offset(&p4d, hva)); 726 726 if (pud_none(pud) || !pud_present(pud)) 727 727 goto out; 728 728 729 - pmd = READ_ONCE(*pmd_offset(&pud, hva)); 729 + pmd = pmdp_get(pmd_offset(&pud, hva)); 730 730 if (pmd_none(pmd) || !pmd_present(pmd)) 731 731 goto out; 732 732
+3 -3
arch/loongarch/mm/hugetlbpage.c
··· 39 39 pmd_t *pmd = NULL; 40 40 41 41 pgd = pgd_offset(mm, addr); 42 - if (pgd_present(*pgd)) { 42 + if (pgd_present(pgdp_get(pgd))) { 43 43 p4d = p4d_offset(pgd, addr); 44 - if (p4d_present(*p4d)) { 44 + if (p4d_present(p4dp_get(p4d))) { 45 45 pud = pud_offset(p4d, addr); 46 - if (pud_present(*pud)) 46 + if (pud_present(pudp_get(pud))) 47 47 pmd = pmd_offset(pud, addr); 48 48 } 49 49 }
+5 -5
arch/loongarch/mm/init.c
··· 141 141 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, 142 142 unsigned long addr, unsigned long next) 143 143 { 144 - int huge = pmd_val(*pmd) & _PAGE_HUGE; 144 + int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE; 145 145 146 146 if (huge) 147 147 vmemmap_verify((pte_t *)pmd, node, addr, next); ··· 173 173 pud_t *pud; 174 174 pmd_t *pmd; 175 175 176 - if (p4d_none(*p4d)) { 176 + if (p4d_none(p4dp_get(p4d))) { 177 177 pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 178 178 if (!pud) 179 179 panic("%s: Failed to allocate memory\n", __func__); ··· 184 184 } 185 185 186 186 pud = pud_offset(p4d, addr); 187 - if (pud_none(*pud)) { 187 + if (pud_none(pudp_get(pud))) { 188 188 pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 189 189 if (!pmd) 190 190 panic("%s: Failed to allocate memory\n", __func__); ··· 195 195 } 196 196 197 197 pmd = pmd_offset(pud, addr); 198 - if (!pmd_present(*pmd)) { 198 + if (!pmd_present(pmdp_get(pmd))) { 199 199 pte_t *pte; 200 200 201 201 pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); ··· 216 216 BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); 217 217 218 218 ptep = populate_kernel_pte(addr); 219 - if (!pte_none(*ptep)) { 219 + if (!pte_none(ptep_get(ptep))) { 220 220 pte_ERROR(*ptep); 221 221 return; 222 222 }
+5 -5
arch/loongarch/mm/kasan_init.c
··· 105 105 106 106 static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early) 107 107 { 108 - if (__pmd_none(early, READ_ONCE(*pmdp))) { 108 + if (__pmd_none(early, pmdp_get(pmdp))) { 109 109 phys_addr_t pte_phys = early ? 110 110 __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node); 111 111 if (!early) ··· 118 118 119 119 static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early) 120 120 { 121 - if (__pud_none(early, READ_ONCE(*pudp))) { 121 + if (__pud_none(early, pudp_get(pudp))) { 122 122 phys_addr_t pmd_phys = early ? 123 123 __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node); 124 124 if (!early) ··· 131 131 132 132 static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early) 133 133 { 134 - if (__p4d_none(early, READ_ONCE(*p4dp))) { 134 + if (__p4d_none(early, p4dp_get(p4dp))) { 135 135 phys_addr_t pud_phys = early ? 136 136 __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node); 137 137 if (!early) ··· 154 154 : kasan_alloc_zeroed_page(node); 155 155 next = addr + PAGE_SIZE; 156 156 set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); 157 - } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep))); 157 + } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep))); 158 158 } 159 159 160 160 static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, ··· 166 166 do { 167 167 next = pmd_addr_end(addr, end); 168 168 kasan_pte_populate(pmdp, addr, next, node, early); 169 - } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp))); 169 + } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp))); 170 170 } 171 171 172 172 static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
+1 -1
arch/loongarch/mm/pgtable.c
··· 128 128 void set_pmd_at(struct mm_struct *mm, unsigned long addr, 129 129 pmd_t *pmdp, pmd_t pmd) 130 130 { 131 - *pmdp = pmd; 131 + WRITE_ONCE(*pmdp, pmd); 132 132 flush_tlb_all(); 133 133 } 134 134
+1
arch/parisc/Kconfig
··· 20 20 select ARCH_SUPPORTS_HUGETLBFS if PA20 21 21 select ARCH_SUPPORTS_MEMORY_FAILURE 22 22 select ARCH_STACKWALK 23 + select ARCH_HAS_CACHE_LINE_SIZE 23 24 select ARCH_HAS_DEBUG_VM_PGTABLE 24 25 select HAVE_RELIABLE_STACKTRACE 25 26 select DMA_OPS
+10 -1
arch/parisc/include/asm/cache.h
··· 20 20 21 21 #define SMP_CACHE_BYTES L1_CACHE_BYTES 22 22 23 - #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 23 + #ifdef CONFIG_PA20 24 + #define ARCH_DMA_MINALIGN 128 25 + #else 26 + #define ARCH_DMA_MINALIGN 32 27 + #endif 28 + #define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */ 29 + 30 + #define arch_slab_minalign() ((unsigned)dcache_stride) 31 + #define cache_line_size() dcache_stride 32 + #define dma_get_cache_alignment cache_line_size 24 33 25 34 #define __read_mostly __section(".data..read_mostly") 26 35
+1 -1
arch/parisc/net/bpf_jit_core.c
··· 114 114 jit_data->header = 115 115 bpf_jit_binary_alloc(prog_size + extable_size, 116 116 &jit_data->image, 117 - sizeof(u32), 117 + sizeof(long), 118 118 bpf_fill_ill_insns); 119 119 if (!jit_data->header) { 120 120 prog = orig_prog;
+1 -1
arch/riscv/kernel/Makefile.syscalls
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 3 syscall_abis_32 += riscv memfd_secret 4 - syscall_abis_64 += riscv newstat rlimit memfd_secret 4 + syscall_abis_64 += riscv rlimit memfd_secret
+6 -8
arch/riscv/kernel/cpufeature.c
··· 432 432 bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX); 433 433 for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) { 434 434 ext = riscv_get_isa_ext_data(bit); 435 - if (!ext) 436 - continue; 437 435 438 - if (ext->validate) { 436 + if (ext && ext->validate) { 439 437 ret = ext->validate(ext, resolved_isa); 440 438 if (ret == -EPROBE_DEFER) { 441 439 loop = true; 442 440 continue; 443 441 } else if (ret) { 444 442 /* Disable the extension entirely */ 445 - clear_bit(ext->id, source_isa); 443 + clear_bit(bit, source_isa); 446 444 continue; 447 445 } 448 446 } 449 447 450 - set_bit(ext->id, resolved_isa); 448 + set_bit(bit, resolved_isa); 451 449 /* No need to keep it in source isa now that it is enabled */ 452 - clear_bit(ext->id, source_isa); 450 + clear_bit(bit, source_isa); 453 451 454 452 /* Single letter extensions get set in hwcap */ 455 - if (ext->id < RISCV_ISA_EXT_BASE) 456 - *this_hwcap |= isa2hwcap[ext->id]; 453 + if (bit < RISCV_ISA_EXT_BASE) 454 + *this_hwcap |= isa2hwcap[bit]; 457 455 } 458 456 } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); 459 457 }
+1 -1
arch/riscv/kernel/sbi-ipi.c
··· 71 71 * the masking/unmasking of virtual IPIs is done 72 72 * via generic IPI-Mux 73 73 */ 74 - cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 74 + cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, 75 75 "irqchip/sbi-ipi:starting", 76 76 sbi_ipi_starting_cpu, NULL); 77 77
+9 -8
arch/riscv/mm/fault.c
··· 61 61 62 62 static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) 63 63 { 64 + if (!user_mode(regs)) { 65 + no_context(regs, addr); 66 + return; 67 + } 68 + 64 69 if (fault & VM_FAULT_OOM) { 65 70 /* 66 71 * We ran out of memory, call the OOM killer, and return the userspace 67 72 * (which will retry the fault, or kill us if we got oom-killed). 68 73 */ 69 - if (!user_mode(regs)) { 70 - no_context(regs, addr); 71 - return; 72 - } 73 74 pagefault_out_of_memory(); 74 75 return; 75 76 } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) { 76 77 /* Kernel mode? Handle exceptions or die */ 77 - if (!user_mode(regs)) { 78 - no_context(regs, addr); 79 - return; 80 - } 81 78 do_trap(regs, SIGBUS, BUS_ADRERR, addr); 82 79 return; 80 + } else if (fault & VM_FAULT_SIGSEGV) { 81 + do_trap(regs, SIGSEGV, SEGV_MAPERR, addr); 82 + return; 83 83 } 84 + 84 85 BUG(); 85 86 } 86 87
+11 -4
arch/riscv/mm/init.c
··· 234 234 */ 235 235 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); 236 236 237 - phys_ram_end = memblock_end_of_DRAM(); 238 - 239 237 /* 240 238 * Make sure we align the start of the memory on a PMD boundary so that 241 239 * at worst, we map the linear mapping with PMD mappings. ··· 247 249 */ 248 250 if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) 249 251 kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; 252 + 253 + /* 254 + * The size of the linear page mapping may restrict the amount of 255 + * usable RAM. 256 + */ 257 + if (IS_ENABLED(CONFIG_64BIT)) { 258 + max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE; 259 + memblock_cap_memory_range(phys_ram_base, 260 + max_mapped_addr - phys_ram_base); 261 + } 250 262 251 263 /* 252 264 * Reserve physical address space that would be mapped to virtual ··· 274 266 memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr); 275 267 } 276 268 269 + phys_ram_end = memblock_end_of_DRAM(); 277 270 min_low_pfn = PFN_UP(phys_ram_base); 278 271 max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); 279 272 high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); ··· 1293 1284 if (start <= __pa(PAGE_OFFSET) && 1294 1285 __pa(PAGE_OFFSET) < end) 1295 1286 start = __pa(PAGE_OFFSET); 1296 - if (end >= __pa(PAGE_OFFSET) + memory_limit) 1297 - end = __pa(PAGE_OFFSET) + memory_limit; 1298 1287 1299 1288 create_linear_mapping_range(start, end, 0, NULL); 1300 1289 }
+2
arch/riscv/purgatory/entry.S
··· 7 7 * Author: Li Zhengyu (lizhengyu3@huawei.com) 8 8 * 9 9 */ 10 + #include <asm/asm.h> 10 11 #include <linux/linkage.h> 11 12 12 13 .text ··· 35 34 36 35 .data 37 36 37 + .align LGREG 38 38 SYM_DATA(riscv_kernel_entry, .quad 0) 39 39 40 40 .end
arch/s390/kernel/alternative.h
+1 -1
arch/s390/kernel/fpu.c
··· 113 113 int mask; 114 114 115 115 if (flags & KERNEL_FPC) 116 - fpu_lfpc(&state->fpc); 116 + fpu_lfpc_safe(&state->fpc); 117 117 if (!cpu_has_vx()) { 118 118 if (flags & KERNEL_VXR_V0V7) 119 119 load_fp_regs_vx(state->vxrs);
+9 -8
arch/s390/kernel/vmlinux.lds.S
··· 59 59 } :text = 0x0700 60 60 61 61 RO_DATA(PAGE_SIZE) 62 - .data.rel.ro : { 63 - *(.data.rel.ro .data.rel.ro.*) 64 - } 65 - .got : { 66 - __got_start = .; 67 - *(.got) 68 - __got_end = .; 69 - } 70 62 71 63 . = ALIGN(PAGE_SIZE); 72 64 _sdata = .; /* Start of data section */ ··· 71 79 } :data 72 80 . = ALIGN(PAGE_SIZE); 73 81 __end_ro_after_init = .; 82 + 83 + .data.rel.ro : { 84 + *(.data.rel.ro .data.rel.ro.*) 85 + } 86 + .got : { 87 + __got_start = .; 88 + *(.got) 89 + __got_end = .; 90 + } 74 91 75 92 RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE) 76 93 .data.rel : {
+79 -61
arch/s390/mm/dump_pagetables.c
··· 3 3 #include <linux/ptdump.h> 4 4 #include <linux/seq_file.h> 5 5 #include <linux/debugfs.h> 6 + #include <linux/sort.h> 6 7 #include <linux/mm.h> 7 8 #include <linux/kfence.h> 8 9 #include <linux/kasan.h> ··· 16 15 static unsigned long max_addr; 17 16 18 17 struct addr_marker { 18 + int is_start; 19 19 unsigned long start_address; 20 20 const char *name; 21 21 }; 22 22 23 23 enum address_markers_idx { 24 - IDENTITY_BEFORE_NR = 0, 25 - IDENTITY_BEFORE_END_NR, 24 + KVA_NR = 0, 25 + LOWCORE_START_NR, 26 + LOWCORE_END_NR, 26 27 AMODE31_START_NR, 27 28 AMODE31_END_NR, 28 29 KERNEL_START_NR, ··· 33 30 KFENCE_START_NR, 34 31 KFENCE_END_NR, 35 32 #endif 36 - IDENTITY_AFTER_NR, 37 - IDENTITY_AFTER_END_NR, 33 + IDENTITY_START_NR, 34 + IDENTITY_END_NR, 38 35 VMEMMAP_NR, 39 36 VMEMMAP_END_NR, 40 37 VMALLOC_NR, ··· 62 59 }; 63 60 64 61 static struct addr_marker address_markers[] = { 65 - [IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"}, 66 - [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"}, 67 - [AMODE31_START_NR] = {0, "Amode31 Area Start"}, 68 - [AMODE31_END_NR] = {0, "Amode31 Area End"}, 69 - [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"}, 70 - [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"}, 62 + [KVA_NR] = {0, 0, "Kernel Virtual Address Space"}, 63 + [LOWCORE_START_NR] = {1, 0, "Lowcore Start"}, 64 + [LOWCORE_END_NR] = {0, 0, "Lowcore End"}, 65 + [IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"}, 66 + [IDENTITY_END_NR] = {0, 0, "Identity Mapping End"}, 67 + [AMODE31_START_NR] = {1, 0, "Amode31 Area Start"}, 68 + [AMODE31_END_NR] = {0, 0, "Amode31 Area End"}, 69 + [KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"}, 70 + [KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"}, 71 71 #ifdef CONFIG_KFENCE 72 - [KFENCE_START_NR] = {0, "KFence Pool Start"}, 73 - [KFENCE_END_NR] = {0, "KFence Pool End"}, 72 + [KFENCE_START_NR] = {1, 0, "KFence Pool Start"}, 73 + [KFENCE_END_NR] = {0, 
0, "KFence Pool End"}, 74 74 #endif 75 - [IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"}, 76 - [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"}, 77 - [VMEMMAP_NR] = {0, "vmemmap Area Start"}, 78 - [VMEMMAP_END_NR] = {0, "vmemmap Area End"}, 79 - [VMALLOC_NR] = {0, "vmalloc Area Start"}, 80 - [VMALLOC_END_NR] = {0, "vmalloc Area End"}, 75 + [VMEMMAP_NR] = {1, 0, "vmemmap Area Start"}, 76 + [VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"}, 77 + [VMALLOC_NR] = {1, 0, "vmalloc Area Start"}, 78 + [VMALLOC_END_NR] = {0, 0, "vmalloc Area End"}, 81 79 #ifdef CONFIG_KMSAN 82 - [KMSAN_VMALLOC_SHADOW_START_NR] = {0, "Kmsan vmalloc Shadow Start"}, 83 - [KMSAN_VMALLOC_SHADOW_END_NR] = {0, "Kmsan vmalloc Shadow End"}, 84 - [KMSAN_VMALLOC_ORIGIN_START_NR] = {0, "Kmsan vmalloc Origins Start"}, 85 - [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, "Kmsan vmalloc Origins End"}, 86 - [KMSAN_MODULES_SHADOW_START_NR] = {0, "Kmsan Modules Shadow Start"}, 87 - [KMSAN_MODULES_SHADOW_END_NR] = {0, "Kmsan Modules Shadow End"}, 88 - [KMSAN_MODULES_ORIGIN_START_NR] = {0, "Kmsan Modules Origins Start"}, 89 - [KMSAN_MODULES_ORIGIN_END_NR] = {0, "Kmsan Modules Origins End"}, 80 + [KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"}, 81 + [KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"}, 82 + [KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"}, 83 + [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"}, 84 + [KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"}, 85 + [KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"}, 86 + [KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"}, 87 + [KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"}, 90 88 #endif 91 - [MODULES_NR] = {0, "Modules Area Start"}, 92 - [MODULES_END_NR] = {0, "Modules Area End"}, 93 - [ABS_LOWCORE_NR] = {0, "Lowcore Area Start"}, 94 - [ABS_LOWCORE_END_NR] = {0, "Lowcore Area End"}, 95 - 
[MEMCPY_REAL_NR] = {0, "Real Memory Copy Area Start"}, 96 - [MEMCPY_REAL_END_NR] = {0, "Real Memory Copy Area End"}, 89 + [MODULES_NR] = {1, 0, "Modules Area Start"}, 90 + [MODULES_END_NR] = {0, 0, "Modules Area End"}, 91 + [ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"}, 92 + [ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"}, 93 + [MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"}, 94 + [MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"}, 97 95 #ifdef CONFIG_KASAN 98 - [KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"}, 99 - [KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"}, 96 + [KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"}, 97 + [KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"}, 100 98 #endif 101 - { -1, NULL } 99 + {1, -1UL, NULL} 102 100 }; 103 101 104 102 struct pg_state { ··· 167 163 st->wx_pages += (addr - st->start_address) / PAGE_SIZE; 168 164 } 169 165 166 + static void note_page_update_state(struct pg_state *st, unsigned long addr, unsigned int prot, int level) 167 + { 168 + struct seq_file *m = st->seq; 169 + 170 + while (addr >= st->marker[1].start_address) { 171 + st->marker++; 172 + pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); 173 + } 174 + st->start_address = addr; 175 + st->current_prot = prot; 176 + st->level = level; 177 + } 178 + 170 179 static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val) 171 180 { 172 181 int width = sizeof(unsigned long) * 2; ··· 203 186 addr = max_addr; 204 187 if (st->level == -1) { 205 188 pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); 206 - st->start_address = addr; 207 - st->current_prot = prot; 208 - st->level = level; 189 + note_page_update_state(st, addr, prot, level); 209 190 } else if (prot != st->current_prot || level != st->level || 210 191 addr >= st->marker[1].start_address) { 211 192 note_prot_wx(st, addr); ··· 217 202 } 218 203 pt_dump_seq_printf(m, "%9lu%c ", 
delta, *unit); 219 204 print_prot(m, st->current_prot, st->level); 220 - while (addr >= st->marker[1].start_address) { 221 - st->marker++; 222 - pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); 223 - } 224 - st->start_address = addr; 225 - st->current_prot = prot; 226 - st->level = level; 205 + note_page_update_state(st, addr, prot, level); 227 206 } 228 207 } 229 208 ··· 289 280 DEFINE_SHOW_ATTRIBUTE(ptdump); 290 281 #endif /* CONFIG_PTDUMP_DEBUGFS */ 291 282 292 - /* 293 - * Heapsort from lib/sort.c is not a stable sorting algorithm, do a simple 294 - * insertion sort to preserve the original order of markers with the same 295 - * start address. 296 - */ 297 - static void sort_address_markers(void) 283 + static int ptdump_cmp(const void *a, const void *b) 298 284 { 299 - struct addr_marker tmp; 300 - int i, j; 285 + const struct addr_marker *ama = a; 286 + const struct addr_marker *amb = b; 301 287 302 - for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) { 303 - tmp = address_markers[i]; 304 - for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--) 305 - address_markers[j + 1] = address_markers[j]; 306 - address_markers[j + 1] = tmp; 307 - } 288 + if (ama->start_address > amb->start_address) 289 + return 1; 290 + if (ama->start_address < amb->start_address) 291 + return -1; 292 + /* 293 + * If the start addresses of two markers are identical consider the 294 + * marker which defines the start of an area higher than the one which 295 + * defines the end of an area. This keeps pairs of markers sorted. 
296 + */ 297 + if (ama->is_start) 298 + return 1; 299 + if (amb->is_start) 300 + return -1; 301 + return 0; 308 302 } 309 303 310 304 static int pt_dump_init(void) ··· 315 303 #ifdef CONFIG_KFENCE 316 304 unsigned long kfence_start = (unsigned long)__kfence_pool; 317 305 #endif 306 + unsigned long lowcore = (unsigned long)get_lowcore(); 307 + 318 308 /* 319 309 * Figure out the maximum virtual address being accessible with the 320 310 * kernel ASCE. We need this to keep the page table walker functions ··· 324 310 */ 325 311 max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2; 326 312 max_addr = 1UL << (max_addr * 11 + 31); 327 - address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size; 313 + address_markers[LOWCORE_START_NR].start_address = lowcore; 314 + address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore); 315 + address_markers[IDENTITY_START_NR].start_address = __identity_base; 316 + address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size; 328 317 address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31; 329 318 address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31; 330 319 address_markers[MODULES_NR].start_address = MODULES_VADDR; ··· 354 337 address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START; 355 338 address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END; 356 339 #endif 357 - sort_address_markers(); 340 + sort(address_markers, ARRAY_SIZE(address_markers) - 1, 341 + sizeof(address_markers[0]), ptdump_cmp, NULL); 358 342 #ifdef CONFIG_PTDUMP_DEBUGFS 359 343 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); 360 344 #endif /* CONFIG_PTDUMP_DEBUGFS */
+2 -7
arch/s390/mm/init.c
··· 108 108 { 109 109 unsigned long size = __end_ro_after_init - __start_ro_after_init; 110 110 111 + if (MACHINE_HAS_NX) 112 + system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT); 111 113 __set_memory_ro(__start_ro_after_init, __end_ro_after_init); 112 114 pr_info("Write protected read-only-after-init data: %luk\n", size >> 10); 113 115 } ··· 170 168 /* this will put all low memory onto the freelists */ 171 169 memblock_free_all(); 172 170 setup_zero_pages(); /* Setup zeroed pages. */ 173 - } 174 - 175 - void free_initmem(void) 176 - { 177 - set_memory_rwnx((unsigned long)_sinittext, 178 - (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT); 179 - free_initmem_default(POISON_FREE_INITMEM); 180 171 } 181 172 182 173 unsigned long memory_block_size_bytes(void)
+2 -11
arch/s390/mm/vmem.c
··· 661 661 { 662 662 __set_memory_rox(_stext, _etext); 663 663 __set_memory_ro(_etext, __end_rodata); 664 - __set_memory_rox(_sinittext, _einittext); 665 664 __set_memory_rox(__stext_amode31, __etext_amode31); 666 665 /* 667 666 * If the BEAR-enhancement facility is not installed the first ··· 669 670 */ 670 671 if (!static_key_enabled(&cpu_has_bear)) 671 672 set_memory_x(0, 1); 672 - if (debug_pagealloc_enabled()) { 673 - /* 674 - * Use RELOC_HIDE() as long as __va(0) translates to NULL, 675 - * since performing pointer arithmetic on a NULL pointer 676 - * has undefined behavior and generates compiler warnings. 677 - */ 678 - __set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size)); 679 - } 680 - if (MACHINE_HAS_NX) 681 - system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT); 673 + if (debug_pagealloc_enabled()) 674 + __set_memory_4k(__va(0), __va(0) + ident_map_size); 682 675 pr_info("Write protected kernel read-only data: %luk\n", 683 676 (unsigned long)(__end_rodata - _stext) >> 10); 684 677 }
+2
arch/um/drivers/mconsole_user.c
··· 71 71 return NULL; 72 72 } 73 73 74 + #ifndef MIN 74 75 #define MIN(a,b) ((a)<(b) ? (a):(b)) 76 + #endif 75 77 76 78 #define STRINGX(x) #x 77 79 #define STRING(x) STRINGX(x)
+1 -1
arch/x86/coco/sev/core.c
··· 163 163 */ 164 164 use_cas : 1, 165 165 166 - __reserved : 62; 166 + __reserved : 61; 167 167 }; 168 168 169 169 static struct sev_config sev_cfg __read_mostly;
+1 -1
arch/x86/entry/syscalls/syscall_64.tbl
··· 344 344 332 common statx sys_statx 345 345 333 common io_pgetevents sys_io_pgetevents 346 346 334 common rseq sys_rseq 347 + 335 common uretprobe sys_uretprobe 347 348 # don't use numbers 387 through 423, add new calls after the last 348 349 # 'common' entry 349 350 424 common pidfd_send_signal sys_pidfd_send_signal ··· 386 385 460 common lsm_set_self_attr sys_lsm_set_self_attr 387 386 461 common lsm_list_modules sys_lsm_list_modules 388 387 462 common mseal sys_mseal 389 - 467 common uretprobe sys_uretprobe 390 388 391 389 # 392 390 # Due to a historical design error, certain syscalls are numbered differently
+12 -10
arch/x86/events/core.c
··· 1520 1520 void perf_event_print_debug(void) 1521 1521 { 1522 1522 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; 1523 + unsigned long *cntr_mask, *fixed_cntr_mask; 1524 + struct event_constraint *pebs_constraints; 1525 + struct cpu_hw_events *cpuc; 1523 1526 u64 pebs, debugctl; 1524 - int cpu = smp_processor_id(); 1525 - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 1526 - unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask); 1527 - unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); 1528 - struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); 1529 - unsigned long flags; 1530 - int idx; 1527 + int cpu, idx; 1528 + 1529 + guard(irqsave)(); 1530 + 1531 + cpu = smp_processor_id(); 1532 + cpuc = &per_cpu(cpu_hw_events, cpu); 1533 + cntr_mask = hybrid(cpuc->pmu, cntr_mask); 1534 + fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); 1535 + pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); 1531 1536 1532 1537 if (!*(u64 *)cntr_mask) 1533 1538 return; 1534 - 1535 - local_irq_save(flags); 1536 1539 1537 1540 if (x86_pmu.version >= 2) { 1538 1541 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); ··· 1580 1577 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", 1581 1578 cpu, idx, pmc_count); 1582 1579 } 1583 - local_irq_restore(flags); 1584 1580 } 1585 1581 1586 1582 void x86_pmu_stop(struct perf_event *event, int flags)
+3 -2
arch/x86/events/intel/cstate.c
··· 64 64 * perf code: 0x00 65 65 * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL, 66 66 * KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL, 67 - * RPL,SPR,MTL,ARL,LNL 67 + * RPL,SPR,MTL,ARL,LNL,SRF 68 68 * Scope: Package (physical package) 69 69 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. 70 70 * perf code: 0x01 ··· 693 693 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 694 694 BIT(PERF_CSTATE_CORE_C6_RES), 695 695 696 - .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES), 696 + .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 697 + BIT(PERF_CSTATE_PKG_C6_RES), 697 698 698 699 .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), 699 700 };
+4
arch/x86/include/asm/cmdline.h
··· 2 2 #ifndef _ASM_X86_CMDLINE_H 3 3 #define _ASM_X86_CMDLINE_H 4 4 5 + #include <asm/setup.h> 6 + 7 + extern char builtin_cmdline[COMMAND_LINE_SIZE]; 8 + 5 9 int cmdline_find_option_bool(const char *cmdline_ptr, const char *option); 6 10 int cmdline_find_option(const char *cmdline_ptr, const char *option, 7 11 char *buffer, int bufsize);
+1
arch/x86/include/asm/kvm_host.h
··· 1305 1305 u8 vm_type; 1306 1306 bool has_private_mem; 1307 1307 bool has_protected_state; 1308 + bool pre_fault_allowed; 1308 1309 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 1309 1310 struct list_head active_mmu_pages; 1310 1311 struct list_head zapped_obsolete_pages;
+7 -5
arch/x86/include/asm/qspinlock.h
··· 66 66 67 67 #ifdef CONFIG_PARAVIRT 68 68 /* 69 - * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack. 69 + * virt_spin_lock_key - disables by default the virt_spin_lock() hijack. 70 70 * 71 - * Native (and PV wanting native due to vCPU pinning) should disable this key. 72 - * It is done in this backwards fashion to only have a single direction change, 73 - * which removes ordering between native_pv_spin_init() and HV setup. 71 + * Native (and PV wanting native due to vCPU pinning) should keep this key 72 + * disabled. Native does not touch the key. 73 + * 74 + * When in a guest then native_pv_lock_init() enables the key first and 75 + * KVM/XEN might conditionally disable it later in the boot process again. 74 76 */ 75 - DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key); 77 + DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key); 76 78 77 79 /* 78 80 * Shortcut for the queued_spin_lock_slowpath() function that allows
+1 -1
arch/x86/kernel/acpi/madt_wakeup.c
··· 19 19 static u64 acpi_mp_wake_mailbox_paddr __ro_after_init; 20 20 21 21 /* Virtual address of the Multiprocessor Wakeup Structure mailbox */ 22 - static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init; 22 + static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox; 23 23 24 24 static u64 acpi_mp_pgd __ro_after_init; 25 25 static u64 acpi_mp_reset_vector_paddr __ro_after_init;
+1 -1
arch/x86/kernel/cpu/amd.c
··· 462 462 switch (c->x86_model) { 463 463 case 0x00 ... 0x2f: 464 464 case 0x40 ... 0x4f: 465 - case 0x70 ... 0x7f: 465 + case 0x60 ... 0x7f: 466 466 setup_force_cpu_cap(X86_FEATURE_ZEN5); 467 467 break; 468 468 default:
+4 -2
arch/x86/kernel/cpu/aperfmperf.c
··· 306 306 WARN_ON_ONCE(1); 307 307 return; 308 308 } 309 - static_branch_enable(&arch_scale_freq_key); 309 + static_branch_enable_cpuslocked(&arch_scale_freq_key); 310 310 register_freq_invariance_syscore_ops(); 311 311 pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio); 312 312 } ··· 323 323 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 324 324 return; 325 325 326 - if (intel_set_max_freq_ratio()) 326 + if (intel_set_max_freq_ratio()) { 327 + guard(cpus_read_lock)(); 327 328 freq_invariance_enable(); 329 + } 328 330 } 329 331 330 332 static void disable_freq_invariance_workfn(struct work_struct *work)
+1 -1
arch/x86/kernel/cpu/mtrr/mtrr.c
··· 609 609 { 610 610 int first_cpu; 611 611 612 - if (!mtrr_enabled()) 612 + if (!mtrr_enabled() || !mtrr_state.have_fixed) 613 613 return; 614 614 615 615 first_cpu = cpumask_first(cpu_online_mask);
+3 -4
arch/x86/kernel/paravirt.c
··· 51 51 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text); 52 52 #endif 53 53 54 - DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); 54 + DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); 55 55 56 56 void __init native_pv_lock_init(void) 57 57 { 58 - if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && 59 - !boot_cpu_has(X86_FEATURE_HYPERVISOR)) 60 - static_branch_disable(&virt_spin_lock_key); 58 + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 59 + static_branch_enable(&virt_spin_lock_key); 61 60 } 62 61 63 62 static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
+1 -1
arch/x86/kernel/setup.c
··· 164 164 165 165 static char __initdata command_line[COMMAND_LINE_SIZE]; 166 166 #ifdef CONFIG_CMDLINE_BOOL 167 - static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; 167 + char builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; 168 168 bool builtin_cmdline_added __ro_after_init; 169 169 #endif 170 170
+2 -2
arch/x86/kvm/Kconfig
··· 141 141 depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m) 142 142 select ARCH_HAS_CC_PLATFORM 143 143 select KVM_GENERIC_PRIVATE_MEM 144 - select HAVE_KVM_GMEM_PREPARE 145 - select HAVE_KVM_GMEM_INVALIDATE 144 + select HAVE_KVM_ARCH_GMEM_PREPARE 145 + select HAVE_KVM_ARCH_GMEM_INVALIDATE 146 146 help 147 147 Provides support for launching Encrypted VMs (SEV) and Encrypted VMs 148 148 with Encrypted State (SEV-ES) on AMD processors.
+1 -1
arch/x86/kvm/lapic.c
··· 1743 1743 s64 min_period = min_timer_period_us * 1000LL; 1744 1744 1745 1745 if (apic->lapic_timer.period < min_period) { 1746 - pr_info_ratelimited( 1746 + pr_info_once( 1747 1747 "vcpu %i: requested %lld ns " 1748 1748 "lapic timer period limited to %lld ns\n", 1749 1749 apic->vcpu->vcpu_id,
+5 -2
arch/x86/kvm/mmu/mmu.c
··· 4335 4335 if (req_max_level) 4336 4336 max_level = min(max_level, req_max_level); 4337 4337 4338 - return req_max_level; 4338 + return max_level; 4339 4339 } 4340 4340 4341 4341 static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu, ··· 4742 4742 u8 level = PG_LEVEL_4K; 4743 4743 u64 end; 4744 4744 int r; 4745 + 4746 + if (!vcpu->kvm->arch.pre_fault_allowed) 4747 + return -EOPNOTSUPP; 4745 4748 4746 4749 /* 4747 4750 * reload is efficient when called repeatedly, so we can do it on ··· 7513 7510 const unsigned long end = start + KVM_PAGES_PER_HPAGE(level); 7514 7511 7515 7512 if (level == PG_LEVEL_2M) 7516 - return kvm_range_has_memory_attributes(kvm, start, end, attrs); 7513 + return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs); 7517 7514 7518 7515 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) { 7519 7516 if (hugepage_test_mixed(slot, gfn, level - 1) ||
+9 -8
arch/x86/kvm/svm/sev.c
··· 2279 2279 bool assigned; 2280 2280 int level; 2281 2281 2282 - if (!kvm_mem_is_private(kvm, gfn)) { 2283 - pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n", 2284 - __func__, gfn); 2285 - ret = -EINVAL; 2286 - goto err; 2287 - } 2288 - 2289 2282 ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level); 2290 2283 if (ret || assigned) { 2291 2284 pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n", 2292 2285 __func__, gfn, ret, assigned); 2293 - ret = -EINVAL; 2286 + ret = ret ? -EINVAL : -EEXIST; 2294 2287 goto err; 2295 2288 } 2296 2289 ··· 2541 2548 memcpy(data->host_data, params.host_data, KVM_SEV_SNP_FINISH_DATA_SIZE); 2542 2549 data->gctx_paddr = __psp_pa(sev->snp_context); 2543 2550 ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error); 2551 + 2552 + /* 2553 + * Now that there will be no more SNP_LAUNCH_UPDATE ioctls, private pages 2554 + * can be given to the guest simply by marking the RMP entry as private. 2555 + * This can happen on first access and also with KVM_PRE_FAULT_MEMORY. 2556 + */ 2557 + if (!ret) 2558 + kvm->arch.pre_fault_allowed = true; 2544 2559 2545 2560 kfree(id_auth); 2546 2561
+1
arch/x86/kvm/svm/svm.c
··· 4949 4949 to_kvm_sev_info(kvm)->need_init = true; 4950 4950 4951 4951 kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM); 4952 + kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem; 4952 4953 } 4953 4954 4954 4955 if (!pause_filter_count || !pause_filter_thresh)
+5 -7
arch/x86/kvm/x86.c
··· 12646 12646 kvm->arch.vm_type = type; 12647 12647 kvm->arch.has_private_mem = 12648 12648 (type == KVM_X86_SW_PROTECTED_VM); 12649 + /* Decided by the vendor code for other VM types. */ 12650 + kvm->arch.pre_fault_allowed = 12651 + type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM; 12649 12652 12650 12653 ret = kvm_page_track_init(kvm); 12651 12654 if (ret) ··· 13644 13641 } 13645 13642 EXPORT_SYMBOL_GPL(kvm_arch_no_poll); 13646 13643 13647 - #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE 13648 - bool kvm_arch_gmem_prepare_needed(struct kvm *kvm) 13649 - { 13650 - return kvm->arch.vm_type == KVM_X86_SNP_VM; 13651 - } 13652 - 13644 + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE 13653 13645 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order) 13654 13646 { 13655 13647 return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order); 13656 13648 } 13657 13649 #endif 13658 13650 13659 - #ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE 13651 + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE 13660 13652 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) 13661 13653 { 13662 13654 kvm_x86_call(gmem_invalidate)(start, end);
+18 -7
arch/x86/lib/cmdline.c
··· 207 207 208 208 int cmdline_find_option_bool(const char *cmdline, const char *option) 209 209 { 210 - if (IS_ENABLED(CONFIG_CMDLINE_BOOL)) 211 - WARN_ON_ONCE(!builtin_cmdline_added); 210 + int ret; 212 211 213 - return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option); 212 + ret = __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option); 213 + if (ret > 0) 214 + return ret; 215 + 216 + if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added) 217 + return __cmdline_find_option_bool(builtin_cmdline, COMMAND_LINE_SIZE, option); 218 + 219 + return ret; 214 220 } 215 221 216 222 int cmdline_find_option(const char *cmdline, const char *option, char *buffer, 217 223 int bufsize) 218 224 { 219 - if (IS_ENABLED(CONFIG_CMDLINE_BOOL)) 220 - WARN_ON_ONCE(!builtin_cmdline_added); 225 + int ret; 221 226 222 - return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, 223 - buffer, bufsize); 227 + ret = __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize); 228 + if (ret > 0) 229 + return ret; 230 + 231 + if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added) 232 + return __cmdline_find_option(builtin_cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize); 233 + 234 + return ret; 224 235 }
+3 -1
arch/x86/lib/getuser.S
··· 88 88 EXPORT_SYMBOL(__get_user_4) 89 89 90 90 SYM_FUNC_START(__get_user_8) 91 + #ifndef CONFIG_X86_64 92 + xor %ecx,%ecx 93 + #endif 91 94 check_range size=8 92 95 ASM_STAC 93 96 #ifdef CONFIG_X86_64 94 97 UACCESS movq (%_ASM_AX),%rdx 95 98 #else 96 - xor %ecx,%ecx 97 99 UACCESS movl (%_ASM_AX),%edx 98 100 UACCESS movl 4(%_ASM_AX),%ecx 99 101 #endif
+32 -19
arch/x86/mm/pti.c
··· 241 241 * 242 242 * Returns a pointer to a PTE on success, or NULL on failure. 243 243 */ 244 - static pte_t *pti_user_pagetable_walk_pte(unsigned long address) 244 + static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text) 245 245 { 246 246 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); 247 247 pmd_t *pmd; ··· 251 251 if (!pmd) 252 252 return NULL; 253 253 254 - /* We can't do anything sensible if we hit a large mapping. */ 254 + /* Large PMD mapping found */ 255 255 if (pmd_leaf(*pmd)) { 256 - WARN_ON(1); 257 - return NULL; 256 + /* Clear the PMD if we hit a large mapping from the first round */ 257 + if (late_text) { 258 + set_pmd(pmd, __pmd(0)); 259 + } else { 260 + WARN_ON_ONCE(1); 261 + return NULL; 262 + } 258 263 } 259 264 260 265 if (pmd_none(*pmd)) { ··· 288 283 if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) 289 284 return; 290 285 291 - target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); 286 + target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false); 292 287 if (WARN_ON(!target_pte)) 293 288 return; 294 289 ··· 306 301 307 302 static void 308 303 pti_clone_pgtable(unsigned long start, unsigned long end, 309 - enum pti_clone_level level) 304 + enum pti_clone_level level, bool late_text) 310 305 { 311 306 unsigned long addr; 312 307 ··· 379 374 */ 380 375 *target_pmd = *pmd; 381 376 382 - addr += PMD_SIZE; 377 + addr = round_up(addr + 1, PMD_SIZE); 383 378 384 379 } else if (level == PTI_CLONE_PTE) { 385 380 386 381 /* Walk the page-table down to the pte level */ 387 382 pte = pte_offset_kernel(pmd, addr); 388 383 if (pte_none(*pte)) { 389 - addr += PAGE_SIZE; 384 + addr = round_up(addr + 1, PAGE_SIZE); 390 385 continue; 391 386 } 392 387 ··· 395 390 return; 396 391 397 392 /* Allocate PTE in the user page-table */ 398 - target_pte = pti_user_pagetable_walk_pte(addr); 393 + target_pte = pti_user_pagetable_walk_pte(addr, late_text); 399 394 if (WARN_ON(!target_pte)) 400 395 return; 401 396 ··· 406 
401 /* Clone the PTE */ 407 402 *target_pte = *pte; 408 403 409 - addr += PAGE_SIZE; 404 + addr = round_up(addr + 1, PAGE_SIZE); 410 405 411 406 } else { 412 407 BUG(); ··· 457 452 phys_addr_t pa = per_cpu_ptr_to_phys((void *)va); 458 453 pte_t *target_pte; 459 454 460 - target_pte = pti_user_pagetable_walk_pte(va); 455 + target_pte = pti_user_pagetable_walk_pte(va, false); 461 456 if (WARN_ON(!target_pte)) 462 457 return; 463 458 ··· 480 475 start = CPU_ENTRY_AREA_BASE; 481 476 end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES); 482 477 483 - pti_clone_pgtable(start, end, PTI_CLONE_PMD); 478 + pti_clone_pgtable(start, end, PTI_CLONE_PMD, false); 484 479 } 485 480 #endif /* CONFIG_X86_64 */ 486 481 ··· 497 492 /* 498 493 * Clone the populated PMDs of the entry text and force it RO. 499 494 */ 500 - static void pti_clone_entry_text(void) 495 + static void pti_clone_entry_text(bool late) 501 496 { 502 497 pti_clone_pgtable((unsigned long) __entry_text_start, 503 498 (unsigned long) __entry_text_end, 504 - PTI_CLONE_PMD); 499 + PTI_LEVEL_KERNEL_IMAGE, late); 505 500 } 506 501 507 502 /* ··· 576 571 * pti_set_kernel_image_nonglobal() did to clear the 577 572 * global bit. 578 573 */ 579 - pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE); 574 + pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false); 580 575 581 576 /* 582 577 * pti_clone_pgtable() will set the global bit in any PMDs ··· 643 638 644 639 /* Undo all global bits from the init pagetables in head_64.S: */ 645 640 pti_set_kernel_image_nonglobal(); 641 + 646 642 /* Replace some of the global bits just for shared entry text: */ 647 - pti_clone_entry_text(); 643 + /* 644 + * This is very early in boot. Device and Late initcalls can do 645 + * modprobe before free_initmem() and mark_readonly(). This 646 + * pti_clone_entry_text() allows those user-mode-helpers to function, 647 + * but notably the text is still RW. 
648 + */ 649 + pti_clone_entry_text(false); 648 650 pti_setup_espfix64(); 649 651 pti_setup_vsyscall(); 650 652 } ··· 668 656 if (!boot_cpu_has(X86_FEATURE_PTI)) 669 657 return; 670 658 /* 671 - * We need to clone everything (again) that maps parts of the 672 - * kernel image. 659 + * This is after free_initmem() (all initcalls are done) and we've done 660 + * mark_readonly(). Text is now NX which might've split some PMDs 661 + * relative to the early clone. 673 662 */ 674 - pti_clone_entry_text(); 663 + pti_clone_entry_text(true); 675 664 pti_clone_kernel_text(); 676 665 677 666 debug_checkwx_user();
-11
block/blk-throttle.c
··· 31 31 32 32 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) 33 33 34 - /* We measure latency for request size from <= 4k to >= 1M */ 35 - #define LATENCY_BUCKET_SIZE 9 36 - 37 - struct latency_bucket { 38 - unsigned long total_latency; /* ns / 1024 */ 39 - int samples; 40 - }; 41 - 42 34 struct throtl_data 43 35 { 44 36 /* service tree for active throtl groups */ ··· 107 115 108 116 return tg->iops[rw]; 109 117 } 110 - 111 - #define request_bucket_index(sectors) \ 112 - clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1) 113 118 114 119 /** 115 120 * throtl_log - log debug message via blktrace
+6 -9
drivers/android/binder.c
··· 1044 1044 } 1045 1045 1046 1046 /* Find the smallest unused descriptor the "slow way" */ 1047 - static u32 slow_desc_lookup_olocked(struct binder_proc *proc) 1047 + static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset) 1048 1048 { 1049 1049 struct binder_ref *ref; 1050 1050 struct rb_node *n; 1051 1051 u32 desc; 1052 1052 1053 - desc = 1; 1053 + desc = offset; 1054 1054 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) { 1055 1055 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1056 1056 if (ref->data.desc > desc) ··· 1071 1071 u32 *desc) 1072 1072 { 1073 1073 struct dbitmap *dmap = &proc->dmap; 1074 + unsigned int nbits, offset; 1074 1075 unsigned long *new, bit; 1075 - unsigned int nbits; 1076 1076 1077 1077 /* 0 is reserved for the context manager */ 1078 - if (node == proc->context->binder_context_mgr_node) { 1079 - *desc = 0; 1080 - return 0; 1081 - } 1078 + offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1; 1082 1079 1083 1080 if (!dbitmap_enabled(dmap)) { 1084 - *desc = slow_desc_lookup_olocked(proc); 1081 + *desc = slow_desc_lookup_olocked(proc, offset); 1085 1082 return 0; 1086 1083 } 1087 1084 1088 - if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) { 1085 + if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) { 1089 1086 *desc = bit; 1090 1087 return 0; 1091 1088 }
+1 -1
drivers/android/binder_alloc.c
··· 939 939 __free_page(alloc->pages[i].page_ptr); 940 940 page_count++; 941 941 } 942 - kvfree(alloc->pages); 943 942 } 944 943 spin_unlock(&alloc->lock); 944 + kvfree(alloc->pages); 945 945 if (alloc->mm) 946 946 mmdrop(alloc->mm); 947 947
+7 -15
drivers/android/dbitmap.h
··· 6 6 * 7 7 * Used by the binder driver to optimize the allocation of the smallest 8 8 * available descriptor ID. Each bit in the bitmap represents the state 9 - * of an ID, with the exception of BIT(0) which is used exclusively to 10 - * reference binder's context manager. 9 + * of an ID. 11 10 * 12 11 * A dbitmap can grow or shrink as needed. This part has been designed 13 12 * considering that users might need to briefly release their locks in ··· 57 58 if (bit < (dmap->nbits >> 2)) 58 59 return dmap->nbits >> 1; 59 60 60 - /* 61 - * Note that find_last_bit() returns dmap->nbits when no bits 62 - * are set. While this is technically not possible here since 63 - * BIT(0) is always set, this check is left for extra safety. 64 - */ 61 + /* find_last_bit() returns dmap->nbits when no bits are set. */ 65 62 if (bit == dmap->nbits) 66 63 return NBITS_MIN; 67 64 ··· 127 132 } 128 133 129 134 /* 130 - * Finds and sets the first zero bit in the bitmap. Upon success @bit 135 + * Finds and sets the next zero bit in the bitmap. Upon success @bit 131 136 * is populated with the index and 0 is returned. Otherwise, -ENOSPC 132 137 * is returned to indicate that a dbitmap_grow() is needed. 
133 138 */ 134 139 static inline int 135 - dbitmap_acquire_first_zero_bit(struct dbitmap *dmap, unsigned long *bit) 140 + dbitmap_acquire_next_zero_bit(struct dbitmap *dmap, unsigned long offset, 141 + unsigned long *bit) 136 142 { 137 143 unsigned long n; 138 144 139 - n = find_first_zero_bit(dmap->map, dmap->nbits); 145 + n = find_next_zero_bit(dmap->map, dmap->nbits, offset); 140 146 if (n == dmap->nbits) 141 147 return -ENOSPC; 142 148 ··· 150 154 static inline void 151 155 dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit) 152 156 { 153 - /* BIT(0) should always set for the context manager */ 154 - if (bit) 155 - clear_bit(bit, dmap->map); 157 + clear_bit(bit, dmap->map); 156 158 } 157 159 158 160 static inline int dbitmap_init(struct dbitmap *dmap) ··· 162 168 } 163 169 164 170 dmap->nbits = NBITS_MIN; 165 - /* BIT(0) is reserved for the context manager */ 166 - set_bit(0, dmap->map); 167 171 168 172 return 0; 169 173 }
+8 -5
drivers/base/core.c
··· 25 25 #include <linux/mutex.h> 26 26 #include <linux/pm_runtime.h> 27 27 #include <linux/netdevice.h> 28 + #include <linux/rcupdate.h> 28 29 #include <linux/sched/signal.h> 29 30 #include <linux/sched/mm.h> 30 31 #include <linux/string_helpers.h> ··· 2641 2640 static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 2642 2641 { 2643 2642 const struct device *dev = kobj_to_dev(kobj); 2643 + struct device_driver *driver; 2644 2644 int retval = 0; 2645 2645 2646 2646 /* add device node properties if present */ ··· 2670 2668 if (dev->type && dev->type->name) 2671 2669 add_uevent_var(env, "DEVTYPE=%s", dev->type->name); 2672 2670 2673 - if (dev->driver) 2674 - add_uevent_var(env, "DRIVER=%s", dev->driver->name); 2671 + /* Synchronize with module_remove_driver() */ 2672 + rcu_read_lock(); 2673 + driver = READ_ONCE(dev->driver); 2674 + if (driver) 2675 + add_uevent_var(env, "DRIVER=%s", driver->name); 2676 + rcu_read_unlock(); 2675 2677 2676 2678 /* Add common DT information about the device */ 2677 2679 of_device_uevent(dev, env); ··· 2745 2739 if (!env) 2746 2740 return -ENOMEM; 2747 2741 2748 - /* Synchronize with really_probe() */ 2749 - device_lock(dev); 2750 2742 /* let the kset specific function add its keys */ 2751 2743 retval = kset->uevent_ops->uevent(&dev->kobj, env); 2752 - device_unlock(dev); 2753 2744 if (retval) 2754 2745 goto out; 2755 2746
+4
drivers/base/module.c
··· 7 7 #include <linux/errno.h> 8 8 #include <linux/slab.h> 9 9 #include <linux/string.h> 10 + #include <linux/rcupdate.h> 10 11 #include "base.h" 11 12 12 13 static char *make_driver_name(const struct device_driver *drv) ··· 97 96 98 97 if (!drv) 99 98 return; 99 + 100 + /* Synchronize with dev_uevent() */ 101 + synchronize_rcu(); 100 102 101 103 sysfs_remove_link(&drv->p->kobj, "module"); 102 104
+2
drivers/bluetooth/Kconfig
··· 413 413 config BT_MTKSDIO 414 414 tristate "MediaTek HCI SDIO driver" 415 415 depends on MMC 416 + depends on USB || !BT_HCIBTUSB_MTK 416 417 select BT_MTK 417 418 help 418 419 MediaTek Bluetooth HCI SDIO driver. ··· 426 425 config BT_MTKUART 427 426 tristate "MediaTek HCI UART driver" 428 427 depends on SERIAL_DEV_BUS 428 + depends on USB || !BT_HCIBTUSB_MTK 429 429 select BT_MTK 430 430 help 431 431 MediaTek Bluetooth HCI UART driver.
+3
drivers/bluetooth/btintel.c
··· 3085 3085 btintel_set_dsm_reset_method(hdev, &ver_tlv); 3086 3086 3087 3087 err = btintel_bootloader_setup_tlv(hdev, &ver_tlv); 3088 + if (err) 3089 + goto exit_error; 3090 + 3088 3091 btintel_register_devcoredump_support(hdev); 3089 3092 btintel_print_fseq_info(hdev); 3090 3093 break;
+4 -1
drivers/bluetooth/btmtk.c
··· 437 437 } 438 438 EXPORT_SYMBOL_GPL(btmtk_process_coredump); 439 439 440 + #if IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) 440 441 static void btmtk_usb_wmt_recv(struct urb *urb) 441 442 { 442 443 struct hci_dev *hdev = urb->context; ··· 1263 1262 struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1264 1263 1265 1264 /* Stop urb anchor for iso data transmission */ 1266 - usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); 1265 + if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) 1266 + usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); 1267 1267 1268 1268 return 0; 1269 1269 } ··· 1489 1487 return 0; 1490 1488 } 1491 1489 EXPORT_SYMBOL_GPL(btmtk_usb_shutdown); 1490 + #endif 1492 1491 1493 1492 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 1494 1493 MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
+9 -10
drivers/bluetooth/hci_qca.c
··· 2160 2160 qcadev = serdev_device_get_drvdata(hu->serdev); 2161 2161 power = qcadev->bt_power; 2162 2162 2163 - if (power->pwrseq) { 2163 + if (power && power->pwrseq) { 2164 2164 pwrseq_power_off(power->pwrseq); 2165 2165 set_bit(QCA_BT_OFF, &qca->flags); 2166 2166 return; ··· 2185 2185 sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl); 2186 2186 bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state); 2187 2187 } 2188 - break; 2189 - 2190 - case QCA_QCA6390: 2191 - pwrseq_power_off(qcadev->bt_power->pwrseq); 2192 2188 break; 2193 2189 2194 2190 default: ··· 2412 2416 break; 2413 2417 2414 2418 case QCA_QCA6390: 2415 - qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, 2416 - "bluetooth"); 2417 - if (IS_ERR(qcadev->bt_power->pwrseq)) 2418 - return PTR_ERR(qcadev->bt_power->pwrseq); 2419 - break; 2419 + if (dev_of_node(&serdev->dev)) { 2420 + qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, 2421 + "bluetooth"); 2422 + if (IS_ERR(qcadev->bt_power->pwrseq)) 2423 + return PTR_ERR(qcadev->bt_power->pwrseq); 2424 + break; 2425 + } 2426 + fallthrough; 2420 2427 2421 2428 default: 2422 2429 qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+1
drivers/cache/Kconfig
··· 18 18 bool "StarFive StarLink Cache controller" 19 19 depends on RISCV 20 20 depends on ARCH_STARFIVE 21 + depends on 64BIT 21 22 select RISCV_DMA_NONCOHERENT 22 23 select RISCV_NONSTANDARD_CACHE_OPS 23 24 help
+1
drivers/char/ds1620.c
··· 421 421 module_init(ds1620_init); 422 422 module_exit(ds1620_exit); 423 423 424 + MODULE_DESCRIPTION("Dallas Semiconductor DS1620 thermometer driver"); 424 425 MODULE_LICENSE("GPL");
+1
drivers/char/nwbutton.c
··· 241 241 242 242 243 243 MODULE_AUTHOR("Alex Holden"); 244 + MODULE_DESCRIPTION("NetWinder button driver"); 244 245 MODULE_LICENSE("GPL"); 245 246 246 247 module_init(nwbutton_init);
+1
drivers/char/nwflash.c
··· 618 618 iounmap((void *)FLASH_BASE); 619 619 } 620 620 621 + MODULE_DESCRIPTION("NetWinder flash memory driver"); 621 622 MODULE_LICENSE("GPL"); 622 623 623 624 module_param(flashdebug, bool, 0644);
+1
drivers/cpufreq/intel_pstate.c
··· 3405 3405 */ 3406 3406 X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), 3407 3407 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), 3408 + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), 3408 3409 X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, 3409 3410 179, 64, 16)), 3410 3411 X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
-1
drivers/edac/skx_common.h
··· 45 45 #define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS) 46 46 #define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS) 47 47 48 - #define MAX(a, b) ((a) > (b) ? (a) : (b)) 49 48 #define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC) 50 49 #define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS) 51 50 #define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
+6 -1
drivers/firmware/efi/libstub/Makefile
··· 27 27 cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ 28 28 -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ 29 29 -DEFI_HAVE_STRCMP -fno-builtin -fpic \ 30 - $(call cc-option,-mno-single-pic-base) 30 + $(call cc-option,-mno-single-pic-base) \ 31 + $(DISABLE_STACKLEAK_PLUGIN) 31 32 cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \ 32 33 $(DISABLE_STACKLEAK_PLUGIN) 33 34 cflags-$(CONFIG_LOONGARCH) += -fpie ··· 57 56 KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) 58 57 # disable LTO 59 58 KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS)) 59 + 60 + # The .data section would be renamed to .data.efistub, therefore, remove 61 + # `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL 62 + KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL)) 60 63 61 64 lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ 62 65 file.o mem.o random.o randomalloc.o pci.o \
+1
drivers/fsi/fsi-core.c
··· 1444 1444 } 1445 1445 module_exit(fsi_exit); 1446 1446 module_param(discard_errors, int, 0664); 1447 + MODULE_DESCRIPTION("FSI core driver"); 1447 1448 MODULE_LICENSE("GPL"); 1448 1449 MODULE_PARM_DESC(discard_errors, "Don't invoke error handling on bus accesses");
+1
drivers/fsi/fsi-master-aspeed.c
··· 670 670 }; 671 671 672 672 module_platform_driver(fsi_master_aspeed_driver); 673 + MODULE_DESCRIPTION("FSI master driver for AST2600"); 673 674 MODULE_LICENSE("GPL");
+2 -1
drivers/fsi/fsi-master-ast-cf.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 // Copyright 2018 IBM Corp 3 3 /* 4 - * A FSI master controller, using a simple GPIO bit-banging interface 4 + * A FSI master based on Aspeed ColdFire coprocessor 5 5 */ 6 6 7 7 #include <linux/crc4.h> ··· 1438 1438 }; 1439 1439 1440 1440 module_platform_driver(fsi_master_acf); 1441 + MODULE_DESCRIPTION("A FSI master based on Aspeed ColdFire coprocessor"); 1441 1442 MODULE_LICENSE("GPL"); 1442 1443 MODULE_FIRMWARE(FW_FILE_NAME);
+1
drivers/fsi/fsi-master-gpio.c
··· 892 892 }; 893 893 894 894 module_platform_driver(fsi_master_gpio_driver); 895 + MODULE_DESCRIPTION("A FSI master controller, using a simple GPIO bit-banging interface"); 895 896 MODULE_LICENSE("GPL");
+1
drivers/fsi/fsi-master-hub.c
··· 295 295 }; 296 296 297 297 module_fsi_driver(hub_master_driver); 298 + MODULE_DESCRIPTION("FSI hub master driver"); 298 299 MODULE_LICENSE("GPL");
+1
drivers/fsi/fsi-scom.c
··· 625 625 626 626 module_init(scom_init); 627 627 module_exit(scom_exit); 628 + MODULE_DESCRIPTION("SCOM FSI Client device driver"); 628 629 MODULE_LICENSE("GPL");
+1
drivers/gpu/drm/Kconfig
··· 268 268 config DRM_GPUVM 269 269 tristate 270 270 depends on DRM 271 + select DRM_EXEC 271 272 help 272 273 GPU-VM representation providing helpers to manage a GPUs virtual 273 274 address space
+8 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1778 1778 struct ttm_operation_ctx ctx = { false, false }; 1779 1779 struct amdgpu_vm *vm = &fpriv->vm; 1780 1780 struct amdgpu_bo_va_mapping *mapping; 1781 - int r; 1781 + int i, r; 1782 1782 1783 1783 addr /= AMDGPU_GPU_PAGE_SIZE; 1784 1784 ··· 1793 1793 if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket) 1794 1794 return -EINVAL; 1795 1795 1796 - if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { 1797 - (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1798 - amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); 1799 - r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); 1800 - if (r) 1801 - return r; 1802 - } 1796 + (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1797 + amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); 1798 + for (i = 0; i < (*bo)->placement.num_placement; i++) 1799 + (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; 1800 + r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); 1801 + if (r) 1802 + return r; 1803 1803 1804 1804 return amdgpu_ttm_alloc_gart(&(*bo)->tbo); 1805 1805 }
+6
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 156 156 uint64_t addr, uint64_t *flags); 157 157 /* get the amount of memory used by the vbios for pre-OS console */ 158 158 unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); 159 + /* get the DCC buffer alignment */ 160 + unsigned int (*get_dcc_alignment)(struct amdgpu_device *adev); 159 161 160 162 enum amdgpu_memory_partition (*query_mem_partition_mode)( 161 163 struct amdgpu_device *adev); ··· 365 363 (adev)->gmc.gmc_funcs->override_vm_pte_flags \ 366 364 ((adev), (vm), (addr), (pte_flags)) 367 365 #define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev)) 366 + #define amdgpu_gmc_get_dcc_alignment(adev) ({ \ 367 + typeof(adev) _adev = (adev); \ 368 + _adev->gmc.gmc_funcs->get_dcc_alignment(_adev); \ 369 + }) 368 370 369 371 /** 370 372 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 264 264 struct dma_fence *fence = NULL; 265 265 int r; 266 266 267 - /* Ignore soft recovered fences here */ 268 267 r = drm_sched_entity_error(s_entity); 269 - if (r && r != -ENODATA) 268 + if (r) 270 269 goto error; 271 270 272 271 if (!fence && job->gang_submit)
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
··· 103 103 if (!amdgpu_mes_log_enable) 104 104 return 0; 105 105 106 - r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE, 106 + r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE, 107 107 AMDGPU_GEM_DOMAIN_GTT, 108 108 &adev->mes.event_log_gpu_obj, 109 109 &adev->mes.event_log_gpu_addr, ··· 113 113 return r; 114 114 } 115 115 116 - memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE); 116 + memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size); 117 117 118 118 return 0; 119 119 ··· 1573 1573 uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); 1574 1574 1575 1575 seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, 1576 - mem, AMDGPU_MES_LOG_BUFFER_SIZE, false); 1576 + mem, adev->mes.event_log_size, false); 1577 1577 1578 1578 return 0; 1579 1579 }
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
··· 52 52 53 53 #define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */ 54 54 #define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */ 55 - #define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximu log buffer size for MES */ 56 55 57 56 struct amdgpu_mes_funcs; 58 57 ··· 134 135 unsigned long *doorbell_bitmap; 135 136 136 137 /* MES event log buffer */ 137 - struct amdgpu_bo *event_log_gpu_obj; 138 - uint64_t event_log_gpu_addr; 138 + uint32_t event_log_size; 139 + struct amdgpu_bo *event_log_gpu_obj; 140 + uint64_t event_log_gpu_addr; 139 141 void *event_log_cpu_addr; 140 142 141 143 /* ip specific functions */
+34 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 456 456 u64 vis_usage = 0, max_bytes, min_block_size; 457 457 struct amdgpu_vram_mgr_resource *vres; 458 458 u64 size, remaining_size, lpfn, fpfn; 459 + unsigned int adjust_dcc_size = 0; 459 460 struct drm_buddy *mm = &mgr->mm; 460 461 struct drm_buddy_block *block; 461 462 unsigned long pages_per_block; ··· 512 511 /* Allocate blocks in desired range */ 513 512 vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; 514 513 514 + if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC && 515 + adev->gmc.gmc_funcs->get_dcc_alignment) 516 + adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev); 517 + 515 518 remaining_size = (u64)vres->base.size; 519 + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { 520 + unsigned int dcc_size; 521 + 522 + dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size); 523 + remaining_size = (u64)dcc_size; 524 + 525 + vres->flags |= DRM_BUDDY_TRIM_DISABLE; 526 + } 516 527 517 528 mutex_lock(&mgr->lock); 518 529 while (remaining_size) { ··· 534 521 min_block_size = mgr->default_page_size; 535 522 536 523 size = remaining_size; 537 - if ((size >= (u64)pages_per_block << PAGE_SHIFT) && 538 - !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1))) 524 + 525 + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) 526 + min_block_size = size; 527 + else if ((size >= (u64)pages_per_block << PAGE_SHIFT) && 528 + !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1))) 539 529 min_block_size = (u64)pages_per_block << PAGE_SHIFT; 540 530 541 531 BUG_ON(min_block_size < mm->chunk_size); ··· 568 552 remaining_size -= size; 569 553 } 570 554 mutex_unlock(&mgr->lock); 555 + 556 + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { 557 + struct drm_buddy_block *dcc_block; 558 + unsigned long dcc_start; 559 + u64 trim_start; 560 + 561 + dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks); 562 + /* Adjust the start address for DCC buffers only */ 563 + dcc_start = 564 + roundup((unsigned 
long)amdgpu_vram_mgr_block_start(dcc_block), 565 + adjust_dcc_size); 566 + trim_start = (u64)dcc_start; 567 + drm_buddy_block_trim(mm, &trim_start, 568 + (u64)vres->base.size, 569 + &vres->blocks); 570 + } 571 571 572 572 vres->base.start = 0; 573 573 size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
+27
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 202 202 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) 203 203 }; 204 204 205 + static const struct soc15_reg_golden golden_settings_gc_12_0[] = { 206 + SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f), 207 + SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000), 208 + SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020) 209 + }; 210 + 205 211 #define DEFAULT_SH_MEM_CONFIG \ 206 212 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 207 213 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ ··· 3438 3432 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 3439 3433 } 3440 3434 3435 + static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) 3436 + { 3437 + if (amdgpu_sriov_vf(adev)) 3438 + return; 3439 + 3440 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3441 + case IP_VERSION(12, 0, 0): 3442 + case IP_VERSION(12, 0, 1): 3443 + if (adev->rev_id == 0) 3444 + soc15_program_register_sequence(adev, 3445 + golden_settings_gc_12_0, 3446 + (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); 3447 + break; 3448 + default: 3449 + break; 3450 + } 3451 + } 3452 + 3441 3453 static int gfx_v12_0_hw_init(void *handle) 3442 3454 { 3443 3455 int r; ··· 3495 3471 return r; 3496 3472 } 3497 3473 } 3474 + 3475 + if (!amdgpu_emu_mode) 3476 + gfx_v12_0_init_golden_registers(adev); 3498 3477 3499 3478 adev->gfx.is_poweron = true; 3500 3479
+18
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 542 542 return 0; 543 543 } 544 544 545 + static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev) 546 + { 547 + unsigned int max_tex_channel_caches, alignment; 548 + 549 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) && 550 + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1)) 551 + return 0; 552 + 553 + max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches; 554 + if (is_power_of_2(max_tex_channel_caches)) 555 + alignment = (unsigned int)(max_tex_channel_caches / SZ_4); 556 + else 557 + alignment = roundup_pow_of_two(max_tex_channel_caches); 558 + 559 + return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K); 560 + } 561 + 545 562 static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = { 546 563 .flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb, 547 564 .flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid, ··· 568 551 .get_vm_pde = gmc_v12_0_get_vm_pde, 569 552 .get_vm_pte = gmc_v12_0_get_vm_pte, 570 553 .get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size, 554 + .get_dcc_alignment = gmc_v12_0_get_dcc_alignment, 571 555 }; 572 556 573 557 static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
+2
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 1163 1163 adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init; 1164 1164 adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini; 1165 1165 1166 + adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; 1167 + 1166 1168 r = amdgpu_mes_init(adev); 1167 1169 if (r) 1168 1170 return r;
+6 -2
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
··· 551 551 mes_set_hw_res_pkt.oversubscription_timer = 50; 552 552 mes_set_hw_res_pkt.unmapped_doorbell_handling = 1; 553 553 554 - mes_set_hw_res_pkt.enable_mes_event_int_logging = 0; 555 - mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; 554 + if (amdgpu_mes_log_enable) { 555 + mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; 556 + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; 557 + } 556 558 557 559 return mes_v12_0_submit_pkt_and_poll_completion(mes, 558 560 &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), ··· 1238 1236 adev->mes.funcs = &mes_v12_0_funcs; 1239 1237 adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init; 1240 1238 adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini; 1239 + 1240 + adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; 1241 1241 1242 1242 r = amdgpu_mes_init(adev); 1243 1243 if (r)
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
··· 80 80 /* invalidate using legacy mode on vmid*/ 81 81 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, 82 82 PER_VMID_INVALIDATE_REQ, 1 << vmid); 83 - req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); 83 + /* Only use legacy inv on mmhub side */ 84 + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); 84 85 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); 85 86 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); 86 87 req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+4 -3
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
··· 1575 1575 ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | 1576 1576 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | 1577 1577 SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0) | 1578 - SDMA_PKT_COPY_LINEAR_HEADER_CPV((copy_flags & 1579 - (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)) ? 1 : 0); 1578 + SDMA_PKT_COPY_LINEAR_HEADER_CPV(1); 1580 1579 1581 1580 ib->ptr[ib->length_dw++] = byte_count - 1; 1582 1581 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ··· 1589 1590 ((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) | 1590 1591 ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) | 1591 1592 SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1); 1593 + else 1594 + ib->ptr[ib->length_dw++] = 0; 1592 1595 } 1593 1596 1594 1597 /** ··· 1617 1616 1618 1617 static const struct amdgpu_buffer_funcs sdma_v7_0_buffer_funcs = { 1619 1618 .copy_max_bytes = 0x400000, 1620 - .copy_num_dw = 7, 1619 + .copy_num_dw = 8, 1621 1620 .emit_copy_buffer = sdma_v7_0_emit_copy_buffer, 1622 1621 .fill_max_bytes = 0x400000, 1623 1622 .fill_num_dw = 5,
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 1270 1270 } 1271 1271 } 1272 1272 1273 + if (new_stream_on_link_num == 0) 1274 + return false; 1275 + 1273 1276 /* check current_state if there stream on link but it is not in 1274 1277 * new request state 1275 1278 */
+2
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 35 35 #include "dc_stream_priv.h" 36 36 37 37 #define DC_LOGGER dc->ctx->logger 38 + #ifndef MIN 38 39 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) 39 40 #define MAX(x, y) ((x > y) ? x : y) 41 + #endif 40 42 41 43 /******************************************************************************* 42 44 * Private functions
+1 -2
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
··· 185 185 else 186 186 copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0; 187 187 188 - 189 - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 188 + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 190 189 191 190 return true; 192 191 }
+2
drivers/gpu/drm/amd/display/dc/dml/Makefile
··· 83 83 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_rcflags) 84 84 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_rcflags) 85 85 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_rcflags) 86 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_rcflags) 87 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_rcflags) 86 88 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags) 87 89 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags) 88 90 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
+2
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 1402 1402 if (hubbub && hubp) { 1403 1403 if (hubbub->funcs->program_det_size) 1404 1404 hubbub->funcs->program_det_size(hubbub, hubp->inst, 0); 1405 + if (hubbub->funcs->program_det_segments) 1406 + hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0); 1405 1407 } 1406 1408 } 1407 1409
+2
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 771 771 if (hubbub && hubp) { 772 772 if (hubbub->funcs->program_det_size) 773 773 hubbub->funcs->program_det_size(hubbub, hubp->inst, 0); 774 + if (hubbub->funcs->program_det_segments) 775 + hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0); 774 776 } 775 777 } 776 778
+1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 723 723 .min_prefetch_in_strobe_ns = 60000, // 60us 724 724 .disable_unbounded_requesting = false, 725 725 .enable_legacy_fast_update = false, 726 + .dcc_meta_propagation_delay_us = 10, 726 727 .fams2_config = { 727 728 .bits = { 728 729 .enable = true,
+3 -1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
··· 138 138 SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \ 139 139 SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \ 140 140 SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \ 141 - HUBP_3DLUT_FL_REG_LIST_DCN401(id) 141 + HUBP_3DLUT_FL_REG_LIST_DCN401(id), \ 142 + SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \ 143 + SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id) 142 144 143 145 /* ABM */ 144 146 #define ABM_DCN401_REG_LIST_RI(id) \
+2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
··· 25 25 26 26 #include "hdcp.h" 27 27 28 + #ifndef MIN 28 29 #define MIN(a, b) ((a) < (b) ? (a) : (b)) 30 + #endif 29 31 #define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1*/ 30 32 #define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */ 31 33 #define HDCP_MAX_AUX_TRANSACTION_SIZE 16
+3
drivers/gpu/drm/amd/include/mes_v11_api_def.h
··· 28 28 29 29 #define MES_API_VERSION 1 30 30 31 + /* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG */ 32 + #define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 33 + 31 34 /* Driver submits one API(cmd) as a single Frame and this command size is same 32 35 * for all API to ease the debugging and parsing of ring buffer. 33 36 */
+3
drivers/gpu/drm/amd/include/mes_v12_api_def.h
··· 28 28 29 29 #define MES_API_VERSION 0x14 30 30 31 + /* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG_12 */ 32 + #define AMDGPU_MES_LOG_BUFFER_SIZE 0xC000 33 + 31 34 /* Driver submits one API(cmd) as a single Frame and this command size is same for all API 32 35 * to ease the debugging and parsing of ring buffer. 33 36 */
+2 -1
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
··· 618 618 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 619 619 int r = 0; 620 620 621 - if (!pp_funcs || !pp_funcs->load_firmware || adev->flags & AMD_IS_APU) 621 + if (!pp_funcs || !pp_funcs->load_firmware || 622 + (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) 622 623 return 0; 623 624 624 625 mutex_lock(&adev->pm.mutex);
+10 -4
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
··· 22 22 */ 23 23 #include <asm/div64.h> 24 24 25 - #define SHIFT_AMOUNT 16 /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */ 25 + enum ppevvmath_constants { 26 + /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */ 27 + SHIFT_AMOUNT = 16, 26 28 27 - #define PRECISION 5 /* Change this value to change the number of decimal places in the final output - 5 is a good default */ 29 + /* Change this value to change the number of decimal places in the final output - 5 is a good default */ 30 + PRECISION = 5, 28 31 29 - #define SHIFTED_2 (2 << SHIFT_AMOUNT) 30 - #define MAX (1 << (SHIFT_AMOUNT - 1)) - 1 /* 32767 - Might change in the future */ 32 + SHIFTED_2 = (2 << SHIFT_AMOUNT), 33 + 34 + /* 32767 - Might change in the future */ 35 + MAX = (1 << (SHIFT_AMOUNT - 1)) - 1, 36 + }; 31 37 32 38 /* ------------------------------------------------------------------------------- 33 39 * NEW TYPE - fINT
+46 -6
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h
··· 27 27 28 28 #pragma pack(push, 1) 29 29 30 - #define SMU_14_0_2_TABLE_FORMAT_REVISION 3 30 + #define SMU_14_0_2_TABLE_FORMAT_REVISION 23 31 + #define SMU_14_0_2_CUSTOM_TABLE_FORMAT_REVISION 1 31 32 32 33 // POWERPLAYTABLE::ulPlatformCaps 33 34 #define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page. ··· 44 43 #define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 45 44 46 45 #define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD 46 + #define SMU_14_0_2_PP_CUSTOM_OVERDRIVE_VERSION 0x1 47 47 #define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 48 48 49 49 enum SMU_14_0_2_OD_SW_FEATURE_CAP ··· 109 107 SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, 110 108 SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, 111 109 SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, 110 + SMU_14_0_2_PMSETTING_COUNT 112 111 }; 113 112 #define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings 114 113 ··· 130 127 int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings 131 128 }; 132 129 130 + enum smu_14_0_3_pptable_source { 131 + PPTABLE_SOURCE_IFWI = 0, 132 + PPTABLE_SOURCE_DRIVER_HARDCODED = 1, 133 + PPTABLE_SOURCE_PPGEN_REGISTRY = 2, 134 + PPTABLE_SOURCE_MAX = PPTABLE_SOURCE_PPGEN_REGISTRY, 135 + }; 136 + 133 137 struct smu_14_0_2_powerplay_table 134 138 { 135 139 struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen. 136 140 uint8_t table_revision; // PPGen use only: table_revision = 3 137 - uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t). 141 + uint8_t pptable_source; // PPGen UI dropdown box 138 142 uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. 
start of PPTable_t (start of SkuTable_t) 139 143 uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t. 140 - uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable. 141 - uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t. 142 - uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable. 143 - uint16_t pmfw_board_table_size; // The size of BoardTable_t. 144 + uint16_t pmfw_sku_table_start_offset; // DO NOT CHANGE ORDER; The absolute start offset of the SkuTable_t (within smu_14_0_3_powerplay_table). 145 + uint16_t pmfw_sku_table_size; // DO NOT CHANGE ORDER; The size of SkuTable_t. 146 + uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t 147 + uint16_t pmfw_board_table_size; // The size of BoardTable_t. 144 148 uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable. 145 149 uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t. 
146 150 uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base ··· 167 157 struct smu_14_0_2_overdrive_table overdrive_table; 168 158 169 159 PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes 160 + }; 161 + 162 + enum SMU_14_0_2_CUSTOM_OD_SW_FEATURE_CAP { 163 + SMU_14_0_2_CUSTOM_ODCAP_POWER_MODE = 0, 164 + SMU_14_0_2_CUSTOM_ODCAP_COUNT 165 + }; 166 + 167 + enum SMU_14_0_2_CUSTOM_OD_FEATURE_SETTING_ID { 168 + SMU_14_0_2_CUSTOM_ODSETTING_POWER_MODE = 0, 169 + SMU_14_0_2_CUSTOM_ODSETTING_COUNT, 170 + }; 171 + 172 + struct smu_14_0_2_custom_overdrive_table { 173 + uint8_t revision; 174 + uint8_t reserve[3]; 175 + uint8_t cap[SMU_14_0_2_CUSTOM_ODCAP_COUNT]; 176 + int32_t max[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; 177 + int32_t min[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; 178 + int16_t pm_setting[SMU_14_0_2_PMSETTING_COUNT]; 179 + }; 180 + 181 + struct smu_14_0_3_custom_powerplay_table { 182 + uint8_t custom_table_revision; 183 + uint16_t custom_table_size; 184 + uint16_t custom_sku_table_offset; 185 + uint32_t custom_platform_caps; 186 + uint16_t software_shutdown_temp; 187 + struct smu_14_0_2_custom_overdrive_table custom_overdrive_table; 188 + uint32_t reserve[8]; 189 + CustomSkuTable_t custom_sku_table_pmfw; 170 190 }; 171 191 172 192 #pragma pack(pop)
+84 -2
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 66 66 67 67 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 68 68 #define DEBUGSMC_MSG_Mode1Reset 2 69 + #define LINK_SPEED_MAX 3 69 70 70 71 static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { 71 72 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), ··· 222 221 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), 223 222 }; 224 223 225 - #if 0 226 224 static const uint8_t smu_v14_0_2_throttler_map[] = { 227 225 [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 228 226 [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), ··· 241 241 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), 242 242 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 243 243 }; 244 - #endif 245 244 246 245 static int 247 246 smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu, ··· 1868 1869 return ret; 1869 1870 } 1870 1871 1872 + static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, 1873 + void **table) 1874 + { 1875 + struct smu_table_context *smu_table = &smu->smu_table; 1876 + struct gpu_metrics_v1_3 *gpu_metrics = 1877 + (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 1878 + SmuMetricsExternal_t metrics_ext; 1879 + SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; 1880 + int ret = 0; 1881 + 1882 + ret = smu_cmn_get_metrics_table(smu, 1883 + &metrics_ext, 1884 + true); 1885 + if (ret) 1886 + return ret; 1887 + 1888 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 1889 + 1890 + gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; 1891 + gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; 1892 + gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; 1893 + gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; 1894 + gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; 1895 + gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0], 1896 + metrics->AvgTemperature[TEMP_VR_MEM1]); 1897 + 1898 + 
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; 1899 + gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; 1900 + gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage, 1901 + metrics->Vcn1ActivityPercentage); 1902 + 1903 + gpu_metrics->average_socket_power = metrics->AverageSocketPower; 1904 + gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; 1905 + 1906 + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) 1907 + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; 1908 + else 1909 + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; 1910 + 1911 + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) 1912 + gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; 1913 + else 1914 + gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; 1915 + 1916 + gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; 1917 + gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; 1918 + gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 1919 + gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 1920 + 1921 + gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency; 1922 + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; 1923 + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; 1924 + gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; 1925 + gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; 1926 + gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0]; 1927 + gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0]; 1928 + 1929 + gpu_metrics->throttle_status = 1930 + smu_v14_0_2_get_throttler_status(metrics); 1931 + gpu_metrics->indep_throttle_status = 1932 + smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, 1933 + smu_v14_0_2_throttler_map); 1934 + 1935 
+ gpu_metrics->current_fan_speed = metrics->AvgFanRpm; 1936 + 1937 + gpu_metrics->pcie_link_width = metrics->PcieWidth; 1938 + if ((metrics->PcieRate - 1) > LINK_SPEED_MAX) 1939 + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1); 1940 + else 1941 + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate); 1942 + 1943 + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1944 + 1945 + gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; 1946 + gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC]; 1947 + gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM]; 1948 + 1949 + *table = (void *)gpu_metrics; 1950 + 1951 + return sizeof(struct gpu_metrics_v1_3); 1952 + } 1953 + 1871 1954 static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { 1872 1955 .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, 1873 1956 .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, ··· 1986 1905 .enable_thermal_alert = smu_v14_0_enable_thermal_alert, 1987 1906 .disable_thermal_alert = smu_v14_0_disable_thermal_alert, 1988 1907 .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, 1908 + .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics, 1989 1909 .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, 1990 1910 .init_pptable_microcode = smu_v14_0_init_pptable_microcode, 1991 1911 .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 794 794 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, 795 795 char *buf) 796 796 { 797 - int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; 797 + int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; 798 798 uint64_t feature_mask; 799 799 int i, feature_index; 800 800 uint32_t count = 0;
+7
drivers/gpu/drm/ast/ast_dp.c
··· 158 158 ASTDP_HOST_EDID_READ_DONE); 159 159 } 160 160 161 + bool ast_dp_power_is_on(struct ast_device *ast) 162 + { 163 + u8 vgacre3; 161 164 165 + vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3); 166 + 167 + return !(vgacre3 & AST_DP_PHY_SLEEP); 168 + } 162 169 163 170 void ast_dp_power_on_off(struct drm_device *dev, bool on) 164 171 {
+5
drivers/gpu/drm/ast/ast_drv.c
··· 391 391 392 392 static int ast_drm_thaw(struct drm_device *dev) 393 393 { 394 + struct ast_device *ast = to_ast_device(dev); 395 + 396 + ast_enable_vga(ast->ioregs); 397 + ast_open_key(ast->ioregs); 398 + ast_enable_mmio(dev->dev, ast->ioregs); 394 399 ast_post_gpu(dev); 395 400 396 401 return drm_mode_config_helper_resume(dev);
+1
drivers/gpu/drm/ast/ast_drv.h
··· 472 472 bool ast_astdp_is_connected(struct ast_device *ast); 473 473 int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata); 474 474 void ast_dp_launch(struct drm_device *dev); 475 + bool ast_dp_power_is_on(struct ast_device *ast); 475 476 void ast_dp_power_on_off(struct drm_device *dev, bool no); 476 477 void ast_dp_set_on_off(struct drm_device *dev, bool no); 477 478 void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
+27 -2
drivers/gpu/drm/ast/ast_mode.c
··· 28 28 * Authors: Dave Airlie <airlied@redhat.com> 29 29 */ 30 30 31 + #include <linux/delay.h> 31 32 #include <linux/export.h> 32 33 #include <linux/pci.h> 33 34 ··· 1688 1687 struct drm_modeset_acquire_ctx *ctx, 1689 1688 bool force) 1690 1689 { 1690 + struct drm_device *dev = connector->dev; 1691 1691 struct ast_device *ast = to_ast_device(connector->dev); 1692 + enum drm_connector_status status = connector_status_disconnected; 1693 + struct drm_connector_state *connector_state = connector->state; 1694 + bool is_active = false; 1695 + 1696 + mutex_lock(&ast->modeset_lock); 1697 + 1698 + if (connector_state && connector_state->crtc) { 1699 + struct drm_crtc_state *crtc_state = connector_state->crtc->state; 1700 + 1701 + if (crtc_state && crtc_state->active) 1702 + is_active = true; 1703 + } 1704 + 1705 + if (!is_active && !ast_dp_power_is_on(ast)) { 1706 + ast_dp_power_on_off(dev, true); 1707 + msleep(50); 1708 + } 1692 1709 1693 1710 if (ast_astdp_is_connected(ast)) 1694 - return connector_status_connected; 1695 - return connector_status_disconnected; 1711 + status = connector_status_connected; 1712 + 1713 + if (!is_active && status == connector_status_disconnected) 1714 + ast_dp_power_on_off(dev, false); 1715 + 1716 + mutex_unlock(&ast->modeset_lock); 1717 + 1718 + return status; 1696 1719 } 1697 1720 1698 1721 static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
+5 -9
drivers/gpu/drm/drm_atomic_uapi.c
··· 1070 1070 break; 1071 1071 } 1072 1072 1073 - if (async_flip && prop != config->prop_fb_id) { 1073 + if (async_flip && 1074 + (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY || 1075 + (prop != config->prop_fb_id && 1076 + prop != config->prop_in_fence_fd && 1077 + prop != config->prop_fb_damage_clips))) { 1074 1078 ret = drm_atomic_plane_get_property(plane, plane_state, 1075 1079 prop, &old_val); 1076 1080 ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); 1077 - break; 1078 - } 1079 - 1080 - if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) { 1081 - drm_dbg_atomic(prop->dev, 1082 - "[OBJECT:%d] Only primary planes can be changed during async flip\n", 1083 - obj->id); 1084 - ret = -EINVAL; 1085 1081 break; 1086 1082 } 1087 1083
+2 -6
drivers/gpu/drm/drm_bridge_connector.c
··· 443 443 panel_bridge = bridge; 444 444 } 445 445 446 - if (connector_type == DRM_MODE_CONNECTOR_Unknown) { 447 - kfree(bridge_connector); 446 + if (connector_type == DRM_MODE_CONNECTOR_Unknown) 448 447 return ERR_PTR(-EINVAL); 449 - } 450 448 451 449 if (bridge_connector->bridge_hdmi) 452 450 ret = drmm_connector_hdmi_init(drm, connector, ··· 459 461 ret = drmm_connector_init(drm, connector, 460 462 &drm_bridge_connector_funcs, 461 463 connector_type, ddc); 462 - if (ret) { 463 - kfree(bridge_connector); 464 + if (ret) 464 465 return ERR_PTR(ret); 465 - } 466 466 467 467 drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs); 468 468
+23 -2
drivers/gpu/drm/drm_buddy.c
··· 851 851 * drm_buddy_block_trim - free unused pages 852 852 * 853 853 * @mm: DRM buddy manager 854 + * @start: start address to begin the trimming. 854 855 * @new_size: original size requested 855 856 * @blocks: Input and output list of allocated blocks. 856 857 * MUST contain single block as input to be trimmed. ··· 867 866 * 0 on success, error code on failure. 868 867 */ 869 868 int drm_buddy_block_trim(struct drm_buddy *mm, 869 + u64 *start, 870 870 u64 new_size, 871 871 struct list_head *blocks) 872 872 { 873 873 struct drm_buddy_block *parent; 874 874 struct drm_buddy_block *block; 875 + u64 block_start, block_end; 875 876 LIST_HEAD(dfs); 876 877 u64 new_start; 877 878 int err; ··· 884 881 block = list_first_entry(blocks, 885 882 struct drm_buddy_block, 886 883 link); 884 + 885 + block_start = drm_buddy_block_offset(block); 886 + block_end = block_start + drm_buddy_block_size(mm, block); 887 887 888 888 if (WARN_ON(!drm_buddy_block_is_allocated(block))) 889 889 return -EINVAL; ··· 900 894 if (new_size == drm_buddy_block_size(mm, block)) 901 895 return 0; 902 896 897 + new_start = block_start; 898 + if (start) { 899 + new_start = *start; 900 + 901 + if (new_start < block_start) 902 + return -EINVAL; 903 + 904 + if (!IS_ALIGNED(new_start, mm->chunk_size)) 905 + return -EINVAL; 906 + 907 + if (range_overflows(new_start, new_size, block_end)) 908 + return -EINVAL; 909 + } 910 + 903 911 list_del(&block->link); 904 912 mark_free(mm, block); 905 913 mm->avail += drm_buddy_block_size(mm, block); ··· 924 904 parent = block->parent; 925 905 block->parent = NULL; 926 906 927 - new_start = drm_buddy_block_offset(block); 928 907 list_add(&block->tmp_link, &dfs); 929 908 err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); 930 909 if (err) { ··· 1085 1066 } while (1); 1086 1067 1087 1068 /* Trim the allocated block to the required size */ 1088 - if (original_size != size) { 1069 + if (!(flags & DRM_BUDDY_TRIM_DISABLE) && 1070 + original_size != size) { 1089 
1071 struct list_head *trim_list; 1090 1072 LIST_HEAD(temp); 1091 1073 u64 trim_size; ··· 1103 1083 } 1104 1084 1105 1085 drm_buddy_block_trim(mm, 1086 + NULL, 1106 1087 trim_size, 1107 1088 trim_list); 1108 1089
+1 -1
drivers/gpu/drm/drm_client.c
··· 355 355 356 356 err_drm_gem_vmap_unlocked: 357 357 drm_gem_unlock(gem); 358 - return 0; 358 + return ret; 359 359 } 360 360 EXPORT_SYMBOL(drm_client_buffer_vmap_local); 361 361
+5
drivers/gpu/drm/drm_client_modeset.c
··· 880 880 881 881 kfree(modeset->mode); 882 882 modeset->mode = drm_mode_duplicate(dev, mode); 883 + if (!modeset->mode) { 884 + ret = -ENOMEM; 885 + break; 886 + } 887 + 883 888 drm_connector_get(connector); 884 889 modeset->connectors[modeset->num_connectors++] = connector; 885 890 modeset->x = offset->x;
+11
drivers/gpu/drm/drm_fb_helper.c
··· 624 624 static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y, 625 625 u32 width, u32 height) 626 626 { 627 + /* 628 + * This function may be invoked by panic() to flush the frame 629 + * buffer, where all CPUs except the panic CPU are stopped. 630 + * During the following schedule_work(), the panic CPU needs 631 + * the worker_pool lock, which might be held by a stopped CPU, 632 + * causing schedule_work() and panic() to block. Return early on 633 + * oops_in_progress to prevent this blocking. 634 + */ 635 + if (oops_in_progress) 636 + return; 637 + 627 638 drm_fb_helper_add_damage_clip(helper, x, y, width, height); 628 639 629 640 schedule_work(&helper->damage_work);
+6
drivers/gpu/drm/drm_panel_orientation_quirks.c
··· 414 414 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), 415 415 }, 416 416 .driver_data = (void *)&lcd1600x2560_leftside_up, 417 + }, { /* OrangePi Neo */ 418 + .matches = { 419 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"), 420 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"), 421 + }, 422 + .driver_data = (void *)&lcd1200x1920_rightside_up, 417 423 }, { /* Samsung GalaxyBook 10.6 */ 418 424 .matches = { 419 425 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+3
drivers/gpu/drm/i915/display/intel_backlight.c
··· 1449 1449 1450 1450 static int cnp_num_backlight_controllers(struct drm_i915_private *i915) 1451 1451 { 1452 + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) 1453 + return 2; 1454 + 1452 1455 if (INTEL_PCH_TYPE(i915) >= PCH_DG1) 1453 1456 return 1; 1454 1457
+3 -3
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
··· 1658 1658 } 1659 1659 1660 1660 static int 1661 - skl_ddi_calculate_wrpll(int clock /* in Hz */, 1661 + skl_ddi_calculate_wrpll(int clock, 1662 1662 int ref_clock, 1663 1663 struct skl_wrpll_params *wrpll_params) 1664 1664 { ··· 1683 1683 }; 1684 1684 unsigned int dco, d, i; 1685 1685 unsigned int p0, p1, p2; 1686 - u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ 1686 + u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */ 1687 1687 1688 1688 for (d = 0; d < ARRAY_SIZE(dividers); d++) { 1689 1689 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { ··· 1808 1808 struct skl_wrpll_params wrpll_params = {}; 1809 1809 int ret; 1810 1810 1811 - ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, 1811 + ret = skl_ddi_calculate_wrpll(crtc_state->port_clock, 1812 1812 i915->display.dpll.ref_clks.nssc, &wrpll_params); 1813 1813 if (ret) 1814 1814 return ret;
+1 -1
drivers/gpu/drm/i915/display/intel_hdcp_regs.h
··· 251 251 #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ 252 252 (TRANS_HDCP(dev_priv) ? \ 253 253 TRANS_HDCP2_STREAM_STATUS(trans) : \ 254 - PIPE_HDCP2_STREAM_STATUS(pipe)) 254 + PIPE_HDCP2_STREAM_STATUS(port)) 255 255 256 256 #define _PORTA_HDCP2_AUTH_STREAM 0x66F00 257 257 #define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+3
drivers/gpu/drm/i915/display/intel_pps.c
··· 351 351 if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) 352 352 return 2; 353 353 354 + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) 355 + return 2; 356 + 354 357 if (INTEL_PCH_TYPE(i915) >= PCH_DG1) 355 358 return 1; 356 359
+49 -6
drivers/gpu/drm/i915/gem/i915_gem_mman.c
··· 290 290 return i915_error_to_vmf_fault(err); 291 291 } 292 292 293 + static void set_address_limits(struct vm_area_struct *area, 294 + struct i915_vma *vma, 295 + unsigned long obj_offset, 296 + unsigned long *start_vaddr, 297 + unsigned long *end_vaddr) 298 + { 299 + unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ 300 + long start, end; /* memory boundaries */ 301 + 302 + /* 303 + * Let's move into the ">> PAGE_SHIFT" 304 + * domain to be sure not to lose bits 305 + */ 306 + vm_start = area->vm_start >> PAGE_SHIFT; 307 + vm_end = area->vm_end >> PAGE_SHIFT; 308 + vma_size = vma->size >> PAGE_SHIFT; 309 + 310 + /* 311 + * Calculate the memory boundaries by considering the offset 312 + * provided by the user during memory mapping and the offset 313 + * provided for the partial mapping. 314 + */ 315 + start = vm_start; 316 + start -= obj_offset; 317 + start += vma->gtt_view.partial.offset; 318 + end = start + vma_size; 319 + 320 + start = max_t(long, start, vm_start); 321 + end = min_t(long, end, vm_end); 322 + 323 + /* Let's move back into the "<< PAGE_SHIFT" domain */ 324 + *start_vaddr = (unsigned long)start << PAGE_SHIFT; 325 + *end_vaddr = (unsigned long)end << PAGE_SHIFT; 326 + } 327 + 293 328 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) 294 329 { 295 330 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) ··· 337 302 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 338 303 bool write = area->vm_flags & VM_WRITE; 339 304 struct i915_gem_ww_ctx ww; 305 + unsigned long obj_offset; 306 + unsigned long start, end; /* memory boundaries */ 340 307 intel_wakeref_t wakeref; 341 308 struct i915_vma *vma; 342 309 pgoff_t page_offset; 310 + unsigned long pfn; 343 311 int srcu; 344 312 int ret; 345 313 346 - /* We don't use vmf->pgoff since that has the fake offset */ 314 + obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node); 347 315 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; 316 + page_offset += obj_offset; 348 317 
349 318 trace_i915_gem_object_fault(obj, page_offset, true, write); 350 319 ··· 441 402 if (ret) 442 403 goto err_unpin; 443 404 405 + set_address_limits(area, vma, obj_offset, &start, &end); 406 + 407 + pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; 408 + pfn += (start - area->vm_start) >> PAGE_SHIFT; 409 + pfn += obj_offset - vma->gtt_view.partial.offset; 410 + 444 411 /* Finally, remap it using the new GTT offset */ 445 - ret = remap_io_mapping(area, 446 - area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT), 447 - (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT, 448 - min_t(u64, vma->size, area->vm_end - area->vm_start), 449 - &ggtt->iomap); 412 + ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap); 450 413 if (ret) 451 414 goto err_fence; 452 415 ··· 1125 1084 mmo = mmap_offset_attach(obj, mmap_type, NULL); 1126 1085 if (IS_ERR(mmo)) 1127 1086 return PTR_ERR(mmo); 1087 + 1088 + vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node); 1128 1089 } 1129 1090 1130 1091 /*
+7 -6
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 165 165 i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] : 166 166 obj->mm.region, &places[0], obj->bo_offset, 167 167 obj->base.size, flags); 168 - places[0].flags |= TTM_PL_FLAG_DESIRED; 169 168 170 169 /* Cache this on object? */ 171 170 for (i = 0; i < num_allowed; ++i) { ··· 778 779 .interruptible = true, 779 780 .no_wait_gpu = false, 780 781 }; 781 - int real_num_busy; 782 + struct ttm_placement initial_placement; 783 + struct ttm_place initial_place; 782 784 int ret; 783 785 784 786 /* First try only the requested placement. No eviction. */ 785 - real_num_busy = placement->num_placement; 786 - placement->num_placement = 1; 787 - ret = ttm_bo_validate(bo, placement, &ctx); 787 + initial_placement.num_placement = 1; 788 + memcpy(&initial_place, placement->placement, sizeof(struct ttm_place)); 789 + initial_place.flags |= TTM_PL_FLAG_DESIRED; 790 + initial_placement.placement = &initial_place; 791 + ret = ttm_bo_validate(bo, &initial_placement, &ctx); 788 792 if (ret) { 789 793 ret = i915_ttm_err_to_gem(ret); 790 794 /* ··· 802 800 * If the initial attempt fails, allow all accepted placements, 803 801 * evicting if necessary. 804 802 */ 805 - placement->num_placement = real_num_busy; 806 803 ret = ttm_bo_validate(bo, placement, &ctx); 807 804 if (ret) 808 805 return i915_ttm_err_to_gem(ret);
-33
drivers/gpu/drm/i915/i915_perf.c
··· 2749 2749 } 2750 2750 2751 2751 static int 2752 - gen12_configure_all_contexts(struct i915_perf_stream *stream, 2753 - const struct i915_oa_config *oa_config, 2754 - struct i915_active *active) 2755 - { 2756 - struct flex regs[] = { 2757 - { 2758 - GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), 2759 - CTX_R_PWR_CLK_STATE, 2760 - }, 2761 - }; 2762 - 2763 - if (stream->engine->class != RENDER_CLASS) 2764 - return 0; 2765 - 2766 - return oa_configure_all_contexts(stream, 2767 - regs, ARRAY_SIZE(regs), 2768 - active); 2769 - } 2770 - 2771 - static int 2772 2752 lrc_configure_all_contexts(struct i915_perf_stream *stream, 2773 2753 const struct i915_oa_config *oa_config, 2774 2754 struct i915_active *active) ··· 2854 2874 { 2855 2875 struct drm_i915_private *i915 = stream->perf->i915; 2856 2876 struct intel_uncore *uncore = stream->uncore; 2857 - struct i915_oa_config *oa_config = stream->oa_config; 2858 2877 bool periodic = stream->periodic; 2859 2878 u32 period_exponent = stream->period_exponent; 2860 2879 u32 sqcnt1; ··· 2896 2917 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0); 2897 2918 2898 2919 intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1); 2899 - 2900 - /* 2901 - * Update all contexts prior writing the mux configurations as we need 2902 - * to make sure all slices/subslices are ON before writing to NOA 2903 - * registers. 2904 - */ 2905 - ret = gen12_configure_all_contexts(stream, oa_config, active); 2906 - if (ret) 2907 - return ret; 2908 2920 2909 2921 /* 2910 2922 * For Gen12, performance counters are context ··· 2949 2979 intel_uncore_write(uncore, GEN7_ROW_CHICKEN2, 2950 2980 _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING)); 2951 2981 } 2952 - 2953 - /* Reset all contexts' slices/subslices configurations. */ 2954 - gen12_configure_all_contexts(stream, NULL, NULL); 2955 2982 2956 2983 /* disable the context save/restore or OAR counters */ 2957 2984 if (stream->ctx)
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 898 898 * Without this the operation can timeout and we'll fallback to a 899 899 * software copy, which might take several minutes to finish. 900 900 */ 901 - nouveau_fence_wait(fence, false); 901 + nouveau_fence_wait(fence, false, false); 902 902 ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, 903 903 new_reg); 904 904 nouveau_fence_unref(&fence);
+1 -1
drivers/gpu/drm/nouveau/nouveau_chan.c
··· 72 72 73 73 ret = nouveau_fence_new(&fence, chan); 74 74 if (!ret) { 75 - ret = nouveau_fence_wait(fence, false); 75 + ret = nouveau_fence_wait(fence, false, false); 76 76 nouveau_fence_unref(&fence); 77 77 } 78 78
+1 -1
drivers/gpu/drm/nouveau/nouveau_dmem.c
··· 128 128 static void nouveau_dmem_fence_done(struct nouveau_fence **fence) 129 129 { 130 130 if (fence) { 131 - nouveau_fence_wait(*fence, false); 131 + nouveau_fence_wait(*fence, true, false); 132 132 nouveau_fence_unref(fence); 133 133 } else { 134 134 /*
+29 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 311 311 return timeout - t; 312 312 } 313 313 314 + static int 315 + nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr) 316 + { 317 + int ret = 0; 318 + 319 + while (!nouveau_fence_done(fence)) { 320 + if (time_after_eq(jiffies, fence->timeout)) { 321 + ret = -EBUSY; 322 + break; 323 + } 324 + 325 + __set_current_state(intr ? 326 + TASK_INTERRUPTIBLE : 327 + TASK_UNINTERRUPTIBLE); 328 + 329 + if (intr && signal_pending(current)) { 330 + ret = -ERESTARTSYS; 331 + break; 332 + } 333 + } 334 + 335 + __set_current_state(TASK_RUNNING); 336 + return ret; 337 + } 338 + 314 339 int 315 - nouveau_fence_wait(struct nouveau_fence *fence, bool intr) 340 + nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) 316 341 { 317 342 long ret; 343 + 344 + if (!lazy) 345 + return nouveau_fence_wait_busy(fence, intr); 318 346 319 347 ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ); 320 348 if (ret < 0)
+1 -1
drivers/gpu/drm/nouveau/nouveau_fence.h
··· 23 23 24 24 int nouveau_fence_emit(struct nouveau_fence *); 25 25 bool nouveau_fence_done(struct nouveau_fence *); 26 - int nouveau_fence_wait(struct nouveau_fence *, bool intr); 26 + int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); 27 27 int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); 28 28 29 29 struct nouveau_fence_chan {
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 928 928 } 929 929 930 930 if (sync) { 931 - if (!(ret = nouveau_fence_wait(fence, false))) { 931 + if (!(ret = nouveau_fence_wait(fence, false, false))) { 932 932 if ((ret = dma_fence_get_status(&fence->base)) == 1) 933 933 ret = 0; 934 934 }
+2 -1
drivers/gpu/drm/nouveau/nouveau_prime.c
··· 64 64 * to the caller, instead of a normal nouveau_bo ttm reference. */ 65 65 ret = drm_gem_object_init(dev, &nvbo->bo.base, size); 66 66 if (ret) { 67 - nouveau_bo_ref(NULL, &nvbo); 67 + drm_gem_object_release(&nvbo->bo.base); 68 + kfree(nvbo); 68 69 obj = ERR_PTR(-ENOMEM); 69 70 goto unlock; 70 71 }
+1
drivers/gpu/drm/nouveau/nouveau_uvmm.c
··· 1803 1803 { 1804 1804 struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj); 1805 1805 1806 + nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0); 1806 1807 return nouveau_bo_validate(nvbo, true, false); 1807 1808 } 1808 1809
+1
drivers/gpu/drm/omapdrm/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config DRM_OMAP 3 3 tristate "OMAP DRM" 4 + depends on MMU 4 5 depends on DRM && OF 5 6 depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB) 6 7 select DRM_KMS_HELPER
+2
drivers/gpu/drm/radeon/evergreen_cs.c
··· 33 33 #include "evergreen_reg_safe.h" 34 34 #include "cayman_reg_safe.h" 35 35 36 + #ifndef MIN 36 37 #define MAX(a, b) (((a) > (b)) ? (a) : (b)) 37 38 #define MIN(a, b) (((a) < (b)) ? (a) : (b)) 39 + #endif 38 40 39 41 #define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm) 40 42
+11
drivers/gpu/drm/tests/drm_gem_shmem_test.c
··· 102 102 103 103 sg_init_one(sgt->sgl, buf, TEST_SIZE); 104 104 105 + /* 106 + * Set the DMA mask to 64-bits and map the sgtables 107 + * otherwise drm_gem_shmem_free will cause a warning 108 + * on debug kernels. 109 + */ 110 + ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64)); 111 + KUNIT_ASSERT_EQ(test, ret, 0); 112 + 113 + ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0); 114 + KUNIT_ASSERT_EQ(test, ret, 0); 115 + 105 116 /* Init a mock DMA-BUF */ 106 117 buf_mock.size = TEST_SIZE; 107 118 attach_mock.dmabuf = &buf_mock;
+4
drivers/gpu/drm/v3d/v3d_drv.h
··· 565 565 void v3d_mmu_remove_ptes(struct v3d_bo *bo); 566 566 567 567 /* v3d_sched.c */ 568 + void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info, 569 + unsigned int count); 570 + void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info, 571 + unsigned int count); 568 572 void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue); 569 573 int v3d_sched_init(struct v3d_dev *v3d); 570 574 void v3d_sched_fini(struct v3d_dev *v3d);
+32 -12
drivers/gpu/drm/v3d/v3d_sched.c
··· 73 73 v3d_job_cleanup(job); 74 74 } 75 75 76 + void 77 + v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info, 78 + unsigned int count) 79 + { 80 + if (query_info->queries) { 81 + unsigned int i; 82 + 83 + for (i = 0; i < count; i++) 84 + drm_syncobj_put(query_info->queries[i].syncobj); 85 + 86 + kvfree(query_info->queries); 87 + } 88 + } 89 + 90 + void 91 + v3d_performance_query_info_free(struct v3d_performance_query_info *query_info, 92 + unsigned int count) 93 + { 94 + if (query_info->queries) { 95 + unsigned int i; 96 + 97 + for (i = 0; i < count; i++) 98 + drm_syncobj_put(query_info->queries[i].syncobj); 99 + 100 + kvfree(query_info->queries); 101 + } 102 + } 103 + 76 104 static void 77 105 v3d_cpu_job_free(struct drm_sched_job *sched_job) 78 106 { 79 107 struct v3d_cpu_job *job = to_cpu_job(sched_job); 80 - struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; 81 - struct v3d_performance_query_info *performance_query = &job->performance_query; 82 108 83 - if (timestamp_query->queries) { 84 - for (int i = 0; i < timestamp_query->count; i++) 85 - drm_syncobj_put(timestamp_query->queries[i].syncobj); 86 - kvfree(timestamp_query->queries); 87 - } 109 + v3d_timestamp_query_info_free(&job->timestamp_query, 110 + job->timestamp_query.count); 88 111 89 - if (performance_query->queries) { 90 - for (int i = 0; i < performance_query->count; i++) 91 - drm_syncobj_put(performance_query->queries[i].syncobj); 92 - kvfree(performance_query->queries); 93 - } 112 + v3d_performance_query_info_free(&job->performance_query, 113 + job->performance_query.count); 94 114 95 115 v3d_job_cleanup(&job->base); 96 116 }
+88 -33
drivers/gpu/drm/v3d/v3d_submit.c
··· 452 452 { 453 453 u32 __user *offsets, *syncs; 454 454 struct drm_v3d_timestamp_query timestamp; 455 + unsigned int i; 456 + int err; 455 457 456 458 if (!job) { 457 459 DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); ··· 482 480 offsets = u64_to_user_ptr(timestamp.offsets); 483 481 syncs = u64_to_user_ptr(timestamp.syncs); 484 482 485 - for (int i = 0; i < timestamp.count; i++) { 483 + for (i = 0; i < timestamp.count; i++) { 486 484 u32 offset, sync; 487 485 488 486 if (copy_from_user(&offset, offsets++, sizeof(offset))) { 489 - kvfree(job->timestamp_query.queries); 490 - return -EFAULT; 487 + err = -EFAULT; 488 + goto error; 491 489 } 492 490 493 491 job->timestamp_query.queries[i].offset = offset; 494 492 495 493 if (copy_from_user(&sync, syncs++, sizeof(sync))) { 496 - kvfree(job->timestamp_query.queries); 497 - return -EFAULT; 494 + err = -EFAULT; 495 + goto error; 498 496 } 499 497 500 498 job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); 499 + if (!job->timestamp_query.queries[i].syncobj) { 500 + err = -ENOENT; 501 + goto error; 502 + } 501 503 } 502 504 job->timestamp_query.count = timestamp.count; 503 505 504 506 return 0; 507 + 508 + error: 509 + v3d_timestamp_query_info_free(&job->timestamp_query, i); 510 + return err; 505 511 } 506 512 507 513 static int ··· 519 509 { 520 510 u32 __user *syncs; 521 511 struct drm_v3d_reset_timestamp_query reset; 512 + unsigned int i; 513 + int err; 522 514 523 515 if (!job) { 524 516 DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); ··· 545 533 546 534 syncs = u64_to_user_ptr(reset.syncs); 547 535 548 - for (int i = 0; i < reset.count; i++) { 536 + for (i = 0; i < reset.count; i++) { 549 537 u32 sync; 550 538 551 539 job->timestamp_query.queries[i].offset = reset.offset + 8 * i; 552 540 553 541 if (copy_from_user(&sync, syncs++, sizeof(sync))) { 554 - kvfree(job->timestamp_query.queries); 555 - return -EFAULT; 542 + err = -EFAULT; 543 + goto error; 556 544 } 557 545 
558 546 job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); 547 + if (!job->timestamp_query.queries[i].syncobj) { 548 + err = -ENOENT; 549 + goto error; 550 + } 559 551 } 560 552 job->timestamp_query.count = reset.count; 561 553 562 554 return 0; 555 + 556 + error: 557 + v3d_timestamp_query_info_free(&job->timestamp_query, i); 558 + return err; 563 559 } 564 560 565 561 /* Get data for the copy timestamp query results job submission. */ ··· 578 558 { 579 559 u32 __user *offsets, *syncs; 580 560 struct drm_v3d_copy_timestamp_query copy; 581 - int i; 561 + unsigned int i; 562 + int err; 582 563 583 564 if (!job) { 584 565 DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); ··· 612 591 u32 offset, sync; 613 592 614 593 if (copy_from_user(&offset, offsets++, sizeof(offset))) { 615 - kvfree(job->timestamp_query.queries); 616 - return -EFAULT; 594 + err = -EFAULT; 595 + goto error; 617 596 } 618 597 619 598 job->timestamp_query.queries[i].offset = offset; 620 599 621 600 if (copy_from_user(&sync, syncs++, sizeof(sync))) { 622 - kvfree(job->timestamp_query.queries); 623 - return -EFAULT; 601 + err = -EFAULT; 602 + goto error; 624 603 } 625 604 626 605 job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); 606 + if (!job->timestamp_query.queries[i].syncobj) { 607 + err = -ENOENT; 608 + goto error; 609 + } 627 610 } 628 611 job->timestamp_query.count = copy.count; 629 612 ··· 638 613 job->copy.stride = copy.stride; 639 614 640 615 return 0; 616 + 617 + error: 618 + v3d_timestamp_query_info_free(&job->timestamp_query, i); 619 + return err; 641 620 } 642 621 643 622 static int ··· 652 623 u32 __user *syncs; 653 624 u64 __user *kperfmon_ids; 654 625 struct drm_v3d_reset_performance_query reset; 626 + unsigned int i, j; 627 + int err; 655 628 656 629 if (!job) { 657 630 DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); ··· 668 637 if (copy_from_user(&reset, ext, sizeof(reset))) 669 638 return -EFAULT; 670 639 
640 + if (reset.nperfmons > V3D_MAX_PERFMONS) 641 + return -EINVAL; 642 + 671 643 job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY; 672 644 673 645 job->performance_query.queries = kvmalloc_array(reset.count, ··· 682 648 syncs = u64_to_user_ptr(reset.syncs); 683 649 kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids); 684 650 685 - for (int i = 0; i < reset.count; i++) { 651 + for (i = 0; i < reset.count; i++) { 686 652 u32 sync; 687 653 u64 ids; 688 654 u32 __user *ids_pointer; 689 655 u32 id; 690 656 691 657 if (copy_from_user(&sync, syncs++, sizeof(sync))) { 692 - kvfree(job->performance_query.queries); 693 - return -EFAULT; 658 + err = -EFAULT; 659 + goto error; 694 660 } 695 661 696 - job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); 697 - 698 662 if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) { 699 - kvfree(job->performance_query.queries); 700 - return -EFAULT; 663 + err = -EFAULT; 664 + goto error; 701 665 } 702 666 703 667 ids_pointer = u64_to_user_ptr(ids); 704 668 705 - for (int j = 0; j < reset.nperfmons; j++) { 669 + for (j = 0; j < reset.nperfmons; j++) { 706 670 if (copy_from_user(&id, ids_pointer++, sizeof(id))) { 707 - kvfree(job->performance_query.queries); 708 - return -EFAULT; 671 + err = -EFAULT; 672 + goto error; 709 673 } 710 674 711 675 job->performance_query.queries[i].kperfmon_ids[j] = id; 676 + } 677 + 678 + job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); 679 + if (!job->performance_query.queries[i].syncobj) { 680 + err = -ENOENT; 681 + goto error; 712 682 } 713 683 } 714 684 job->performance_query.count = reset.count; 715 685 job->performance_query.nperfmons = reset.nperfmons; 716 686 717 687 return 0; 688 + 689 + error: 690 + v3d_performance_query_info_free(&job->performance_query, i); 691 + return err; 718 692 } 719 693 720 694 static int ··· 733 691 u32 __user *syncs; 734 692 u64 __user *kperfmon_ids; 735 693 struct drm_v3d_copy_performance_query copy; 694 + 
unsigned int i, j; 695 + int err; 736 696 737 697 if (!job) { 738 698 DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); ··· 752 708 if (copy.pad) 753 709 return -EINVAL; 754 710 711 + if (copy.nperfmons > V3D_MAX_PERFMONS) 712 + return -EINVAL; 713 + 755 714 job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY; 756 715 757 716 job->performance_query.queries = kvmalloc_array(copy.count, ··· 766 719 syncs = u64_to_user_ptr(copy.syncs); 767 720 kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids); 768 721 769 - for (int i = 0; i < copy.count; i++) { 722 + for (i = 0; i < copy.count; i++) { 770 723 u32 sync; 771 724 u64 ids; 772 725 u32 __user *ids_pointer; 773 726 u32 id; 774 727 775 728 if (copy_from_user(&sync, syncs++, sizeof(sync))) { 776 - kvfree(job->performance_query.queries); 777 - return -EFAULT; 729 + err = -EFAULT; 730 + goto error; 778 731 } 779 732 780 - job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); 781 - 782 733 if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) { 783 - kvfree(job->performance_query.queries); 784 - return -EFAULT; 734 + err = -EFAULT; 735 + goto error; 785 736 } 786 737 787 738 ids_pointer = u64_to_user_ptr(ids); 788 739 789 - for (int j = 0; j < copy.nperfmons; j++) { 740 + for (j = 0; j < copy.nperfmons; j++) { 790 741 if (copy_from_user(&id, ids_pointer++, sizeof(id))) { 791 - kvfree(job->performance_query.queries); 792 - return -EFAULT; 742 + err = -EFAULT; 743 + goto error; 793 744 } 794 745 795 746 job->performance_query.queries[i].kperfmon_ids[j] = id; 747 + } 748 + 749 + job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); 750 + if (!job->performance_query.queries[i].syncobj) { 751 + err = -ENOENT; 752 + goto error; 796 753 } 797 754 } 798 755 job->performance_query.count = copy.count; ··· 810 759 job->copy.stride = copy.stride; 811 760 812 761 return 0; 762 + 763 + error: 764 + v3d_performance_query_info_free(&job->performance_query, i); 765 + return err; 
813 766 } 814 767 815 768 /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+1 -1
drivers/gpu/drm/virtio/virtgpu_submit.c
··· 48 48 static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit, 49 49 struct dma_fence *in_fence) 50 50 { 51 - u32 context = submit->fence_ctx + submit->ring_idx; 51 + u64 context = submit->fence_ctx + submit->ring_idx; 52 52 53 53 if (dma_fence_match_context(in_fence, context)) 54 54 return 0;
+8 -2
drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 1 2 /********************************************************** 2 - * Copyright 2021 VMware, Inc. 3 - * SPDX-License-Identifier: GPL-2.0 OR MIT 3 + * 4 + * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 4 6 * 5 7 * Permission is hereby granted, free of charge, to any person 6 8 * obtaining a copy of this software and associated documentation ··· 32 30 #include "device_include/svga3d_surfacedefs.h" 33 31 34 32 #include <drm/vmwgfx_drm.h> 33 + 34 + #define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32) 35 + #define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \ 36 + ((svga3d_flags) & ((uint64_t)U32_MAX)) 35 37 36 38 static inline u32 clamped_umul32(u32 a, u32 b) 37 39 {
+76 -51
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /************************************************************************** 3 3 * 4 - * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA 5 - * All Rights Reserved. 4 + * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 6 6 * 7 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 8 * copy of this software and associated documentation files (the ··· 28 28 29 29 #include "vmwgfx_bo.h" 30 30 #include "vmwgfx_drv.h" 31 - 31 + #include "vmwgfx_resource_priv.h" 32 32 33 33 #include <drm/ttm/ttm_placement.h> 34 34 35 35 static void vmw_bo_release(struct vmw_bo *vbo) 36 36 { 37 + struct vmw_resource *res; 38 + 37 39 WARN_ON(vbo->tbo.base.funcs && 38 40 kref_read(&vbo->tbo.base.refcount) != 0); 39 41 vmw_bo_unmap(vbo); 42 + 43 + xa_destroy(&vbo->detached_resources); 44 + WARN_ON(vbo->is_dumb && !vbo->dumb_surface); 45 + if (vbo->is_dumb && vbo->dumb_surface) { 46 + res = &vbo->dumb_surface->res; 47 + WARN_ON(vbo != res->guest_memory_bo); 48 + WARN_ON(!res->guest_memory_bo); 49 + if (res->guest_memory_bo) { 50 + /* Reserve and switch the backing mob. 
*/ 51 + mutex_lock(&res->dev_priv->cmdbuf_mutex); 52 + (void)vmw_resource_reserve(res, false, true); 53 + vmw_resource_mob_detach(res); 54 + if (res->coherent) 55 + vmw_bo_dirty_release(res->guest_memory_bo); 56 + res->guest_memory_bo = NULL; 57 + res->guest_memory_offset = 0; 58 + vmw_resource_unreserve(res, false, false, false, NULL, 59 + 0); 60 + mutex_unlock(&res->dev_priv->cmdbuf_mutex); 61 + } 62 + vmw_surface_unreference(&vbo->dumb_surface); 63 + } 40 64 drm_gem_object_release(&vbo->tbo.base); 41 65 } 42 66 ··· 350 326 */ 351 327 void *vmw_bo_map_and_cache(struct vmw_bo *vbo) 352 328 { 329 + return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size); 330 + } 331 + 332 + void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size) 333 + { 353 334 struct ttm_buffer_object *bo = &vbo->tbo; 354 335 bool not_used; 355 336 void *virtual; ··· 364 335 if (virtual) 365 336 return virtual; 366 337 367 - ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map); 338 + ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map); 368 339 if (ret) 369 - DRM_ERROR("Buffer object map failed: %d.\n", ret); 340 + DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n", 341 + ret, bo->base.size, size); 370 342 371 343 return ttm_kmap_obj_virtual(&vbo->map, &not_used); 372 344 } ··· 420 390 BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); 421 391 vmw_bo->tbo.priority = 3; 422 392 vmw_bo->res_tree = RB_ROOT; 393 + xa_init(&vmw_bo->detached_resources); 423 394 424 395 params->size = ALIGN(params->size, PAGE_SIZE); 425 396 drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size); ··· 685 654 dma_fence_put(&fence->base); 686 655 } 687 656 688 - 689 - /** 690 - * vmw_dumb_create - Create a dumb kms buffer 691 - * 692 - * @file_priv: Pointer to a struct drm_file identifying the caller. 693 - * @dev: Pointer to the drm device. 694 - * @args: Pointer to a struct drm_mode_create_dumb structure 695 - * Return: Zero on success, negative error code on failure. 
696 - * 697 - * This is a driver callback for the core drm create_dumb functionality. 698 - * Note that this is very similar to the vmw_bo_alloc ioctl, except 699 - * that the arguments have a different format. 700 - */ 701 - int vmw_dumb_create(struct drm_file *file_priv, 702 - struct drm_device *dev, 703 - struct drm_mode_create_dumb *args) 704 - { 705 - struct vmw_private *dev_priv = vmw_priv(dev); 706 - struct vmw_bo *vbo; 707 - int cpp = DIV_ROUND_UP(args->bpp, 8); 708 - int ret; 709 - 710 - switch (cpp) { 711 - case 1: /* DRM_FORMAT_C8 */ 712 - case 2: /* DRM_FORMAT_RGB565 */ 713 - case 4: /* DRM_FORMAT_XRGB8888 */ 714 - break; 715 - default: 716 - /* 717 - * Dumb buffers don't allow anything else. 718 - * This is tested via IGT's dumb_buffers 719 - */ 720 - return -EINVAL; 721 - } 722 - 723 - args->pitch = args->width * cpp; 724 - args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); 725 - 726 - ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 727 - args->size, &args->handle, 728 - &vbo); 729 - /* drop reference from allocate - handle holds it now */ 730 - drm_gem_object_put(&vbo->tbo.base); 731 - return ret; 732 - } 733 - 734 657 /** 735 658 * vmw_bo_swap_notify - swapout notify callback. 
736 659 * ··· 837 852 domain = VMW_BO_DOMAIN_MOB; 838 853 839 854 vmw_bo_placement_set(bo, domain, domain); 855 + } 856 + 857 + void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) 858 + { 859 + xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL); 860 + } 861 + 862 + void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) 863 + { 864 + xa_erase(&vbo->detached_resources, (unsigned long)res); 865 + } 866 + 867 + struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo) 868 + { 869 + unsigned long index; 870 + struct vmw_resource *res = NULL; 871 + struct vmw_surface *surf = NULL; 872 + struct rb_node *rb_itr = vbo->res_tree.rb_node; 873 + 874 + if (vbo->is_dumb && vbo->dumb_surface) { 875 + res = &vbo->dumb_surface->res; 876 + goto out; 877 + } 878 + 879 + xa_for_each(&vbo->detached_resources, index, res) { 880 + if (res->func->res_type == vmw_res_surface) 881 + goto out; 882 + } 883 + 884 + for (rb_itr = rb_first(&vbo->res_tree); rb_itr; 885 + rb_itr = rb_next(rb_itr)) { 886 + res = rb_entry(rb_itr, struct vmw_resource, mob_node); 887 + if (res->func->res_type == vmw_res_surface) 888 + goto out; 889 + } 890 + 891 + out: 892 + if (res) 893 + surf = vmw_res_to_srf(res); 894 + return surf; 840 895 }
+14 -1
drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 36 35 37 36 #include <linux/rbtree_types.h> 38 37 #include <linux/types.h> 38 + #include <linux/xarray.h> 39 39 40 40 struct vmw_bo_dirty; 41 41 struct vmw_fence_obj; 42 42 struct vmw_private; 43 43 struct vmw_resource; 44 + struct vmw_surface; 44 45 45 46 enum vmw_bo_domain { 46 47 VMW_BO_DOMAIN_SYS = BIT(0), ··· 88 85 89 86 struct rb_root res_tree; 90 87 u32 res_prios[TTM_MAX_BO_PRIORITY]; 88 + struct xarray detached_resources; 91 89 92 90 atomic_t cpu_writers; 93 91 /* Not ref-counted. Protected by binding_mutex */ 94 92 struct vmw_resource *dx_query_ctx; 95 93 struct vmw_bo_dirty *dirty; 94 + 95 + bool is_dumb; 96 + struct vmw_surface *dumb_surface; 96 97 }; 97 98 98 99 void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain); ··· 131 124 struct vmw_fence_obj *fence); 132 125 133 126 void *vmw_bo_map_and_cache(struct vmw_bo *vbo); 127 + void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size); 134 128 void vmw_bo_unmap(struct vmw_bo *vbo); 135 129 136 130 void vmw_bo_move_notify(struct ttm_buffer_object *bo, 137 131 struct ttm_resource *mem); 138 132 void vmw_bo_swap_notify(struct ttm_buffer_object *bo); 139 133 134 + void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); 135 + void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); 136 + struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo); 137 + 140 138 int vmw_user_bo_lookup(struct drm_file *filp, 141 139 u32 handle, 142 140 struct vmw_bo **out); 141 + 
143 142 /** 144 143 * vmw_bo_adjust_prio - Adjust the buffer object eviction priority 145 144 * according to attached resources
+31 -9
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 764 763 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); 765 764 766 765 /** 766 + * User handles 767 + */ 768 + struct vmw_user_object { 769 + struct vmw_surface *surface; 770 + struct vmw_bo *buffer; 771 + }; 772 + 773 + int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp, 774 + u32 handle, struct vmw_user_object *uo); 775 + struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo); 776 + void vmw_user_object_unref(struct vmw_user_object *uo); 777 + bool vmw_user_object_is_null(struct vmw_user_object *uo); 778 + struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo); 779 + struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo); 780 + void *vmw_user_object_map(struct vmw_user_object *uo); 781 + void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size); 782 + void vmw_user_object_unmap(struct vmw_user_object *uo); 783 + bool vmw_user_object_is_mapped(struct vmw_user_object *uo); 784 + 785 + /** 767 786 * Resource utilities - vmwgfx_resource.c 768 787 */ 769 788 struct vmw_user_resource_conv; ··· 797 776 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, 798 777 bool no_backup); 799 778 extern bool vmw_resource_needs_backup(const struct vmw_resource *res); 800 - extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, 801 - struct drm_file *filp, 802 - uint32_t handle, 803 - struct vmw_surface **out_surf, 804 - struct vmw_bo **out_buf); 805 779 
extern int vmw_user_resource_lookup_handle( 806 780 struct vmw_private *dev_priv, 807 781 struct ttm_object_file *tfile, ··· 1073 1057 int vmw_kms_resume(struct drm_device *dev); 1074 1058 void vmw_kms_lost_device(struct drm_device *dev); 1075 1059 1076 - int vmw_dumb_create(struct drm_file *file_priv, 1077 - struct drm_device *dev, 1078 - struct drm_mode_create_dumb *args); 1079 1060 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); 1080 1061 extern void vmw_resource_unpin(struct vmw_resource *res); 1081 1062 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); ··· 1189 1176 int vmw_gb_surface_define(struct vmw_private *dev_priv, 1190 1177 const struct vmw_surface_metadata *req, 1191 1178 struct vmw_surface **srf_out); 1179 + struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw, 1180 + struct vmw_bo *bo, 1181 + u32 handle); 1182 + u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw, 1183 + struct vmw_bo *bo, 1184 + u32 handle); 1185 + int vmw_dumb_create(struct drm_file *file_priv, 1186 + struct drm_device *dev, 1187 + struct drm_mode_create_dumb *args); 1192 1188 1193 1189 /* 1194 1190 * Shader management - vmwgfx_shader.c
+7 -10
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 32 32 #define VMW_FENCE_WRAP (1 << 31) 33 33 34 34 struct vmw_fence_manager { 35 - int num_fence_objects; 36 35 struct vmw_private *dev_priv; 37 36 spinlock_t lock; 38 37 struct list_head fence_list; ··· 123 124 { 124 125 struct vmw_fence_obj *fence = 125 126 container_of(f, struct vmw_fence_obj, base); 126 - 127 127 struct vmw_fence_manager *fman = fman_from_fence(fence); 128 128 129 - spin_lock(&fman->lock); 130 - list_del_init(&fence->head); 131 - --fman->num_fence_objects; 132 - spin_unlock(&fman->lock); 129 + if (!list_empty(&fence->head)) { 130 + spin_lock(&fman->lock); 131 + list_del_init(&fence->head); 132 + spin_unlock(&fman->lock); 133 + } 133 134 fence->destroy(fence); 134 135 } 135 136 ··· 256 257 .release = vmw_fence_obj_destroy, 257 258 }; 258 259 259 - 260 260 /* 261 261 * Execute signal actions on fences recently signaled. 262 262 * This is done from a workqueue so we don't have to execute ··· 353 355 goto out_unlock; 354 356 } 355 357 list_add_tail(&fence->head, &fman->fence_list); 356 - ++fman->num_fence_objects; 357 358 358 359 out_unlock: 359 360 spin_unlock(&fman->lock); ··· 400 403 u32 passed_seqno) 401 404 { 402 405 u32 goal_seqno; 403 - struct vmw_fence_obj *fence; 406 + struct vmw_fence_obj *fence, *next_fence; 404 407 405 408 if (likely(!fman->seqno_valid)) 406 409 return false; ··· 410 413 return false; 411 414 412 415 fman->seqno_valid = false; 413 - list_for_each_entry(fence, &fman->fence_list, head) { 416 + list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { 414 417 if (!list_empty(&fence->seq_passed_actions)) { 415 418 fman->seqno_valid = true; 416 419 vmw_fence_goal_write(fman->dev_priv,
+58 -4
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 2 /* 3 - * Copyright 2021-2023 VMware, Inc. 3 + * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term 4 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 4 5 * 5 6 * Permission is hereby granted, free of charge, to any person 6 7 * obtaining a copy of this software and associated documentation ··· 79 78 return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages); 80 79 } 81 80 81 + static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) 82 + { 83 + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); 84 + int ret; 85 + 86 + if (obj->import_attach) { 87 + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); 88 + if (!ret) { 89 + if (drm_WARN_ON(obj->dev, map->is_iomem)) { 90 + dma_buf_vunmap(obj->import_attach->dmabuf, map); 91 + return -EIO; 92 + } 93 + } 94 + } else { 95 + ret = ttm_bo_vmap(bo, map); 96 + } 97 + 98 + return ret; 99 + } 100 + 101 + static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) 102 + { 103 + if (obj->import_attach) 104 + dma_buf_vunmap(obj->import_attach->dmabuf, map); 105 + else 106 + drm_gem_ttm_vunmap(obj, map); 107 + } 108 + 109 + static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 110 + { 111 + int ret; 112 + 113 + if (obj->import_attach) { 114 + /* 115 + * Reset both vm_ops and vm_private_data, so we don't end up with 116 + * vm_ops pointing to our implementation if the dma-buf backend 117 + * doesn't set those fields. 
118 + */ 119 + vma->vm_private_data = NULL; 120 + vma->vm_ops = NULL; 121 + 122 + ret = dma_buf_mmap(obj->dma_buf, vma, 0); 123 + 124 + /* Drop the reference drm_gem_mmap_obj() acquired.*/ 125 + if (!ret) 126 + drm_gem_object_put(obj); 127 + 128 + return ret; 129 + } 130 + 131 + return drm_gem_ttm_mmap(obj, vma); 132 + } 133 + 82 134 static const struct vm_operations_struct vmw_vm_ops = { 83 135 .pfn_mkwrite = vmw_bo_vm_mkwrite, 84 136 .page_mkwrite = vmw_bo_vm_mkwrite, ··· 148 94 .pin = vmw_gem_object_pin, 149 95 .unpin = vmw_gem_object_unpin, 150 96 .get_sg_table = vmw_gem_object_get_sg_table, 151 - .vmap = drm_gem_ttm_vmap, 152 - .vunmap = drm_gem_ttm_vunmap, 153 - .mmap = drm_gem_ttm_mmap, 97 + .vmap = vmw_gem_vmap, 98 + .vunmap = vmw_gem_vunmap, 99 + .mmap = vmw_gem_mmap, 154 100 .vm_ops = &vmw_vm_ops, 155 101 }; 156 102
+204 -300
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 194 193 */ 195 194 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) 196 195 { 197 - if (vps->surf) { 198 - if (vps->surf_mapped) 199 - return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo); 200 - return vps->surf->snooper.image; 201 - } else if (vps->bo) 202 - return vmw_bo_map_and_cache(vps->bo); 203 - return NULL; 196 + struct vmw_surface *surf; 197 + 198 + if (vmw_user_object_is_null(&vps->uo)) 199 + return NULL; 200 + 201 + surf = vmw_user_object_surface(&vps->uo); 202 + if (surf && !vmw_user_object_is_mapped(&vps->uo)) 203 + return surf->snooper.image; 204 + 205 + return vmw_user_object_map(&vps->uo); 204 206 } 205 207 206 208 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, ··· 540 536 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface 541 537 * 542 538 * @vps: plane state associated with the display surface 543 - * @unreference: true if we also want to unreference the display. 
544 539 */ 545 - void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps, 546 - bool unreference) 540 + void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps) 547 541 { 548 - if (vps->surf) { 549 - if (vps->pinned) { 550 - vmw_resource_unpin(&vps->surf->res); 551 - vps->pinned--; 552 - } 542 + struct vmw_surface *surf = vmw_user_object_surface(&vps->uo); 553 543 554 - if (unreference) { 555 - if (vps->pinned) 556 - DRM_ERROR("Surface still pinned\n"); 557 - vmw_surface_unreference(&vps->surf); 544 + if (surf) { 545 + if (vps->pinned) { 546 + vmw_resource_unpin(&surf->res); 547 + vps->pinned--; 558 548 } 559 549 } 560 550 } ··· 570 572 { 571 573 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 572 574 573 - vmw_du_plane_unpin_surf(vps, false); 575 + vmw_du_plane_unpin_surf(vps); 574 576 } 575 577 576 578 ··· 659 661 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); 660 662 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 661 663 662 - if (vps->surf_mapped) { 663 - vmw_bo_unmap(vps->surf->res.guest_memory_bo); 664 - vps->surf_mapped = false; 665 - } 664 + if (!vmw_user_object_is_null(&vps->uo)) 665 + vmw_user_object_unmap(&vps->uo); 666 666 667 667 vmw_du_cursor_plane_unmap_cm(vps); 668 668 vmw_du_put_cursor_mob(vcp, vps); 669 669 670 - vmw_du_plane_unpin_surf(vps, false); 671 - 672 - if (vps->surf) { 673 - vmw_surface_unreference(&vps->surf); 674 - vps->surf = NULL; 675 - } 676 - 677 - if (vps->bo) { 678 - vmw_bo_unreference(&vps->bo); 679 - vps->bo = NULL; 680 - } 670 + vmw_du_plane_unpin_surf(vps); 671 + vmw_user_object_unref(&vps->uo); 681 672 } 682 673 683 674 ··· 685 698 struct drm_framebuffer *fb = new_state->fb; 686 699 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); 687 700 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); 701 + struct vmw_bo *bo = NULL; 688 702 int ret = 0; 689 703 690 - if (vps->surf) { 691 - if (vps->surf_mapped) { 692 - vmw_bo_unmap(vps->surf->res.guest_memory_bo); 693 - 
vps->surf_mapped = false; 694 - } 695 - vmw_surface_unreference(&vps->surf); 696 - vps->surf = NULL; 697 - } 698 - 699 - if (vps->bo) { 700 - vmw_bo_unreference(&vps->bo); 701 - vps->bo = NULL; 704 + if (!vmw_user_object_is_null(&vps->uo)) { 705 + vmw_user_object_unmap(&vps->uo); 706 + vmw_user_object_unref(&vps->uo); 702 707 } 703 708 704 709 if (fb) { 705 710 if (vmw_framebuffer_to_vfb(fb)->bo) { 706 - vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer; 707 - vmw_bo_reference(vps->bo); 711 + vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer; 712 + vps->uo.surface = NULL; 708 713 } else { 709 - vps->surf = vmw_framebuffer_to_vfbs(fb)->surface; 710 - vmw_surface_reference(vps->surf); 714 + memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo)); 711 715 } 716 + vmw_user_object_ref(&vps->uo); 712 717 } 713 718 714 - if (!vps->surf && vps->bo) { 715 - const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32); 719 + bo = vmw_user_object_buffer(&vps->uo); 720 + if (bo) { 721 + struct ttm_operation_ctx ctx = {false, false}; 716 722 717 - /* 718 - * Not using vmw_bo_map_and_cache() helper here as we need to 719 - * reserve the ttm_buffer_object first which 720 - * vmw_bo_map_and_cache() omits. 
721 - */ 722 - ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL); 723 - 724 - if (unlikely(ret != 0)) 723 + ret = ttm_bo_reserve(&bo->tbo, true, false, NULL); 724 + if (ret != 0) 725 725 return -ENOMEM; 726 726 727 - ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map); 728 - 729 - ttm_bo_unreserve(&vps->bo->tbo); 730 - 731 - if (unlikely(ret != 0)) 727 + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 728 + if (ret != 0) 732 729 return -ENOMEM; 733 - } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) { 734 730 735 - WARN_ON(vps->surf->snooper.image); 736 - ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false, 737 - NULL); 738 - if (unlikely(ret != 0)) 739 - return -ENOMEM; 740 - vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo); 741 - ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo); 742 - vps->surf_mapped = true; 731 + vmw_bo_pin_reserved(bo, true); 732 + if (vmw_framebuffer_to_vfb(fb)->bo) { 733 + const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32); 734 + 735 + (void)vmw_bo_map_and_cache_size(bo, size); 736 + } else { 737 + vmw_bo_map_and_cache(bo); 738 + } 739 + ttm_bo_unreserve(&bo->tbo); 743 740 } 744 741 745 - if (vps->surf || vps->bo) { 742 + if (!vmw_user_object_is_null(&vps->uo)) { 746 743 vmw_du_get_cursor_mob(vcp, vps); 747 744 vmw_du_cursor_plane_map_cm(vps); 748 745 } ··· 748 777 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 749 778 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); 750 779 struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state); 780 + struct vmw_bo *old_bo = NULL; 781 + struct vmw_bo *new_bo = NULL; 751 782 s32 hotspot_x, hotspot_y; 783 + int ret; 752 784 753 785 hotspot_x = du->hotspot_x + new_state->hotspot_x; 754 786 hotspot_y = du->hotspot_y + new_state->hotspot_y; 755 787 756 - du->cursor_surface = vps->surf; 788 + du->cursor_surface = vmw_user_object_surface(&vps->uo); 757 789 758 - if (!vps->surf && !vps->bo) { 
790 + if (vmw_user_object_is_null(&vps->uo)) { 759 791 vmw_cursor_update_position(dev_priv, false, 0, 0); 760 792 return; 761 793 } ··· 766 792 vps->cursor.hotspot_x = hotspot_x; 767 793 vps->cursor.hotspot_y = hotspot_y; 768 794 769 - if (vps->surf) { 795 + if (du->cursor_surface) 770 796 du->cursor_age = du->cursor_surface->snooper.age; 797 + 798 + if (!vmw_user_object_is_null(&old_vps->uo)) { 799 + old_bo = vmw_user_object_buffer(&old_vps->uo); 800 + ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL); 801 + if (ret != 0) 802 + return; 771 803 } 772 804 805 + if (!vmw_user_object_is_null(&vps->uo)) { 806 + new_bo = vmw_user_object_buffer(&vps->uo); 807 + if (old_bo != new_bo) { 808 + ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL); 809 + if (ret != 0) 810 + return; 811 + } else { 812 + new_bo = NULL; 813 + } 814 + } 773 815 if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) { 774 816 /* 775 817 * If it hasn't changed, avoid making the device do extra ··· 802 812 new_state->crtc_h, 803 813 hotspot_x, hotspot_y); 804 814 } 815 + 816 + if (old_bo) 817 + ttm_bo_unreserve(&old_bo->tbo); 818 + if (new_bo) 819 + ttm_bo_unreserve(&new_bo->tbo); 805 820 806 821 du->cursor_x = new_state->crtc_x + du->set_gui_x; 807 822 du->cursor_y = new_state->crtc_y + du->set_gui_y; ··· 908 913 } 909 914 910 915 if (!vmw_framebuffer_to_vfb(fb)->bo) { 911 - surface = vmw_framebuffer_to_vfbs(fb)->surface; 916 + surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo); 912 917 913 918 WARN_ON(!surface); 914 919 ··· 1069 1074 memset(&vps->cursor, 0, sizeof(vps->cursor)); 1070 1075 1071 1076 /* Each ref counted resource needs to be acquired again */ 1072 - if (vps->surf) 1073 - (void) vmw_surface_reference(vps->surf); 1074 - 1075 - if (vps->bo) 1076 - (void) vmw_bo_reference(vps->bo); 1077 - 1077 + vmw_user_object_ref(&vps->uo); 1078 1078 state = &vps->base; 1079 1079 1080 1080 __drm_atomic_helper_plane_duplicate_state(plane, state); ··· 1118 1128 struct vmw_plane_state 
*vps = vmw_plane_state_to_vps(state); 1119 1129 1120 1130 /* Should have been freed by cleanup_fb */ 1121 - if (vps->surf) 1122 - vmw_surface_unreference(&vps->surf); 1123 - 1124 - if (vps->bo) 1125 - vmw_bo_unreference(&vps->bo); 1131 + vmw_user_object_unref(&vps->uo); 1126 1132 1127 1133 drm_atomic_helper_plane_destroy_state(plane, state); 1128 1134 } ··· 1213 1227 vmw_framebuffer_to_vfbs(framebuffer); 1214 1228 1215 1229 drm_framebuffer_cleanup(framebuffer); 1216 - vmw_surface_unreference(&vfbs->surface); 1230 + vmw_user_object_unref(&vfbs->uo); 1217 1231 1218 1232 kfree(vfbs); 1219 1233 } ··· 1258 1272 return -ENOSYS; 1259 1273 } 1260 1274 1275 + static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb, 1276 + struct drm_file *file_priv, 1277 + unsigned int *handle) 1278 + { 1279 + struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb); 1280 + struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo); 1281 + 1282 + return drm_gem_handle_create(file_priv, &bo->tbo.base, handle); 1283 + } 1261 1284 1262 1285 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { 1286 + .create_handle = vmw_framebuffer_surface_create_handle, 1263 1287 .destroy = vmw_framebuffer_surface_destroy, 1264 1288 .dirty = drm_atomic_helper_dirtyfb, 1265 1289 }; 1266 1290 1267 1291 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 1268 - struct vmw_surface *surface, 1292 + struct vmw_user_object *uo, 1269 1293 struct vmw_framebuffer **out, 1270 1294 const struct drm_mode_fb_cmd2 1271 - *mode_cmd, 1272 - bool is_bo_proxy) 1295 + *mode_cmd) 1273 1296 1274 1297 { 1275 1298 struct drm_device *dev = &dev_priv->drm; 1276 1299 struct vmw_framebuffer_surface *vfbs; 1277 1300 enum SVGA3dSurfaceFormat format; 1301 + struct vmw_surface *surface; 1278 1302 int ret; 1279 1303 1280 1304 /* 3D is only supported on HWv8 and newer hosts */ 1281 1305 if (dev_priv->active_display_unit == vmw_du_legacy) 1282 1306 return -ENOSYS; 1307 + 1308 
+ surface = vmw_user_object_surface(uo); 1283 1309 1284 1310 /* 1285 1311 * Sanity checks. ··· 1355 1357 } 1356 1358 1357 1359 drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); 1358 - vfbs->surface = vmw_surface_reference(surface); 1359 - vfbs->is_bo_proxy = is_bo_proxy; 1360 + memcpy(&vfbs->uo, uo, sizeof(vfbs->uo)); 1361 + vmw_user_object_ref(&vfbs->uo); 1360 1362 1361 1363 *out = &vfbs->base; 1362 1364 ··· 1368 1370 return 0; 1369 1371 1370 1372 out_err2: 1371 - vmw_surface_unreference(&surface); 1373 + vmw_user_object_unref(&vfbs->uo); 1372 1374 kfree(vfbs); 1373 1375 out_err1: 1374 1376 return ret; ··· 1384 1386 { 1385 1387 struct vmw_framebuffer_bo *vfbd = 1386 1388 vmw_framebuffer_to_vfbd(fb); 1387 - 1388 1389 return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle); 1389 1390 } 1390 1391 ··· 1403 1406 .destroy = vmw_framebuffer_bo_destroy, 1404 1407 .dirty = drm_atomic_helper_dirtyfb, 1405 1408 }; 1406 - 1407 - /** 1408 - * vmw_create_bo_proxy - create a proxy surface for the buffer object 1409 - * 1410 - * @dev: DRM device 1411 - * @mode_cmd: parameters for the new surface 1412 - * @bo_mob: MOB backing the buffer object 1413 - * @srf_out: newly created surface 1414 - * 1415 - * When the content FB is a buffer object, we create a surface as a proxy to the 1416 - * same buffer. This way we can do a surface copy rather than a surface DMA. 
1417 - * This is a more efficient approach 1418 - * 1419 - * RETURNS: 1420 - * 0 on success, error code otherwise 1421 - */ 1422 - static int vmw_create_bo_proxy(struct drm_device *dev, 1423 - const struct drm_mode_fb_cmd2 *mode_cmd, 1424 - struct vmw_bo *bo_mob, 1425 - struct vmw_surface **srf_out) 1426 - { 1427 - struct vmw_surface_metadata metadata = {0}; 1428 - uint32_t format; 1429 - struct vmw_resource *res; 1430 - unsigned int bytes_pp; 1431 - int ret; 1432 - 1433 - switch (mode_cmd->pixel_format) { 1434 - case DRM_FORMAT_ARGB8888: 1435 - case DRM_FORMAT_XRGB8888: 1436 - format = SVGA3D_X8R8G8B8; 1437 - bytes_pp = 4; 1438 - break; 1439 - 1440 - case DRM_FORMAT_RGB565: 1441 - case DRM_FORMAT_XRGB1555: 1442 - format = SVGA3D_R5G6B5; 1443 - bytes_pp = 2; 1444 - break; 1445 - 1446 - case 8: 1447 - format = SVGA3D_P8; 1448 - bytes_pp = 1; 1449 - break; 1450 - 1451 - default: 1452 - DRM_ERROR("Invalid framebuffer format %p4cc\n", 1453 - &mode_cmd->pixel_format); 1454 - return -EINVAL; 1455 - } 1456 - 1457 - metadata.format = format; 1458 - metadata.mip_levels[0] = 1; 1459 - metadata.num_sizes = 1; 1460 - metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp; 1461 - metadata.base_size.height = mode_cmd->height; 1462 - metadata.base_size.depth = 1; 1463 - metadata.scanout = true; 1464 - 1465 - ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out); 1466 - if (ret) { 1467 - DRM_ERROR("Failed to allocate proxy content buffer\n"); 1468 - return ret; 1469 - } 1470 - 1471 - res = &(*srf_out)->res; 1472 - 1473 - /* Reserve and switch the backing mob. 
*/ 1474 - mutex_lock(&res->dev_priv->cmdbuf_mutex); 1475 - (void) vmw_resource_reserve(res, false, true); 1476 - vmw_user_bo_unref(&res->guest_memory_bo); 1477 - res->guest_memory_bo = vmw_user_bo_ref(bo_mob); 1478 - res->guest_memory_offset = 0; 1479 - vmw_resource_unreserve(res, false, false, false, NULL, 0); 1480 - mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1481 - 1482 - return 0; 1483 - } 1484 - 1485 - 1486 1409 1487 1410 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, 1488 1411 struct vmw_bo *bo, ··· 1482 1565 * vmw_kms_new_framebuffer - Create a new framebuffer. 1483 1566 * 1484 1567 * @dev_priv: Pointer to device private struct. 1485 - * @bo: Pointer to buffer object to wrap the kms framebuffer around. 1486 - * Either @bo or @surface must be NULL. 1487 - * @surface: Pointer to a surface to wrap the kms framebuffer around. 1488 - * Either @bo or @surface must be NULL. 1489 - * @only_2d: No presents will occur to this buffer object based framebuffer. 1490 - * This helps the code to do some important optimizations. 1568 + * @uo: Pointer to user object to wrap the kms framebuffer around. 1569 + * Either the buffer or surface inside the user object must be NULL. 1491 1570 * @mode_cmd: Frame-buffer metadata. 1492 1571 */ 1493 1572 struct vmw_framebuffer * 1494 1573 vmw_kms_new_framebuffer(struct vmw_private *dev_priv, 1495 - struct vmw_bo *bo, 1496 - struct vmw_surface *surface, 1497 - bool only_2d, 1574 + struct vmw_user_object *uo, 1498 1575 const struct drm_mode_fb_cmd2 *mode_cmd) 1499 1576 { 1500 1577 struct vmw_framebuffer *vfb = NULL; 1501 - bool is_bo_proxy = false; 1502 1578 int ret; 1503 1579 1504 - /* 1505 - * We cannot use the SurfaceDMA command in an non-accelerated VM, 1506 - * therefore, wrap the buffer object in a surface so we can use the 1507 - * SurfaceCopy command. 
1508 - */ 1509 - if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && 1510 - bo && only_2d && 1511 - mode_cmd->width > 64 && /* Don't create a proxy for cursor */ 1512 - dev_priv->active_display_unit == vmw_du_screen_target) { 1513 - ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd, 1514 - bo, &surface); 1515 - if (ret) 1516 - return ERR_PTR(ret); 1517 - 1518 - is_bo_proxy = true; 1519 - } 1520 - 1521 1580 /* Create the new framebuffer depending one what we have */ 1522 - if (surface) { 1523 - ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, 1524 - mode_cmd, 1525 - is_bo_proxy); 1526 - /* 1527 - * vmw_create_bo_proxy() adds a reference that is no longer 1528 - * needed 1529 - */ 1530 - if (is_bo_proxy) 1531 - vmw_surface_unreference(&surface); 1532 - } else if (bo) { 1533 - ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb, 1581 + if (vmw_user_object_surface(uo)) { 1582 + ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb, 1583 + mode_cmd); 1584 + } else if (uo->buffer) { 1585 + ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb, 1534 1586 mode_cmd); 1535 1587 } else { 1536 1588 BUG(); ··· 1521 1635 { 1522 1636 struct vmw_private *dev_priv = vmw_priv(dev); 1523 1637 struct vmw_framebuffer *vfb = NULL; 1524 - struct vmw_surface *surface = NULL; 1525 - struct vmw_bo *bo = NULL; 1638 + struct vmw_user_object uo = {0}; 1526 1639 int ret; 1527 1640 1528 1641 /* returns either a bo or surface */ 1529 - ret = vmw_user_lookup_handle(dev_priv, file_priv, 1530 - mode_cmd->handles[0], 1531 - &surface, &bo); 1642 + ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0], 1643 + &uo); 1532 1644 if (ret) { 1533 1645 DRM_ERROR("Invalid buffer object handle %u (0x%x).\n", 1534 1646 mode_cmd->handles[0], mode_cmd->handles[0]); ··· 1534 1650 } 1535 1651 1536 1652 1537 - if (!bo && 1653 + if (vmw_user_object_surface(&uo) && 1538 1654 !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) { 1539 1655 
DRM_ERROR("Surface size cannot exceed %dx%d\n", 1540 1656 dev_priv->texture_max_width, ··· 1543 1659 } 1544 1660 1545 1661 1546 - vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface, 1547 - !(dev_priv->capabilities & SVGA_CAP_3D), 1548 - mode_cmd); 1662 + vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd); 1549 1663 if (IS_ERR(vfb)) { 1550 1664 ret = PTR_ERR(vfb); 1551 1665 goto err_out; 1552 1666 } 1553 1667 1554 1668 err_out: 1555 - /* vmw_user_lookup_handle takes one ref so does new_fb */ 1556 - if (bo) 1557 - vmw_user_bo_unref(&bo); 1558 - if (surface) 1559 - vmw_surface_unreference(&surface); 1669 + /* vmw_user_object_lookup takes one ref so does new_fb */ 1670 + vmw_user_object_unref(&uo); 1560 1671 1561 1672 if (ret) { 1562 1673 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); ··· 2464 2585 } 2465 2586 2466 2587 /** 2467 - * vmw_kms_update_proxy - Helper function to update a proxy surface from 2468 - * its backing MOB. 2469 - * 2470 - * @res: Pointer to the surface resource 2471 - * @clips: Clip rects in framebuffer (surface) space. 2472 - * @num_clips: Number of clips in @clips. 2473 - * @increment: Integer with which to increment the clip counter when looping. 2474 - * Used to skip a predetermined number of clip rects. 2475 - * 2476 - * This function makes sure the proxy surface is updated from its backing MOB 2477 - * using the region given by @clips. The surface resource @res and its backing 2478 - * MOB needs to be reserved and validated on call. 
2479 - */ 2480 - int vmw_kms_update_proxy(struct vmw_resource *res, 2481 - const struct drm_clip_rect *clips, 2482 - unsigned num_clips, 2483 - int increment) 2484 - { 2485 - struct vmw_private *dev_priv = res->dev_priv; 2486 - struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size; 2487 - struct { 2488 - SVGA3dCmdHeader header; 2489 - SVGA3dCmdUpdateGBImage body; 2490 - } *cmd; 2491 - SVGA3dBox *box; 2492 - size_t copy_size = 0; 2493 - int i; 2494 - 2495 - if (!clips) 2496 - return 0; 2497 - 2498 - cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips); 2499 - if (!cmd) 2500 - return -ENOMEM; 2501 - 2502 - for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) { 2503 - box = &cmd->body.box; 2504 - 2505 - cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; 2506 - cmd->header.size = sizeof(cmd->body); 2507 - cmd->body.image.sid = res->id; 2508 - cmd->body.image.face = 0; 2509 - cmd->body.image.mipmap = 0; 2510 - 2511 - if (clips->x1 > size->width || clips->x2 > size->width || 2512 - clips->y1 > size->height || clips->y2 > size->height) { 2513 - DRM_ERROR("Invalid clips outsize of framebuffer.\n"); 2514 - return -EINVAL; 2515 - } 2516 - 2517 - box->x = clips->x1; 2518 - box->y = clips->y1; 2519 - box->z = 0; 2520 - box->w = clips->x2 - clips->x1; 2521 - box->h = clips->y2 - clips->y1; 2522 - box->d = 1; 2523 - 2524 - copy_size += sizeof(*cmd); 2525 - } 2526 - 2527 - vmw_cmd_commit(dev_priv, copy_size); 2528 - 2529 - return 0; 2530 - } 2531 - 2532 - /** 2533 2588 * vmw_kms_create_implicit_placement_property - Set up the implicit placement 2534 2589 * property. 
2535 2590 * ··· 2597 2784 } else { 2598 2785 struct vmw_framebuffer_surface *vfbs = 2599 2786 container_of(update->vfb, typeof(*vfbs), base); 2787 + struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo); 2600 2788 2601 - ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res, 2789 + ret = vmw_validation_add_resource(&val_ctx, &surf->res, 2602 2790 0, VMW_RES_DIRTY_NONE, NULL, 2603 2791 NULL); 2604 2792 } ··· 2754 2940 num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height); 2755 2941 2756 2942 return num_modes; 2943 + } 2944 + 2945 + struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo) 2946 + { 2947 + if (uo->buffer) 2948 + vmw_user_bo_ref(uo->buffer); 2949 + else if (uo->surface) 2950 + vmw_surface_reference(uo->surface); 2951 + return uo; 2952 + } 2953 + 2954 + void vmw_user_object_unref(struct vmw_user_object *uo) 2955 + { 2956 + if (uo->buffer) 2957 + vmw_user_bo_unref(&uo->buffer); 2958 + else if (uo->surface) 2959 + vmw_surface_unreference(&uo->surface); 2960 + } 2961 + 2962 + struct vmw_bo * 2963 + vmw_user_object_buffer(struct vmw_user_object *uo) 2964 + { 2965 + if (uo->buffer) 2966 + return uo->buffer; 2967 + else if (uo->surface) 2968 + return uo->surface->res.guest_memory_bo; 2969 + return NULL; 2970 + } 2971 + 2972 + struct vmw_surface * 2973 + vmw_user_object_surface(struct vmw_user_object *uo) 2974 + { 2975 + if (uo->buffer) 2976 + return uo->buffer->dumb_surface; 2977 + return uo->surface; 2978 + } 2979 + 2980 + void *vmw_user_object_map(struct vmw_user_object *uo) 2981 + { 2982 + struct vmw_bo *bo = vmw_user_object_buffer(uo); 2983 + 2984 + WARN_ON(!bo); 2985 + return vmw_bo_map_and_cache(bo); 2986 + } 2987 + 2988 + void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size) 2989 + { 2990 + struct vmw_bo *bo = vmw_user_object_buffer(uo); 2991 + 2992 + WARN_ON(!bo); 2993 + return vmw_bo_map_and_cache_size(bo, size); 2994 + } 2995 + 2996 + void vmw_user_object_unmap(struct vmw_user_object 
*uo) 2997 + { 2998 + struct vmw_bo *bo = vmw_user_object_buffer(uo); 2999 + int ret; 3000 + 3001 + WARN_ON(!bo); 3002 + 3003 + /* Fence the mob creation so we are guarateed to have the mob */ 3004 + ret = ttm_bo_reserve(&bo->tbo, false, false, NULL); 3005 + if (ret != 0) 3006 + return; 3007 + 3008 + vmw_bo_unmap(bo); 3009 + vmw_bo_pin_reserved(bo, false); 3010 + 3011 + ttm_bo_unreserve(&bo->tbo); 3012 + } 3013 + 3014 + bool vmw_user_object_is_mapped(struct vmw_user_object *uo) 3015 + { 3016 + struct vmw_bo *bo; 3017 + 3018 + if (!uo || vmw_user_object_is_null(uo)) 3019 + return false; 3020 + 3021 + bo = vmw_user_object_buffer(uo); 3022 + 3023 + if (WARN_ON(!bo)) 3024 + return false; 3025 + 3026 + WARN_ON(bo->map.bo && !bo->map.virtual); 3027 + return bo->map.virtual; 3028 + } 3029 + 3030 + bool vmw_user_object_is_null(struct vmw_user_object *uo) 3031 + { 3032 + return !uo->buffer && !uo->surface; 2757 3033 }
+6 -11
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 222 221 223 222 struct vmw_framebuffer_surface { 224 223 struct vmw_framebuffer base; 225 - struct vmw_surface *surface; 226 - bool is_bo_proxy; /* true if this is proxy surface for DMA buf */ 224 + struct vmw_user_object uo; 227 225 }; 228 - 229 226 230 227 struct vmw_framebuffer_bo { 231 228 struct vmw_framebuffer base; ··· 276 277 */ 277 278 struct vmw_plane_state { 278 279 struct drm_plane_state base; 279 - struct vmw_surface *surf; 280 - struct vmw_bo *bo; 280 + struct vmw_user_object uo; 281 281 282 282 int content_fb_type; 283 283 unsigned long bo_size; ··· 455 457 uint32_t num_clips); 456 458 struct vmw_framebuffer * 457 459 vmw_kms_new_framebuffer(struct vmw_private *dev_priv, 458 - struct vmw_bo *bo, 459 - struct vmw_surface *surface, 460 - bool only_2d, 460 + struct vmw_user_object *uo, 461 461 const struct drm_mode_fb_cmd2 *mode_cmd); 462 462 void vmw_guess_mode_timing(struct drm_display_mode *mode); 463 463 void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv); ··· 482 486 struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane); 483 487 void vmw_du_plane_destroy_state(struct drm_plane *plane, 484 488 struct drm_plane_state *state); 485 - void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps, 486 - bool unreference); 489 + void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps); 487 490 488 491 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, 489 492 struct drm_atomic_state *state);
+9 -5
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 148 147 struct vmw_bo *buf; 149 148 int ret; 150 149 151 - buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 152 - vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo; 150 + buf = vfb->bo ? 151 + vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 152 + vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo); 153 153 154 154 if (!buf) 155 155 return 0; ··· 171 169 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 172 170 struct vmw_bo *buf; 173 171 174 - buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 175 - vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo; 172 + buf = vfb->bo ? 173 + vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 174 + vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo); 175 + 176 176 177 177 if (WARN_ON(!buf)) 178 178 return 0;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
··· 92 92 { 93 93 struct vmw_escape_video_flush *flush; 94 94 size_t fifo_size; 95 - bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object); 95 + bool have_so = (dev_priv->active_display_unit != vmw_du_legacy); 96 96 int i, num_items; 97 97 SVGAGuestPtr ptr; 98 98
+28 -4
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2013 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2013-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 32 31 */ 33 32 34 33 #include "vmwgfx_drv.h" 34 + #include "vmwgfx_bo.h" 35 35 #include "ttm_object.h" 36 36 #include <linux/dma-buf.h> 37 37 ··· 90 88 uint32_t handle, uint32_t flags, 91 89 int *prime_fd) 92 90 { 91 + struct vmw_private *vmw = vmw_priv(dev); 93 92 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 93 + struct vmw_bo *vbo; 94 94 int ret; 95 + int surf_handle; 95 96 96 - if (handle > VMWGFX_NUM_MOB) 97 + if (handle > VMWGFX_NUM_MOB) { 97 98 ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); 98 - else 99 - ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd); 99 + } else { 100 + ret = vmw_user_bo_lookup(file_priv, handle, &vbo); 101 + if (ret) 102 + return ret; 103 + if (vbo && vbo->is_dumb) { 104 + ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, 105 + flags, prime_fd); 106 + } else { 107 + surf_handle = vmw_lookup_surface_handle_for_buffer(vmw, 108 + vbo, 109 + handle); 110 + if (surf_handle > 0) 111 + ret = ttm_prime_handle_to_fd(tfile, surf_handle, 112 + flags, prime_fd); 113 + else 114 + ret = drm_gem_prime_handle_to_fd(dev, file_priv, 115 + handle, flags, 116 + prime_fd); 117 + } 118 + vmw_user_bo_unref(&vbo); 119 + } 100 120 101 121 return ret; 102 122 }
+18 -9
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 59 58 60 59 rb_link_node(&res->mob_node, parent, new); 61 60 rb_insert_color(&res->mob_node, &gbo->res_tree); 61 + vmw_bo_del_detached_resource(gbo, res); 62 62 63 63 vmw_bo_prio_add(gbo, res->used_prio); 64 64 } ··· 289 287 * 290 288 * The pointer this pointed at by out_surf and out_buf needs to be null. 291 289 */ 292 - int vmw_user_lookup_handle(struct vmw_private *dev_priv, 290 + int vmw_user_object_lookup(struct vmw_private *dev_priv, 293 291 struct drm_file *filp, 294 - uint32_t handle, 295 - struct vmw_surface **out_surf, 296 - struct vmw_bo **out_buf) 292 + u32 handle, 293 + struct vmw_user_object *uo) 297 294 { 298 295 struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile; 299 296 struct vmw_resource *res; 300 297 int ret; 301 298 302 - BUG_ON(*out_surf || *out_buf); 299 + WARN_ON(uo->surface || uo->buffer); 303 300 304 301 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle, 305 302 user_surface_converter, 306 303 &res); 307 304 if (!ret) { 308 - *out_surf = vmw_res_to_srf(res); 305 + uo->surface = vmw_res_to_srf(res); 309 306 return 0; 310 307 } 311 308 312 - *out_surf = NULL; 313 - ret = vmw_user_bo_lookup(filp, handle, out_buf); 309 + uo->surface = NULL; 310 + ret = vmw_user_bo_lookup(filp, handle, &uo->buffer); 311 + if (!ret && !uo->buffer->is_dumb) { 312 + uo->surface = vmw_lookup_surface_for_buffer(dev_priv, 313 + uo->buffer, 314 + handle); 315 + if (uo->surface) 316 + vmw_user_bo_unref(&uo->buffer); 317 + } 318 + 314 319 return ret; 315 320 } 316 321
+19 -14
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 241 240 struct vmw_connector_state *vmw_conn_state; 242 241 int x, y; 243 242 244 - sou->buffer = vps->bo; 243 + sou->buffer = vmw_user_object_buffer(&vps->uo); 245 244 246 245 conn_state = sou->base.connector.state; 247 246 vmw_conn_state = vmw_connector_state_to_vcs(conn_state); ··· 377 376 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 378 377 struct drm_crtc *crtc = plane->state->crtc ? 379 378 plane->state->crtc : old_state->crtc; 379 + struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo); 380 380 381 - if (vps->bo) 382 - vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false); 383 - vmw_bo_unreference(&vps->bo); 381 + if (bo) 382 + vmw_bo_unpin(vmw_priv(crtc->dev), bo, false); 383 + vmw_user_object_unref(&vps->uo); 384 384 vps->bo_size = 0; 385 385 386 386 vmw_du_plane_cleanup_fb(plane, old_state); ··· 413 411 .bo_type = ttm_bo_type_device, 414 412 .pin = true 415 413 }; 414 + struct vmw_bo *bo = NULL; 416 415 417 416 if (!new_fb) { 418 - vmw_bo_unreference(&vps->bo); 417 + vmw_user_object_unref(&vps->uo); 419 418 vps->bo_size = 0; 420 419 421 420 return 0; ··· 425 422 bo_params.size = new_state->crtc_w * new_state->crtc_h * 4; 426 423 dev_priv = vmw_priv(crtc->dev); 427 424 428 - if (vps->bo) { 425 + bo = vmw_user_object_buffer(&vps->uo); 426 + if (bo) { 429 427 if (vps->bo_size == bo_params.size) { 430 428 /* 431 429 * Note that this might temporarily up the pin-count 432 430 * to 2, until cleanup_fb() is called. 
433 431 */ 434 - return vmw_bo_pin_in_vram(dev_priv, vps->bo, 435 - true); 432 + return vmw_bo_pin_in_vram(dev_priv, bo, true); 436 433 } 437 434 438 - vmw_bo_unreference(&vps->bo); 435 + vmw_user_object_unref(&vps->uo); 439 436 vps->bo_size = 0; 440 437 } 441 438 ··· 445 442 * resume the overlays, this is preferred to failing to alloc. 446 443 */ 447 444 vmw_overlay_pause_all(dev_priv); 448 - ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo); 445 + ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer); 449 446 vmw_overlay_resume_all(dev_priv); 450 447 if (ret) 451 448 return ret; ··· 456 453 * TTM already thinks the buffer is pinned, but make sure the 457 454 * pin_count is upped. 458 455 */ 459 - return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); 456 + return vmw_bo_pin_in_vram(dev_priv, vps->uo.buffer, true); 460 457 } 461 458 462 459 static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update, ··· 583 580 { 584 581 struct vmw_kms_sou_dirty_cmd *blit = cmd; 585 582 struct vmw_framebuffer_surface *vfbs; 583 + struct vmw_surface *surf = NULL; 586 584 587 585 vfbs = container_of(update->vfb, typeof(*vfbs), base); 588 586 ··· 591 587 blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) * 592 588 num_hits; 593 589 594 - blit->body.srcImage.sid = vfbs->surface->res.id; 590 + surf = vmw_user_object_surface(&vfbs->uo); 591 + blit->body.srcImage.sid = surf->res.id; 595 592 blit->body.destScreenId = update->du->unit; 596 593 597 594 /* Update the source and destination bounding box later in post_clip */ ··· 1109 1104 int ret; 1110 1105 1111 1106 if (!srf) 1112 - srf = &vfbs->surface->res; 1107 + srf = &vmw_user_object_surface(&vfbs->uo)->res; 1113 1108 1114 1109 ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE, 1115 1110 NULL, NULL);
+85 -89
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /****************************************************************************** 3 3 * 4 - * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2014-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 30 29 #include "vmwgfx_kms.h" 31 30 #include "vmwgfx_vkms.h" 32 31 #include "vmw_surface_cache.h" 32 + #include <linux/fsnotify.h> 33 33 34 34 #include <drm/drm_atomic.h> 35 35 #include <drm/drm_atomic_helper.h> ··· 737 735 int ret; 738 736 739 737 if (!srf) 740 - srf = &vfbs->surface->res; 738 + srf = &vmw_user_object_surface(&vfbs->uo)->res; 741 739 742 740 ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE, 743 741 NULL, NULL); ··· 747 745 ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true); 748 746 if (ret) 749 747 goto out_unref; 750 - 751 - if (vfbs->is_bo_proxy) { 752 - ret = vmw_kms_update_proxy(srf, clips, num_clips, inc); 753 - if (ret) 754 - goto out_finish; 755 - } 756 748 757 749 sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit; 758 750 sdirty.base.clip = vmw_kms_stdu_surface_clip; ··· 761 765 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 762 766 dest_x, dest_y, num_clips, inc, 763 767 &sdirty.base); 764 - out_finish: 768 + 765 769 vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, 766 770 NULL); 767 771 ··· 873 877 return MODE_OK; 874 878 } 875 879 880 + /* 881 + * Trigger a modeset if the X,Y position of the Screen Target changes. 882 + * This is needed when multi-mon is cycled. The original Screen Target will have 883 + * the same mode but its relative X,Y position in the topology will change. 
884 + */ 885 + static int vmw_stdu_connector_atomic_check(struct drm_connector *conn, 886 + struct drm_atomic_state *state) 887 + { 888 + struct drm_connector_state *conn_state; 889 + struct vmw_screen_target_display_unit *du; 890 + struct drm_crtc_state *new_crtc_state; 891 + 892 + conn_state = drm_atomic_get_connector_state(state, conn); 893 + du = vmw_connector_to_stdu(conn); 894 + 895 + if (!conn_state->crtc) 896 + return 0; 897 + 898 + new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); 899 + if (du->base.gui_x != du->base.set_gui_x || 900 + du->base.gui_y != du->base.set_gui_y) 901 + new_crtc_state->mode_changed = true; 902 + 903 + return 0; 904 + } 905 + 876 906 static const struct drm_connector_funcs vmw_stdu_connector_funcs = { 877 907 .dpms = vmw_du_connector_dpms, 878 908 .detect = vmw_du_connector_detect, ··· 913 891 static const struct 914 892 drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = { 915 893 .get_modes = vmw_connector_get_modes, 916 - .mode_valid = vmw_stdu_connector_mode_valid 894 + .mode_valid = vmw_stdu_connector_mode_valid, 895 + .atomic_check = vmw_stdu_connector_atomic_check, 917 896 }; 918 897 919 898 ··· 941 918 { 942 919 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 943 920 944 - if (vps->surf) 921 + if (vmw_user_object_surface(&vps->uo)) 945 922 WARN_ON(!vps->pinned); 946 - 947 923 vmw_du_plane_cleanup_fb(plane, old_state); 948 924 949 925 vps->content_fb_type = SAME_AS_DISPLAY; 950 926 vps->cpp = 0; 951 927 } 952 - 953 928 954 929 955 930 /** ··· 973 952 enum stdu_content_type new_content_type; 974 953 struct vmw_framebuffer_surface *new_vfbs; 975 954 uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h; 955 + struct drm_plane_state *old_state = plane->state; 956 + struct drm_rect rect; 976 957 int ret; 977 958 978 959 /* No FB to prepare */ 979 960 if (!new_fb) { 980 - if (vps->surf) { 961 + if (vmw_user_object_surface(&vps->uo)) { 981 962 WARN_ON(vps->pinned != 
0); 982 - vmw_surface_unreference(&vps->surf); 963 + vmw_user_object_unref(&vps->uo); 983 964 } 984 965 985 966 return 0; ··· 991 968 new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb); 992 969 993 970 if (new_vfbs && 994 - new_vfbs->surface->metadata.base_size.width == hdisplay && 995 - new_vfbs->surface->metadata.base_size.height == vdisplay) 971 + vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.width == hdisplay && 972 + vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.height == vdisplay) 996 973 new_content_type = SAME_AS_DISPLAY; 997 974 else if (vfb->bo) 998 975 new_content_type = SEPARATE_BO; ··· 1030 1007 metadata.num_sizes = 1; 1031 1008 metadata.scanout = true; 1032 1009 } else { 1033 - metadata = new_vfbs->surface->metadata; 1010 + metadata = vmw_user_object_surface(&new_vfbs->uo)->metadata; 1034 1011 } 1035 1012 1036 1013 metadata.base_size.width = hdisplay; 1037 1014 metadata.base_size.height = vdisplay; 1038 1015 metadata.base_size.depth = 1; 1039 1016 1040 - if (vps->surf) { 1017 + if (vmw_user_object_surface(&vps->uo)) { 1041 1018 struct drm_vmw_size cur_base_size = 1042 - vps->surf->metadata.base_size; 1019 + vmw_user_object_surface(&vps->uo)->metadata.base_size; 1043 1020 1044 1021 if (cur_base_size.width != metadata.base_size.width || 1045 1022 cur_base_size.height != metadata.base_size.height || 1046 - vps->surf->metadata.format != metadata.format) { 1023 + vmw_user_object_surface(&vps->uo)->metadata.format != metadata.format) { 1047 1024 WARN_ON(vps->pinned != 0); 1048 - vmw_surface_unreference(&vps->surf); 1025 + vmw_user_object_unref(&vps->uo); 1049 1026 } 1050 1027 1051 1028 } 1052 1029 1053 - if (!vps->surf) { 1030 + if (!vmw_user_object_surface(&vps->uo)) { 1054 1031 ret = vmw_gb_surface_define(dev_priv, &metadata, 1055 - &vps->surf); 1032 + &vps->uo.surface); 1056 1033 if (ret != 0) { 1057 1034 DRM_ERROR("Couldn't allocate STDU surface.\n"); 1058 1035 return ret; ··· 1065 1042 * The only time we add a 
reference in prepare_fb is if the 1066 1043 * state object doesn't have a reference to begin with 1067 1044 */ 1068 - if (vps->surf) { 1045 + if (vmw_user_object_surface(&vps->uo)) { 1069 1046 WARN_ON(vps->pinned != 0); 1070 - vmw_surface_unreference(&vps->surf); 1047 + vmw_user_object_unref(&vps->uo); 1071 1048 } 1072 1049 1073 - vps->surf = vmw_surface_reference(new_vfbs->surface); 1050 + memcpy(&vps->uo, &new_vfbs->uo, sizeof(vps->uo)); 1051 + vmw_user_object_ref(&vps->uo); 1074 1052 } 1075 1053 1076 - if (vps->surf) { 1054 + if (vmw_user_object_surface(&vps->uo)) { 1077 1055 1078 1056 /* Pin new surface before flipping */ 1079 - ret = vmw_resource_pin(&vps->surf->res, false); 1057 + ret = vmw_resource_pin(&vmw_user_object_surface(&vps->uo)->res, false); 1080 1058 if (ret) 1081 1059 goto out_srf_unref; 1082 1060 ··· 1085 1061 } 1086 1062 1087 1063 vps->content_fb_type = new_content_type; 1064 + 1065 + /* 1066 + * The drm fb code will do blit's via the vmap interface, which doesn't 1067 + * trigger vmw_bo page dirty tracking due to being kernel side (and thus 1068 + * doesn't require mmap'ing) so we have to update the surface's dirty 1069 + * regions by hand but we want to be careful to not overwrite the 1070 + * resource if it has been written to by the gpu (res_dirty). 1071 + */ 1072 + if (vps->uo.buffer && vps->uo.buffer->is_dumb) { 1073 + struct vmw_surface *surf = vmw_user_object_surface(&vps->uo); 1074 + struct vmw_resource *res = &surf->res; 1075 + 1076 + if (!res->res_dirty && drm_atomic_helper_damage_merged(old_state, 1077 + new_state, 1078 + &rect)) { 1079 + /* 1080 + * At some point it might be useful to actually translate 1081 + * (rect.x1, rect.y1) => start, and (rect.x2, rect.y2) => end, 1082 + * but currently the fb code will just report the entire fb 1083 + * dirty so in practice it doesn't matter. 
1084 + */ 1085 + pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT; 1086 + pgoff_t end = __KERNEL_DIV_ROUND_UP(res->guest_memory_offset + 1087 + res->guest_memory_size, 1088 + PAGE_SIZE); 1089 + vmw_resource_dirty_update(res, start, end); 1090 + } 1091 + } 1088 1092 1089 1093 /* 1090 1094 * This should only happen if the buffer object is too large to create a ··· 1124 1072 return 0; 1125 1073 1126 1074 out_srf_unref: 1127 - vmw_surface_unreference(&vps->surf); 1075 + vmw_user_object_unref(&vps->uo); 1128 1076 return ret; 1129 1077 } 1130 1078 ··· 1266 1214 vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update, 1267 1215 uint32_t num_hits) 1268 1216 { 1269 - struct vmw_framebuffer_surface *vfbs; 1270 1217 uint32_t size = 0; 1271 - 1272 - vfbs = container_of(update->vfb, typeof(*vfbs), base); 1273 - 1274 - if (vfbs->is_bo_proxy) 1275 - size += sizeof(struct vmw_stdu_update_gb_image) * num_hits; 1276 1218 1277 1219 size += sizeof(struct vmw_stdu_update); 1278 1220 ··· 1276 1230 static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update, 1277 1231 uint32_t num_hits) 1278 1232 { 1279 - struct vmw_framebuffer_surface *vfbs; 1280 1233 uint32_t size = 0; 1281 - 1282 - vfbs = container_of(update->vfb, typeof(*vfbs), base); 1283 - 1284 - if (vfbs->is_bo_proxy) 1285 - size += sizeof(struct vmw_stdu_update_gb_image) * num_hits; 1286 1234 1287 1235 size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) * 1288 1236 num_hits + sizeof(struct vmw_stdu_update); 1289 1237 1290 1238 return size; 1291 - } 1292 - 1293 - static uint32_t 1294 - vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd) 1295 - { 1296 - struct vmw_framebuffer_surface *vfbs; 1297 - struct drm_plane_state *state = update->plane->state; 1298 - struct drm_plane_state *old_state = update->old_state; 1299 - struct vmw_stdu_update_gb_image *cmd_update = cmd; 1300 - struct drm_atomic_helper_damage_iter iter; 1301 - struct drm_rect clip; 
1302 - uint32_t copy_size = 0; 1303 - 1304 - vfbs = container_of(update->vfb, typeof(*vfbs), base); 1305 - 1306 - /* 1307 - * proxy surface is special where a buffer object type fb is wrapped 1308 - * in a surface and need an update gb image command to sync with device. 1309 - */ 1310 - drm_atomic_helper_damage_iter_init(&iter, old_state, state); 1311 - drm_atomic_for_each_plane_damage(&iter, &clip) { 1312 - SVGA3dBox *box = &cmd_update->body.box; 1313 - 1314 - cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; 1315 - cmd_update->header.size = sizeof(cmd_update->body); 1316 - cmd_update->body.image.sid = vfbs->surface->res.id; 1317 - cmd_update->body.image.face = 0; 1318 - cmd_update->body.image.mipmap = 0; 1319 - 1320 - box->x = clip.x1; 1321 - box->y = clip.y1; 1322 - box->z = 0; 1323 - box->w = drm_rect_width(&clip); 1324 - box->h = drm_rect_height(&clip); 1325 - box->d = 1; 1326 - 1327 - copy_size += sizeof(*cmd_update); 1328 - cmd_update++; 1329 - } 1330 - 1331 - return copy_size; 1332 1239 } 1333 1240 1334 1241 static uint32_t ··· 1298 1299 cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY; 1299 1300 cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) * 1300 1301 num_hits; 1301 - cmd_copy->body.src.sid = vfbs->surface->res.id; 1302 + cmd_copy->body.src.sid = vmw_user_object_surface(&vfbs->uo)->res.id; 1302 1303 cmd_copy->body.dest.sid = stdu->display_srf->res.id; 1303 1304 1304 1305 return sizeof(*cmd_copy); ··· 1369 1370 srf_update.mutex = &dev_priv->cmdbuf_mutex; 1370 1371 srf_update.intr = true; 1371 1372 1372 - if (vfbs->is_bo_proxy) 1373 - srf_update.post_prepare = vmw_stdu_surface_update_proxy; 1374 - 1375 - if (vfbs->surface->res.id != stdu->display_srf->res.id) { 1373 + if (vmw_user_object_surface(&vfbs->uo)->res.id != stdu->display_srf->res.id) { 1376 1374 srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size; 1377 1375 srf_update.pre_clip = vmw_stdu_surface_populate_copy; 1378 1376 srf_update.clip = vmw_stdu_surface_populate_clip; 
··· 1413 1417 stdu = vmw_crtc_to_stdu(crtc); 1414 1418 dev_priv = vmw_priv(crtc->dev); 1415 1419 1416 - stdu->display_srf = vps->surf; 1420 + stdu->display_srf = vmw_user_object_surface(&vps->uo); 1417 1421 stdu->content_fb_type = vps->content_fb_type; 1418 1422 stdu->cpp = vps->cpp; 1419 1423
+271 -9
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /************************************************************************** 3 3 * 4 - * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA 4 + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 5 6 * 6 7 * Permission is hereby granted, free of charge, to any person obtaining a 7 8 * copy of this software and associated documentation files (the ··· 37 36 #include <drm/ttm/ttm_placement.h> 38 37 39 38 #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32) 40 - #define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32) 41 - #define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \ 42 - (svga3d_flags & ((uint64_t)U32_MAX)) 43 39 44 40 /** 45 41 * struct vmw_user_surface - User-space visible surface resource ··· 684 686 struct vmw_resource *res = &user_srf->srf.res; 685 687 686 688 *p_base = NULL; 689 + 690 + /* 691 + * Dumb buffers own the resource and they'll unref the 692 + * resource themselves 693 + */ 694 + if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb) 695 + return; 696 + 687 697 vmw_resource_unreference(&res); 688 698 } 689 699 ··· 818 812 } 819 813 } 820 814 res->guest_memory_size = cur_bo_offset; 821 - if (metadata->scanout && 815 + if (!file_priv->atomic && 816 + metadata->scanout && 822 817 metadata->num_sizes == 1 && 823 818 metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && 824 819 metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT && ··· 871 864 vmw_resource_unreference(&res); 872 865 goto out_unlock; 873 866 } 867 + vmw_bo_add_detached_resource(res->guest_memory_bo, res); 874 868 } 875 869 876 870 tmp = vmw_resource_reference(&srf->res); ··· 900 892 return ret; 901 893 } 902 894 895 + static struct vmw_user_surface * 896 + vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo, 897 + u32 handle) 898 + { 899 + struct vmw_user_surface *user_srf = NULL; 
900 + struct vmw_surface *surf; 901 + struct ttm_base_object *base; 902 + 903 + surf = vmw_bo_surface(bo); 904 + if (surf) { 905 + rcu_read_lock(); 906 + user_srf = container_of(surf, struct vmw_user_surface, srf); 907 + base = &user_srf->prime.base; 908 + if (base && !kref_get_unless_zero(&base->refcount)) { 909 + drm_dbg_driver(&vmw->drm, 910 + "%s: referencing a stale surface handle %d\n", 911 + __func__, handle); 912 + base = NULL; 913 + user_srf = NULL; 914 + } 915 + rcu_read_unlock(); 916 + } 917 + 918 + return user_srf; 919 + } 920 + 921 + struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw, 922 + struct vmw_bo *bo, 923 + u32 handle) 924 + { 925 + struct vmw_user_surface *user_srf = 926 + vmw_lookup_user_surface_for_buffer(vmw, bo, handle); 927 + struct vmw_surface *surf = NULL; 928 + struct ttm_base_object *base; 929 + 930 + if (user_srf) { 931 + surf = vmw_surface_reference(&user_srf->srf); 932 + base = &user_srf->prime.base; 933 + ttm_base_object_unref(&base); 934 + } 935 + return surf; 936 + } 937 + 938 + u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw, 939 + struct vmw_bo *bo, 940 + u32 handle) 941 + { 942 + struct vmw_user_surface *user_srf = 943 + vmw_lookup_user_surface_for_buffer(vmw, bo, handle); 944 + int surf_handle = 0; 945 + struct ttm_base_object *base; 946 + 947 + if (user_srf) { 948 + base = &user_srf->prime.base; 949 + surf_handle = (u32)base->handle; 950 + ttm_base_object_unref(&base); 951 + } 952 + return surf_handle; 953 + } 954 + 955 + static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv, 956 + struct drm_file *file_priv, 957 + u32 fd, u32 *handle, 958 + struct ttm_base_object **base_p) 959 + { 960 + struct ttm_base_object *base; 961 + struct vmw_bo *bo; 962 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 963 + struct vmw_user_surface *user_srf; 964 + int ret; 965 + 966 + ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle); 967 + if (ret) { 968 
+ drm_warn(&dev_priv->drm, 969 + "Wasn't able to find user buffer for fd = %u.\n", fd); 970 + return ret; 971 + } 972 + 973 + ret = vmw_user_bo_lookup(file_priv, *handle, &bo); 974 + if (ret) { 975 + drm_warn(&dev_priv->drm, 976 + "Wasn't able to lookup user buffer for handle = %u.\n", *handle); 977 + return ret; 978 + } 979 + 980 + user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle); 981 + if (WARN_ON(!user_srf)) { 982 + drm_warn(&dev_priv->drm, 983 + "User surface fd %d (handle %d) is null.\n", fd, *handle); 984 + ret = -EINVAL; 985 + goto out; 986 + } 987 + 988 + base = &user_srf->prime.base; 989 + ret = ttm_ref_object_add(tfile, base, NULL, false); 990 + if (ret) { 991 + drm_warn(&dev_priv->drm, 992 + "Couldn't add an object ref for the buffer (%d).\n", *handle); 993 + goto out; 994 + } 995 + 996 + *base_p = base; 997 + out: 998 + vmw_user_bo_unref(&bo); 999 + 1000 + return ret; 1001 + } 903 1002 904 1003 static int 905 1004 vmw_surface_handle_reference(struct vmw_private *dev_priv, ··· 1016 901 struct ttm_base_object **base_p) 1017 902 { 1018 903 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1019 - struct vmw_user_surface *user_srf; 904 + struct vmw_user_surface *user_srf = NULL; 1020 905 uint32_t handle; 1021 906 struct ttm_base_object *base; 1022 907 int ret; 1023 908 1024 909 if (handle_type == DRM_VMW_HANDLE_PRIME) { 1025 910 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); 1026 - if (unlikely(ret != 0)) 1027 - return ret; 911 + if (ret) 912 + return vmw_buffer_prime_to_surface_base(dev_priv, 913 + file_priv, 914 + u_handle, 915 + &handle, 916 + base_p); 1028 917 } else { 1029 918 handle = u_handle; 1030 919 } ··· 1622 1503 ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, 1623 1504 &res->guest_memory_bo); 1624 1505 if (ret == 0) { 1625 - if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) { 1506 + if (res->guest_memory_bo->is_dumb) { 1507 + VMW_DEBUG_USER("Can't backup surface with a dumb 
buffer.\n"); 1508 + vmw_user_bo_unref(&res->guest_memory_bo); 1509 + ret = -EINVAL; 1510 + goto out_unlock; 1511 + } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) { 1626 1512 VMW_DEBUG_USER("Surface backup buffer too small.\n"); 1627 1513 vmw_user_bo_unref(&res->guest_memory_bo); 1628 1514 ret = -EINVAL; ··· 1684 1560 rep->handle = user_srf->prime.base.handle; 1685 1561 rep->backup_size = res->guest_memory_size; 1686 1562 if (res->guest_memory_bo) { 1563 + vmw_bo_add_detached_resource(res->guest_memory_bo, res); 1687 1564 rep->buffer_map_handle = 1688 1565 drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); 1689 1566 rep->buffer_size = res->guest_memory_bo->tbo.base.size; ··· 2223 2098 return ret; 2224 2099 2225 2100 out_unlock: 2101 + return ret; 2102 + } 2103 + 2104 + static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw, 2105 + int bpp) 2106 + { 2107 + switch (bpp) { 2108 + case 8: /* DRM_FORMAT_C8 */ 2109 + return SVGA3D_P8; 2110 + case 16: /* DRM_FORMAT_RGB565 */ 2111 + return SVGA3D_R5G6B5; 2112 + case 32: /* DRM_FORMAT_XRGB8888 */ 2113 + if (has_sm4_context(vmw)) 2114 + return SVGA3D_B8G8R8X8_UNORM; 2115 + return SVGA3D_X8R8G8B8; 2116 + default: 2117 + drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp); 2118 + return SVGA3D_X8R8G8B8; 2119 + } 2120 + } 2121 + 2122 + /** 2123 + * vmw_dumb_create - Create a dumb kms buffer 2124 + * 2125 + * @file_priv: Pointer to a struct drm_file identifying the caller. 2126 + * @dev: Pointer to the drm device. 2127 + * @args: Pointer to a struct drm_mode_create_dumb structure 2128 + * Return: Zero on success, negative error code on failure. 2129 + * 2130 + * This is a driver callback for the core drm create_dumb functionality. 2131 + * Note that this is very similar to the vmw_bo_alloc ioctl, except 2132 + * that the arguments have a different format. 
2133 + */ 2134 + int vmw_dumb_create(struct drm_file *file_priv, 2135 + struct drm_device *dev, 2136 + struct drm_mode_create_dumb *args) 2137 + { 2138 + struct vmw_private *dev_priv = vmw_priv(dev); 2139 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 2140 + struct vmw_bo *vbo = NULL; 2141 + struct vmw_resource *res = NULL; 2142 + union drm_vmw_gb_surface_create_ext_arg arg = { 0 }; 2143 + struct drm_vmw_gb_surface_create_ext_req *req = &arg.req; 2144 + int ret; 2145 + struct drm_vmw_size drm_size = { 2146 + .width = args->width, 2147 + .height = args->height, 2148 + .depth = 1, 2149 + }; 2150 + SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp); 2151 + const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format); 2152 + SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE | 2153 + SVGA3D_SURFACE_HINT_RENDERTARGET | 2154 + SVGA3D_SURFACE_SCREENTARGET | 2155 + SVGA3D_SURFACE_BIND_SHADER_RESOURCE | 2156 + SVGA3D_SURFACE_BIND_RENDER_TARGET; 2157 + 2158 + /* 2159 + * Without mob support we're just going to use raw memory buffer 2160 + * because we wouldn't be able to support full surface coherency 2161 + * without mobs 2162 + */ 2163 + if (!dev_priv->has_mob) { 2164 + int cpp = DIV_ROUND_UP(args->bpp, 8); 2165 + 2166 + switch (cpp) { 2167 + case 1: /* DRM_FORMAT_C8 */ 2168 + case 2: /* DRM_FORMAT_RGB565 */ 2169 + case 4: /* DRM_FORMAT_XRGB8888 */ 2170 + break; 2171 + default: 2172 + /* 2173 + * Dumb buffers don't allow anything else. 
2174 + * This is tested via IGT's dumb_buffers 2175 + */ 2176 + return -EINVAL; 2177 + } 2178 + 2179 + args->pitch = args->width * cpp; 2180 + args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); 2181 + 2182 + ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 2183 + args->size, &args->handle, 2184 + &vbo); 2185 + /* drop reference from allocate - handle holds it now */ 2186 + drm_gem_object_put(&vbo->tbo.base); 2187 + return ret; 2188 + } 2189 + 2190 + req->version = drm_vmw_gb_surface_v1; 2191 + req->multisample_pattern = SVGA3D_MS_PATTERN_NONE; 2192 + req->quality_level = SVGA3D_MS_QUALITY_NONE; 2193 + req->buffer_byte_stride = 0; 2194 + req->must_be_zero = 0; 2195 + req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags); 2196 + req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags); 2197 + req->base.format = (uint32_t)format; 2198 + req->base.drm_surface_flags = drm_vmw_surface_flag_scanout; 2199 + req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable; 2200 + req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer; 2201 + req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent; 2202 + req->base.base_size.width = args->width; 2203 + req->base.base_size.height = args->height; 2204 + req->base.base_size.depth = 1; 2205 + req->base.array_size = 0; 2206 + req->base.mip_levels = 1; 2207 + req->base.multisample_count = 0; 2208 + req->base.buffer_handle = SVGA3D_INVALID_ID; 2209 + req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE; 2210 + ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv); 2211 + if (ret) { 2212 + drm_warn(dev, "Unable to create a dumb buffer\n"); 2213 + return ret; 2214 + } 2215 + 2216 + args->handle = arg.rep.buffer_handle; 2217 + args->size = arg.rep.buffer_size; 2218 + args->pitch = vmw_surface_calculate_pitch(desc, &drm_size); 2219 + 2220 + ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle, 2221 + user_surface_converter, 2222 + &res); 2223 + if (ret) { 2224 + drm_err(dev, 
"Created resource handle doesn't exist!\n"); 2225 + goto err; 2226 + } 2227 + 2228 + vbo = res->guest_memory_bo; 2229 + vbo->is_dumb = true; 2230 + vbo->dumb_surface = vmw_res_to_srf(res); 2231 + 2232 + err: 2233 + if (res) 2234 + vmw_resource_unreference(&res); 2235 + if (ret) 2236 + ttm_ref_object_base_unref(tfile, arg.rep.handle); 2237 + 2226 2238 return ret; 2227 2239 }
+22 -18
drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
··· 75 75 return ret; 76 76 } 77 77 78 - static int 78 + static void 79 79 compute_crc(struct drm_crtc *crtc, 80 80 struct vmw_surface *surf, 81 81 u32 *crc) ··· 101 101 } 102 102 103 103 vmw_bo_unmap(bo); 104 - 105 - return 0; 106 104 } 107 105 108 106 static void ··· 114 116 u64 frame_start, frame_end; 115 117 u32 crc32 = 0; 116 118 struct vmw_surface *surf = 0; 117 - int ret; 118 119 119 120 spin_lock_irq(&du->vkms.crc_state_lock); 120 121 crc_pending = du->vkms.crc_pending; ··· 127 130 return; 128 131 129 132 spin_lock_irq(&du->vkms.crc_state_lock); 130 - surf = du->vkms.surface; 133 + surf = vmw_surface_reference(du->vkms.surface); 131 134 spin_unlock_irq(&du->vkms.crc_state_lock); 132 135 133 - if (vmw_surface_sync(vmw, surf)) { 134 - drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n"); 135 - return; 136 - } 136 + if (surf) { 137 + if (vmw_surface_sync(vmw, surf)) { 138 + drm_warn( 139 + crtc->dev, 140 + "CRC worker wasn't able to sync the crc surface!\n"); 141 + return; 142 + } 137 143 138 - ret = compute_crc(crtc, surf, &crc32); 139 - if (ret) 140 - return; 144 + compute_crc(crtc, surf, &crc32); 145 + vmw_surface_unreference(&surf); 146 + } 141 147 142 148 spin_lock_irq(&du->vkms.crc_state_lock); 143 149 frame_start = du->vkms.frame_start; 144 150 frame_end = du->vkms.frame_end; 145 - crc_pending = du->vkms.crc_pending; 146 151 du->vkms.frame_start = 0; 147 152 du->vkms.frame_end = 0; 148 153 du->vkms.crc_pending = false; ··· 163 164 struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer); 164 165 struct drm_crtc *crtc = &du->crtc; 165 166 struct vmw_private *vmw = vmw_priv(crtc->dev); 166 - struct vmw_surface *surf = NULL; 167 + bool has_surface = false; 167 168 u64 ret_overrun; 168 169 bool locked, ret; 169 170 ··· 178 179 WARN_ON(!ret); 179 180 if (!locked) 180 181 return HRTIMER_RESTART; 181 - surf = du->vkms.surface; 182 + has_surface = du->vkms.surface != NULL; 182 183 vmw_vkms_unlock(crtc); 183 184 
184 - if (du->vkms.crc_enabled && surf) { 185 + if (du->vkms.crc_enabled && has_surface) { 185 186 u64 frame = drm_crtc_accurate_vblank_count(crtc); 186 187 187 188 spin_lock(&du->vkms.crc_state_lock); ··· 335 336 { 336 337 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 337 338 339 + if (du->vkms.surface) 340 + vmw_surface_unreference(&du->vkms.surface); 338 341 WARN_ON(work_pending(&du->vkms.crc_generator_work)); 339 342 hrtimer_cancel(&du->vkms.timer); 340 343 } ··· 498 497 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 499 498 struct vmw_private *vmw = vmw_priv(crtc->dev); 500 499 501 - if (vmw->vkms_enabled) { 500 + if (vmw->vkms_enabled && du->vkms.surface != surf) { 502 501 WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET); 503 - du->vkms.surface = surf; 502 + if (du->vkms.surface) 503 + vmw_surface_unreference(&du->vkms.surface); 504 + if (surf) 505 + du->vkms.surface = vmw_surface_reference(surf); 504 506 } 505 507 } 506 508
+2 -1
drivers/gpu/drm/xe/xe_hwmon.c
··· 203 203 reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0); 204 204 reg_val = xe_mmio_read32(hwmon->gt, rapl_limit); 205 205 if (reg_val & PKG_PWR_LIM_1_EN) { 206 + drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n"); 206 207 ret = -EOPNOTSUPP; 207 - goto unlock; 208 208 } 209 + goto unlock; 209 210 } 210 211 211 212 /* Computation in 64-bits to avoid overflow. Round to nearest. */
+14 -1
drivers/gpu/drm/xe/xe_lrc.c
··· 1634 1634 if (!snapshot) 1635 1635 return NULL; 1636 1636 1637 + if (lrc->bo && lrc->bo->vm) 1638 + xe_vm_get(lrc->bo->vm); 1639 + 1637 1640 snapshot->context_desc = xe_lrc_ggtt_addr(lrc); 1638 1641 snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc); 1639 1642 snapshot->head = xe_lrc_ring_head(lrc); ··· 1656 1653 void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot) 1657 1654 { 1658 1655 struct xe_bo *bo; 1656 + struct xe_vm *vm; 1659 1657 struct iosys_map src; 1660 1658 1661 1659 if (!snapshot) 1662 1660 return; 1663 1661 1664 1662 bo = snapshot->lrc_bo; 1663 + vm = bo->vm; 1665 1664 snapshot->lrc_bo = NULL; 1666 1665 1667 1666 snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL); ··· 1683 1678 xe_bo_unlock(bo); 1684 1679 put_bo: 1685 1680 xe_bo_put(bo); 1681 + if (vm) 1682 + xe_vm_put(vm); 1686 1683 } 1687 1684 1688 1685 void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p) ··· 1734 1727 return; 1735 1728 1736 1729 kvfree(snapshot->lrc_snapshot); 1737 - if (snapshot->lrc_bo) 1730 + if (snapshot->lrc_bo) { 1731 + struct xe_vm *vm; 1732 + 1733 + vm = snapshot->lrc_bo->vm; 1738 1734 xe_bo_put(snapshot->lrc_bo); 1735 + if (vm) 1736 + xe_vm_put(vm); 1737 + } 1739 1738 kfree(snapshot); 1740 1739 } 1741 1740
+1 -1
drivers/gpu/drm/xe/xe_rtp.c
··· 231 231 if (first == last) 232 232 bitmap_set(ctx->active_entries, first, 1); 233 233 else 234 - bitmap_set(ctx->active_entries, first, last - first + 2); 234 + bitmap_set(ctx->active_entries, first, last - first + 1); 235 235 } 236 236 237 237 /**
+1 -1
drivers/gpu/drm/xe/xe_sync.c
··· 263 263 if (sync->fence) 264 264 dma_fence_put(sync->fence); 265 265 if (sync->chain_fence) 266 - dma_fence_put(&sync->chain_fence->base); 266 + dma_fence_chain_free(sync->chain_fence); 267 267 if (sync->ufence) 268 268 user_fence_put(sync->ufence); 269 269 }
+1 -1
drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
··· 150 150 } while (remaining_size); 151 151 152 152 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { 153 - if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks)) 153 + if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) 154 154 size = vres->base.size; 155 155 } 156 156
+11 -7
drivers/hid/amd-sfh-hid/amd_sfh_client.c
··· 288 288 mp2_ops->start(privdata, info); 289 289 cl_data->sensor_sts[i] = amd_sfh_wait_for_response 290 290 (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED); 291 + 292 + if (cl_data->sensor_sts[i] == SENSOR_ENABLED) 293 + cl_data->is_any_sensor_enabled = true; 294 + } 295 + 296 + if (!cl_data->is_any_sensor_enabled || 297 + (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) { 298 + dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", 299 + cl_data->is_any_sensor_enabled); 300 + rc = -EOPNOTSUPP; 301 + goto cleanup; 291 302 } 292 303 293 304 for (i = 0; i < cl_data->num_hid_devices; i++) { 294 305 cl_data->cur_hid_dev = i; 295 306 if (cl_data->sensor_sts[i] == SENSOR_ENABLED) { 296 - cl_data->is_any_sensor_enabled = true; 297 307 rc = amdtp_hid_probe(i, cl_data); 298 308 if (rc) 299 309 goto cleanup; ··· 315 305 cl_data->sensor_sts[i]); 316 306 } 317 307 318 - if (!cl_data->is_any_sensor_enabled || 319 - (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) { 320 - dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled); 321 - rc = -EOPNOTSUPP; 322 - goto cleanup; 323 - } 324 308 schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP)); 325 309 return 0; 326 310
+1 -1
drivers/hid/bpf/Kconfig
··· 3 3 4 4 config HID_BPF 5 5 bool "HID-BPF support" 6 - depends on BPF 6 + depends on BPF_JIT 7 7 depends on BPF_SYSCALL 8 8 depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS 9 9 help
+5
drivers/hid/bpf/hid_bpf_struct_ops.c
··· 183 183 struct hid_device *hdev; 184 184 int count, err = 0; 185 185 186 + /* prevent multiple attach of the same struct_ops */ 187 + if (ops->hdev) 188 + return -EINVAL; 189 + 186 190 hdev = hid_get_device(ops->hid_id); 187 191 if (IS_ERR(hdev)) 188 192 return PTR_ERR(hdev); ··· 252 248 253 249 list_del_rcu(&ops->list); 254 250 synchronize_srcu(&hdev->bpf.srcu); 251 + ops->hdev = NULL; 255 252 256 253 reconnect = hdev->bpf.rdesc_ops == ops; 257 254 if (reconnect)
+10 -57
drivers/hid/wacom_wac.c
··· 692 692 693 693 static int wacom_intuos_get_tool_type(int tool_id) 694 694 { 695 - int tool_type = BTN_TOOL_PEN; 696 - 697 - if (wacom_is_art_pen(tool_id)) 698 - return tool_type; 699 - 700 695 switch (tool_id) { 701 696 case 0x812: /* Inking pen */ 702 697 case 0x801: /* Intuos3 Inking pen */ 703 698 case 0x12802: /* Intuos4/5 Inking Pen */ 704 699 case 0x012: 705 - tool_type = BTN_TOOL_PENCIL; 706 - break; 707 - 708 - case 0x822: /* Pen */ 709 - case 0x842: 710 - case 0x852: 711 - case 0x823: /* Intuos3 Grip Pen */ 712 - case 0x813: /* Intuos3 Classic Pen */ 713 - case 0x802: /* Intuos4/5 13HD/24HD General Pen */ 714 - case 0x8e2: /* IntuosHT2 pen */ 715 - case 0x022: 716 - case 0x200: /* Pro Pen 3 */ 717 - case 0x04200: /* Pro Pen 3 */ 718 - case 0x10842: /* MobileStudio Pro Pro Pen slim */ 719 - case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */ 720 - case 0x16802: /* Cintiq 13HD Pro Pen */ 721 - case 0x18802: /* DTH2242 Pen */ 722 - case 0x10802: /* Intuos4/5 13HD/24HD General Pen */ 723 - case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */ 724 - tool_type = BTN_TOOL_PEN; 725 - break; 700 + return BTN_TOOL_PENCIL; 726 701 727 702 case 0x832: /* Stroke pen */ 728 703 case 0x032: 729 - tool_type = BTN_TOOL_BRUSH; 730 - break; 704 + return BTN_TOOL_BRUSH; 731 705 732 706 case 0x007: /* Mouse 4D and 2D */ 733 707 case 0x09c: 734 708 case 0x094: 735 709 case 0x017: /* Intuos3 2D Mouse */ 736 710 case 0x806: /* Intuos4 Mouse */ 737 - tool_type = BTN_TOOL_MOUSE; 738 - break; 711 + return BTN_TOOL_MOUSE; 739 712 740 713 case 0x096: /* Lens cursor */ 741 714 case 0x097: /* Intuos3 Lens cursor */ 742 715 case 0x006: /* Intuos4 Lens cursor */ 743 - tool_type = BTN_TOOL_LENS; 744 - break; 745 - 746 - case 0x82a: /* Eraser */ 747 - case 0x84a: 748 - case 0x85a: 749 - case 0x91a: 750 - case 0xd1a: 751 - case 0x0fa: 752 - case 0x82b: /* Intuos3 Grip Pen Eraser */ 753 - case 0x81b: /* Intuos3 Classic Pen Eraser */ 754 - case 0x91b: /* Intuos3 Airbrush Eraser */ 755 - 
case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */ 756 - case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */ 757 - case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ 758 - case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */ 759 - case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ 760 - case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ 761 - case 0x1084a: /* MobileStudio Pro Pro Pen slim Eraser */ 762 - case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */ 763 - case 0x1880a: /* DTH2242 Eraser */ 764 - case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */ 765 - tool_type = BTN_TOOL_RUBBER; 766 - break; 716 + return BTN_TOOL_LENS; 767 717 768 718 case 0xd12: 769 719 case 0x912: ··· 721 771 case 0x913: /* Intuos3 Airbrush */ 722 772 case 0x902: /* Intuos4/5 13HD/24HD Airbrush */ 723 773 case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */ 724 - tool_type = BTN_TOOL_AIRBRUSH; 725 - break; 774 + return BTN_TOOL_AIRBRUSH; 775 + 776 + default: 777 + if (tool_id & 0x0008) 778 + return BTN_TOOL_RUBBER; 779 + return BTN_TOOL_PEN; 726 780 } 727 - return tool_type; 728 781 } 729 782 730 783 static void wacom_exit_report(struct wacom_wac *wacom)
+12 -12
drivers/hwmon/adt7475.c
··· 22 22 #include <linux/util_macros.h> 23 23 24 24 /* Indexes for the sysfs hooks */ 25 - 26 - #define INPUT 0 27 - #define MIN 1 28 - #define MAX 2 29 - #define CONTROL 3 30 - #define OFFSET 3 31 - #define AUTOMIN 4 32 - #define THERM 5 33 - #define HYSTERSIS 6 34 - 25 + enum adt_sysfs_id { 26 + INPUT = 0, 27 + MIN = 1, 28 + MAX = 2, 29 + CONTROL = 3, 30 + OFFSET = 3, // Dup 31 + AUTOMIN = 4, 32 + THERM = 5, 33 + HYSTERSIS = 6, 35 34 /* 36 35 * These are unique identifiers for the sysfs functions - unlike the 37 36 * numbers above, these are not also indexes into an array 38 37 */ 38 + ALARM = 9, 39 + FAULT = 10, 40 + }; 39 41 40 - #define ALARM 9 41 - #define FAULT 10 42 42 43 43 /* 7475 Common Registers */ 44 44
+4 -1
drivers/i2c/busses/i2c-qcom-geni.c
··· 990 990 return ret; 991 991 992 992 ret = geni_se_resources_on(&gi2c->se); 993 - if (ret) 993 + if (ret) { 994 + clk_disable_unprepare(gi2c->core_clk); 995 + geni_icc_disable(&gi2c->se); 994 996 return ret; 997 + } 995 998 996 999 enable_irq(gi2c->irq); 997 1000 gi2c->suspended = 0;
+2 -2
drivers/i2c/i2c-slave-testunit.c
··· 18 18 19 19 enum testunit_cmds { 20 20 TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */ 21 - TU_CMD_HOST_NOTIFY, 21 + TU_CMD_SMBUS_HOST_NOTIFY, 22 22 TU_CMD_SMBUS_BLOCK_PROC_CALL, 23 23 TU_NUM_CMDS 24 24 }; ··· 60 60 msg.len = tu->regs[TU_REG_DATAH]; 61 61 break; 62 62 63 - case TU_CMD_HOST_NOTIFY: 63 + case TU_CMD_SMBUS_HOST_NOTIFY: 64 64 msg.addr = 0x08; 65 65 msg.flags = 0; 66 66 msg.len = 3;
+57 -7
drivers/i2c/i2c-smbus.c
··· 34 34 struct i2c_client *client = i2c_verify_client(dev); 35 35 struct alert_data *data = addrp; 36 36 struct i2c_driver *driver; 37 + int ret; 37 38 38 39 if (!client || client->addr != data->addr) 39 40 return 0; ··· 48 47 device_lock(dev); 49 48 if (client->dev.driver) { 50 49 driver = to_i2c_driver(client->dev.driver); 51 - if (driver->alert) 50 + if (driver->alert) { 51 + /* Stop iterating after we find the device */ 52 52 driver->alert(client, data->type, data->data); 53 - else 53 + ret = -EBUSY; 54 + } else { 54 55 dev_warn(&client->dev, "no driver alert()!\n"); 55 - } else 56 + ret = -EOPNOTSUPP; 57 + } 58 + } else { 56 59 dev_dbg(&client->dev, "alert with no driver\n"); 60 + ret = -ENODEV; 61 + } 57 62 device_unlock(dev); 58 63 59 - /* Stop iterating after we find the device */ 60 - return -EBUSY; 64 + return ret; 65 + } 66 + 67 + /* Same as above, but call back all drivers with alert handler */ 68 + 69 + static int smbus_do_alert_force(struct device *dev, void *addrp) 70 + { 71 + struct i2c_client *client = i2c_verify_client(dev); 72 + struct alert_data *data = addrp; 73 + struct i2c_driver *driver; 74 + 75 + if (!client || (client->flags & I2C_CLIENT_TEN)) 76 + return 0; 77 + 78 + /* 79 + * Drivers should either disable alerts, or provide at least 80 + * a minimal handler. Lock so the driver won't change. 
81 + */ 82 + device_lock(dev); 83 + if (client->dev.driver) { 84 + driver = to_i2c_driver(client->dev.driver); 85 + if (driver->alert) 86 + driver->alert(client, data->type, data->data); 87 + } 88 + device_unlock(dev); 89 + 90 + return 0; 61 91 } 62 92 63 93 /* ··· 99 67 { 100 68 struct i2c_smbus_alert *alert = d; 101 69 struct i2c_client *ara; 70 + unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */ 102 71 103 72 ara = alert->ara; 104 73 ··· 127 94 data.addr, data.data); 128 95 129 96 /* Notify driver for the device which issued the alert */ 130 - device_for_each_child(&ara->adapter->dev, &data, 131 - smbus_do_alert); 97 + status = device_for_each_child(&ara->adapter->dev, &data, 98 + smbus_do_alert); 99 + /* 100 + * If we read the same address more than once, and the alert 101 + * was not handled by a driver, it won't do any good to repeat 102 + * the loop because it will never terminate. Try again, this 103 + * time calling the alert handlers of all devices connected to 104 + * the bus, and abort the loop afterwards. If this helps, we 105 + * are all set. If it doesn't, there is nothing else we can do, 106 + * so we might as well abort the loop. 107 + * Note: This assumes that a driver with alert handler handles 108 + * the alert properly and clears it if necessary. 109 + */ 110 + if (data.addr == prev_addr && status != -EBUSY) { 111 + device_for_each_child(&ara->adapter->dev, &data, 112 + smbus_do_alert_force); 113 + break; 114 + } 115 + prev_addr = data.addr; 132 116 } 133 117 134 118 return IRQ_HANDLED;
+3
drivers/input/input-mt.c
··· 46 46 return 0; 47 47 if (mt) 48 48 return mt->num_slots != num_slots ? -EINVAL : 0; 49 + /* Arbitrary limit for avoiding too large memory allocation. */ 50 + if (num_slots > 1024) 51 + return -EINVAL; 49 52 50 53 mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL); 51 54 if (!mt)
+1 -1
drivers/input/touchscreen/cyttsp4_core.c
··· 871 871 struct cyttsp4_touch tch; 872 872 int sig; 873 873 int i, j, t = 0; 874 - int ids[max(CY_TMA1036_MAX_TCH, CY_TMA4XX_MAX_TCH)]; 874 + int ids[MAX(CY_TMA1036_MAX_TCH, CY_TMA4XX_MAX_TCH)]; 875 875 876 876 memset(ids, 0, si->si_ofs.tch_abs[CY_TCH_T].max * sizeof(int)); 877 877 for (i = 0; i < num_cur_tch; i++) {
+4 -2
drivers/irqchip/irq-loongarch-cpu.c
··· 18 18 19 19 static u32 lpic_gsi_to_irq(u32 gsi) 20 20 { 21 + int irq = 0; 22 + 21 23 /* Only pch irqdomain transferring is required for LoongArch. */ 22 24 if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) 23 - return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); 25 + irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); 24 26 25 - return 0; 27 + return (irq > 0) ? irq : 0; 26 28 } 27 29 28 30 static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
+16 -4
drivers/irqchip/irq-mbigen.c
··· 64 64 void __iomem *base; 65 65 }; 66 66 67 + static inline unsigned int get_mbigen_node_offset(unsigned int nid) 68 + { 69 + unsigned int offset = nid * MBIGEN_NODE_OFFSET; 70 + 71 + /* 72 + * To avoid touched clear register in unexpected way, we need to directly 73 + * skip clear register when access to more than 10 mbigen nodes. 74 + */ 75 + if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET)) 76 + offset += MBIGEN_NODE_OFFSET; 77 + 78 + return offset; 79 + } 80 + 67 81 static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) 68 82 { 69 83 unsigned int nid, pin; ··· 86 72 nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; 87 73 pin = hwirq % IRQS_PER_MBIGEN_NODE; 88 74 89 - return pin * 4 + nid * MBIGEN_NODE_OFFSET 90 - + REG_MBIGEN_VEC_OFFSET; 75 + return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET; 91 76 } 92 77 93 78 static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, ··· 101 88 *mask = 1 << (irq_ofst % 32); 102 89 ofst = irq_ofst / 32 * 4; 103 90 104 - *addr = ofst + nid * MBIGEN_NODE_OFFSET 105 - + REG_MBIGEN_TYPE_OFFSET; 91 + *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET; 106 92 } 107 93 108 94 static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
+7 -7
drivers/irqchip/irq-meson-gpio.c
··· 178 178 void __iomem *base; 179 179 u32 channel_irqs[MAX_NUM_CHANNEL]; 180 180 DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); 181 - spinlock_t lock; 181 + raw_spinlock_t lock; 182 182 }; 183 183 184 184 static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, ··· 187 187 unsigned long flags; 188 188 u32 tmp; 189 189 190 - spin_lock_irqsave(&ctl->lock, flags); 190 + raw_spin_lock_irqsave(&ctl->lock, flags); 191 191 192 192 tmp = readl_relaxed(ctl->base + reg); 193 193 tmp &= ~mask; 194 194 tmp |= val; 195 195 writel_relaxed(tmp, ctl->base + reg); 196 196 197 - spin_unlock_irqrestore(&ctl->lock, flags); 197 + raw_spin_unlock_irqrestore(&ctl->lock, flags); 198 198 } 199 199 200 200 static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) ··· 244 244 unsigned long flags; 245 245 unsigned int idx; 246 246 247 - spin_lock_irqsave(&ctl->lock, flags); 247 + raw_spin_lock_irqsave(&ctl->lock, flags); 248 248 249 249 /* Find a free channel */ 250 250 idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels); 251 251 if (idx >= ctl->params->nr_channels) { 252 - spin_unlock_irqrestore(&ctl->lock, flags); 252 + raw_spin_unlock_irqrestore(&ctl->lock, flags); 253 253 pr_err("No channel available\n"); 254 254 return -ENOSPC; 255 255 } ··· 257 257 /* Mark the channel as used */ 258 258 set_bit(idx, ctl->channel_map); 259 259 260 - spin_unlock_irqrestore(&ctl->lock, flags); 260 + raw_spin_unlock_irqrestore(&ctl->lock, flags); 261 261 262 262 /* 263 263 * Setup the mux of the channel to route the signal of the pad ··· 567 567 if (!ctl) 568 568 return -ENOMEM; 569 569 570 - spin_lock_init(&ctl->lock); 570 + raw_spin_lock_init(&ctl->lock); 571 571 572 572 ctl->base = of_iomap(node, 0); 573 573 if (!ctl->base) {
+3 -3
drivers/irqchip/irq-pic32-evic.c
··· 161 161 return ret; 162 162 } 163 163 164 - int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, 165 - const u32 *intspec, unsigned int intsize, 166 - irq_hw_number_t *out_hwirq, unsigned int *out_type) 164 + static int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, 165 + const u32 *intspec, unsigned int intsize, 166 + irq_hw_number_t *out_hwirq, unsigned int *out_type) 167 167 { 168 168 struct evic_chip_data *priv = d->host_data; 169 169
+25 -7
drivers/irqchip/irq-riscv-aplic-msi.c
··· 32 32 aplic_irq_unmask(d); 33 33 } 34 34 35 - static void aplic_msi_irq_eoi(struct irq_data *d) 35 + static void aplic_msi_irq_retrigger_level(struct irq_data *d) 36 36 { 37 37 struct aplic_priv *priv = irq_data_get_irq_chip_data(d); 38 - 39 - /* 40 - * EOI handling is required only for level-triggered interrupts 41 - * when APLIC is in MSI mode. 42 - */ 43 38 44 39 switch (irqd_get_trigger_type(d)) { 45 40 case IRQ_TYPE_LEVEL_LOW: ··· 52 57 writel(d->hwirq, priv->regs + APLIC_SETIPNUM_LE); 53 58 break; 54 59 } 60 + } 61 + 62 + static void aplic_msi_irq_eoi(struct irq_data *d) 63 + { 64 + /* 65 + * EOI handling is required only for level-triggered interrupts 66 + * when APLIC is in MSI mode. 67 + */ 68 + aplic_msi_irq_retrigger_level(d); 69 + } 70 + 71 + static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type) 72 + { 73 + int rc = aplic_irq_set_type(d, type); 74 + 75 + if (rc) 76 + return rc; 77 + /* 78 + * Updating sourcecfg register for level-triggered interrupts 79 + * requires interrupt retriggering when APLIC is in MSI mode. 80 + */ 81 + aplic_msi_irq_retrigger_level(d); 82 + return 0; 55 83 } 56 84 57 85 static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg) ··· 148 130 .name = "APLIC-MSI", 149 131 .irq_mask = aplic_msi_irq_mask, 150 132 .irq_unmask = aplic_msi_irq_unmask, 151 - .irq_set_type = aplic_irq_set_type, 133 + .irq_set_type = aplic_msi_irq_set_type, 152 134 .irq_eoi = aplic_msi_irq_eoi, 153 135 #ifdef CONFIG_SMP 154 136 .irq_set_affinity = irq_chip_set_affinity_parent,
+1 -1
drivers/irqchip/irq-sun6i-r.c
··· 270 270 271 271 static int sun6i_r_intc_suspend(void) 272 272 { 273 - u32 buf[BITS_TO_U32(max(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))]; 273 + u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))]; 274 274 int i; 275 275 276 276 /* Wake IRQs are enabled during system sleep and shutdown. */
+1 -1
drivers/irqchip/irq-xilinx-intc.c
··· 189 189 irqc->intr_mask = 0; 190 190 } 191 191 192 - if (irqc->intr_mask >> irqc->nr_irq) 192 + if ((u64)irqc->intr_mask >> irqc->nr_irq) 193 193 pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); 194 194 195 195 pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
+3
drivers/media/dvb-frontends/stv0367_priv.h
··· 25 25 #endif 26 26 27 27 /* MACRO definitions */ 28 + #ifndef MIN 28 29 #define MAX(X, Y) ((X) >= (Y) ? (X) : (Y)) 29 30 #define MIN(X, Y) ((X) <= (Y) ? (X) : (Y)) 31 + #endif 32 + 30 33 #define INRANGE(X, Y, Z) \ 31 34 ((((X) <= (Y)) && ((Y) <= (Z))) || \ 32 35 (((Z) <= (Y)) && ((Y) <= (X))) ? 1 : 0)
+2 -1
drivers/media/pci/intel/ipu6/Kconfig
··· 3 3 depends on ACPI || COMPILE_TEST 4 4 depends on VIDEO_DEV 5 5 depends on X86 && X86_64 && HAS_DMA 6 + depends on IPU_BRIDGE || !IPU_BRIDGE 7 + select AUXILIARY_BUS 6 8 select DMA_OPS 7 9 select IOMMU_IOVA 8 10 select VIDEO_V4L2_SUBDEV_API 9 11 select MEDIA_CONTROLLER 10 12 select VIDEOBUF2_DMA_CONTIG 11 13 select V4L2_FWNODE 12 - select IPU_BRIDGE 13 14 help 14 15 This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs 15 16 and used for capturing images and video from camera sensors.
+5 -3
drivers/media/usb/uvc/uvc_ctrl.c
··· 2680 2680 for (i = 0; i < ARRAY_SIZE(uvc_ctrl_mappings); ++i) { 2681 2681 const struct uvc_control_mapping *mapping = &uvc_ctrl_mappings[i]; 2682 2682 2683 + if (!uvc_entity_match_guid(ctrl->entity, mapping->entity) || 2684 + ctrl->info.selector != mapping->selector) 2685 + continue; 2686 + 2683 2687 /* Let the device provide a custom mapping. */ 2684 2688 if (mapping->filter_mapping) { 2685 2689 mapping = mapping->filter_mapping(chain, ctrl); ··· 2691 2687 continue; 2692 2688 } 2693 2689 2694 - if (uvc_entity_match_guid(ctrl->entity, mapping->entity) && 2695 - ctrl->info.selector == mapping->selector) 2696 - __uvc_ctrl_add_mapping(chain, ctrl, mapping); 2690 + __uvc_ctrl_add_mapping(chain, ctrl, mapping); 2697 2691 } 2698 2692 } 2699 2693
+1 -1
drivers/misc/Kconfig
··· 587 587 588 588 config MARVELL_CN10K_DPI 589 589 tristate "Octeon CN10K DPI driver" 590 - depends on PCI 590 + depends on PCI && PCI_IOV 591 591 depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT) 592 592 help 593 593 Enables Octeon CN10K DMA packet interface (DPI) driver which
+51 -34
drivers/misc/eeprom/ee1004.c
··· 233 233 mutex_unlock(&ee1004_bus_lock); 234 234 } 235 235 236 + static int ee1004_init_bus_data(struct i2c_client *client) 237 + { 238 + struct ee1004_bus_data *bd; 239 + int err, cnr = 0; 240 + 241 + bd = ee1004_get_bus_data(client->adapter); 242 + if (!bd) 243 + return dev_err_probe(&client->dev, -ENOSPC, "Only %d busses supported", 244 + EE1004_MAX_BUSSES); 245 + 246 + i2c_set_clientdata(client, bd); 247 + 248 + if (++bd->dev_count == 1) { 249 + /* Use 2 dummy devices for page select command */ 250 + for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) { 251 + struct i2c_client *cl; 252 + 253 + cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr); 254 + if (IS_ERR(cl)) { 255 + err = PTR_ERR(cl); 256 + goto err_out; 257 + } 258 + 259 + bd->set_page[cnr] = cl; 260 + } 261 + 262 + /* Remember current page to avoid unneeded page select */ 263 + err = ee1004_get_current_page(bd); 264 + if (err < 0) 265 + goto err_out; 266 + 267 + dev_dbg(&client->dev, "Currently selected page: %d\n", err); 268 + bd->current_page = err; 269 + } 270 + 271 + return 0; 272 + 273 + err_out: 274 + ee1004_cleanup(cnr, bd); 275 + 276 + return err; 277 + } 278 + 236 279 static int ee1004_probe(struct i2c_client *client) 237 280 { 238 281 struct nvmem_config config = { ··· 294 251 .compat = true, 295 252 .base_dev = &client->dev, 296 253 }; 297 - struct ee1004_bus_data *bd; 298 254 struct nvmem_device *ndev; 299 - int err, cnr = 0; 255 + int err; 300 256 301 257 /* Make sure we can operate on this adapter */ 302 258 if (!i2c_check_functionality(client->adapter, ··· 306 264 307 265 mutex_lock(&ee1004_bus_lock); 308 266 309 - bd = ee1004_get_bus_data(client->adapter); 310 - if (!bd) { 267 + err = ee1004_init_bus_data(client); 268 + if (err < 0) { 311 269 mutex_unlock(&ee1004_bus_lock); 312 - return dev_err_probe(&client->dev, -ENOSPC, 313 - "Only %d busses supported", EE1004_MAX_BUSSES); 314 - } 315 - 316 - err = devm_add_action_or_reset(&client->dev, ee1004_cleanup_bus_data, bd); 
317 - if (err < 0) 318 270 return err; 319 - 320 - i2c_set_clientdata(client, bd); 321 - 322 - if (++bd->dev_count == 1) { 323 - /* Use 2 dummy devices for page select command */ 324 - for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) { 325 - struct i2c_client *cl; 326 - 327 - cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr); 328 - if (IS_ERR(cl)) { 329 - mutex_unlock(&ee1004_bus_lock); 330 - return PTR_ERR(cl); 331 - } 332 - bd->set_page[cnr] = cl; 333 - } 334 - 335 - /* Remember current page to avoid unneeded page select */ 336 - err = ee1004_get_current_page(bd); 337 - if (err < 0) { 338 - mutex_unlock(&ee1004_bus_lock); 339 - return err; 340 - } 341 - dev_dbg(&client->dev, "Currently selected page: %d\n", err); 342 - bd->current_page = err; 343 271 } 344 272 345 273 ee1004_probe_temp_sensor(client); 346 274 347 275 mutex_unlock(&ee1004_bus_lock); 276 + 277 + err = devm_add_action_or_reset(&client->dev, ee1004_cleanup_bus_data, 278 + i2c_get_clientdata(client)); 279 + if (err < 0) 280 + return err; 348 281 349 282 ndev = devm_nvmem_register(&client->dev, &config); 350 283 if (IS_ERR(ndev))
+1 -1
drivers/net/can/usb/etas_es58x/es58x_devlink.c
··· 215 215 struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version; 216 216 struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version; 217 217 struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision; 218 - char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))]; 218 + char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))]; 219 219 int ret = 0; 220 220 221 221 if (es58x_sw_version_is_valid(fw_ver)) {
+3 -1
drivers/net/dsa/bcm_sf2.c
··· 675 675 of_remove_property(child, prop); 676 676 677 677 phydev = of_phy_find_device(child); 678 - if (phydev) 678 + if (phydev) { 679 679 phy_device_remove(phydev); 680 + phy_device_free(phydev); 681 + } 680 682 } 681 683 682 684 err = mdiobus_register(priv->user_mii_bus);
+16
drivers/net/dsa/microchip/ksz_common.c
··· 2578 2578 if (!port) 2579 2579 return MICREL_KSZ8_P1_ERRATA; 2580 2580 break; 2581 + case KSZ8567_CHIP_ID: 2581 2582 case KSZ9477_CHIP_ID: 2583 + case KSZ9567_CHIP_ID: 2584 + case KSZ9896_CHIP_ID: 2585 + case KSZ9897_CHIP_ID: 2582 2586 /* KSZ9477 Errata DS80000754C 2583 2587 * 2584 2588 * Module 4: Energy Efficient Ethernet (EEE) feature select must ··· 2592 2588 * controls. If not disabled, the PHY ports can auto-negotiate 2593 2589 * to enable EEE, and this feature can cause link drops when 2594 2590 * linked to another device supporting EEE. 2591 + * 2592 + * The same item appears in the errata for the KSZ9567, KSZ9896, 2593 + * and KSZ9897. 2594 + * 2595 + * A similar item appears in the errata for the KSZ8567, but 2596 + * provides an alternative workaround. For now, use the simple 2597 + * workaround of disabling the EEE feature for this device too. 2595 2598 */ 2596 2599 return MICREL_NO_EEE; 2597 2600 } ··· 3774 3763 port); 3775 3764 return -EBUSY; 3776 3765 } 3766 + 3767 + /* Need to initialize variable as the code to fill in settings may 3768 + * not be executed. 3769 + */ 3770 + wol.wolopts = 0; 3777 3771 3778 3772 ksz_get_wol(ds, dp->index, &wol); 3779 3773 if (wol.wolopts & WAKE_MAGIC) {
+11 -8
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 7591 7591 int rx = bp->rx_nr_rings, stat; 7592 7592 int vnic, grp = rx; 7593 7593 7594 - if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7595 - bp->hwrm_spec_code >= 0x10601) 7596 - return true; 7597 - 7598 7594 /* Old firmware does not need RX ring reservations but we still 7599 7595 * need to setup a default RSS map when needed. With new firmware 7600 7596 * we go through RX ring reservations first and then set up the 7601 7597 * RSS map for the successfully reserved RX rings when needed. 7602 7598 */ 7603 - if (!BNXT_NEW_RM(bp)) { 7599 + if (!BNXT_NEW_RM(bp)) 7604 7600 bnxt_check_rss_tbl_no_rmgr(bp); 7601 + 7602 + if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7603 + bp->hwrm_spec_code >= 0x10601) 7604 + return true; 7605 + 7606 + if (!BNXT_NEW_RM(bp)) 7605 7607 return false; 7606 - } 7607 7608 7608 7609 vnic = bnxt_get_total_vnics(bp, rx); 7609 7610 ··· 7650 7649 static int __bnxt_reserve_rings(struct bnxt *bp) 7651 7650 { 7652 7651 struct bnxt_hw_rings hwr = {0}; 7652 + int rx_rings, old_rx_rings, rc; 7653 7653 int cp = bp->cp_nr_rings; 7654 - int rx_rings, rc; 7655 7654 int ulp_msix = 0; 7656 7655 bool sh = false; 7657 7656 int tx_cp; ··· 7685 7684 hwr.grp = bp->rx_nr_rings; 7686 7685 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 7687 7686 hwr.stat = bnxt_get_func_stat_ctxs(bp); 7687 + old_rx_rings = bp->hw_resc.resv_rx_rings; 7688 7688 7689 7689 rc = bnxt_hwrm_reserve_rings(bp, &hwr); 7690 7690 if (rc) ··· 7740 7738 if (!bnxt_rings_ok(bp, &hwr)) 7741 7739 return -ENOMEM; 7742 7740 7743 - if (!netif_is_rxfh_configured(bp->dev)) 7741 + if (old_rx_rings != bp->hw_resc.resv_rx_rings && 7742 + !netif_is_rxfh_configured(bp->dev)) 7744 7743 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7745 7744 7746 7745 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
+13 -3
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 1863 1863 } 1864 1864 1865 1865 static int bnxt_rxfh_context_check(struct bnxt *bp, 1866 + const struct ethtool_rxfh_param *rxfh, 1866 1867 struct netlink_ext_ack *extack) 1867 1868 { 1869 + if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) { 1870 + NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported"); 1871 + return -EOPNOTSUPP; 1872 + } 1873 + 1868 1874 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { 1869 1875 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); 1870 1876 return -EOPNOTSUPP; ··· 1894 1888 struct bnxt_vnic_info *vnic; 1895 1889 int rc; 1896 1890 1897 - rc = bnxt_rxfh_context_check(bp, extack); 1891 + rc = bnxt_rxfh_context_check(bp, rxfh, extack); 1898 1892 if (rc) 1899 1893 return rc; 1900 1894 ··· 1921 1915 if (rc) 1922 1916 goto out; 1923 1917 1918 + /* Populate defaults in the context */ 1924 1919 bnxt_set_dflt_rss_indir_tbl(bp, ctx); 1920 + ctx->hfunc = ETH_RSS_HASH_TOP; 1925 1921 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); 1922 + memcpy(ethtool_rxfh_context_key(ctx), 1923 + bp->rss_hash_key, HW_HASH_KEY_SIZE); 1926 1924 1927 1925 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); 1928 1926 if (rc) { ··· 1963 1953 struct bnxt_rss_ctx *rss_ctx; 1964 1954 int rc; 1965 1955 1966 - rc = bnxt_rxfh_context_check(bp, extack); 1956 + rc = bnxt_rxfh_context_check(bp, rxfh, extack); 1967 1957 if (rc) 1968 1958 return rc; 1969 1959 ··· 5290 5280 const struct ethtool_ops bnxt_ethtool_ops = { 5291 5281 .cap_link_lanes_supported = 1, 5292 5282 .cap_rss_ctx_supported = 1, 5293 - .rxfh_max_context_id = BNXT_MAX_ETH_RSS_CTX, 5283 + .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1, 5294 5284 .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, 5295 5285 .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), 5296 5286 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+5 -9
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
··· 42 42 struct bcmgenet_priv *priv = netdev_priv(dev); 43 43 struct device *kdev = &priv->pdev->dev; 44 44 45 - if (dev->phydev) { 45 + if (dev->phydev) 46 46 phy_ethtool_get_wol(dev->phydev, wol); 47 - if (wol->supported) 48 - return; 49 - } 50 47 51 - if (!device_can_wakeup(kdev)) { 52 - wol->supported = 0; 53 - wol->wolopts = 0; 48 + /* MAC is not wake-up capable, return what the PHY does */ 49 + if (!device_can_wakeup(kdev)) 54 50 return; 55 - } 56 51 57 - wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 52 + /* Overlay MAC capabilities with that of the PHY queried before */ 53 + wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 58 54 wol->wolopts = priv->wolopts; 59 55 memset(wol->sopass, 0, sizeof(wol->sopass)); 60 56
+3
drivers/net/ethernet/freescale/fec_ptp.c
··· 775 775 struct net_device *ndev = platform_get_drvdata(pdev); 776 776 struct fec_enet_private *fep = netdev_priv(ndev); 777 777 778 + if (fep->pps_enable) 779 + fec_ptp_enable_pps(fep, 0); 780 + 778 781 cancel_delayed_work_sync(&fep->time_keep); 779 782 hrtimer_cancel(&fep->perout_timer); 780 783 if (fep->ptp_clock)
+1 -1
drivers/net/ethernet/google/gve/gve_ethtool.c
··· 495 495 return -EINVAL; 496 496 } 497 497 498 - if (!netif_carrier_ok(netdev)) { 498 + if (!netif_running(netdev)) { 499 499 priv->tx_cfg.num_queues = new_tx; 500 500 priv->rx_cfg.num_queues = new_rx; 501 501 return 0;
+6 -6
drivers/net/ethernet/google/gve/gve_main.c
··· 1566 1566 u32 status; 1567 1567 1568 1568 old_prog = READ_ONCE(priv->xdp_prog); 1569 - if (!netif_carrier_ok(priv->dev)) { 1569 + if (!netif_running(priv->dev)) { 1570 1570 WRITE_ONCE(priv->xdp_prog, prog); 1571 1571 if (old_prog) 1572 1572 bpf_prog_put(old_prog); ··· 1847 1847 rx_alloc_cfg.qcfg = &new_rx_config; 1848 1848 tx_alloc_cfg.num_rings = new_tx_config.num_queues; 1849 1849 1850 - if (netif_carrier_ok(priv->dev)) { 1850 + if (netif_running(priv->dev)) { 1851 1851 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); 1852 1852 return err; 1853 1853 } ··· 2064 2064 2065 2065 if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) { 2066 2066 netdev->features ^= NETIF_F_LRO; 2067 - if (netif_carrier_ok(netdev)) { 2067 + if (netif_running(netdev)) { 2068 2068 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); 2069 2069 if (err) 2070 2070 goto revert_features; ··· 2359 2359 2360 2360 int gve_reset(struct gve_priv *priv, bool attempt_teardown) 2361 2361 { 2362 - bool was_up = netif_carrier_ok(priv->dev); 2362 + bool was_up = netif_running(priv->dev); 2363 2363 int err; 2364 2364 2365 2365 dev_info(&priv->pdev->dev, "Performing reset\n"); ··· 2700 2700 { 2701 2701 struct net_device *netdev = pci_get_drvdata(pdev); 2702 2702 struct gve_priv *priv = netdev_priv(netdev); 2703 - bool was_up = netif_carrier_ok(priv->dev); 2703 + bool was_up = netif_running(priv->dev); 2704 2704 2705 2705 rtnl_lock(); 2706 2706 if (was_up && gve_close(priv->dev)) { ··· 2718 2718 { 2719 2719 struct net_device *netdev = pci_get_drvdata(pdev); 2720 2720 struct gve_priv *priv = netdev_priv(netdev); 2721 - bool was_up = netif_carrier_ok(priv->dev); 2721 + bool was_up = netif_running(priv->dev); 2722 2722 2723 2723 priv->suspend_cnt++; 2724 2724 rtnl_lock();
+5 -6
drivers/net/ethernet/intel/ice/ice.h
··· 765 765 } 766 766 767 767 /** 768 - * ice_xsk_pool - get XSK buffer pool bound to a ring 768 + * ice_rx_xsk_pool - assign XSK buff pool to Rx ring 769 769 * @ring: Rx ring to use 770 770 * 771 - * Returns a pointer to xsk_buff_pool structure if there is a buffer pool 772 - * present, NULL otherwise. 771 + * Sets XSK buff pool pointer on Rx ring. 773 772 */ 774 - static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) 773 + static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring) 775 774 { 776 775 struct ice_vsi *vsi = ring->vsi; 777 776 u16 qid = ring->q_index; 778 777 779 - return ice_get_xp_from_qid(vsi, qid); 778 + WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid)); 780 779 } 781 780 782 781 /** ··· 800 801 if (!ring) 801 802 return; 802 803 803 - ring->xsk_pool = ice_get_xp_from_qid(vsi, qid); 804 + WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid)); 804 805 } 805 806 806 807 /**
+2 -2
drivers/net/ethernet/intel/ice/ice_base.c
··· 536 536 return err; 537 537 } 538 538 539 - ring->xsk_pool = ice_xsk_pool(ring); 539 + ice_rx_xsk_pool(ring); 540 540 if (ring->xsk_pool) { 541 541 xdp_rxq_info_unreg(&ring->xdp_rxq); 542 542 ··· 597 597 return 0; 598 598 } 599 599 600 - ok = ice_alloc_rx_bufs_zc(ring, num_bufs); 600 + ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs); 601 601 if (!ok) { 602 602 u16 pf_q = ring->vsi->rxq_map[ring->q_index]; 603 603
+4 -4
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 4673 4673 if (err) 4674 4674 return err; 4675 4675 4676 - fec_stats->uncorrectable_blocks.total = (fec_corr_high_val << 16) + 4677 - fec_corr_low_val; 4678 - fec_stats->corrected_blocks.total = (fec_uncorr_high_val << 16) + 4679 - fec_uncorr_low_val; 4676 + fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) + 4677 + fec_corr_low_val; 4678 + fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) + 4679 + fec_uncorr_low_val; 4680 4680 return 0; 4681 4681 } 4682 4682
+3 -1
drivers/net/ethernet/intel/ice/ice_main.c
··· 559 559 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) 560 560 return; 561 561 562 + synchronize_irq(pf->oicr_irq.virq); 563 + 562 564 ice_unplug_aux_dev(pf); 563 565 564 566 /* Notify VFs of impending reset */ ··· 2950 2948 ice_for_each_rxq(vsi, i) { 2951 2949 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; 2952 2950 2953 - if (rx_ring->xsk_pool) 2951 + if (READ_ONCE(rx_ring->xsk_pool)) 2954 2952 napi_schedule(&rx_ring->q_vector->napi); 2955 2953 } 2956 2954 }
+4
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 1477 1477 /* Update cached link status for this port immediately */ 1478 1478 ptp_port->link_up = linkup; 1479 1479 1480 + /* Skip HW writes if reset is in progress */ 1481 + if (pf->hw.reset_ongoing) 1482 + return; 1483 + 1480 1484 switch (hw->ptp.phy_model) { 1481 1485 case ICE_PHY_E810: 1482 1486 /* Do not reconfigure E810 PHY */
+6 -4
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 456 456 if (rx_ring->vsi->type == ICE_VSI_PF) 457 457 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 458 458 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 459 - rx_ring->xdp_prog = NULL; 459 + WRITE_ONCE(rx_ring->xdp_prog, NULL); 460 460 if (rx_ring->xsk_pool) { 461 461 kfree(rx_ring->xdp_buf); 462 462 rx_ring->xdp_buf = NULL; ··· 1521 1521 * budget and be more aggressive about cleaning up the Tx descriptors. 1522 1522 */ 1523 1523 ice_for_each_tx_ring(tx_ring, q_vector->tx) { 1524 + struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); 1524 1525 bool wd; 1525 1526 1526 - if (tx_ring->xsk_pool) 1527 - wd = ice_xmit_zc(tx_ring); 1527 + if (xsk_pool) 1528 + wd = ice_xmit_zc(tx_ring, xsk_pool); 1528 1529 else if (ice_ring_is_xdp(tx_ring)) 1529 1530 wd = true; 1530 1531 else ··· 1551 1550 budget_per_ring = budget; 1552 1551 1553 1552 ice_for_each_rx_ring(rx_ring, q_vector->rx) { 1553 + struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); 1554 1554 int cleaned; 1555 1555 1556 1556 /* A dedicated path for zero-copy allows making a single ··· 1559 1557 * ice_clean_rx_irq function and makes the codebase cleaner. 1560 1558 */ 1561 1559 cleaned = rx_ring->xsk_pool ? 1562 - ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : 1560 + ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) : 1563 1561 ice_clean_rx_irq(rx_ring, budget_per_ring); 1564 1562 work_done += cleaned; 1565 1563 /* if we clean as many as budgeted, we must not be done */
+111 -73
drivers/net/ethernet/intel/ice/ice_xsk.c
··· 52 52 static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) 53 53 { 54 54 ice_clean_tx_ring(vsi->tx_rings[q_idx]); 55 - if (ice_is_xdp_ena_vsi(vsi)) { 56 - synchronize_rcu(); 55 + if (ice_is_xdp_ena_vsi(vsi)) 57 56 ice_clean_tx_ring(vsi->xdp_rings[q_idx]); 58 - } 59 57 ice_clean_rx_ring(vsi->rx_rings[q_idx]); 60 58 } 61 59 ··· 110 112 * ice_qvec_cfg_msix - Enable IRQ for given queue vector 111 113 * @vsi: the VSI that contains queue vector 112 114 * @q_vector: queue vector 115 + * @qid: queue index 113 116 */ 114 117 static void 115 - ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) 118 + ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid) 116 119 { 117 120 u16 reg_idx = q_vector->reg_idx; 118 121 struct ice_pf *pf = vsi->back; 119 122 struct ice_hw *hw = &pf->hw; 120 - struct ice_tx_ring *tx_ring; 121 - struct ice_rx_ring *rx_ring; 123 + int q, _qid = qid; 122 124 123 125 ice_cfg_itr(hw, q_vector); 124 126 125 - ice_for_each_tx_ring(tx_ring, q_vector->tx) 126 - ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx, 127 - q_vector->tx.itr_idx); 127 + for (q = 0; q < q_vector->num_ring_tx; q++) { 128 + ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx); 129 + _qid++; 130 + } 128 131 129 - ice_for_each_rx_ring(rx_ring, q_vector->rx) 130 - ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx, 131 - q_vector->rx.itr_idx); 132 + _qid = qid; 133 + 134 + for (q = 0; q < q_vector->num_ring_rx; q++) { 135 + ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx); 136 + _qid++; 137 + } 132 138 133 139 ice_flush(hw); 134 140 } ··· 166 164 struct ice_tx_ring *tx_ring; 167 165 struct ice_rx_ring *rx_ring; 168 166 int timeout = 50; 167 + int fail = 0; 169 168 int err; 170 169 171 170 if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) ··· 183 180 usleep_range(1000, 2000); 184 181 } 185 182 183 + synchronize_net(); 184 + netif_carrier_off(vsi->netdev); 185 + 
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); 186 + 186 187 ice_qvec_dis_irq(vsi, rx_ring, q_vector); 187 188 ice_qvec_toggle_napi(vsi, q_vector, false); 188 189 189 - netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); 190 - 191 190 ice_fill_txq_meta(vsi, tx_ring, &txq_meta); 192 191 err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); 193 - if (err) 194 - return err; 192 + if (!fail) 193 + fail = err; 195 194 if (ice_is_xdp_ena_vsi(vsi)) { 196 195 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; 197 196 ··· 201 196 ice_fill_txq_meta(vsi, xdp_ring, &txq_meta); 202 197 err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring, 203 198 &txq_meta); 204 - if (err) 205 - return err; 199 + if (!fail) 200 + fail = err; 206 201 } 207 - err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); 208 - if (err) 209 - return err; 210 202 203 + ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false); 211 204 ice_qp_clean_rings(vsi, q_idx); 212 205 ice_qp_reset_stats(vsi, q_idx); 213 206 214 - return 0; 207 + return fail; 215 208 } 216 209 217 210 /** ··· 222 219 static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) 223 220 { 224 221 struct ice_q_vector *q_vector; 222 + int fail = 0; 223 + bool link_up; 225 224 int err; 226 225 227 226 err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx); 228 - if (err) 229 - return err; 227 + if (!fail) 228 + fail = err; 230 229 231 230 if (ice_is_xdp_ena_vsi(vsi)) { 232 231 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; 233 232 234 233 err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx); 235 - if (err) 236 - return err; 234 + if (!fail) 235 + fail = err; 237 236 ice_set_ring_xdp(xdp_ring); 238 237 ice_tx_xsk_pool(vsi, q_idx); 239 238 } 240 239 241 240 err = ice_vsi_cfg_single_rxq(vsi, q_idx); 242 - if (err) 243 - return err; 241 + if (!fail) 242 + fail = err; 244 243 245 244 q_vector = vsi->rx_rings[q_idx]->q_vector; 246 - ice_qvec_cfg_msix(vsi, q_vector); 245 + ice_qvec_cfg_msix(vsi, 
q_vector, q_idx); 247 246 248 247 err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); 249 - if (err) 250 - return err; 248 + if (!fail) 249 + fail = err; 251 250 252 251 ice_qvec_toggle_napi(vsi, q_vector, true); 253 252 ice_qvec_ena_irq(vsi, q_vector); 254 253 255 - netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); 254 + /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */ 255 + synchronize_net(); 256 + ice_get_link_status(vsi->port_info, &link_up); 257 + if (link_up) { 258 + netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); 259 + netif_carrier_on(vsi->netdev); 260 + } 256 261 clear_bit(ICE_CFG_BUSY, vsi->state); 257 262 258 - return 0; 263 + return fail; 259 264 } 260 265 261 266 /** ··· 470 459 /** 471 460 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers 472 461 * @rx_ring: Rx ring 462 + * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW 473 463 * @count: The number of buffers to allocate 474 464 * 475 465 * Place the @count of descriptors onto Rx ring. Handle the ring wrap ··· 479 467 * 480 468 * Returns true if all allocations were successful, false if any fail. 
481 469 */ 482 - static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) 470 + static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, 471 + struct xsk_buff_pool *xsk_pool, u16 count) 483 472 { 484 473 u32 nb_buffs_extra = 0, nb_buffs = 0; 485 474 union ice_32b_rx_flex_desc *rx_desc; ··· 492 479 xdp = ice_xdp_buf(rx_ring, ntu); 493 480 494 481 if (ntu + count >= rx_ring->count) { 495 - nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, 496 - rx_desc, 482 + nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, 497 483 rx_ring->count - ntu); 498 484 if (nb_buffs_extra != rx_ring->count - ntu) { 499 485 ntu += nb_buffs_extra; ··· 505 493 ice_release_rx_desc(rx_ring, 0); 506 494 } 507 495 508 - nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count); 496 + nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count); 509 497 510 498 ntu += nb_buffs; 511 499 if (ntu == rx_ring->count) ··· 521 509 /** 522 510 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers 523 511 * @rx_ring: Rx ring 512 + * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW 524 513 * @count: The number of buffers to allocate 525 514 * 526 515 * Wrapper for internal allocation routine; figure out how many tail ··· 529 516 * 530 517 * Returns true if all calls to internal alloc routine succeeded 531 518 */ 532 - bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) 519 + bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, 520 + struct xsk_buff_pool *xsk_pool, u16 count) 533 521 { 534 522 u16 rx_thresh = ICE_RING_QUARTER(rx_ring); 535 523 u16 leftover, i, tail_bumps; ··· 539 525 leftover = count - (tail_bumps * rx_thresh); 540 526 541 527 for (i = 0; i < tail_bumps; i++) 542 - if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh)) 528 + if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh)) 543 529 return false; 544 - return __ice_alloc_rx_bufs_zc(rx_ring, leftover); 530 + return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, 
leftover); 545 531 } 546 532 547 533 /** ··· 610 596 /** 611 597 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ 612 598 * @xdp_ring: XDP Tx ring 599 + * @xsk_pool: AF_XDP buffer pool pointer 613 600 */ 614 - static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) 601 + static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, 602 + struct xsk_buff_pool *xsk_pool) 615 603 { 616 604 u16 ntc = xdp_ring->next_to_clean; 617 605 struct ice_tx_desc *tx_desc; ··· 664 648 if (xdp_ring->next_to_clean >= cnt) 665 649 xdp_ring->next_to_clean -= cnt; 666 650 if (xsk_frames) 667 - xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); 651 + xsk_tx_completed(xsk_pool, xsk_frames); 668 652 669 653 return completed_frames; 670 654 } ··· 673 657 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX 674 658 * @xdp: XDP buffer to xmit 675 659 * @xdp_ring: XDP ring to produce descriptor onto 660 + * @xsk_pool: AF_XDP buffer pool pointer 676 661 * 677 662 * note that this function works directly on xdp_buff, no need to convert 678 663 * it to xdp_frame. 
xdp_buff pointer is stored to ice_tx_buf so that cleaning ··· 683 666 * was not enough space on XDP ring 684 667 */ 685 668 static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, 686 - struct ice_tx_ring *xdp_ring) 669 + struct ice_tx_ring *xdp_ring, 670 + struct xsk_buff_pool *xsk_pool) 687 671 { 688 672 struct skb_shared_info *sinfo = NULL; 689 673 u32 size = xdp->data_end - xdp->data; ··· 698 680 699 681 free_space = ICE_DESC_UNUSED(xdp_ring); 700 682 if (free_space < ICE_RING_QUARTER(xdp_ring)) 701 - free_space += ice_clean_xdp_irq_zc(xdp_ring); 683 + free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool); 702 684 703 685 if (unlikely(!free_space)) 704 686 goto busy; ··· 718 700 dma_addr_t dma; 719 701 720 702 dma = xsk_buff_xdp_get_dma(xdp); 721 - xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size); 703 + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size); 722 704 723 705 tx_buf->xdp = xdp; 724 706 tx_buf->type = ICE_TX_BUF_XSK_TX; ··· 760 742 * @xdp: xdp_buff used as input to the XDP program 761 743 * @xdp_prog: XDP program to run 762 744 * @xdp_ring: ring to be used for XDP_TX action 745 + * @xsk_pool: AF_XDP buffer pool pointer 763 746 * 764 747 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} 765 748 */ 766 749 static int 767 750 ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, 768 - struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring) 751 + struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, 752 + struct xsk_buff_pool *xsk_pool) 769 753 { 770 754 int err, result = ICE_XDP_PASS; 771 755 u32 act; ··· 778 758 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 779 759 if (!err) 780 760 return ICE_XDP_REDIR; 781 - if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) 761 + if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS) 782 762 result = ICE_XDP_EXIT; 783 763 else 784 764 result = ICE_XDP_CONSUMED; ··· 789 769 case XDP_PASS: 790 770 break; 791 771 case XDP_TX: 792 - result = ice_xmit_xdp_tx_zc(xdp, 
xdp_ring); 772 + result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool); 793 773 if (result == ICE_XDP_CONSUMED) 794 774 goto out_failure; 795 775 break; ··· 841 821 /** 842 822 * ice_clean_rx_irq_zc - consumes packets from the hardware ring 843 823 * @rx_ring: AF_XDP Rx ring 824 + * @xsk_pool: AF_XDP buffer pool pointer 844 825 * @budget: NAPI budget 845 826 * 846 827 * Returns number of processed packets on success, remaining budget on failure. 847 828 */ 848 - int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) 829 + int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, 830 + struct xsk_buff_pool *xsk_pool, 831 + int budget) 849 832 { 850 833 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 851 - struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool; 852 834 u32 ntc = rx_ring->next_to_clean; 853 835 u32 ntu = rx_ring->next_to_use; 854 836 struct xdp_buff *first = NULL; ··· 913 891 if (ice_is_non_eop(rx_ring, rx_desc)) 914 892 continue; 915 893 916 - xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring); 894 + xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring, 895 + xsk_pool); 917 896 if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { 918 897 xdp_xmit |= xdp_res; 919 898 } else if (xdp_res == ICE_XDP_EXIT) { ··· 963 940 rx_ring->next_to_clean = ntc; 964 941 entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring); 965 942 if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) 966 - failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); 943 + failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, 944 + entries_to_alloc); 967 945 968 946 ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0); 969 947 ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); ··· 987 963 /** 988 964 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor 989 965 * @xdp_ring: XDP ring to produce the HW Tx descriptor on 966 + * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW 990 967 * @desc: AF_XDP descriptor to pull the DMA 
address and length from 991 968 * @total_bytes: bytes accumulator that will be used for stats update 992 969 */ 993 - static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc, 970 + static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, 971 + struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc, 994 972 unsigned int *total_bytes) 995 973 { 996 974 struct ice_tx_desc *tx_desc; 997 975 dma_addr_t dma; 998 976 999 - dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); 1000 - xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); 977 + dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr); 978 + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len); 1001 979 1002 980 tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++); 1003 981 tx_desc->buf_addr = cpu_to_le64(dma); ··· 1012 986 /** 1013 987 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors 1014 988 * @xdp_ring: XDP ring to produce the HW Tx descriptors on 989 + * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW 1015 990 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from 1016 991 * @total_bytes: bytes accumulator that will be used for stats update 1017 992 */ 1018 - static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, 993 + static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, 994 + struct xsk_buff_pool *xsk_pool, 995 + struct xdp_desc *descs, 1019 996 unsigned int *total_bytes) 1020 997 { 1021 998 u16 ntu = xdp_ring->next_to_use; ··· 1028 999 loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) { 1029 1000 dma_addr_t dma; 1030 1001 1031 - dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr); 1032 - xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len); 1002 + dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr); 1003 + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len); 1033 1004 1034 1005 tx_desc = ICE_TX_DESC(xdp_ring, ntu++); 
1035 1006 tx_desc->buf_addr = cpu_to_le64(dma); ··· 1045 1016 /** 1046 1017 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring 1047 1018 * @xdp_ring: XDP ring to produce the HW Tx descriptors on 1019 + * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW 1048 1020 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from 1049 1021 * @nb_pkts: count of packets to be send 1050 1022 * @total_bytes: bytes accumulator that will be used for stats update 1051 1023 */ 1052 - static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, 1053 - u32 nb_pkts, unsigned int *total_bytes) 1024 + static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, 1025 + struct xsk_buff_pool *xsk_pool, 1026 + struct xdp_desc *descs, u32 nb_pkts, 1027 + unsigned int *total_bytes) 1054 1028 { 1055 1029 u32 batched, leftover, i; 1056 1030 1057 1031 batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH); 1058 1032 leftover = nb_pkts & (PKTS_PER_BATCH - 1); 1059 1033 for (i = 0; i < batched; i += PKTS_PER_BATCH) 1060 - ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes); 1034 + ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes); 1061 1035 for (; i < batched + leftover; i++) 1062 - ice_xmit_pkt(xdp_ring, &descs[i], total_bytes); 1036 + ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes); 1063 1037 } 1064 1038 1065 1039 /** 1066 1040 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring 1067 1041 * @xdp_ring: XDP ring to produce the HW Tx descriptors on 1042 + * @xsk_pool: AF_XDP buffer pool pointer 1068 1043 * 1069 1044 * Returns true if there is no more work that needs to be done, false otherwise 1070 1045 */ 1071 - bool ice_xmit_zc(struct ice_tx_ring *xdp_ring) 1046 + bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool) 1072 1047 { 1073 - struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; 1048 + struct xdp_desc *descs = xsk_pool->tx_descs; 1074 1049 u32 
nb_pkts, nb_processed = 0; 1075 1050 unsigned int total_bytes = 0; 1076 1051 int budget; 1077 1052 1078 - ice_clean_xdp_irq_zc(xdp_ring); 1053 + ice_clean_xdp_irq_zc(xdp_ring, xsk_pool); 1054 + 1055 + if (!netif_carrier_ok(xdp_ring->vsi->netdev) || 1056 + !netif_running(xdp_ring->vsi->netdev)) 1057 + return true; 1079 1058 1080 1059 budget = ICE_DESC_UNUSED(xdp_ring); 1081 1060 budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring)); 1082 1061 1083 - nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget); 1062 + nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget); 1084 1063 if (!nb_pkts) 1085 1064 return true; 1086 1065 1087 1066 if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { 1088 1067 nb_processed = xdp_ring->count - xdp_ring->next_to_use; 1089 - ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes); 1068 + ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed, 1069 + &total_bytes); 1090 1070 xdp_ring->next_to_use = 0; 1091 1071 } 1092 1072 1093 - ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed, 1094 - &total_bytes); 1073 + ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed], 1074 + nb_pkts - nb_processed, &total_bytes); 1095 1075 1096 1076 ice_set_rs_bit(xdp_ring); 1097 1077 ice_xdp_ring_update_tail(xdp_ring); 1098 1078 ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes); 1099 1079 1100 - if (xsk_uses_need_wakeup(xdp_ring->xsk_pool)) 1101 - xsk_set_tx_need_wakeup(xdp_ring->xsk_pool); 1080 + if (xsk_uses_need_wakeup(xsk_pool)) 1081 + xsk_set_tx_need_wakeup(xsk_pool); 1102 1082 1103 1083 return nb_pkts < budget; 1104 1084 } ··· 1129 1091 struct ice_vsi *vsi = np->vsi; 1130 1092 struct ice_tx_ring *ring; 1131 1093 1132 - if (test_bit(ICE_VSI_DOWN, vsi->state)) 1094 + if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev)) 1133 1095 return -ENETDOWN; 1134 1096 1135 1097 if (!ice_is_xdp_ena_vsi(vsi)) ··· 1140 1102 1141 1103 ring = vsi->rx_rings[queue_id]->xdp_ring; 
1142 1104 1143 - if (!ring->xsk_pool) 1105 + if (!READ_ONCE(ring->xsk_pool)) 1144 1106 return -EINVAL; 1145 1107 1146 1108 /* The idea here is that if NAPI is running, mark a miss, so
+10 -4
drivers/net/ethernet/intel/ice/ice_xsk.h
··· 20 20 #ifdef CONFIG_XDP_SOCKETS 21 21 int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, 22 22 u16 qid); 23 - int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget); 23 + int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, 24 + struct xsk_buff_pool *xsk_pool, 25 + int budget); 24 26 int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); 25 - bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count); 27 + bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, 28 + struct xsk_buff_pool *xsk_pool, u16 count); 26 29 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); 27 30 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); 28 31 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); 29 - bool ice_xmit_zc(struct ice_tx_ring *xdp_ring); 32 + bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool); 30 33 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); 31 34 #else 32 - static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring) 35 + static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring, 36 + struct xsk_buff_pool __always_unused *xsk_pool) 33 37 { 34 38 return false; 35 39 } ··· 48 44 49 45 static inline int 50 46 ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring, 47 + struct xsk_buff_pool __always_unused *xsk_pool, 51 48 int __always_unused budget) 52 49 { 53 50 return 0; ··· 56 51 57 52 static inline bool 58 53 ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring, 54 + struct xsk_buff_pool __always_unused *xsk_pool, 59 55 u16 __always_unused count) 60 56 { 61 57 return false;
+24 -24
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 900 900 901 901 vport->link_up = false; 902 902 idpf_vport_intr_deinit(vport); 903 - idpf_vport_intr_rel(vport); 904 903 idpf_vport_queues_rel(vport); 904 + idpf_vport_intr_rel(vport); 905 905 np->state = __IDPF_VPORT_DOWN; 906 906 } 907 907 ··· 1335 1335 /** 1336 1336 * idpf_vport_open - Bring up a vport 1337 1337 * @vport: vport to bring up 1338 - * @alloc_res: allocate queue resources 1339 1338 */ 1340 - static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) 1339 + static int idpf_vport_open(struct idpf_vport *vport) 1341 1340 { 1342 1341 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1343 1342 struct idpf_adapter *adapter = vport->adapter; ··· 1349 1350 /* we do not allow interface up just yet */ 1350 1351 netif_carrier_off(vport->netdev); 1351 1352 1352 - if (alloc_res) { 1353 - err = idpf_vport_queues_alloc(vport); 1354 - if (err) 1355 - return err; 1356 - } 1357 - 1358 1353 err = idpf_vport_intr_alloc(vport); 1359 1354 if (err) { 1360 1355 dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", 1361 1356 vport->vport_id, err); 1362 - goto queues_rel; 1357 + return err; 1363 1358 } 1359 + 1360 + err = idpf_vport_queues_alloc(vport); 1361 + if (err) 1362 + goto intr_rel; 1364 1363 1365 1364 err = idpf_vport_queue_ids_init(vport); 1366 1365 if (err) { 1367 1366 dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n", 1368 1367 vport->vport_id, err); 1369 - goto intr_rel; 1368 + goto queues_rel; 1370 1369 } 1371 1370 1372 1371 err = idpf_vport_intr_init(vport); 1373 1372 if (err) { 1374 1373 dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", 1375 1374 vport->vport_id, err); 1376 - goto intr_rel; 1375 + goto queues_rel; 1377 1376 } 1378 1377 1379 1378 err = idpf_rx_bufs_init_all(vport); 1380 1379 if (err) { 1381 1380 dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", 1382 1381 vport->vport_id, err); 1383 - goto intr_rel; 
1382 + goto queues_rel; 1384 1383 } 1385 1384 1386 1385 err = idpf_queue_reg_init(vport); 1387 1386 if (err) { 1388 1387 dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", 1389 1388 vport->vport_id, err); 1390 - goto intr_rel; 1389 + goto queues_rel; 1391 1390 } 1392 1391 1393 1392 idpf_rx_init_buf_tail(vport); ··· 1452 1455 idpf_send_map_unmap_queue_vector_msg(vport, false); 1453 1456 intr_deinit: 1454 1457 idpf_vport_intr_deinit(vport); 1455 - intr_rel: 1456 - idpf_vport_intr_rel(vport); 1457 1458 queues_rel: 1458 1459 idpf_vport_queues_rel(vport); 1460 + intr_rel: 1461 + idpf_vport_intr_rel(vport); 1459 1462 1460 1463 return err; 1461 1464 } ··· 1536 1539 np = netdev_priv(vport->netdev); 1537 1540 np->state = __IDPF_VPORT_DOWN; 1538 1541 if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) 1539 - idpf_vport_open(vport, true); 1542 + idpf_vport_open(vport); 1540 1543 1541 1544 /* Spawn and return 'idpf_init_task' work queue until all the 1542 1545 * default vports are created ··· 1895 1898 goto free_vport; 1896 1899 } 1897 1900 1898 - err = idpf_vport_queues_alloc(new_vport); 1899 - if (err) 1900 - goto free_vport; 1901 1901 if (current_state <= __IDPF_VPORT_DOWN) { 1902 1902 idpf_send_delete_queues_msg(vport); 1903 1903 } else { ··· 1926 1932 1927 1933 err = idpf_set_real_num_queues(vport); 1928 1934 if (err) 1929 - goto err_reset; 1935 + goto err_open; 1930 1936 1931 1937 if (current_state == __IDPF_VPORT_UP) 1932 - err = idpf_vport_open(vport, false); 1938 + err = idpf_vport_open(vport); 1933 1939 1934 1940 kfree(new_vport); 1935 1941 1936 1942 return err; 1937 1943 1938 1944 err_reset: 1939 - idpf_vport_queues_rel(new_vport); 1945 + idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, 1946 + vport->num_rxq, vport->num_bufq); 1947 + 1948 + err_open: 1949 + if (current_state == __IDPF_VPORT_UP) 1950 + idpf_vport_open(vport); 1951 + 1940 1952 free_vport: 1941 1953 kfree(new_vport); 1942 1954 ··· 
2171 2171 idpf_vport_ctrl_lock(netdev); 2172 2172 vport = idpf_netdev_to_vport(netdev); 2173 2173 2174 - err = idpf_vport_open(vport, true); 2174 + err = idpf_vport_open(vport); 2175 2175 2176 2176 idpf_vport_ctrl_unlock(netdev); 2177 2177
+9 -34
drivers/net/ethernet/intel/idpf/idpf_txrx.c
··· 3576 3576 */ 3577 3577 void idpf_vport_intr_rel(struct idpf_vport *vport) 3578 3578 { 3579 - int i, j, v_idx; 3580 - 3581 - for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { 3579 + for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { 3582 3580 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; 3583 3581 3584 3582 kfree(q_vector->complq); ··· 3590 3592 3591 3593 free_cpumask_var(q_vector->affinity_mask); 3592 3594 } 3593 - 3594 - /* Clean up the mapping of queues to vectors */ 3595 - for (i = 0; i < vport->num_rxq_grp; i++) { 3596 - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3597 - 3598 - if (idpf_is_queue_model_split(vport->rxq_model)) 3599 - for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++) 3600 - rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL; 3601 - else 3602 - for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) 3603 - rx_qgrp->singleq.rxqs[j]->q_vector = NULL; 3604 - } 3605 - 3606 - if (idpf_is_queue_model_split(vport->txq_model)) 3607 - for (i = 0; i < vport->num_txq_grp; i++) 3608 - vport->txq_grps[i].complq->q_vector = NULL; 3609 - else 3610 - for (i = 0; i < vport->num_txq_grp; i++) 3611 - for (j = 0; j < vport->txq_grps[i].num_txq; j++) 3612 - vport->txq_grps[i].txqs[j]->q_vector = NULL; 3613 3595 3614 3596 kfree(vport->q_vectors); 3615 3597 vport->q_vectors = NULL; ··· 3758 3780 /** 3759 3781 * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport 3760 3782 * @vport: main vport structure 3761 - * @basename: name for the vector 3762 3783 */ 3763 - static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) 3784 + static int idpf_vport_intr_req_irq(struct idpf_vport *vport) 3764 3785 { 3765 3786 struct idpf_adapter *adapter = vport->adapter; 3787 + const char *drv_name, *if_name, *vec_name; 3766 3788 int vector, err, irq_num, vidx; 3767 - const char *vec_name; 3789 + 3790 + drv_name = dev_driver_string(&adapter->pdev->dev); 3791 + if_name = netdev_name(vport->netdev); 3768 3792 3769 3793 
for (vector = 0; vector < vport->num_q_vectors; vector++) { 3770 3794 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; ··· 3784 3804 else 3785 3805 continue; 3786 3806 3787 - name = kasprintf(GFP_KERNEL, "%s-%s-%d", basename, vec_name, 3788 - vidx); 3807 + name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name, 3808 + vec_name, vidx); 3789 3809 3790 3810 err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, 3791 3811 name, q_vector); ··· 4306 4326 */ 4307 4327 int idpf_vport_intr_init(struct idpf_vport *vport) 4308 4328 { 4309 - char *int_name; 4310 4329 int err; 4311 4330 4312 4331 err = idpf_vport_intr_init_vec_idx(vport); ··· 4319 4340 if (err) 4320 4341 goto unroll_vectors_alloc; 4321 4342 4322 - int_name = kasprintf(GFP_KERNEL, "%s-%s", 4323 - dev_driver_string(&vport->adapter->pdev->dev), 4324 - vport->netdev->name); 4325 - 4326 - err = idpf_vport_intr_req_irq(vport, int_name); 4343 + err = idpf_vport_intr_req_irq(vport); 4327 4344 if (err) 4328 4345 goto unroll_vectors_alloc; 4329 4346
+17 -16
drivers/net/ethernet/intel/igc/igc_main.c
··· 6306 6306 size_t n; 6307 6307 int i; 6308 6308 6309 - switch (qopt->cmd) { 6310 - case TAPRIO_CMD_REPLACE: 6311 - break; 6312 - case TAPRIO_CMD_DESTROY: 6313 - return igc_tsn_clear_schedule(adapter); 6314 - case TAPRIO_CMD_STATS: 6315 - igc_taprio_stats(adapter->netdev, &qopt->stats); 6316 - return 0; 6317 - case TAPRIO_CMD_QUEUE_STATS: 6318 - igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); 6319 - return 0; 6320 - default: 6321 - return -EOPNOTSUPP; 6322 - } 6323 - 6324 6309 if (qopt->base_time < 0) 6325 6310 return -ERANGE; 6326 6311 ··· 6414 6429 if (hw->mac.type != igc_i225) 6415 6430 return -EOPNOTSUPP; 6416 6431 6417 - err = igc_save_qbv_schedule(adapter, qopt); 6432 + switch (qopt->cmd) { 6433 + case TAPRIO_CMD_REPLACE: 6434 + err = igc_save_qbv_schedule(adapter, qopt); 6435 + break; 6436 + case TAPRIO_CMD_DESTROY: 6437 + err = igc_tsn_clear_schedule(adapter); 6438 + break; 6439 + case TAPRIO_CMD_STATS: 6440 + igc_taprio_stats(adapter->netdev, &qopt->stats); 6441 + return 0; 6442 + case TAPRIO_CMD_QUEUE_STATS: 6443 + igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); 6444 + return 0; 6445 + default: 6446 + return -EOPNOTSUPP; 6447 + } 6448 + 6418 6449 if (err) 6419 6450 return err; 6420 6451
+3 -3
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 953 953 static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en) 954 954 { 955 955 struct mvpp2_port *port; 956 - int i; 956 + int i, j; 957 957 958 958 for (i = 0; i < priv->port_count; i++) { 959 959 port = priv->port_list[i]; 960 960 if (port->priv->percpu_pools) { 961 - for (i = 0; i < port->nrxqs; i++) 962 - mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], 961 + for (j = 0; j < port->nrxqs; j++) 962 + mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j], 963 963 port->tx_fc & en); 964 964 } else { 965 965 mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 932 932 mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh); 933 933 mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); 934 934 err_mod_hdr: 935 + *attr = *old_attr; 935 936 kfree(old_attr); 936 937 err_attr: 937 938 kvfree(spec);
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
··· 51 51 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap)) 52 52 caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD; 53 53 54 - if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) && 55 - MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) || 56 - MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)) 54 + if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) && 55 + ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) && 56 + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) || 57 + MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))) 57 58 caps |= MLX5_IPSEC_CAP_PRIO; 58 59 59 60 if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+6 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1409 1409 if (!an_changes && link_modes == eproto.admin) 1410 1410 goto out; 1411 1411 1412 - mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext); 1412 + err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext); 1413 + if (err) { 1414 + netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err); 1415 + goto out; 1416 + } 1417 + 1413 1418 mlx5_toggle_port_link(mdev); 1414 1419 1415 1420 out:
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
··· 207 207 static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded) 208 208 { 209 209 struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; 210 + struct devlink *devlink = priv_to_devlink(dev); 210 211 211 212 /* if this is the driver that initiated the fw reset, devlink completed the reload */ 212 213 if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { ··· 219 218 mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); 220 219 else 221 220 mlx5_load_one(dev, true); 222 - devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0, 221 + devl_lock(devlink); 222 + devlink_remote_reload_actions_performed(devlink, 0, 223 223 BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | 224 224 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); 225 + devl_unlock(devlink); 225 226 } 226 227 } 227 228
+7 -3
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
··· 48 48 irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) 49 49 { 50 50 struct irq_affinity_desc auto_desc = {}; 51 + struct mlx5_irq *irq; 51 52 u32 irq_index; 52 53 int err; 53 54 ··· 65 64 else 66 65 cpu_get(pool, cpumask_first(&af_desc->mask)); 67 66 } 68 - return mlx5_irq_alloc(pool, irq_index, 69 - cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc, 70 - NULL); 67 + irq = mlx5_irq_alloc(pool, irq_index, 68 + cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc, 69 + NULL); 70 + if (IS_ERR(irq)) 71 + xa_erase(&pool->irqs, irq_index); 72 + return irq; 71 73 } 72 74 73 75 /* Looking for the IRQ with the smallest refcount that fits req_mask.
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
··· 1538 1538 goto unlock; 1539 1539 1540 1540 for (i = 0; i < ldev->ports; i++) { 1541 - if (ldev->pf[MLX5_LAG_P1].netdev == slave) { 1541 + if (ldev->pf[i].netdev == slave) { 1542 1542 port = i; 1543 1543 break; 1544 1544 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 2142 2142 /* Panic tear down fw command will stop the PCI bus communication 2143 2143 * with the HCA, so the health poll is no longer needed. 2144 2144 */ 2145 - mlx5_drain_health_wq(dev); 2146 2145 mlx5_stop_health_poll(dev, false); 2147 2146 2148 2147 ret = mlx5_cmd_fast_teardown_hca(dev); ··· 2176 2177 2177 2178 mlx5_core_info(dev, "Shutdown was called\n"); 2178 2179 set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); 2180 + mlx5_drain_health_wq(dev); 2179 2181 err = mlx5_try_fast_unload(dev); 2180 2182 if (err) 2181 2183 mlx5_unload_one(dev, false);
+1
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
··· 112 112 struct mlx5_core_dev *mdev = sf_dev->mdev; 113 113 114 114 set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state); 115 + mlx5_drain_health_wq(mdev); 115 116 mlx5_unload_one(mdev, false); 116 117 } 117 118
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
··· 7 7 /* don't try to optimize STE allocation if the stack is too constaraining */ 8 8 #define DR_RULE_MAX_STES_OPTIMIZED 0 9 9 #else 10 - #define DR_RULE_MAX_STES_OPTIMIZED 5 10 + #define DR_RULE_MAX_STES_OPTIMIZED 2 11 11 #endif 12 12 #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES) 13 13
+1 -1
drivers/net/ethernet/meta/Kconfig
··· 20 20 config FBNIC 21 21 tristate "Meta Platforms Host Network Interface" 22 22 depends on X86_64 || COMPILE_TEST 23 - depends on S390=n 23 + depends on !S390 24 24 depends on MAX_SKB_FRAGS < 22 25 25 depends on PCI_MSI 26 26 select PHYLINK
+2 -6
drivers/net/ethernet/realtek/r8169_main.c
··· 4349 4349 if (unlikely(!rtl_tx_slots_avail(tp))) { 4350 4350 if (net_ratelimit()) 4351 4351 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 4352 - goto err_stop_0; 4352 + netif_stop_queue(dev); 4353 + return NETDEV_TX_BUSY; 4353 4354 } 4354 4355 4355 4356 opts[1] = rtl8169_tx_vlan_tag(skb); ··· 4406 4405 dev_kfree_skb_any(skb); 4407 4406 dev->stats.tx_dropped++; 4408 4407 return NETDEV_TX_OK; 4409 - 4410 - err_stop_0: 4411 - netif_stop_queue(dev); 4412 - dev->stats.tx_dropped++; 4413 - return NETDEV_TX_BUSY; 4414 4408 } 4415 4409 4416 4410 static unsigned int rtl_last_frag_len(struct sk_buff *skb)
-2
drivers/net/ethernet/stmicro/stmmac/dwmac4.h
··· 573 573 #define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19) 574 574 #define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20) 575 575 #define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21) 576 - /* LNKMOD */ 577 - #define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1 578 576 /* LNKSPEED */ 579 577 #define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2 580 578 #define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
··· 786 786 else 787 787 x->pcs_speed = SPEED_10; 788 788 789 - x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK); 789 + x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD); 790 790 791 791 pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, 792 792 x->pcs_duplex ? "Full" : "Half");
+1 -1
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 2219 2219 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2220 2220 axienet_set_mac_address(ndev, NULL); 2221 2221 axienet_set_multicast_list(ndev); 2222 - axienet_setoptions(ndev, lp->options); 2223 2222 napi_enable(&lp->napi_rx); 2224 2223 napi_enable(&lp->napi_tx); 2224 + axienet_setoptions(ndev, lp->options); 2225 2225 } 2226 2226 2227 2227 /**
+1 -3
drivers/net/fjes/fjes_main.c
··· 14 14 #include "fjes.h" 15 15 #include "fjes_trace.h" 16 16 17 - #define MAJ 1 18 - #define MIN 2 19 - #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) 17 + #define DRV_VERSION "1.2" 20 18 #define DRV_NAME "fjes" 21 19 char fjes_driver_name[] = DRV_NAME; 22 20 char fjes_driver_version[] = DRV_VERSION;
+21 -8
drivers/net/phy/aquantia/aquantia_main.c
··· 653 653 unsigned long *possible = phydev->possible_interfaces; 654 654 unsigned int serdes_mode, rate_adapt; 655 655 phy_interface_t interface; 656 - int i, val, ret; 657 - 658 - ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, 659 - VEND1_GLOBAL_CFG_10M, val, val != 0, 660 - 1000, 100000, false); 661 - if (ret) 662 - return ret; 656 + int i, val; 663 657 664 658 /* Walk the media-speed configuration registers to determine which 665 659 * host-side serdes modes may be used by the PHY depending on the ··· 702 708 return 0; 703 709 } 704 710 711 + static int aqr113c_fill_interface_modes(struct phy_device *phydev) 712 + { 713 + int val, ret; 714 + 715 + /* It's been observed on some models that - when coming out of suspend 716 + * - the FW signals that the PHY is ready but the GLOBAL_CFG registers 717 + * continue on returning zeroes for some time. Let's poll the 100M 718 + * register until it returns a real value as both 113c and 115c support 719 + * this mode. 720 + */ 721 + ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, 722 + VEND1_GLOBAL_CFG_100M, val, val != 0, 723 + 1000, 100000, false); 724 + if (ret) 725 + return ret; 726 + 727 + return aqr107_fill_interface_modes(phydev); 728 + } 729 + 705 730 static int aqr113c_config_init(struct phy_device *phydev) 706 731 { 707 732 int ret; ··· 738 725 if (ret) 739 726 return ret; 740 727 741 - return aqr107_fill_interface_modes(phydev); 728 + return aqr113c_fill_interface_modes(phydev); 742 729 } 743 730 744 731 static int aqr107_probe(struct phy_device *phydev)
+19 -15
drivers/net/phy/micrel.c
··· 1389 1389 const struct device *dev_walker; 1390 1390 int ret; 1391 1391 1392 + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; 1393 + 1392 1394 dev_walker = &phydev->mdio.dev; 1393 1395 do { 1394 1396 of_node = dev_walker->of_node; ··· 1440 1438 #define MII_KSZ9131_AUTO_MDIX 0x1C 1441 1439 #define MII_KSZ9131_AUTO_MDI_SET BIT(7) 1442 1440 #define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6) 1441 + #define MII_KSZ9131_DIG_AXAN_STS 0x14 1442 + #define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14) 1443 + #define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12) 1443 1444 1444 1445 static int ksz9131_mdix_update(struct phy_device *phydev) 1445 1446 { 1446 1447 int ret; 1447 1448 1448 - ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX); 1449 - if (ret < 0) 1450 - return ret; 1451 - 1452 - if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) { 1453 - if (ret & MII_KSZ9131_AUTO_MDI_SET) 1454 - phydev->mdix_ctrl = ETH_TP_MDI; 1455 - else 1456 - phydev->mdix_ctrl = ETH_TP_MDI_X; 1449 + if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) { 1450 + phydev->mdix = phydev->mdix_ctrl; 1457 1451 } else { 1458 - phydev->mdix_ctrl = ETH_TP_MDI_AUTO; 1459 - } 1452 + ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS); 1453 + if (ret < 0) 1454 + return ret; 1460 1455 1461 - if (ret & MII_KSZ9131_AUTO_MDI_SET) 1462 - phydev->mdix = ETH_TP_MDI; 1463 - else 1464 - phydev->mdix = ETH_TP_MDI_X; 1456 + if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) { 1457 + if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT) 1458 + phydev->mdix = ETH_TP_MDI; 1459 + else 1460 + phydev->mdix = ETH_TP_MDI_X; 1461 + } else { 1462 + phydev->mdix = ETH_TP_MDI_INVALID; 1463 + } 1464 + } 1465 1465 1466 1466 return 0; 1467 1467 }
+7
drivers/net/phy/realtek.c
··· 1465 1465 .handle_interrupt = genphy_handle_interrupt_no_ack, 1466 1466 .suspend = genphy_suspend, 1467 1467 .resume = genphy_resume, 1468 + }, { 1469 + PHY_ID_MATCH_EXACT(0x001cc960), 1470 + .name = "RTL8366S Gigabit Ethernet", 1471 + .suspend = genphy_suspend, 1472 + .resume = genphy_resume, 1473 + .read_mmd = genphy_read_mmd_unsupported, 1474 + .write_mmd = genphy_write_mmd_unsupported, 1468 1475 }, 1469 1476 }; 1470 1477
+4 -1
drivers/net/pse-pd/tps23881.c
··· 5 5 * Copyright (c) 2023 Bootlin, Kory Maincent <kory.maincent@bootlin.com> 6 6 */ 7 7 8 + #include <linux/bitfield.h> 8 9 #include <linux/delay.h> 9 10 #include <linux/firmware.h> 10 11 #include <linux/i2c.h> ··· 30 29 #define TPS23881_REG_TPON BIT(0) 31 30 #define TPS23881_REG_FWREV 0x41 32 31 #define TPS23881_REG_DEVID 0x43 32 + #define TPS23881_REG_DEVID_MASK 0xF0 33 + #define TPS23881_DEVICE_ID 0x02 33 34 #define TPS23881_REG_SRAM_CTRL 0x60 34 35 #define TPS23881_REG_SRAM_DATA 0x61 35 36 ··· 753 750 if (ret < 0) 754 751 return ret; 755 752 756 - if (ret != 0x22) { 753 + if (FIELD_GET(TPS23881_REG_DEVID_MASK, ret) != TPS23881_DEVICE_ID) { 757 754 dev_err(dev, "Wrong device ID\n"); 758 755 return -ENXIO; 759 756 }
+2
drivers/net/usb/qmi_wwan.c
··· 201 201 break; 202 202 default: 203 203 /* not ip - do not know what to do */ 204 + kfree_skb(skbn); 204 205 goto skip; 205 206 } 206 207 ··· 1432 1431 {QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */ 1433 1432 {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */ 1434 1433 {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */ 1434 + {QMI_FIXED_INTF(0x2dee, 0x4d22, 5)}, /* MeiG Smart SRM825L */ 1435 1435 1436 1436 /* 4. Gobi 1000 devices */ 1437 1437 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+9 -2
drivers/net/usb/sr9700.c
··· 179 179 struct usbnet *dev = netdev_priv(netdev); 180 180 __le16 res; 181 181 int rc = 0; 182 + int err; 182 183 183 184 if (phy_id) { 184 185 netdev_dbg(netdev, "Only internal phy supported\n"); ··· 190 189 if (loc == MII_BMSR) { 191 190 u8 value; 192 191 193 - sr_read_reg(dev, SR_NSR, &value); 192 + err = sr_read_reg(dev, SR_NSR, &value); 193 + if (err < 0) 194 + return err; 195 + 194 196 if (value & NSR_LINKST) 195 197 rc = 1; 196 198 } 197 - sr_share_read_word(dev, 1, loc, &res); 199 + err = sr_share_read_word(dev, 1, loc, &res); 200 + if (err < 0) 201 + return err; 202 + 198 203 if (rc == 1) 199 204 res = le16_to_cpu(res) | BMSR_LSTATUS; 200 205 else
+12 -2
drivers/net/virtio_net.c
··· 3658 3658 { 3659 3659 int err; 3660 3660 3661 + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 3662 + return -EOPNOTSUPP; 3663 + 3661 3664 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), 3662 3665 max_usecs, max_packets); 3663 3666 if (err) ··· 3677 3674 u32 max_packets) 3678 3675 { 3679 3676 int err; 3677 + 3678 + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 3679 + return -EOPNOTSUPP; 3680 3680 3681 3681 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), 3682 3682 max_usecs, max_packets); ··· 3749 3743 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, 3750 3744 vi->intr_coal_tx.max_usecs, 3751 3745 vi->intr_coal_tx.max_packets); 3752 - if (err) 3746 + 3747 + /* Don't break the tx resize action if the vq coalescing is not 3748 + * supported. The same is true for rx resize below. 3749 + */ 3750 + if (err && err != -EOPNOTSUPP) 3753 3751 return err; 3754 3752 } 3755 3753 ··· 3768 3758 vi->intr_coal_rx.max_usecs, 3769 3759 vi->intr_coal_rx.max_packets); 3770 3760 mutex_unlock(&vi->rq[i].dim_lock); 3771 - if (err) 3761 + if (err && err != -EOPNOTSUPP) 3772 3762 return err; 3773 3763 } 3774 3764 }
+23 -10
drivers/net/wan/fsl_qmc_hdlc.c
··· 18 18 #include <linux/hdlc.h> 19 19 #include <linux/mod_devicetable.h> 20 20 #include <linux/module.h> 21 + #include <linux/mutex.h> 21 22 #include <linux/platform_device.h> 22 23 #include <linux/slab.h> 23 24 #include <linux/spinlock.h> ··· 38 37 struct qmc_chan *qmc_chan; 39 38 struct net_device *netdev; 40 39 struct framer *framer; 41 - spinlock_t carrier_lock; /* Protect carrier detection */ 40 + struct mutex carrier_lock; /* Protect carrier detection */ 42 41 struct notifier_block nb; 43 42 bool is_crc32; 44 43 spinlock_t tx_lock; /* Protect tx descriptors */ ··· 61 60 if (!qmc_hdlc->framer) 62 61 return 0; 63 62 64 - guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock); 63 + guard(mutex)(&qmc_hdlc->carrier_lock); 65 64 66 65 ret = framer_get_status(qmc_hdlc->framer, &framer_status); 67 66 if (ret) { ··· 250 249 struct qmc_hdlc_desc *desc = context; 251 250 struct net_device *netdev; 252 251 struct qmc_hdlc *qmc_hdlc; 252 + size_t crc_size; 253 253 int ret; 254 254 255 255 netdev = desc->netdev; ··· 269 267 if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */ 270 268 netdev->stats.rx_crc_errors++; 271 269 kfree_skb(desc->skb); 272 - } else { 273 - netdev->stats.rx_packets++; 274 - netdev->stats.rx_bytes += length; 275 - 276 - skb_put(desc->skb, length); 277 - desc->skb->protocol = hdlc_type_trans(desc->skb, netdev); 278 - netif_rx(desc->skb); 270 + goto re_queue; 279 271 } 280 272 273 + /* Discard the CRC */ 274 + crc_size = qmc_hdlc->is_crc32 ? 
4 : 2; 275 + if (length < crc_size) { 276 + netdev->stats.rx_length_errors++; 277 + kfree_skb(desc->skb); 278 + goto re_queue; 279 + } 280 + length -= crc_size; 281 + 282 + netdev->stats.rx_packets++; 283 + netdev->stats.rx_bytes += length; 284 + 285 + skb_put(desc->skb, length); 286 + desc->skb->protocol = hdlc_type_trans(desc->skb, netdev); 287 + netif_rx(desc->skb); 288 + 289 + re_queue: 281 290 /* Re-queue a transfer using the same descriptor */ 282 291 ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size); 283 292 if (ret) { ··· 719 706 720 707 qmc_hdlc->dev = dev; 721 708 spin_lock_init(&qmc_hdlc->tx_lock); 722 - spin_lock_init(&qmc_hdlc->carrier_lock); 709 + mutex_init(&qmc_hdlc->carrier_lock); 723 710 724 711 qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node); 725 712 if (IS_ERR(qmc_hdlc->qmc_chan))
+2 -1
drivers/net/wireless/ath/ath12k/pci.c
··· 473 473 { 474 474 int i; 475 475 476 - clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); 476 + if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) 477 + return; 477 478 478 479 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 479 480 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+4 -4
drivers/net/wireless/ath/ath12k/wow.c
··· 361 361 struct ath12k *ar = arvif->ar; 362 362 unsigned long wow_mask = 0; 363 363 int pattern_id = 0; 364 - int ret, i; 364 + int ret, i, j; 365 365 366 366 /* Setup requested WOW features */ 367 367 switch (arvif->vdev_type) { ··· 431 431 eth_pattern->pattern_len); 432 432 433 433 /* convert bitmask to bytemask */ 434 - for (i = 0; i < eth_pattern->pattern_len; i++) 435 - if (eth_pattern->mask[i / 8] & BIT(i % 8)) 436 - new_pattern.bytemask[i] = 0xff; 434 + for (j = 0; j < eth_pattern->pattern_len; j++) 435 + if (eth_pattern->mask[j / 8] & BIT(j % 8)) 436 + new_pattern.bytemask[j] = 0xff; 437 437 438 438 new_pattern.pattern_len = eth_pattern->pattern_len; 439 439 new_pattern.pkt_offset = eth_pattern->pkt_offset;
+1
drivers/net/wireless/mediatek/mt76/mt7921/main.c
··· 303 303 304 304 mvif->bss_conf.mt76.omac_idx = mvif->bss_conf.mt76.idx; 305 305 mvif->phy = phy; 306 + mvif->bss_conf.vif = mvif; 306 307 mvif->bss_conf.mt76.band_idx = 0; 307 308 mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS; 308 309
-2
drivers/nfc/pn544/i2c.c
··· 126 126 #define PN544_FW_CMD_RESULT_COMMAND_REJECTED 0xE0 127 127 #define PN544_FW_CMD_RESULT_CHUNK_ERROR 0xE6 128 128 129 - #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) 130 - 131 129 #define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7 132 130 #define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE 133 131 #define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8
+9 -9
drivers/nvme/host/core.c
··· 36 36 struct nvme_ns_ids ids; 37 37 u32 nsid; 38 38 __le32 anagrpid; 39 + u8 pi_offset; 39 40 bool is_shared; 40 41 bool is_readonly; 41 42 bool is_ready; ··· 1758 1757 return 0; 1759 1758 } 1760 1759 1761 - static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head, 1762 - struct queue_limits *lim) 1760 + static bool nvme_init_integrity(struct nvme_ns_head *head, 1761 + struct queue_limits *lim, struct nvme_ns_info *info) 1763 1762 { 1764 1763 struct blk_integrity *bi = &lim->integrity; 1765 1764 ··· 1817 1816 } 1818 1817 1819 1818 bi->tuple_size = head->ms; 1820 - bi->pi_offset = head->pi_offset; 1819 + bi->pi_offset = info->pi_offset; 1821 1820 return true; 1822 1821 } 1823 1822 ··· 1903 1902 1904 1903 static void nvme_configure_metadata(struct nvme_ctrl *ctrl, 1905 1904 struct nvme_ns_head *head, struct nvme_id_ns *id, 1906 - struct nvme_id_ns_nvm *nvm) 1905 + struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info) 1907 1906 { 1908 1907 head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1909 1908 head->pi_type = 0; 1910 1909 head->pi_size = 0; 1911 - head->pi_offset = 0; 1912 1910 head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms); 1913 1911 if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1914 1912 return; ··· 1922 1922 if (head->pi_size && head->ms >= head->pi_size) 1923 1923 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1924 1924 if (!(id->dps & NVME_NS_DPS_PI_FIRST)) 1925 - head->pi_offset = head->ms - head->pi_size; 1925 + info->pi_offset = head->ms - head->pi_size; 1926 1926 1927 1927 if (ctrl->ops->flags & NVME_F_FABRICS) { 1928 1928 /* ··· 2156 2156 2157 2157 lim = queue_limits_start_update(ns->disk->queue); 2158 2158 nvme_set_ctrl_limits(ns->ctrl, &lim); 2159 - nvme_configure_metadata(ns->ctrl, ns->head, id, nvm); 2159 + nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info); 2160 2160 nvme_set_chunk_sectors(ns, id, &lim); 2161 2161 if (!nvme_update_disk_info(ns, id, &lim)) 
2162 2162 capacity = 0; ··· 2176 2176 * I/O to namespaces with metadata except when the namespace supports 2177 2177 * PI, as it can strip/insert in that case. 2178 2178 */ 2179 - if (!nvme_init_integrity(ns->disk, ns->head, &lim)) 2179 + if (!nvme_init_integrity(ns->head, &lim, info)) 2180 2180 capacity = 0; 2181 2181 2182 2182 ret = queue_limits_commit_update(ns->disk->queue, &lim); ··· 2280 2280 if (unsupported) 2281 2281 ns->head->disk->flags |= GENHD_FL_HIDDEN; 2282 2282 else 2283 - nvme_init_integrity(ns->head->disk, ns->head, &lim); 2283 + nvme_init_integrity(ns->head, &lim, info); 2284 2284 ret = queue_limits_commit_update(ns->head->disk->queue, &lim); 2285 2285 2286 2286 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
+6 -7
drivers/nvme/host/nvme.h
··· 462 462 struct srcu_struct srcu; 463 463 struct nvme_subsystem *subsys; 464 464 struct nvme_ns_ids ids; 465 + u8 lba_shift; 466 + u16 ms; 467 + u16 pi_size; 468 + u8 pi_type; 469 + u8 guard_type; 465 470 struct list_head entry; 466 471 struct kref ref; 467 472 bool shared; 468 473 bool passthru_err_log_enabled; 469 - int instance; 470 474 struct nvme_effects_log *effects; 471 475 u64 nuse; 472 476 unsigned ns_id; 473 - int lba_shift; 474 - u16 ms; 475 - u16 pi_size; 476 - u8 pi_type; 477 - u8 pi_offset; 478 - u8 guard_type; 477 + int instance; 479 478 #ifdef CONFIG_BLK_DEV_ZONED 480 479 u64 zsze; 481 480 #endif
+3 -1
drivers/pci/hotplug/pciehp_hpc.c
··· 485 485 struct pci_dev *pdev = ctrl_dev(ctrl); 486 486 487 487 pci_config_pm_runtime_get(pdev); 488 - pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC, status), 488 + 489 + /* Attention and Power Indicator Control bits are supported */ 490 + pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status), 489 491 PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC); 490 492 pci_config_pm_runtime_put(pdev); 491 493 return 0;
+8 -7
drivers/pci/pci.c
··· 4477 4477 { 4478 4478 u16 pci_command, new; 4479 4479 4480 - /* Preserve the "hybrid" behavior for backwards compatibility */ 4481 - if (pci_is_managed(pdev)) { 4482 - WARN_ON_ONCE(pcim_intx(pdev, enable) != 0); 4483 - return; 4484 - } 4485 - 4486 4480 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 4487 4481 4488 4482 if (enable) ··· 4484 4490 else 4485 4491 new = pci_command | PCI_COMMAND_INTX_DISABLE; 4486 4492 4487 - if (new != pci_command) 4493 + if (new != pci_command) { 4494 + /* Preserve the "hybrid" behavior for backwards compatibility */ 4495 + if (pci_is_managed(pdev)) { 4496 + WARN_ON_ONCE(pcim_intx(pdev, enable) != 0); 4497 + return; 4498 + } 4499 + 4488 4500 pci_write_config_word(pdev, PCI_COMMAND, new); 4501 + } 4489 4502 } 4490 4503 EXPORT_SYMBOL_GPL(pci_intx); 4491 4504
+1 -1
drivers/perf/riscv_pmu_sbi.c
··· 416 416 * but not in the user access mode as we want to use the other counters 417 417 * that support sampling/filtering. 418 418 */ 419 - if (hwc->flags & PERF_EVENT_FLAG_LEGACY) { 419 + if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) { 420 420 if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) { 421 421 cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH; 422 422 cmask = 1;
+2
drivers/platform/chrome/cros_ec_proto.c
··· 813 813 if (ret == -ENOPROTOOPT) { 814 814 dev_dbg(ec_dev->dev, 815 815 "GET_NEXT_EVENT returned invalid version error.\n"); 816 + mutex_lock(&ec_dev->lock); 816 817 ret = cros_ec_get_host_command_version_mask(ec_dev, 817 818 EC_CMD_GET_NEXT_EVENT, 818 819 &ver_mask); 820 + mutex_unlock(&ec_dev->lock); 819 821 if (ret < 0 || ver_mask == 0) 820 822 /* 821 823 * Do not change the MKBP supported version if we can't
+55 -25
drivers/platform/cznic/Kconfig
··· 16 16 tristate "Turris Omnia MCU driver" 17 17 depends on MACH_ARMADA_38X || COMPILE_TEST 18 18 depends on I2C 19 - depends on OF 20 - depends on WATCHDOG 21 - depends on GPIOLIB 22 - depends on HW_RANDOM 23 - depends on RTC_CLASS 24 - depends on WATCHDOG_CORE 25 - select GPIOLIB_IRQCHIP 26 19 help 27 20 Say Y here to add support for the features implemented by the 28 21 microcontroller on the CZ.NIC's Turris Omnia SOHO router. 29 - The features include: 30 - - board poweroff into true low power mode (with voltage regulators 31 - disabled) and the ability to configure wake up from this mode (via 32 - rtcwake) 33 - - true random number generator (if available on the MCU) 34 - - MCU watchdog 35 - - GPIO pins 36 - - to get front button press events (the front button can be 37 - configured either to generate press events to the CPU or to change 38 - front LEDs panel brightness) 39 - - to enable / disable USB port voltage regulators and to detect 40 - USB overcurrent 41 - - to detect MiniPCIe / mSATA card presence in MiniPCIe port 0 42 - - to configure resets of various peripherals on board revisions 32+ 43 - - to enable / disable the VHV voltage regulator to the SOC in order 44 - to be able to program SOC's OTP on board revisions 32+ 45 - - to get input from the LED output pins of the WAN ethernet PHY, LAN 46 - switch and MiniPCIe ports 22 + This option only enables the core part of the driver. Specific 23 + features can be enabled by subsequent config options. 47 24 To compile this driver as a module, choose M here; the module will be 48 25 called turris-omnia-mcu. 26 + 27 + if TURRIS_OMNIA_MCU 28 + 29 + config TURRIS_OMNIA_MCU_GPIO 30 + bool "Turris Omnia MCU GPIOs" 31 + default y 32 + depends on GPIOLIB 33 + depends on OF 34 + select GPIOLIB_IRQCHIP 35 + help 36 + Say Y here to add support for controlling MCU GPIO pins and receiving 37 + MCU interrupts on CZ.NIC's Turris Omnia. 
38 + This enables you to 39 + - get front button press events (the front button can be configured 40 + either to generate press events to the CPU or to change front LEDs 41 + panel brightness), 42 + - enable / disable USB port voltage regulators and to detect USB 43 + overcurrent, 44 + - detect MiniPCIe / mSATA card presence in MiniPCIe port 0, 45 + - configure resets of various peripherals on board revisions 32+, 46 + - enable / disable the VHV voltage regulator to the SOC in order to be 47 + able to program SOC's OTP on board revisions 32+, 48 + - get input from the LED output pins of the WAN ethernet PHY, LAN 49 + switch and MiniPCIe ports. 50 + 51 + config TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 52 + bool "Turris Omnia MCU system off and RTC wakeup" 53 + default y 54 + depends on RTC_CLASS 55 + help 56 + Say Y here to add support for CZ.NIC's Turris Omnia board poweroff 57 + into true low power mode (with voltage regulators disabled) and the 58 + ability to configure wake up from this mode (via rtcwake). 59 + 60 + config TURRIS_OMNIA_MCU_WATCHDOG 61 + bool "Turris Omnia MCU watchdog" 62 + default y 63 + depends on WATCHDOG 64 + select WATCHDOG_CORE 65 + help 66 + Say Y here to add support for watchdog provided by CZ.NIC's Turris 67 + Omnia MCU. 68 + 69 + config TURRIS_OMNIA_MCU_TRNG 70 + bool "Turris Omnia MCU true random number generator" 71 + default y 72 + depends on TURRIS_OMNIA_MCU_GPIO 73 + depends on HW_RANDOM 74 + help 75 + Say Y here to add support for the true random number generator 76 + provided by CZ.NIC's Turris Omnia MCU. 77 + 78 + endif # TURRIS_OMNIA_MCU 49 79 50 80 endif # CZNIC_PLATFORMS
+4 -4
drivers/platform/cznic/Makefile
··· 2 2 3 3 obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o 4 4 turris-omnia-mcu-y := turris-omnia-mcu-base.o 5 - turris-omnia-mcu-y += turris-omnia-mcu-gpio.o 6 - turris-omnia-mcu-y += turris-omnia-mcu-sys-off-wakeup.o 7 - turris-omnia-mcu-y += turris-omnia-mcu-trng.o 8 - turris-omnia-mcu-y += turris-omnia-mcu-watchdog.o 5 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_GPIO) += turris-omnia-mcu-gpio.o 6 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP) += turris-omnia-mcu-sys-off-wakeup.o 7 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_TRNG) += turris-omnia-mcu-trng.o 8 + turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_WATCHDOG) += turris-omnia-mcu-watchdog.o
+4
drivers/platform/cznic/turris-omnia-mcu-base.c
··· 197 197 198 198 static const struct attribute_group *omnia_mcu_groups[] = { 199 199 &omnia_mcu_base_group, 200 + #ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO 200 201 &omnia_mcu_gpio_group, 202 + #endif 203 + #ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 201 204 &omnia_mcu_poweroff_group, 205 + #endif 202 206 NULL 203 207 }; 204 208
+40 -2
drivers/platform/cznic/turris-omnia-mcu.h
··· 33 33 u8 board_first_mac[ETH_ALEN]; 34 34 u8 board_revision; 35 35 36 + #ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO 36 37 /* GPIO chip */ 37 38 struct gpio_chip gc; 38 39 struct mutex lock; ··· 42 41 struct delayed_work button_release_emul_work; 43 42 unsigned long last_status; 44 43 bool button_pressed_emul; 44 + #endif 45 45 46 + #ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 46 47 /* RTC device for configuring wake-up */ 47 48 struct rtc_device *rtcdev; 48 49 u32 rtc_alarm; 49 50 bool front_button_poweron; 51 + #endif 50 52 53 + #ifdef CONFIG_TURRIS_OMNIA_MCU_WATCHDOG 51 54 /* MCU watchdog */ 52 55 struct watchdog_device wdt; 56 + #endif 53 57 58 + #ifdef CONFIG_TURRIS_OMNIA_MCU_TRNG 54 59 /* true random number generator */ 55 60 struct hwrng trng; 56 61 struct completion trng_entropy_ready; 62 + #endif 57 63 }; 58 64 59 65 int omnia_cmd_write_read(const struct i2c_client *client, ··· 190 182 return omnia_cmd_read(client, cmd, reply, sizeof(*reply)); 191 183 } 192 184 185 + #ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO 193 186 extern const u8 omnia_int_to_gpio_idx[32]; 194 187 extern const struct attribute_group omnia_mcu_gpio_group; 195 - extern const struct attribute_group omnia_mcu_poweroff_group; 196 - 197 188 int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu); 189 + #else 190 + static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) 191 + { 192 + return 0; 193 + } 194 + #endif 195 + 196 + #ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP 197 + extern const struct attribute_group omnia_mcu_poweroff_group; 198 198 int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu); 199 + #else 200 + static inline int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu) 201 + { 202 + return 0; 203 + } 204 + #endif 205 + 206 + #ifdef CONFIG_TURRIS_OMNIA_MCU_TRNG 199 207 int omnia_mcu_register_trng(struct omnia_mcu *mcu); 208 + #else 209 + static inline int omnia_mcu_register_trng(struct omnia_mcu *mcu) 210 + { 211 + return 0; 212 + } 213 + #endif 214 + 215 + 
#ifdef CONFIG_TURRIS_OMNIA_MCU_WATCHDOG 200 216 int omnia_mcu_register_watchdog(struct omnia_mcu *mcu); 217 + #else 218 + static inline int omnia_mcu_register_watchdog(struct omnia_mcu *mcu) 219 + { 220 + return 0; 221 + } 222 + #endif 201 223 202 224 #endif /* __TURRIS_OMNIA_MCU_H */
+2
drivers/platform/x86/amd/pmc/pmc.c
··· 764 764 case AMD_CPU_ID_CB: 765 765 case AMD_CPU_ID_PS: 766 766 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 767 + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 767 768 return MSG_OS_HINT_RN; 768 769 } 769 770 return -EINVAL; ··· 968 967 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) }, 969 968 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) }, 970 969 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, 970 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) }, 971 971 { } 972 972 }; 973 973
+1
drivers/platform/x86/amd/pmc/pmc.h
··· 67 67 #define AMD_CPU_ID_PS 0x14E8 68 68 #define AMD_CPU_ID_SP 0x14A4 69 69 #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 70 + #define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122 70 71 #define PCI_DEVICE_ID_AMD_MP2_STB 0x172c 71 72 72 73 #endif /* PMC_H */
+3
drivers/platform/x86/amd/pmf/core.c
··· 41 41 #define AMD_CPU_ID_RMB 0x14b5 42 42 #define AMD_CPU_ID_PS 0x14e8 43 43 #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 44 + #define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122 44 45 45 46 #define PMF_MSG_DELAY_MIN_US 50 46 47 #define RESPONSE_REGISTER_LOOP_MAX 20000 ··· 250 249 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) }, 251 250 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, 252 251 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, 252 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) }, 253 253 { } 254 254 }; 255 255 ··· 384 382 {"AMDI0102", 0}, 385 383 {"AMDI0103", 0}, 386 384 {"AMDI0105", 0}, 385 + {"AMDI0107", 0}, 387 386 { } 388 387 }; 389 388 MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
+8 -1
drivers/platform/x86/amd/pmf/pmf-quirks.c
··· 29 29 }, 30 30 .driver_data = &quirk_no_sps_bug, 31 31 }, 32 + { 33 + .ident = "ROG Ally X", 34 + .matches = { 35 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 36 + DMI_MATCH(DMI_PRODUCT_NAME, "RC72LA"), 37 + }, 38 + .driver_data = &quirk_no_sps_bug, 39 + }, 32 40 {} 33 41 }; 34 42 ··· 56 48 dmi_id->ident); 57 49 } 58 50 } 59 -
+1 -1
drivers/platform/x86/intel/ifs/runtest.c
··· 221 221 */ 222 222 static void ifs_test_core(int cpu, struct device *dev) 223 223 { 224 + union ifs_status status = {}; 224 225 union ifs_scan activate; 225 - union ifs_status status; 226 226 unsigned long timeout; 227 227 struct ifs_data *ifsd; 228 228 int to_start, to_stop;
+9
drivers/platform/x86/intel/vbtn.c
··· 7 7 */ 8 8 9 9 #include <linux/acpi.h> 10 + #include <linux/cleanup.h> 10 11 #include <linux/dmi.h> 11 12 #include <linux/input.h> 12 13 #include <linux/input/sparse-keymap.h> 13 14 #include <linux/kernel.h> 14 15 #include <linux/module.h> 16 + #include <linux/mutex.h> 15 17 #include <linux/platform_device.h> 16 18 #include <linux/suspend.h> 17 19 #include "../dual_accel_detect.h" ··· 68 66 }; 69 67 70 68 struct intel_vbtn_priv { 69 + struct mutex mutex; /* Avoid notify_handler() racing with itself */ 71 70 struct input_dev *buttons_dev; 72 71 struct input_dev *switches_dev; 73 72 bool dual_accel; ··· 157 154 struct input_dev *input_dev; 158 155 bool autorelease; 159 156 int ret; 157 + 158 + guard(mutex)(&priv->mutex); 160 159 161 160 if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) { 162 161 if (!priv->has_buttons) { ··· 294 289 if (!priv) 295 290 return -ENOMEM; 296 291 dev_set_drvdata(&device->dev, priv); 292 + 293 + err = devm_mutex_init(&device->dev, &priv->mutex); 294 + if (err) 295 + return err; 297 296 298 297 priv->dual_accel = dual_accel; 299 298 priv->has_buttons = has_buttons;
-1
drivers/platform/x86/sony-laptop.c
··· 757 757 return result; 758 758 } 759 759 760 - #define MIN(a, b) (a > b ? b : a) 761 760 static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value, 762 761 void *buffer, size_t buflen) 763 762 {
+12 -12
drivers/power/supply/axp288_charger.c
··· 178 178 u8 reg_val; 179 179 int ret; 180 180 181 - if (cv <= CV_4100MV) { 182 - reg_val = CHRG_CCCV_CV_4100MV; 183 - cv = CV_4100MV; 184 - } else if (cv <= CV_4150MV) { 185 - reg_val = CHRG_CCCV_CV_4150MV; 186 - cv = CV_4150MV; 187 - } else if (cv <= CV_4200MV) { 188 - reg_val = CHRG_CCCV_CV_4200MV; 189 - cv = CV_4200MV; 190 - } else { 181 + if (cv >= CV_4350MV) { 191 182 reg_val = CHRG_CCCV_CV_4350MV; 192 183 cv = CV_4350MV; 184 + } else if (cv >= CV_4200MV) { 185 + reg_val = CHRG_CCCV_CV_4200MV; 186 + cv = CV_4200MV; 187 + } else if (cv >= CV_4150MV) { 188 + reg_val = CHRG_CCCV_CV_4150MV; 189 + cv = CV_4150MV; 190 + } else { 191 + reg_val = CHRG_CCCV_CV_4100MV; 192 + cv = CV_4100MV; 193 193 } 194 194 195 195 reg_val = reg_val << CHRG_CCCV_CV_BIT_POS; ··· 337 337 } 338 338 break; 339 339 case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: 340 - scaled_val = min(val->intval, info->max_cv); 341 - scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000); 340 + scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000); 341 + scaled_val = min(scaled_val, info->max_cv); 342 342 ret = axp288_charger_set_cv(info, scaled_val); 343 343 if (ret < 0) { 344 344 dev_warn(&info->pdev->dev, "set charge voltage failed\n");
+7 -5
drivers/power/supply/qcom_battmgr.c
··· 486 486 int ret; 487 487 488 488 if (!battmgr->service_up) 489 - return -ENODEV; 489 + return -EAGAIN; 490 490 491 491 if (battmgr->variant == QCOM_BATTMGR_SC8280XP) 492 492 ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); ··· 683 683 int ret; 684 684 685 685 if (!battmgr->service_up) 686 - return -ENODEV; 686 + return -EAGAIN; 687 687 688 688 ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); 689 689 if (ret) ··· 748 748 int ret; 749 749 750 750 if (!battmgr->service_up) 751 - return -ENODEV; 751 + return -EAGAIN; 752 752 753 753 if (battmgr->variant == QCOM_BATTMGR_SC8280XP) 754 754 ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); ··· 867 867 int ret; 868 868 869 869 if (!battmgr->service_up) 870 - return -ENODEV; 870 + return -EAGAIN; 871 871 872 872 if (battmgr->variant == QCOM_BATTMGR_SC8280XP) 873 873 ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); ··· 1007 1007 battmgr->error = 0; 1008 1008 break; 1009 1009 case BATTMGR_BAT_INFO: 1010 - if (payload_len != sizeof(resp->info)) { 1010 + /* some firmware versions report an extra __le32 at the end of the payload */ 1011 + if (payload_len != sizeof(resp->info) && 1012 + payload_len != (sizeof(resp->info) + sizeof(__le32))) { 1011 1013 dev_warn(battmgr->dev, 1012 1014 "invalid payload length for battery information request: %zd\n", 1013 1015 payload_len);
+1
drivers/power/supply/rt5033_battery.c
··· 159 159 return -EINVAL; 160 160 } 161 161 162 + i2c_set_clientdata(client, battery); 162 163 psy_cfg.of_node = client->dev.of_node; 163 164 psy_cfg.drv_data = battery; 164 165
+1
drivers/s390/cio/ccwgroup.c
··· 550 550 put_device(&gdev->dev); 551 551 } 552 552 EXPORT_SYMBOL(ccwgroup_remove_ccwdev); 553 + MODULE_DESCRIPTION("ccwgroup bus driver"); 553 554 MODULE_LICENSE("GPL");
+1
drivers/s390/cio/vfio_ccw_drv.c
··· 488 488 module_init(vfio_ccw_sch_init); 489 489 module_exit(vfio_ccw_sch_exit); 490 490 491 + MODULE_DESCRIPTION("VFIO based Subchannel device driver"); 491 492 MODULE_LICENSE("GPL v2");
+1 -5
drivers/scsi/isci/init.c
··· 65 65 #include "task.h" 66 66 #include "probe_roms.h" 67 67 68 - #define MAJ 1 69 - #define MIN 2 70 - #define BUILD 0 71 - #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 72 - __stringify(BUILD) 68 + #define DRV_VERSION "1.2.0" 73 69 74 70 MODULE_VERSION(DRV_VERSION); 75 71
+11
drivers/scsi/mpi3mr/mpi3mr_os.c
··· 3575 3575 scmd->sc_data_direction); 3576 3576 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3577 3577 } else { 3578 + /* 3579 + * Some firmware versions byte-swap the REPORT ZONES command 3580 + * reply from ATA-ZAC devices by directly accessing in the host 3581 + * buffer. This does not respect the default command DMA 3582 + * direction and causes IOMMU page faults on some architectures 3583 + * with an IOMMU enforcing write mappings (e.g. AMD hosts). 3584 + * Avoid such issue by making the REPORT ZONES buffer mapping 3585 + * bi-directional. 3586 + */ 3587 + if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) 3588 + scmd->sc_data_direction = DMA_BIDIRECTIONAL; 3578 3589 sg_scmd = scsi_sglist(scmd); 3579 3590 sges_left = scsi_dma_map(scmd); 3580 3591 }
+18 -2
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 2671 2671 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); 2672 2672 } 2673 2673 2674 + static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd) 2675 + { 2676 + /* 2677 + * Some firmware versions byte-swap the REPORT ZONES command reply from 2678 + * ATA-ZAC devices by directly accessing in the host buffer. This does 2679 + * not respect the default command DMA direction and causes IOMMU page 2680 + * faults on some architectures with an IOMMU enforcing write mappings 2681 + * (e.g. AMD hosts). Avoid such issue by making the report zones buffer 2682 + * mapping bi-directional. 2683 + */ 2684 + if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES) 2685 + cmd->sc_data_direction = DMA_BIDIRECTIONAL; 2686 + 2687 + return scsi_dma_map(cmd); 2688 + } 2689 + 2674 2690 /** 2675 2691 * _base_build_sg_scmd - main sg creation routine 2676 2692 * pcie_device is unused here! ··· 2733 2717 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2734 2718 2735 2719 sg_scmd = scsi_sglist(scmd); 2736 - sges_left = scsi_dma_map(scmd); 2720 + sges_left = _base_scsi_dma_map(scmd); 2737 2721 if (sges_left < 0) 2738 2722 return -ENOMEM; 2739 2723 ··· 2877 2861 } 2878 2862 2879 2863 sg_scmd = scsi_sglist(scmd); 2880 - sges_left = scsi_dma_map(scmd); 2864 + sges_left = _base_scsi_dma_map(scmd); 2881 2865 if (sges_left < 0) 2882 2866 return -ENOMEM; 2883 2867
+13 -7
drivers/scsi/sd.c
··· 2711 2711 2712 2712 if (buffer[14] & 0x40) /* LBPRZ */ 2713 2713 sdkp->lbprz = 1; 2714 - 2715 - sd_config_discard(sdkp, lim, SD_LBP_WS16); 2716 2714 } 2717 2715 2718 2716 sdkp->capacity = lba + 1; ··· 3363 3365 sdkp->unmap_alignment = 3364 3366 get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); 3365 3367 3366 - sd_config_discard(sdkp, lim, sd_discard_mode(sdkp)); 3367 - 3368 3368 config_atomic: 3369 3369 sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]); 3370 3370 sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]); ··· 3749 3753 sd_read_block_limits_ext(sdkp); 3750 3754 sd_read_block_characteristics(sdkp, &lim); 3751 3755 sd_zbc_read_zones(sdkp, &lim, buffer); 3752 - sd_read_cpr(sdkp); 3753 3756 } 3757 + 3758 + sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp)); 3754 3759 3755 3760 sd_print_capacity(sdkp, old_capacity); 3756 3761 ··· 3804 3807 blk_mq_unfreeze_queue(sdkp->disk->queue); 3805 3808 if (err) 3806 3809 return err; 3810 + 3811 + /* 3812 + * Query concurrent positioning ranges after 3813 + * queue_limits_commit_update() unlocked q->limits_lock to avoid 3814 + * deadlock with q->sysfs_dir_lock and q->sysfs_lock. 
3815 + */ 3816 + if (sdkp->media_present && scsi_device_supports_vpd(sdp)) 3817 + sd_read_cpr(sdkp); 3807 3818 3808 3819 /* 3809 3820 * For a zoned drive, revalidating the zones can be done only once ··· 4210 4205 { 4211 4206 struct scsi_disk *sdkp = dev_get_drvdata(dev); 4212 4207 4208 + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 4209 + 4213 4210 if (opal_unlock_from_suspend(sdkp->opal_dev)) { 4214 4211 sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n"); 4215 4212 return -EIO; ··· 4228 4221 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 4229 4222 return 0; 4230 4223 4231 - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 4232 - 4233 4224 if (!sd_do_start_stop(sdkp->device, runtime)) { 4234 4225 sdkp->suspended = false; 4235 4226 return 0; 4236 4227 } 4237 4228 4229 + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 4238 4230 ret = sd_start_stop_device(sdkp, 1); 4239 4231 if (!ret) { 4240 4232 sd_resume(dev);
+1 -1
drivers/scsi/sr_ioctl.c
··· 431 431 struct packet_command cgc; 432 432 433 433 /* avoid exceeding the max speed or overflowing integer bounds */ 434 - speed = clamp(0, speed, 0xffff / 177); 434 + speed = clamp(speed, 0, 0xffff / 177); 435 435 436 436 if (speed == 0) 437 437 speed = 0xffff; /* set to max */
+4 -2
drivers/spi/spi-fsl-lpspi.c
··· 296 296 static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) 297 297 { 298 298 struct lpspi_config config = fsl_lpspi->config; 299 - unsigned int perclk_rate, scldiv; 299 + unsigned int perclk_rate, scldiv, div; 300 300 u8 prescale; 301 301 302 302 perclk_rate = clk_get_rate(fsl_lpspi->clk_per); ··· 313 313 return -EINVAL; 314 314 } 315 315 316 + div = DIV_ROUND_UP(perclk_rate, config.speed_hz); 317 + 316 318 for (prescale = 0; prescale < 8; prescale++) { 317 - scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2; 319 + scldiv = div / (1 << prescale) - 2; 318 320 if (scldiv < 256) { 319 321 fsl_lpspi->config.prescale = prescale; 320 322 break;
+4
drivers/spi/spi-hisi-kunpeng.c
··· 481 481 return -EINVAL; 482 482 } 483 483 484 + if (host->max_speed_hz == 0) 485 + return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n"); 486 + 484 487 ret = device_property_read_u16(dev, "num-cs", 485 488 &host->num_chipselect); 486 489 if (ret) ··· 498 495 host->transfer_one = hisi_spi_transfer_one; 499 496 host->handle_err = hisi_spi_handle_err; 500 497 host->dev.fwnode = dev->fwnode; 498 + host->min_speed_hz = DIV_ROUND_UP(host->max_speed_hz, CLK_DIV_MAX); 501 499 502 500 hisi_spi_hw_init(hs); 503 501
+1
drivers/spi/spidev.c
··· 700 700 }; 701 701 702 702 static const struct spi_device_id spidev_spi_ids[] = { 703 + { .name = "bh2228fv" }, 703 704 { .name = "dh2228fv" }, 704 705 { .name = "ltc2488" }, 705 706 { .name = "sx1301" },
+5 -6
drivers/spmi/spmi-pmic-arb.c
··· 398 398 399 399 *offset = rc; 400 400 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { 401 - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", 401 + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", 402 402 PMIC_ARB_MAX_TRANS_BYTES, len); 403 403 return -EINVAL; 404 404 } ··· 477 477 478 478 *offset = rc; 479 479 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { 480 - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", 480 + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", 481 481 PMIC_ARB_MAX_TRANS_BYTES, len); 482 482 return -EINVAL; 483 483 } ··· 1702 1702 1703 1703 index = of_property_match_string(node, "reg-names", "cnfg"); 1704 1704 if (index < 0) { 1705 - dev_err(dev, "cnfg reg region missing"); 1705 + dev_err(dev, "cnfg reg region missing\n"); 1706 1706 return -EINVAL; 1707 1707 } 1708 1708 ··· 1712 1712 1713 1713 index = of_property_match_string(node, "reg-names", "intr"); 1714 1714 if (index < 0) { 1715 - dev_err(dev, "intr reg region missing"); 1715 + dev_err(dev, "intr reg region missing\n"); 1716 1716 return -EINVAL; 1717 1717 } 1718 1718 ··· 1737 1737 1738 1738 dev_dbg(&pdev->dev, "adding irq domain for bus %d\n", bus_index); 1739 1739 1740 - bus->domain = irq_domain_add_tree(dev->of_node, 1741 - &pmic_arb_irq_domain_ops, bus); 1740 + bus->domain = irq_domain_add_tree(node, &pmic_arb_irq_domain_ops, bus); 1742 1741 if (!bus->domain) { 1743 1742 dev_err(&pdev->dev, "unable to create irq_domain\n"); 1744 1743 return -ENOMEM;
-5
drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
··· 22 22 /* force a value to a lower even value */ 23 23 #define EVEN_FLOOR(x) ((x) & ~1) 24 24 25 - /* for preprocessor and array sizing use MIN and MAX 26 - otherwise use min and max */ 27 - #define MAX(a, b) (((a) > (b)) ? (a) : (b)) 28 - #define MIN(a, b) (((a) < (b)) ? (a) : (b)) 29 - 30 25 #define CEIL_DIV(a, b) (((b) != 0) ? ((a) + (b) - 1) / (b) : 0) 31 26 #define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) 32 27 #define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1))
+22 -7
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
··· 278 278 279 279 static bool msi_irq; 280 280 281 + static void proc_thermal_free_msi(struct pci_dev *pdev, struct proc_thermal_pci *pci_info) 282 + { 283 + int i; 284 + 285 + for (i = 0; i < MSI_THERMAL_MAX; i++) { 286 + if (proc_thermal_msi_map[i]) 287 + devm_free_irq(&pdev->dev, proc_thermal_msi_map[i], pci_info); 288 + } 289 + 290 + pci_free_irq_vectors(pdev); 291 + } 292 + 281 293 static int proc_thermal_setup_msi(struct pci_dev *pdev, struct proc_thermal_pci *pci_info) 282 294 { 283 - int ret, i, irq; 295 + int ret, i, irq, count; 284 296 285 - ret = pci_alloc_irq_vectors(pdev, 1, MSI_THERMAL_MAX, PCI_IRQ_MSI | PCI_IRQ_MSIX); 286 - if (ret < 0) { 297 + count = pci_alloc_irq_vectors(pdev, 1, MSI_THERMAL_MAX, PCI_IRQ_MSI | PCI_IRQ_MSIX); 298 + if (count < 0) { 287 299 dev_err(&pdev->dev, "Failed to allocate vectors!\n"); 288 - return ret; 300 + return count; 289 301 } 290 302 291 303 dev_info(&pdev->dev, "msi enabled:%d msix enabled:%d\n", pdev->msi_enabled, 292 304 pdev->msix_enabled); 293 305 294 - for (i = 0; i < MSI_THERMAL_MAX; i++) { 306 + for (i = 0; i < count; i++) { 295 307 irq = pci_irq_vector(pdev, i); 296 308 297 309 ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler, ··· 322 310 return 0; 323 311 324 312 err_free_msi_vectors: 325 - pci_free_irq_vectors(pdev); 313 + proc_thermal_free_msi(pdev, pci_info); 326 314 327 315 return ret; 328 316 } ··· 409 397 410 398 err_free_vectors: 411 399 if (msi_irq) 412 - pci_free_irq_vectors(pdev); 400 + proc_thermal_free_msi(pdev, pci_info); 413 401 err_ret_tzone: 414 402 thermal_zone_device_unregister(pci_info->tzone); 415 403 err_del_legacy: ··· 430 418 431 419 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0); 432 420 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); 421 + 422 + if (msi_irq) 423 + proc_thermal_free_msi(pdev, pci_info); 433 424 434 425 thermal_zone_device_unregister(pci_info->tzone); 435 426 proc_thermal_mmio_remove(pdev, 
pci_info->proc_priv);
+2 -2
drivers/thermal/thermal_trip.c
··· 88 88 return; 89 89 90 90 for_each_trip_desc(tz, td) { 91 - if (td->threshold < tz->temperature && td->threshold > low) 91 + if (td->threshold <= tz->temperature && td->threshold > low) 92 92 low = td->threshold; 93 93 94 - if (td->threshold > tz->temperature && td->threshold < high) 94 + if (td->threshold >= tz->temperature && td->threshold < high) 95 95 high = td->threshold; 96 96 } 97 97
+15 -10
drivers/tty/serial/sc16is7xx.c
··· 327 327 struct kthread_work reg_work; 328 328 struct kthread_delayed_work ms_work; 329 329 struct sc16is7xx_one_config config; 330 + unsigned char buf[SC16IS7XX_FIFO_SIZE]; /* Rx buffer. */ 330 331 unsigned int old_mctrl; 331 332 u8 old_lcr; /* Value before EFR access. */ 332 333 bool irda_mode; ··· 341 340 unsigned long gpio_valid_mask; 342 341 #endif 343 342 u8 mctrl_mask; 344 - unsigned char buf[SC16IS7XX_FIFO_SIZE]; 345 343 struct kthread_worker kworker; 346 344 struct task_struct *kworker_task; 347 345 struct sc16is7xx_one p[]; ··· 592 592 SC16IS7XX_MCR_CLKSEL_BIT, 593 593 prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT); 594 594 595 + mutex_lock(&one->efr_lock); 596 + 595 597 /* Backup LCR and access special register set (DLL/DLH) */ 596 598 lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); 597 599 sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, ··· 608 606 /* Restore LCR and access to general register set */ 609 607 sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); 610 608 609 + mutex_unlock(&one->efr_lock); 610 + 611 611 return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div); 612 612 } 613 613 614 614 static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, 615 615 unsigned int iir) 616 616 { 617 - struct sc16is7xx_port *s = dev_get_drvdata(port->dev); 617 + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); 618 618 unsigned int lsr = 0, bytes_read, i; 619 619 bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC) ? 
true : false; 620 620 u8 ch, flag; 621 621 622 - if (unlikely(rxlen >= sizeof(s->buf))) { 622 + if (unlikely(rxlen >= sizeof(one->buf))) { 623 623 dev_warn_ratelimited(port->dev, 624 624 "ttySC%i: Possible RX FIFO overrun: %d\n", 625 625 port->line, rxlen); 626 626 port->icount.buf_overrun++; 627 627 /* Ensure sanity of RX level */ 628 - rxlen = sizeof(s->buf); 628 + rxlen = sizeof(one->buf); 629 629 } 630 630 631 631 while (rxlen) { ··· 640 636 lsr = 0; 641 637 642 638 if (read_lsr) { 643 - s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 639 + one->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 644 640 bytes_read = 1; 645 641 } else { 646 - sc16is7xx_fifo_read(port, s->buf, rxlen); 642 + sc16is7xx_fifo_read(port, one->buf, rxlen); 647 643 bytes_read = rxlen; 648 644 } 649 645 ··· 676 672 } 677 673 678 674 for (i = 0; i < bytes_read; ++i) { 679 - ch = s->buf[i]; 675 + ch = one->buf[i]; 680 676 if (uart_handle_sysrq_char(port, ch)) 681 677 continue; 682 678 ··· 694 690 695 691 static void sc16is7xx_handle_tx(struct uart_port *port) 696 692 { 697 - struct sc16is7xx_port *s = dev_get_drvdata(port->dev); 698 693 struct tty_port *tport = &port->state->port; 699 694 unsigned long flags; 700 695 unsigned int txlen; 696 + unsigned char *tail; 701 697 702 698 if (unlikely(port->x_char)) { 703 699 sc16is7xx_port_write(port, SC16IS7XX_THR_REG, port->x_char); ··· 722 718 txlen = 0; 723 719 } 724 720 725 - txlen = uart_fifo_out(port, s->buf, txlen); 726 - sc16is7xx_fifo_write(port, s->buf, txlen); 721 + txlen = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, txlen); 722 + sc16is7xx_fifo_write(port, tail, txlen); 723 + uart_xmit_advance(port, txlen); 727 724 728 725 uart_port_lock_irqsave(port, &flags); 729 726 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
+8
drivers/tty/serial/serial_core.c
··· 881 881 new_flags = (__force upf_t)new_info->flags; 882 882 old_custom_divisor = uport->custom_divisor; 883 883 884 + if (!(uport->flags & UPF_FIXED_PORT)) { 885 + unsigned int uartclk = new_info->baud_base * 16; 886 + /* check needs to be done here before other settings made */ 887 + if (uartclk == 0) { 888 + retval = -EINVAL; 889 + goto exit; 890 + } 891 + } 884 892 if (!capable(CAP_SYS_ADMIN)) { 885 893 retval = -EPERM; 886 894 if (change_irq || change_port ||
+7 -13
drivers/tty/vt/conmakehash.c
··· 11 11 * Copyright (C) 1995-1997 H. Peter Anvin 12 12 */ 13 13 14 + #include <libgen.h> 15 + #include <linux/limits.h> 14 16 #include <stdio.h> 15 17 #include <stdlib.h> 16 18 #include <sysexits.h> ··· 78 76 int main(int argc, char *argv[]) 79 77 { 80 78 FILE *ctbl; 81 - const char *tblname, *rel_tblname; 82 - const char *abs_srctree; 79 + const char *tblname; 80 + char base_tblname[PATH_MAX]; 83 81 char buffer[65536]; 84 82 int fontlen; 85 83 int i, nuni, nent; ··· 103 101 exit(EX_NOINPUT); 104 102 } 105 103 } 106 - 107 - abs_srctree = getenv("abs_srctree"); 108 - if (abs_srctree && !strncmp(abs_srctree, tblname, strlen(abs_srctree))) 109 - { 110 - rel_tblname = tblname + strlen(abs_srctree); 111 - while (*rel_tblname == '/') 112 - ++rel_tblname; 113 - } 114 - else 115 - rel_tblname = tblname; 116 104 117 105 /* For now we assume the default font is always 256 characters. */ 118 106 fontlen = 256; ··· 245 253 for ( i = 0 ; i < fontlen ; i++ ) 246 254 nuni += unicount[i]; 247 255 256 + strncpy(base_tblname, tblname, PATH_MAX); 257 + base_tblname[PATH_MAX - 1] = 0; 248 258 printf("\ 249 259 /*\n\ 250 260 * Do not edit this file; it was automatically generated by\n\ ··· 258 264 #include <linux/types.h>\n\ 259 265 \n\ 260 266 u8 dfont_unicount[%d] = \n\ 261 - {\n\t", rel_tblname, fontlen); 267 + {\n\t", basename(base_tblname), fontlen); 262 268 263 269 for ( i = 0 ; i < fontlen ; i++ ) 264 270 {
+5
drivers/ufs/core/ufshcd-priv.h
··· 316 316 return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev); 317 317 } 318 318 319 + static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba) 320 + { 321 + return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev); 322 + } 323 + 319 324 static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) 320 325 { 321 326 return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
+30 -8
drivers/ufs/core/ufshcd.c
··· 2416 2416 return err; 2417 2417 } 2418 2418 2419 + /* 2420 + * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and 2421 + * LSDB_SUPPORT, but [31:29] as reserved bits with reset value 0s, which 2422 + * means we can simply read values regardless of version. 2423 + */ 2419 2424 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); 2425 + /* 2426 + * 0h: legacy single doorbell support is available 2427 + * 1h: indicate that legacy single doorbell support has been removed 2428 + */ 2429 + hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); 2420 2430 if (!hba->mcq_sup) 2421 2431 return 0; 2422 2432 ··· 4100 4090 min_sleep_time_us = 4101 4091 MIN_DELAY_BEFORE_DME_CMDS_US - delta; 4102 4092 else 4103 - return; /* no more delay required */ 4093 + min_sleep_time_us = 0; /* no more delay required */ 4104 4094 } 4105 4095 4106 - /* allow sleep for extra 50us if needed */ 4107 - usleep_range(min_sleep_time_us, min_sleep_time_us + 50); 4096 + if (min_sleep_time_us > 0) { 4097 + /* allow sleep for extra 50us if needed */ 4098 + usleep_range(min_sleep_time_us, min_sleep_time_us + 50); 4099 + } 4100 + 4101 + /* update the last_dme_cmd_tstamp */ 4102 + hba->last_dme_cmd_tstamp = ktime_get(); 4108 4103 } 4109 4104 4110 4105 /** ··· 6568 6553 if (ufshcd_err_handling_should_stop(hba)) 6569 6554 goto skip_err_handling; 6570 6555 6571 - if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 6556 + if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) && 6557 + !hba->force_reset) { 6572 6558 bool ret; 6573 6559 6574 6560 spin_unlock_irqrestore(hba->host->host_lock, flags); ··· 8227 8211 */ 8228 8212 val = ts64.tv_sec - hba->dev_info.rtc_time_baseline; 8229 8213 8230 - ufshcd_rpm_get_sync(hba); 8214 + /* Skip update RTC if RPM state is not RPM_ACTIVE */ 8215 + if (ufshcd_rpm_get_if_active(hba) <= 0) 8216 + return; 8217 + 8231 8218 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED, 
8232 8219 0, 0, &val); 8233 8220 ufshcd_rpm_put_sync(hba); ··· 10284 10265 */ 10285 10266 ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H); 10286 10267 10287 - /* Resuming from hibernate, assume that link was OFF */ 10288 - ufshcd_set_link_off(hba); 10289 - 10290 10268 return 0; 10291 10269 10292 10270 } ··· 10512 10496 } 10513 10497 10514 10498 if (!is_mcq_supported(hba)) { 10499 + if (!hba->lsdb_sup) { 10500 + dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n", 10501 + __func__); 10502 + err = -EINVAL; 10503 + goto out_disable; 10504 + } 10515 10505 err = scsi_add_host(host, hba->dev); 10516 10506 if (err) { 10517 10507 dev_err(hba->dev, "scsi_add_host failed\n");
+3
drivers/ufs/host/ufs-exynos.c
··· 1293 1293 { 1294 1294 struct arm_smccc_res res; 1295 1295 1296 + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) 1297 + return; 1298 + 1296 1299 arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3, 1297 1300 0, 0, 0, 0, &res); 1298 1301 if (res.a0)
+20 -12
drivers/usb/gadget/function/f_fs.c
··· 3734 3734 if (alt > MAX_ALT_SETTINGS) 3735 3735 return -EINVAL; 3736 3736 3737 - if (alt != (unsigned)-1) { 3738 - intf = ffs_func_revmap_intf(func, interface); 3739 - if (intf < 0) 3740 - return intf; 3741 - } 3737 + intf = ffs_func_revmap_intf(func, interface); 3738 + if (intf < 0) 3739 + return intf; 3742 3740 3743 3741 if (ffs->func) 3744 3742 ffs_func_eps_disable(ffs->func); ··· 3751 3753 if (ffs->state != FFS_ACTIVE) 3752 3754 return -ENODEV; 3753 3755 3754 - if (alt == (unsigned)-1) { 3755 - ffs->func = NULL; 3756 - ffs_event_add(ffs, FUNCTIONFS_DISABLE); 3757 - return 0; 3758 - } 3759 - 3760 3756 ffs->func = func; 3761 3757 ret = ffs_func_eps_enable(func); 3762 3758 if (ret >= 0) { ··· 3762 3770 3763 3771 static void ffs_func_disable(struct usb_function *f) 3764 3772 { 3765 - ffs_func_set_alt(f, 0, (unsigned)-1); 3773 + struct ffs_function *func = ffs_func_from_usb(f); 3774 + struct ffs_data *ffs = func->ffs; 3775 + 3776 + if (ffs->func) 3777 + ffs_func_eps_disable(ffs->func); 3778 + 3779 + if (ffs->state == FFS_DEACTIVATED) { 3780 + ffs->state = FFS_CLOSING; 3781 + INIT_WORK(&ffs->reset_work, ffs_reset_work); 3782 + schedule_work(&ffs->reset_work); 3783 + return; 3784 + } 3785 + 3786 + if (ffs->state == FFS_ACTIVE) { 3787 + ffs->func = NULL; 3788 + ffs_event_add(ffs, FUNCTIONFS_DISABLE); 3789 + } 3766 3790 } 3767 3791 3768 3792 static int ffs_func_setup(struct usb_function *f,
+15 -6
drivers/usb/gadget/function/f_midi2.c
··· 642 642 if (format) 643 643 return; // invalid 644 644 blk = (*data >> 8) & 0xff; 645 - if (blk >= ep->num_blks) 646 - return; 647 - if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) 648 - reply_ump_stream_fb_info(ep, blk); 649 - if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) 650 - reply_ump_stream_fb_name(ep, blk); 645 + if (blk == 0xff) { 646 + /* inquiry for all blocks */ 647 + for (blk = 0; blk < ep->num_blks; blk++) { 648 + if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) 649 + reply_ump_stream_fb_info(ep, blk); 650 + if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) 651 + reply_ump_stream_fb_name(ep, blk); 652 + } 653 + } else if (blk < ep->num_blks) { 654 + /* only the specified block */ 655 + if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) 656 + reply_ump_stream_fb_info(ep, blk); 657 + if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) 658 + reply_ump_stream_fb_name(ep, blk); 659 + } 651 660 return; 652 661 } 653 662 }
+34 -8
drivers/usb/gadget/function/u_audio.c
··· 592 592 struct usb_ep *ep, *ep_fback; 593 593 struct uac_rtd_params *prm; 594 594 struct uac_params *params = &audio_dev->params; 595 - int req_len, i; 595 + int req_len, i, ret; 596 596 597 597 prm = &uac->c_prm; 598 598 dev_dbg(dev, "start capture with rate %d\n", prm->srate); 599 599 ep = audio_dev->out_ep; 600 - config_ep_by_speed(gadget, &audio_dev->func, ep); 600 + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); 601 + if (ret < 0) { 602 + dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret); 603 + return ret; 604 + } 605 + 601 606 req_len = ep->maxpacket; 602 607 603 608 prm->ep_enabled = true; 604 - usb_ep_enable(ep); 609 + ret = usb_ep_enable(ep); 610 + if (ret < 0) { 611 + dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret); 612 + return ret; 613 + } 605 614 606 615 for (i = 0; i < params->req_number; i++) { 607 616 if (!prm->reqs[i]) { ··· 638 629 return 0; 639 630 640 631 /* Setup feedback endpoint */ 641 - config_ep_by_speed(gadget, &audio_dev->func, ep_fback); 632 + ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback); 633 + if (ret < 0) { 634 + dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret); 635 + return ret; // TODO: Clean up out_ep 636 + } 637 + 642 638 prm->fb_ep_enabled = true; 643 - usb_ep_enable(ep_fback); 639 + ret = usb_ep_enable(ep_fback); 640 + if (ret < 0) { 641 + dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret); 642 + return ret; // TODO: Clean up out_ep 643 + } 644 644 req_len = ep_fback->maxpacket; 645 645 646 646 req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC); ··· 705 687 struct uac_params *params = &audio_dev->params; 706 688 unsigned int factor; 707 689 const struct usb_endpoint_descriptor *ep_desc; 708 - int req_len, i; 690 + int req_len, i, ret; 709 691 unsigned int p_pktsize; 710 692 711 693 prm = &uac->p_prm; 712 694 dev_dbg(dev, "start playback with rate %d\n", prm->srate); 713 695 ep = audio_dev->in_ep; 714 - config_ep_by_speed(gadget, 
&audio_dev->func, ep); 696 + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); 697 + if (ret < 0) { 698 + dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret); 699 + return ret; 700 + } 715 701 716 702 ep_desc = ep->desc; 717 703 /* ··· 742 720 uac->p_residue_mil = 0; 743 721 744 722 prm->ep_enabled = true; 745 - usb_ep_enable(ep); 723 + ret = usb_ep_enable(ep); 724 + if (ret < 0) { 725 + dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret); 726 + return ret; 727 + } 746 728 747 729 for (i = 0; i < params->req_number; i++) { 748 730 if (!prm->reqs[i]) {
+1
drivers/usb/gadget/function/u_serial.c
··· 1441 1441 spin_lock(&port->port_lock); 1442 1442 spin_unlock(&serial_port_lock); 1443 1443 port->suspended = true; 1444 + port->start_delayed = true; 1444 1445 spin_unlock_irqrestore(&port->port_lock, flags); 1445 1446 } 1446 1447 EXPORT_SYMBOL_GPL(gserial_suspend);
+4 -6
drivers/usb/gadget/udc/core.c
··· 118 118 goto out; 119 119 120 120 /* UDC drivers can't handle endpoints with maxpacket size 0 */ 121 - if (usb_endpoint_maxp(ep->desc) == 0) { 122 - /* 123 - * We should log an error message here, but we can't call 124 - * dev_err() because there's no way to find the gadget 125 - * given only ep. 126 - */ 121 + if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) { 122 + WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name, 123 + (!ep->desc) ? "NULL descriptor" : "maxpacket 0"); 124 + 127 125 ret = -EINVAL; 128 126 goto out; 129 127 }
+1
drivers/usb/serial/ch341.c
··· 863 863 864 864 module_usb_serial_driver(serial_drivers, id_table); 865 865 866 + MODULE_DESCRIPTION("Winchiphead CH341 USB Serial driver"); 866 867 MODULE_LICENSE("GPL v2");
+2 -3
drivers/usb/serial/garmin_gps.c
··· 104 104 int seq; 105 105 /* the real size of the data array, always > 0 */ 106 106 int size; 107 - __u8 data[]; 107 + __u8 data[] __counted_by(size); 108 108 }; 109 109 110 110 /* structure used to keep the current state of the driver */ ··· 267 267 268 268 /* process only packets containing data ... */ 269 269 if (data_length) { 270 - pkt = kmalloc(sizeof(struct garmin_packet)+data_length, 271 - GFP_ATOMIC); 270 + pkt = kmalloc(struct_size(pkt, data, data_length), GFP_ATOMIC); 272 271 if (!pkt) 273 272 return 0; 274 273
+1
drivers/usb/serial/mxuport.c
··· 1315 1315 1316 1316 MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>"); 1317 1317 MODULE_AUTHOR("<support@moxa.com>"); 1318 + MODULE_DESCRIPTION("Moxa UPORT USB Serial driver"); 1318 1319 MODULE_LICENSE("GPL");
+1
drivers/usb/serial/navman.c
··· 112 112 113 113 module_usb_serial_driver(serial_drivers, id_table); 114 114 115 + MODULE_DESCRIPTION("Navman USB Serial driver"); 115 116 MODULE_LICENSE("GPL v2");
+1
drivers/usb/serial/qcaux.c
··· 84 84 }; 85 85 86 86 module_usb_serial_driver(serial_drivers, id_table); 87 + MODULE_DESCRIPTION("Qualcomm USB Auxiliary Serial Port driver"); 87 88 MODULE_LICENSE("GPL v2");
-10
drivers/usb/serial/spcp8x5.c
··· 49 49 }; 50 50 MODULE_DEVICE_TABLE(usb, id_table); 51 51 52 - struct spcp8x5_usb_ctrl_arg { 53 - u8 type; 54 - u8 cmd; 55 - u8 cmd_type; 56 - u16 value; 57 - u16 index; 58 - u16 length; 59 - }; 60 - 61 - 62 52 /* spcp8x5 spec register define */ 63 53 #define MCR_CONTROL_LINE_RTS 0x02 64 54 #define MCR_CONTROL_LINE_DTR 0x01
+1
drivers/usb/serial/symbolserial.c
··· 190 190 191 191 module_usb_serial_driver(serial_drivers, id_table); 192 192 193 + MODULE_DESCRIPTION("Symbol USB barcode to serial driver"); 193 194 MODULE_LICENSE("GPL v2");
+1
drivers/usb/serial/usb-serial-simple.c
··· 163 163 MODULE_DEVICE_TABLE(usb, id_table); 164 164 165 165 module_usb_serial_driver(serial_drivers, id_table); 166 + MODULE_DESCRIPTION("USB Serial 'Simple' driver"); 166 167 MODULE_LICENSE("GPL v2");
+8
drivers/usb/serial/usb_debug.c
··· 76 76 usb_serial_generic_process_read_urb(urb); 77 77 } 78 78 79 + static void usb_debug_init_termios(struct tty_struct *tty) 80 + { 81 + tty->termios.c_lflag &= ~(ECHO | ECHONL); 82 + } 83 + 79 84 static struct usb_serial_driver debug_device = { 80 85 .driver = { 81 86 .owner = THIS_MODULE, ··· 90 85 .num_ports = 1, 91 86 .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE, 92 87 .break_ctl = usb_debug_break_ctl, 88 + .init_termios = usb_debug_init_termios, 93 89 .process_read_urb = usb_debug_process_read_urb, 94 90 }; 95 91 ··· 102 96 .id_table = dbc_id_table, 103 97 .num_ports = 1, 104 98 .break_ctl = usb_debug_break_ctl, 99 + .init_termios = usb_debug_init_termios, 105 100 .process_read_urb = usb_debug_process_read_urb, 106 101 }; 107 102 ··· 111 104 }; 112 105 113 106 module_usb_serial_driver(serial_drivers, id_table_combined); 107 + MODULE_DESCRIPTION("USB Debug cable driver"); 114 108 MODULE_LICENSE("GPL v2");
+14
drivers/usb/typec/mux/fsa4480.c
··· 13 13 #include <linux/usb/typec_dp.h> 14 14 #include <linux/usb/typec_mux.h> 15 15 16 + #define FSA4480_DEVICE_ID 0x00 17 + #define FSA4480_DEVICE_ID_VENDOR_ID GENMASK(7, 6) 18 + #define FSA4480_DEVICE_ID_VERSION_ID GENMASK(5, 3) 19 + #define FSA4480_DEVICE_ID_REV_ID GENMASK(2, 0) 16 20 #define FSA4480_SWITCH_ENABLE 0x04 17 21 #define FSA4480_SWITCH_SELECT 0x05 18 22 #define FSA4480_SWITCH_STATUS1 0x07 ··· 255 251 struct typec_switch_desc sw_desc = { }; 256 252 struct typec_mux_desc mux_desc = { }; 257 253 struct fsa4480 *fsa; 254 + int val = 0; 258 255 int ret; 259 256 260 257 fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL); ··· 272 267 fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config); 273 268 if (IS_ERR(fsa->regmap)) 274 269 return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n"); 270 + 271 + ret = regmap_read(fsa->regmap, FSA4480_DEVICE_ID, &val); 272 + if (ret || !val) 273 + return dev_err_probe(dev, -ENODEV, "FSA4480 not found\n"); 274 + 275 + dev_dbg(dev, "Found FSA4480 v%lu.%lu (Vendor ID = %lu)\n", 276 + FIELD_GET(FSA4480_DEVICE_ID_VERSION_ID, val), 277 + FIELD_GET(FSA4480_DEVICE_ID_REV_ID, val), 278 + FIELD_GET(FSA4480_DEVICE_ID_VENDOR_ID, val)); 275 279 276 280 /* Safe mode */ 277 281 fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
+1 -1
drivers/usb/typec/tcpm/tcpci.c
··· 67 67 return regmap_raw_write(tcpci->regmap, reg, &val, sizeof(u16)); 68 68 } 69 69 70 - static bool tcpci_check_std_output_cap(struct regmap *regmap, u8 mask) 70 + static int tcpci_check_std_output_cap(struct regmap *regmap, u8 mask) 71 71 { 72 72 unsigned int reg; 73 73 int ret;
+1 -1
drivers/usb/typec/tcpm/tcpm.c
··· 4515 4515 return ERROR_RECOVERY; 4516 4516 if (port->pwr_role == TYPEC_SOURCE) 4517 4517 return SRC_UNATTACHED; 4518 - if (port->state == SNK_WAIT_CAPABILITIES) 4518 + if (port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) 4519 4519 return SNK_READY; 4520 4520 return SNK_UNATTACHED; 4521 4521 }
+2 -2
drivers/usb/typec/tipd/core.c
··· 1191 1191 dev_info(tps->dev, "Firmware update succeeded\n"); 1192 1192 1193 1193 release_fw: 1194 - release_firmware(fw); 1195 1194 if (ret) { 1196 1195 dev_err(tps->dev, "Failed to write patch %s of %zu bytes\n", 1197 1196 firmware_name, fw->size); 1198 1197 } 1198 + release_firmware(fw); 1199 1199 1200 1200 return ret; 1201 - }; 1201 + } 1202 1202 1203 1203 static int cd321x_init(struct tps6598x *tps) 1204 1204 {
+4 -7
drivers/usb/typec/ucsi/ucsi.c
··· 238 238 mutex_lock(&ucsi->ppm_lock); 239 239 240 240 ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack); 241 - if (cci & UCSI_CCI_BUSY) { 242 - ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false); 243 - return ret ? ret : -EBUSY; 244 - } 245 - 246 - if (cci & UCSI_CCI_ERROR) 247 - return ucsi_read_error(ucsi, connector_num); 241 + if (cci & UCSI_CCI_BUSY) 242 + ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false) ?: -EBUSY; 243 + else if (cci & UCSI_CCI_ERROR) 244 + ret = ucsi_read_error(ucsi, connector_num); 248 245 249 246 mutex_unlock(&ucsi->ppm_lock); 250 247 return ret;
+6 -3
drivers/usb/usbip/vhci_hcd.c
··· 745 745 * 746 746 */ 747 747 if (usb_pipedevice(urb->pipe) == 0) { 748 + struct usb_device *old; 748 749 __u8 type = usb_pipetype(urb->pipe); 749 750 struct usb_ctrlrequest *ctrlreq = 750 751 (struct usb_ctrlrequest *) urb->setup_packet; ··· 756 755 goto no_need_xmit; 757 756 } 758 757 758 + old = vdev->udev; 759 759 switch (ctrlreq->bRequest) { 760 760 case USB_REQ_SET_ADDRESS: 761 761 /* set_address may come when a device is reset */ 762 762 dev_info(dev, "SetAddress Request (%d) to port %d\n", 763 763 ctrlreq->wValue, vdev->rhport); 764 764 765 - usb_put_dev(vdev->udev); 766 765 vdev->udev = usb_get_dev(urb->dev); 766 + usb_put_dev(old); 767 767 768 768 spin_lock(&vdev->ud.lock); 769 769 vdev->ud.status = VDEV_ST_USED; ··· 783 781 usbip_dbg_vhci_hc( 784 782 "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n"); 785 783 786 - usb_put_dev(vdev->udev); 787 784 vdev->udev = usb_get_dev(urb->dev); 785 + usb_put_dev(old); 788 786 goto out; 789 787 790 788 default: ··· 1069 1067 static void vhci_device_reset(struct usbip_device *ud) 1070 1068 { 1071 1069 struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); 1070 + struct usb_device *old = vdev->udev; 1072 1071 unsigned long flags; 1073 1072 1074 1073 spin_lock_irqsave(&ud->lock, flags); ··· 1077 1074 vdev->speed = 0; 1078 1075 vdev->devid = 0; 1079 1076 1080 - usb_put_dev(vdev->udev); 1081 1077 vdev->udev = NULL; 1078 + usb_put_dev(old); 1082 1079 1083 1080 if (ud->tcp_socket) { 1084 1081 sockfd_put(ud->tcp_socket);
+1 -1
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
··· 140 140 val = octep_read_sig(mbox); 141 141 if ((val & 0xFFFF) != MBOX_RSP_SIG) { 142 142 dev_warn(&pdev->dev, "Invalid Signature from mbox : %d response\n", id); 143 - return ret; 143 + return -EINVAL; 144 144 } 145 145 146 146 val = octep_read_sts(mbox);
+1 -7
drivers/vhost/vdpa.c
··· 1481 1481 1482 1482 notify = ops->get_vq_notification(vdpa, index); 1483 1483 1484 - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1485 - if (remap_pfn_range(vma, vmf->address & PAGE_MASK, 1486 - PFN_DOWN(notify.addr), PAGE_SIZE, 1487 - vma->vm_page_prot)) 1488 - return VM_FAULT_SIGBUS; 1489 - 1490 - return VM_FAULT_NOPAGE; 1484 + return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr)); 1491 1485 } 1492 1486 1493 1487 static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+2 -26
drivers/virtio/virtio.c
··· 305 305 if (err) 306 306 goto err; 307 307 308 - if (dev->config->create_avq) { 309 - err = dev->config->create_avq(dev); 310 - if (err) 311 - goto err; 312 - } 313 - 314 308 err = drv->probe(dev); 315 309 if (err) 316 - goto err_probe; 310 + goto err; 317 311 318 312 /* If probe didn't do it, mark device DRIVER_OK ourselves. */ 319 313 if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK)) ··· 320 326 321 327 return 0; 322 328 323 - err_probe: 324 - if (dev->config->destroy_avq) 325 - dev->config->destroy_avq(dev); 326 329 err: 327 330 virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); 328 331 return err; ··· 334 343 virtio_config_disable(dev); 335 344 336 345 drv->remove(dev); 337 - 338 - if (dev->config->destroy_avq) 339 - dev->config->destroy_avq(dev); 340 346 341 347 /* Driver should have reset device. */ 342 348 WARN_ON_ONCE(dev->config->get_status(dev)); ··· 512 524 } 513 525 } 514 526 515 - if (dev->config->destroy_avq) 516 - dev->config->destroy_avq(dev); 517 - 518 527 return 0; 519 528 } 520 529 EXPORT_SYMBOL_GPL(virtio_device_freeze); ··· 547 562 if (ret) 548 563 goto err; 549 564 550 - if (dev->config->create_avq) { 551 - ret = dev->config->create_avq(dev); 552 - if (ret) 553 - goto err; 554 - } 555 - 556 565 if (drv->restore) { 557 566 ret = drv->restore(dev); 558 567 if (ret) 559 - goto err_restore; 568 + goto err; 560 569 } 561 570 562 571 /* If restore didn't do it, mark device DRIVER_OK ourselves. */ ··· 561 582 562 583 return 0; 563 584 564 - err_restore: 565 - if (dev->config->destroy_avq) 566 - dev->config->destroy_avq(dev); 567 585 err: 568 586 virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); 569 587 return ret;
+151 -41
drivers/virtio/virtio_pci_common.c
··· 46 46 return true; 47 47 } 48 48 49 + /* Notify all slow path virtqueues on an interrupt. */ 50 + static void vp_vring_slow_path_interrupt(int irq, 51 + struct virtio_pci_device *vp_dev) 52 + { 53 + struct virtio_pci_vq_info *info; 54 + unsigned long flags; 55 + 56 + spin_lock_irqsave(&vp_dev->lock, flags); 57 + list_for_each_entry(info, &vp_dev->slow_virtqueues, node) 58 + vring_interrupt(irq, info->vq); 59 + spin_unlock_irqrestore(&vp_dev->lock, flags); 60 + } 61 + 49 62 /* Handle a configuration change: Tell driver if it wants to know. */ 50 63 static irqreturn_t vp_config_changed(int irq, void *opaque) 51 64 { 52 65 struct virtio_pci_device *vp_dev = opaque; 53 66 54 67 virtio_config_changed(&vp_dev->vdev); 68 + vp_vring_slow_path_interrupt(irq, vp_dev); 55 69 return IRQ_HANDLED; 56 70 } 57 71 ··· 139 125 GFP_KERNEL)) 140 126 goto error; 141 127 128 + if (!per_vq_vectors) 129 + desc = NULL; 130 + 142 131 if (desc) { 143 132 flags |= PCI_IRQ_AFFINITY; 144 133 desc->pre_vectors++; /* virtio config vector */ ··· 188 171 return err; 189 172 } 190 173 174 + static bool vp_is_slow_path_vector(u16 msix_vec) 175 + { 176 + return msix_vec == VP_MSIX_CONFIG_VECTOR; 177 + } 178 + 191 179 static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index, 192 180 void (*callback)(struct virtqueue *vq), 193 181 const char *name, 194 182 bool ctx, 195 - u16 msix_vec) 183 + u16 msix_vec, 184 + struct virtio_pci_vq_info **p_info) 196 185 { 197 186 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 198 187 struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); ··· 217 194 info->vq = vq; 218 195 if (callback) { 219 196 spin_lock_irqsave(&vp_dev->lock, flags); 220 - list_add(&info->node, &vp_dev->virtqueues); 197 + if (!vp_is_slow_path_vector(msix_vec)) 198 + list_add(&info->node, &vp_dev->virtqueues); 199 + else 200 + list_add(&info->node, &vp_dev->slow_virtqueues); 221 201 spin_unlock_irqrestore(&vp_dev->lock, flags); 222 202 } else { 223 
203 INIT_LIST_HEAD(&info->node); 224 204 } 225 205 226 - vp_dev->vqs[index] = info; 206 + *p_info = info; 227 207 return vq; 228 208 229 209 out_info: ··· 262 236 int i; 263 237 264 238 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { 265 - if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index)) 266 - continue; 267 - 268 239 if (vp_dev->per_vq_vectors) { 269 240 int v = vp_dev->vqs[vq->index]->msix_vector; 270 241 271 - if (v != VIRTIO_MSI_NO_VECTOR) { 242 + if (v != VIRTIO_MSI_NO_VECTOR && 243 + !vp_is_slow_path_vector(v)) { 272 244 int irq = pci_irq_vector(vp_dev->pci_dev, v); 273 245 274 246 irq_update_affinity_hint(irq, NULL); ··· 308 284 vp_dev->vqs = NULL; 309 285 } 310 286 287 + enum vp_vq_vector_policy { 288 + VP_VQ_VECTOR_POLICY_EACH, 289 + VP_VQ_VECTOR_POLICY_SHARED_SLOW, 290 + VP_VQ_VECTOR_POLICY_SHARED, 291 + }; 292 + 293 + static struct virtqueue * 294 + vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx, 295 + vq_callback_t *callback, const char *name, bool ctx, 296 + bool slow_path, int *allocated_vectors, 297 + enum vp_vq_vector_policy vector_policy, 298 + struct virtio_pci_vq_info **p_info) 299 + { 300 + struct virtio_pci_device *vp_dev = to_vp_device(vdev); 301 + struct virtqueue *vq; 302 + u16 msix_vec; 303 + int err; 304 + 305 + if (!callback) 306 + msix_vec = VIRTIO_MSI_NO_VECTOR; 307 + else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH || 308 + (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW && 309 + !slow_path)) 310 + msix_vec = (*allocated_vectors)++; 311 + else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH && 312 + slow_path) 313 + msix_vec = VP_MSIX_CONFIG_VECTOR; 314 + else 315 + msix_vec = VP_MSIX_VQ_VECTOR; 316 + vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec, 317 + p_info); 318 + if (IS_ERR(vq)) 319 + return vq; 320 + 321 + if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED || 322 + msix_vec == VIRTIO_MSI_NO_VECTOR || 323 + vp_is_slow_path_vector(msix_vec)) 324 + return vq; 325 + 326 + /* allocate per-vq 
irq if available and necessary */ 327 + snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names), 328 + "%s-%s", dev_name(&vp_dev->vdev.dev), name); 329 + err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 330 + vring_interrupt, 0, 331 + vp_dev->msix_names[msix_vec], vq); 332 + if (err) { 333 + vp_del_vq(vq); 334 + return ERR_PTR(err); 335 + } 336 + 337 + return vq; 338 + } 339 + 311 340 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs, 312 341 struct virtqueue *vqs[], 313 342 struct virtqueue_info vqs_info[], 314 - bool per_vq_vectors, 343 + enum vp_vq_vector_policy vector_policy, 315 344 struct irq_affinity *desc) 316 345 { 317 346 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 347 + struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq; 318 348 struct virtqueue_info *vqi; 319 - u16 msix_vec; 320 349 int i, err, nvectors, allocated_vectors, queue_idx = 0; 350 + struct virtqueue *vq; 351 + bool per_vq_vectors; 352 + u16 avq_num = 0; 321 353 322 354 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 323 355 if (!vp_dev->vqs) 324 356 return -ENOMEM; 357 + 358 + if (vp_dev->avq_index) { 359 + err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num); 360 + if (err) 361 + goto error_find; 362 + } 363 + 364 + per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED; 325 365 326 366 if (per_vq_vectors) { 327 367 /* Best option: one for change interrupt, one per vq. */ ··· 395 307 if (vqi->name && vqi->callback) 396 308 ++nvectors; 397 309 } 310 + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH) 311 + ++nvectors; 398 312 } else { 399 313 /* Second best: one for change, shared for all vqs. */ 400 314 nvectors = 2; 401 315 } 402 316 403 - err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, 404 - per_vq_vectors ? 
desc : NULL); 317 + err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc); 405 318 if (err) 406 319 goto error_find; 407 320 ··· 414 325 vqs[i] = NULL; 415 326 continue; 416 327 } 417 - 418 - if (!vqi->callback) 419 - msix_vec = VIRTIO_MSI_NO_VECTOR; 420 - else if (vp_dev->per_vq_vectors) 421 - msix_vec = allocated_vectors++; 422 - else 423 - msix_vec = VP_MSIX_VQ_VECTOR; 424 - vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback, 425 - vqi->name, vqi->ctx, msix_vec); 328 + vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback, 329 + vqi->name, vqi->ctx, false, 330 + &allocated_vectors, vector_policy, 331 + &vp_dev->vqs[i]); 426 332 if (IS_ERR(vqs[i])) { 427 333 err = PTR_ERR(vqs[i]); 428 334 goto error_find; 429 335 } 430 - 431 - if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) 432 - continue; 433 - 434 - /* allocate per-vq irq if available and necessary */ 435 - snprintf(vp_dev->msix_names[msix_vec], 436 - sizeof *vp_dev->msix_names, 437 - "%s-%s", 438 - dev_name(&vp_dev->vdev.dev), vqi->name); 439 - err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 440 - vring_interrupt, 0, 441 - vp_dev->msix_names[msix_vec], 442 - vqs[i]); 443 - if (err) { 444 - vp_del_vq(vqs[i]); 445 - goto error_find; 446 - } 447 336 } 337 + 338 + if (!avq_num) 339 + return 0; 340 + sprintf(avq->name, "avq.%u", avq->vq_index); 341 + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done, 342 + avq->name, false, true, &allocated_vectors, 343 + vector_policy, &vp_dev->admin_vq.info); 344 + if (IS_ERR(vq)) { 345 + err = PTR_ERR(vq); 346 + goto error_find; 347 + } 348 + 448 349 return 0; 449 350 450 351 error_find: ··· 447 368 struct virtqueue_info vqs_info[]) 448 369 { 449 370 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 371 + struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq; 450 372 int i, err, queue_idx = 0; 373 + struct virtqueue *vq; 374 + u16 avq_num = 0; 451 375 452 376 vp_dev->vqs = kcalloc(nvqs, 
sizeof(*vp_dev->vqs), GFP_KERNEL); 453 377 if (!vp_dev->vqs) 454 378 return -ENOMEM; 379 + 380 + if (vp_dev->avq_index) { 381 + err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num); 382 + if (err) 383 + goto out_del_vqs; 384 + } 455 385 456 386 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, 457 387 dev_name(&vdev->dev), vp_dev); ··· 478 390 } 479 391 vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback, 480 392 vqi->name, vqi->ctx, 481 - VIRTIO_MSI_NO_VECTOR); 393 + VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]); 482 394 if (IS_ERR(vqs[i])) { 483 395 err = PTR_ERR(vqs[i]); 484 396 goto out_del_vqs; 485 397 } 398 + } 399 + 400 + if (!avq_num) 401 + return 0; 402 + sprintf(avq->name, "avq.%u", avq->vq_index); 403 + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name, 404 + false, VIRTIO_MSI_NO_VECTOR, 405 + &vp_dev->admin_vq.info); 406 + if (IS_ERR(vq)) { 407 + err = PTR_ERR(vq); 408 + goto out_del_vqs; 486 409 } 487 410 488 411 return 0; ··· 510 411 int err; 511 412 512 413 /* Try MSI-X with one vector per queue. */ 513 - err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, true, desc); 414 + err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, 415 + VP_VQ_VECTOR_POLICY_EACH, desc); 416 + if (!err) 417 + return 0; 418 + /* Fallback: MSI-X with one shared vector for config and 419 + * slow path queues, one vector per queue for the rest. 420 + */ 421 + err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, 422 + VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc); 514 423 if (!err) 515 424 return 0; 516 425 /* Fallback: MSI-X with one vector for config, one shared for queues. */ 517 - err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, false, desc); 426 + err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, 427 + VP_VQ_VECTOR_POLICY_SHARED, desc); 518 428 if (!err) 519 429 return 0; 520 430 /* Is there an interrupt? If not give up. 
*/ ··· 574 466 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 575 467 576 468 if (!vp_dev->per_vq_vectors || 577 - vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR) 469 + vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR || 470 + vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector)) 578 471 return NULL; 579 472 580 473 return pci_irq_get_affinity(vp_dev->pci_dev, ··· 683 574 vp_dev->vdev.dev.release = virtio_pci_release_dev; 684 575 vp_dev->pci_dev = pci_dev; 685 576 INIT_LIST_HEAD(&vp_dev->virtqueues); 577 + INIT_LIST_HEAD(&vp_dev->slow_virtqueues); 686 578 spin_lock_init(&vp_dev->lock); 687 579 688 580 /* enable the device */
+10 -6
drivers/virtio/virtio_pci_common.h
··· 35 35 /* the actual virtqueue */ 36 36 struct virtqueue *vq; 37 37 38 - /* the list node for the virtqueues list */ 38 + /* the list node for the virtqueues or slow_virtqueues list */ 39 39 struct list_head node; 40 40 41 41 /* MSI-X vector (or none) */ ··· 44 44 45 45 struct virtio_pci_admin_vq { 46 46 /* Virtqueue info associated with this admin queue. */ 47 - struct virtio_pci_vq_info info; 48 - /* serializing admin commands execution and virtqueue deletion */ 49 - struct mutex cmd_lock; 47 + struct virtio_pci_vq_info *info; 48 + /* Protects virtqueue access. */ 49 + spinlock_t lock; 50 50 u64 supported_cmds; 51 51 /* Name of the admin queue: avq.$vq_index. */ 52 52 char name[10]; ··· 66 66 /* Where to read and clear interrupt */ 67 67 u8 __iomem *isr; 68 68 69 - /* a list of queues so we can dispatch IRQs */ 69 + /* Lists of queues and potentially slow path queues 70 + * so we can dispatch IRQs. 71 + */ 70 72 spinlock_t lock; 71 73 struct list_head virtqueues; 74 + struct list_head slow_virtqueues; 72 75 73 76 /* Array of all virtqueues reported in the 74 77 * PCI common config num_queues field ··· 105 102 void (*del_vq)(struct virtio_pci_vq_info *info); 106 103 107 104 u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); 108 - bool (*is_avq)(struct virtio_device *vdev, unsigned int index); 105 + int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num); 109 106 }; 110 107 111 108 /* Constants for MSI-X */ ··· 178 175 #define VIRTIO_ADMIN_CMD_BITMAP 0 179 176 #endif 180 177 178 + void vp_modern_avq_done(struct virtqueue *vq); 181 179 int vp_modern_admin_cmd_exec(struct virtio_device *vdev, 182 180 struct virtio_admin_cmd *cmd); 183 181
+75 -86
drivers/virtio/virtio_pci_modern.c
··· 28 28 return vp_modern_get_features(&vp_dev->mdev); 29 29 } 30 30 31 + static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num) 32 + { 33 + struct virtio_pci_device *vp_dev = to_vp_device(vdev); 34 + 35 + *num = 0; 36 + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) 37 + return 0; 38 + 39 + *num = vp_modern_avq_num(&vp_dev->mdev); 40 + if (!(*num)) 41 + return -EINVAL; 42 + *index = vp_modern_avq_index(&vp_dev->mdev); 43 + return 0; 44 + } 45 + 31 46 static bool vp_is_avq(struct virtio_device *vdev, unsigned int index) 32 47 { 33 48 struct virtio_pci_device *vp_dev = to_vp_device(vdev); ··· 53 38 return index == vp_dev->admin_vq.vq_index; 54 39 } 55 40 41 + void vp_modern_avq_done(struct virtqueue *vq) 42 + { 43 + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 44 + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq; 45 + struct virtio_admin_cmd *cmd; 46 + unsigned long flags; 47 + unsigned int len; 48 + 49 + spin_lock_irqsave(&admin_vq->lock, flags); 50 + do { 51 + virtqueue_disable_cb(vq); 52 + while ((cmd = virtqueue_get_buf(vq, &len))) 53 + complete(&cmd->completion); 54 + } while (!virtqueue_enable_cb(vq)); 55 + spin_unlock_irqrestore(&admin_vq->lock, flags); 56 + } 57 + 56 58 static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq, 57 59 u16 opcode, 58 60 struct scatterlist **sgs, 59 61 unsigned int out_num, 60 62 unsigned int in_num, 61 - void *data) 63 + struct virtio_admin_cmd *cmd) 62 64 { 63 65 struct virtqueue *vq; 64 - int ret, len; 66 + unsigned long flags; 67 + int ret; 65 68 66 - vq = admin_vq->info.vq; 69 + vq = admin_vq->info->vq; 67 70 if (!vq) 68 71 return -EIO; 69 72 ··· 90 57 !((1ULL << opcode) & admin_vq->supported_cmds)) 91 58 return -EOPNOTSUPP; 92 59 93 - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL); 94 - if (ret < 0) 95 - return -EIO; 60 + init_completion(&cmd->completion); 96 61 97 - if (unlikely(!virtqueue_kick(vq))) 98 - return -EIO; 99 - 100 - while 
(!virtqueue_get_buf(vq, &len) && 101 - !virtqueue_is_broken(vq)) 102 - cpu_relax(); 103 - 62 + again: 104 63 if (virtqueue_is_broken(vq)) 105 64 return -EIO; 106 65 107 - return 0; 66 + spin_lock_irqsave(&admin_vq->lock, flags); 67 + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL); 68 + if (ret < 0) { 69 + if (ret == -ENOSPC) { 70 + spin_unlock_irqrestore(&admin_vq->lock, flags); 71 + cpu_relax(); 72 + goto again; 73 + } 74 + goto unlock_err; 75 + } 76 + if (!virtqueue_kick(vq)) 77 + goto unlock_err; 78 + spin_unlock_irqrestore(&admin_vq->lock, flags); 79 + 80 + wait_for_completion(&cmd->completion); 81 + 82 + return cmd->ret; 83 + 84 + unlock_err: 85 + spin_unlock_irqrestore(&admin_vq->lock, flags); 86 + return -EIO; 108 87 } 109 88 110 89 int vp_modern_admin_cmd_exec(struct virtio_device *vdev, ··· 167 122 in_num++; 168 123 } 169 124 170 - mutex_lock(&vp_dev->admin_vq.cmd_lock); 171 125 ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq, 172 126 le16_to_cpu(cmd->opcode), 173 - sgs, out_num, in_num, sgs); 174 - mutex_unlock(&vp_dev->admin_vq.cmd_lock); 175 - 127 + sgs, out_num, in_num, cmd); 176 128 if (ret) { 177 129 dev_err(&vdev->dev, 178 130 "Failed to execute command on admin vq: %d\n.", ret); ··· 230 188 231 189 static void vp_modern_avq_activate(struct virtio_device *vdev) 232 190 { 233 - struct virtio_pci_device *vp_dev = to_vp_device(vdev); 234 - struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq; 235 - 236 191 if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) 237 192 return; 238 193 239 - __virtqueue_unbreak(admin_vq->info.vq); 240 194 virtio_pci_admin_cmd_list_init(vdev); 241 195 } 242 196 243 - static void vp_modern_avq_deactivate(struct virtio_device *vdev) 197 + static void vp_modern_avq_cleanup(struct virtio_device *vdev) 244 198 { 245 199 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 246 - struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq; 200 + struct virtio_admin_cmd *cmd; 201 + struct virtqueue *vq; 247 202 
248 203 if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) 249 204 return; 250 205 251 - __virtqueue_break(admin_vq->info.vq); 206 + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq; 207 + if (!vq) 208 + return; 209 + 210 + while ((cmd = virtqueue_detach_unused_buf(vq))) { 211 + cmd->ret = -EIO; 212 + complete(&cmd->completion); 213 + } 252 214 } 253 215 254 216 static void vp_transport_features(struct virtio_device *vdev, u64 features) ··· 449 403 while (vp_modern_get_status(mdev)) 450 404 msleep(1); 451 405 452 - vp_modern_avq_deactivate(vdev); 406 + vp_modern_avq_cleanup(vdev); 453 407 454 408 /* Flush pending VQ/configuration callbacks. */ 455 409 vp_synchronize_vectors(vdev); ··· 598 552 if (index >= vp_modern_get_num_queues(mdev) && !is_avq) 599 553 return ERR_PTR(-EINVAL); 600 554 601 - num = is_avq ? 602 - VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index); 555 + num = vp_modern_get_queue_size(mdev, index); 603 556 /* Check if queue is either not available or already active. 
*/ 604 557 if (!num || vp_modern_get_queue_enable(mdev, index)) 605 558 return ERR_PTR(-ENOENT); ··· 623 578 if (!vq->priv) { 624 579 err = -ENOMEM; 625 580 goto err; 626 - } 627 - 628 - if (is_avq) { 629 - mutex_lock(&vp_dev->admin_vq.cmd_lock); 630 - vp_dev->admin_vq.info.vq = vq; 631 - mutex_unlock(&vp_dev->admin_vq.cmd_lock); 632 581 } 633 582 634 583 return vq; ··· 658 619 struct virtqueue *vq = info->vq; 659 620 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 660 621 struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 661 - 662 - if (vp_is_avq(&vp_dev->vdev, vq->index)) { 663 - mutex_lock(&vp_dev->admin_vq.cmd_lock); 664 - vp_dev->admin_vq.info.vq = NULL; 665 - mutex_unlock(&vp_dev->admin_vq.cmd_lock); 666 - } 667 622 668 623 if (vp_dev->msix_enabled) 669 624 vp_modern_queue_vector(mdev, vq->index, ··· 768 735 return true; 769 736 } 770 737 771 - static int vp_modern_create_avq(struct virtio_device *vdev) 772 - { 773 - struct virtio_pci_device *vp_dev = to_vp_device(vdev); 774 - struct virtio_pci_admin_vq *avq; 775 - struct virtqueue *vq; 776 - u16 admin_q_num; 777 - 778 - if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) 779 - return 0; 780 - 781 - admin_q_num = vp_modern_avq_num(&vp_dev->mdev); 782 - if (!admin_q_num) 783 - return -EINVAL; 784 - 785 - avq = &vp_dev->admin_vq; 786 - avq->vq_index = vp_modern_avq_index(&vp_dev->mdev); 787 - sprintf(avq->name, "avq.%u", avq->vq_index); 788 - vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL, 789 - avq->name, NULL, VIRTIO_MSI_NO_VECTOR); 790 - if (IS_ERR(vq)) { 791 - dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld", 792 - PTR_ERR(vq)); 793 - return PTR_ERR(vq); 794 - } 795 - 796 - vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true); 797 - return 0; 798 - } 799 - 800 - static void vp_modern_destroy_avq(struct virtio_device *vdev) 801 - { 802 - struct virtio_pci_device *vp_dev = to_vp_device(vdev); 803 - 804 - if (!virtio_has_feature(vdev, 
VIRTIO_F_ADMIN_VQ)) 805 - return; 806 - 807 - vp_dev->del_vq(&vp_dev->admin_vq.info); 808 - } 809 - 810 738 static const struct virtio_config_ops virtio_pci_config_nodev_ops = { 811 739 .get = NULL, 812 740 .set = NULL, ··· 786 792 .get_shm_region = vp_get_shm_region, 787 793 .disable_vq_and_reset = vp_modern_disable_vq_and_reset, 788 794 .enable_vq_after_reset = vp_modern_enable_vq_after_reset, 789 - .create_avq = vp_modern_create_avq, 790 - .destroy_avq = vp_modern_destroy_avq, 791 795 }; 792 796 793 797 static const struct virtio_config_ops virtio_pci_config_ops = { ··· 806 814 .get_shm_region = vp_get_shm_region, 807 815 .disable_vq_and_reset = vp_modern_disable_vq_and_reset, 808 816 .enable_vq_after_reset = vp_modern_enable_vq_after_reset, 809 - .create_avq = vp_modern_create_avq, 810 - .destroy_avq = vp_modern_destroy_avq, 811 817 }; 812 818 813 819 /* the PCI probing function */ ··· 829 839 vp_dev->config_vector = vp_config_vector; 830 840 vp_dev->setup_vq = setup_vq; 831 841 vp_dev->del_vq = del_vq; 832 - vp_dev->is_avq = vp_is_avq; 842 + vp_dev->avq_index = vp_avq_index; 833 843 vp_dev->isr = mdev->isr; 834 844 vp_dev->vdev.id = mdev->id; 835 845 836 - mutex_init(&vp_dev->admin_vq.cmd_lock); 846 + spin_lock_init(&vp_dev->admin_vq.lock); 837 847 return 0; 838 848 } 839 849 ··· 841 851 { 842 852 struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 843 853 844 - mutex_destroy(&vp_dev->admin_vq.cmd_lock); 845 854 vp_modern_remove(mdev); 846 855 }
+7 -4
fs/bcachefs/acl.c
··· 272 272 return xattr; 273 273 } 274 274 275 - struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap, 276 - struct dentry *dentry, int type) 275 + struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu) 277 276 { 278 - struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); 277 + struct bch_inode_info *inode = to_bch_ei(vinode); 279 278 struct bch_fs *c = inode->v.i_sb->s_fs_info; 280 279 struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode); 281 280 struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0); 282 - struct btree_trans *trans = bch2_trans_get(c); 283 281 struct btree_iter iter = { NULL }; 284 282 struct posix_acl *acl = NULL; 283 + 284 + if (rcu) 285 + return ERR_PTR(-ECHILD); 286 + 287 + struct btree_trans *trans = bch2_trans_get(c); 285 288 retry: 286 289 bch2_trans_begin(trans); 287 290
+1 -1
fs/bcachefs/acl.h
··· 28 28 29 29 #ifdef CONFIG_BCACHEFS_POSIX_ACL 30 30 31 - struct posix_acl *bch2_get_acl(struct mnt_idmap *, struct dentry *, int); 31 + struct posix_acl *bch2_get_acl(struct inode *, int, bool); 32 32 33 33 int bch2_set_acl_trans(struct btree_trans *, subvol_inum, 34 34 struct bch_inode_unpacked *,
+10 -2
fs/bcachefs/alloc_background.h
··· 82 82 bucket_data_type(bucket) != bucket_data_type(ptr); 83 83 } 84 84 85 + /* 86 + * It is my general preference to use unsigned types for unsigned quantities - 87 + * however, these helpers are used in disk accounting calculations run by 88 + * triggers where the output will be negated and added to an s64. unsigned is 89 + * right out even though all these quantities will fit in 32 bits, since it 90 + * won't be sign extended correctly; u64 will negate "correctly", but s64 is the 91 + * simpler option here. 92 + */ 85 93 static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a) 86 94 { 87 95 return a.stripe_sectors + a.dirty_sectors + a.cached_sectors; ··· 174 166 * avoid overflowing LRU_TIME_BITS on a corrupted fs, when 175 167 * bucket_sectors_dirty is (much) bigger than bucket_size 176 168 */ 177 - u64 d = min(bch2_bucket_sectors_dirty(a), 178 - ca->mi.bucket_size); 169 + u64 d = min_t(s64, bch2_bucket_sectors_dirty(a), 170 + ca->mi.bucket_size); 179 171 180 172 return div_u64(d * (1ULL << 31), ca->mi.bucket_size); 181 173 }
+29 -5
fs/bcachefs/alloc_foreground.c
··· 1603 1603 prt_newline(out); 1604 1604 } 1605 1605 1606 - void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c) 1606 + void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c, 1607 + struct bch_dev *ca) 1607 1608 { 1608 1609 struct open_bucket *ob; 1609 1610 ··· 1614 1613 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); 1615 1614 ob++) { 1616 1615 spin_lock(&ob->lock); 1617 - if (ob->valid && !ob->on_partial_list) 1616 + if (ob->valid && !ob->on_partial_list && 1617 + (!ca || ob->dev == ca->dev_idx)) 1618 1618 bch2_open_bucket_to_text(out, c, ob); 1619 1619 spin_unlock(&ob->lock); 1620 1620 } ··· 1740 1738 printbuf_tabstop_push(out, 16); 1741 1739 printbuf_tabstop_push(out, 16); 1742 1740 1743 - bch2_dev_usage_to_text(out, &stats); 1741 + bch2_dev_usage_to_text(out, ca, &stats); 1744 1742 1745 1743 prt_newline(out); 1746 1744 ··· 1758 1756 prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats)); 1759 1757 } 1760 1758 1761 - void bch2_print_allocator_stuck(struct bch_fs *c) 1759 + static noinline void bch2_print_allocator_stuck(struct bch_fs *c) 1762 1760 { 1763 1761 struct printbuf buf = PRINTBUF; 1764 1762 1765 - prt_printf(&buf, "Allocator stuck? Waited for 10 seconds\n"); 1763 + prt_printf(&buf, "Allocator stuck? 
Waited for %u seconds\n", 1764 + c->opts.allocator_stuck_timeout); 1766 1765 1767 1766 prt_printf(&buf, "Allocator debug:\n"); 1768 1767 printbuf_indent_add(&buf, 2); ··· 1792 1789 1793 1790 bch2_print_string_as_lines(KERN_ERR, buf.buf); 1794 1791 printbuf_exit(&buf); 1792 + } 1793 + 1794 + static inline unsigned allocator_wait_timeout(struct bch_fs *c) 1795 + { 1796 + if (c->allocator_last_stuck && 1797 + time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies)) 1798 + return 0; 1799 + 1800 + return c->opts.allocator_stuck_timeout * HZ; 1801 + } 1802 + 1803 + void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl) 1804 + { 1805 + unsigned t = allocator_wait_timeout(c); 1806 + 1807 + if (t && closure_sync_timeout(cl, t)) { 1808 + c->allocator_last_stuck = jiffies; 1809 + bch2_print_allocator_stuck(c); 1810 + } 1811 + 1812 + closure_sync(cl); 1795 1813 }
+7 -2
fs/bcachefs/alloc_foreground.h
··· 223 223 void bch2_fs_allocator_foreground_init(struct bch_fs *); 224 224 225 225 void bch2_open_bucket_to_text(struct printbuf *, struct bch_fs *, struct open_bucket *); 226 - void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *); 226 + void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *, struct bch_dev *); 227 227 void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *); 228 228 229 229 void bch2_write_points_to_text(struct printbuf *, struct bch_fs *); ··· 231 231 void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *); 232 232 void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *); 233 233 234 - void bch2_print_allocator_stuck(struct bch_fs *); 234 + void __bch2_wait_on_allocator(struct bch_fs *, struct closure *); 235 + static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl) 236 + { 237 + if (cl->closure_get_happened) 238 + __bch2_wait_on_allocator(c, cl); 239 + } 235 240 236 241 #endif /* _BCACHEFS_ALLOC_FOREGROUND_H */
+2
fs/bcachefs/bcachefs.h
··· 893 893 struct bch_fs_usage_base __percpu *usage; 894 894 u64 __percpu *online_reserved; 895 895 896 + unsigned long allocator_last_stuck; 897 + 896 898 struct io_clock io_clock[2]; 897 899 898 900 /* JOURNAL SEQ BLACKLIST */
+4 -1
fs/bcachefs/bcachefs_format.h
··· 675 675 x(btree_subvolume_children, BCH_VERSION(1, 6)) \ 676 676 x(mi_btree_bitmap, BCH_VERSION(1, 7)) \ 677 677 x(bucket_stripe_sectors, BCH_VERSION(1, 8)) \ 678 - x(disk_accounting_v2, BCH_VERSION(1, 9)) 678 + x(disk_accounting_v2, BCH_VERSION(1, 9)) \ 679 + x(disk_accounting_v3, BCH_VERSION(1, 10)) 679 680 680 681 enum bcachefs_metadata_version { 681 682 bcachefs_metadata_version_min = 9, ··· 837 836 838 837 LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE, 839 838 struct bch_sb, flags[5], 0, 16); 839 + LE64_BITMASK(BCH_SB_ALLOCATOR_STUCK_TIMEOUT, 840 + struct bch_sb, flags[5], 16, 32); 840 841 841 842 static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb) 842 843 {
+5
fs/bcachefs/btree_iter.c
··· 1921 1921 bch2_trans_verify_not_in_restart(trans); 1922 1922 bch2_btree_iter_verify(iter); 1923 1923 1924 + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); 1925 + if (ret) 1926 + goto err; 1927 + 1928 + 1924 1929 struct btree_path *path = btree_iter_path(trans, iter); 1925 1930 1926 1931 /* already at end? */
+1 -1
fs/bcachefs/btree_update_interior.c
··· 1264 1264 ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl); 1265 1265 1266 1266 bch2_trans_unlock(trans); 1267 - closure_sync(&cl); 1267 + bch2_wait_on_allocator(c, &cl); 1268 1268 } while (bch2_err_matches(ret, BCH_ERR_operation_blocked)); 1269 1269 } 1270 1270
+8 -4
fs/bcachefs/buckets.c
··· 71 71 return ret; 72 72 } 73 73 74 - void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage) 74 + void bch2_dev_usage_to_text(struct printbuf *out, 75 + struct bch_dev *ca, 76 + struct bch_dev_usage *usage) 75 77 { 76 78 prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n"); 77 79 78 80 for (unsigned i = 0; i < BCH_DATA_NR; i++) { 79 81 bch2_prt_data_type(out, i); 80 82 prt_printf(out, "\t%llu\r%llu\r%llu\r\n", 81 - usage->d[i].buckets, 82 - usage->d[i].sectors, 83 - usage->d[i].fragmented); 83 + usage->d[i].buckets, 84 + usage->d[i].sectors, 85 + usage->d[i].fragmented); 84 86 } 87 + 88 + prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets); 85 89 } 86 90 87 91 static int bch2_check_fix_ptr(struct btree_trans *trans,
+1 -1
fs/bcachefs/buckets.h
··· 212 212 return ret; 213 213 } 214 214 215 - void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *); 215 + void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *); 216 216 217 217 static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark) 218 218 {
+64 -1
fs/bcachefs/disk_accounting.c
··· 114 114 return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc); 115 115 } 116 116 117 + static inline bool is_zero(char *start, char *end) 118 + { 119 + BUG_ON(start > end); 120 + 121 + for (; start < end; start++) 122 + if (*start) 123 + return false; 124 + return true; 125 + } 126 + 127 + #define field_end(p, member) (((void *) (&p.member)) + sizeof(p.member)) 128 + 117 129 int bch2_accounting_invalid(struct bch_fs *c, struct bkey_s_c k, 118 130 enum bch_validate_flags flags, 119 131 struct printbuf *err) 120 132 { 121 - return 0; 133 + struct disk_accounting_pos acc_k; 134 + bpos_to_disk_accounting_pos(&acc_k, k.k->p); 135 + void *end = &acc_k + 1; 136 + int ret = 0; 137 + 138 + switch (acc_k.type) { 139 + case BCH_DISK_ACCOUNTING_nr_inodes: 140 + end = field_end(acc_k, nr_inodes); 141 + break; 142 + case BCH_DISK_ACCOUNTING_persistent_reserved: 143 + end = field_end(acc_k, persistent_reserved); 144 + break; 145 + case BCH_DISK_ACCOUNTING_replicas: 146 + bkey_fsck_err_on(!acc_k.replicas.nr_devs, 147 + c, err, accounting_key_replicas_nr_devs_0, 148 + "accounting key replicas entry with nr_devs=0"); 149 + 150 + bkey_fsck_err_on(acc_k.replicas.nr_required > acc_k.replicas.nr_devs || 151 + (acc_k.replicas.nr_required > 1 && 152 + acc_k.replicas.nr_required == acc_k.replicas.nr_devs), 153 + c, err, accounting_key_replicas_nr_required_bad, 154 + "accounting key replicas entry with bad nr_required"); 155 + 156 + for (unsigned i = 0; i + 1 < acc_k.replicas.nr_devs; i++) 157 + bkey_fsck_err_on(acc_k.replicas.devs[i] > acc_k.replicas.devs[i + 1], 158 + c, err, accounting_key_replicas_devs_unsorted, 159 + "accounting key replicas entry with unsorted devs"); 160 + 161 + end = (void *) &acc_k.replicas + replicas_entry_bytes(&acc_k.replicas); 162 + break; 163 + case BCH_DISK_ACCOUNTING_dev_data_type: 164 + end = field_end(acc_k, dev_data_type); 165 + break; 166 + case BCH_DISK_ACCOUNTING_compression: 167 + end = field_end(acc_k, compression); 168 + break; 169 + 
case BCH_DISK_ACCOUNTING_snapshot: 170 + end = field_end(acc_k, snapshot); 171 + break; 172 + case BCH_DISK_ACCOUNTING_btree: 173 + end = field_end(acc_k, btree); 174 + break; 175 + case BCH_DISK_ACCOUNTING_rebalance_work: 176 + end = field_end(acc_k, rebalance_work); 177 + break; 178 + } 179 + 180 + bkey_fsck_err_on(!is_zero(end, (void *) (&acc_k + 1)), 181 + c, err, accounting_key_junk_at_end, 182 + "junk at end of accounting key"); 183 + fsck_err: 184 + return ret; 122 185 } 123 186 124 187 void bch2_accounting_key_to_text(struct printbuf *out, struct disk_accounting_pos *k)
+7 -8
fs/bcachefs/disk_accounting_format.h
··· 124 124 __u8 data_type; 125 125 }; 126 126 127 - struct bch_dev_stripe_buckets { 128 - __u8 dev; 129 - }; 130 - 131 127 struct bch_acct_compression { 132 128 __u8 type; 133 129 }; 134 130 135 131 struct bch_acct_snapshot { 136 132 __u32 id; 137 - }; 133 + } __packed; 138 134 139 135 struct bch_acct_btree { 140 136 __u32 id; 137 + } __packed; 138 + 139 + struct bch_acct_rebalance_work { 141 140 }; 142 141 143 142 struct disk_accounting_pos { ··· 148 149 struct bch_persistent_reserved persistent_reserved; 149 150 struct bch_replicas_entry_v1 replicas; 150 151 struct bch_dev_data_type dev_data_type; 151 - struct bch_dev_stripe_buckets dev_stripe_buckets; 152 152 struct bch_acct_compression compression; 153 153 struct bch_acct_snapshot snapshot; 154 154 struct bch_acct_btree btree; 155 - }; 156 - }; 155 + struct bch_acct_rebalance_work rebalance_work; 156 + } __packed; 157 + } __packed; 157 158 struct bpos _pad; 158 159 }; 159 160 };
+23 -11
fs/bcachefs/ec.c
··· 1809 1809 BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity); 1810 1810 BUG_ON(v->nr_redundant != h->s->nr_parity); 1811 1811 1812 + /* * We bypass the sector allocator which normally does this: */ 1813 + bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX); 1814 + 1812 1815 for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) { 1813 1816 __clear_bit(v->ptrs[i].dev, devs.d); 1814 1817 if (i < h->s->nr_data) ··· 2238 2235 mutex_unlock(&c->ec_stripes_heap_lock); 2239 2236 } 2240 2237 2238 + static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c, 2239 + struct ec_stripe_new *s) 2240 + { 2241 + prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs", 2242 + s->idx, s->nr_data, s->nr_parity, 2243 + bitmap_weight(s->blocks_allocated, s->nr_data), 2244 + atomic_read(&s->ref[STRIPE_REF_io]), 2245 + atomic_read(&s->ref[STRIPE_REF_stripe]), 2246 + bch2_watermarks[s->h->watermark]); 2247 + 2248 + struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v; 2249 + unsigned i; 2250 + for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) 2251 + prt_printf(out, " %u", s->blocks[i]); 2252 + prt_newline(out); 2253 + } 2254 + 2241 2255 void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c) 2242 2256 { 2243 2257 struct ec_stripe_head *h; ··· 2267 2247 bch2_watermarks[h->watermark]); 2268 2248 2269 2249 if (h->s) 2270 - prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n", 2271 - h->s->idx, h->s->nr_data, h->s->nr_parity, 2272 - bitmap_weight(h->s->blocks_allocated, 2273 - h->s->nr_data)); 2250 + bch2_new_stripe_to_text(out, c, h->s); 2274 2251 } 2275 2252 mutex_unlock(&c->ec_stripe_head_lock); 2276 2253 2277 2254 prt_printf(out, "in flight:\n"); 2278 2255 2279 2256 mutex_lock(&c->ec_stripe_new_lock); 2280 - list_for_each_entry(s, &c->ec_stripe_new_list, list) { 2281 - prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n", 2282 - s->idx, s->nr_data, s->nr_parity, 2283 - 
atomic_read(&s->ref[STRIPE_REF_io]), 2284 - atomic_read(&s->ref[STRIPE_REF_stripe]), 2285 - bch2_watermarks[s->h->watermark]); 2286 - } 2257 + list_for_each_entry(s, &c->ec_stripe_new_list, list) 2258 + bch2_new_stripe_to_text(out, c, s); 2287 2259 mutex_unlock(&c->ec_stripe_new_lock); 2288 2260 } 2289 2261
+4 -4
fs/bcachefs/fs.c
··· 1199 1199 .fiemap = bch2_fiemap, 1200 1200 .listxattr = bch2_xattr_list, 1201 1201 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1202 - .get_acl = bch2_get_acl, 1202 + .get_inode_acl = bch2_get_acl, 1203 1203 .set_acl = bch2_set_acl, 1204 1204 #endif 1205 1205 }; ··· 1219 1219 .tmpfile = bch2_tmpfile, 1220 1220 .listxattr = bch2_xattr_list, 1221 1221 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1222 - .get_acl = bch2_get_acl, 1222 + .get_inode_acl = bch2_get_acl, 1223 1223 .set_acl = bch2_set_acl, 1224 1224 #endif 1225 1225 }; ··· 1241 1241 .setattr = bch2_setattr, 1242 1242 .listxattr = bch2_xattr_list, 1243 1243 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1244 - .get_acl = bch2_get_acl, 1244 + .get_inode_acl = bch2_get_acl, 1245 1245 .set_acl = bch2_set_acl, 1246 1246 #endif 1247 1247 }; ··· 1251 1251 .setattr = bch2_setattr, 1252 1252 .listxattr = bch2_xattr_list, 1253 1253 #ifdef CONFIG_BCACHEFS_POSIX_ACL 1254 - .get_acl = bch2_get_acl, 1254 + .get_inode_acl = bch2_get_acl, 1255 1255 .set_acl = bch2_set_acl, 1256 1256 #endif 1257 1257 };
+1 -5
fs/bcachefs/io_misc.c
··· 126 126 127 127 if (closure_nr_remaining(&cl) != 1) { 128 128 bch2_trans_unlock_long(trans); 129 - 130 - if (closure_sync_timeout(&cl, HZ * 10)) { 131 - bch2_print_allocator_stuck(c); 132 - closure_sync(&cl); 133 - } 129 + bch2_wait_on_allocator(c, &cl); 134 130 } 135 131 136 132 return ret;
+1
fs/bcachefs/io_read.c
··· 406 406 bch2_trans_iter_init(trans, &iter, rbio->data_btree, 407 407 rbio->read_pos, BTREE_ITER_slots); 408 408 retry: 409 + bch2_trans_begin(trans); 409 410 rbio->bio.bi_status = 0; 410 411 411 412 k = bch2_btree_iter_peek_slot(&iter);
+1 -4
fs/bcachefs/io_write.c
··· 1503 1503 if ((op->flags & BCH_WRITE_SYNC) || 1504 1504 (!(op->flags & BCH_WRITE_SUBMITTED) && 1505 1505 !(op->flags & BCH_WRITE_IN_WORKER))) { 1506 - if (closure_sync_timeout(&op->cl, HZ * 10)) { 1507 - bch2_print_allocator_stuck(c); 1508 - closure_sync(&op->cl); 1509 - } 1506 + bch2_wait_on_allocator(c, &op->cl); 1510 1507 1511 1508 __bch2_write_index(op); 1512 1509
+5
fs/bcachefs/opts.h
··· 391 391 OPT_BOOL(), \ 392 392 BCH_SB_JOURNAL_TRANSACTION_NAMES, true, \ 393 393 NULL, "Log transaction function names in journal") \ 394 + x(allocator_stuck_timeout, u16, \ 395 + OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ 396 + OPT_UINT(0, U16_MAX), \ 397 + BCH_SB_ALLOCATOR_STUCK_TIMEOUT, 30, \ 398 + NULL, "Default timeout in seconds for stuck allocator messages")\ 394 399 x(noexcl, u8, \ 395 400 OPT_FS|OPT_MOUNT, \ 396 401 OPT_BOOL(), \
-1
fs/bcachefs/replicas.c
··· 24 24 static void verify_replicas_entry(struct bch_replicas_entry_v1 *e) 25 25 { 26 26 #ifdef CONFIG_BCACHEFS_DEBUG 27 - BUG_ON(e->data_type >= BCH_DATA_NR); 28 27 BUG_ON(!e->nr_devs); 29 28 BUG_ON(e->nr_required > 1 && 30 29 e->nr_required >= e->nr_devs);
+26 -1
fs/bcachefs/sb-downgrade.c
··· 61 61 BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 62 62 BCH_FSCK_ERR_dev_usage_sectors_wrong, \ 63 63 BCH_FSCK_ERR_dev_usage_fragmented_wrong, \ 64 - BCH_FSCK_ERR_accounting_mismatch) 64 + BCH_FSCK_ERR_accounting_mismatch) \ 65 + x(disk_accounting_v3, \ 66 + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 67 + BCH_FSCK_ERR_bkey_version_in_future, \ 68 + BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 69 + BCH_FSCK_ERR_dev_usage_sectors_wrong, \ 70 + BCH_FSCK_ERR_dev_usage_fragmented_wrong, \ 71 + BCH_FSCK_ERR_accounting_mismatch, \ 72 + BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \ 73 + BCH_FSCK_ERR_accounting_key_replicas_nr_required_bad, \ 74 + BCH_FSCK_ERR_accounting_key_replicas_devs_unsorted, \ 75 + BCH_FSCK_ERR_accounting_key_junk_at_end) 65 76 66 77 #define DOWNGRADE_TABLE() \ 67 78 x(bucket_stripe_sectors, \ 68 79 0) \ 69 80 x(disk_accounting_v2, \ 81 + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 82 + BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 83 + BCH_FSCK_ERR_dev_usage_sectors_wrong, \ 84 + BCH_FSCK_ERR_dev_usage_fragmented_wrong, \ 85 + BCH_FSCK_ERR_fs_usage_hidden_wrong, \ 86 + BCH_FSCK_ERR_fs_usage_btree_wrong, \ 87 + BCH_FSCK_ERR_fs_usage_data_wrong, \ 88 + BCH_FSCK_ERR_fs_usage_cached_wrong, \ 89 + BCH_FSCK_ERR_fs_usage_reserved_wrong, \ 90 + BCH_FSCK_ERR_fs_usage_nr_inodes_wrong, \ 91 + BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, \ 92 + BCH_FSCK_ERR_fs_usage_replicas_wrong, \ 93 + BCH_FSCK_ERR_bkey_version_in_future) \ 94 + x(disk_accounting_v3, \ 70 95 BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 71 96 BCH_FSCK_ERR_dev_usage_buckets_wrong, \ 72 97 BCH_FSCK_ERR_dev_usage_sectors_wrong, \
+5 -1
fs/bcachefs/sb-errors_format.h
··· 287 287 x(accounting_replicas_not_marked, 273, 0) \ 288 288 x(invalid_btree_id, 274, 0) \ 289 289 x(alloc_key_io_time_bad, 275, 0) \ 290 - x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX) 290 + x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX) \ 291 + x(accounting_key_junk_at_end, 277, 0) \ 292 + x(accounting_key_replicas_nr_devs_0, 278, 0) \ 293 + x(accounting_key_replicas_nr_required_bad, 279, 0) \ 294 + x(accounting_key_replicas_devs_unsorted, 280, 0) \ 291 295 292 296 enum bch_sb_error_id { 293 297 #define x(t, n, ...) BCH_FSCK_ERR_##t = n,
+4
fs/bcachefs/super-io.c
··· 414 414 415 415 if (!BCH_SB_VERSION_UPGRADE_COMPLETE(sb)) 416 416 SET_BCH_SB_VERSION_UPGRADE_COMPLETE(sb, le16_to_cpu(sb->version)); 417 + 418 + if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2 && 419 + !BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb)) 420 + SET_BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb, 30); 417 421 } 418 422 419 423 for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
-1
fs/bcachefs/super.c
··· 1193 1193 if (ca->kobj.state_in_sysfs) 1194 1194 kobject_del(&ca->kobj); 1195 1195 1196 - kfree(ca->buckets_nouse); 1197 1196 bch2_free_super(&ca->disk_sb); 1198 1197 bch2_dev_allocator_background_exit(ca); 1199 1198 bch2_dev_journal_exit(ca);
+5 -1
fs/bcachefs/sysfs.c
··· 367 367 bch2_stripes_heap_to_text(out, c); 368 368 369 369 if (attr == &sysfs_open_buckets) 370 - bch2_open_buckets_to_text(out, c); 370 + bch2_open_buckets_to_text(out, c, NULL); 371 371 372 372 if (attr == &sysfs_open_buckets_partial) 373 373 bch2_open_buckets_partial_to_text(out, c); ··· 811 811 if (attr == &sysfs_alloc_debug) 812 812 bch2_dev_alloc_debug_to_text(out, ca); 813 813 814 + if (attr == &sysfs_open_buckets) 815 + bch2_open_buckets_to_text(out, c, ca); 816 + 814 817 return 0; 815 818 } 816 819 ··· 895 892 896 893 /* debug: */ 897 894 &sysfs_alloc_debug, 895 + &sysfs_open_buckets, 898 896 NULL 899 897 }; 900 898
+8 -5
fs/btrfs/block-group.c
··· 1223 1223 block_group->space_info->total_bytes -= block_group->length; 1224 1224 block_group->space_info->bytes_readonly -= 1225 1225 (block_group->length - block_group->zone_unusable); 1226 - block_group->space_info->bytes_zone_unusable -= 1227 - block_group->zone_unusable; 1226 + btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info, 1227 + -block_group->zone_unusable); 1228 1228 block_group->space_info->disk_total -= block_group->length * factor; 1229 1229 1230 1230 spin_unlock(&block_group->space_info->lock); ··· 1396 1396 if (btrfs_is_zoned(cache->fs_info)) { 1397 1397 /* Migrate zone_unusable bytes to readonly */ 1398 1398 sinfo->bytes_readonly += cache->zone_unusable; 1399 - sinfo->bytes_zone_unusable -= cache->zone_unusable; 1399 + btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo, 1400 + -cache->zone_unusable); 1400 1401 cache->zone_unusable = 0; 1401 1402 } 1402 1403 cache->ro++; ··· 3057 3056 if (btrfs_is_zoned(cache->fs_info)) { 3058 3057 /* Migrate zone_unusable bytes back */ 3059 3058 cache->zone_unusable = 3060 - (cache->alloc_offset - cache->used) + 3059 + (cache->alloc_offset - cache->used - cache->pinned - 3060 + cache->reserved) + 3061 3061 (cache->length - cache->zone_capacity); 3062 - sinfo->bytes_zone_unusable += cache->zone_unusable; 3062 + btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo, 3063 + cache->zone_unusable); 3063 3064 sinfo->bytes_readonly -= cache->zone_unusable; 3064 3065 } 3065 3066 num_bytes = cache->length - cache->reserved -
+1
fs/btrfs/ctree.h
··· 459 459 void *filldir_buf; 460 460 u64 last_index; 461 461 struct extent_state *llseek_cached_state; 462 + bool fsync_skip_inode_lock; 462 463 }; 463 464 464 465 static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
+28 -10
fs/btrfs/direct-io.c
··· 856 856 * So here we disable page faults in the iov_iter and then retry if we 857 857 * got -EFAULT, faulting in the pages before the retry. 858 858 */ 859 + again: 859 860 from->nofault = true; 860 861 dio = btrfs_dio_write(iocb, from, written); 861 862 from->nofault = false; 862 863 863 - /* 864 - * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync 865 - * iocb, and that needs to lock the inode. So unlock it before calling 866 - * iomap_dio_complete() to avoid a deadlock. 867 - */ 868 - btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); 869 - 870 - if (IS_ERR_OR_NULL(dio)) 864 + if (IS_ERR_OR_NULL(dio)) { 871 865 ret = PTR_ERR_OR_ZERO(dio); 872 - else 866 + } else { 867 + struct btrfs_file_private stack_private = { 0 }; 868 + struct btrfs_file_private *private; 869 + const bool have_private = (file->private_data != NULL); 870 + 871 + if (!have_private) 872 + file->private_data = &stack_private; 873 + 874 + /* 875 + * If we have a synchronous write, we must make sure the fsync 876 + * triggered by the iomap_dio_complete() call below doesn't 877 + * deadlock on the inode lock - we are already holding it and we 878 + * can't call it after unlocking because we may need to complete 879 + * partial writes due to the input buffer (or parts of it) not 880 + * being already faulted in. 881 + */ 882 + private = file->private_data; 883 + private->fsync_skip_inode_lock = true; 873 884 ret = iomap_dio_complete(dio); 885 + private->fsync_skip_inode_lock = false; 886 + 887 + if (!have_private) 888 + file->private_data = NULL; 889 + } 874 890 875 891 /* No increment (+=) because iomap returns a cumulative value. */ 876 892 if (ret > 0) ··· 913 897 } else { 914 898 fault_in_iov_iter_readable(from, left); 915 899 prev_left = left; 916 - goto relock; 900 + goto again; 917 901 } 918 902 } 903 + 904 + btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); 919 905 920 906 /* 921 907 * If 'ret' is -ENOTBLK or we have not written all data, then it means
+2 -1
fs/btrfs/extent-tree.c
··· 2793 2793 readonly = true; 2794 2794 } else if (btrfs_is_zoned(fs_info)) { 2795 2795 /* Need reset before reusing in a zoned block group */ 2796 - space_info->bytes_zone_unusable += len; 2796 + btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info, 2797 + len); 2797 2798 readonly = true; 2798 2799 } 2799 2800 spin_unlock(&cache->lock);
+1 -1
fs/btrfs/extent_map.c
··· 664 664 start_diff = start - em->start; 665 665 em->start = start; 666 666 em->len = end - start; 667 - if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE && !extent_map_is_compressed(em)) 667 + if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) 668 668 em->offset += start_diff; 669 669 return add_extent_mapping(inode, em, 0); 670 670 }
+18 -4
fs/btrfs/file.c
··· 1603 1603 */ 1604 1604 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 1605 1605 { 1606 + struct btrfs_file_private *private = file->private_data; 1606 1607 struct dentry *dentry = file_dentry(file); 1607 1608 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 1608 1609 struct btrfs_root *root = inode->root; ··· 1613 1612 int ret = 0, err; 1614 1613 u64 len; 1615 1614 bool full_sync; 1615 + const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false); 1616 1616 1617 1617 trace_btrfs_sync_file(file, datasync); 1618 1618 ··· 1641 1639 if (ret) 1642 1640 goto out; 1643 1641 1644 - btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP); 1642 + if (skip_ilock) 1643 + down_write(&inode->i_mmap_lock); 1644 + else 1645 + btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP); 1645 1646 1646 1647 atomic_inc(&root->log_batch); 1647 1648 ··· 1668 1663 */ 1669 1664 ret = start_ordered_ops(inode, start, end); 1670 1665 if (ret) { 1671 - btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); 1666 + if (skip_ilock) 1667 + up_write(&inode->i_mmap_lock); 1668 + else 1669 + btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); 1672 1670 goto out; 1673 1671 } 1674 1672 ··· 1796 1788 * file again, but that will end up using the synchronization 1797 1789 * inside btrfs_sync_log to keep things safe. 1798 1790 */ 1799 - btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); 1791 + if (skip_ilock) 1792 + up_write(&inode->i_mmap_lock); 1793 + else 1794 + btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); 1800 1795 1801 1796 if (ret == BTRFS_NO_LOG_SYNC) { 1802 1797 ret = btrfs_end_transaction(trans); ··· 1868 1857 1869 1858 out_release_extents: 1870 1859 btrfs_release_log_ctx_extents(&ctx); 1871 - btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); 1860 + if (skip_ilock) 1861 + up_write(&inode->i_mmap_lock); 1862 + else 1863 + btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); 1872 1864 goto out; 1873 1865 } 1874 1866
+3 -1
fs/btrfs/free-space-cache.c
··· 2723 2723 * If the block group is read-only, we should account freed space into 2724 2724 * bytes_readonly. 2725 2725 */ 2726 - if (!block_group->ro) 2726 + if (!block_group->ro) { 2727 2727 block_group->zone_unusable += to_unusable; 2728 + WARN_ON(block_group->zone_unusable > block_group->length); 2729 + } 2728 2730 spin_unlock(&ctl->tree_lock); 2729 2731 if (!used) { 2730 2732 spin_lock(&block_group->lock);
+21 -7
fs/btrfs/inode.c
··· 714 714 return ret; 715 715 } 716 716 717 - static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset, 718 - u64 end, 717 + static noinline int cow_file_range_inline(struct btrfs_inode *inode, 718 + struct page *locked_page, 719 + u64 offset, u64 end, 719 720 size_t compressed_size, 720 721 int compress_type, 721 722 struct folio *compressed_folio, ··· 740 739 return ret; 741 740 } 742 741 743 - extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached, 742 + if (ret == 0) 743 + locked_page = NULL; 744 + 745 + extent_clear_unlock_delalloc(inode, offset, end, locked_page, &cached, 744 746 clear_flags, 745 747 PAGE_UNLOCK | PAGE_START_WRITEBACK | 746 748 PAGE_END_WRITEBACK); ··· 1047 1043 * extent for the subpage case. 1048 1044 */ 1049 1045 if (total_in < actual_end) 1050 - ret = cow_file_range_inline(inode, start, end, 0, 1046 + ret = cow_file_range_inline(inode, NULL, start, end, 0, 1051 1047 BTRFS_COMPRESS_NONE, NULL, false); 1052 1048 else 1053 - ret = cow_file_range_inline(inode, start, end, total_compressed, 1049 + ret = cow_file_range_inline(inode, NULL, start, end, total_compressed, 1054 1050 compress_type, folios[0], false); 1055 1051 if (ret <= 0) { 1056 1052 if (ret < 0) ··· 1363 1359 1364 1360 if (!no_inline) { 1365 1361 /* lets try to make an inline extent */ 1366 - ret = cow_file_range_inline(inode, start, end, 0, 1362 + ret = cow_file_range_inline(inode, locked_page, start, end, 0, 1367 1363 BTRFS_COMPRESS_NONE, NULL, false); 1368 1364 if (ret <= 0) { 1369 1365 /* ··· 1585 1581 locked_page, &cached, 1586 1582 clear_bits, 1587 1583 page_ops); 1584 + btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL); 1588 1585 start += cur_alloc_size; 1589 1586 } 1590 1587 ··· 1599 1594 clear_bits |= EXTENT_CLEAR_DATA_RESV; 1600 1595 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1601 1596 &cached, clear_bits, page_ops); 1597 + btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL); 1602 1598 } 1603 
1599 return ret; 1604 1600 } ··· 2261 2255 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2262 2256 PAGE_START_WRITEBACK | 2263 2257 PAGE_END_WRITEBACK); 2258 + btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL); 2264 2259 } 2265 2260 btrfs_free_path(path); 2266 2261 return ret; ··· 5667 5660 struct inode *inode; 5668 5661 struct btrfs_root *root = BTRFS_I(dir)->root; 5669 5662 struct btrfs_root *sub_root = root; 5670 - struct btrfs_key location; 5663 + struct btrfs_key location = { 0 }; 5671 5664 u8 di_type = 0; 5672 5665 int ret = 0; 5673 5666 ··· 7203 7196 */ 7204 7197 spin_lock_irq(&subpage->lock); 7205 7198 spin_unlock_irq(&subpage->lock); 7199 + } 7200 + 7201 + static int btrfs_launder_folio(struct folio *folio) 7202 + { 7203 + return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio), 7204 + PAGE_SIZE, NULL); 7206 7205 } 7207 7206 7208 7207 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) ··· 10146 10133 .writepages = btrfs_writepages, 10147 10134 .readahead = btrfs_readahead, 10148 10135 .invalidate_folio = btrfs_invalidate_folio, 10136 + .launder_folio = btrfs_launder_folio, 10149 10137 .release_folio = btrfs_release_folio, 10150 10138 .migrate_folio = btrfs_migrate_folio, 10151 10139 .dirty_folio = filemap_dirty_folio,
+1 -1
fs/btrfs/print-tree.c
··· 14 14 15 15 struct root_name_map { 16 16 u64 id; 17 - char name[16]; 17 + const char *name; 18 18 }; 19 19 20 20 static const struct root_name_map root_map[] = {
+19 -6
fs/btrfs/scrub.c
··· 1648 1648 } 1649 1649 } 1650 1650 1651 + static u32 stripe_length(const struct scrub_stripe *stripe) 1652 + { 1653 + ASSERT(stripe->bg); 1654 + 1655 + return min(BTRFS_STRIPE_LEN, 1656 + stripe->bg->start + stripe->bg->length - stripe->logical); 1657 + } 1658 + 1651 1659 static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx, 1652 1660 struct scrub_stripe *stripe) 1653 1661 { 1654 1662 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 1655 1663 struct btrfs_bio *bbio = NULL; 1656 - unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + 1657 - stripe->bg->length - stripe->logical) >> 1658 - fs_info->sectorsize_bits; 1664 + unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits; 1659 1665 u64 stripe_len = BTRFS_STRIPE_LEN; 1660 1666 int mirror = stripe->mirror_num; 1661 1667 int i; ··· 1735 1729 { 1736 1730 struct btrfs_fs_info *fs_info = sctx->fs_info; 1737 1731 struct btrfs_bio *bbio; 1738 - unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + 1739 - stripe->bg->length - stripe->logical) >> 1740 - fs_info->sectorsize_bits; 1732 + unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits; 1741 1733 int mirror = stripe->mirror_num; 1742 1734 1743 1735 ASSERT(stripe->bg); ··· 1875 1871 stripe = &sctx->stripes[i]; 1876 1872 1877 1873 wait_scrub_stripe_io(stripe); 1874 + spin_lock(&sctx->stat_lock); 1875 + sctx->stat.last_physical = stripe->physical + stripe_length(stripe); 1876 + spin_unlock(&sctx->stat_lock); 1878 1877 scrub_reset_stripe(stripe); 1879 1878 } 1880 1879 out: ··· 2146 2139 cur_physical, &found_logical); 2147 2140 if (ret > 0) { 2148 2141 /* No more extent, just update the accounting */ 2142 + spin_lock(&sctx->stat_lock); 2149 2143 sctx->stat.last_physical = physical + logical_length; 2144 + spin_unlock(&sctx->stat_lock); 2150 2145 ret = 0; 2151 2146 break; 2152 2147 } ··· 2345 2336 stripe_logical += chunk_logical; 2346 2337 ret = scrub_raid56_parity_stripe(sctx, 
scrub_dev, bg, 2347 2338 map, stripe_logical); 2339 + spin_lock(&sctx->stat_lock); 2340 + sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN, 2341 + physical_end); 2342 + spin_unlock(&sctx->stat_lock); 2348 2343 if (ret) 2349 2344 goto out; 2350 2345 goto next;
+2 -3
fs/btrfs/space-info.c
··· 316 316 found->bytes_used += block_group->used; 317 317 found->disk_used += block_group->used * factor; 318 318 found->bytes_readonly += block_group->bytes_super; 319 - found->bytes_zone_unusable += block_group->zone_unusable; 319 + btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable); 320 320 if (block_group->length > 0) 321 321 found->full = 0; 322 322 btrfs_try_granting_tickets(info, found); ··· 583 583 584 584 spin_lock(&cache->lock); 585 585 avail = cache->length - cache->used - cache->pinned - 586 - cache->reserved - cache->delalloc_bytes - 587 - cache->bytes_super - cache->zone_unusable; 586 + cache->reserved - cache->bytes_super - cache->zone_unusable; 588 587 btrfs_info(fs_info, 589 588 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s", 590 589 cache->start, cache->length, cache->used, cache->pinned,
+1
fs/btrfs/space-info.h
··· 249 249 250 250 DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info"); 251 251 DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned"); 252 + DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable"); 252 253 253 254 int btrfs_init_space_info(struct btrfs_fs_info *fs_info); 254 255 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+4 -1
fs/btrfs/super.c
··· 683 683 ret = false; 684 684 685 685 if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) { 686 - if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) 686 + if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) { 687 687 btrfs_info(info, "disk space caching is enabled"); 688 + btrfs_warn(info, 689 + "space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2"); 690 + } 688 691 if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) 689 692 btrfs_info(info, "using free-space-tree"); 690 693 }
+99
fs/btrfs/tests/extent-map-tests.c
··· 900 900 return ret; 901 901 } 902 902 903 + /* 904 + * Test a regression for compressed extent map adjustment when we attempt to 905 + * add an extent map that is partially overlapped by another existing extent 906 + * map. The resulting extent map offset was left unchanged despite having 907 + * incremented its start offset. 908 + */ 909 + static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) 910 + { 911 + struct extent_map_tree *em_tree = &inode->extent_tree; 912 + struct extent_map *em; 913 + int ret; 914 + int ret2; 915 + 916 + em = alloc_extent_map(); 917 + if (!em) { 918 + test_std_err(TEST_ALLOC_EXTENT_MAP); 919 + return -ENOMEM; 920 + } 921 + 922 + /* Compressed extent for the file range [120K, 128K). */ 923 + em->start = SZ_1K * 120; 924 + em->len = SZ_8K; 925 + em->disk_num_bytes = SZ_4K; 926 + em->ram_bytes = SZ_8K; 927 + em->flags |= EXTENT_FLAG_COMPRESS_ZLIB; 928 + write_lock(&em_tree->lock); 929 + ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len); 930 + write_unlock(&em_tree->lock); 931 + free_extent_map(em); 932 + if (ret < 0) { 933 + test_err("couldn't add extent map for range [120K, 128K)"); 934 + goto out; 935 + } 936 + 937 + em = alloc_extent_map(); 938 + if (!em) { 939 + test_std_err(TEST_ALLOC_EXTENT_MAP); 940 + ret = -ENOMEM; 941 + goto out; 942 + } 943 + 944 + /* 945 + * Compressed extent for the file range [108K, 144K), which overlaps 946 + * with the [120K, 128K) we previously inserted. 947 + */ 948 + em->start = SZ_1K * 108; 949 + em->len = SZ_1K * 36; 950 + em->disk_num_bytes = SZ_4K; 951 + em->ram_bytes = SZ_1K * 36; 952 + em->flags |= EXTENT_FLAG_COMPRESS_ZLIB; 953 + 954 + /* 955 + * Try to add the extent map but with a search range of [140K, 144K), 956 + * this should succeed and adjust the extent map to the range 957 + * [128K, 144K), with a length of 16K and an offset of 20K. 
958 + * 959 + * This simulates a scenario where in the subvolume tree of an inode we 960 + * have a compressed file extent item for the range [108K, 144K) and we 961 + * have an overlapping compressed extent map for the range [120K, 128K), 962 + * which was created by an encoded write, but its ordered extent was not 963 + * yet completed, so the subvolume tree doesn't have yet the file extent 964 + * item for that range - we only have the extent map in the inode's 965 + * extent map tree. 966 + */ 967 + write_lock(&em_tree->lock); 968 + ret = btrfs_add_extent_mapping(inode, &em, SZ_1K * 140, SZ_4K); 969 + write_unlock(&em_tree->lock); 970 + free_extent_map(em); 971 + if (ret < 0) { 972 + test_err("couldn't add extent map for range [108K, 144K)"); 973 + goto out; 974 + } 975 + 976 + if (em->start != SZ_128K) { 977 + test_err("unexpected extent map start %llu (should be 128K)", em->start); 978 + ret = -EINVAL; 979 + goto out; 980 + } 981 + if (em->len != SZ_16K) { 982 + test_err("unexpected extent map length %llu (should be 16K)", em->len); 983 + ret = -EINVAL; 984 + goto out; 985 + } 986 + if (em->offset != SZ_1K * 20) { 987 + test_err("unexpected extent map offset %llu (should be 20K)", em->offset); 988 + ret = -EINVAL; 989 + goto out; 990 + } 991 + out: 992 + ret2 = free_extent_map_tree(inode); 993 + if (ret == 0) 994 + ret = ret2; 995 + 996 + return ret; 997 + } 998 + 903 999 struct rmap_test_vector { 904 1000 u64 raid_type; 905 1001 u64 physical_start; ··· 1172 1076 if (ret) 1173 1077 goto out; 1174 1078 ret = test_case_7(fs_info, BTRFS_I(inode)); 1079 + if (ret) 1080 + goto out; 1081 + ret = test_case_8(fs_info, BTRFS_I(inode)); 1175 1082 if (ret) 1176 1083 goto out; 1177 1084
+48 -1
fs/btrfs/tree-checker.c
··· 634 634 */ 635 635 if (key->type == BTRFS_DIR_ITEM_KEY || 636 636 key->type == BTRFS_XATTR_ITEM_KEY) { 637 - char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)]; 637 + char namebuf[MAX(BTRFS_NAME_LEN, XATTR_NAME_MAX)]; 638 638 639 639 read_extent_buffer(leaf, namebuf, 640 640 (unsigned long)(di + 1), name_len); ··· 1289 1289 va_end(args); 1290 1290 } 1291 1291 1292 + static bool is_valid_dref_root(u64 rootid) 1293 + { 1294 + /* 1295 + * The following tree root objectids are allowed to have a data backref: 1296 + * - subvolume trees 1297 + * - data reloc tree 1298 + * - tree root 1299 + * For v1 space cache 1300 + */ 1301 + return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID || 1302 + rootid == BTRFS_ROOT_TREE_OBJECTID; 1303 + } 1304 + 1292 1305 static int check_extent_item(struct extent_buffer *leaf, 1293 1306 struct btrfs_key *key, int slot, 1294 1307 struct btrfs_key *prev_key) ··· 1454 1441 struct btrfs_extent_data_ref *dref; 1455 1442 struct btrfs_shared_data_ref *sref; 1456 1443 u64 seq; 1444 + u64 dref_root; 1445 + u64 dref_objectid; 1457 1446 u64 dref_offset; 1458 1447 u64 inline_offset; 1459 1448 u8 inline_type; ··· 1499 1484 */ 1500 1485 case BTRFS_EXTENT_DATA_REF_KEY: 1501 1486 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 1487 + dref_root = btrfs_extent_data_ref_root(leaf, dref); 1488 + dref_objectid = btrfs_extent_data_ref_objectid(leaf, dref); 1502 1489 dref_offset = btrfs_extent_data_ref_offset(leaf, dref); 1503 1490 seq = hash_extent_data_ref( 1504 1491 btrfs_extent_data_ref_root(leaf, dref), 1505 1492 btrfs_extent_data_ref_objectid(leaf, dref), 1506 1493 btrfs_extent_data_ref_offset(leaf, dref)); 1494 + if (unlikely(!is_valid_dref_root(dref_root))) { 1495 + extent_err(leaf, slot, 1496 + "invalid data ref root value %llu", 1497 + dref_root); 1498 + return -EUCLEAN; 1499 + } 1500 + if (unlikely(dref_objectid < BTRFS_FIRST_FREE_OBJECTID || 1501 + dref_objectid > BTRFS_LAST_FREE_OBJECTID)) { 1502 + extent_err(leaf, slot, 
1503 + "invalid data ref objectid value %llu", 1504 + dref_root); 1505 + return -EUCLEAN; 1506 + } 1507 1507 if (unlikely(!IS_ALIGNED(dref_offset, 1508 1508 fs_info->sectorsize))) { 1509 1509 extent_err(leaf, slot, ··· 1657 1627 return -EUCLEAN; 1658 1628 } 1659 1629 for (; ptr < end; ptr += sizeof(*dref)) { 1630 + u64 root; 1631 + u64 objectid; 1660 1632 u64 offset; 1661 1633 1662 1634 /* ··· 1666 1634 * overflow from the leaf due to hash collisions. 1667 1635 */ 1668 1636 dref = (struct btrfs_extent_data_ref *)ptr; 1637 + root = btrfs_extent_data_ref_root(leaf, dref); 1638 + objectid = btrfs_extent_data_ref_objectid(leaf, dref); 1669 1639 offset = btrfs_extent_data_ref_offset(leaf, dref); 1640 + if (unlikely(!is_valid_dref_root(root))) { 1641 + extent_err(leaf, slot, 1642 + "invalid extent data backref root value %llu", 1643 + root); 1644 + return -EUCLEAN; 1645 + } 1646 + if (unlikely(objectid < BTRFS_FIRST_FREE_OBJECTID || 1647 + objectid > BTRFS_LAST_FREE_OBJECTID)) { 1648 + extent_err(leaf, slot, 1649 + "invalid extent data backref objectid value %llu", 1650 + root); 1651 + return -EUCLEAN; 1652 + } 1670 1653 if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) { 1671 1654 extent_err(leaf, slot, 1672 1655 "invalid extent data backref offset, have %llu expect aligned to %u",
+24 -11
fs/ceph/caps.c
··· 2016 2016 * CHECK_CAPS_AUTHONLY - we should only check the auth cap 2017 2017 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without 2018 2018 * further delay. 2019 + * CHECK_CAPS_FLUSH_FORCE - we should flush any caps immediately, without 2020 + * further delay. 2019 2021 */ 2020 2022 void ceph_check_caps(struct ceph_inode_info *ci, int flags) 2021 2023 { ··· 2099 2097 } 2100 2098 2101 2099 doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s " 2102 - "flushing %s issued %s revoking %s retain %s %s%s%s\n", 2100 + "flushing %s issued %s revoking %s retain %s %s%s%s%s\n", 2103 2101 inode, ceph_vinop(inode), ceph_cap_string(file_wanted), 2104 2102 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), 2105 2103 ceph_cap_string(ci->i_flushing_caps), ··· 2107 2105 ceph_cap_string(retain), 2108 2106 (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "", 2109 2107 (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "", 2110 - (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : ""); 2108 + (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "", 2109 + (flags & CHECK_CAPS_FLUSH_FORCE) ? 
" FLUSH_FORCE" : ""); 2111 2110 2112 2111 /* 2113 2112 * If we no longer need to hold onto old our caps, and we may ··· 2181 2178 if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref && 2182 2179 (revoking & CEPH_CAP_FILE_BUFFER)) 2183 2180 queue_writeback = true; 2181 + } 2182 + 2183 + if (flags & CHECK_CAPS_FLUSH_FORCE) { 2184 + doutc(cl, "force to flush caps\n"); 2185 + goto ack; 2184 2186 } 2185 2187 2186 2188 if (cap == ci->i_auth_cap && ··· 3518 3510 bool queue_invalidate = false; 3519 3511 bool deleted_inode = false; 3520 3512 bool fill_inline = false; 3513 + bool revoke_wait = false; 3514 + int flags = 0; 3521 3515 3522 3516 /* 3523 3517 * If there is at least one crypto block then we'll trust ··· 3715 3705 ceph_cap_string(cap->issued), ceph_cap_string(newcaps), 3716 3706 ceph_cap_string(revoking)); 3717 3707 if (S_ISREG(inode->i_mode) && 3718 - (revoking & used & CEPH_CAP_FILE_BUFFER)) 3708 + (revoking & used & CEPH_CAP_FILE_BUFFER)) { 3719 3709 writeback = true; /* initiate writeback; will delay ack */ 3720 - else if (queue_invalidate && 3710 + revoke_wait = true; 3711 + } else if (queue_invalidate && 3721 3712 revoking == CEPH_CAP_FILE_CACHE && 3722 - (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) 3723 - ; /* do nothing yet, invalidation will be queued */ 3724 - else if (cap == ci->i_auth_cap) 3713 + (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) { 3714 + revoke_wait = true; /* do nothing yet, invalidation will be queued */ 3715 + } else if (cap == ci->i_auth_cap) { 3725 3716 check_caps = 1; /* check auth cap only */ 3726 - else 3717 + } else { 3727 3718 check_caps = 2; /* check all caps */ 3719 + } 3728 3720 /* If there is new caps, try to wake up the waiters */ 3729 3721 if (~cap->issued & newcaps) 3730 3722 wake = true; ··· 3753 3741 BUG_ON(cap->issued & ~cap->implemented); 3754 3742 3755 3743 /* don't let check_caps skip sending a response to MDS for revoke msgs */ 3756 - if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) { 3744 + if (!revoke_wait && 
le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) { 3757 3745 cap->mds_wanted = 0; 3746 + flags |= CHECK_CAPS_FLUSH_FORCE; 3758 3747 if (cap == ci->i_auth_cap) 3759 3748 check_caps = 1; /* check auth cap only */ 3760 3749 else ··· 3811 3798 3812 3799 mutex_unlock(&session->s_mutex); 3813 3800 if (check_caps == 1) 3814 - ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL); 3801 + ceph_check_caps(ci, flags | CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL); 3815 3802 else if (check_caps == 2) 3816 - ceph_check_caps(ci, CHECK_CAPS_NOINVAL); 3803 + ceph_check_caps(ci, flags | CHECK_CAPS_NOINVAL); 3817 3804 } 3818 3805 3819 3806 /*
+4 -3
fs/ceph/super.h
··· 200 200 struct list_head caps_item; 201 201 }; 202 202 203 - #define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */ 204 - #define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */ 205 - #define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */ 203 + #define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */ 204 + #define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */ 205 + #define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */ 206 + #define CHECK_CAPS_FLUSH_FORCE 8 /* force flush any caps */ 206 207 207 208 struct ceph_cap_flush { 208 209 u64 tid;
+1
fs/file.c
··· 1248 1248 * tables and this condition does not arise without those. 1249 1249 */ 1250 1250 fdt = files_fdtable(files); 1251 + fd = array_index_nospec(fd, fdt->max_fds); 1251 1252 tofree = fdt->fd[fd]; 1252 1253 if (!tofree && fd_is_open(fd, fdt)) 1253 1254 goto Ebusy;
+1 -2
fs/nfsd/nfsctl.c
··· 2069 2069 continue; 2070 2070 } 2071 2071 2072 - ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 2073 - SVC_SOCK_ANONYMOUS, 2072 + ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0, 2074 2073 get_current_cred()); 2075 2074 /* always save the latest error */ 2076 2075 if (ret < 0)
+1 -1
fs/smb/client/cifs_debug.c
··· 1072 1072 static void 1073 1073 cifs_security_flags_handle_must_flags(unsigned int *flags) 1074 1074 { 1075 - unsigned int signflags = *flags & CIFSSEC_MUST_SIGN; 1075 + unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL); 1076 1076 1077 1077 if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) 1078 1078 *flags = CIFSSEC_MUST_KRB5;
+2 -2
fs/smb/client/cifsfs.h
··· 147 147 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 148 148 149 149 /* when changing internal version - update following two lines at same time */ 150 - #define SMB3_PRODUCT_BUILD 49 151 - #define CIFS_VERSION "2.49" 150 + #define SMB3_PRODUCT_BUILD 50 151 + #define CIFS_VERSION "2.50" 152 152 #endif /* _CIFSFS_H */
+6 -30
fs/smb/client/cifsglob.h
··· 345 345 /* connect to a server share */ 346 346 int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *, 347 347 struct cifs_tcon *, const struct nls_table *); 348 - /* close tree connecion */ 348 + /* close tree connection */ 349 349 int (*tree_disconnect)(const unsigned int, struct cifs_tcon *); 350 350 /* get DFS referrals */ 351 351 int (*get_dfs_refer)(const unsigned int, struct cifs_ses *, ··· 816 816 * Protected by @refpath_lock and @srv_lock. The @refpath_lock is 817 817 * mostly used for not requiring a copy of @leaf_fullpath when getting 818 818 * cached or new DFS referrals (which might also sleep during I/O). 819 - * While @srv_lock is held for making string and NULL comparions against 819 + * While @srv_lock is held for making string and NULL comparisons against 820 820 * both fields as in mount(2) and cache refresh. 821 821 * 822 822 * format: \\HOST\SHARE[\OPTIONAL PATH] ··· 1471 1471 struct TCP_Server_Info *server; 1472 1472 }; 1473 1473 1474 - struct cifs_aio_ctx { 1475 - struct kref refcount; 1476 - struct list_head list; 1477 - struct mutex aio_mutex; 1478 - struct completion done; 1479 - struct iov_iter iter; 1480 - struct kiocb *iocb; 1481 - struct cifsFileInfo *cfile; 1482 - struct bio_vec *bv; 1483 - loff_t pos; 1484 - unsigned int nr_pinned_pages; 1485 - ssize_t rc; 1486 - unsigned int len; 1487 - unsigned int total_len; 1488 - unsigned int bv_need_unpin; /* If ->bv[] needs unpinning */ 1489 - bool should_dirty; 1490 - /* 1491 - * Indicates if this aio_ctx is for direct_io, 1492 - * If yes, iter is a copy of the user passed iov_iter 1493 - */ 1494 - bool direct_io; 1495 - }; 1496 - 1497 1474 struct cifs_io_request { 1498 1475 struct netfs_io_request rreq; 1499 1476 struct cifsFileInfo *cfile; ··· 1881 1904 #define CIFSSEC_MAY_SIGN 0x00001 1882 1905 #define CIFSSEC_MAY_NTLMV2 0x00004 1883 1906 #define CIFSSEC_MAY_KRB5 0x00008 1884 - #define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */ 1907 + #define CIFSSEC_MAY_SEAL 
0x00040 1885 1908 #define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */ 1886 1909 1887 1910 #define CIFSSEC_MUST_SIGN 0x01001 ··· 1891 1914 #define CIFSSEC_MUST_NTLMV2 0x04004 1892 1915 #define CIFSSEC_MUST_KRB5 0x08008 1893 1916 #ifdef CONFIG_CIFS_UPCALL 1894 - #define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ 1917 + #define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */ 1895 1918 #else 1896 - #define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */ 1919 + #define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */ 1897 1920 #endif /* UPCALL */ 1898 - #define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ 1921 + #define CIFSSEC_MUST_SEAL 0x40040 1899 1922 #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */ 1900 1923 1901 1924 #define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL) ··· 1987 2010 * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo 1988 2011 * ->invalidHandle initiate_cifs_search 1989 2012 * ->oplock_break_cancelled 1990 - * cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc 1991 2013 ****************************************************************************/ 1992 2014 1993 2015 #ifdef DECLARE_GLOBALS_HERE
-2
fs/smb/client/cifsproto.h
··· 619 619 struct shash_desc *shash); 620 620 enum securityEnum cifs_select_sectype(struct TCP_Server_Info *, 621 621 enum securityEnum); 622 - struct cifs_aio_ctx *cifs_aio_ctx_alloc(void); 623 - void cifs_aio_ctx_release(struct kref *refcount); 624 622 625 623 int cifs_alloc_hash(const char *name, struct shash_desc **sdesc); 626 624 void cifs_free_hash(struct shash_desc **sdesc);
+15 -2
fs/smb/client/inode.c
··· 1042 1042 } 1043 1043 1044 1044 rc = -EOPNOTSUPP; 1045 - switch ((data->reparse.tag = tag)) { 1046 - case 0: /* SMB1 symlink */ 1045 + data->reparse.tag = tag; 1046 + if (!data->reparse.tag) { 1047 1047 if (server->ops->query_symlink) { 1048 1048 rc = server->ops->query_symlink(xid, tcon, 1049 1049 cifs_sb, full_path, 1050 1050 &data->symlink_target); 1051 + } 1052 + if (rc == -EOPNOTSUPP) 1053 + data->reparse.tag = IO_REPARSE_TAG_INTERNAL; 1054 + } 1055 + 1056 + switch (data->reparse.tag) { 1057 + case 0: /* SMB1 symlink */ 1058 + break; 1059 + case IO_REPARSE_TAG_INTERNAL: 1060 + rc = 0; 1061 + if (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY) { 1062 + cifs_create_junction_fattr(fattr, sb); 1063 + goto out; 1051 1064 } 1052 1065 break; 1053 1066 case IO_REPARSE_TAG_MOUNT_POINT:
+25 -7
fs/smb/client/ioctl.c
··· 170 170 static int cifs_shutdown(struct super_block *sb, unsigned long arg) 171 171 { 172 172 struct cifs_sb_info *sbi = CIFS_SB(sb); 173 + struct tcon_link *tlink; 174 + struct cifs_tcon *tcon; 173 175 __u32 flags; 176 + int rc; 174 177 175 178 if (!capable(CAP_SYS_ADMIN)) 176 179 return -EPERM; ··· 181 178 if (get_user(flags, (__u32 __user *)arg)) 182 179 return -EFAULT; 183 180 184 - if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH) 185 - return -EINVAL; 181 + tlink = cifs_sb_tlink(sbi); 182 + if (IS_ERR(tlink)) 183 + return PTR_ERR(tlink); 184 + tcon = tlink_tcon(tlink); 185 + 186 + trace_smb3_shutdown_enter(flags, tcon->tid); 187 + if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH) { 188 + rc = -EINVAL; 189 + goto shutdown_out_err; 190 + } 186 191 187 192 if (cifs_forced_shutdown(sbi)) 188 - return 0; 193 + goto shutdown_good; 189 194 190 195 cifs_dbg(VFS, "shut down requested (%d)", flags); 191 - /* trace_cifs_shutdown(sb, flags);*/ 192 196 193 197 /* 194 198 * see: ··· 211 201 */ 212 202 case CIFS_GOING_FLAGS_DEFAULT: 213 203 cifs_dbg(FYI, "shutdown with default flag not supported\n"); 214 - return -EINVAL; 204 + rc = -EINVAL; 205 + goto shutdown_out_err; 215 206 /* 216 207 * FLAGS_LOGFLUSH is easy since it asks to write out metadata (not 217 208 * data) but metadata writes are not cached on the client, so can treat ··· 221 210 case CIFS_GOING_FLAGS_LOGFLUSH: 222 211 case CIFS_GOING_FLAGS_NOLOGFLUSH: 223 212 sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN; 224 - return 0; 213 + goto shutdown_good; 225 214 default: 226 - return -EINVAL; 215 + rc = -EINVAL; 216 + goto shutdown_out_err; 227 217 } 218 + 219 + shutdown_good: 220 + trace_smb3_shutdown_done(flags, tcon->tid); 228 221 return 0; 222 + shutdown_out_err: 223 + trace_smb3_shutdown_err(rc, flags, tcon->tid); 224 + return rc; 229 225 } 230 226 231 227 static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
+6 -59
fs/smb/client/misc.c
··· 352 352 * on simple responses (wct, bcc both zero) 353 353 * in particular have seen this on 354 354 * ulogoffX and FindClose. This leaves 355 - * one byte of bcc potentially unitialized 355 + * one byte of bcc potentially uninitialized 356 356 */ 357 357 /* zero rest of bcc */ 358 358 tmp[sizeof(struct smb_hdr)+1] = 0; ··· 995 995 return rc; 996 996 } 997 997 998 - struct cifs_aio_ctx * 999 - cifs_aio_ctx_alloc(void) 1000 - { 1001 - struct cifs_aio_ctx *ctx; 1002 - 1003 - /* 1004 - * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io 1005 - * to false so that we know when we have to unreference pages within 1006 - * cifs_aio_ctx_release() 1007 - */ 1008 - ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL); 1009 - if (!ctx) 1010 - return NULL; 1011 - 1012 - INIT_LIST_HEAD(&ctx->list); 1013 - mutex_init(&ctx->aio_mutex); 1014 - init_completion(&ctx->done); 1015 - kref_init(&ctx->refcount); 1016 - return ctx; 1017 - } 1018 - 1019 - void 1020 - cifs_aio_ctx_release(struct kref *refcount) 1021 - { 1022 - struct cifs_aio_ctx *ctx = container_of(refcount, 1023 - struct cifs_aio_ctx, refcount); 1024 - 1025 - cifsFileInfo_put(ctx->cfile); 1026 - 1027 - /* 1028 - * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly 1029 - * which means that iov_iter_extract_pages() was a success and thus 1030 - * that we may have references or pins on pages that we need to 1031 - * release. 
1032 - */ 1033 - if (ctx->bv) { 1034 - if (ctx->should_dirty || ctx->bv_need_unpin) { 1035 - unsigned int i; 1036 - 1037 - for (i = 0; i < ctx->nr_pinned_pages; i++) { 1038 - struct page *page = ctx->bv[i].bv_page; 1039 - 1040 - if (ctx->should_dirty) 1041 - set_page_dirty(page); 1042 - if (ctx->bv_need_unpin) 1043 - unpin_user_page(page); 1044 - } 1045 - } 1046 - kvfree(ctx->bv); 1047 - } 1048 - 1049 - kfree(ctx); 1050 - } 1051 - 1052 998 /** 1053 999 * cifs_alloc_hash - allocate hash and hash context together 1054 1000 * @name: The name of the crypto hash algo ··· 1234 1288 const char *full_path, 1235 1289 bool *islink) 1236 1290 { 1291 + struct TCP_Server_Info *server = tcon->ses->server; 1237 1292 struct cifs_ses *ses = tcon->ses; 1238 1293 size_t len; 1239 1294 char *path; ··· 1251 1304 !is_tcon_dfs(tcon)) 1252 1305 return 0; 1253 1306 1254 - spin_lock(&tcon->tc_lock); 1255 - if (!tcon->origin_fullpath) { 1256 - spin_unlock(&tcon->tc_lock); 1307 + spin_lock(&server->srv_lock); 1308 + if (!server->leaf_fullpath) { 1309 + spin_unlock(&server->srv_lock); 1257 1310 return 0; 1258 1311 } 1259 - spin_unlock(&tcon->tc_lock); 1312 + spin_unlock(&server->srv_lock); 1260 1313 1261 1314 /* 1262 1315 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
+4
fs/smb/client/reparse.c
··· 505 505 } 506 506 507 507 switch (tag) { 508 + case IO_REPARSE_TAG_INTERNAL: 509 + if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY)) 510 + return false; 511 + fallthrough; 508 512 case IO_REPARSE_TAG_DFS: 509 513 case IO_REPARSE_TAG_DFSR: 510 514 case IO_REPARSE_TAG_MOUNT_POINT:
+17 -2
fs/smb/client/reparse.h
··· 12 12 #include "fs_context.h" 13 13 #include "cifsglob.h" 14 14 15 + /* 16 + * Used only by cifs.ko to ignore reparse points from files when client or 17 + * server doesn't support FSCTL_GET_REPARSE_POINT. 18 + */ 19 + #define IO_REPARSE_TAG_INTERNAL ((__u32)~0U) 20 + 15 21 static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf) 16 22 { 17 23 u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer); ··· 84 78 static inline bool reparse_inode_match(struct inode *inode, 85 79 struct cifs_fattr *fattr) 86 80 { 81 + struct cifsInodeInfo *cinode = CIFS_I(inode); 87 82 struct timespec64 ctime = inode_get_ctime(inode); 88 83 89 - return (CIFS_I(inode)->cifsAttrs & ATTR_REPARSE) && 90 - CIFS_I(inode)->reparse_tag == fattr->cf_cifstag && 84 + /* 85 + * Do not match reparse tags when client or server doesn't support 86 + * FSCTL_GET_REPARSE_POINT. @fattr->cf_cifstag should contain correct 87 + * reparse tag from query dir response but the client won't be able to 88 + * read the reparse point data anyway. This spares us a revalidation. 89 + */ 90 + if (cinode->reparse_tag != IO_REPARSE_TAG_INTERNAL && 91 + cinode->reparse_tag != fattr->cf_cifstag) 92 + return false; 93 + return (cinode->cifsAttrs & ATTR_REPARSE) && 91 94 timespec64_equal(&ctime, &fattr->cf_ctime); 92 95 } 93 96
+6 -2
fs/smb/client/smb2inode.c
··· 930 930 931 931 switch (rc) { 932 932 case 0: 933 + rc = parse_create_response(data, cifs_sb, &out_iov[0]); 934 + break; 933 935 case -EOPNOTSUPP: 934 936 /* 935 937 * BB TODO: When support for special files added to Samba ··· 950 948 cmds[num_cmds++] = SMB2_OP_GET_REPARSE; 951 949 952 950 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, 953 - FILE_READ_ATTRIBUTES | FILE_READ_EA, 951 + FILE_READ_ATTRIBUTES | 952 + FILE_READ_EA | SYNCHRONIZE, 954 953 FILE_OPEN, create_options | 955 954 OPEN_REPARSE_POINT, ACL_NO_MODE); 956 955 cifs_get_readable_path(tcon, full_path, &cfile); ··· 1259 1256 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); 1260 1257 1261 1258 cifs_get_readable_path(tcon, full_path, &cfile); 1262 - oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_READ_ATTRIBUTES, 1259 + oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, 1260 + FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE, 1263 1261 FILE_OPEN, OPEN_REPARSE_POINT, ACL_NO_MODE); 1264 1262 rc = smb2_compound_op(xid, tcon, cifs_sb, 1265 1263 full_path, &oparms, &in_iov,
+3
fs/smb/client/smb2pdu.c
··· 82 82 if (tcon->seal && 83 83 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 84 84 return 1; 85 + if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) && 86 + (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 87 + return 1; 85 88 return 0; 86 89 } 87 90
+4 -4
fs/smb/client/smbdirect.c
··· 406 406 else 407 407 response = get_empty_queue_buffer(info); 408 408 if (!response) { 409 - /* now switch to emtpy packet queue */ 409 + /* now switch to empty packet queue */ 410 410 if (use_receive_queue) { 411 411 use_receive_queue = 0; 412 412 continue; ··· 618 618 619 619 /* 620 620 * Test if FRWR (Fast Registration Work Requests) is supported on the device 621 - * This implementation requries FRWR on RDMA read/write 621 + * This implementation requires FRWR on RDMA read/write 622 622 * return value: true if it is supported 623 623 */ 624 624 static bool frwr_is_supported(struct ib_device_attr *attrs) ··· 2177 2177 * MR available in the list. It may access the list while the 2178 2178 * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock 2179 2179 * as they never modify the same places. However, there may be several CPUs 2180 - * issueing I/O trying to get MR at the same time, mr_list_lock is used to 2180 + * issuing I/O trying to get MR at the same time, mr_list_lock is used to 2181 2181 * protect this situation. 2182 2182 */ 2183 2183 static struct smbd_mr *get_mr(struct smbd_connection *info) ··· 2311 2311 /* 2312 2312 * There is no need for waiting for complemtion on ib_post_send 2313 2313 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution 2314 - * on the next ib_post_send when we actaully send I/O to remote peer 2314 + * on the next ib_post_send when we actually send I/O to remote peer 2315 2315 */ 2316 2316 rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL); 2317 2317 if (!rc)
+50 -1
fs/smb/client/trace.h
··· 1388 1388 __entry->command = command; 1389 1389 ), 1390 1390 TP_printk("xid=%u fid=0x%llx ioctl cmd=0x%x", 1391 - __entry->xid, __entry->fid, __entry->command) 1391 + __entry->xid, __entry->fid, __entry->command) 1392 1392 ) 1393 1393 1394 1394 #define DEFINE_SMB3_IOCTL_EVENT(name) \ ··· 1400 1400 1401 1401 DEFINE_SMB3_IOCTL_EVENT(ioctl); 1402 1402 1403 + DECLARE_EVENT_CLASS(smb3_shutdown_class, 1404 + TP_PROTO(__u32 flags, 1405 + __u32 tid), 1406 + TP_ARGS(flags, tid), 1407 + TP_STRUCT__entry( 1408 + __field(__u32, flags) 1409 + __field(__u32, tid) 1410 + ), 1411 + TP_fast_assign( 1412 + __entry->flags = flags; 1413 + __entry->tid = tid; 1414 + ), 1415 + TP_printk("flags=0x%x tid=0x%x", 1416 + __entry->flags, __entry->tid) 1417 + ) 1403 1418 1419 + #define DEFINE_SMB3_SHUTDOWN_EVENT(name) \ 1420 + DEFINE_EVENT(smb3_shutdown_class, smb3_##name, \ 1421 + TP_PROTO(__u32 flags, \ 1422 + __u32 tid), \ 1423 + TP_ARGS(flags, tid)) 1404 1424 1425 + DEFINE_SMB3_SHUTDOWN_EVENT(shutdown_enter); 1426 + DEFINE_SMB3_SHUTDOWN_EVENT(shutdown_done); 1405 1427 1428 + DECLARE_EVENT_CLASS(smb3_shutdown_err_class, 1429 + TP_PROTO(int rc, 1430 + __u32 flags, 1431 + __u32 tid), 1432 + TP_ARGS(rc, flags, tid), 1433 + TP_STRUCT__entry( 1434 + __field(int, rc) 1435 + __field(__u32, flags) 1436 + __field(__u32, tid) 1437 + ), 1438 + TP_fast_assign( 1439 + __entry->rc = rc; 1440 + __entry->flags = flags; 1441 + __entry->tid = tid; 1442 + ), 1443 + TP_printk("rc=%d flags=0x%x tid=0x%x", 1444 + __entry->rc, __entry->flags, __entry->tid) 1445 + ) 1446 + 1447 + #define DEFINE_SMB3_SHUTDOWN_ERR_EVENT(name) \ 1448 + DEFINE_EVENT(smb3_shutdown_err_class, smb3_##name, \ 1449 + TP_PROTO(int rc, \ 1450 + __u32 flags, \ 1451 + __u32 tid), \ 1452 + TP_ARGS(rc, flags, tid)) 1453 + 1454 + DEFINE_SMB3_SHUTDOWN_ERR_EVENT(shutdown_err); 1406 1455 1407 1456 DECLARE_EVENT_CLASS(smb3_credit_class, 1408 1457 TP_PROTO(__u64 currmid,
+1 -1
fs/smb/client/transport.c
··· 1289 1289 out: 1290 1290 /* 1291 1291 * This will dequeue all mids. After this it is important that the 1292 - * demultiplex_thread will not process any of these mids any futher. 1292 + * demultiplex_thread will not process any of these mids any further. 1293 1293 * This is prevented above by using a noop callback that will not 1294 1294 * wake this thread except for the very last PDU. 1295 1295 */
+2 -2
fs/tracefs/event_inode.c
··· 112 112 entry->release(entry->name, ei->data); 113 113 } 114 114 115 - call_rcu(&ei->rcu, free_ei_rcu); 115 + call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu); 116 116 } 117 117 118 118 static inline void put_ei(struct eventfs_inode *ei) ··· 736 736 /* Was the parent freed? */ 737 737 if (list_empty(&ei->list)) { 738 738 cleanup_ei(ei); 739 - ei = NULL; 739 + ei = ERR_PTR(-EBUSY); 740 740 } 741 741 return ei; 742 742 }
+5 -7
fs/tracefs/inode.c
··· 42 42 struct tracefs_inode *ti; 43 43 unsigned long flags; 44 44 45 - ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL); 45 + ti = alloc_inode_sb(sb, tracefs_inode_cachep, GFP_KERNEL); 46 46 if (!ti) 47 47 return NULL; 48 48 ··· 53 53 return &ti->vfs_inode; 54 54 } 55 55 56 - static void tracefs_free_inode_rcu(struct rcu_head *rcu) 56 + static void tracefs_free_inode(struct inode *inode) 57 57 { 58 - struct tracefs_inode *ti; 58 + struct tracefs_inode *ti = get_tracefs(inode); 59 59 60 - ti = container_of(rcu, struct tracefs_inode, rcu); 61 60 kmem_cache_free(tracefs_inode_cachep, ti); 62 61 } 63 62 64 - static void tracefs_free_inode(struct inode *inode) 63 + static void tracefs_destroy_inode(struct inode *inode) 65 64 { 66 65 struct tracefs_inode *ti = get_tracefs(inode); 67 66 unsigned long flags; ··· 68 69 spin_lock_irqsave(&tracefs_inode_lock, flags); 69 70 list_del_rcu(&ti->list); 70 71 spin_unlock_irqrestore(&tracefs_inode_lock, flags); 71 - 72 - call_rcu(&ti->rcu, tracefs_free_inode_rcu); 73 72 } 74 73 75 74 static ssize_t default_read_file(struct file *file, char __user *buf, ··· 434 437 static const struct super_operations tracefs_super_operations = { 435 438 .alloc_inode = tracefs_alloc_inode, 436 439 .free_inode = tracefs_free_inode, 440 + .destroy_inode = tracefs_destroy_inode, 437 441 .drop_inode = tracefs_drop_inode, 438 442 .statfs = simple_statfs, 439 443 .show_options = tracefs_show_options,
+1 -4
fs/tracefs/internal.h
··· 10 10 }; 11 11 12 12 struct tracefs_inode { 13 - union { 14 - struct inode vfs_inode; 15 - struct rcu_head rcu; 16 - }; 13 + struct inode vfs_inode; 17 14 /* The below gets initialized with memset_after(ti, 0, vfs_inode) */ 18 15 struct list_head list; 19 16 unsigned long flags;
+1 -1
fs/xfs/libxfs/xfs_quota_defs.h
··· 56 56 * And, of course, we also need to take into account the dquot log format item 57 57 * used to describe each dquot. 58 58 */ 59 - #define XFS_DQUOT_LOGRES(mp) \ 59 + #define XFS_DQUOT_LOGRES \ 60 60 ((sizeof(struct xfs_dq_logformat) + sizeof(struct xfs_disk_dquot)) * 6) 61 61 62 62 #define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
+14 -14
fs/xfs/libxfs/xfs_trans_resv.c
··· 338 338 blksz); 339 339 t1 += adj; 340 340 t3 += adj; 341 - return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); 341 + return XFS_DQUOT_LOGRES + max3(t1, t2, t3); 342 342 } 343 343 344 344 t4 = xfs_calc_refcountbt_reservation(mp, 1); 345 - return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3)); 345 + return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3)); 346 346 } 347 347 348 348 unsigned int ··· 410 410 xfs_refcountbt_block_count(mp, 4), 411 411 blksz); 412 412 413 - return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); 413 + return XFS_DQUOT_LOGRES + max3(t1, t2, t3); 414 414 } 415 415 416 416 t4 = xfs_calc_refcountbt_reservation(mp, 2); 417 - return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3)); 417 + return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3)); 418 418 } 419 419 420 420 unsigned int ··· 466 466 xfs_calc_rename_reservation( 467 467 struct xfs_mount *mp) 468 468 { 469 - unsigned int overhead = XFS_DQUOT_LOGRES(mp); 469 + unsigned int overhead = XFS_DQUOT_LOGRES; 470 470 struct xfs_trans_resv *resp = M_RES(mp); 471 471 unsigned int t1, t2, t3 = 0; 472 472 ··· 577 577 xfs_calc_link_reservation( 578 578 struct xfs_mount *mp) 579 579 { 580 - unsigned int overhead = XFS_DQUOT_LOGRES(mp); 580 + unsigned int overhead = XFS_DQUOT_LOGRES; 581 581 struct xfs_trans_resv *resp = M_RES(mp); 582 582 unsigned int t1, t2, t3 = 0; 583 583 ··· 641 641 xfs_calc_remove_reservation( 642 642 struct xfs_mount *mp) 643 643 { 644 - unsigned int overhead = XFS_DQUOT_LOGRES(mp); 644 + unsigned int overhead = XFS_DQUOT_LOGRES; 645 645 struct xfs_trans_resv *resp = M_RES(mp); 646 646 unsigned int t1, t2, t3 = 0; 647 647 ··· 729 729 struct xfs_mount *mp) 730 730 { 731 731 struct xfs_trans_resv *resp = M_RES(mp); 732 - unsigned int overhead = XFS_DQUOT_LOGRES(mp); 732 + unsigned int overhead = XFS_DQUOT_LOGRES; 733 733 unsigned int t1, t2, t3 = 0; 734 734 735 735 t1 = xfs_calc_icreate_resv_alloc(mp); ··· 747 747 xfs_calc_create_tmpfile_reservation( 748 748 struct xfs_mount *mp) 749 749 { 
750 - uint res = XFS_DQUOT_LOGRES(mp); 750 + uint res = XFS_DQUOT_LOGRES; 751 751 752 752 res += xfs_calc_icreate_resv_alloc(mp); 753 753 return res + xfs_calc_iunlink_add_reservation(mp); ··· 829 829 xfs_calc_ifree_reservation( 830 830 struct xfs_mount *mp) 831 831 { 832 - return XFS_DQUOT_LOGRES(mp) + 832 + return XFS_DQUOT_LOGRES + 833 833 xfs_calc_inode_res(mp, 1) + 834 834 xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + 835 835 xfs_calc_iunlink_remove_reservation(mp) + ··· 846 846 xfs_calc_ichange_reservation( 847 847 struct xfs_mount *mp) 848 848 { 849 - return XFS_DQUOT_LOGRES(mp) + 849 + return XFS_DQUOT_LOGRES + 850 850 xfs_calc_inode_res(mp, 1) + 851 851 xfs_calc_buf_res(1, mp->m_sb.sb_sectsize); 852 852 ··· 955 955 xfs_calc_addafork_reservation( 956 956 struct xfs_mount *mp) 957 957 { 958 - return XFS_DQUOT_LOGRES(mp) + 958 + return XFS_DQUOT_LOGRES + 959 959 xfs_calc_inode_res(mp, 1) + 960 960 xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + 961 961 xfs_calc_buf_res(1, mp->m_dir_geo->blksize) + ··· 1003 1003 xfs_calc_attrsetm_reservation( 1004 1004 struct xfs_mount *mp) 1005 1005 { 1006 - return XFS_DQUOT_LOGRES(mp) + 1006 + return XFS_DQUOT_LOGRES + 1007 1007 xfs_calc_inode_res(mp, 1) + 1008 1008 xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) + 1009 1009 xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1)); ··· 1043 1043 xfs_calc_attrrm_reservation( 1044 1044 struct xfs_mount *mp) 1045 1045 { 1046 - return XFS_DQUOT_LOGRES(mp) + 1046 + return XFS_DQUOT_LOGRES + 1047 1047 max((xfs_calc_inode_res(mp, 1) + 1048 1048 xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, 1049 1049 XFS_FSB_TO_B(mp, 1)) +
+1 -1
fs/xfs/scrub/agheader_repair.c
··· 696 696 * step. 697 697 */ 698 698 xagb_bitmap_init(&af.used_extents); 699 - af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp), 699 + af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp); 700 700 xagb_bitmap_walk(agfl_extents, xrep_agfl_fill, &af); 701 701 error = xagb_bitmap_disunion(agfl_extents, &af.used_extents); 702 702 if (error)
+1 -1
fs/xfs/scrub/parent.c
··· 799 799 } 800 800 801 801 if (pp->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) 802 - goto out_pp; 802 + goto out_names; 803 803 804 804 /* 805 805 * Complain if the number of parent pointers doesn't match the link
+4 -6
fs/xfs/scrub/trace.h
··· 959 959 TP_STRUCT__entry( 960 960 __field(dev_t, dev) 961 961 __field(unsigned long, ino) 962 - __array(char, pathname, 256) 962 + __array(char, pathname, MAXNAMELEN) 963 963 ), 964 964 TP_fast_assign( 965 - char pathname[257]; 966 965 char *path; 967 966 968 967 __entry->ino = file_inode(xf->file)->i_ino; 969 - memset(pathname, 0, sizeof(pathname)); 970 - path = file_path(xf->file, pathname, sizeof(pathname) - 1); 968 + path = file_path(xf->file, __entry->pathname, MAXNAMELEN); 971 969 if (IS_ERR(path)) 972 - path = "(unknown)"; 973 - strncpy(__entry->pathname, path, sizeof(__entry->pathname)); 970 + strncpy(__entry->pathname, "(unknown)", 971 + sizeof(__entry->pathname)); 974 972 ), 975 973 TP_printk("xfino 0x%lx path '%s'", 976 974 __entry->ino,
+1 -1
fs/xfs/xfs_attr_list.c
··· 139 139 sbp->name = sfe->nameval; 140 140 sbp->namelen = sfe->namelen; 141 141 /* These are bytes, and both on-disk, don't endian-flip */ 142 - sbp->value = &sfe->nameval[sfe->namelen], 142 + sbp->value = &sfe->nameval[sfe->namelen]; 143 143 sbp->valuelen = sfe->valuelen; 144 144 sbp->flags = sfe->flags; 145 145 sbp->hash = xfs_attr_hashval(dp->i_mount, sfe->flags,
+4 -6
fs/xfs/xfs_trace.h
··· 4715 4715 TP_STRUCT__entry( 4716 4716 __field(dev_t, dev) 4717 4717 __field(unsigned long, ino) 4718 - __array(char, pathname, 256) 4718 + __array(char, pathname, MAXNAMELEN) 4719 4719 ), 4720 4720 TP_fast_assign( 4721 - char pathname[257]; 4722 4721 char *path; 4723 4722 struct file *file = btp->bt_file; 4724 4723 4725 4724 __entry->dev = btp->bt_mount->m_super->s_dev; 4726 4725 __entry->ino = file_inode(file)->i_ino; 4727 - memset(pathname, 0, sizeof(pathname)); 4728 - path = file_path(file, pathname, sizeof(pathname) - 1); 4726 + path = file_path(file, __entry->pathname, MAXNAMELEN); 4729 4727 if (IS_ERR(path)) 4730 - path = "(unknown)"; 4731 - strncpy(__entry->pathname, path, sizeof(__entry->pathname)); 4728 + strncpy(__entry->pathname, "(unknown)", 4729 + sizeof(__entry->pathname)); 4732 4730 ), 4733 4731 TP_printk("dev %d:%d xmino 0x%lx path '%s'", 4734 4732 MAJOR(__entry->dev), MINOR(__entry->dev),
+18 -1
fs/xfs/xfs_xattr.c
··· 110 110 args->whichfork = XFS_ATTR_FORK; 111 111 xfs_attr_sethash(args); 112 112 113 - return xfs_attr_set(args, op, args->attr_filter & XFS_ATTR_ROOT); 113 + /* 114 + * Some xattrs must be resistant to allocation failure at ENOSPC, e.g. 115 + * creating an inode with ACLs or security attributes requires the 116 + * allocation of the xattr holding that information to succeed. Hence 117 + * we allow xattrs in the VFS TRUSTED, SYSTEM, POSIX_ACL and SECURITY 118 + * (LSM xattr) namespaces to dip into the reserve block pool to allow 119 + * manipulation of these xattrs when at ENOSPC. These VFS xattr 120 + * namespaces translate to the XFS_ATTR_ROOT and XFS_ATTR_SECURE on-disk 121 + * namespaces. 122 + * 123 + * For most of these cases, these special xattrs will fit in the inode 124 + * itself and so consume no extra space or only require temporary extra 125 + * space while an overwrite is being made. Hence the use of the reserved 126 + * pool is largely to avoid the worst case reservation from preventing 127 + * the xattr from being created at ENOSPC. 128 + */ 129 + return xfs_attr_set(args, op, 130 + args->attr_filter & (XFS_ATTR_ROOT | XFS_ATTR_SECURE)); 114 131 } 115 132 116 133
+5 -6
include/asm-generic/vmlinux.lds.h
··· 911 911 #define CON_INITCALL \ 912 912 BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end) 913 913 914 - #define RUNTIME_NAME(t,x) runtime_##t##_##x 914 + #define NAMED_SECTION(name) \ 915 + . = ALIGN(8); \ 916 + name : AT(ADDR(name) - LOAD_OFFSET) \ 917 + { BOUNDED_SECTION_PRE_LABEL(name, name, __start_, __stop_) } 915 918 916 - #define RUNTIME_CONST(t,x) \ 917 - . = ALIGN(8); \ 918 - RUNTIME_NAME(t,x) : AT(ADDR(RUNTIME_NAME(t,x)) - LOAD_OFFSET) { \ 919 - *(RUNTIME_NAME(t,x)); \ 920 - } 919 + #define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x) 921 920 922 921 /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ 923 922 #define KUNIT_TABLE() \
+2
include/drm/drm_buddy.h
··· 27 27 #define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) 28 28 #define DRM_BUDDY_CLEAR_ALLOCATION BIT(3) 29 29 #define DRM_BUDDY_CLEARED BIT(4) 30 + #define DRM_BUDDY_TRIM_DISABLE BIT(5) 30 31 31 32 struct drm_buddy_block { 32 33 #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) ··· 156 155 unsigned long flags); 157 156 158 157 int drm_buddy_block_trim(struct drm_buddy *mm, 158 + u64 *start, 159 159 u64 new_size, 160 160 struct list_head *blocks); 161 161
+9
include/linux/compiler.h
··· 297 297 #define is_unsigned_type(type) (!is_signed_type(type)) 298 298 299 299 /* 300 + * Useful shorthand for "is this condition known at compile-time?" 301 + * 302 + * Note that the condition may involve non-constant values, 303 + * but the compiler may know enough about the details of the 304 + * values to determine that the condition is statically true. 305 + */ 306 + #define statically_true(x) (__builtin_constant_p(x) && (x)) 307 + 308 + /* 300 309 * This is needed in functions which generate the stack canary, see 301 310 * arch/x86/kernel/smpboot.c::start_secondary() for an example. 302 311 */
+1 -1
include/linux/cpuhotplug.h
··· 100 100 CPUHP_WORKQUEUE_PREP, 101 101 CPUHP_POWER_NUMA_PREPARE, 102 102 CPUHP_HRTIMERS_PREPARE, 103 - CPUHP_PROFILE_PREPARE, 104 103 CPUHP_X2APIC_PREPARE, 105 104 CPUHP_SMPCFD_PREPARE, 106 105 CPUHP_RELAY_PREPARE, ··· 147 148 CPUHP_AP_IRQ_LOONGARCH_STARTING, 148 149 CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, 149 150 CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, 151 + CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, 150 152 CPUHP_AP_ARM_MVEBU_COHERENCY, 151 153 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 152 154 CPUHP_AP_PERF_X86_STARTING,
+1 -1
include/linux/cpumask.h
··· 1037 1037 assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val)) 1038 1038 1039 1039 #define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible)) 1040 - #define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_possible_mask, (enabled)) 1040 + #define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled)) 1041 1041 #define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present)) 1042 1042 #define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active)) 1043 1043 #define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
+5 -5
include/linux/ethtool.h
··· 736 736 * @rxfh_key_space: same as @rxfh_indir_space, but for the key. 737 737 * @rxfh_priv_size: size of the driver private data area the core should 738 738 * allocate for an RSS context (in &struct ethtool_rxfh_context). 739 - * @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this 740 - * is zero then the core may choose any (nonzero) ID, otherwise the core 741 - * will only use IDs strictly less than this value, as the @rss_context 742 - * argument to @create_rxfh_context and friends. 739 + * @rxfh_max_num_contexts: maximum (exclusive) supported RSS context ID. 740 + * If this is zero then the core may choose any (nonzero) ID, otherwise 741 + * the core will only use IDs strictly less than this value, as the 742 + * @rss_context argument to @create_rxfh_context and friends. 743 743 * @supported_coalesce_params: supported types of interrupt coalescing. 744 744 * @supported_ring_params: supported ring params. 745 745 * @get_drvinfo: Report driver/device information. Modern drivers no ··· 954 954 u32 rxfh_indir_space; 955 955 u16 rxfh_key_space; 956 956 u16 rxfh_priv_size; 957 - u32 rxfh_max_context_id; 957 + u32 rxfh_max_num_contexts; 958 958 u32 supported_coalesce_params; 959 959 u32 supported_ring_params; 960 960 void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+1 -1
include/linux/i2c.h
··· 1066 1066 struct acpi_resource; 1067 1067 struct acpi_resource_i2c_serialbus; 1068 1068 1069 - #if IS_ENABLED(CONFIG_ACPI) 1069 + #if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C) 1070 1070 bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, 1071 1071 struct acpi_resource_i2c_serialbus **i2c); 1072 1072 int i2c_acpi_client_count(struct acpi_device *adev);
+5 -4
include/linux/kvm_host.h
··· 2414 2414 } 2415 2415 2416 2416 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, 2417 - unsigned long attrs); 2417 + unsigned long mask, unsigned long attrs); 2418 2418 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, 2419 2419 struct kvm_gfn_range *range); 2420 2420 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, ··· 2445 2445 } 2446 2446 #endif /* CONFIG_KVM_PRIVATE_MEM */ 2447 2447 2448 - #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE 2448 + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE 2449 2449 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order); 2450 - bool kvm_arch_gmem_prepare_needed(struct kvm *kvm); 2451 2450 #endif 2452 2451 2452 + #ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM 2453 2453 /** 2454 2454 * kvm_gmem_populate() - Populate/prepare a GPA range with guest data 2455 2455 * ··· 2476 2476 2477 2477 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages, 2478 2478 kvm_gmem_populate_cb post_populate, void *opaque); 2479 + #endif 2479 2480 2480 - #ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE 2481 + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE 2481 2482 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end); 2482 2483 #endif 2483 2484
+83 -32
include/linux/minmax.h
··· 26 26 #define __typecheck(x, y) \ 27 27 (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) 28 28 29 - /* is_signed_type() isn't a constexpr for pointer types */ 30 - #define __is_signed(x) \ 31 - __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \ 32 - is_signed_type(typeof(x)), 0) 29 + /* 30 + * __sign_use for integer expressions: 31 + * bit #0 set if ok for unsigned comparisons 32 + * bit #1 set if ok for signed comparisons 33 + * 34 + * In particular, statically non-negative signed integer 35 + * expressions are ok for both. 36 + * 37 + * NOTE! Unsigned types smaller than 'int' are implicitly 38 + * converted to 'int' in expressions, and are accepted for 39 + * signed conversions for now. This is debatable. 40 + * 41 + * Note that 'x' is the original expression, and 'ux' is 42 + * the unique variable that contains the value. 43 + * 44 + * We use 'ux' for pure type checking, and 'x' for when 45 + * we need to look at the value (but without evaluating 46 + * it for side effects! Careful to only ever evaluate it 47 + * with sizeof() or __builtin_constant_p() etc). 48 + * 49 + * Pointers end up being checked by the normal C type 50 + * rules at the actual comparison, and these expressions 51 + * only need to be careful to not cause warnings for 52 + * pointer use. 53 + */ 54 + #define __signed_type_use(x,ux) (2+__is_nonneg(x,ux)) 55 + #define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4)) 56 + #define __sign_use(x,ux) (is_signed_type(typeof(ux))? \ 57 + __signed_type_use(x,ux):__unsigned_type_use(x,ux)) 33 58 34 - /* True for a non-negative signed int constant */ 35 - #define __is_noneg_int(x) \ 36 - (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0) 59 + /* 60 + * To avoid warnings about casting pointers to integers 61 + * of different sizes, we need that special sign type. 62 + * 63 + * On 64-bit we can just always use 'long', since any 64 + * integer or pointer type can just be cast to that. 
65 + * 66 + * This does not work for 128-bit signed integers since 67 + * the cast would truncate them, but we do not use s128 68 + * types in the kernel (we do use 'u128', but they will 69 + * be handled by the !is_signed_type() case). 70 + * 71 + * NOTE! The cast is there only to avoid any warnings 72 + * from when values that aren't signed integer types. 73 + */ 74 + #ifdef CONFIG_64BIT 75 + #define __signed_type(ux) long 76 + #else 77 + #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L)) 78 + #endif 79 + #define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0) 37 80 38 - #define __types_ok(x, y) \ 39 - (__is_signed(x) == __is_signed(y) || \ 40 - __is_signed((x) + 0) == __is_signed((y) + 0) || \ 41 - __is_noneg_int(x) || __is_noneg_int(y)) 81 + #define __types_ok(x,y,ux,uy) \ 82 + (__sign_use(x,ux) & __sign_use(y,uy)) 83 + 84 + #define __types_ok3(x,y,z,ux,uy,uz) \ 85 + (__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz)) 42 86 43 87 #define __cmp_op_min < 44 88 #define __cmp_op_max > ··· 95 51 #define __cmp_once(op, type, x, y) \ 96 52 __cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_)) 97 53 98 - #define __careful_cmp_once(op, x, y) ({ \ 99 - static_assert(__types_ok(x, y), \ 100 - #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \ 101 - __cmp_once(op, __auto_type, x, y); }) 54 + #define __careful_cmp_once(op, x, y, ux, uy) ({ \ 55 + __auto_type ux = (x); __auto_type uy = (y); \ 56 + BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \ 57 + #op"("#x", "#y") signedness error"); \ 58 + __cmp(op, ux, uy); }) 102 59 103 - #define __careful_cmp(op, x, y) \ 104 - __builtin_choose_expr(__is_constexpr((x) - (y)), \ 105 - __cmp(op, x, y), __careful_cmp_once(op, x, y)) 60 + #define __careful_cmp(op, x, y) \ 61 + __careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_)) 106 62 107 63 #define __clamp(val, lo, hi) \ 108 64 ((val) >= (hi) ? (hi) : ((val) <= (lo) ? 
(lo) : (val))) 109 65 110 - #define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \ 111 - typeof(val) unique_val = (val); \ 112 - typeof(lo) unique_lo = (lo); \ 113 - typeof(hi) unique_hi = (hi); \ 66 + #define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \ 67 + __auto_type uval = (val); \ 68 + __auto_type ulo = (lo); \ 69 + __auto_type uhi = (hi); \ 114 70 static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \ 115 71 (lo) <= (hi), true), \ 116 72 "clamp() low limit " #lo " greater than high limit " #hi); \ 117 - static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \ 118 - static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \ 119 - __clamp(unique_val, unique_lo, unique_hi); }) 73 + BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \ 74 + "clamp("#val", "#lo", "#hi") signedness error"); \ 75 + __clamp(uval, ulo, uhi); }) 120 76 121 - #define __careful_clamp(val, lo, hi) ({ \ 122 - __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \ 123 - __clamp(val, lo, hi), \ 124 - __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \ 125 - __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); }) 77 + #define __careful_clamp(val, lo, hi) \ 78 + __clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_)) 126 79 127 80 /** 128 81 * min - return minimum of two values of the same or compatible types ··· 152 111 #define umax(x, y) \ 153 112 __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull) 154 113 114 + #define __careful_op3(op, x, y, z, ux, uy, uz) ({ \ 115 + __auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\ 116 + BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \ 117 + #op"3("#x", "#y", "#z") signedness error"); \ 118 + __cmp(op, ux, __cmp(op, uy, uz)); }) 119 + 155 120 /** 156 121 * min3 - return minimum of three values 157 122 * @x: first value 158 123 * @y: second value 159 124 * @z: third value 160 125 */ 161 - #define min3(x, y, z) min((typeof(x))min(x, y), z) 126 + #define 
min3(x, y, z) \ 127 + __careful_op3(min, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_)) 162 128 163 129 /** 164 130 * max3 - return maximum of three values ··· 173 125 * @y: second value 174 126 * @z: third value 175 127 */ 176 - #define max3(x, y, z) max((typeof(x))max(x, y), z) 128 + #define max3(x, y, z) \ 129 + __careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_)) 177 130 178 131 /** 179 132 * min_not_zero - return the minimum that is _not_ zero, unless both are zero ··· 326 277 * Use these carefully: no type checking, and uses the arguments 327 278 * multiple times. Use for obvious constants only. 328 279 */ 280 + #define MIN(a,b) __cmp(min,a,b) 281 + #define MAX(a,b) __cmp(max,a,b) 329 282 #define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b)) 330 283 #define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b)) 331 284
-1
include/linux/profile.h
··· 10 10 11 11 #define CPU_PROFILING 1 12 12 #define SCHED_PROFILING 2 13 - #define SLEEP_PROFILING 3 14 13 #define KVM_PROFILING 4 15 14 16 15 struct proc_dir_entry;
-1
include/linux/ring_buffer.h
··· 193 193 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs); 194 194 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer); 195 195 196 - size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu); 197 196 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu); 198 197 199 198 struct buffer_data_read_page;
+1 -2
include/linux/trace_events.h
··· 680 680 * caching and such. Which is mostly OK ;-) 681 681 */ 682 682 unsigned long flags; 683 - atomic_t ref; /* ref count for opened files */ 683 + refcount_t ref; /* ref count for opened files */ 684 684 atomic_t sm_ref; /* soft-mode reference counter */ 685 685 atomic_t tm_ref; /* trigger-mode reference counter */ 686 686 }; ··· 880 880 struct perf_event; 881 881 882 882 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); 883 - DECLARE_PER_CPU(int, bpf_kprobe_override); 884 883 885 884 extern int perf_trace_init(struct perf_event *event); 886 885 extern void perf_trace_destroy(struct perf_event *event);
+3
include/linux/virtio.h
··· 10 10 #include <linux/mod_devicetable.h> 11 11 #include <linux/gfp.h> 12 12 #include <linux/dma-mapping.h> 13 + #include <linux/completion.h> 13 14 14 15 /** 15 16 * struct virtqueue - a queue to register buffers for sending or receiving. ··· 110 109 __le64 group_member_id; 111 110 struct scatterlist *data_sg; 112 111 struct scatterlist *result_sg; 112 + struct completion completion; 113 + int ret; 113 114 }; 114 115 115 116 /**
-4
include/linux/virtio_config.h
··· 104 104 * Returns 0 on success or error status 105 105 * If disable_vq_and_reset is set, then enable_vq_after_reset must also be 106 106 * set. 107 - * @create_avq: create admin virtqueue resource. 108 - * @destroy_avq: destroy admin virtqueue resource. 109 107 */ 110 108 struct virtio_config_ops { 111 109 void (*get)(struct virtio_device *vdev, unsigned offset, ··· 131 133 struct virtio_shm_region *region, u8 id); 132 134 int (*disable_vq_and_reset)(struct virtqueue *vq); 133 135 int (*enable_vq_after_reset)(struct virtqueue *vq); 134 - int (*create_avq)(struct virtio_device *vdev); 135 - void (*destroy_avq)(struct virtio_device *vdev); 136 136 }; 137 137 138 138 /* If driver didn't advertise the feature, it will never appear. */
+5 -11
include/linux/virtio_net.h
··· 56 56 unsigned int thlen = 0; 57 57 unsigned int p_off = 0; 58 58 unsigned int ip_proto; 59 - u64 ret, remainder, gso_size; 60 59 61 60 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 62 61 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { ··· 97 98 u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start); 98 99 u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); 99 100 u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16)); 100 - 101 - if (hdr->gso_size) { 102 - gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); 103 - ret = div64_u64_rem(skb->len, gso_size, &remainder); 104 - if (!(ret && (hdr->gso_size > needed) && 105 - ((remainder > needed) || (remainder == 0)))) { 106 - return -EINVAL; 107 - } 108 - skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG; 109 - } 110 101 111 102 if (!pskb_may_pull(skb, needed)) 112 103 return -EINVAL; ··· 169 180 if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS) 170 181 return -EINVAL; 171 182 if (gso_type != SKB_GSO_UDP_L4) 183 + return -EINVAL; 184 + break; 185 + case SKB_GSO_TCPV4: 186 + case SKB_GSO_TCPV6: 187 + if (skb->csum_offset != offsetof(struct tcphdr, check)) 172 188 return -EINVAL; 173 189 break; 174 190 }
+5
include/sound/cs35l56.h
··· 277 277 return 0; 278 278 } 279 279 280 + static inline bool cs35l56_is_otp_register(unsigned int reg) 281 + { 282 + return (reg >> 16) == 3; 283 + } 284 + 280 285 extern struct regmap_config cs35l56_regmap_i2c; 281 286 extern struct regmap_config cs35l56_regmap_spi; 282 287 extern struct regmap_config cs35l56_regmap_sdw;
+5
include/sound/soc-component.h
··· 462 462 const char *pin); 463 463 464 464 /* component controls */ 465 + struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component, 466 + const char * const ctl); 467 + struct snd_kcontrol * 468 + snd_soc_component_get_kcontrol_locked(struct snd_soc_component *component, 469 + const char * const ctl); 465 470 int snd_soc_component_notify_control(struct snd_soc_component *component, 466 471 const char * const ctl); 467 472
+1
include/sound/ump_convert.h
··· 13 13 unsigned char cc_nrpn_msb, cc_nrpn_lsb; 14 14 unsigned char cc_data_msb, cc_data_lsb; 15 15 unsigned char cc_bank_msb, cc_bank_lsb; 16 + bool cc_data_msb_set, cc_data_lsb_set; 16 17 }; 17 18 18 19 /* context for converting from MIDI1 byte stream to UMP packet */
+8
include/trace/events/btrfs.h
··· 2383 2383 TP_ARGS(fs_info, sinfo, old, diff) 2384 2384 ); 2385 2385 2386 + DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable, 2387 + 2388 + TP_PROTO(const struct btrfs_fs_info *fs_info, 2389 + const struct btrfs_space_info *sinfo, u64 old, s64 diff), 2390 + 2391 + TP_ARGS(fs_info, sinfo, old, diff) 2392 + ); 2393 + 2386 2394 DECLARE_EVENT_CLASS(btrfs_raid56_bio, 2387 2395 2388 2396 TP_PROTO(const struct btrfs_raid_bio *rbio,
+1 -1
include/trace/events/mptcp.h
··· 34 34 struct sock *ssk; 35 35 36 36 __entry->active = mptcp_subflow_active(subflow); 37 - __entry->backup = subflow->backup; 37 + __entry->backup = subflow->backup || subflow->request_bkup; 38 38 39 39 if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock)) 40 40 __entry->free = sk_stream_memory_free(subflow->tcp_sock);
+1 -4
include/uapi/asm-generic/unistd.h
··· 841 841 #define __NR_mseal 462 842 842 __SYSCALL(__NR_mseal, sys_mseal) 843 843 844 - #define __NR_uretprobe 463 845 - __SYSCALL(__NR_uretprobe, sys_uretprobe) 846 - 847 844 #undef __NR_syscalls 848 - #define __NR_syscalls 464 845 + #define __NR_syscalls 463 849 846 850 847 /* 851 848 * 32 bit systems traditionally used different
+1
include/ufs/ufshcd.h
··· 1109 1109 bool ext_iid_sup; 1110 1110 bool scsi_host_added; 1111 1111 bool mcq_sup; 1112 + bool lsdb_sup; 1112 1113 bool mcq_enabled; 1113 1114 struct ufshcd_res_info res[RES_MAX]; 1114 1115 void __iomem *mcq_base;
+1
include/ufs/ufshci.h
··· 77 77 MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, 78 78 MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, 79 79 MASK_CRYPTO_SUPPORT = 0x10000000, 80 + MASK_LSDB_SUPPORT = 0x20000000, 80 81 MASK_MCQ_SUPPORT = 0x40000000, 81 82 }; 82 83
+1
init/Kconfig
··· 1902 1902 depends on !MODVERSIONS 1903 1903 depends on !GCC_PLUGINS 1904 1904 depends on !RANDSTRUCT 1905 + depends on !SHADOW_CALL_STACK 1905 1906 depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE 1906 1907 help 1907 1908 Enables Rust support in the kernel.
-2
io_uring/napi.c
··· 205 205 void io_napi_free(struct io_ring_ctx *ctx) 206 206 { 207 207 struct io_napi_entry *e; 208 - LIST_HEAD(napi_list); 209 208 unsigned int i; 210 209 211 210 spin_lock(&ctx->napi_lock); ··· 314 315 */ 315 316 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx) 316 317 { 317 - LIST_HEAD(napi_list); 318 318 bool is_stale = false; 319 319 320 320 if (!READ_ONCE(ctx->napi_busy_poll_dt))
+5 -2
io_uring/net.c
··· 601 601 .iovs = &kmsg->fast_iov, 602 602 .max_len = INT_MAX, 603 603 .nr_iovs = 1, 604 - .mode = KBUF_MODE_EXPAND, 605 604 }; 606 605 607 606 if (kmsg->free_iov) { 608 607 arg.nr_iovs = kmsg->free_iov_nr; 609 608 arg.iovs = kmsg->free_iov; 610 - arg.mode |= KBUF_MODE_FREE; 609 + arg.mode = KBUF_MODE_FREE; 611 610 } 612 611 613 612 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) 614 613 arg.nr_iovs = 1; 614 + else 615 + arg.mode |= KBUF_MODE_EXPAND; 615 616 616 617 ret = io_buffers_select(req, &arg, issue_flags); 617 618 if (unlikely(ret < 0)) ··· 624 623 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { 625 624 kmsg->free_iov_nr = ret; 626 625 kmsg->free_iov = arg.iovs; 626 + req->flags |= REQ_F_NEED_CLEANUP; 627 627 } 628 628 } 629 629 ··· 1096 1094 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { 1097 1095 kmsg->free_iov_nr = ret; 1098 1096 kmsg->free_iov = arg.iovs; 1097 + req->flags |= REQ_F_NEED_CLEANUP; 1099 1098 } 1100 1099 } else { 1101 1100 void __user *buf;
+1
io_uring/poll.c
··· 347 347 v &= IO_POLL_REF_MASK; 348 348 } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK); 349 349 350 + io_napi_add(req); 350 351 return IOU_POLL_NO_ACTION; 351 352 } 352 353
+4 -1
kernel/dma/debug.c
··· 416 416 * dma_active_cacheline entry to track per event. dma_map_sg(), on the 417 417 * other hand, consumes a single dma_debug_entry, but inserts 'nents' 418 418 * entries into the tree. 419 + * 420 + * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end 421 + * up right back in the DMA debugging code, leading to a deadlock. 419 422 */ 420 - static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC); 423 + static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN); 421 424 static DEFINE_SPINLOCK(radix_lock); 422 425 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) 423 426 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+1
kernel/irq/irqdesc.c
··· 530 530 flags = IRQD_AFFINITY_MANAGED | 531 531 IRQD_MANAGED_SHUTDOWN; 532 532 } 533 + flags |= IRQD_AFFINITY_SET; 533 534 mask = &affinity->mask; 534 535 node = cpu_to_node(cpumask_first(mask)); 535 536 affinity++;
+2 -2
kernel/jump_label.c
··· 236 236 } 237 237 238 238 jump_label_lock(); 239 - if (atomic_cmpxchg(&key->enabled, 1, 0)) 239 + if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) 240 240 jump_label_update(key); 241 241 jump_label_unlock(); 242 242 } ··· 289 289 return; 290 290 291 291 guard(mutex)(&jump_label_mutex); 292 - if (atomic_cmpxchg(&key->enabled, 1, 0)) 292 + if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) 293 293 jump_label_update(key); 294 294 else 295 295 WARN_ON_ONCE(!static_key_slow_try_dec(key));
+12 -3
kernel/kcov.c
··· 161 161 kmsan_unpoison_memory(&area->list, sizeof(area->list)); 162 162 } 163 163 164 + /* 165 + * Unlike in_serving_softirq(), this function returns false when called during 166 + * a hardirq or an NMI that happened in the softirq context. 167 + */ 168 + static inline bool in_softirq_really(void) 169 + { 170 + return in_serving_softirq() && !in_hardirq() && !in_nmi(); 171 + } 172 + 164 173 static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) 165 174 { 166 175 unsigned int mode; ··· 179 170 * so we ignore code executed in interrupts, unless we are in a remote 180 171 * coverage collection section in a softirq. 181 172 */ 182 - if (!in_task() && !(in_serving_softirq() && t->kcov_softirq)) 173 + if (!in_task() && !(in_softirq_really() && t->kcov_softirq)) 183 174 return false; 184 175 mode = READ_ONCE(t->kcov_mode); 185 176 /* ··· 858 849 859 850 if (WARN_ON(!kcov_check_handle(handle, true, true, true))) 860 851 return; 861 - if (!in_task() && !in_serving_softirq()) 852 + if (!in_task() && !in_softirq_really()) 862 853 return; 863 854 864 855 local_lock_irqsave(&kcov_percpu_data.lock, flags); ··· 1000 991 int sequence; 1001 992 unsigned long flags; 1002 993 1003 - if (!in_task() && !in_serving_softirq()) 994 + if (!in_task() && !in_softirq_really()) 1004 995 return; 1005 996 1006 997 local_lock_irqsave(&kcov_percpu_data.lock, flags);
+2 -2
kernel/kprobes.c
··· 1557 1557 if (lookup_symbol_name(addr, symbuf)) 1558 1558 return false; 1559 1559 1560 - return str_has_prefix("__cfi_", symbuf) || 1561 - str_has_prefix("__pfx_", symbuf); 1560 + return str_has_prefix(symbuf, "__cfi_") || 1561 + str_has_prefix(symbuf, "__pfx_"); 1562 1562 } 1563 1563 1564 1564 static int check_kprobe_address_safe(struct kprobe *p,
+7
kernel/ksysfs.c
··· 92 92 const char *buf, size_t count) 93 93 { 94 94 int ret; 95 + static DEFINE_MUTEX(lock); 95 96 97 + /* 98 + * We need serialization, for profile_setup() initializes prof_on 99 + * value and profile_init() must not reallocate prof_buffer after 100 + * once allocated. 101 + */ 102 + guard(mutex)(&lock); 96 103 if (prof_on) 97 104 return -EEXIST; 98 105 /*
+6
kernel/locking/lockdep.c
··· 5936 5936 if (DEBUG_LOCKS_WARN_ON(!depth)) 5937 5937 return; 5938 5938 5939 + if (unlikely(lock->key == &__lockdep_no_track__)) 5940 + return; 5941 + 5939 5942 hlock = find_held_lock(curr, lock, depth, &i); 5940 5943 if (!hlock) { 5941 5944 print_lock_contention_bug(curr, lock, ip); ··· 5979 5976 * acquire, how the heck did that happen? 5980 5977 */ 5981 5978 if (DEBUG_LOCKS_WARN_ON(!depth)) 5979 + return; 5980 + 5981 + if (unlikely(lock->key == &__lockdep_no_track__)) 5982 5982 return; 5983 5983 5984 5984 hlock = find_held_lock(curr, lock, depth, &i);
+1 -1
kernel/locking/qspinlock_paravirt.h
··· 357 357 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) 358 358 { 359 359 struct pv_node *pn = (struct pv_node *)node; 360 - enum vcpu_state old = vcpu_halted; 360 + u8 old = vcpu_halted; 361 361 /* 362 362 * If the vCPU is indeed halted, advance its state to match that of 363 363 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
+32 -9
kernel/module/main.c
··· 3104 3104 struct idempotent *existing; 3105 3105 bool first; 3106 3106 3107 - u->ret = 0; 3107 + u->ret = -EINTR; 3108 3108 u->cookie = cookie; 3109 3109 init_completion(&u->complete); 3110 3110 ··· 3140 3140 hlist_for_each_entry_safe(pos, next, head, entry) { 3141 3141 if (pos->cookie != cookie) 3142 3142 continue; 3143 - hlist_del(&pos->entry); 3143 + hlist_del_init(&pos->entry); 3144 3144 pos->ret = ret; 3145 3145 complete(&pos->complete); 3146 3146 } 3147 3147 spin_unlock(&idem_lock); 3148 3148 return ret; 3149 + } 3150 + 3151 + /* 3152 + * Wait for the idempotent worker. 3153 + * 3154 + * If we get interrupted, we need to remove ourselves from the 3155 + * the idempotent list, and the completion may still come in. 3156 + * 3157 + * The 'idem_lock' protects against the race, and 'idem.ret' was 3158 + * initialized to -EINTR and is thus always the right return 3159 + * value even if the idempotent work then completes between 3160 + * the wait_for_completion and the cleanup. 3161 + */ 3162 + static int idempotent_wait_for_completion(struct idempotent *u) 3163 + { 3164 + if (wait_for_completion_interruptible(&u->complete)) { 3165 + spin_lock(&idem_lock); 3166 + if (!hlist_unhashed(&u->entry)) 3167 + hlist_del(&u->entry); 3168 + spin_unlock(&idem_lock); 3169 + } 3170 + return u->ret; 3149 3171 } 3150 3172 3151 3173 static int init_module_from_file(struct file *f, const char __user * uargs, int flags) ··· 3205 3183 if (!f || !(f->f_mode & FMODE_READ)) 3206 3184 return -EBADF; 3207 3185 3208 - /* See if somebody else is doing the operation? */ 3209 - if (idempotent(&idem, file_inode(f))) { 3210 - wait_for_completion(&idem.complete); 3211 - return idem.ret; 3186 + /* Are we the winners of the race and get to do this? 
*/ 3187 + if (!idempotent(&idem, file_inode(f))) { 3188 + int ret = init_module_from_file(f, uargs, flags); 3189 + return idempotent_complete(&idem, ret); 3212 3190 } 3213 3191 3214 - /* Otherwise, we'll do it and complete others */ 3215 - return idempotent_complete(&idem, 3216 - init_module_from_file(f, uargs, flags)); 3192 + /* 3193 + * Somebody else won the race and is loading the module. 3194 + */ 3195 + return idempotent_wait_for_completion(&idem); 3217 3196 } 3218 3197 3219 3198 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
+7
kernel/padata.c
··· 517 517 ps.chunk_size = max(ps.chunk_size, job->min_chunk); 518 518 ps.chunk_size = roundup(ps.chunk_size, job->align); 519 519 520 + /* 521 + * chunk_size can be 0 if the caller sets min_chunk to 0. So force it 522 + * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().` 523 + */ 524 + if (!ps.chunk_size) 525 + ps.chunk_size = 1U; 526 + 520 527 list_for_each_entry(pw, &works, pw_list) 521 528 if (job->numa_aware) { 522 529 int old_node = atomic_read(&last_used_nid);
+7 -235
kernel/profile.c
··· 47 47 int prof_on __read_mostly; 48 48 EXPORT_SYMBOL_GPL(prof_on); 49 49 50 - static cpumask_var_t prof_cpu_mask; 51 - #if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS) 52 - static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 53 - static DEFINE_PER_CPU(int, cpu_profile_flip); 54 - static DEFINE_MUTEX(profile_flip_mutex); 55 - #endif /* CONFIG_SMP */ 56 - 57 50 int profile_setup(char *str) 58 51 { 59 52 static const char schedstr[] = "schedule"; 60 - static const char sleepstr[] = "sleep"; 61 53 static const char kvmstr[] = "kvm"; 62 54 const char *select = NULL; 63 55 int par; 64 56 65 - if (!strncmp(str, sleepstr, strlen(sleepstr))) { 66 - #ifdef CONFIG_SCHEDSTATS 67 - force_schedstat_enabled(); 68 - prof_on = SLEEP_PROFILING; 69 - select = sleepstr; 70 - #else 71 - pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); 72 - #endif /* CONFIG_SCHEDSTATS */ 73 - } else if (!strncmp(str, schedstr, strlen(schedstr))) { 57 + if (!strncmp(str, schedstr, strlen(schedstr))) { 74 58 prof_on = SCHED_PROFILING; 75 59 select = schedstr; 76 60 } else if (!strncmp(str, kvmstr, strlen(kvmstr))) { ··· 98 114 99 115 buffer_bytes = prof_len*sizeof(atomic_t); 100 116 101 - if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) 102 - return -ENOMEM; 103 - 104 - cpumask_copy(prof_cpu_mask, cpu_possible_mask); 105 - 106 117 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN); 107 118 if (prof_buffer) 108 119 return 0; ··· 111 132 if (prof_buffer) 112 133 return 0; 113 134 114 - free_cpumask_var(prof_cpu_mask); 115 135 return -ENOMEM; 116 136 } 117 - 118 - #if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS) 119 - /* 120 - * Each cpu has a pair of open-addressed hashtables for pending 121 - * profile hits. read_profile() IPI's all cpus to request them 122 - * to flip buffers and flushes their contents to prof_buffer itself. 123 - * Flip requests are serialized by the profile_flip_mutex. 
The sole 124 - * use of having a second hashtable is for avoiding cacheline 125 - * contention that would otherwise happen during flushes of pending 126 - * profile hits required for the accuracy of reported profile hits 127 - * and so resurrect the interrupt livelock issue. 128 - * 129 - * The open-addressed hashtables are indexed by profile buffer slot 130 - * and hold the number of pending hits to that profile buffer slot on 131 - * a cpu in an entry. When the hashtable overflows, all pending hits 132 - * are accounted to their corresponding profile buffer slots with 133 - * atomic_add() and the hashtable emptied. As numerous pending hits 134 - * may be accounted to a profile buffer slot in a hashtable entry, 135 - * this amortizes a number of atomic profile buffer increments likely 136 - * to be far larger than the number of entries in the hashtable, 137 - * particularly given that the number of distinct profile buffer 138 - * positions to which hits are accounted during short intervals (e.g. 139 - * several seconds) is usually very small. Exclusion from buffer 140 - * flipping is provided by interrupt disablement (note that for 141 - * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from 142 - * process context). 143 - * The hash function is meant to be lightweight as opposed to strong, 144 - * and was vaguely inspired by ppc64 firmware-supported inverted 145 - * pagetable hash functions, but uses a full hashtable full of finite 146 - * collision chains, not just pairs of them. 
147 - * 148 - * -- nyc 149 - */ 150 - static void __profile_flip_buffers(void *unused) 151 - { 152 - int cpu = smp_processor_id(); 153 - 154 - per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu); 155 - } 156 - 157 - static void profile_flip_buffers(void) 158 - { 159 - int i, j, cpu; 160 - 161 - mutex_lock(&profile_flip_mutex); 162 - j = per_cpu(cpu_profile_flip, get_cpu()); 163 - put_cpu(); 164 - on_each_cpu(__profile_flip_buffers, NULL, 1); 165 - for_each_online_cpu(cpu) { 166 - struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; 167 - for (i = 0; i < NR_PROFILE_HIT; ++i) { 168 - if (!hits[i].hits) { 169 - if (hits[i].pc) 170 - hits[i].pc = 0; 171 - continue; 172 - } 173 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); 174 - hits[i].hits = hits[i].pc = 0; 175 - } 176 - } 177 - mutex_unlock(&profile_flip_mutex); 178 - } 179 - 180 - static void profile_discard_flip_buffers(void) 181 - { 182 - int i, cpu; 183 - 184 - mutex_lock(&profile_flip_mutex); 185 - i = per_cpu(cpu_profile_flip, get_cpu()); 186 - put_cpu(); 187 - on_each_cpu(__profile_flip_buffers, NULL, 1); 188 - for_each_online_cpu(cpu) { 189 - struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; 190 - memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); 191 - } 192 - mutex_unlock(&profile_flip_mutex); 193 - } 194 - 195 - static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) 196 - { 197 - unsigned long primary, secondary, flags, pc = (unsigned long)__pc; 198 - int i, j, cpu; 199 - struct profile_hit *hits; 200 - 201 - pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1); 202 - i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; 203 - secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; 204 - cpu = get_cpu(); 205 - hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)]; 206 - if (!hits) { 207 - put_cpu(); 208 - return; 209 - } 210 - /* 211 - * We buffer the global profiler buffer into a per-CPU 
212 - * queue and thus reduce the number of global (and possibly 213 - * NUMA-alien) accesses. The write-queue is self-coalescing: 214 - */ 215 - local_irq_save(flags); 216 - do { 217 - for (j = 0; j < PROFILE_GRPSZ; ++j) { 218 - if (hits[i + j].pc == pc) { 219 - hits[i + j].hits += nr_hits; 220 - goto out; 221 - } else if (!hits[i + j].hits) { 222 - hits[i + j].pc = pc; 223 - hits[i + j].hits = nr_hits; 224 - goto out; 225 - } 226 - } 227 - i = (i + secondary) & (NR_PROFILE_HIT - 1); 228 - } while (i != primary); 229 - 230 - /* 231 - * Add the current hit(s) and flush the write-queue out 232 - * to the global buffer: 233 - */ 234 - atomic_add(nr_hits, &prof_buffer[pc]); 235 - for (i = 0; i < NR_PROFILE_HIT; ++i) { 236 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); 237 - hits[i].pc = hits[i].hits = 0; 238 - } 239 - out: 240 - local_irq_restore(flags); 241 - put_cpu(); 242 - } 243 - 244 - static int profile_dead_cpu(unsigned int cpu) 245 - { 246 - struct page *page; 247 - int i; 248 - 249 - if (cpumask_available(prof_cpu_mask)) 250 - cpumask_clear_cpu(cpu, prof_cpu_mask); 251 - 252 - for (i = 0; i < 2; i++) { 253 - if (per_cpu(cpu_profile_hits, cpu)[i]) { 254 - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]); 255 - per_cpu(cpu_profile_hits, cpu)[i] = NULL; 256 - __free_page(page); 257 - } 258 - } 259 - return 0; 260 - } 261 - 262 - static int profile_prepare_cpu(unsigned int cpu) 263 - { 264 - int i, node = cpu_to_mem(cpu); 265 - struct page *page; 266 - 267 - per_cpu(cpu_profile_flip, cpu) = 0; 268 - 269 - for (i = 0; i < 2; i++) { 270 - if (per_cpu(cpu_profile_hits, cpu)[i]) 271 - continue; 272 - 273 - page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); 274 - if (!page) { 275 - profile_dead_cpu(cpu); 276 - return -ENOMEM; 277 - } 278 - per_cpu(cpu_profile_hits, cpu)[i] = page_address(page); 279 - 280 - } 281 - return 0; 282 - } 283 - 284 - static int profile_online_cpu(unsigned int cpu) 285 - { 286 - if (cpumask_available(prof_cpu_mask)) 287 
- cpumask_set_cpu(cpu, prof_cpu_mask); 288 - 289 - return 0; 290 - } 291 - 292 - #else /* !CONFIG_SMP */ 293 - #define profile_flip_buffers() do { } while (0) 294 - #define profile_discard_flip_buffers() do { } while (0) 295 137 296 138 static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) 297 139 { 298 140 unsigned long pc; 299 141 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; 300 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); 142 + if (pc < prof_len) 143 + atomic_add(nr_hits, &prof_buffer[pc]); 301 144 } 302 - #endif /* !CONFIG_SMP */ 303 145 304 146 void profile_hits(int type, void *__pc, unsigned int nr_hits) 305 147 { ··· 134 334 { 135 335 struct pt_regs *regs = get_irq_regs(); 136 336 137 - if (!user_mode(regs) && cpumask_available(prof_cpu_mask) && 138 - cpumask_test_cpu(smp_processor_id(), prof_cpu_mask)) 337 + /* This is the old kernel-only legacy profiling */ 338 + if (!user_mode(regs)) 139 339 profile_hit(type, (void *)profile_pc(regs)); 140 340 } 141 341 ··· 158 358 char *pnt; 159 359 unsigned long sample_step = 1UL << prof_shift; 160 360 161 - profile_flip_buffers(); 162 361 if (p >= (prof_len+1)*sizeof(unsigned int)) 163 362 return 0; 164 363 if (count > (prof_len+1)*sizeof(unsigned int) - p) ··· 203 404 return -EINVAL; 204 405 } 205 406 #endif 206 - profile_discard_flip_buffers(); 207 407 memset(prof_buffer, 0, prof_len * sizeof(atomic_t)); 208 408 return count; 209 409 } ··· 216 418 int __ref create_proc_profile(void) 217 419 { 218 420 struct proc_dir_entry *entry; 219 - #ifdef CONFIG_SMP 220 - enum cpuhp_state online_state; 221 - #endif 222 - 223 421 int err = 0; 224 422 225 423 if (!prof_on) 226 424 return 0; 227 - #ifdef CONFIG_SMP 228 - err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE", 229 - profile_prepare_cpu, profile_dead_cpu); 230 - if (err) 231 - return err; 232 - 233 - err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE", 234 - profile_online_cpu, NULL); 235 - 
if (err < 0) 236 - goto err_state_prep; 237 - online_state = err; 238 - err = 0; 239 - #endif 240 425 entry = proc_create("profile", S_IWUSR | S_IRUGO, 241 426 NULL, &profile_proc_ops); 242 - if (!entry) 243 - goto err_state_onl; 244 - proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); 245 - 246 - return err; 247 - err_state_onl: 248 - #ifdef CONFIG_SMP 249 - cpuhp_remove_state(online_state); 250 - err_state_prep: 251 - cpuhp_remove_state(CPUHP_PROFILE_PREPARE); 252 - #endif 427 + if (entry) 428 + proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); 253 429 return err; 254 430 } 255 431 subsys_initcall(create_proc_profile);
+47 -21
kernel/sched/core.c
··· 7845 7845 } 7846 7846 } 7847 7847 7848 + static inline void sched_set_rq_online(struct rq *rq, int cpu) 7849 + { 7850 + struct rq_flags rf; 7851 + 7852 + rq_lock_irqsave(rq, &rf); 7853 + if (rq->rd) { 7854 + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7855 + set_rq_online(rq); 7856 + } 7857 + rq_unlock_irqrestore(rq, &rf); 7858 + } 7859 + 7860 + static inline void sched_set_rq_offline(struct rq *rq, int cpu) 7861 + { 7862 + struct rq_flags rf; 7863 + 7864 + rq_lock_irqsave(rq, &rf); 7865 + if (rq->rd) { 7866 + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7867 + set_rq_offline(rq); 7868 + } 7869 + rq_unlock_irqrestore(rq, &rf); 7870 + } 7871 + 7848 7872 /* 7849 7873 * used to mark begin/end of suspend/resume: 7850 7874 */ ··· 7919 7895 return 0; 7920 7896 } 7921 7897 7898 + static inline void sched_smt_present_inc(int cpu) 7899 + { 7900 + #ifdef CONFIG_SCHED_SMT 7901 + if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 7902 + static_branch_inc_cpuslocked(&sched_smt_present); 7903 + #endif 7904 + } 7905 + 7906 + static inline void sched_smt_present_dec(int cpu) 7907 + { 7908 + #ifdef CONFIG_SCHED_SMT 7909 + if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 7910 + static_branch_dec_cpuslocked(&sched_smt_present); 7911 + #endif 7912 + } 7913 + 7922 7914 int sched_cpu_activate(unsigned int cpu) 7923 7915 { 7924 7916 struct rq *rq = cpu_rq(cpu); 7925 - struct rq_flags rf; 7926 7917 7927 7918 /* 7928 7919 * Clear the balance_push callback and prepare to schedule ··· 7945 7906 */ 7946 7907 balance_push_set(cpu, false); 7947 7908 7948 - #ifdef CONFIG_SCHED_SMT 7949 7909 /* 7950 7910 * When going up, increment the number of cores with SMT present. 
7951 7911 */ 7952 - if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 7953 - static_branch_inc_cpuslocked(&sched_smt_present); 7954 - #endif 7912 + sched_smt_present_inc(cpu); 7955 7913 set_cpu_active(cpu, true); 7956 7914 7957 7915 if (sched_smp_initialized) { ··· 7966 7930 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 7967 7931 * domains. 7968 7932 */ 7969 - rq_lock_irqsave(rq, &rf); 7970 - if (rq->rd) { 7971 - BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7972 - set_rq_online(rq); 7973 - } 7974 - rq_unlock_irqrestore(rq, &rf); 7933 + sched_set_rq_online(rq, cpu); 7975 7934 7976 7935 return 0; 7977 7936 } ··· 7974 7943 int sched_cpu_deactivate(unsigned int cpu) 7975 7944 { 7976 7945 struct rq *rq = cpu_rq(cpu); 7977 - struct rq_flags rf; 7978 7946 int ret; 7979 7947 7980 7948 /* ··· 8004 7974 */ 8005 7975 synchronize_rcu(); 8006 7976 8007 - rq_lock_irqsave(rq, &rf); 8008 - if (rq->rd) { 8009 - BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 8010 - set_rq_offline(rq); 8011 - } 8012 - rq_unlock_irqrestore(rq, &rf); 7977 + sched_set_rq_offline(rq, cpu); 8013 7978 8014 - #ifdef CONFIG_SCHED_SMT 8015 7979 /* 8016 7980 * When going down, decrement the number of cores with SMT present. 8017 7981 */ 8018 - if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 8019 - static_branch_dec_cpuslocked(&sched_smt_present); 7982 + sched_smt_present_dec(cpu); 8020 7983 7984 + #ifdef CONFIG_SCHED_SMT 8021 7985 sched_core_cpu_deactivate(cpu); 8022 7986 #endif 8023 7987 ··· 8021 7997 sched_update_numa(cpu, false); 8022 7998 ret = cpuset_cpu_inactive(cpu); 8023 7999 if (ret) { 8000 + sched_smt_present_inc(cpu); 8001 + sched_set_rq_online(rq, cpu); 8024 8002 balance_push_set(cpu, false); 8025 8003 set_cpu_active(cpu, true); 8026 8004 sched_update_numa(cpu, true);
+6
kernel/sched/cputime.c
··· 582 582 } 583 583 584 584 stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); 585 + /* 586 + * Because mul_u64_u64_div_u64() can approximate on some 587 + * architectures; enforce the constraint that: a*b/(b+c) <= a. 588 + */ 589 + if (unlikely(stime > rtime)) 590 + stime = rtime; 585 591 586 592 update: 587 593 /*
-10
kernel/sched/stats.c
··· 92 92 93 93 trace_sched_stat_blocked(p, delta); 94 94 95 - /* 96 - * Blocking time is in units of nanosecs, so shift by 97 - * 20 to get a milliseconds-range estimation of the 98 - * amount of time that the task spent sleeping: 99 - */ 100 - if (unlikely(prof_on == SLEEP_PROFILING)) { 101 - profile_hits(SLEEP_PROFILING, 102 - (void *)get_wchan(p), 103 - delta >> 20); 104 - } 105 95 account_scheduler_latency(p, delta >> 10, 0); 106 96 } 107 97 }
+6
kernel/task_work.c
··· 6 6 7 7 static struct callback_head work_exited; /* all we need is ->next == NULL */ 8 8 9 + #ifdef CONFIG_IRQ_WORK 9 10 static void task_work_set_notify_irq(struct irq_work *entry) 10 11 { 11 12 test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME); 12 13 } 13 14 static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) = 14 15 IRQ_WORK_INIT_HARD(task_work_set_notify_irq); 16 + #endif 15 17 16 18 /** 17 19 * task_work_add - ask the @task to execute @work->func() ··· 59 57 if (notify == TWA_NMI_CURRENT) { 60 58 if (WARN_ON_ONCE(task != current)) 61 59 return -EINVAL; 60 + if (!IS_ENABLED(CONFIG_IRQ_WORK)) 61 + return -EINVAL; 62 62 } else { 63 63 /* record the work call stack in order to print it in KASAN reports */ 64 64 kasan_record_aux_stack(work); ··· 85 81 case TWA_SIGNAL_NO_IPI: 86 82 __set_notify_signal(task); 87 83 break; 84 + #ifdef CONFIG_IRQ_WORK 88 85 case TWA_NMI_CURRENT: 89 86 irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume)); 90 87 break; 88 + #endif 91 89 default: 92 90 WARN_ON_ONCE(1); 93 91 break;
+1 -1
kernel/time/clocksource.c
··· 246 246 247 247 wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end); 248 248 if (wd_delay <= WATCHDOG_MAX_SKEW) { 249 - if (nretries > 1 || nretries >= max_retries) { 249 + if (nretries > 1 && nretries >= max_retries) { 250 250 pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n", 251 251 smp_processor_id(), watchdog->name, nretries); 252 252 }
+4 -5
kernel/time/ntp.c
··· 727 727 } 728 728 729 729 if (txc->modes & ADJ_MAXERROR) 730 - time_maxerror = txc->maxerror; 730 + time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT); 731 731 732 732 if (txc->modes & ADJ_ESTERROR) 733 - time_esterror = txc->esterror; 733 + time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT); 734 734 735 735 if (txc->modes & ADJ_TIMECONST) { 736 - time_constant = txc->constant; 736 + time_constant = clamp(txc->constant, 0, MAXTC); 737 737 if (!(time_status & STA_NANO)) 738 738 time_constant += 4; 739 - time_constant = min(time_constant, (long)MAXTC); 740 - time_constant = max(time_constant, 0l); 739 + time_constant = clamp(time_constant, 0, MAXTC); 741 740 } 742 741 743 742 if (txc->modes & ADJ_TAI &&
+2 -1
kernel/time/tick-broadcast.c
··· 1141 1141 #ifdef CONFIG_HOTPLUG_CPU 1142 1142 void hotplug_cpu__broadcast_tick_pull(int deadcpu) 1143 1143 { 1144 - struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 1145 1144 struct clock_event_device *bc; 1146 1145 unsigned long flags; 1147 1146 ··· 1166 1167 * device to avoid the starvation. 1167 1168 */ 1168 1169 if (tick_check_broadcast_expired()) { 1170 + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 1171 + 1169 1172 cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask); 1170 1173 tick_program_event(td->evtdev->next_event, 1); 1171 1174 }
+1 -1
kernel/time/timekeeping.c
··· 2606 2606 clock_set |= timekeeping_advance(TK_ADV_FREQ); 2607 2607 2608 2608 if (clock_set) 2609 - clock_was_set(CLOCK_REALTIME); 2609 + clock_was_set(CLOCK_SET_WALL); 2610 2610 2611 2611 ntp_notify_cmos_timer(); 2612 2612
+1 -1
kernel/trace/fgraph.c
··· 902 902 903 903 i = *idx ? : task->curr_ret_stack; 904 904 while (i > 0) { 905 - ret_stack = get_ret_stack(current, i, &i); 905 + ret_stack = get_ret_stack(task, i, &i); 906 906 if (!ret_stack) 907 907 break; 908 908 /*
-2
kernel/trace/preemptirq_delay_test.c
··· 34 34 35 35 static struct completion done; 36 36 37 - #define MIN(x, y) ((x) < (y) ? (x) : (y)) 38 - 39 37 static void busy_wait(ulong time) 40 38 { 41 39 u64 start, end;
-12
kernel/trace/ring_buffer.c
··· 693 693 } 694 694 695 695 /** 696 - * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer 697 - * @buffer: The ring_buffer to get the number of pages from 698 - * @cpu: The cpu of the ring_buffer to get the number of pages from 699 - * 700 - * Returns the number of pages used by a per_cpu buffer of the ring buffer. 701 - */ 702 - size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) 703 - { 704 - return buffer->buffers[cpu]->nr_pages; 705 - } 706 - 707 - /** 708 696 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer 709 697 * @buffer: The ring_buffer to get the number of pages from 710 698 * @cpu: The cpu of the ring_buffer to get the number of pages from
+23
kernel/trace/trace.h
··· 1634 1634 extern struct mutex event_mutex; 1635 1635 extern struct list_head ftrace_events; 1636 1636 1637 + /* 1638 + * When the trace_event_file is the filp->i_private pointer, 1639 + * it must be taken under the event_mutex lock, and then checked 1640 + * if the EVENT_FILE_FL_FREED flag is set. If it is, then the 1641 + * data pointed to by the trace_event_file can not be trusted. 1642 + * 1643 + * Use the event_file_file() to access the trace_event_file from 1644 + * the filp the first time under the event_mutex and check for 1645 + * NULL. If it is needed to be retrieved again and the event_mutex 1646 + * is still held, then the event_file_data() can be used and it 1647 + * is guaranteed to be valid. 1648 + */ 1649 + static inline struct trace_event_file *event_file_file(struct file *filp) 1650 + { 1651 + struct trace_event_file *file; 1652 + 1653 + lockdep_assert_held(&event_mutex); 1654 + file = READ_ONCE(file_inode(filp)->i_private); 1655 + if (!file || file->flags & EVENT_FILE_FL_FREED) 1656 + return NULL; 1657 + return file; 1658 + } 1659 + 1637 1660 extern const struct file_operations event_trigger_fops; 1638 1661 extern const struct file_operations event_hist_fops; 1639 1662 extern const struct file_operations event_hist_debug_fops;
+24 -17
kernel/trace/trace_events.c
··· 992 992 993 993 void event_file_get(struct trace_event_file *file) 994 994 { 995 - atomic_inc(&file->ref); 995 + refcount_inc(&file->ref); 996 996 } 997 997 998 998 void event_file_put(struct trace_event_file *file) 999 999 { 1000 - if (WARN_ON_ONCE(!atomic_read(&file->ref))) { 1000 + if (WARN_ON_ONCE(!refcount_read(&file->ref))) { 1001 1001 if (file->flags & EVENT_FILE_FL_FREED) 1002 1002 kmem_cache_free(file_cachep, file); 1003 1003 return; 1004 1004 } 1005 1005 1006 - if (atomic_dec_and_test(&file->ref)) { 1006 + if (refcount_dec_and_test(&file->ref)) { 1007 1007 /* Count should only go to zero when it is freed */ 1008 1008 if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED))) 1009 1009 return; ··· 1386 1386 char buf[4] = "0"; 1387 1387 1388 1388 mutex_lock(&event_mutex); 1389 - file = event_file_data(filp); 1389 + file = event_file_file(filp); 1390 1390 if (likely(file)) 1391 1391 flags = file->flags; 1392 1392 mutex_unlock(&event_mutex); 1393 1393 1394 - if (!file || flags & EVENT_FILE_FL_FREED) 1394 + if (!file) 1395 1395 return -ENODEV; 1396 1396 1397 1397 if (flags & EVENT_FILE_FL_ENABLED && ··· 1424 1424 case 1: 1425 1425 ret = -ENODEV; 1426 1426 mutex_lock(&event_mutex); 1427 - file = event_file_data(filp); 1428 - if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) { 1427 + file = event_file_file(filp); 1428 + if (likely(file)) { 1429 1429 ret = tracing_update_buffers(file->tr); 1430 1430 if (ret < 0) { 1431 1431 mutex_unlock(&event_mutex); ··· 1540 1540 1541 1541 static void *f_next(struct seq_file *m, void *v, loff_t *pos) 1542 1542 { 1543 - struct trace_event_call *call = event_file_data(m->private); 1543 + struct trace_event_file *file = event_file_data(m->private); 1544 + struct trace_event_call *call = file->event_call; 1544 1545 struct list_head *common_head = &ftrace_common_fields; 1545 1546 struct list_head *head = trace_get_fields(call); 1546 1547 struct list_head *node = v; ··· 1573 1572 1574 1573 static int f_show(struct seq_file 
*m, void *v) 1575 1574 { 1576 - struct trace_event_call *call = event_file_data(m->private); 1575 + struct trace_event_file *file = event_file_data(m->private); 1576 + struct trace_event_call *call = file->event_call; 1577 1577 struct ftrace_event_field *field; 1578 1578 const char *array_descriptor; 1579 1579 ··· 1629 1627 1630 1628 static void *f_start(struct seq_file *m, loff_t *pos) 1631 1629 { 1630 + struct trace_event_file *file; 1632 1631 void *p = (void *)FORMAT_HEADER; 1633 1632 loff_t l = 0; 1634 1633 1635 1634 /* ->stop() is called even if ->start() fails */ 1636 1635 mutex_lock(&event_mutex); 1637 - if (!event_file_data(m->private)) 1636 + file = event_file_file(m->private); 1637 + if (!file) 1638 1638 return ERR_PTR(-ENODEV); 1639 1639 1640 1640 while (l < *pos && p) ··· 1710 1706 trace_seq_init(s); 1711 1707 1712 1708 mutex_lock(&event_mutex); 1713 - file = event_file_data(filp); 1714 - if (file && !(file->flags & EVENT_FILE_FL_FREED)) 1709 + file = event_file_file(filp); 1710 + if (file) 1715 1711 print_event_filter(file, s); 1716 1712 mutex_unlock(&event_mutex); 1717 1713 ··· 1740 1736 return PTR_ERR(buf); 1741 1737 1742 1738 mutex_lock(&event_mutex); 1743 - file = event_file_data(filp); 1744 - if (file) 1745 - err = apply_event_filter(file, buf); 1739 + file = event_file_file(filp); 1740 + if (file) { 1741 + if (file->flags & EVENT_FILE_FL_FREED) 1742 + err = -ENODEV; 1743 + else 1744 + err = apply_event_filter(file, buf); 1745 + } 1746 1746 mutex_unlock(&event_mutex); 1747 1747 1748 1748 kfree(buf); ··· 2493 2485 if (strcmp(name, "format") == 0) { 2494 2486 *mode = TRACE_MODE_READ; 2495 2487 *fops = &ftrace_event_format_fops; 2496 - *data = call; 2497 2488 return 1; 2498 2489 } 2499 2490 ··· 3003 2996 atomic_set(&file->tm_ref, 0); 3004 2997 INIT_LIST_HEAD(&file->triggers); 3005 2998 list_add(&file->list, &tr->events); 3006 - event_file_get(file); 2999 + refcount_set(&file->ref, 1); 3007 3000 3008 3001 return file; 3009 3002 }
+2 -2
kernel/trace/trace_events_hist.c
··· 5601 5601 5602 5602 mutex_lock(&event_mutex); 5603 5603 5604 - event_file = event_file_data(m->private); 5604 + event_file = event_file_file(m->private); 5605 5605 if (unlikely(!event_file)) { 5606 5606 ret = -ENODEV; 5607 5607 goto out_unlock; ··· 5880 5880 5881 5881 mutex_lock(&event_mutex); 5882 5882 5883 - event_file = event_file_data(m->private); 5883 + event_file = event_file_file(m->private); 5884 5884 if (unlikely(!event_file)) { 5885 5885 ret = -ENODEV; 5886 5886 goto out_unlock;
+1 -1
kernel/trace/trace_events_inject.c
··· 299 299 strim(buf); 300 300 301 301 mutex_lock(&event_mutex); 302 - file = event_file_data(filp); 302 + file = event_file_file(filp); 303 303 if (file) { 304 304 call = file->event_call; 305 305 size = parse_entry(buf, call, &entry);
+3 -3
kernel/trace/trace_events_trigger.c
··· 159 159 160 160 /* ->stop() is called even if ->start() fails */ 161 161 mutex_lock(&event_mutex); 162 - event_file = event_file_data(m->private); 162 + event_file = event_file_file(m->private); 163 163 if (unlikely(!event_file)) 164 164 return ERR_PTR(-ENODEV); 165 165 ··· 213 213 214 214 mutex_lock(&event_mutex); 215 215 216 - if (unlikely(!event_file_data(file))) { 216 + if (unlikely(!event_file_file(file))) { 217 217 mutex_unlock(&event_mutex); 218 218 return -ENODEV; 219 219 } ··· 293 293 strim(buf); 294 294 295 295 mutex_lock(&event_mutex); 296 - event_file = event_file_data(file); 296 + event_file = event_file_file(file); 297 297 if (unlikely(!event_file)) { 298 298 mutex_unlock(&event_mutex); 299 299 kfree(buf);
+3 -3
kernel/trace/tracing_map.c
··· 454 454 struct tracing_map_elt *elt = NULL; 455 455 int idx; 456 456 457 - idx = atomic_inc_return(&map->next_elt); 457 + idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts); 458 458 if (idx < map->max_elts) { 459 459 elt = *(TRACING_MAP_ELT(map->elts, idx)); 460 460 if (map->ops && map->ops->elt_init) ··· 699 699 { 700 700 unsigned int i; 701 701 702 - atomic_set(&map->next_elt, -1); 702 + atomic_set(&map->next_elt, 0); 703 703 atomic64_set(&map->hits, 0); 704 704 atomic64_set(&map->drops, 0); 705 705 ··· 783 783 784 784 map->map_bits = map_bits; 785 785 map->max_elts = (1 << map_bits); 786 - atomic_set(&map->next_elt, -1); 786 + atomic_set(&map->next_elt, 0); 787 787 788 788 map->map_size = (1 << (map_bits + 1)); 789 789 map->ops = ops;
-1
lib/btree.c
··· 43 43 #include <linux/slab.h> 44 44 #include <linux/module.h> 45 45 46 - #define MAX(a, b) ((a) > (b) ? (a) : (b)) 47 46 #define NODESIZE MAX(L1_CACHE_BYTES, 128) 48 47 49 48 struct btree_geo {
+2
lib/decompress_unlzma.c
··· 37 37 38 38 #include <linux/decompress/mm.h> 39 39 40 + #ifndef MIN 40 41 #define MIN(a, b) (((a) < (b)) ? (a) : (b)) 42 + #endif 41 43 42 44 static long long INIT read_int(unsigned char *ptr, int size) 43 45 {
+1 -1
lib/vsprintf.c
··· 1080 1080 #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) 1081 1081 #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") 1082 1082 #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") 1083 - char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 1083 + char sym[MAX(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 1084 1084 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; 1085 1085 1086 1086 char *p = sym, *pend = sym + sizeof(sym);
+22 -6
mm/list_lru.c
··· 85 85 } 86 86 #endif /* CONFIG_MEMCG */ 87 87 88 + /* The caller must ensure the memcg lifetime. */ 88 89 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid, 89 90 struct mem_cgroup *memcg) 90 91 { ··· 110 109 111 110 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item) 112 111 { 112 + bool ret; 113 113 int nid = page_to_nid(virt_to_page(item)); 114 - struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ? 115 - mem_cgroup_from_slab_obj(item) : NULL; 116 114 117 - return list_lru_add(lru, item, nid, memcg); 115 + if (list_lru_memcg_aware(lru)) { 116 + rcu_read_lock(); 117 + ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item)); 118 + rcu_read_unlock(); 119 + } else { 120 + ret = list_lru_add(lru, item, nid, NULL); 121 + } 122 + 123 + return ret; 118 124 } 119 125 EXPORT_SYMBOL_GPL(list_lru_add_obj); 120 126 127 + /* The caller must ensure the memcg lifetime. */ 121 128 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid, 122 129 struct mem_cgroup *memcg) 123 130 { ··· 148 139 149 140 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item) 150 141 { 142 + bool ret; 151 143 int nid = page_to_nid(virt_to_page(item)); 152 - struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ? 153 - mem_cgroup_from_slab_obj(item) : NULL; 154 144 155 - return list_lru_del(lru, item, nid, memcg); 145 + if (list_lru_memcg_aware(lru)) { 146 + rcu_read_lock(); 147 + ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item)); 148 + rcu_read_unlock(); 149 + } else { 150 + ret = list_lru_del(lru, item, nid, NULL); 151 + } 152 + 153 + return ret; 156 154 } 157 155 EXPORT_SYMBOL_GPL(list_lru_del_obj); 158 156
+20 -2
mm/memcontrol.c
··· 3386 3386 3387 3387 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1) 3388 3388 static DEFINE_IDR(mem_cgroup_idr); 3389 + static DEFINE_SPINLOCK(memcg_idr_lock); 3390 + 3391 + static int mem_cgroup_alloc_id(void) 3392 + { 3393 + int ret; 3394 + 3395 + idr_preload(GFP_KERNEL); 3396 + spin_lock(&memcg_idr_lock); 3397 + ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1, 3398 + GFP_NOWAIT); 3399 + spin_unlock(&memcg_idr_lock); 3400 + idr_preload_end(); 3401 + return ret; 3402 + } 3389 3403 3390 3404 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 3391 3405 { 3392 3406 if (memcg->id.id > 0) { 3407 + spin_lock(&memcg_idr_lock); 3393 3408 idr_remove(&mem_cgroup_idr, memcg->id.id); 3409 + spin_unlock(&memcg_idr_lock); 3410 + 3394 3411 memcg->id.id = 0; 3395 3412 } 3396 3413 } ··· 3541 3524 if (!memcg) 3542 3525 return ERR_PTR(error); 3543 3526 3544 - memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 3545 - 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL); 3527 + memcg->id.id = mem_cgroup_alloc_id(); 3546 3528 if (memcg->id.id < 0) { 3547 3529 error = memcg->id.id; 3548 3530 goto fail; ··· 3683 3667 * publish it here at the end of onlining. This matches the 3684 3668 * regular ID destruction during offlining. 3685 3669 */ 3670 + spin_lock(&memcg_idr_lock); 3686 3671 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 3672 + spin_unlock(&memcg_idr_lock); 3687 3673 3688 3674 return 0; 3689 3675 offline_kmem:
+5 -9
mm/shmem.c
··· 1629 1629 unsigned long mask = READ_ONCE(huge_shmem_orders_always); 1630 1630 unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); 1631 1631 unsigned long vm_flags = vma->vm_flags; 1632 - /* 1633 - * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that 1634 - * are enabled for this vma. 1635 - */ 1636 - unsigned long orders = BIT(PMD_ORDER + 1) - 1; 1637 1632 loff_t i_size; 1638 1633 int order; 1639 1634 ··· 1673 1678 if (global_huge) 1674 1679 mask |= READ_ONCE(huge_shmem_orders_inherit); 1675 1680 1676 - return orders & mask; 1681 + return THP_ORDERS_ALL_FILE_DEFAULT & mask; 1677 1682 } 1678 1683 1679 1684 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, ··· 1681 1686 unsigned long orders) 1682 1687 { 1683 1688 struct vm_area_struct *vma = vmf->vma; 1689 + pgoff_t aligned_index; 1684 1690 unsigned long pages; 1685 1691 int order; 1686 1692 ··· 1693 1697 order = highest_order(orders); 1694 1698 while (orders) { 1695 1699 pages = 1UL << order; 1696 - index = round_down(index, pages); 1697 - if (!xa_find(&mapping->i_pages, &index, 1698 - index + pages - 1, XA_PRESENT)) 1700 + aligned_index = round_down(index, pages); 1701 + if (!xa_find(&mapping->i_pages, &aligned_index, 1702 + aligned_index + pages - 1, XA_PRESENT)) 1699 1703 break; 1700 1704 order = next_order(&orders, order); 1701 1705 }
+3
mm/slub.c
··· 4690 4690 if (!df.slab) 4691 4691 continue; 4692 4692 4693 + if (kfence_free(df.freelist)) 4694 + continue; 4695 + 4693 4696 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 4694 4697 _RET_IP_); 4695 4698 } while (likely(size));
-2
mm/zsmalloc.c
··· 120 120 #define CLASS_BITS 8 121 121 #define MAGIC_VAL_BITS 8 122 122 123 - #define MAX(a, b) ((a) >= (b) ? (a) : (b)) 124 - 125 123 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL)) 126 124 127 125 /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
-7
net/bluetooth/hci_core.c
··· 119 119 case DISCOVERY_STARTING: 120 120 break; 121 121 case DISCOVERY_FINDING: 122 - /* If discovery was not started then it was initiated by the 123 - * MGMT interface so no MGMT event shall be generated either 124 - */ 125 - if (old_state != DISCOVERY_STARTING) { 126 - hdev->discovery.state = old_state; 127 - return; 128 - } 129 122 mgmt_discovering(hdev, 1); 130 123 break; 131 124 case DISCOVERY_RESOLVING:
+3 -2
net/bluetooth/hci_event.c
··· 1721 1721 switch (enable) { 1722 1722 case LE_SCAN_ENABLE: 1723 1723 hci_dev_set_flag(hdev, HCI_LE_SCAN); 1724 - if (hdev->le_scan_type == LE_SCAN_ACTIVE) 1724 + if (hdev->le_scan_type == LE_SCAN_ACTIVE) { 1725 1725 clear_pending_adv_report(hdev); 1726 - hci_discovery_set_state(hdev, DISCOVERY_FINDING); 1726 + hci_discovery_set_state(hdev, DISCOVERY_FINDING); 1727 + } 1727 1728 break; 1728 1729 1729 1730 case LE_SCAN_DISABLE:
+35
net/bluetooth/hci_sync.c
··· 2976 2976 */ 2977 2977 filter_policy = hci_update_accept_list_sync(hdev); 2978 2978 2979 + /* If suspended and filter_policy set to 0x00 (no acceptlist) then 2980 + * passive scanning cannot be started since that would require the host 2981 + * to be woken up to process the reports. 2982 + */ 2983 + if (hdev->suspended && !filter_policy) { 2984 + /* Check if accept list is empty then there is no need to scan 2985 + * while suspended. 2986 + */ 2987 + if (list_empty(&hdev->le_accept_list)) 2988 + return 0; 2989 + 2990 + /* If there are devices is the accept_list that means some 2991 + * devices could not be programmed which in non-suspended case 2992 + * means filter_policy needs to be set to 0x00 so the host needs 2993 + * to filter, but since this is treating suspended case we 2994 + * can ignore device needing host to filter to allow devices in 2995 + * the acceptlist to be able to wakeup the system. 2996 + */ 2997 + filter_policy = 0x01; 2998 + } 2999 + 2979 3000 /* When the controller is using random resolvable addresses and 2980 3001 * with that having LE privacy enabled, then controllers with 2981 3002 * Extended Scanner Filter Policies support can now enable support ··· 3019 2998 } else if (hci_is_adv_monitoring(hdev)) { 3020 2999 window = hdev->le_scan_window_adv_monitor; 3021 3000 interval = hdev->le_scan_int_adv_monitor; 3001 + 3002 + /* Disable duplicates filter when scanning for advertisement 3003 + * monitor for the following reasons. 3004 + * 3005 + * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm 3006 + * controllers ignore RSSI_Sampling_Period when the duplicates 3007 + * filter is enabled. 3008 + * 3009 + * For SW pattern filtering, when we're not doing interleaved 3010 + * scanning, it is necessary to disable duplicates filter, 3011 + * otherwise hosts can only receive one advertisement and it's 3012 + * impossible to know if a peer is still in range. 
3013 + */ 3014 + filter_dups = LE_SCAN_FILTER_DUP_DISABLE; 3022 3015 } else { 3023 3016 window = hdev->le_scan_window; 3024 3017 interval = hdev->le_scan_interval;
+1
net/bluetooth/l2cap_core.c
··· 6774 6774 bt_cb(skb)->l2cap.psm = psm; 6775 6775 6776 6776 if (!chan->ops->recv(chan, skb)) { 6777 + l2cap_chan_unlock(chan); 6777 6778 l2cap_chan_put(chan); 6778 6779 return; 6779 6780 }
+1 -3
net/bridge/br_multicast.c
··· 2045 2045 { 2046 2046 struct net_bridge *br = port->br; 2047 2047 struct net_bridge_port_group *pg; 2048 - HLIST_HEAD(deleted_head); 2049 2048 struct hlist_node *n; 2050 2049 2051 2050 /* Take care of the remaining groups, only perm ones should be left */ 2052 2051 spin_lock_bh(&br->multicast_lock); 2053 2052 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 2054 2053 br_multicast_find_del_pg(br, pg); 2055 - hlist_move_list(&br->mcast_gc_list, &deleted_head); 2056 2054 spin_unlock_bh(&br->multicast_lock); 2057 - br_multicast_gc(&deleted_head); 2055 + flush_work(&br->mcast_gc_work); 2058 2056 br_multicast_port_ctx_deinit(&port->multicast_ctx); 2059 2057 free_percpu(port->mcast_stats); 2060 2058 }
+1
net/core/dev.c
··· 5150 5150 bpf_net_ctx_clear(bpf_net_ctx); 5151 5151 return XDP_DROP; 5152 5152 } 5153 + bpf_net_ctx_clear(bpf_net_ctx); 5153 5154 } 5154 5155 return XDP_PASS; 5155 5156 out_redir:
+1 -1
net/core/rtnetlink.c
··· 3288 3288 if (ifm->ifi_index > 0) 3289 3289 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3290 3290 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3291 - dev = rtnl_dev_get(net, tb); 3291 + dev = rtnl_dev_get(tgt_net, tb); 3292 3292 else if (tb[IFLA_GROUP]) 3293 3293 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3294 3294 else
+44 -17
net/ethtool/ioctl.c
··· 1331 1331 u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); 1332 1332 const struct ethtool_ops *ops = dev->ethtool_ops; 1333 1333 u32 dev_indir_size = 0, dev_key_size = 0, i; 1334 + u32 user_indir_len = 0, indir_bytes = 0; 1334 1335 struct ethtool_rxfh_param rxfh_dev = {}; 1335 1336 struct ethtool_rxfh_context *ctx = NULL; 1336 1337 struct netlink_ext_ack *extack = NULL; 1337 1338 struct ethtool_rxnfc rx_rings; 1338 1339 struct ethtool_rxfh rxfh; 1339 1340 bool locked = false; /* dev->ethtool->rss_lock taken */ 1340 - u32 indir_bytes = 0; 1341 1341 bool create = false; 1342 1342 u8 *rss_config; 1343 1343 int ret; ··· 1369 1369 return -EOPNOTSUPP; 1370 1370 create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; 1371 1371 1372 - /* If either indir, hash key or function is valid, proceed further. 1373 - * Must request at least one change: indir size, hash key, function 1374 - * or input transformation. 1375 - */ 1376 1372 if ((rxfh.indir_size && 1377 1373 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && 1378 1374 rxfh.indir_size != dev_indir_size) || 1379 - (rxfh.key_size && (rxfh.key_size != dev_key_size)) || 1375 + (rxfh.key_size && rxfh.key_size != dev_key_size)) 1376 + return -EINVAL; 1377 + 1378 + /* Must request at least one change: indir size, hash key, function 1379 + * or input transformation. 1380 + * There's no need for any of it in case of context creation. 
1381 + */ 1382 + if (!create && 1380 1383 (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && 1381 1384 rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE && 1382 1385 rxfh.input_xfrm == RXH_XFRM_NO_CHANGE)) 1383 1386 return -EINVAL; 1384 1387 1385 - if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) 1386 - indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]); 1388 + indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]); 1387 1389 1388 - rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER); 1390 + rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER); 1389 1391 if (!rss_config) 1390 1392 return -ENOMEM; 1391 1393 ··· 1402 1400 */ 1403 1401 if (rxfh.indir_size && 1404 1402 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) { 1403 + user_indir_len = indir_bytes; 1405 1404 rxfh_dev.indir = (u32 *)rss_config; 1406 1405 rxfh_dev.indir_size = dev_indir_size; 1407 1406 ret = ethtool_copy_validate_indir(rxfh_dev.indir, ··· 1429 1426 rxfh_dev.key_size = dev_key_size; 1430 1427 rxfh_dev.key = rss_config + indir_bytes; 1431 1428 if (copy_from_user(rxfh_dev.key, 1432 - useraddr + rss_cfg_offset + indir_bytes, 1429 + useraddr + rss_cfg_offset + user_indir_len, 1433 1430 rxfh.key_size)) { 1434 1431 ret = -EFAULT; 1435 1432 goto out; ··· 1452 1449 } 1453 1450 1454 1451 if (ops->create_rxfh_context) { 1455 - u32 limit = ops->rxfh_max_context_id ?: U32_MAX; 1452 + u32 limit = ops->rxfh_max_num_contexts ?: U32_MAX; 1456 1453 u32 ctx_id; 1457 1454 1458 1455 /* driver uses new API, core allocates ID */ 1459 1456 ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx, 1460 - XA_LIMIT(1, limit), GFP_KERNEL_ACCOUNT); 1457 + XA_LIMIT(1, limit - 1), 1458 + GFP_KERNEL_ACCOUNT); 1461 1459 if (ret < 0) { 1462 1460 kfree(ctx); 1463 1461 goto out; ··· 1478 1474 rxfh_dev.input_xfrm = rxfh.input_xfrm; 1479 1475 1480 1476 if (rxfh.rss_context && ops->create_rxfh_context) { 1481 - if (create) 1477 + if (create) { 1482 1478 ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, 1483 1479 
extack); 1484 - else if (rxfh_dev.rss_delete) 1480 + /* Make sure driver populates defaults */ 1481 + WARN_ON_ONCE(!ret && !rxfh_dev.key && 1482 + !memchr_inv(ethtool_rxfh_context_key(ctx), 1483 + 0, ctx->key_size)); 1484 + } else if (rxfh_dev.rss_delete) { 1485 1485 ret = ops->remove_rxfh_context(dev, ctx, 1486 1486 rxfh.rss_context, 1487 1487 extack); 1488 - else 1488 + } else { 1489 1489 ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev, 1490 1490 extack); 1491 + } 1491 1492 } else { 1492 1493 ret = ops->set_rxfh(dev, &rxfh_dev, extack); 1493 1494 } ··· 1531 1522 kfree(ctx); 1532 1523 goto out; 1533 1524 } 1525 + 1526 + /* Fetch the defaults for the old API, in the new API drivers 1527 + * should write defaults into ctx themselves. 1528 + */ 1529 + rxfh_dev.indir = (u32 *)rss_config; 1530 + rxfh_dev.indir_size = dev_indir_size; 1531 + 1532 + rxfh_dev.key = rss_config + indir_bytes; 1533 + rxfh_dev.key_size = dev_key_size; 1534 + 1535 + ret = ops->get_rxfh(dev, &rxfh_dev); 1536 + if (WARN_ON(ret)) { 1537 + xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); 1538 + kfree(ctx); 1539 + goto out; 1540 + } 1534 1541 } 1535 1542 if (rxfh_dev.rss_delete) { 1536 1543 WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx); ··· 1555 1530 if (rxfh_dev.indir) { 1556 1531 for (i = 0; i < dev_indir_size; i++) 1557 1532 ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i]; 1558 - ctx->indir_configured = 1; 1533 + ctx->indir_configured = 1534 + rxfh.indir_size && 1535 + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE; 1559 1536 } 1560 1537 if (rxfh_dev.key) { 1561 1538 memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key, 1562 1539 dev_key_size); 1563 - ctx->key_configured = 1; 1540 + ctx->key_configured = !!rxfh.key_size; 1564 1541 } 1565 1542 if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE) 1566 1543 ctx->hfunc = rxfh_dev.hfunc;
+7 -1
net/ethtool/rss.c
··· 111 111 const struct rss_reply_data *data = RSS_REPDATA(reply_base); 112 112 int len; 113 113 114 - len = nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */ 114 + len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */ 115 + nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */ 115 116 nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */ 116 117 nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */ 117 118 nla_total_size(data->hkey_size); /* _RSS_HKEY */ ··· 125 124 const struct ethnl_reply_data *reply_base) 126 125 { 127 126 const struct rss_reply_data *data = RSS_REPDATA(reply_base); 127 + struct rss_req_info *request = RSS_REQINFO(req_base); 128 + 129 + if (request->rss_context && 130 + nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context)) 131 + return -EMSGSIZE; 128 132 129 133 if ((data->hfunc && 130 134 nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
+10 -8
net/ipv4/netfilter/iptable_nat.c
··· 145 145 146 146 static int __init iptable_nat_init(void) 147 147 { 148 - int ret = xt_register_template(&nf_nat_ipv4_table, 149 - iptable_nat_table_init); 148 + int ret; 150 149 150 + /* net->gen->ptr[iptable_nat_net_id] must be allocated 151 + * before calling iptable_nat_table_init(). 152 + */ 153 + ret = register_pernet_subsys(&iptable_nat_net_ops); 151 154 if (ret < 0) 152 155 return ret; 153 156 154 - ret = register_pernet_subsys(&iptable_nat_net_ops); 155 - if (ret < 0) { 156 - xt_unregister_template(&nf_nat_ipv4_table); 157 - return ret; 158 - } 157 + ret = xt_register_template(&nf_nat_ipv4_table, 158 + iptable_nat_table_init); 159 + if (ret < 0) 160 + unregister_pernet_subsys(&iptable_nat_net_ops); 159 161 160 162 return ret; 161 163 } 162 164 163 165 static void __exit iptable_nat_exit(void) 164 166 { 165 - unregister_pernet_subsys(&iptable_nat_net_ops); 166 167 xt_unregister_template(&nf_nat_ipv4_table); 168 + unregister_pernet_subsys(&iptable_nat_net_ops); 167 169 } 168 170 169 171 module_init(iptable_nat_init);
+30 -13
net/ipv4/tcp_ao.c
··· 267 267 kfree_sensitive(key); 268 268 } 269 269 270 - void tcp_ao_destroy_sock(struct sock *sk, bool twsk) 270 + static void tcp_ao_info_free_rcu(struct rcu_head *head) 271 271 { 272 - struct tcp_ao_info *ao; 272 + struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu); 273 273 struct tcp_ao_key *key; 274 274 struct hlist_node *n; 275 275 276 + hlist_for_each_entry_safe(key, n, &ao->head, node) { 277 + hlist_del(&key->node); 278 + tcp_sigpool_release(key->tcp_sigpool_id); 279 + kfree_sensitive(key); 280 + } 281 + kfree(ao); 282 + static_branch_slow_dec_deferred(&tcp_ao_needed); 283 + } 284 + 285 + static void tcp_ao_sk_omem_free(struct sock *sk, struct tcp_ao_info *ao) 286 + { 287 + size_t total_ao_sk_mem = 0; 288 + struct tcp_ao_key *key; 289 + 290 + hlist_for_each_entry(key, &ao->head, node) 291 + total_ao_sk_mem += tcp_ao_sizeof_key(key); 292 + atomic_sub(total_ao_sk_mem, &sk->sk_omem_alloc); 293 + } 294 + 295 + void tcp_ao_destroy_sock(struct sock *sk, bool twsk) 296 + { 297 + struct tcp_ao_info *ao; 298 + 276 299 if (twsk) { 277 300 ao = rcu_dereference_protected(tcp_twsk(sk)->ao_info, 1); 278 - tcp_twsk(sk)->ao_info = NULL; 301 + rcu_assign_pointer(tcp_twsk(sk)->ao_info, NULL); 279 302 } else { 280 303 ao = rcu_dereference_protected(tcp_sk(sk)->ao_info, 1); 281 - tcp_sk(sk)->ao_info = NULL; 304 + rcu_assign_pointer(tcp_sk(sk)->ao_info, NULL); 282 305 } 283 306 284 307 if (!ao || !refcount_dec_and_test(&ao->refcnt)) 285 308 return; 286 309 287 - hlist_for_each_entry_safe(key, n, &ao->head, node) { 288 - hlist_del_rcu(&key->node); 289 - if (!twsk) 290 - atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc); 291 - call_rcu(&key->rcu, tcp_ao_key_free_rcu); 292 - } 293 - 294 - kfree_rcu(ao, rcu); 295 - static_branch_slow_dec_deferred(&tcp_ao_needed); 310 + if (!twsk) 311 + tcp_ao_sk_omem_free(sk, ao); 312 + call_rcu(&ao->rcu, tcp_ao_info_free_rcu); 296 313 } 297 314 298 315 void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock 
*tp)
+16 -7
net/ipv4/tcp_input.c
··· 754 754 * <prev RTT . ><current RTT .. ><next RTT .... > 755 755 */ 756 756 757 - if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && 758 - !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 757 + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) { 759 758 u64 rcvwin, grow; 760 759 int rcvbuf; 761 760 ··· 770 771 771 772 rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin), 772 773 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); 773 - if (rcvbuf > sk->sk_rcvbuf) { 774 - WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); 774 + if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 775 + if (rcvbuf > sk->sk_rcvbuf) { 776 + WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); 775 777 776 - /* Make the window clamp follow along. */ 777 - WRITE_ONCE(tp->window_clamp, 778 - tcp_win_from_space(sk, rcvbuf)); 778 + /* Make the window clamp follow along. */ 779 + WRITE_ONCE(tp->window_clamp, 780 + tcp_win_from_space(sk, rcvbuf)); 781 + } 782 + } else { 783 + /* Make the window clamp follow along while being bounded 784 + * by SO_RCVBUF. 785 + */ 786 + int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf)); 787 + 788 + if (clamp > tp->window_clamp) 789 + WRITE_ONCE(tp->window_clamp, clamp); 779 790 } 780 791 } 781 792 tp->rcvq_space.space = copied;
+3
net/ipv4/tcp_offload.c
··· 140 140 if (thlen < sizeof(*th)) 141 141 goto out; 142 142 143 + if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb))) 144 + goto out; 145 + 143 146 if (!pskb_may_pull(skb, thlen)) 144 147 goto out; 145 148
+4
net/ipv4/udp_offload.c
··· 278 278 if (gso_skb->len <= sizeof(*uh) + mss) 279 279 return ERR_PTR(-EINVAL); 280 280 281 + if (unlikely(skb_checksum_start(gso_skb) != 282 + skb_transport_header(gso_skb))) 283 + return ERR_PTR(-EINVAL); 284 + 281 285 if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) { 282 286 /* Packet is from an untrusted source, reset gso_segs. */ 283 287 skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
+18 -16
net/ipv6/ndisc.c
··· 227 227 return NULL; 228 228 memset(ndopts, 0, sizeof(*ndopts)); 229 229 while (opt_len) { 230 + bool unknown = false; 230 231 int l; 231 232 if (opt_len < sizeof(struct nd_opt_hdr)) 232 233 return NULL; ··· 263 262 break; 264 263 #endif 265 264 default: 266 - if (ndisc_is_useropt(dev, nd_opt)) { 267 - ndopts->nd_useropts_end = nd_opt; 268 - if (!ndopts->nd_useropts) 269 - ndopts->nd_useropts = nd_opt; 270 - } else { 271 - /* 272 - * Unknown options must be silently ignored, 273 - * to accommodate future extension to the 274 - * protocol. 275 - */ 276 - ND_PRINTK(2, notice, 277 - "%s: ignored unsupported option; type=%d, len=%d\n", 278 - __func__, 279 - nd_opt->nd_opt_type, 280 - nd_opt->nd_opt_len); 281 - } 265 + unknown = true; 266 + } 267 + if (ndisc_is_useropt(dev, nd_opt)) { 268 + ndopts->nd_useropts_end = nd_opt; 269 + if (!ndopts->nd_useropts) 270 + ndopts->nd_useropts = nd_opt; 271 + } else if (unknown) { 272 + /* 273 + * Unknown options must be silently ignored, 274 + * to accommodate future extension to the 275 + * protocol. 276 + */ 277 + ND_PRINTK(2, notice, 278 + "%s: ignored unsupported option; type=%d, len=%d\n", 279 + __func__, 280 + nd_opt->nd_opt_type, 281 + nd_opt->nd_opt_len); 282 282 } 283 283 next_opt: 284 284 opt_len -= l;
+9 -5
net/ipv6/netfilter/ip6table_nat.c
··· 147 147 148 148 static int __init ip6table_nat_init(void) 149 149 { 150 - int ret = xt_register_template(&nf_nat_ipv6_table, 151 - ip6table_nat_table_init); 150 + int ret; 152 151 152 + /* net->gen->ptr[ip6table_nat_net_id] must be allocated 153 + * before calling ip6t_nat_register_lookups(). 154 + */ 155 + ret = register_pernet_subsys(&ip6table_nat_net_ops); 153 156 if (ret < 0) 154 157 return ret; 155 158 156 - ret = register_pernet_subsys(&ip6table_nat_net_ops); 159 + ret = xt_register_template(&nf_nat_ipv6_table, 160 + ip6table_nat_table_init); 157 161 if (ret) 158 - xt_unregister_template(&nf_nat_ipv6_table); 162 + unregister_pernet_subsys(&ip6table_nat_net_ops); 159 163 160 164 return ret; 161 165 } 162 166 163 167 static void __exit ip6table_nat_exit(void) 164 168 { 165 - unregister_pernet_subsys(&ip6table_nat_net_ops); 166 169 xt_unregister_template(&nf_nat_ipv6_table); 170 + unregister_pernet_subsys(&ip6table_nat_net_ops); 167 171 } 168 172 169 173 module_init(ip6table_nat_init);
+2 -2
net/iucv/af_iucv.c
··· 335 335 struct iucv_sock *iucv = iucv_sk(sk); 336 336 struct iucv_path *path = iucv->path; 337 337 338 - if (iucv->path) { 339 - iucv->path = NULL; 338 + /* Whoever resets the path pointer, must sever and free it. */ 339 + if (xchg(&iucv->path, NULL)) { 340 340 if (with_user_data) { 341 341 low_nmcpy(user_data, iucv->src_name); 342 342 high_nmcpy(user_data, iucv->dst_name);
+13 -2
net/l2tp/l2tp_core.c
··· 86 86 /* Default trace flags */ 87 87 #define L2TP_DEFAULT_DEBUG_FLAGS 0 88 88 89 + #define L2TP_DEPTH_NESTING 2 90 + #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING 91 + #error "L2TP requires its own lockdep subclass" 92 + #endif 93 + 89 94 /* Private data stored for received packets in the skb. 90 95 */ 91 96 struct l2tp_skb_cb { ··· 1129 1124 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); 1130 1125 nf_reset_ct(skb); 1131 1126 1132 - bh_lock_sock_nested(sk); 1127 + /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by 1128 + * nested socket calls on the same lockdep socket class. This can 1129 + * happen when data from a user socket is routed over l2tp, which uses 1130 + * another userspace socket. 1131 + */ 1132 + spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING); 1133 + 1133 1134 if (sock_owned_by_user(sk)) { 1134 1135 kfree_skb(skb); 1135 1136 ret = NET_XMIT_DROP; ··· 1187 1176 ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl); 1188 1177 1189 1178 out_unlock: 1190 - bh_unlock_sock(sk); 1179 + spin_unlock(&sk->sk_lock.slock); 1191 1180 1192 1181 return ret; 1193 1182 }
+5 -2
net/mac80211/cfg.c
··· 114 114 115 115 /* apply all changes now - no failures allowed */ 116 116 117 - if (monitor_sdata) 117 + if (monitor_sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 118 118 ieee80211_set_mu_mimo_follow(monitor_sdata, params); 119 119 120 120 if (params->flags) { ··· 3053 3053 sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); 3054 3054 3055 3055 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { 3056 + if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 3057 + return -EOPNOTSUPP; 3058 + 3056 3059 sdata = wiphy_dereference(local->hw.wiphy, 3057 3060 local->monitor_sdata); 3058 3061 if (!sdata) ··· 3118 3115 if (has_monitor) { 3119 3116 sdata = wiphy_dereference(local->hw.wiphy, 3120 3117 local->monitor_sdata); 3121 - if (sdata) { 3118 + if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { 3122 3119 sdata->deflink.user_power_level = local->user_power_level; 3123 3120 if (txp_type != sdata->vif.bss_conf.txpower_type) 3124 3121 update_txp_type = true;
+3 -2
net/mac80211/tx.c
··· 1768 1768 break; 1769 1769 } 1770 1770 sdata = rcu_dereference(local->monitor_sdata); 1771 - if (sdata) { 1771 + if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { 1772 1772 vif = &sdata->vif; 1773 1773 info->hw_queue = 1774 1774 vif->hw_queue[skb_get_queue_mapping(skb)]; ··· 3957 3957 break; 3958 3958 } 3959 3959 tx.sdata = rcu_dereference(local->monitor_sdata); 3960 - if (tx.sdata) { 3960 + if (tx.sdata && 3961 + ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { 3961 3962 vif = &tx.sdata->vif; 3962 3963 info->hw_queue = 3963 3964 vif->hw_queue[skb_get_queue_mapping(skb)];
+1 -1
net/mac80211/util.c
··· 776 776 sdata = rcu_dereference_check(local->monitor_sdata, 777 777 lockdep_is_held(&local->iflist_mtx) || 778 778 lockdep_is_held(&local->hw.wiphy->mtx)); 779 - if (sdata && 779 + if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) && 780 780 (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only || 781 781 sdata->flags & IEEE80211_SDATA_IN_DRIVER)) 782 782 iterator(data, sdata->vif.addr, &sdata->vif);
+2
net/mptcp/mib.c
··· 19 19 SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS), 20 20 SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN), 21 21 SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX), 22 + SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX), 22 23 SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX), 24 + SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX), 23 25 SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC), 24 26 SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX), 25 27 SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
+2
net/mptcp/mib.h
··· 14 14 MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */ 15 15 MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */ 16 16 MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */ 17 + MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */ 17 18 MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */ 19 + MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */ 18 20 MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */ 19 21 MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */ 20 22 MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
+3 -2
net/mptcp/options.c
··· 909 909 return true; 910 910 } else if (subflow_req->mp_join) { 911 911 opts->suboptions = OPTION_MPTCP_MPJ_SYNACK; 912 - opts->backup = subflow_req->backup; 912 + opts->backup = subflow_req->request_bkup; 913 913 opts->join_id = subflow_req->local_id; 914 914 opts->thmac = subflow_req->thmac; 915 915 opts->nonce = subflow_req->local_nonce; ··· 958 958 959 959 if (subflow->remote_key_valid && 960 960 (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) || 961 - ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) { 961 + ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && 962 + (!mp_opt->echo || subflow->mp_join)))) { 962 963 /* subflows are fully established as soon as we get any 963 964 * additional ack, including ADD_ADDR. 964 965 */
+12
net/mptcp/pm.c
··· 426 426 return mptcp_pm_nl_get_local_id(msk, &skc_local); 427 427 } 428 428 429 + bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc) 430 + { 431 + struct mptcp_addr_info skc_local; 432 + 433 + mptcp_local_address((struct sock_common *)skc, &skc_local); 434 + 435 + if (mptcp_pm_is_userspace(msk)) 436 + return mptcp_userspace_pm_is_backup(msk, &skc_local); 437 + 438 + return mptcp_pm_nl_is_backup(msk, &skc_local); 439 + } 440 + 429 441 int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, 430 442 u8 *flags, int *ifindex) 431 443 {
+67 -26
net/mptcp/pm_netlink.c
··· 348 348 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); 349 349 350 350 if (add_entry) { 351 - if (mptcp_pm_is_kernel(msk)) 351 + if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) 352 352 return false; 353 353 354 354 sk_reset_timer(sk, &add_entry->add_timer, ··· 471 471 slow = lock_sock_fast(ssk); 472 472 if (prio) { 473 473 subflow->send_mp_prio = 1; 474 - subflow->backup = backup; 475 474 subflow->request_bkup = backup; 476 475 } 477 476 ··· 512 513 513 514 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) 514 515 { 516 + struct mptcp_pm_addr_entry *local, *signal_and_subflow = NULL; 515 517 struct sock *sk = (struct sock *)msk; 516 - struct mptcp_pm_addr_entry *local; 517 518 unsigned int add_addr_signal_max; 518 519 unsigned int local_addr_max; 519 520 struct pm_nl_pernet *pernet; ··· 555 556 556 557 /* check first for announce */ 557 558 if (msk->pm.add_addr_signaled < add_addr_signal_max) { 558 - local = select_signal_address(pernet, msk); 559 - 560 559 /* due to racing events on both ends we can reach here while 561 560 * previous add address is still running: if we invoke now 562 561 * mptcp_pm_announce_addr(), that will fail and the ··· 565 568 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) 566 569 return; 567 570 568 - if (local) { 569 - if (mptcp_pm_alloc_anno_list(msk, &local->addr)) { 570 - __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); 571 - msk->pm.add_addr_signaled++; 572 - mptcp_pm_announce_addr(msk, &local->addr, false); 573 - mptcp_pm_nl_addr_send_ack(msk); 574 - } 575 - } 571 + local = select_signal_address(pernet, msk); 572 + if (!local) 573 + goto subflow; 574 + 575 + /* If the alloc fails, we are on memory pressure, not worth 576 + * continuing, and trying to create subflows. 
577 + */ 578 + if (!mptcp_pm_alloc_anno_list(msk, &local->addr)) 579 + return; 580 + 581 + __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); 582 + msk->pm.add_addr_signaled++; 583 + mptcp_pm_announce_addr(msk, &local->addr, false); 584 + mptcp_pm_nl_addr_send_ack(msk); 585 + 586 + if (local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) 587 + signal_and_subflow = local; 576 588 } 577 589 590 + subflow: 578 591 /* check if should create a new subflow */ 579 592 while (msk->pm.local_addr_used < local_addr_max && 580 593 msk->pm.subflows < subflows_max) { ··· 592 585 bool fullmesh; 593 586 int i, nr; 594 587 595 - local = select_local_address(pernet, msk); 596 - if (!local) 597 - break; 588 + if (signal_and_subflow) { 589 + local = signal_and_subflow; 590 + signal_and_subflow = NULL; 591 + } else { 592 + local = select_local_address(pernet, msk); 593 + if (!local) 594 + break; 595 + } 598 596 599 597 fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH); 600 598 ··· 1114 1102 return ret; 1115 1103 } 1116 1104 1105 + bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) 1106 + { 1107 + struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 1108 + struct mptcp_pm_addr_entry *entry; 1109 + bool backup = false; 1110 + 1111 + rcu_read_lock(); 1112 + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 1113 + if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { 1114 + backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 1115 + break; 1116 + } 1117 + } 1118 + rcu_read_unlock(); 1119 + 1120 + return backup; 1121 + } 1122 + 1117 1123 #define MPTCP_PM_CMD_GRP_OFFSET 0 1118 1124 #define MPTCP_PM_EV_GRP_OFFSET 1 1119 1125 ··· 1341 1311 if (ret < 0) 1342 1312 return ret; 1343 1313 1344 - if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { 1345 - GENL_SET_ERR_MSG(info, "flags must have signal when using port"); 1314 + if (addr.addr.port && !address_use_port(&addr)) { 1315 + GENL_SET_ERR_MSG(info, "flags must 
have signal and not subflow when using port"); 1346 1316 return -EINVAL; 1347 1317 } 1348 1318 ··· 1431 1401 ret = remove_anno_list_by_saddr(msk, addr); 1432 1402 if (ret || force) { 1433 1403 spin_lock_bh(&msk->pm.lock); 1404 + msk->pm.add_addr_signaled -= ret; 1434 1405 mptcp_pm_remove_addr(msk, &list); 1435 1406 spin_unlock_bh(&msk->pm.lock); 1436 1407 } ··· 1565 1534 { 1566 1535 struct mptcp_rm_list alist = { .nr = 0 }; 1567 1536 struct mptcp_pm_addr_entry *entry; 1537 + int anno_nr = 0; 1568 1538 1569 1539 list_for_each_entry(entry, rm_list, list) { 1570 - if ((remove_anno_list_by_saddr(msk, &entry->addr) || 1571 - lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) && 1572 - alist.nr < MPTCP_RM_IDS_MAX) 1573 - alist.ids[alist.nr++] = entry->addr.id; 1540 + if (alist.nr >= MPTCP_RM_IDS_MAX) 1541 + break; 1542 + 1543 + /* only delete if either announced or matching a subflow */ 1544 + if (remove_anno_list_by_saddr(msk, &entry->addr)) 1545 + anno_nr++; 1546 + else if (!lookup_subflow_by_saddr(&msk->conn_list, 1547 + &entry->addr)) 1548 + continue; 1549 + 1550 + alist.ids[alist.nr++] = entry->addr.id; 1574 1551 } 1575 1552 1576 1553 if (alist.nr) { 1577 1554 spin_lock_bh(&msk->pm.lock); 1555 + msk->pm.add_addr_signaled -= anno_nr; 1578 1556 mptcp_pm_remove_addr(msk, &alist); 1579 1557 spin_unlock_bh(&msk->pm.lock); 1580 1558 } ··· 1596 1556 struct mptcp_pm_addr_entry *entry; 1597 1557 1598 1558 list_for_each_entry(entry, rm_list, list) { 1599 - if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) && 1600 - slist.nr < MPTCP_RM_IDS_MAX) 1559 + if (slist.nr < MPTCP_RM_IDS_MAX && 1560 + lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) 1601 1561 slist.ids[slist.nr++] = entry->addr.id; 1602 1562 1603 - if (remove_anno_list_by_saddr(msk, &entry->addr) && 1604 - alist.nr < MPTCP_RM_IDS_MAX) 1563 + if (alist.nr < MPTCP_RM_IDS_MAX && 1564 + remove_anno_list_by_saddr(msk, &entry->addr)) 1605 1565 alist.ids[alist.nr++] = entry->addr.id; 1606 1566 } 1607 1567 
1608 1568 if (alist.nr) { 1609 1569 spin_lock_bh(&msk->pm.lock); 1570 + msk->pm.add_addr_signaled -= alist.nr; 1610 1571 mptcp_pm_remove_addr(msk, &alist); 1611 1572 spin_unlock_bh(&msk->pm.lock); 1612 1573 }
+18
net/mptcp/pm_userspace.c
··· 165 165 return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true); 166 166 } 167 167 168 + bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, 169 + struct mptcp_addr_info *skc) 170 + { 171 + struct mptcp_pm_addr_entry *entry; 172 + bool backup = false; 173 + 174 + spin_lock_bh(&msk->pm.lock); 175 + list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { 176 + if (mptcp_addresses_equal(&entry->addr, skc, false)) { 177 + backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 178 + break; 179 + } 180 + } 181 + spin_unlock_bh(&msk->pm.lock); 182 + 183 + return backup; 184 + } 185 + 168 186 int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info) 169 187 { 170 188 struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+10 -8
net/mptcp/protocol.c
··· 350 350 skb_orphan(skb); 351 351 352 352 /* try to fetch required memory from subflow */ 353 - if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) 353 + if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) { 354 + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); 354 355 goto drop; 356 + } 355 357 356 358 has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; 357 359 ··· 846 844 sk_rbuf = ssk_rbuf; 847 845 848 846 /* over limit? can't append more skbs to msk, Also, no need to wake-up*/ 849 - if (__mptcp_rmem(sk) > sk_rbuf) { 850 - MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); 847 + if (__mptcp_rmem(sk) > sk_rbuf) 851 848 return; 852 - } 853 849 854 850 /* Wake-up the reader only for in-sequence data */ 855 851 mptcp_data_lock(sk); ··· 1422 1422 } 1423 1423 1424 1424 mptcp_for_each_subflow(msk, subflow) { 1425 + bool backup = subflow->backup || subflow->request_bkup; 1426 + 1425 1427 trace_mptcp_subflow_get_send(subflow); 1426 1428 ssk = mptcp_subflow_tcp_sock(subflow); 1427 1429 if (!mptcp_subflow_active(subflow)) 1428 1430 continue; 1429 1431 1430 1432 tout = max(tout, mptcp_timeout_from_subflow(subflow)); 1431 - nr_active += !subflow->backup; 1433 + nr_active += !backup; 1432 1434 pace = subflow->avg_pacing_rate; 1433 1435 if (unlikely(!pace)) { 1434 1436 /* init pacing rate from socket */ ··· 1441 1439 } 1442 1440 1443 1441 linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); 1444 - if (linger_time < send_info[subflow->backup].linger_time) { 1445 - send_info[subflow->backup].ssk = ssk; 1446 - send_info[subflow->backup].linger_time = linger_time; 1442 + if (linger_time < send_info[backup].linger_time) { 1443 + send_info[backup].ssk = ssk; 1444 + send_info[backup].linger_time = linger_time; 1447 1445 } 1448 1446 } 1449 1447 __mptcp_set_timeout(sk, tout);
+4
net/mptcp/protocol.h
··· 448 448 u16 mp_capable : 1, 449 449 mp_join : 1, 450 450 backup : 1, 451 + request_bkup : 1, 451 452 csum_reqd : 1, 452 453 allow_join_id0 : 1; 453 454 u8 local_id; ··· 1109 1108 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc); 1110 1109 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); 1111 1110 int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); 1111 + bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc); 1112 + bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc); 1113 + bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc); 1112 1114 int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb); 1113 1115 int mptcp_pm_nl_dump_addr(struct sk_buff *msg, 1114 1116 struct netlink_callback *cb);
+22 -4
net/mptcp/subflow.c
··· 100 100 return NULL; 101 101 } 102 102 subflow_req->local_id = local_id; 103 + subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req); 103 104 104 105 return msk; 105 106 } ··· 169 168 return 0; 170 169 } else if (opt_mp_join) { 171 170 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX); 171 + 172 + if (mp_opt.backup) 173 + SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX); 172 174 } 173 175 174 176 if (opt_mp_capable && listener->request_mptcp) { ··· 581 577 subflow->mp_join = 1; 582 578 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX); 583 579 580 + if (subflow->backup) 581 + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX); 582 + 584 583 if (subflow_use_different_dport(msk, sk)) { 585 584 pr_debug("synack inet_dport=%d %d", 586 585 ntohs(inet_sk(sk)->inet_dport), ··· 621 614 return err; 622 615 623 616 subflow_set_local_id(subflow, err); 617 + subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk); 618 + 624 619 return 0; 625 620 } 626 621 ··· 1230 1221 { 1231 1222 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1232 1223 bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 1233 - u32 incr; 1224 + struct tcp_sock *tp = tcp_sk(ssk); 1225 + u32 offset, incr, avail_len; 1234 1226 1235 - incr = limit >= skb->len ? skb->len + fin : limit; 1227 + offset = tp->copied_seq - TCP_SKB_CB(skb)->seq; 1228 + if (WARN_ON_ONCE(offset > skb->len)) 1229 + goto out; 1236 1230 1237 - pr_debug("discarding=%d len=%d seq=%d", incr, skb->len, 1238 - subflow->map_subflow_seq); 1231 + avail_len = skb->len - offset; 1232 + incr = limit >= avail_len ? 
avail_len + fin : limit; 1233 + 1234 + pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len, 1235 + offset, subflow->map_subflow_seq); 1239 1236 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); 1240 1237 tcp_sk(ssk)->copied_seq += incr; 1238 + 1239 + out: 1241 1240 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) 1242 1241 sk_eat_skb(ssk, skb); 1243 1242 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) ··· 2022 2005 new_ctx->fully_established = 1; 2023 2006 new_ctx->remote_key_valid = 1; 2024 2007 new_ctx->backup = subflow_req->backup; 2008 + new_ctx->request_bkup = subflow_req->request_bkup; 2025 2009 WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id); 2026 2010 new_ctx->token = subflow_req->token; 2027 2011 new_ctx->thmac = subflow_req->thmac;
+3 -1
net/sched/act_ct.c
··· 44 44 struct zones_ht_key { 45 45 struct net *net; 46 46 u16 zone; 47 + /* Note : pad[] must be the last field. */ 48 + u8 pad[]; 47 49 }; 48 50 49 51 struct tcf_ct_flow_table { ··· 62 60 static const struct rhashtable_params zones_params = { 63 61 .head_offset = offsetof(struct tcf_ct_flow_table, node), 64 62 .key_offset = offsetof(struct tcf_ct_flow_table, key), 65 - .key_len = sizeof_field(struct tcf_ct_flow_table, key), 63 + .key_len = offsetof(struct zones_ht_key, pad), 66 64 .automatic_shrinking = true, 67 65 }; 68 66
+11 -8
net/sctp/input.c
··· 735 735 struct sock *sk = ep->base.sk; 736 736 struct net *net = sock_net(sk); 737 737 struct sctp_hashbucket *head; 738 + int err = 0; 738 739 739 740 ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port); 740 741 head = &sctp_ep_hashtable[ep->hashent]; 741 742 743 + write_lock(&head->lock); 742 744 if (sk->sk_reuseport) { 743 745 bool any = sctp_is_ep_boundall(sk); 744 746 struct sctp_endpoint *ep2; 745 747 struct list_head *list; 746 - int cnt = 0, err = 1; 748 + int cnt = 0; 749 + 750 + err = 1; 747 751 748 752 list_for_each(list, &ep->base.bind_addr.address_list) 749 753 cnt++; ··· 765 761 if (!err) { 766 762 err = reuseport_add_sock(sk, sk2, any); 767 763 if (err) 768 - return err; 764 + goto out; 769 765 break; 770 766 } else if (err < 0) { 771 - return err; 767 + goto out; 772 768 } 773 769 } 774 770 775 771 if (err) { 776 772 err = reuseport_alloc(sk, any); 777 773 if (err) 778 - return err; 774 + goto out; 779 775 } 780 776 } 781 777 782 - write_lock(&head->lock); 783 778 hlist_add_head(&ep->node, &head->chain); 779 + out: 784 780 write_unlock(&head->lock); 785 - return 0; 781 + return err; 786 782 } 787 783 788 784 /* Add an endpoint to the hash. Local BH-safe. */ ··· 807 803 808 804 head = &sctp_ep_hashtable[ep->hashent]; 809 805 806 + write_lock(&head->lock); 810 807 if (rcu_access_pointer(sk->sk_reuseport_cb)) 811 808 reuseport_detach_sock(sk); 812 - 813 - write_lock(&head->lock); 814 809 hlist_del_init(&ep->node); 815 810 write_unlock(&head->lock); 816 811 }
+4 -3
net/smc/af_smc.c
··· 3319 3319 3320 3320 rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, 3321 3321 &smc->clcsock); 3322 - if (rc) { 3323 - sk_common_release(sk); 3322 + if (rc) 3324 3323 return rc; 3325 - } 3326 3324 3327 3325 /* smc_clcsock_release() does not wait smc->clcsock->sk's 3328 3326 * destruction; its sk_state might not be TCP_CLOSE after ··· 3366 3368 smc->clcsock = clcsock; 3367 3369 else 3368 3370 rc = smc_create_clcsk(net, sk, family); 3371 + 3372 + if (rc) 3373 + sk_common_release(sk); 3369 3374 out: 3370 3375 return rc; 3371 3376 }
+1 -1
net/smc/smc_stats.h
··· 19 19 20 20 #include "smc_clc.h" 21 21 22 - #define SMC_MAX_FBACK_RSN_CNT 30 22 + #define SMC_MAX_FBACK_RSN_CNT 36 23 23 24 24 enum { 25 25 SMC_BUF_8K,
+1 -1
net/sunrpc/svc.c
··· 161 161 str[len] = '\n'; 162 162 str[len + 1] = '\0'; 163 163 164 - return sysfs_emit(buf, str); 164 + return sysfs_emit(buf, "%s", str); 165 165 } 166 166 167 167 module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
+8 -3
net/wireless/scan.c
··· 3178 3178 struct ieee80211_mgmt *mgmt, size_t len, 3179 3179 gfp_t gfp) 3180 3180 { 3181 - size_t min_hdr_len = offsetof(struct ieee80211_mgmt, 3182 - u.probe_resp.variable); 3181 + size_t min_hdr_len; 3183 3182 struct ieee80211_ext *ext = NULL; 3184 3183 enum cfg80211_bss_frame_type ftype; 3185 3184 u16 beacon_interval; ··· 3201 3202 3202 3203 if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { 3203 3204 ext = (void *) mgmt; 3204 - min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon); 3205 3205 if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) 3206 3206 min_hdr_len = offsetof(struct ieee80211_ext, 3207 3207 u.s1g_short_beacon.variable); 3208 + else 3209 + min_hdr_len = offsetof(struct ieee80211_ext, 3210 + u.s1g_beacon.variable); 3211 + } else { 3212 + /* same for beacons */ 3213 + min_hdr_len = offsetof(struct ieee80211_mgmt, 3214 + u.probe_resp.variable); 3208 3215 } 3209 3216 3210 3217 if (WARN_ON(len < min_hdr_len))
+1
net/wireless/sme.c
··· 1045 1045 cfg80211_hold_bss( 1046 1046 bss_from_pub(params->links[link].bss)); 1047 1047 ev->cr.links[link].bss = params->links[link].bss; 1048 + ev->cr.links[link].status = params->links[link].status; 1048 1049 1049 1050 if (params->links[link].addr) { 1050 1051 ev->cr.links[link].addr = next;
+1 -1
rust/kernel/firmware.rs
··· 2 2 3 3 //! Firmware abstraction 4 4 //! 5 - //! C header: [`include/linux/firmware.h`](srctree/include/linux/firmware.h") 5 + //! C header: [`include/linux/firmware.h`](srctree/include/linux/firmware.h) 6 6 7 7 use crate::{bindings, device::Device, error::Error, error::Result, str::CStr}; 8 8 use core::ptr::NonNull;
+3 -3
scripts/syscall.tbl
··· 53 53 39 common umount2 sys_umount 54 54 40 common mount sys_mount 55 55 41 common pivot_root sys_pivot_root 56 + 42 common nfsservctl sys_ni_syscall 56 57 43 32 statfs64 sys_statfs64 compat_sys_statfs64 57 58 43 64 statfs sys_statfs 58 59 44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 ··· 99 98 77 common tee sys_tee 100 99 78 common readlinkat sys_readlinkat 101 100 79 stat64 fstatat64 sys_fstatat64 102 - 79 newstat fstatat sys_newfstatat 101 + 79 64 newfstatat sys_newfstatat 103 102 80 stat64 fstat64 sys_fstat64 104 - 80 newstat fstat sys_newfstat 103 + 80 64 fstat sys_newfstat 105 104 81 common sync sys_sync 106 105 82 common fsync sys_fsync 107 106 83 common fdatasync sys_fdatasync ··· 403 402 460 common lsm_set_self_attr sys_lsm_set_self_attr 404 403 461 common lsm_list_modules sys_lsm_list_modules 405 404 462 common mseal sys_mseal 406 - 467 common uretprobe sys_uretprobe
+2 -12
sound/core/seq/seq_ports.h
··· 7 7 #define __SND_SEQ_PORTS_H 8 8 9 9 #include <sound/seq_kernel.h> 10 + #include <sound/ump_convert.h> 10 11 #include "seq_lock.h" 11 12 12 13 /* list of 'exported' ports */ ··· 41 40 rwlock_t list_lock; 42 41 int (*open)(void *private_data, struct snd_seq_port_subscribe *info); 43 42 int (*close)(void *private_data, struct snd_seq_port_subscribe *info); 44 - }; 45 - 46 - /* context for converting from legacy control event to UMP packet */ 47 - struct snd_seq_ump_midi2_bank { 48 - bool rpn_set; 49 - bool nrpn_set; 50 - bool bank_set; 51 - unsigned char cc_rpn_msb, cc_rpn_lsb; 52 - unsigned char cc_nrpn_msb, cc_nrpn_lsb; 53 - unsigned char cc_data_msb, cc_data_lsb; 54 - unsigned char cc_bank_msb, cc_bank_lsb; 55 43 }; 56 44 57 45 struct snd_seq_client_port { ··· 78 88 unsigned char ump_group; 79 89 80 90 #if IS_ENABLED(CONFIG_SND_SEQ_UMP) 81 - struct snd_seq_ump_midi2_bank midi2_bank[16]; /* per channel */ 91 + struct ump_cvt_to_ump_bank midi2_bank[16]; /* per channel */ 82 92 #endif 83 93 }; 84 94
+86 -52
sound/core/seq/seq_ump_convert.c
··· 368 368 struct snd_seq_ump_event ev_cvt; 369 369 const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump; 370 370 union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump; 371 - struct snd_seq_ump_midi2_bank *cc; 371 + struct ump_cvt_to_ump_bank *cc; 372 372 373 373 ev_cvt = *event; 374 374 memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump)); ··· 789 789 return 1; 790 790 } 791 791 792 - /* set up the MIDI2 RPN/NRPN packet data from the parsed info */ 793 - static void fill_rpn(struct snd_seq_ump_midi2_bank *cc, 794 - union snd_ump_midi2_msg *data, 795 - unsigned char channel) 792 + static void reset_rpn(struct ump_cvt_to_ump_bank *cc) 796 793 { 794 + cc->rpn_set = 0; 795 + cc->nrpn_set = 0; 796 + cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; 797 + cc->cc_data_msb = cc->cc_data_lsb = 0; 798 + cc->cc_data_msb_set = cc->cc_data_lsb_set = 0; 799 + } 800 + 801 + /* set up the MIDI2 RPN/NRPN packet data from the parsed info */ 802 + static int fill_rpn(struct ump_cvt_to_ump_bank *cc, 803 + union snd_ump_midi2_msg *data, 804 + unsigned char channel, 805 + bool flush) 806 + { 807 + if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set)) 808 + return 0; // skip 809 + /* when not flushing, wait for complete data set */ 810 + if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set)) 811 + return 0; // skip 812 + 797 813 if (cc->rpn_set) { 798 814 data->rpn.status = UMP_MSG_STATUS_RPN; 799 815 data->rpn.bank = cc->cc_rpn_msb; 800 816 data->rpn.index = cc->cc_rpn_lsb; 801 - cc->rpn_set = 0; 802 - cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; 803 - } else { 817 + } else if (cc->nrpn_set) { 804 818 data->rpn.status = UMP_MSG_STATUS_NRPN; 805 819 data->rpn.bank = cc->cc_nrpn_msb; 806 820 data->rpn.index = cc->cc_nrpn_lsb; 807 - cc->nrpn_set = 0; 808 - cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0; 821 + } else { 822 + return 0; // skip 809 823 } 824 + 810 825 data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) | 811 826 cc->cc_data_lsb); 812 827 data->rpn.channel 
= channel; 813 - cc->cc_data_msb = cc->cc_data_lsb = 0; 828 + 829 + reset_rpn(cc); 830 + return 1; 814 831 } 815 832 816 833 /* convert CC event to MIDI 2.0 UMP */ ··· 839 822 unsigned char channel = event->data.control.channel & 0x0f; 840 823 unsigned char index = event->data.control.param & 0x7f; 841 824 unsigned char val = event->data.control.value & 0x7f; 842 - struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel]; 825 + struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel]; 826 + int ret; 843 827 844 828 /* process special CC's (bank/rpn/nrpn) */ 845 829 switch (index) { 846 830 case UMP_CC_RPN_MSB: 831 + ret = fill_rpn(cc, data, channel, true); 847 832 cc->rpn_set = 1; 848 833 cc->cc_rpn_msb = val; 849 - return 0; // skip 834 + if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) 835 + reset_rpn(cc); 836 + return ret; 850 837 case UMP_CC_RPN_LSB: 838 + ret = fill_rpn(cc, data, channel, true); 851 839 cc->rpn_set = 1; 852 840 cc->cc_rpn_lsb = val; 853 - return 0; // skip 841 + if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) 842 + reset_rpn(cc); 843 + return ret; 854 844 case UMP_CC_NRPN_MSB: 845 + ret = fill_rpn(cc, data, channel, true); 855 846 cc->nrpn_set = 1; 856 847 cc->cc_nrpn_msb = val; 857 - return 0; // skip 848 + return ret; 858 849 case UMP_CC_NRPN_LSB: 850 + ret = fill_rpn(cc, data, channel, true); 859 851 cc->nrpn_set = 1; 860 852 cc->cc_nrpn_lsb = val; 861 - return 0; // skip 853 + return ret; 862 854 case UMP_CC_DATA: 855 + cc->cc_data_msb_set = 1; 863 856 cc->cc_data_msb = val; 864 - return 0; // skip 857 + return fill_rpn(cc, data, channel, false); 865 858 case UMP_CC_BANK_SELECT: 866 859 cc->bank_set = 1; 867 860 cc->cc_bank_msb = val; ··· 881 854 cc->cc_bank_lsb = val; 882 855 return 0; // skip 883 856 case UMP_CC_DATA_LSB: 857 + cc->cc_data_lsb_set = 1; 884 858 cc->cc_data_lsb = val; 885 - if (!(cc->rpn_set || cc->nrpn_set)) 886 - return 0; // skip 887 - fill_rpn(cc, data, channel); 888 - return 1; 859 + 
return fill_rpn(cc, data, channel, false); 889 860 } 890 861 891 862 data->cc.status = status; ··· 912 887 unsigned char status) 913 888 { 914 889 unsigned char channel = event->data.control.channel & 0x0f; 915 - struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel]; 890 + struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel]; 916 891 917 892 data->pg.status = status; 918 893 data->pg.channel = channel; ··· 949 924 { 950 925 unsigned char channel = event->data.control.channel & 0x0f; 951 926 unsigned char index = event->data.control.param & 0x7f; 952 - struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel]; 927 + struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel]; 953 928 unsigned char msb, lsb; 929 + int ret; 954 930 955 931 msb = (event->data.control.value >> 7) & 0x7f; 956 932 lsb = event->data.control.value & 0x7f; ··· 965 939 cc->cc_bank_lsb = lsb; 966 940 return 0; // skip 967 941 case UMP_CC_RPN_MSB: 968 - cc->cc_rpn_msb = msb; 969 - fallthrough; 970 942 case UMP_CC_RPN_LSB: 971 - cc->rpn_set = 1; 943 + ret = fill_rpn(cc, data, channel, true); 944 + cc->cc_rpn_msb = msb; 972 945 cc->cc_rpn_lsb = lsb; 973 - return 0; // skip 946 + cc->rpn_set = 1; 947 + if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) 948 + reset_rpn(cc); 949 + return ret; 974 950 case UMP_CC_NRPN_MSB: 975 - cc->cc_nrpn_msb = msb; 976 - fallthrough; 977 951 case UMP_CC_NRPN_LSB: 952 + ret = fill_rpn(cc, data, channel, true); 953 + cc->cc_nrpn_msb = msb; 978 954 cc->nrpn_set = 1; 979 955 cc->cc_nrpn_lsb = lsb; 980 - return 0; // skip 956 + return ret; 981 957 case UMP_CC_DATA: 982 - cc->cc_data_msb = msb; 983 - fallthrough; 984 958 case UMP_CC_DATA_LSB: 959 + cc->cc_data_msb_set = cc->cc_data_lsb_set = 1; 960 + cc->cc_data_msb = msb; 985 961 cc->cc_data_lsb = lsb; 986 - if (!(cc->rpn_set || cc->nrpn_set)) 987 - return 0; // skip 988 - fill_rpn(cc, data, channel); 989 - return 1; 962 + return fill_rpn(cc, data, channel, false); 990 963 } 
991 964 992 965 data->cc.status = UMP_MSG_STATUS_CC; ··· 1217 1192 { 1218 1193 struct snd_seq_ump_event ev_cvt; 1219 1194 unsigned char status; 1220 - u8 buf[6], *xbuf; 1195 + u8 buf[8], *xbuf; 1221 1196 int offset = 0; 1222 1197 int len, err; 1198 + bool finished = false; 1223 1199 1224 1200 if (!snd_seq_ev_is_variable(event)) 1225 1201 return 0; 1226 1202 1227 1203 setup_ump_event(&ev_cvt, event); 1228 - for (;;) { 1204 + while (!finished) { 1229 1205 len = snd_seq_expand_var_event_at(event, sizeof(buf), buf, offset); 1230 1206 if (len <= 0) 1231 1207 break; 1232 - if (WARN_ON(len > 6)) 1208 + if (WARN_ON(len > sizeof(buf))) 1233 1209 break; 1234 - offset += len; 1210 + 1235 1211 xbuf = buf; 1212 + status = UMP_SYSEX_STATUS_CONTINUE; 1213 + /* truncate the sysex start-marker */ 1236 1214 if (*xbuf == UMP_MIDI1_MSG_SYSEX_START) { 1237 1215 status = UMP_SYSEX_STATUS_START; 1238 - xbuf++; 1239 1216 len--; 1240 - if (len > 0 && xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) { 1241 - status = UMP_SYSEX_STATUS_SINGLE; 1242 - len--; 1243 - } 1244 - } else { 1245 - if (xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) { 1246 - status = UMP_SYSEX_STATUS_END; 1247 - len--; 1248 - } else { 1249 - status = UMP_SYSEX_STATUS_CONTINUE; 1250 - } 1217 + offset++; 1218 + xbuf++; 1251 1219 } 1220 + 1221 + /* if the last of this packet or the 1st byte of the next packet 1222 + * is the end-marker, finish the transfer with this packet 1223 + */ 1224 + if (len > 0 && len < 8 && 1225 + xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) { 1226 + if (status == UMP_SYSEX_STATUS_START) 1227 + status = UMP_SYSEX_STATUS_SINGLE; 1228 + else 1229 + status = UMP_SYSEX_STATUS_END; 1230 + len--; 1231 + finished = true; 1232 + } 1233 + 1234 + len = min(len, 6); 1252 1235 fill_sysex7_ump(dest_port, ev_cvt.ump, status, xbuf, len); 1253 1236 err = __snd_seq_deliver_single_event(dest, dest_port, 1254 1237 (struct snd_seq_event *)&ev_cvt, 1255 1238 atomic, hop); 1256 1239 if (err < 0) 1257 1240 return err; 1241 + offset += 
len; 1258 1242 } 1259 1243 return 0; 1260 1244 }
+42 -18
sound/core/ump_convert.c
··· 287 287 return 4; 288 288 } 289 289 290 - static void fill_rpn(struct ump_cvt_to_ump_bank *cc, 291 - union snd_ump_midi2_msg *midi2) 290 + static void reset_rpn(struct ump_cvt_to_ump_bank *cc) 292 291 { 292 + cc->rpn_set = 0; 293 + cc->nrpn_set = 0; 294 + cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; 295 + cc->cc_data_msb = cc->cc_data_lsb = 0; 296 + cc->cc_data_msb_set = cc->cc_data_lsb_set = 0; 297 + } 298 + 299 + static int fill_rpn(struct ump_cvt_to_ump_bank *cc, 300 + union snd_ump_midi2_msg *midi2, 301 + bool flush) 302 + { 303 + if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set)) 304 + return 0; // skip 305 + /* when not flushing, wait for complete data set */ 306 + if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set)) 307 + return 0; // skip 308 + 293 309 if (cc->rpn_set) { 294 310 midi2->rpn.status = UMP_MSG_STATUS_RPN; 295 311 midi2->rpn.bank = cc->cc_rpn_msb; 296 312 midi2->rpn.index = cc->cc_rpn_lsb; 297 - cc->rpn_set = 0; 298 - cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; 299 - } else { 313 + } else if (cc->nrpn_set) { 300 314 midi2->rpn.status = UMP_MSG_STATUS_NRPN; 301 315 midi2->rpn.bank = cc->cc_nrpn_msb; 302 316 midi2->rpn.index = cc->cc_nrpn_lsb; 303 - cc->nrpn_set = 0; 304 - cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0; 317 + } else { 318 + return 0; // skip 305 319 } 320 + 306 321 midi2->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) | 307 322 cc->cc_data_lsb); 308 - cc->cc_data_msb = cc->cc_data_lsb = 0; 323 + 324 + reset_rpn(cc); 325 + return 1; 309 326 } 310 327 311 328 /* convert to a MIDI 1.0 Channel Voice message */ ··· 335 318 struct ump_cvt_to_ump_bank *cc; 336 319 union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)data; 337 320 unsigned char status, channel; 321 + int ret; 338 322 339 323 BUILD_BUG_ON(sizeof(union snd_ump_midi1_msg) != 4); 340 324 BUILD_BUG_ON(sizeof(union snd_ump_midi2_msg) != 8); ··· 376 358 case UMP_MSG_STATUS_CC: 377 359 switch (buf[1]) { 378 360 case UMP_CC_RPN_MSB: 361 + ret = fill_rpn(cc, midi2, true); 379 
362 cc->rpn_set = 1; 380 363 cc->cc_rpn_msb = buf[2]; 381 - return 0; // skip 364 + if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) 365 + reset_rpn(cc); 366 + return ret; 382 367 case UMP_CC_RPN_LSB: 368 + ret = fill_rpn(cc, midi2, true); 383 369 cc->rpn_set = 1; 384 370 cc->cc_rpn_lsb = buf[2]; 385 - return 0; // skip 371 + if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) 372 + reset_rpn(cc); 373 + return ret; 386 374 case UMP_CC_NRPN_MSB: 375 + ret = fill_rpn(cc, midi2, true); 387 376 cc->nrpn_set = 1; 388 377 cc->cc_nrpn_msb = buf[2]; 389 - return 0; // skip 378 + return ret; 390 379 case UMP_CC_NRPN_LSB: 380 + ret = fill_rpn(cc, midi2, true); 391 381 cc->nrpn_set = 1; 392 382 cc->cc_nrpn_lsb = buf[2]; 393 - return 0; // skip 383 + return ret; 394 384 case UMP_CC_DATA: 385 + cc->cc_data_msb_set = 1; 395 386 cc->cc_data_msb = buf[2]; 396 - return 0; // skip 387 + return fill_rpn(cc, midi2, false); 397 388 case UMP_CC_BANK_SELECT: 398 389 cc->bank_set = 1; 399 390 cc->cc_bank_msb = buf[2]; ··· 412 385 cc->cc_bank_lsb = buf[2]; 413 386 return 0; // skip 414 387 case UMP_CC_DATA_LSB: 388 + cc->cc_data_lsb_set = 1; 415 389 cc->cc_data_lsb = buf[2]; 416 - if (cc->rpn_set || cc->nrpn_set) 417 - fill_rpn(cc, midi2); 418 - else 419 - return 0; // skip 420 - break; 390 + return fill_rpn(cc, midi2, false); 421 391 default: 422 392 midi2->cc.index = buf[1]; 423 393 midi2->cc.data = upscale_7_to_32bit(buf[2]);
+24 -14
sound/firewire/amdtp-stream.c
··· 77 77 // overrun. Actual device can skip more, then this module stops the packet streaming. 78 78 #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5 79 79 80 + static void pcm_period_work(struct work_struct *work); 81 + 80 82 /** 81 83 * amdtp_stream_init - initialize an AMDTP stream structure 82 84 * @s: the AMDTP stream to initialize ··· 107 105 s->flags = flags; 108 106 s->context = ERR_PTR(-1); 109 107 mutex_init(&s->mutex); 108 + INIT_WORK(&s->period_work, pcm_period_work); 110 109 s->packet_index = 0; 111 110 112 111 init_waitqueue_head(&s->ready_wait); ··· 350 347 */ 351 348 void amdtp_stream_pcm_prepare(struct amdtp_stream *s) 352 349 { 350 + cancel_work_sync(&s->period_work); 353 351 s->pcm_buffer_pointer = 0; 354 352 s->pcm_period_pointer = 0; 355 353 } ··· 615 611 // The program in user process should periodically check the status of intermediate 616 612 // buffer associated to PCM substream to process PCM frames in the buffer, instead 617 613 // of receiving notification of period elapsed by poll wait. 618 - if (!pcm->runtime->no_period_wakeup) { 619 - if (in_softirq()) { 620 - // In software IRQ context for 1394 OHCI. 621 - snd_pcm_period_elapsed(pcm); 622 - } else { 623 - // In process context of ALSA PCM application under acquired lock of 624 - // PCM substream. 
625 - snd_pcm_period_elapsed_under_stream_lock(pcm); 626 - } 627 - } 614 + if (!pcm->runtime->no_period_wakeup) 615 + queue_work(system_highpri_wq, &s->period_work); 628 616 } 617 + } 618 + 619 + static void pcm_period_work(struct work_struct *work) 620 + { 621 + struct amdtp_stream *s = container_of(work, struct amdtp_stream, 622 + period_work); 623 + struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); 624 + 625 + if (pcm) 626 + snd_pcm_period_elapsed(pcm); 629 627 } 630 628 631 629 static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params, ··· 1855 1849 { 1856 1850 struct amdtp_stream *irq_target = d->irq_target; 1857 1851 1858 - // Process isochronous packets queued till recent isochronous cycle to handle PCM frames. 1859 1852 if (irq_target && amdtp_stream_running(irq_target)) { 1860 - // In software IRQ context, the call causes dead-lock to disable the tasklet 1861 - // synchronously. 1862 - if (!in_softirq()) 1853 + // use wq to prevent AB/BA deadlock competition for 1854 + // substream lock: 1855 + // fw_iso_context_flush_completions() acquires 1856 + // lock by ohci_flush_iso_completions(), 1857 + // amdtp-stream process_rx_packets() attempts to 1858 + // acquire same lock by snd_pcm_elapsed() 1859 + if (current_work() != &s->period_work) 1863 1860 fw_iso_context_flush_completions(irq_target->context); 1864 1861 } 1865 1862 ··· 1918 1909 return; 1919 1910 } 1920 1911 1912 + cancel_work_sync(&s->period_work); 1921 1913 fw_iso_context_stop(s->context); 1922 1914 fw_iso_context_destroy(s->context); 1923 1915 s->context = ERR_PTR(-1);
+1
sound/firewire/amdtp-stream.h
··· 191 191 192 192 /* For a PCM substream processing. */ 193 193 struct snd_pcm_substream *pcm; 194 + struct work_struct period_work; 194 195 snd_pcm_uframes_t pcm_buffer_pointer; 195 196 unsigned int pcm_period_pointer; 196 197 unsigned int pcm_frame_multiplier;
-13
sound/pci/hda/cs35l41_hda.c
··· 133 133 { CS35L41_AMP_DIG_VOL_CTRL, 0x0000A678 }, // AMP_HPF_PCM_EN = 1, AMP_VOL_PCM Mute 134 134 }; 135 135 136 - static void cs35l41_add_controls(struct cs35l41_hda *cs35l41) 137 - { 138 - struct hda_cs_dsp_ctl_info info; 139 - 140 - info.device_name = cs35l41->amp_name; 141 - info.fw_type = cs35l41->firmware_type; 142 - info.card = cs35l41->codec->card; 143 - 144 - hda_cs_dsp_add_controls(&cs35l41->cs_dsp, &info); 145 - } 146 - 147 136 static const struct cs_dsp_client_ops client_ops = { 148 137 .control_remove = hda_cs_dsp_control_remove, 149 138 }; ··· 591 602 hda_cs_dsp_fw_ids[cs35l41->firmware_type]); 592 603 if (ret) 593 604 goto err; 594 - 595 - cs35l41_add_controls(cs35l41); 596 605 597 606 cs35l41_hda_apply_calibration(cs35l41); 598 607
+1 -37
sound/pci/hda/cs35l56_hda.c
··· 559 559 kfree(coeff_filename); 560 560 } 561 561 562 - static void cs35l56_hda_create_dsp_controls_work(struct work_struct *work) 563 - { 564 - struct cs35l56_hda *cs35l56 = container_of(work, struct cs35l56_hda, control_work); 565 - struct hda_cs_dsp_ctl_info info; 566 - 567 - info.device_name = cs35l56->amp_name; 568 - info.fw_type = HDA_CS_DSP_FW_MISC; 569 - info.card = cs35l56->codec->card; 570 - 571 - hda_cs_dsp_add_controls(&cs35l56->cs_dsp, &info); 572 - } 573 - 574 562 static void cs35l56_hda_apply_calibration(struct cs35l56_hda *cs35l56) 575 563 { 576 564 int ret; ··· 583 595 char *wmfw_filename = NULL; 584 596 unsigned int preloaded_fw_ver; 585 597 bool firmware_missing; 586 - bool add_dsp_controls_required = false; 587 598 int ret; 588 - 589 - /* 590 - * control_work must be flushed before proceeding, but we can't do that 591 - * here as it would create a deadlock on controls_rwsem so it must be 592 - * performed before queuing dsp_work. 593 - */ 594 - WARN_ON_ONCE(work_busy(&cs35l56->control_work)); 595 599 596 600 /* 597 601 * Prepare for a new DSP power-up. If the DSP has had firmware 598 602 * downloaded previously then it needs to be powered down so that it 599 - * can be updated and if hadn't been patched before then the controls 600 - * will need to be added once firmware download succeeds. 603 + * can be updated. 601 604 */ 602 605 if (cs35l56->base.fw_patched) 603 606 cs_dsp_power_down(&cs35l56->cs_dsp); 604 - else 605 - add_dsp_controls_required = true; 606 607 607 608 cs35l56->base.fw_patched = false; 608 609 ··· 675 698 CS35L56_FIRMWARE_MISSING); 676 699 cs35l56->base.fw_patched = true; 677 700 678 - /* 679 - * Adding controls is deferred to prevent a lock inversion - ALSA takes 680 - * the controls_rwsem when adding a control, the get() / put() 681 - * functions of a control are called holding controls_rwsem and those 682 - * that depend on running firmware wait for dsp_work() to complete. 
683 - */ 684 - if (add_dsp_controls_required) 685 - queue_work(system_long_wq, &cs35l56->control_work); 686 - 687 701 ret = cs_dsp_run(&cs35l56->cs_dsp); 688 702 if (ret) 689 703 dev_dbg(cs35l56->base.dev, "%s: cs_dsp_run ret %d\n", __func__, ret); ··· 721 753 strscpy(comp->name, dev_name(dev), sizeof(comp->name)); 722 754 comp->playback_hook = cs35l56_hda_playback_hook; 723 755 724 - flush_work(&cs35l56->control_work); 725 756 queue_work(system_long_wq, &cs35l56->dsp_work); 726 757 727 758 cs35l56_hda_create_controls(cs35l56); ··· 742 775 struct hda_component *comp; 743 776 744 777 cancel_work_sync(&cs35l56->dsp_work); 745 - cancel_work_sync(&cs35l56->control_work); 746 778 747 779 cs35l56_hda_remove_controls(cs35l56); 748 780 ··· 772 806 struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev); 773 807 774 808 cs35l56_hda_wait_dsp_ready(cs35l56); 775 - flush_work(&cs35l56->control_work); 776 809 777 810 if (cs35l56->playing) 778 811 cs35l56_hda_pause(cs35l56); ··· 991 1026 dev_set_drvdata(cs35l56->base.dev, cs35l56); 992 1027 993 1028 INIT_WORK(&cs35l56->dsp_work, cs35l56_hda_dsp_work); 994 - INIT_WORK(&cs35l56->control_work, cs35l56_hda_create_dsp_controls_work); 995 1029 996 1030 ret = cs35l56_hda_read_acpi(cs35l56, hid, id); 997 1031 if (ret)
-1
sound/pci/hda/cs35l56_hda.h
··· 23 23 struct cs35l56_base base; 24 24 struct hda_codec *codec; 25 25 struct work_struct dsp_work; 26 - struct work_struct control_work; 27 26 28 27 int index; 29 28 const char *system_name;
+1 -1
sound/pci/hda/hda_controller.h
··· 28 28 #else 29 29 #define AZX_DCAPS_I915_COMPONENT 0 /* NOP */ 30 30 #endif 31 - /* 14 unused */ 31 + #define AZX_DCAPS_AMD_ALLOC_FIX (1 << 14) /* AMD allocation workaround */ 32 32 #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */ 33 33 #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */ 34 34 #define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
+63
sound/pci/hda/hda_generic.c
··· 4955 4955 } 4956 4956 EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm); 4957 4957 4958 + /* forcibly mute the speaker output without caching; return true if updated */ 4959 + static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid) 4960 + { 4961 + if (!nid) 4962 + return false; 4963 + if (!nid_has_mute(codec, nid, HDA_OUTPUT)) 4964 + return false; /* no mute, skip */ 4965 + if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) & 4966 + snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) & 4967 + HDA_AMP_MUTE) 4968 + return false; /* both channels already muted, skip */ 4969 + 4970 + /* direct amp update without caching */ 4971 + snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, 4972 + AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT | 4973 + AC_AMP_SET_RIGHT | HDA_AMP_MUTE); 4974 + return true; 4975 + } 4976 + 4977 + /** 4978 + * snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs 4979 + * @codec: the HDA codec 4980 + * 4981 + * Forcibly mute the speaker outputs, to be called at suspend or shutdown. 4982 + * 4983 + * The mute state done by this function isn't cached, hence the original state 4984 + * will be restored at resume. 4985 + * 4986 + * Return true if the mute state has been changed. 
4987 + */ 4988 + bool snd_hda_gen_shutup_speakers(struct hda_codec *codec) 4989 + { 4990 + struct hda_gen_spec *spec = codec->spec; 4991 + const int *paths; 4992 + const struct nid_path *path; 4993 + int i, p, num_paths; 4994 + bool updated = false; 4995 + 4996 + /* if already powered off, do nothing */ 4997 + if (!snd_hdac_is_power_on(&codec->core)) 4998 + return false; 4999 + 5000 + if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) { 5001 + paths = spec->out_paths; 5002 + num_paths = spec->autocfg.line_outs; 5003 + } else { 5004 + paths = spec->speaker_paths; 5005 + num_paths = spec->autocfg.speaker_outs; 5006 + } 5007 + 5008 + for (i = 0; i < num_paths; i++) { 5009 + path = snd_hda_get_path_from_idx(codec, paths[i]); 5010 + if (!path) 5011 + continue; 5012 + for (p = 0; p < path->depth; p++) 5013 + if (force_mute_output_path(codec, path->path[p])) 5014 + updated = true; 5015 + } 5016 + 5017 + return updated; 5018 + } 5019 + EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers); 5020 + 4958 5021 /** 4959 5022 * snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and 4960 5023 * set up the hda_gen_spec
+1
sound/pci/hda/hda_generic.h
··· 353 353 int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec, 354 354 int (*callback)(struct led_classdev *, 355 355 enum led_brightness)); 356 + bool snd_hda_gen_shutup_speakers(struct hda_codec *codec); 356 357 357 358 #endif /* __SOUND_HDA_GENERIC_H */
+9 -1
sound/pci/hda/hda_intel.c
··· 40 40 41 41 #ifdef CONFIG_X86 42 42 /* for snoop control */ 43 + #include <linux/dma-map-ops.h> 43 44 #include <asm/set_memory.h> 44 45 #include <asm/cpufeature.h> 45 46 #endif ··· 307 306 308 307 /* quirks for ATI HDMI with snoop off */ 309 308 #define AZX_DCAPS_PRESET_ATI_HDMI_NS \ 310 - (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF) 309 + (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_AMD_ALLOC_FIX) 311 310 312 311 /* quirks for AMD SB */ 313 312 #define AZX_DCAPS_PRESET_AMD_SB \ ··· 1702 1701 1703 1702 if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF) 1704 1703 snoop = false; 1704 + 1705 + #ifdef CONFIG_X86 1706 + /* check the presence of DMA ops (i.e. IOMMU), disable snoop conditionally */ 1707 + if ((chip->driver_caps & AZX_DCAPS_AMD_ALLOC_FIX) && 1708 + !get_dma_ops(chip->card->dev)) 1709 + snoop = false; 1710 + #endif 1705 1711 1706 1712 chip->snoop = snoop; 1707 1713 if (!snoop) {
+11 -45
sound/pci/hda/patch_conexant.c
··· 21 21 #include "hda_jack.h" 22 22 #include "hda_generic.h" 23 23 24 - enum { 25 - CX_HEADSET_NOPRESENT = 0, 26 - CX_HEADSET_PARTPRESENT, 27 - CX_HEADSET_ALLPRESENT, 28 - }; 29 - 30 24 struct conexant_spec { 31 25 struct hda_gen_spec gen; 32 26 ··· 42 48 unsigned int gpio_led; 43 49 unsigned int gpio_mute_led_mask; 44 50 unsigned int gpio_mic_led_mask; 45 - unsigned int headset_present_flag; 46 51 bool is_cx8070_sn6140; 47 52 }; 48 53 ··· 205 212 { 206 213 struct conexant_spec *spec = codec->spec; 207 214 215 + snd_hda_gen_shutup_speakers(codec); 216 + 208 217 /* Turn the problematic codec into D3 to avoid spurious noises 209 218 from the internal speaker during (and after) reboot */ 210 219 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); ··· 245 250 } 246 251 } 247 252 248 - static void cx_update_headset_mic_vref(struct hda_codec *codec, unsigned int res) 253 + static void cx_update_headset_mic_vref(struct hda_codec *codec, struct hda_jack_callback *event) 249 254 { 250 - unsigned int phone_present, mic_persent, phone_tag, mic_tag; 251 - struct conexant_spec *spec = codec->spec; 255 + unsigned int mic_present; 252 256 253 257 /* In cx8070 and sn6140, the node 16 can only be config to headphone or disabled, 254 258 * the node 19 can only be config to microphone or disabled. 255 259 * Check hp&mic tag to process headset pulgin&plugout. 
256 260 */ 257 - phone_tag = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0); 258 - mic_tag = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0); 259 - if ((phone_tag & (res >> AC_UNSOL_RES_TAG_SHIFT)) || 260 - (mic_tag & (res >> AC_UNSOL_RES_TAG_SHIFT))) { 261 - phone_present = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_PIN_SENSE, 0x0); 262 - if (!(phone_present & AC_PINSENSE_PRESENCE)) {/* headphone plugout */ 263 - spec->headset_present_flag = CX_HEADSET_NOPRESENT; 264 - snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20); 265 - return; 266 - } 267 - if (spec->headset_present_flag == CX_HEADSET_NOPRESENT) { 268 - spec->headset_present_flag = CX_HEADSET_PARTPRESENT; 269 - } else if (spec->headset_present_flag == CX_HEADSET_PARTPRESENT) { 270 - mic_persent = snd_hda_codec_read(codec, 0x19, 0, 271 - AC_VERB_GET_PIN_SENSE, 0x0); 272 - /* headset is present */ 273 - if ((phone_present & AC_PINSENSE_PRESENCE) && 274 - (mic_persent & AC_PINSENSE_PRESENCE)) { 275 - cx_process_headset_plugin(codec); 276 - spec->headset_present_flag = CX_HEADSET_ALLPRESENT; 277 - } 278 - } 279 - } 280 - } 281 - 282 - static void cx_jack_unsol_event(struct hda_codec *codec, unsigned int res) 283 - { 284 - struct conexant_spec *spec = codec->spec; 285 - 286 - if (spec->is_cx8070_sn6140) 287 - cx_update_headset_mic_vref(codec, res); 288 - 289 - snd_hda_jack_unsol_event(codec, res); 261 + mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0); 262 + if (!(mic_present & AC_PINSENSE_PRESENCE)) /* mic plugout */ 263 + snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20); 264 + else 265 + cx_process_headset_plugin(codec); 290 266 } 291 267 292 268 static int cx_auto_suspend(struct hda_codec *codec) ··· 271 305 .build_pcms = snd_hda_gen_build_pcms, 272 306 .init = cx_auto_init, 273 307 .free = cx_auto_free, 274 - .unsol_event = cx_jack_unsol_event, 308 + .unsol_event = 
snd_hda_jack_unsol_event, 275 309 .suspend = cx_auto_suspend, 276 310 .check_power_status = snd_hda_gen_check_power_status, 277 311 }; ··· 1129 1163 case 0x14f11f86: 1130 1164 case 0x14f11f87: 1131 1165 spec->is_cx8070_sn6140 = true; 1132 - spec->headset_present_flag = CX_HEADSET_NOPRESENT; 1166 + snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref); 1133 1167 break; 1134 1168 } 1135 1169
+2
sound/pci/hda/patch_hdmi.c
··· 1989 1989 } 1990 1990 1991 1991 static const struct snd_pci_quirk force_connect_list[] = { 1992 + SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1), 1993 + SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1), 1992 1994 SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), 1993 1995 SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), 1994 1996 SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+2
sound/pci/hda/patch_realtek.c
··· 9872 9872 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), 9873 9873 SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), 9874 9874 SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF), 9875 + SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST), 9875 9876 SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK), 9876 9877 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), 9877 9878 SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), ··· 10678 10677 SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK), 10679 10678 SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10680 10679 SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10680 + SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10681 10681 10682 10682 #if 0 10683 10683 /* Below is a quirk table taken from the old code.
+21
sound/soc/amd/yc/acp6x-mach.c
··· 224 224 .driver_data = &acp6x_card, 225 225 .matches = { 226 226 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 227 + DMI_MATCH(DMI_PRODUCT_NAME, "21M3"), 228 + } 229 + }, 230 + { 231 + .driver_data = &acp6x_card, 232 + .matches = { 233 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 227 234 DMI_MATCH(DMI_PRODUCT_NAME, "21M5"), 228 235 } 229 236 }, ··· 420 413 .driver_data = &acp6x_card, 421 414 .matches = { 422 415 DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 416 + DMI_MATCH(DMI_BOARD_NAME, "8A44"), 417 + } 418 + }, 419 + { 420 + .driver_data = &acp6x_card, 421 + .matches = { 422 + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 423 423 DMI_MATCH(DMI_BOARD_NAME, "8A22"), 424 424 } 425 425 }, ··· 435 421 .matches = { 436 422 DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 437 423 DMI_MATCH(DMI_BOARD_NAME, "8A3E"), 424 + } 425 + }, 426 + { 427 + .driver_data = &acp6x_card, 428 + .matches = { 429 + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 430 + DMI_MATCH(DMI_BOARD_NAME, "8B27"), 438 431 } 439 432 }, 440 433 {
+1 -1
sound/soc/codecs/cs-amp-lib.c
··· 108 108 109 109 KUNIT_STATIC_STUB_REDIRECT(cs_amp_get_efi_variable, name, guid, size, buf); 110 110 111 - if (IS_ENABLED(CONFIG_EFI)) 111 + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) 112 112 return efi.get_variable(name, guid, &attr, size, buf); 113 113 114 114 return EFI_NOT_FOUND;
+2 -9
sound/soc/codecs/cs35l45.c
··· 176 176 struct snd_kcontrol *kcontrol; 177 177 struct snd_kcontrol_volatile *vd; 178 178 unsigned int index_offset; 179 - char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 180 179 181 - if (component->name_prefix) 182 - snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s", 183 - component->name_prefix, ctl_name); 184 - else 185 - snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s", ctl_name); 186 - 187 - kcontrol = snd_soc_card_get_kcontrol_locked(component->card, name); 180 + kcontrol = snd_soc_component_get_kcontrol_locked(component, ctl_name); 188 181 if (!kcontrol) { 189 - dev_err(component->dev, "Can't find kcontrol %s\n", name); 182 + dev_err(component->dev, "Can't find kcontrol %s\n", ctl_name); 190 183 return -EINVAL; 191 184 } 192 185
+77
sound/soc/codecs/cs35l56-sdw.c
··· 23 23 /* Register addresses are offset when sent over SoundWire */ 24 24 #define CS35L56_SDW_ADDR_OFFSET 0x8000 25 25 26 + /* Cirrus bus bridge registers */ 27 + #define CS35L56_SDW_MEM_ACCESS_STATUS 0xd0 28 + #define CS35L56_SDW_MEM_READ_DATA 0xd8 29 + 30 + #define CS35L56_SDW_LAST_LATE BIT(3) 31 + #define CS35L56_SDW_CMD_IN_PROGRESS BIT(2) 32 + #define CS35L56_SDW_RDATA_RDY BIT(0) 33 + 34 + #define CS35L56_LATE_READ_POLL_US 10 35 + #define CS35L56_LATE_READ_TIMEOUT_US 1000 36 + 37 + static int cs35l56_sdw_poll_mem_status(struct sdw_slave *peripheral, 38 + unsigned int mask, 39 + unsigned int match) 40 + { 41 + int ret, val; 42 + 43 + ret = read_poll_timeout(sdw_read_no_pm, val, 44 + (val < 0) || ((val & mask) == match), 45 + CS35L56_LATE_READ_POLL_US, CS35L56_LATE_READ_TIMEOUT_US, 46 + false, peripheral, CS35L56_SDW_MEM_ACCESS_STATUS); 47 + if (ret < 0) 48 + return ret; 49 + 50 + if (val < 0) 51 + return val; 52 + 53 + return 0; 54 + } 55 + 56 + static int cs35l56_sdw_slow_read(struct sdw_slave *peripheral, unsigned int reg, 57 + u8 *buf, size_t val_size) 58 + { 59 + int ret, i; 60 + 61 + reg += CS35L56_SDW_ADDR_OFFSET; 62 + 63 + for (i = 0; i < val_size; i += sizeof(u32)) { 64 + /* Poll for bus bridge idle */ 65 + ret = cs35l56_sdw_poll_mem_status(peripheral, 66 + CS35L56_SDW_CMD_IN_PROGRESS, 67 + 0); 68 + if (ret < 0) { 69 + dev_err(&peripheral->dev, "!CMD_IN_PROGRESS fail: %d\n", ret); 70 + return ret; 71 + } 72 + 73 + /* Reading LSByte triggers read of register to holding buffer */ 74 + sdw_read_no_pm(peripheral, reg + i); 75 + 76 + /* Wait for data available */ 77 + ret = cs35l56_sdw_poll_mem_status(peripheral, 78 + CS35L56_SDW_RDATA_RDY, 79 + CS35L56_SDW_RDATA_RDY); 80 + if (ret < 0) { 81 + dev_err(&peripheral->dev, "RDATA_RDY fail: %d\n", ret); 82 + return ret; 83 + } 84 + 85 + /* Read data from buffer */ 86 + ret = sdw_nread_no_pm(peripheral, CS35L56_SDW_MEM_READ_DATA, 87 + sizeof(u32), &buf[i]); 88 + if (ret) { 89 + dev_err(&peripheral->dev, "Late 
read @%#x failed: %d\n", reg + i, ret); 90 + return ret; 91 + } 92 + 93 + swab32s((u32 *)&buf[i]); 94 + } 95 + 96 + return 0; 97 + } 98 + 26 99 static int cs35l56_sdw_read_one(struct sdw_slave *peripheral, unsigned int reg, void *buf) 27 100 { 28 101 int ret; ··· 121 48 int ret; 122 49 123 50 reg = le32_to_cpu(*(const __le32 *)reg_buf); 51 + 52 + if (cs35l56_is_otp_register(reg)) 53 + return cs35l56_sdw_slow_read(peripheral, reg, buf8, val_size); 54 + 124 55 reg += CS35L56_SDW_ADDR_OFFSET; 125 56 126 57 if (val_size == 4)
+1
sound/soc/codecs/cs35l56-shared.c
··· 36 36 { CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 }, 37 37 { CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 }, 38 38 { CS35L56_SWIRE_DP3_CH4_INPUT, 0x00000028 }, 39 + { CS35L56_IRQ1_MASK_18, 0x1f7df0ff }, 39 40 40 41 /* These are not reset by a soft-reset, so patch to defaults. */ 41 42 { CS35L56_MAIN_RENDER_USER_MUTE, 0x00000000 },
+11
sound/soc/codecs/cs35l56.c
··· 1095 1095 } 1096 1096 EXPORT_SYMBOL_GPL(cs35l56_system_resume); 1097 1097 1098 + static int cs35l56_control_add_nop(struct wm_adsp *dsp, struct cs_dsp_coeff_ctl *cs_ctl) 1099 + { 1100 + return 0; 1101 + } 1102 + 1098 1103 static int cs35l56_dsp_init(struct cs35l56_private *cs35l56) 1099 1104 { 1100 1105 struct wm_adsp *dsp; ··· 1121 1116 */ 1122 1117 dsp->fw = 12; 1123 1118 dsp->wmfw_optional = true; 1119 + 1120 + /* 1121 + * None of the firmware controls need to be exported so add a no-op 1122 + * callback that suppresses creating an ALSA control. 1123 + */ 1124 + dsp->control_add = &cs35l56_control_add_nop; 1124 1125 1125 1126 dev_dbg(cs35l56->base.dev, "DSP system name: '%s'\n", dsp->system_name); 1126 1127
+57 -18
sound/soc/codecs/cs42l43.c
··· 7 7 8 8 #include <linux/bitops.h> 9 9 #include <linux/bits.h> 10 + #include <linux/build_bug.h> 10 11 #include <linux/clk.h> 11 12 #include <linux/device.h> 12 13 #include <linux/err.h> ··· 253 252 static irqreturn_t cs42l43_mic_shutter(int irq, void *data) 254 253 { 255 254 struct cs42l43_codec *priv = data; 256 - static const char * const controls[] = { 257 - "Decimator 1 Switch", 258 - "Decimator 2 Switch", 259 - "Decimator 3 Switch", 260 - "Decimator 4 Switch", 261 - }; 262 - int i, ret; 255 + struct snd_soc_component *component = priv->component; 256 + int i; 263 257 264 258 dev_dbg(priv->dev, "Microphone shutter changed\n"); 265 259 266 - if (!priv->component) 260 + if (!component) 267 261 return IRQ_NONE; 268 262 269 - for (i = 0; i < ARRAY_SIZE(controls); i++) { 270 - ret = snd_soc_component_notify_control(priv->component, 271 - controls[i]); 272 - if (ret) 263 + for (i = 1; i < ARRAY_SIZE(priv->kctl); i++) { 264 + if (!priv->kctl[i]) 273 265 return IRQ_NONE; 266 + 267 + snd_ctl_notify(component->card->snd_card, 268 + SNDRV_CTL_EVENT_MASK_VALUE, &priv->kctl[i]->id); 274 269 } 275 270 276 271 return IRQ_HANDLED; ··· 275 278 static irqreturn_t cs42l43_spk_shutter(int irq, void *data) 276 279 { 277 280 struct cs42l43_codec *priv = data; 278 - int ret; 281 + struct snd_soc_component *component = priv->component; 279 282 280 283 dev_dbg(priv->dev, "Speaker shutter changed\n"); 281 284 282 - if (!priv->component) 285 + if (!component) 283 286 return IRQ_NONE; 284 287 285 - ret = snd_soc_component_notify_control(priv->component, 286 - "Speaker Digital Switch"); 287 - if (ret) 288 + if (!priv->kctl[0]) 288 289 return IRQ_NONE; 290 + 291 + snd_ctl_notify(component->card->snd_card, 292 + SNDRV_CTL_EVENT_MASK_VALUE, &priv->kctl[0]->id); 289 293 290 294 return IRQ_HANDLED; 291 295 } ··· 588 590 return 0; 589 591 } 590 592 593 + static int cs42l43_dai_probe(struct snd_soc_dai *dai) 594 + { 595 + struct snd_soc_component *component = dai->component; 596 + struct 
cs42l43_codec *priv = snd_soc_component_get_drvdata(component); 597 + static const char * const controls[] = { 598 + "Speaker Digital Switch", 599 + "Decimator 1 Switch", 600 + "Decimator 2 Switch", 601 + "Decimator 3 Switch", 602 + "Decimator 4 Switch", 603 + }; 604 + int i; 605 + 606 + static_assert(ARRAY_SIZE(controls) == ARRAY_SIZE(priv->kctl)); 607 + 608 + for (i = 0; i < ARRAY_SIZE(controls); i++) { 609 + if (priv->kctl[i]) 610 + continue; 611 + 612 + priv->kctl[i] = snd_soc_component_get_kcontrol(component, controls[i]); 613 + } 614 + 615 + return 0; 616 + } 617 + 618 + static int cs42l43_dai_remove(struct snd_soc_dai *dai) 619 + { 620 + struct snd_soc_component *component = dai->component; 621 + struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component); 622 + int i; 623 + 624 + for (i = 0; i < ARRAY_SIZE(priv->kctl); i++) 625 + priv->kctl[i] = NULL; 626 + 627 + return 0; 628 + } 629 + 591 630 static const struct snd_soc_dai_ops cs42l43_asp_ops = { 631 + .probe = cs42l43_dai_probe, 632 + .remove = cs42l43_dai_remove, 592 633 .startup = cs42l43_startup, 593 634 .hw_params = cs42l43_asp_hw_params, 594 635 .set_fmt = cs42l43_asp_set_fmt, ··· 645 608 return ret; 646 609 647 610 return cs42l43_set_sample_rate(substream, params, dai); 648 - }; 611 + } 649 612 650 613 static const struct snd_soc_dai_ops cs42l43_sdw_ops = { 614 + .probe = cs42l43_dai_probe, 615 + .remove = cs42l43_dai_remove, 651 616 .startup = cs42l43_startup, 652 617 .set_stream = cs42l43_sdw_set_stream, 653 618 .hw_params = cs42l43_sdw_hw_params,
+2
sound/soc/codecs/cs42l43.h
··· 100 100 struct delayed_work hp_ilimit_clear_work; 101 101 bool hp_ilimited; 102 102 int hp_ilimit_count; 103 + 104 + struct snd_kcontrol *kctl[5]; 103 105 }; 104 106 105 107 #if IS_REACHABLE(CONFIG_SND_SOC_CS42L43_SDW)
+4 -4
sound/soc/codecs/cs530x.c
··· 129 129 130 130 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -1270, 50, 0); 131 131 132 - static const char * const cs530x_in_hpf_text[] = { 132 + static const char * const cs530x_in_filter_text[] = { 133 133 "Min Phase Slow Roll-off", 134 134 "Min Phase Fast Roll-off", 135 135 "Linear Phase Slow Roll-off", 136 136 "Linear Phase Fast Roll-off", 137 137 }; 138 138 139 - static SOC_ENUM_SINGLE_DECL(cs530x_in_hpf_enum, CS530X_IN_FILTER, 139 + static SOC_ENUM_SINGLE_DECL(cs530x_in_filter_enum, CS530X_IN_FILTER, 140 140 CS530X_IN_FILTER_SHIFT, 141 - cs530x_in_hpf_text); 141 + cs530x_in_filter_text); 142 142 143 143 static const char * const cs530x_in_4ch_sum_text[] = { 144 144 "None", ··· 189 189 SOC_SINGLE_EXT_TLV("IN2 Volume", CS530X_IN_VOL_CTRL1_1, 0, 255, 1, 190 190 snd_soc_get_volsw, cs530x_put_volsw_vu, in_vol_tlv), 191 191 192 - SOC_ENUM("IN HPF Select", cs530x_in_hpf_enum), 192 + SOC_ENUM("IN DEC Filter Select", cs530x_in_filter_enum), 193 193 SOC_ENUM("Input Ramp Up", cs530x_ramp_inc_enum), 194 194 SOC_ENUM("Input Ramp Down", cs530x_ramp_dec_enum), 195 195
+2
sound/soc/codecs/es8326.c
··· 843 843 es8326_disable_micbias(es8326->component); 844 844 if (es8326->jack->status & SND_JACK_HEADPHONE) { 845 845 dev_dbg(comp->dev, "Report hp remove event\n"); 846 + snd_soc_jack_report(es8326->jack, 0, 847 + SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2); 846 848 snd_soc_jack_report(es8326->jack, 0, SND_JACK_HEADSET); 847 849 /* mute adc when mic path switch */ 848 850 regmap_write(es8326->regmap, ES8326_ADC1_SRC, 0x44);
+2
sound/soc/codecs/lpass-va-macro.c
··· 1472 1472 1473 1473 if ((core_id_0 == 0x01) && (core_id_1 == 0x0F)) 1474 1474 version = LPASS_CODEC_VERSION_2_0; 1475 + if ((core_id_0 == 0x02) && (core_id_1 == 0x0F) && core_id_2 == 0x01) 1476 + version = LPASS_CODEC_VERSION_2_0; 1475 1477 if ((core_id_0 == 0x02) && (core_id_1 == 0x0E)) 1476 1478 version = LPASS_CODEC_VERSION_2_1; 1477 1479 if ((core_id_0 == 0x02) && (core_id_1 == 0x0F) && (core_id_2 == 0x50 || core_id_2 == 0x51))
+1 -1
sound/soc/codecs/nau8822.c
··· 736 736 return ret; 737 737 } 738 738 739 - dev_info(component->dev, 739 + dev_dbg(component->dev, 740 740 "pll_int=%x pll_frac=%x mclk_scaler=%x pre_factor=%x\n", 741 741 pll_param->pll_int, pll_param->pll_frac, 742 742 pll_param->mclk_scaler, pll_param->pre_factor);
+2 -2
sound/soc/codecs/wcd937x-sdw.c
··· 1049 1049 pdev->prop.lane_control_support = true; 1050 1050 pdev->prop.simple_clk_stop_capable = true; 1051 1051 if (wcd->is_tx) { 1052 - pdev->prop.source_ports = GENMASK(WCD937X_MAX_TX_SWR_PORTS, 0); 1052 + pdev->prop.source_ports = GENMASK(WCD937X_MAX_TX_SWR_PORTS - 1, 0); 1053 1053 pdev->prop.src_dpn_prop = wcd937x_dpn_prop; 1054 1054 wcd->ch_info = &wcd937x_sdw_tx_ch_info[0]; 1055 1055 pdev->prop.wake_capable = true; ··· 1062 1062 /* Start in cache-only until device is enumerated */ 1063 1063 regcache_cache_only(wcd->regmap, true); 1064 1064 } else { 1065 - pdev->prop.sink_ports = GENMASK(WCD937X_MAX_SWR_PORTS, 0); 1065 + pdev->prop.sink_ports = GENMASK(WCD937X_MAX_SWR_PORTS - 1, 0); 1066 1066 pdev->prop.sink_dpn_prop = wcd937x_dpn_prop; 1067 1067 wcd->ch_info = &wcd937x_sdw_rx_ch_info[0]; 1068 1068 }
+2 -2
sound/soc/codecs/wcd938x-sdw.c
··· 1252 1252 pdev->prop.lane_control_support = true; 1253 1253 pdev->prop.simple_clk_stop_capable = true; 1254 1254 if (wcd->is_tx) { 1255 - pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); 1255 + pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); 1256 1256 pdev->prop.src_dpn_prop = wcd938x_dpn_prop; 1257 1257 wcd->ch_info = &wcd938x_sdw_tx_ch_info[0]; 1258 1258 pdev->prop.wake_capable = true; 1259 1259 } else { 1260 - pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); 1260 + pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); 1261 1261 pdev->prop.sink_dpn_prop = wcd938x_dpn_prop; 1262 1262 wcd->ch_info = &wcd938x_sdw_rx_ch_info[0]; 1263 1263 }
+2 -2
sound/soc/codecs/wcd939x-sdw.c
··· 1453 1453 pdev->prop.lane_control_support = true; 1454 1454 pdev->prop.simple_clk_stop_capable = true; 1455 1455 if (wcd->is_tx) { 1456 - pdev->prop.source_ports = GENMASK(WCD939X_MAX_TX_SWR_PORTS, 0); 1456 + pdev->prop.source_ports = GENMASK(WCD939X_MAX_TX_SWR_PORTS - 1, 0); 1457 1457 pdev->prop.src_dpn_prop = wcd939x_tx_dpn_prop; 1458 1458 wcd->ch_info = &wcd939x_sdw_tx_ch_info[0]; 1459 1459 pdev->prop.wake_capable = true; 1460 1460 } else { 1461 - pdev->prop.sink_ports = GENMASK(WCD939X_MAX_RX_SWR_PORTS, 0); 1461 + pdev->prop.sink_ports = GENMASK(WCD939X_MAX_RX_SWR_PORTS - 1, 0); 1462 1462 pdev->prop.sink_dpn_prop = wcd939x_rx_dpn_prop; 1463 1463 wcd->ch_info = &wcd939x_sdw_rx_ch_info[0]; 1464 1464 }
+14 -3
sound/soc/codecs/wm_adsp.c
··· 583 583 kfree(kcontrol); 584 584 } 585 585 586 - static int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl) 586 + int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl) 587 587 { 588 588 struct wm_adsp *dsp = container_of(cs_ctl->dsp, struct wm_adsp, cs_dsp); 589 589 struct cs_dsp *cs_dsp = &dsp->cs_dsp; ··· 657 657 kfree(ctl); 658 658 659 659 return ret; 660 + } 661 + EXPORT_SYMBOL_GPL(wm_adsp_control_add); 662 + 663 + static int wm_adsp_control_add_cb(struct cs_dsp_coeff_ctl *cs_ctl) 664 + { 665 + struct wm_adsp *dsp = container_of(cs_ctl->dsp, struct wm_adsp, cs_dsp); 666 + 667 + if (dsp->control_add) 668 + return (dsp->control_add)(dsp, cs_ctl); 669 + else 670 + return wm_adsp_control_add(cs_ctl); 660 671 } 661 672 662 673 static void wm_adsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl) ··· 2083 2072 EXPORT_SYMBOL_GPL(wm_halo_wdt_expire); 2084 2073 2085 2074 static const struct cs_dsp_client_ops wm_adsp1_client_ops = { 2086 - .control_add = wm_adsp_control_add, 2075 + .control_add = wm_adsp_control_add_cb, 2087 2076 .control_remove = wm_adsp_control_remove, 2088 2077 }; 2089 2078 2090 2079 static const struct cs_dsp_client_ops wm_adsp2_client_ops = { 2091 - .control_add = wm_adsp_control_add, 2080 + .control_add = wm_adsp_control_add_cb, 2092 2081 .control_remove = wm_adsp_control_remove, 2093 2082 .pre_run = wm_adsp_pre_run, 2094 2083 .post_run = wm_adsp_event_post_run,
+3
sound/soc/codecs/wm_adsp.h
··· 37 37 bool wmfw_optional; 38 38 39 39 struct work_struct boot_work; 40 + int (*control_add)(struct wm_adsp *dsp, struct cs_dsp_coeff_ctl *cs_ctl); 40 41 int (*pre_run)(struct wm_adsp *dsp); 41 42 42 43 bool preloaded; ··· 133 132 int wm_adsp_compr_copy(struct snd_soc_component *component, 134 133 struct snd_compr_stream *stream, 135 134 char __user *buf, size_t count); 135 + 136 + int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl); 136 137 int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type, 137 138 unsigned int alg, void *buf, size_t len); 138 139 int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+1 -1
sound/soc/codecs/wsa881x.c
··· 1152 1152 wsa881x->sconfig.frame_rate = 48000; 1153 1153 wsa881x->sconfig.direction = SDW_DATA_DIR_RX; 1154 1154 wsa881x->sconfig.type = SDW_STREAM_PDM; 1155 - pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0); 1155 + pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS - 1, 0); 1156 1156 pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; 1157 1157 pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; 1158 1158 pdev->prop.clk_stop_mode1 = true;
+1 -1
sound/soc/codecs/wsa883x.c
··· 1406 1406 WSA883X_MAX_SWR_PORTS)) 1407 1407 dev_dbg(dev, "Static Port mapping not specified\n"); 1408 1408 1409 - pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS, 0); 1409 + pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS - 1, 0); 1410 1410 pdev->prop.simple_clk_stop_capable = true; 1411 1411 pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; 1412 1412 pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+1 -1
sound/soc/codecs/wsa884x.c
··· 1895 1895 WSA884X_MAX_SWR_PORTS)) 1896 1896 dev_dbg(dev, "Static Port mapping not specified\n"); 1897 1897 1898 - pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS, 0); 1898 + pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS - 1, 0); 1899 1899 pdev->prop.simple_clk_stop_capable = true; 1900 1900 pdev->prop.sink_dpn_prop = wsa884x_sink_dpn_prop; 1901 1901 pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+15 -5
sound/soc/fsl/fsl_micfil.c
··· 831 831 {REG_MICFIL_CTRL1, 0x00000000}, 832 832 {REG_MICFIL_CTRL2, 0x00000000}, 833 833 {REG_MICFIL_STAT, 0x00000000}, 834 - {REG_MICFIL_FIFO_CTRL, 0x00000007}, 834 + {REG_MICFIL_FIFO_CTRL, 0x0000001F}, 835 835 {REG_MICFIL_FIFO_STAT, 0x00000000}, 836 836 {REG_MICFIL_DATACH0, 0x00000000}, 837 837 {REG_MICFIL_DATACH1, 0x00000000}, ··· 855 855 856 856 static bool fsl_micfil_readable_reg(struct device *dev, unsigned int reg) 857 857 { 858 + struct fsl_micfil *micfil = dev_get_drvdata(dev); 859 + 858 860 switch (reg) { 859 861 case REG_MICFIL_CTRL1: 860 862 case REG_MICFIL_CTRL2: ··· 874 872 case REG_MICFIL_DC_CTRL: 875 873 case REG_MICFIL_OUT_CTRL: 876 874 case REG_MICFIL_OUT_STAT: 877 - case REG_MICFIL_FSYNC_CTRL: 878 - case REG_MICFIL_VERID: 879 - case REG_MICFIL_PARAM: 880 875 case REG_MICFIL_VAD0_CTRL1: 881 876 case REG_MICFIL_VAD0_CTRL2: 882 877 case REG_MICFIL_VAD0_STAT: ··· 882 883 case REG_MICFIL_VAD0_NDATA: 883 884 case REG_MICFIL_VAD0_ZCD: 884 885 return true; 886 + case REG_MICFIL_FSYNC_CTRL: 887 + case REG_MICFIL_VERID: 888 + case REG_MICFIL_PARAM: 889 + if (micfil->soc->use_verid) 890 + return true; 891 + fallthrough; 885 892 default: 886 893 return false; 887 894 } ··· 895 890 896 891 static bool fsl_micfil_writeable_reg(struct device *dev, unsigned int reg) 897 892 { 893 + struct fsl_micfil *micfil = dev_get_drvdata(dev); 894 + 898 895 switch (reg) { 899 896 case REG_MICFIL_CTRL1: 900 897 case REG_MICFIL_CTRL2: ··· 906 899 case REG_MICFIL_DC_CTRL: 907 900 case REG_MICFIL_OUT_CTRL: 908 901 case REG_MICFIL_OUT_STAT: /* Write 1 to Clear */ 909 - case REG_MICFIL_FSYNC_CTRL: 910 902 case REG_MICFIL_VAD0_CTRL1: 911 903 case REG_MICFIL_VAD0_CTRL2: 912 904 case REG_MICFIL_VAD0_STAT: /* Write 1 to Clear */ ··· 913 907 case REG_MICFIL_VAD0_NCONFIG: 914 908 case REG_MICFIL_VAD0_ZCD: 915 909 return true; 910 + case REG_MICFIL_FSYNC_CTRL: 911 + if (micfil->soc->use_verid) 912 + return true; 913 + fallthrough; 916 914 default: 917 915 return false; 918 916 }
+1 -1
sound/soc/fsl/fsl_micfil.h
··· 72 72 #define MICFIL_STAT_CHXF(ch) BIT(ch) 73 73 74 74 /* MICFIL FIFO Control Register -- REG_MICFIL_FIFO_CTRL 0x10 */ 75 - #define MICFIL_FIFO_CTRL_FIFOWMK GENMASK(2, 0) 75 + #define MICFIL_FIFO_CTRL_FIFOWMK GENMASK(4, 0) 76 76 77 77 /* MICFIL FIFO Status Register -- REG_MICFIL_FIFO_STAT 0x14 */ 78 78 #define MICFIL_FIFO_STAT_FIFOX_OVER(ch) BIT(ch)
+10 -16
sound/soc/meson/axg-fifo.c
··· 207 207 status = FIELD_GET(STATUS1_INT_STS, status); 208 208 axg_fifo_ack_irq(fifo, status); 209 209 210 - /* Use the thread to call period elapsed on nonatomic links */ 211 - if (status & FIFO_INT_COUNT_REPEAT) 212 - return IRQ_WAKE_THREAD; 210 + if (status & ~FIFO_INT_COUNT_REPEAT) 211 + dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", 212 + status); 213 213 214 - dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", 215 - status); 214 + if (status & FIFO_INT_COUNT_REPEAT) { 215 + snd_pcm_period_elapsed(ss); 216 + return IRQ_HANDLED; 217 + } 216 218 217 219 return IRQ_NONE; 218 - } 219 - 220 - static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id) 221 - { 222 - struct snd_pcm_substream *ss = dev_id; 223 - 224 - snd_pcm_period_elapsed(ss); 225 - 226 - return IRQ_HANDLED; 227 220 } 228 221 229 222 int axg_fifo_pcm_open(struct snd_soc_component *component, ··· 244 251 if (ret) 245 252 return ret; 246 253 247 - ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block, 248 - axg_fifo_pcm_irq_block_thread, 254 + /* Use the threaded irq handler only with non-atomic links */ 255 + ret = request_threaded_irq(fifo->irq, NULL, 256 + axg_fifo_pcm_irq_block, 249 257 IRQF_ONESHOT, dev_name(dev), ss); 250 258 if (ret) 251 259 return ret;
+34 -8
sound/soc/soc-component.c
··· 236 236 } 237 237 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked); 238 238 239 + static void soc_get_kcontrol_name(struct snd_soc_component *component, 240 + char *buf, int size, const char * const ctl) 241 + { 242 + /* When updating, change also snd_soc_dapm_widget_name_cmp() */ 243 + if (component->name_prefix) 244 + snprintf(buf, size, "%s %s", component->name_prefix, ctl); 245 + else 246 + snprintf(buf, size, "%s", ctl); 247 + } 248 + 249 + struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component, 250 + const char * const ctl) 251 + { 252 + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 253 + 254 + soc_get_kcontrol_name(component, name, ARRAY_SIZE(name), ctl); 255 + 256 + return snd_soc_card_get_kcontrol(component->card, name); 257 + } 258 + EXPORT_SYMBOL_GPL(snd_soc_component_get_kcontrol); 259 + 260 + struct snd_kcontrol * 261 + snd_soc_component_get_kcontrol_locked(struct snd_soc_component *component, 262 + const char * const ctl) 263 + { 264 + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 265 + 266 + soc_get_kcontrol_name(component, name, ARRAY_SIZE(name), ctl); 267 + 268 + return snd_soc_card_get_kcontrol_locked(component->card, name); 269 + } 270 + EXPORT_SYMBOL_GPL(snd_soc_component_get_kcontrol_locked); 271 + 239 272 int snd_soc_component_notify_control(struct snd_soc_component *component, 240 273 const char * const ctl) 241 274 { 242 - char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 243 275 struct snd_kcontrol *kctl; 244 276 245 - /* When updating, change also snd_soc_dapm_widget_name_cmp() */ 246 - if (component->name_prefix) 247 - snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl); 248 - else 249 - snprintf(name, ARRAY_SIZE(name), "%s", ctl); 250 - 251 - kctl = snd_soc_card_get_kcontrol(component->card, name); 277 + kctl = snd_soc_component_get_kcontrol(component, ctl); 252 278 if (!kctl) 253 279 return soc_component_ret(component, -EINVAL); 254 280
+1 -1
sound/soc/sof/mediatek/mt8195/mt8195.c
··· 573 573 static struct snd_sof_of_mach sof_mt8195_machs[] = { 574 574 { 575 575 .compatible = "google,tomato", 576 - .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg" 576 + .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg" 577 577 }, { 578 578 .compatible = "mediatek,mt8195", 579 579 .sof_tplg_filename = "sof-mt8195.tplg"
+1 -1
sound/soc/sti/sti_uniperif.c
··· 352 352 return ret; 353 353 } 354 354 355 - static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) 355 + int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) 356 356 { 357 357 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); 358 358 struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+1
sound/soc/sti/uniperif.h
··· 1380 1380 struct uniperif *reader); 1381 1381 1382 1382 /* common */ 1383 + int sti_uniperiph_dai_probe(struct snd_soc_dai *dai); 1383 1384 int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, 1384 1385 unsigned int fmt); 1385 1386
+1
sound/soc/sti/uniperif_player.c
··· 1038 1038 .startup = uni_player_startup, 1039 1039 .shutdown = uni_player_shutdown, 1040 1040 .prepare = uni_player_prepare, 1041 + .probe = sti_uniperiph_dai_probe, 1041 1042 .trigger = uni_player_trigger, 1042 1043 .hw_params = sti_uniperiph_dai_hw_params, 1043 1044 .set_fmt = sti_uniperiph_dai_set_fmt,
+1
sound/soc/sti/uniperif_reader.c
··· 401 401 .startup = uni_reader_startup, 402 402 .shutdown = uni_reader_shutdown, 403 403 .prepare = uni_reader_prepare, 404 + .probe = sti_uniperiph_dai_probe, 404 405 .trigger = uni_reader_trigger, 405 406 .hw_params = sti_uniperiph_dai_hw_params, 406 407 .set_fmt = sti_uniperiph_dai_set_fmt,
+5
sound/usb/line6/driver.c
··· 286 286 { 287 287 struct usb_line6 *line6 = (struct usb_line6 *)urb->context; 288 288 struct midi_buffer *mb = &line6->line6midi->midibuf_in; 289 + unsigned long flags; 289 290 int done; 290 291 291 292 if (urb->status == -ESHUTDOWN) 292 293 return; 293 294 294 295 if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { 296 + spin_lock_irqsave(&line6->line6midi->lock, flags); 295 297 done = 296 298 line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length); 297 299 ··· 302 300 dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n", 303 301 done, urb->actual_length); 304 302 } 303 + spin_unlock_irqrestore(&line6->line6midi->lock, flags); 305 304 306 305 for (;;) { 306 + spin_lock_irqsave(&line6->line6midi->lock, flags); 307 307 done = 308 308 line6_midibuf_read(mb, line6->buffer_message, 309 309 LINE6_MIDI_MESSAGE_MAXLEN, 310 310 LINE6_MIDIBUF_READ_RX); 311 + spin_unlock_irqrestore(&line6->line6midi->lock, flags); 311 312 312 313 if (done <= 0) 313 314 break;
+4
sound/usb/quirks-table.h
··· 2594 2594 } 2595 2595 }, 2596 2596 2597 + /* Stanton ScratchAmp */ 2598 + { USB_DEVICE(0x103d, 0x0100) }, 2599 + { USB_DEVICE(0x103d, 0x0101) }, 2600 + 2597 2601 /* Novation EMS devices */ 2598 2602 { 2599 2603 USB_DEVICE_VENDOR_SPEC(0x1235, 0x0001),
+2 -2
sound/usb/stream.c
··· 244 244 SNDRV_CHMAP_FR, /* right front */ 245 245 SNDRV_CHMAP_FC, /* center front */ 246 246 SNDRV_CHMAP_LFE, /* LFE */ 247 - SNDRV_CHMAP_SL, /* left surround */ 248 - SNDRV_CHMAP_SR, /* right surround */ 247 + SNDRV_CHMAP_RL, /* left surround */ 248 + SNDRV_CHMAP_RR, /* right surround */ 249 249 SNDRV_CHMAP_FLC, /* left of center */ 250 250 SNDRV_CHMAP_FRC, /* right of center */ 251 251 SNDRV_CHMAP_RC, /* surround */
+43 -10
tools/build/feature/Makefile
··· 82 82 83 83 FILES := $(addprefix $(OUTPUT),$(FILES)) 84 84 85 - PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config 85 + # Some distros provide the command $(CROSS_COMPILE)pkg-config for 86 + # searching packges installed with Multiarch. Use it for cross 87 + # compilation if it is existed. 88 + ifneq (, $(shell which $(CROSS_COMPILE)pkg-config)) 89 + PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config 90 + else 91 + PKG_CONFIG ?= pkg-config 92 + 93 + # PKG_CONFIG_PATH or PKG_CONFIG_LIBDIR, alongside PKG_CONFIG_SYSROOT_DIR 94 + # for modified system root, are required for the cross compilation. 95 + # If these PKG_CONFIG environment variables are not set, Multiarch library 96 + # paths are used instead. 97 + ifdef CROSS_COMPILE 98 + ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),) 99 + CROSS_ARCH = $(shell $(CC) -dumpmachine) 100 + PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/ 101 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/ 102 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/ 103 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/share/pkgconfig/ 104 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/share/pkgconfig/ 105 + export PKG_CONFIG_LIBDIR 106 + endif 107 + endif 108 + endif 86 109 87 110 all: $(FILES) 88 111 ··· 170 147 171 148 DWARFLIBS := -ldw 172 149 ifeq ($(findstring -static,${LDFLAGS}),-static) 173 - DWARFLIBS += -lelf -lebl -lz -llzma -lbz2 150 + DWARFLIBS += -lelf -lz -llzma -lbz2 -lzstd 151 + 152 + LIBDW_VERSION := $(shell $(PKG_CONFIG) --modversion libdw) 153 + LIBDW_VERSION_1 := $(word 1, $(subst ., ,$(LIBDW_VERSION))) 154 + LIBDW_VERSION_2 := $(word 2, $(subst ., ,$(LIBDW_VERSION))) 155 + 156 + # Elfutils merged libebl.a into libdw.a starting from version 0.177, 157 + # Link libebl.a only if libdw is older than this version. 
158 + ifeq ($(shell test $(LIBDW_VERSION_2) -lt 177; echo $$?),0) 159 + DWARFLIBS += -lebl 160 + endif 174 161 endif 175 162 176 163 $(OUTPUT)test-dwarf.bin: ··· 211 178 $(BUILD) -lnuma 212 179 213 180 $(OUTPUT)test-libunwind.bin: 214 - $(BUILD) -lelf 181 + $(BUILD) -lelf -llzma 215 182 216 183 $(OUTPUT)test-libunwind-debug-frame.bin: 217 - $(BUILD) -lelf 184 + $(BUILD) -lelf -llzma 218 185 $(OUTPUT)test-libunwind-x86.bin: 219 - $(BUILD) -lelf -lunwind-x86 186 + $(BUILD) -lelf -llzma -lunwind-x86 220 187 221 188 $(OUTPUT)test-libunwind-x86_64.bin: 222 - $(BUILD) -lelf -lunwind-x86_64 189 + $(BUILD) -lelf -llzma -lunwind-x86_64 223 190 224 191 $(OUTPUT)test-libunwind-arm.bin: 225 - $(BUILD) -lelf -lunwind-arm 192 + $(BUILD) -lelf -llzma -lunwind-arm 226 193 227 194 $(OUTPUT)test-libunwind-aarch64.bin: 228 - $(BUILD) -lelf -lunwind-aarch64 195 + $(BUILD) -lelf -llzma -lunwind-aarch64 229 196 230 197 $(OUTPUT)test-libunwind-debug-frame-arm.bin: 231 - $(BUILD) -lelf -lunwind-arm 198 + $(BUILD) -lelf -llzma -lunwind-arm 232 199 233 200 $(OUTPUT)test-libunwind-debug-frame-aarch64.bin: 234 - $(BUILD) -lelf -lunwind-aarch64 201 + $(BUILD) -lelf -llzma -lunwind-aarch64 235 202 236 203 $(OUTPUT)test-libaudit.bin: 237 204 $(BUILD) -laudit
+28
tools/perf/Documentation/Build.txt
··· 71 71 $ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a 72 72 73 73 If UBSan detects any problem at runtime, it outputs a “runtime error:” message. 74 + 75 + 4) Cross compilation 76 + ==================== 77 + As Multiarch is commonly supported in Linux distributions, we can install 78 + libraries for multiple architectures on the same system and then cross-compile 79 + Linux perf. For example, Aarch64 libraries and toolchains can be installed on 80 + an x86_64 machine, allowing us to compile perf for an Aarch64 target. 81 + 82 + Below is the command for building the perf with dynamic linking. 83 + 84 + $ cd /path/to/Linux 85 + $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -C tools/perf 86 + 87 + For static linking, the option `LDFLAGS="-static"` is required. 88 + 89 + $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- \ 90 + LDFLAGS="-static" -C tools/perf 91 + 92 + In the embedded system world, a use case is to explicitly specify the package 93 + configuration paths for cross building: 94 + 95 + $ PKG_CONFIG_SYSROOT_DIR="/path/to/cross/build/sysroot" \ 96 + PKG_CONFIG_LIBDIR="/usr/lib/:/usr/local/lib" \ 97 + make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -C tools/perf 98 + 99 + In this case, the variable PKG_CONFIG_SYSROOT_DIR can be used alongside the 100 + variable PKG_CONFIG_LIBDIR or PKG_CONFIG_PATH to prepend the sysroot path to 101 + the library paths for cross compilation.
+19 -1
tools/perf/Makefile.config
··· 152 152 endif 153 153 DWARFLIBS := -ldw 154 154 ifeq ($(findstring -static,${LDFLAGS}),-static) 155 - DWARFLIBS += -lelf -lebl -ldl -lz -llzma -lbz2 155 + DWARFLIBS += -lelf -ldl -lz -llzma -lbz2 -lzstd 156 + 157 + LIBDW_VERSION := $(shell $(PKG_CONFIG) --modversion libdw) 158 + LIBDW_VERSION_1 := $(word 1, $(subst ., ,$(LIBDW_VERSION))) 159 + LIBDW_VERSION_2 := $(word 2, $(subst ., ,$(LIBDW_VERSION))) 160 + 161 + # Elfutils merged libebl.a into libdw.a starting from version 0.177, 162 + # Link libebl.a only if libdw is older than this version. 163 + ifeq ($(shell test $(LIBDW_VERSION_2) -lt 177; echo $$?),0) 164 + DWARFLIBS += -lebl 165 + endif 156 166 endif 157 167 FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS) 158 168 FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) $(DWARFLIBS) ··· 306 296 307 297 ifdef PYTHON_CONFIG 308 298 PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null) 299 + # Update the python flags for cross compilation 300 + ifdef CROSS_COMPILE 301 + PYTHON_NATIVE := $(shell echo $(PYTHON_EMBED_LDOPTS) | sed 's/\(-L.*\/\)\(.*-linux-gnu\).*/\2/') 302 + PYTHON_EMBED_LDOPTS := $(subst $(PYTHON_NATIVE),$(shell $(CC) -dumpmachine),$(PYTHON_EMBED_LDOPTS)) 303 + endif 309 304 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) 310 305 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil 311 306 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) ··· 912 897 PYTHON_SETUPTOOLS_INSTALLED := $(shell $(PYTHON) -c 'import setuptools;' 2> /dev/null && echo "yes" || echo "no") 913 898 ifeq ($(PYTHON_SETUPTOOLS_INSTALLED), yes) 914 899 PYTHON_EXTENSION_SUFFIX := $(shell $(PYTHON) -c 'from importlib import machinery; print(machinery.EXTENSION_SUFFIXES[0])') 900 + ifdef CROSS_COMPILE 901 + PYTHON_EXTENSION_SUFFIX := $(subst $(PYTHON_NATIVE),$(shell $(CC) -dumpmachine),$(PYTHON_EXTENSION_SUFFIX)) 902 + endif 915 903 LANG_BINDINGS += 
$(obj-perf)python/perf$(PYTHON_EXTENSION_SUFFIX) 916 904 else 917 905 $(warning Missing python setuptools, the python binding won't be built, please install python3-setuptools or equivalent)
+26 -1
tools/perf/Makefile.perf
··· 193 193 HOSTAR ?= ar 194 194 CLANG ?= clang 195 195 196 - PKG_CONFIG = $(CROSS_COMPILE)pkg-config 196 + # Some distros provide the command $(CROSS_COMPILE)pkg-config for 197 + # searching packges installed with Multiarch. Use it for cross 198 + # compilation if it is existed. 199 + ifneq (, $(shell which $(CROSS_COMPILE)pkg-config)) 200 + PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config 201 + else 202 + PKG_CONFIG ?= pkg-config 203 + 204 + # PKG_CONFIG_PATH or PKG_CONFIG_LIBDIR, alongside PKG_CONFIG_SYSROOT_DIR 205 + # for modified system root, is required for the cross compilation. 206 + # If these PKG_CONFIG environment variables are not set, Multiarch library 207 + # paths are used instead. 208 + ifdef CROSS_COMPILE 209 + ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),) 210 + CROSS_ARCH = $(shell $(CC) -dumpmachine) 211 + PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/ 212 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/ 213 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/ 214 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/share/pkgconfig/ 215 + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/share/pkgconfig/ 216 + export PKG_CONFIG_LIBDIR 217 + $(warning Missing PKG_CONFIG_LIBDIR, PKG_CONFIG_PATH and PKG_CONFIG_SYSROOT_DIR for cross compilation,) 218 + $(warning set PKG_CONFIG_LIBDIR for using Multiarch libs.) 219 + endif 220 + endif 221 + endif 197 222 198 223 RM = rm -f 199 224 LN = ln -f
+1 -1
tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
··· 36 36 "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 37 37 }, 38 38 { 39 - "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 39 + "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT" 40 40 }, 41 41 { 42 42 "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+1 -1
tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
··· 74 74 { 75 75 "PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event", 76 76 "ConfigCode": "0x800000000000000c", 77 - "EventName": "FW_SFENCE_VMA_RECEIVED", 77 + "EventName": "FW_SFENCE_VMA_ASID_SENT", 78 78 "BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event" 79 79 }, 80 80 {
+1 -1
tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
··· 36 36 "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 37 37 }, 38 38 { 39 - "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 39 + "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT" 40 40 }, 41 41 { 42 42 "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+1 -1
tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
··· 36 36 "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 37 37 }, 38 38 { 39 - "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 39 + "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT" 40 40 }, 41 41 { 42 42 "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+1 -1
tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
··· 36 36 "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 37 37 }, 38 38 { 39 - "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" 39 + "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT" 40 40 }, 41 41 { 42 42 "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+1 -1
tools/perf/util/callchain.c
··· 1141 1141 int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, 1142 1142 bool hide_unresolved) 1143 1143 { 1144 - struct machine *machine = maps__machine(node->ms.maps); 1144 + struct machine *machine = node->ms.maps ? maps__machine(node->ms.maps) : NULL; 1145 1145 1146 1146 maps__put(al->maps); 1147 1147 al->maps = maps__get(node->ms.maps);
+1 -1
tools/testing/selftests/bpf/Makefile
··· 713 713 # Make sure we are able to include and link libbpf against c++. 714 714 $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ) 715 715 $(call msg,CXX,,$@) 716 - $(Q)$(CXX) $(CFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ 716 + $(Q)$(CXX) $(subst -D_GNU_SOURCE=,,$(CFLAGS)) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ 717 717 718 718 # Benchmark runner 719 719 $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ)
+2 -2
tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
··· 216 216 } 217 217 218 218 #ifndef __NR_uretprobe 219 - #define __NR_uretprobe 467 219 + #define __NR_uretprobe 335 220 220 #endif 221 221 222 222 __naked unsigned long uretprobe_syscall_call_1(void) ··· 253 253 struct uprobe_syscall_executed *skel; 254 254 int pid, status, err, go[2], c; 255 255 256 - if (ASSERT_OK(pipe(go), "pipe")) 256 + if (!ASSERT_OK(pipe(go), "pipe")) 257 257 return; 258 258 259 259 skel = uprobe_syscall_executed__open_and_load();
+3 -1
tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
··· 29 29 version.name = name; 30 30 31 31 ret = ioctl(fd, DRM_IOCTL_VERSION, &version); 32 - if (ret) 32 + if (ret || version.name_len != 4) 33 33 return 0; 34 + 35 + name[4] = '\0'; 34 36 35 37 return !strcmp(name, "vgem"); 36 38 }
+34 -3
tools/testing/selftests/drivers/net/hw/rss_ctx.py
··· 19 19 return [random.randint(0, 255) for _ in range(length)] 20 20 21 21 22 + def _rss_key_check(cfg, data=None, context=0): 23 + if data is None: 24 + data = get_rss(cfg, context=context) 25 + if 'rss-hash-key' not in data: 26 + return 27 + non_zero = [x for x in data['rss-hash-key'] if x != 0] 28 + ksft_eq(bool(non_zero), True, comment=f"RSS key is all zero {data['rss-hash-key']}") 29 + 30 + 22 31 def get_rss(cfg, context=0): 23 32 return ethtool(f"-x {cfg.ifname} context {context}", json=True)[0] 24 33 ··· 99 90 def test_rss_key_indir(cfg): 100 91 """Test basics like updating the main RSS key and indirection table.""" 101 92 102 - if len(_get_rx_cnts(cfg)) < 2: 103 - KsftSkipEx("Device has only one queue (or doesn't support queue stats)") 93 + qcnt = len(_get_rx_cnts(cfg)) 94 + if qcnt < 3: 95 + KsftSkipEx("Device has fewer than 3 queues (or doesn't support queue stats)") 104 96 105 97 data = get_rss(cfg) 106 98 want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table'] ··· 111 101 if not data[k]: 112 102 raise KsftFailEx(f"ethtool results empty for '{k}': {data[k]}") 113 103 104 + _rss_key_check(cfg, data=data) 114 105 key_len = len(data['rss-hash-key']) 115 106 116 107 # Set the key ··· 121 110 data = get_rss(cfg) 122 111 ksft_eq(key, data['rss-hash-key']) 123 112 113 + # Set the indirection table and the key together 114 + key = _rss_key_rand(key_len) 115 + ethtool(f"-X {cfg.ifname} equal 3 hkey " + _rss_key_str(key)) 116 + reset_indir = defer(ethtool, f"-X {cfg.ifname} default") 117 + 118 + data = get_rss(cfg) 119 + _rss_key_check(cfg, data=data) 120 + ksft_eq(0, min(data['rss-indirection-table'])) 121 + ksft_eq(2, max(data['rss-indirection-table'])) 122 + 123 + # Reset indirection table and set the key 124 + key = _rss_key_rand(key_len) 125 + ethtool(f"-X {cfg.ifname} default hkey " + _rss_key_str(key)) 126 + data = get_rss(cfg) 127 + _rss_key_check(cfg, data=data) 128 + ksft_eq(0, min(data['rss-indirection-table'])) 129 + ksft_eq(qcnt - 
1, max(data['rss-indirection-table'])) 130 + 124 131 # Set the indirection table 125 132 ethtool(f"-X {cfg.ifname} equal 2") 126 - reset_indir = defer(ethtool, f"-X {cfg.ifname} default") 127 133 data = get_rss(cfg) 128 134 ksft_eq(0, min(data['rss-indirection-table'])) 129 135 ksft_eq(1, max(data['rss-indirection-table'])) ··· 345 317 ctx_cnt = i 346 318 break 347 319 320 + _rss_key_check(cfg, context=ctx_id) 321 + 348 322 if not create_with_cfg: 349 323 ethtool(f"-X {cfg.ifname} context {ctx_id} {want_cfg}") 324 + _rss_key_check(cfg, context=ctx_id) 350 325 351 326 # Sanity check the context we just created 352 327 data = get_rss(cfg, ctx_id)
+26
tools/testing/selftests/hid/hid_bpf.c
··· 532 532 FIXTURE_DATA(hid_bpf) * self, 533 533 const FIXTURE_VARIANT(hid_bpf) * variant) 534 534 { 535 + struct bpf_map *iter_map; 535 536 int err = -EINVAL; 536 537 537 538 ASSERT_LE(progs_count, ARRAY_SIZE(self->hid_links)) ··· 564 563 565 564 *ops_hid_id = self->hid_id; 566 565 } 566 + 567 + /* we disable the auto-attach feature of all maps because we 568 + * only want the tested one to be manually attached in the next 569 + * call to bpf_map__attach_struct_ops() 570 + */ 571 + bpf_object__for_each_map(iter_map, *self->skel->skeleton->obj) 572 + bpf_map__set_autoattach(iter_map, false); 567 573 568 574 err = hid__load(self->skel); 569 575 ASSERT_OK(err) TH_LOG("hid_skel_load failed: %d", err); ··· 692 684 err = read(self->hidraw_fd, buf, sizeof(buf)); 693 685 ASSERT_EQ(err, 6) TH_LOG("read_hidraw"); 694 686 ASSERT_EQ(buf[2], 52); 687 + } 688 + 689 + /* 690 + * Attach hid_first_event to the given uhid device, 691 + * attempt at re-attaching it, we should not lock and 692 + * return an invalid struct bpf_link 693 + */ 694 + TEST_F(hid_bpf, multiple_attach) 695 + { 696 + const struct test_program progs[] = { 697 + { .name = "hid_first_event" }, 698 + }; 699 + struct bpf_link *link; 700 + 701 + LOAD_PROGRAMS(progs); 702 + 703 + link = bpf_map__attach_struct_ops(self->skel->maps.first_event); 704 + ASSERT_NULL(link) TH_LOG("unexpected return value when re-attaching the struct_ops"); 695 705 } 696 706 697 707 /*
+1 -1
tools/testing/selftests/hid/progs/hid.c
··· 455 455 __type(value, struct elem); 456 456 } hmap SEC(".maps"); 457 457 458 - static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work) 458 + static int wq_cb_sleepable(void *map, int *key, void *work) 459 459 { 460 460 __u8 buf[9] = {2, 3, 4, 5, 6, 7, 8, 9, 10}; 461 461 struct hid_bpf_ctx *hid_ctx;
+1 -1
tools/testing/selftests/hid/progs/hid_bpf_helpers.h
··· 114 114 extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; 115 115 extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; 116 116 extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, 117 - int (callback_fn)(void *map, int *key, struct bpf_wq *wq), 117 + int (callback_fn)(void *map, int *key, void *wq), 118 118 unsigned int flags__k, void *aux__ign) __ksym; 119 119 #define bpf_wq_set_callback(timer, cb, flags) \ 120 120 bpf_wq_set_callback_impl(timer, cb, flags, NULL)
+1 -1
tools/testing/selftests/kselftest/ksft.py
··· 70 70 71 71 72 72 def finished(): 73 - if ksft_cnt["pass"] == ksft_num_tests: 73 + if ksft_cnt["pass"] + ksft_cnt["skip"] == ksft_num_tests: 74 74 exit_code = KSFT_PASS 75 75 else: 76 76 exit_code = KSFT_FAIL
+4 -4
tools/testing/selftests/kvm/riscv/get-reg-list.c
··· 961 961 KVM_ISA_EXT_SIMPLE_CONFIG(zbkc, ZBKC); 962 962 KVM_ISA_EXT_SIMPLE_CONFIG(zbkx, ZBKX); 963 963 KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS); 964 - KVM_ISA_EXT_SIMPLE_CONFIG(zca, ZCA), 965 - KVM_ISA_EXT_SIMPLE_CONFIG(zcb, ZCB), 966 - KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD), 967 - KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF), 964 + KVM_ISA_EXT_SIMPLE_CONFIG(zca, ZCA); 965 + KVM_ISA_EXT_SIMPLE_CONFIG(zcb, ZCB); 966 + KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD); 967 + KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF); 968 968 KVM_ISA_EXT_SIMPLE_CONFIG(zcmop, ZCMOP); 969 969 KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA); 970 970 KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
+1 -1
tools/testing/selftests/mm/Makefile
··· 110 110 111 111 endif 112 112 113 - ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64)) 113 + ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390)) 114 114 TEST_GEN_FILES += va_high_addr_switch 115 115 TEST_GEN_FILES += virtual_address_range 116 116 TEST_GEN_FILES += write_to_hugetlbfs
+2
tools/testing/selftests/mm/mremap_test.c
··· 22 22 #define VALIDATION_DEFAULT_THRESHOLD 4 /* 4MB */ 23 23 #define VALIDATION_NO_THRESHOLD 0 /* Verify the entire region */ 24 24 25 + #ifndef MIN 25 26 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) 26 27 #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) 28 + #endif 27 29 #define SIZE_MB(m) ((size_t)m * (1024 * 1024)) 28 30 #define SIZE_KB(k) ((size_t)k * 1024) 29 31
+4 -4
tools/testing/selftests/net/mptcp/mptcp_connect.c
··· 1115 1115 return 1; 1116 1116 } 1117 1117 1118 - if (--cfg_repeat > 0) { 1119 - if (cfg_input) 1120 - close(fd); 1118 + if (cfg_input) 1119 + close(fd); 1120 + 1121 + if (--cfg_repeat > 0) 1121 1122 goto again; 1122 - } 1123 1123 1124 1124 return 0; 1125 1125 }
+129 -29
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 661 661 done 662 662 663 663 if [ -z "${id}" ]; then 664 - test_fail "bad test - missing endpoint id" 664 + fail_test "bad test - missing endpoint id" 665 665 return 666 666 fi 667 667 ··· 1415 1415 local add_nr=$1 1416 1416 local echo_nr=$2 1417 1417 local port_nr=${3:-0} 1418 - local syn_nr=${4:-$port_nr} 1419 - local syn_ack_nr=${5:-$port_nr} 1420 - local ack_nr=${6:-$port_nr} 1421 - local mis_syn_nr=${7:-0} 1422 - local mis_ack_nr=${8:-0} 1418 + local ns_invert=${4:-""} 1419 + local syn_nr=$port_nr 1420 + local syn_ack_nr=$port_nr 1421 + local ack_nr=$port_nr 1422 + local mis_syn_nr=0 1423 + local mis_ack_nr=0 1424 + local ns_tx=$ns1 1425 + local ns_rx=$ns2 1426 + local extra_msg="" 1423 1427 local count 1424 1428 local timeout 1425 1429 1426 - timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout) 1430 + if [[ $ns_invert = "invert" ]]; then 1431 + ns_tx=$ns2 1432 + ns_rx=$ns1 1433 + extra_msg="invert" 1434 + fi 1435 + 1436 + timeout=$(ip netns exec ${ns_tx} sysctl -n net.mptcp.add_addr_timeout) 1427 1437 1428 1438 print_check "add" 1429 - count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtAddAddr") 1439 + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtAddAddr") 1430 1440 if [ -z "$count" ]; then 1431 1441 print_skip 1432 1442 # if the test configured a short timeout tolerate greater then expected ··· 1448 1438 fi 1449 1439 1450 1440 print_check "echo" 1451 - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtEchoAdd") 1441 + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtEchoAdd") 1452 1442 if [ -z "$count" ]; then 1453 1443 print_skip 1454 1444 elif [ "$count" != "$echo_nr" ]; then ··· 1459 1449 1460 1450 if [ $port_nr -gt 0 ]; then 1461 1451 print_check "pt" 1462 - count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtPortAdd") 1452 + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtPortAdd") 1463 1453 if [ -z "$count" ]; then 1464 1454 print_skip 1465 1455 elif [ "$count" != "$port_nr" ]; then ··· 1469 1459 fi 1470 1460 1471 1461 print_check "syn" 1472 - 
count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortSynRx") 1462 + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortSynRx") 1473 1463 if [ -z "$count" ]; then 1474 1464 print_skip 1475 1465 elif [ "$count" != "$syn_nr" ]; then ··· 1480 1470 fi 1481 1471 1482 1472 print_check "synack" 1483 - count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx") 1473 + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPJoinPortSynAckRx") 1484 1474 if [ -z "$count" ]; then 1485 1475 print_skip 1486 1476 elif [ "$count" != "$syn_ack_nr" ]; then ··· 1491 1481 fi 1492 1482 1493 1483 print_check "ack" 1494 - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortAckRx") 1484 + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortAckRx") 1495 1485 if [ -z "$count" ]; then 1496 1486 print_skip 1497 1487 elif [ "$count" != "$ack_nr" ]; then ··· 1502 1492 fi 1503 1493 1504 1494 print_check "syn" 1505 - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortSynRx") 1495 + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortSynRx") 1506 1496 if [ -z "$count" ]; then 1507 1497 print_skip 1508 1498 elif [ "$count" != "$mis_syn_nr" ]; then ··· 1513 1503 fi 1514 1504 1515 1505 print_check "ack" 1516 - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortAckRx") 1506 + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortAckRx") 1517 1507 if [ -z "$count" ]; then 1518 1508 print_skip 1519 1509 elif [ "$count" != "$mis_ack_nr" ]; then ··· 1523 1513 print_ok 1524 1514 fi 1525 1515 fi 1516 + 1517 + print_info "$extra_msg" 1526 1518 } 1527 1519 1528 1520 chk_add_tx_nr() ··· 1646 1634 { 1647 1635 local mp_prio_nr_tx=$1 1648 1636 local mp_prio_nr_rx=$2 1637 + local mpj_syn=$3 1638 + local mpj_syn_ack=$4 1649 1639 local count 1650 1640 1651 1641 print_check "ptx" ··· 1666 1652 print_skip 1667 1653 elif [ "$count" != "$mp_prio_nr_rx" ]; then 1668 1654 fail_test "got $count MP_PRIO[s] RX expected $mp_prio_nr_rx" 1655 + else 1656 + print_ok 1657 + fi 1658 
+ 1659 + print_check "syn backup" 1660 + count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx") 1661 + if [ -z "$count" ]; then 1662 + print_skip 1663 + elif [ "$count" != "$mpj_syn" ]; then 1664 + fail_test "got $count JOIN[s] syn with Backup expected $mpj_syn" 1665 + else 1666 + print_ok 1667 + fi 1668 + 1669 + print_check "synack backup" 1670 + count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx") 1671 + if [ -z "$count" ]; then 1672 + print_skip 1673 + elif [ "$count" != "$mpj_syn_ack" ]; then 1674 + fail_test "got $count JOIN[s] synack with Backup expected $mpj_syn_ack" 1669 1675 else 1670 1676 print_ok 1671 1677 fi ··· 1987 1953 run_tests $ns1 $ns2 10.0.1.1 1988 1954 chk_join_nr 2 2 2 1989 1955 chk_add_nr 1 1 1956 + fi 1957 + 1958 + # uncommon: subflow and signal flags on the same endpoint 1959 + # or because the user wrongly picked both, but still expects the client 1960 + # to create additional subflows 1961 + if reset "subflow and signal together"; then 1962 + pm_nl_set_limits $ns1 0 2 1963 + pm_nl_set_limits $ns2 0 2 1964 + pm_nl_add_endpoint $ns2 10.0.3.2 flags signal,subflow 1965 + run_tests $ns1 $ns2 10.0.1.1 1966 + chk_join_nr 1 1 1 1967 + chk_add_nr 1 1 0 invert # only initiated by ns2 1968 + chk_add_nr 0 0 0 # none initiated by ns1 1969 + chk_rst_nr 0 0 invert # no RST sent by the client 1970 + chk_rst_nr 0 0 # no RST sent by the server 1990 1971 fi 1991 1972 1992 1973 # accept and use add_addr with additional subflows ··· 2661 2612 sflags=nobackup speed=slow \ 2662 2613 run_tests $ns1 $ns2 10.0.1.1 2663 2614 chk_join_nr 1 1 1 2664 - chk_prio_nr 0 1 2615 + chk_prio_nr 0 1 1 0 2665 2616 fi 2666 2617 2667 2618 # single address, backup 2668 2619 if reset "single address, backup" && 2620 + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 2621 + pm_nl_set_limits $ns1 0 1 2622 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup 2623 + pm_nl_set_limits $ns2 1 1 2624 + sflags=nobackup speed=slow \ 2625 + 
run_tests $ns1 $ns2 10.0.1.1 2626 + chk_join_nr 1 1 1 2627 + chk_add_nr 1 1 2628 + chk_prio_nr 1 0 0 1 2629 + fi 2630 + 2631 + # single address, switch to backup 2632 + if reset "single address, switch to backup" && 2669 2633 continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 2670 2634 pm_nl_set_limits $ns1 0 1 2671 2635 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ··· 2687 2625 run_tests $ns1 $ns2 10.0.1.1 2688 2626 chk_join_nr 1 1 1 2689 2627 chk_add_nr 1 1 2690 - chk_prio_nr 1 1 2628 + chk_prio_nr 1 1 0 0 2691 2629 fi 2692 2630 2693 2631 # single address with port, backup 2694 2632 if reset "single address with port, backup" && 2695 2633 continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 2696 2634 pm_nl_set_limits $ns1 0 1 2697 - pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100 2635 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100 2698 2636 pm_nl_set_limits $ns2 1 1 2699 - sflags=backup speed=slow \ 2637 + sflags=nobackup speed=slow \ 2700 2638 run_tests $ns1 $ns2 10.0.1.1 2701 2639 chk_join_nr 1 1 1 2702 2640 chk_add_nr 1 1 2703 - chk_prio_nr 1 1 2641 + chk_prio_nr 1 0 0 1 2704 2642 fi 2705 2643 2706 2644 if reset "mpc backup" && ··· 2709 2647 speed=slow \ 2710 2648 run_tests $ns1 $ns2 10.0.1.1 2711 2649 chk_join_nr 0 0 0 2712 - chk_prio_nr 0 1 2650 + chk_prio_nr 0 1 0 0 2713 2651 fi 2714 2652 2715 2653 if reset "mpc backup both sides" && 2716 2654 continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then 2717 - pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup 2655 + pm_nl_set_limits $ns1 0 2 2656 + pm_nl_set_limits $ns2 1 2 2657 + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup 2718 2658 pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup 2659 + 2660 + # 10.0.2.2 (non-backup) -> 10.0.1.1 (backup) 2661 + pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow 2662 + # 10.0.1.2 (backup) -> 10.0.2.1 (non-backup) 2663 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal 2664 + ip -net 
"$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path 2665 + 2719 2666 speed=slow \ 2720 2667 run_tests $ns1 $ns2 10.0.1.1 2721 - chk_join_nr 0 0 0 2722 - chk_prio_nr 1 1 2668 + chk_join_nr 2 2 2 2669 + chk_prio_nr 1 1 1 1 2723 2670 fi 2724 2671 2725 2672 if reset "mpc switch to backup" && ··· 2737 2666 sflags=backup speed=slow \ 2738 2667 run_tests $ns1 $ns2 10.0.1.1 2739 2668 chk_join_nr 0 0 0 2740 - chk_prio_nr 0 1 2669 + chk_prio_nr 0 1 0 0 2741 2670 fi 2742 2671 2743 2672 if reset "mpc switch to backup both sides" && ··· 2747 2676 sflags=backup speed=slow \ 2748 2677 run_tests $ns1 $ns2 10.0.1.1 2749 2678 chk_join_nr 0 0 0 2750 - chk_prio_nr 1 1 2679 + chk_prio_nr 1 1 0 0 2751 2680 fi 2752 2681 } 2753 2682 ··· 3124 3053 addr_nr_ns2=1 sflags=backup,fullmesh speed=slow \ 3125 3054 run_tests $ns1 $ns2 10.0.1.1 3126 3055 chk_join_nr 2 2 2 3127 - chk_prio_nr 0 1 3056 + chk_prio_nr 0 1 1 0 3128 3057 chk_rm_nr 0 1 3129 3058 fi 3130 3059 ··· 3137 3066 sflags=nobackup,nofullmesh speed=slow \ 3138 3067 run_tests $ns1 $ns2 10.0.1.1 3139 3068 chk_join_nr 2 2 2 3140 - chk_prio_nr 0 1 3069 + chk_prio_nr 0 1 1 0 3141 3070 chk_rm_nr 0 1 3142 3071 fi 3143 3072 } ··· 3389 3318 sflags=backup speed=slow \ 3390 3319 run_tests $ns1 $ns2 10.0.1.1 3391 3320 chk_join_nr 1 1 0 3392 - chk_prio_nr 0 0 3321 + chk_prio_nr 0 0 0 0 3393 3322 fi 3394 3323 3395 3324 # userspace pm type prevents rm_addr ··· 3597 3526 chk_mptcp_info subflows 1 subflows 1 3598 3527 mptcp_lib_kill_wait $tests_pid 3599 3528 fi 3529 + 3530 + # remove and re-add 3531 + if reset "delete re-add signal" && 3532 + mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 3533 + pm_nl_set_limits $ns1 1 1 3534 + pm_nl_set_limits $ns2 1 1 3535 + pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal 3536 + test_linkfail=4 speed=20 \ 3537 + run_tests $ns1 $ns2 10.0.1.1 & 3538 + local tests_pid=$! 
3539 + 3540 + wait_mpj $ns2 3541 + pm_nl_check_endpoint "creation" \ 3542 + $ns1 10.0.2.1 id 1 flags signal 3543 + chk_subflow_nr "before delete" 2 3544 + chk_mptcp_info subflows 1 subflows 1 3545 + 3546 + pm_nl_del_endpoint $ns1 1 10.0.2.1 3547 + sleep 0.5 3548 + chk_subflow_nr "after delete" 1 3549 + chk_mptcp_info subflows 0 subflows 0 3550 + 3551 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal 3552 + wait_mpj $ns2 3553 + chk_subflow_nr "after re-add" 2 3554 + chk_mptcp_info subflows 1 subflows 1 3555 + mptcp_lib_kill_wait $tests_pid 3556 + fi 3557 + 3600 3558 } 3601 3559 3602 3560 # [$1: error message]
+2
tools/testing/selftests/seccomp/seccomp_bpf.c
··· 60 60 #define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__) 61 61 #endif 62 62 63 + #ifndef MIN 63 64 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) 65 + #endif 64 66 65 67 #ifndef PR_SET_PTRACER 66 68 # define PR_SET_PTRACER 0x59616d61
+2 -2
virt/kvm/Kconfig
··· 113 113 select KVM_PRIVATE_MEM 114 114 bool 115 115 116 - config HAVE_KVM_GMEM_PREPARE 116 + config HAVE_KVM_ARCH_GMEM_PREPARE 117 117 bool 118 118 depends on KVM_PRIVATE_MEM 119 119 120 - config HAVE_KVM_GMEM_INVALIDATE 120 + config HAVE_KVM_ARCH_GMEM_INVALIDATE 121 121 bool 122 122 depends on KVM_PRIVATE_MEM
+136 -91
virt/kvm/guest_memfd.c
··· 13 13 struct list_head entry; 14 14 }; 15 15 16 - static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct folio *folio) 16 + /** 17 + * folio_file_pfn - like folio_file_page, but return a pfn. 18 + * @folio: The folio which contains this index. 19 + * @index: The index we want to look up. 20 + * 21 + * Return: The pfn for this index. 22 + */ 23 + static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index) 17 24 { 18 - #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE 19 - struct list_head *gmem_list = &inode->i_mapping->i_private_list; 20 - struct kvm_gmem *gmem; 25 + return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1)); 26 + } 21 27 22 - list_for_each_entry(gmem, gmem_list, entry) { 23 - struct kvm_memory_slot *slot; 24 - struct kvm *kvm = gmem->kvm; 25 - struct page *page; 26 - kvm_pfn_t pfn; 27 - gfn_t gfn; 28 - int rc; 29 - 30 - if (!kvm_arch_gmem_prepare_needed(kvm)) 31 - continue; 32 - 33 - slot = xa_load(&gmem->bindings, index); 34 - if (!slot) 35 - continue; 36 - 37 - page = folio_file_page(folio, index); 38 - pfn = page_to_pfn(page); 39 - gfn = slot->base_gfn + index - slot->gmem.pgoff; 40 - rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page))); 41 - if (rc) { 42 - pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n", 43 - index, gfn, pfn, rc); 44 - return rc; 45 - } 28 + static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot, 29 + pgoff_t index, struct folio *folio) 30 + { 31 + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE 32 + kvm_pfn_t pfn = folio_file_pfn(folio, index); 33 + gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff; 34 + int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio)); 35 + if (rc) { 36 + pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n", 37 + index, gfn, pfn, rc); 38 + return rc; 46 39 } 47 - 48 40 #endif 41 + 49 42 return 0; 50 43 } 51 44 52 - static 
struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index, bool prepare) 45 + static inline void kvm_gmem_mark_prepared(struct folio *folio) 53 46 { 54 - struct folio *folio; 47 + folio_mark_uptodate(folio); 48 + } 55 49 56 - /* TODO: Support huge pages. */ 57 - folio = filemap_grab_folio(inode->i_mapping, index); 58 - if (IS_ERR(folio)) 59 - return folio; 50 + /* 51 + * Process @folio, which contains @gfn, so that the guest can use it. 52 + * The folio must be locked and the gfn must be contained in @slot. 53 + * On successful return the guest sees a zero page so as to avoid 54 + * leaking host data and the up-to-date flag is set. 55 + */ 56 + static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot, 57 + gfn_t gfn, struct folio *folio) 58 + { 59 + unsigned long nr_pages, i; 60 + pgoff_t index; 61 + int r; 62 + 63 + nr_pages = folio_nr_pages(folio); 64 + for (i = 0; i < nr_pages; i++) 65 + clear_highpage(folio_page(folio, i)); 60 66 61 67 /* 62 - * Use the up-to-date flag to track whether or not the memory has been 63 - * zeroed before being handed off to the guest. There is no backing 64 - * storage for the memory, so the folio will remain up-to-date until 65 - * it's removed. 68 + * Preparing huge folios should always be safe, since it should 69 + * be possible to split them later if needed. 66 70 * 67 - * TODO: Skip clearing pages when trusted firmware will do it when 68 - * assigning memory to the guest. 71 + * Right now the folio order is always going to be zero, but the 72 + * code is ready for huge folios. The only assumption is that 73 + * the base pgoff of memslots is naturally aligned with the 74 + * requested page order, ensuring that huge folios can also use 75 + * huge page table entries for GPA->HPA mapping. 76 + * 77 + * The order will be passed when creating the guest_memfd, and 78 + * checked when creating memslots. 
69 79 */ 70 - if (!folio_test_uptodate(folio)) { 71 - unsigned long nr_pages = folio_nr_pages(folio); 72 - unsigned long i; 80 + WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio))); 81 + index = gfn - slot->base_gfn + slot->gmem.pgoff; 82 + index = ALIGN_DOWN(index, 1 << folio_order(folio)); 83 + r = __kvm_gmem_prepare_folio(kvm, slot, index, folio); 84 + if (!r) 85 + kvm_gmem_mark_prepared(folio); 73 86 74 - for (i = 0; i < nr_pages; i++) 75 - clear_highpage(folio_page(folio, i)); 87 + return r; 88 + } 76 89 77 - folio_mark_uptodate(folio); 78 - } 79 - 80 - if (prepare) { 81 - int r = kvm_gmem_prepare_folio(inode, index, folio); 82 - if (r < 0) { 83 - folio_unlock(folio); 84 - folio_put(folio); 85 - return ERR_PTR(r); 86 - } 87 - } 88 - 89 - /* 90 - * Ignore accessed, referenced, and dirty flags. The memory is 91 - * unevictable and there is no storage to write back to. 92 - */ 93 - return folio; 90 + /* 91 + * Returns a locked folio on success. The caller is responsible for 92 + * setting the up-to-date flag before the memory is mapped into the guest. 93 + * There is no backing storage for the memory, so the folio will remain 94 + * up-to-date until it's removed. 95 + * 96 + * Ignore accessed, referenced, and dirty flags. The memory is 97 + * unevictable and there is no storage to write back to. 98 + */ 99 + static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index) 100 + { 101 + /* TODO: Support huge pages. 
*/ 102 + return filemap_grab_folio(inode->i_mapping, index); 94 103 } 95 104 96 105 static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start, ··· 199 190 break; 200 191 } 201 192 202 - folio = kvm_gmem_get_folio(inode, index, true); 193 + folio = kvm_gmem_get_folio(inode, index); 203 194 if (IS_ERR(folio)) { 204 195 r = PTR_ERR(folio); 205 196 break; ··· 352 343 return MF_DELAYED; 353 344 } 354 345 355 - #ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE 346 + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE 356 347 static void kvm_gmem_free_folio(struct folio *folio) 357 348 { 358 349 struct page *page = folio_page(folio, 0); ··· 367 358 .dirty_folio = noop_dirty_folio, 368 359 .migrate_folio = kvm_gmem_migrate_folio, 369 360 .error_remove_folio = kvm_gmem_error_folio, 370 - #ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE 361 + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE 371 362 .free_folio = kvm_gmem_free_folio, 372 363 #endif 373 364 }; ··· 550 541 fput(file); 551 542 } 552 543 553 - static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot, 554 - gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare) 544 + /* Returns a locked folio on success. 
*/ 545 + static struct folio * 546 + __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot, 547 + gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared, 548 + int *max_order) 555 549 { 556 550 pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff; 557 551 struct kvm_gmem *gmem = file->private_data; 558 552 struct folio *folio; 559 - struct page *page; 560 - int r; 561 553 562 554 if (file != slot->gmem.file) { 563 555 WARN_ON_ONCE(slot->gmem.file); 564 - return -EFAULT; 556 + return ERR_PTR(-EFAULT); 565 557 } 566 558 567 559 gmem = file->private_data; 568 560 if (xa_load(&gmem->bindings, index) != slot) { 569 561 WARN_ON_ONCE(xa_load(&gmem->bindings, index)); 570 - return -EIO; 562 + return ERR_PTR(-EIO); 571 563 } 572 564 573 - folio = kvm_gmem_get_folio(file_inode(file), index, prepare); 565 + folio = kvm_gmem_get_folio(file_inode(file), index); 574 566 if (IS_ERR(folio)) 575 - return PTR_ERR(folio); 567 + return folio; 576 568 577 569 if (folio_test_hwpoison(folio)) { 578 570 folio_unlock(folio); 579 571 folio_put(folio); 580 - return -EHWPOISON; 572 + return ERR_PTR(-EHWPOISON); 581 573 } 582 574 583 - page = folio_file_page(folio, index); 584 - 585 - *pfn = page_to_pfn(page); 575 + *pfn = folio_file_pfn(folio, index); 586 576 if (max_order) 587 577 *max_order = 0; 588 578 589 - r = 0; 590 - 591 - folio_unlock(folio); 592 - 593 - return r; 579 + *is_prepared = folio_test_uptodate(folio); 580 + return folio; 594 581 } 595 582 596 583 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, 597 584 gfn_t gfn, kvm_pfn_t *pfn, int *max_order) 598 585 { 599 586 struct file *file = kvm_gmem_get_file(slot); 600 - int r; 587 + struct folio *folio; 588 + bool is_prepared = false; 589 + int r = 0; 601 590 602 591 if (!file) 603 592 return -EFAULT; 604 593 605 - r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true); 594 + folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order); 595 + if (IS_ERR(folio)) { 596 + r = 
PTR_ERR(folio); 597 + goto out; 598 + } 599 + 600 + if (!is_prepared) 601 + r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio); 602 + 603 + folio_unlock(folio); 604 + if (r < 0) 605 + folio_put(folio); 606 + 607 + out: 606 608 fput(file); 607 609 return r; 608 610 } 609 611 EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn); 610 612 613 + #ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM 611 614 long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages, 612 615 kvm_gmem_populate_cb post_populate, void *opaque) 613 616 { ··· 646 625 647 626 npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages); 648 627 for (i = 0; i < npages; i += (1 << max_order)) { 628 + struct folio *folio; 649 629 gfn_t gfn = start_gfn + i; 630 + bool is_prepared = false; 650 631 kvm_pfn_t pfn; 651 632 652 633 if (signal_pending(current)) { ··· 656 633 break; 657 634 } 658 635 659 - ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order, false); 660 - if (ret) 636 + folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order); 637 + if (IS_ERR(folio)) { 638 + ret = PTR_ERR(folio); 661 639 break; 640 + } 662 641 663 - if (!IS_ALIGNED(gfn, (1 << max_order)) || 664 - (npages - i) < (1 << max_order)) 665 - max_order = 0; 642 + if (is_prepared) { 643 + folio_unlock(folio); 644 + folio_put(folio); 645 + ret = -EEXIST; 646 + break; 647 + } 648 + 649 + folio_unlock(folio); 650 + WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) || 651 + (npages - i) < (1 << max_order)); 652 + 653 + ret = -EINVAL; 654 + while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order), 655 + KVM_MEMORY_ATTRIBUTE_PRIVATE, 656 + KVM_MEMORY_ATTRIBUTE_PRIVATE)) { 657 + if (!max_order) 658 + goto put_folio_and_exit; 659 + max_order--; 660 + } 666 661 667 662 p = src ? 
src + i * PAGE_SIZE : NULL; 668 663 ret = post_populate(kvm, gfn, pfn, p, max_order, opaque); 664 + if (!ret) 665 + kvm_gmem_mark_prepared(folio); 669 666 670 - put_page(pfn_to_page(pfn)); 667 + put_folio_and_exit: 668 + folio_put(folio); 671 669 if (ret) 672 670 break; 673 671 } ··· 699 655 return ret && !i ? ret : i; 700 656 } 701 657 EXPORT_SYMBOL_GPL(kvm_gmem_populate); 658 + #endif
+36 -37
virt/kvm/kvm_main.c
··· 2398 2398 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2399 2399 2400 2400 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES 2401 - /* 2402 - * Returns true if _all_ gfns in the range [@start, @end) have attributes 2403 - * matching @attrs. 2404 - */ 2405 - bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, 2406 - unsigned long attrs) 2407 - { 2408 - XA_STATE(xas, &kvm->mem_attr_array, start); 2409 - unsigned long index; 2410 - bool has_attrs; 2411 - void *entry; 2412 - 2413 - rcu_read_lock(); 2414 - 2415 - if (!attrs) { 2416 - has_attrs = !xas_find(&xas, end - 1); 2417 - goto out; 2418 - } 2419 - 2420 - has_attrs = true; 2421 - for (index = start; index < end; index++) { 2422 - do { 2423 - entry = xas_next(&xas); 2424 - } while (xas_retry(&xas, entry)); 2425 - 2426 - if (xas.xa_index != index || xa_to_value(entry) != attrs) { 2427 - has_attrs = false; 2428 - break; 2429 - } 2430 - } 2431 - 2432 - out: 2433 - rcu_read_unlock(); 2434 - return has_attrs; 2435 - } 2436 - 2437 2401 static u64 kvm_supported_mem_attributes(struct kvm *kvm) 2438 2402 { 2439 2403 if (!kvm || kvm_arch_has_private_mem(kvm)) 2440 2404 return KVM_MEMORY_ATTRIBUTE_PRIVATE; 2441 2405 2442 2406 return 0; 2407 + } 2408 + 2409 + /* 2410 + * Returns true if _all_ gfns in the range [@start, @end) have attributes 2411 + * such that the bits in @mask match @attrs. 
2412 + */ 2413 + bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, 2414 + unsigned long mask, unsigned long attrs) 2415 + { 2416 + XA_STATE(xas, &kvm->mem_attr_array, start); 2417 + unsigned long index; 2418 + void *entry; 2419 + 2420 + mask &= kvm_supported_mem_attributes(kvm); 2421 + if (attrs & ~mask) 2422 + return false; 2423 + 2424 + if (end == start + 1) 2425 + return (kvm_get_memory_attributes(kvm, start) & mask) == attrs; 2426 + 2427 + guard(rcu)(); 2428 + if (!attrs) 2429 + return !xas_find(&xas, end - 1); 2430 + 2431 + for (index = start; index < end; index++) { 2432 + do { 2433 + entry = xas_next(&xas); 2434 + } while (xas_retry(&xas, entry)); 2435 + 2436 + if (xas.xa_index != index || 2437 + (xa_to_value(entry) & mask) != attrs) 2438 + return false; 2439 + } 2440 + 2441 + return true; 2443 2442 } 2444 2443 2445 2444 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm, ··· 2533 2534 mutex_lock(&kvm->slots_lock); 2534 2535 2535 2536 /* Nothing to do if the entire range as the desired attributes. */ 2536 - if (kvm_range_has_memory_attributes(kvm, start, end, attributes)) 2537 + if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes)) 2537 2538 goto out_unlock; 2538 2539 2539 2540 /*