Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-7.0-rc5).

net/netfilter/nft_set_rbtree.c
598adea720b97 ("netfilter: revert nft_set_rbtree: validate open interval overlap")
3aea466a43998 ("netfilter: nft_set_rbtree: don't disable bh when acquiring tree lock")
https://lore.kernel.org/abgaQBpeGstdN4oq@sirena.org.uk

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+5774 -3377
+1
.mailmap
··· 327 327 Herbert Xu <herbert@gondor.apana.org.au> 328 328 Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com> 329 329 Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn> 330 + Ignat Korchagin <ignat@linux.win> <ignat@cloudflare.com> 330 331 Ike Panhc <ikepanhc@gmail.com> <ike.pan@canonical.com> 331 332 J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com> 332 333 J. Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu>
+3
Documentation/admin-guide/kernel-parameters.txt
··· 8196 8196 p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT 8197 8197 (Reduce timeout of the SET_ADDRESS 8198 8198 request from 5000 ms to 500 ms); 8199 + q = USB_QUIRK_FORCE_ONE_CONFIG (Device 8200 + claims zero configurations, 8201 + forcing to 1); 8199 8202 Example: quirks=0781:5580:bk,0a5c:5834:gij 8200 8203 8201 8204 usbhid.mousepoll=
+2
Documentation/dev-tools/kunit/run_wrapper.rst
··· 336 336 - ``--list_tests_attr``: If set, lists all tests that will be run and all of their 337 337 attributes. 338 338 339 + - ``--list_suites``: If set, lists all suites that will be run. 340 + 339 341 Command-line completion 340 342 ============================== 341 343
+20 -1
Documentation/devicetree/bindings/display/msm/dp-controller.yaml
··· 253 253 enum: 254 254 # these platforms support 2 streams MST on some interfaces, 255 255 # others are SST only 256 - - qcom,glymur-dp 257 256 - qcom,sc8280xp-dp 258 257 - qcom,x1e80100-dp 259 258 then: ··· 308 309 clocks-names: 309 310 minItems: 6 310 311 maxItems: 8 312 + 313 + - if: 314 + properties: 315 + compatible: 316 + contains: 317 + enum: 318 + # these platforms support 2 streams MST on some interfaces, 319 + # others are SST only, but all controllers have 4 ports 320 + - qcom,glymur-dp 321 + then: 322 + properties: 323 + reg: 324 + minItems: 9 325 + maxItems: 9 326 + clocks: 327 + minItems: 5 328 + maxItems: 6 329 + clocks-names: 330 + minItems: 5 331 + maxItems: 6 311 332 312 333 unevaluatedProperties: false 313 334
+10 -6
Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml
··· 176 176 }; 177 177 }; 178 178 179 - displayport-controller@ae90000 { 179 + displayport-controller@af54000 { 180 180 compatible = "qcom,glymur-dp"; 181 - reg = <0xae90000 0x200>, 182 - <0xae90200 0x200>, 183 - <0xae90400 0x600>, 184 - <0xae91000 0x400>, 185 - <0xae91400 0x400>; 181 + reg = <0xaf54000 0x200>, 182 + <0xaf54200 0x200>, 183 + <0xaf55000 0xc00>, 184 + <0xaf56000 0x400>, 185 + <0xaf57000 0x400>, 186 + <0xaf58000 0x400>, 187 + <0xaf59000 0x400>, 188 + <0xaf5a000 0x600>, 189 + <0xaf5b000 0x600>; 186 190 187 191 interrupt-parent = <&mdss>; 188 192 interrupts = <12>;
+1 -1
Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml
··· 10 10 - Krzysztof Kozlowski <krzk@kernel.org> 11 11 12 12 description: 13 - SM8650 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like 13 + SM8750 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like 14 14 DPU display controller, DSI and DP interfaces etc. 15 15 16 16 $ref: /schemas/display/msm/mdss-common.yaml#
+1 -1
Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
··· 7 7 title: Synopsys DesignWare APB I2C Controller 8 8 9 9 maintainers: 10 - - Jarkko Nikula <jarkko.nikula@linux.intel.com> 10 + - Mika Westerberg <mika.westerberg@linux.intel.com> 11 11 12 12 allOf: 13 13 - $ref: /schemas/i2c/i2c-controller.yaml#
+24 -5
Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
··· 6 6 7 7 title: Allwinner A31 SPI Controller 8 8 9 - allOf: 10 - - $ref: spi-controller.yaml 11 - 12 9 maintainers: 13 10 - Chen-Yu Tsai <wens@csie.org> 14 11 - Maxime Ripard <mripard@kernel.org> ··· 79 82 80 83 spi-rx-bus-width: 81 84 items: 82 - - const: 1 85 + enum: [0, 1, 2, 4] 83 86 84 87 spi-tx-bus-width: 85 88 items: 86 - - const: 1 89 + enum: [0, 1, 2, 4] 87 90 88 91 required: 89 92 - compatible ··· 91 94 - interrupts 92 95 - clocks 93 96 - clock-names 97 + 98 + allOf: 99 + - $ref: spi-controller.yaml 100 + - if: 101 + not: 102 + properties: 103 + compatible: 104 + contains: 105 + enum: 106 + - allwinner,sun50i-r329-spi 107 + - allwinner,sun55i-a523-spi 108 + then: 109 + patternProperties: 110 + "^.*@[0-9a-f]+": 111 + properties: 112 + spi-rx-bus-width: 113 + items: 114 + enum: [0, 1] 115 + 116 + spi-tx-bus-width: 117 + items: 118 + enum: [0, 1] 94 119 95 120 unevaluatedProperties: false 96 121
+6 -6
Documentation/netlink/specs/net_shaper.yaml
··· 247 247 flags: [admin-perm] 248 248 249 249 do: 250 - pre: net-shaper-nl-pre-doit 251 - post: net-shaper-nl-post-doit 250 + pre: net-shaper-nl-pre-doit-write 251 + post: net-shaper-nl-post-doit-write 252 252 request: 253 253 attributes: 254 254 - ifindex ··· 278 278 flags: [admin-perm] 279 279 280 280 do: 281 - pre: net-shaper-nl-pre-doit 282 - post: net-shaper-nl-post-doit 281 + pre: net-shaper-nl-pre-doit-write 282 + post: net-shaper-nl-post-doit-write 283 283 request: 284 284 attributes: *ns-binding 285 285 ··· 309 309 flags: [admin-perm] 310 310 311 311 do: 312 - pre: net-shaper-nl-pre-doit 313 - post: net-shaper-nl-post-doit 312 + pre: net-shaper-nl-pre-doit-write 313 + post: net-shaper-nl-post-doit-write 314 314 request: 315 315 attributes: 316 316 - ifindex
+27 -3
Documentation/scheduler/sched-ext.rst
··· 43 43 CONFIG_DEBUG_INFO_BTF=y 44 44 CONFIG_BPF_JIT_ALWAYS_ON=y 45 45 CONFIG_BPF_JIT_DEFAULT_ON=y 46 - CONFIG_PAHOLE_HAS_BTF_TAG=y 47 46 48 47 sched_ext is used only when the BPF scheduler is loaded and running. 49 48 ··· 57 58 However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is 58 59 set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled 59 60 by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and 60 - ``SCHED_IDLE`` policies are scheduled by the fair-class scheduler. 61 + ``SCHED_IDLE`` policies are scheduled by the fair-class scheduler which has 62 + higher sched_class precedence than ``SCHED_EXT``. 61 63 62 64 Terminating the sched_ext scheduler program, triggering `SysRq-S`, or 63 65 detection of any internal error including stalled runnable tasks aborts the ··· 345 345 The functions prefixed with ``scx_bpf_`` can be called from the BPF 346 346 scheduler. 347 347 348 + * ``kernel/sched/ext_idle.c`` contains the built-in idle CPU selection policy. 349 + 348 350 * ``tools/sched_ext/`` hosts example BPF scheduler implementations. 349 351 350 352 * ``scx_simple[.bpf].c``: Minimal global FIFO scheduler example using a ··· 355 353 * ``scx_qmap[.bpf].c``: A multi-level FIFO scheduler supporting five 356 354 levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``. 357 355 356 + * ``scx_central[.bpf].c``: A central FIFO scheduler where all scheduling 357 + decisions are made on one CPU, demonstrating ``LOCAL_ON`` dispatching, 358 + tickless operation, and kthread preemption. 359 + 360 + * ``scx_cpu0[.bpf].c``: A scheduler that queues all tasks to a shared DSQ 361 + and only dispatches them on CPU0 in FIFO order. Useful for testing bypass 362 + behavior. 363 + 364 + * ``scx_flatcg[.bpf].c``: A flattened cgroup hierarchy scheduler 365 + implementing hierarchical weight-based cgroup CPU control by compounding 366 + each cgroup's share at every level into a single flat scheduling layer. 
367 + 368 + * ``scx_pair[.bpf].c``: A core-scheduling example that always makes 369 + sibling CPU pairs execute tasks from the same CPU cgroup. 370 + 371 + * ``scx_sdt[.bpf].c``: A variation of ``scx_simple`` demonstrating BPF 372 + arena memory management for per-task data. 373 + 374 + * ``scx_userland[.bpf].c``: A minimal scheduler demonstrating user space 375 + scheduling. Tasks with CPU affinity are direct-dispatched in FIFO order; 376 + all others are scheduled in user space by a simple vruntime scheduler. 377 + 358 378 ABI Instability 359 379 =============== 360 380 361 381 The APIs provided by sched_ext to BPF schedulers programs have no stability 362 382 guarantees. This includes the ops table callbacks and constants defined in 363 383 ``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in 364 - ``kernel/sched/ext.c``. 384 + ``kernel/sched/ext.c`` and ``kernel/sched/ext_idle.c``. 365 385 366 386 While we will attempt to provide a relatively stable API surface when 367 387 possible, they are subject to change without warning between kernel
+107 -99
Documentation/virt/kvm/api.rst
··· 8435 8435 8436 8436 The valid bits in cap.args[0] are: 8437 8437 8438 - =================================== ============================================ 8439 - KVM_X86_QUIRK_LINT0_REENABLED By default, the reset value for the LVT 8440 - LINT0 register is 0x700 (APIC_MODE_EXTINT). 8441 - When this quirk is disabled, the reset value 8442 - is 0x10000 (APIC_LVT_MASKED). 8438 + ======================================== ================================================ 8439 + KVM_X86_QUIRK_LINT0_REENABLED By default, the reset value for the LVT 8440 + LINT0 register is 0x700 (APIC_MODE_EXTINT). 8441 + When this quirk is disabled, the reset value 8442 + is 0x10000 (APIC_LVT_MASKED). 8443 8443 8444 - KVM_X86_QUIRK_CD_NW_CLEARED By default, KVM clears CR0.CD and CR0.NW on 8445 - AMD CPUs to workaround buggy guest firmware 8446 - that runs in perpetuity with CR0.CD, i.e. 8447 - with caches in "no fill" mode. 8444 + KVM_X86_QUIRK_CD_NW_CLEARED By default, KVM clears CR0.CD and CR0.NW on 8445 + AMD CPUs to workaround buggy guest firmware 8446 + that runs in perpetuity with CR0.CD, i.e. 8447 + with caches in "no fill" mode. 8448 8448 8449 - When this quirk is disabled, KVM does not 8450 - change the value of CR0.CD and CR0.NW. 8449 + When this quirk is disabled, KVM does not 8450 + change the value of CR0.CD and CR0.NW. 8451 8451 8452 - KVM_X86_QUIRK_LAPIC_MMIO_HOLE By default, the MMIO LAPIC interface is 8453 - available even when configured for x2APIC 8454 - mode. When this quirk is disabled, KVM 8455 - disables the MMIO LAPIC interface if the 8456 - LAPIC is in x2APIC mode. 8452 + KVM_X86_QUIRK_LAPIC_MMIO_HOLE By default, the MMIO LAPIC interface is 8453 + available even when configured for x2APIC 8454 + mode. When this quirk is disabled, KVM 8455 + disables the MMIO LAPIC interface if the 8456 + LAPIC is in x2APIC mode. 
8457 8457 8458 - KVM_X86_QUIRK_OUT_7E_INC_RIP By default, KVM pre-increments %rip before 8459 - exiting to userspace for an OUT instruction 8460 - to port 0x7e. When this quirk is disabled, 8461 - KVM does not pre-increment %rip before 8462 - exiting to userspace. 8458 + KVM_X86_QUIRK_OUT_7E_INC_RIP By default, KVM pre-increments %rip before 8459 + exiting to userspace for an OUT instruction 8460 + to port 0x7e. When this quirk is disabled, 8461 + KVM does not pre-increment %rip before 8462 + exiting to userspace. 8463 8463 8464 - KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT When this quirk is disabled, KVM sets 8465 - CPUID.01H:ECX[bit 3] (MONITOR/MWAIT) if 8466 - IA32_MISC_ENABLE[bit 18] (MWAIT) is set. 8467 - Additionally, when this quirk is disabled, 8468 - KVM clears CPUID.01H:ECX[bit 3] if 8469 - IA32_MISC_ENABLE[bit 18] is cleared. 8464 + KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT When this quirk is disabled, KVM sets 8465 + CPUID.01H:ECX[bit 3] (MONITOR/MWAIT) if 8466 + IA32_MISC_ENABLE[bit 18] (MWAIT) is set. 8467 + Additionally, when this quirk is disabled, 8468 + KVM clears CPUID.01H:ECX[bit 3] if 8469 + IA32_MISC_ENABLE[bit 18] is cleared. 8470 8470 8471 - KVM_X86_QUIRK_FIX_HYPERCALL_INSN By default, KVM rewrites guest 8472 - VMMCALL/VMCALL instructions to match the 8473 - vendor's hypercall instruction for the 8474 - system. When this quirk is disabled, KVM 8475 - will no longer rewrite invalid guest 8476 - hypercall instructions. Executing the 8477 - incorrect hypercall instruction will 8478 - generate a #UD within the guest. 8471 + KVM_X86_QUIRK_FIX_HYPERCALL_INSN By default, KVM rewrites guest 8472 + VMMCALL/VMCALL instructions to match the 8473 + vendor's hypercall instruction for the 8474 + system. When this quirk is disabled, KVM 8475 + will no longer rewrite invalid guest 8476 + hypercall instructions. Executing the 8477 + incorrect hypercall instruction will 8478 + generate a #UD within the guest. 
8479 8479 8480 - KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if 8481 - they are intercepted) as NOPs regardless of 8482 - whether or not MONITOR/MWAIT are supported 8483 - according to guest CPUID. When this quirk 8484 - is disabled and KVM_X86_DISABLE_EXITS_MWAIT 8485 - is not set (MONITOR/MWAIT are intercepted), 8486 - KVM will inject a #UD on MONITOR/MWAIT if 8487 - they're unsupported per guest CPUID. Note, 8488 - KVM will modify MONITOR/MWAIT support in 8489 - guest CPUID on writes to MISC_ENABLE if 8490 - KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is 8491 - disabled. 8480 + KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if 8481 + they are intercepted) as NOPs regardless of 8482 + whether or not MONITOR/MWAIT are supported 8483 + according to guest CPUID. When this quirk 8484 + is disabled and KVM_X86_DISABLE_EXITS_MWAIT 8485 + is not set (MONITOR/MWAIT are intercepted), 8486 + KVM will inject a #UD on MONITOR/MWAIT if 8487 + they're unsupported per guest CPUID. Note, 8488 + KVM will modify MONITOR/MWAIT support in 8489 + guest CPUID on writes to MISC_ENABLE if 8490 + KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is 8491 + disabled. 8492 8492 8493 - KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM 8494 - invalidates all SPTEs in all memslots and 8495 - address spaces when a memslot is deleted or 8496 - moved. When this quirk is disabled (or the 8497 - VM type isn't KVM_X86_DEFAULT_VM), KVM only 8498 - ensures the backing memory of the deleted 8499 - or moved memslot isn't reachable, i.e KVM 8500 - _may_ invalidate only SPTEs related to the 8501 - memslot. 8493 + KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM 8494 + invalidates all SPTEs in all memslots and 8495 + address spaces when a memslot is deleted or 8496 + moved. 
When this quirk is disabled (or the 8497 + VM type isn't KVM_X86_DEFAULT_VM), KVM only 8498 + ensures the backing memory of the deleted 8499 + or moved memslot isn't reachable, i.e KVM 8500 + _may_ invalidate only SPTEs related to the 8501 + memslot. 8502 8502 8503 - KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the 8504 - vCPU's MSR_IA32_PERF_CAPABILITIES (0x345), 8505 - MSR_IA32_ARCH_CAPABILITIES (0x10a), 8506 - MSR_PLATFORM_INFO (0xce), and all VMX MSRs 8507 - (0x480..0x492) to the maximal capabilities 8508 - supported by KVM. KVM also sets 8509 - MSR_IA32_UCODE_REV (0x8b) to an arbitrary 8510 - value (which is different for Intel vs. 8511 - AMD). Lastly, when guest CPUID is set (by 8512 - userspace), KVM modifies select VMX MSR 8513 - fields to force consistency between guest 8514 - CPUID and L2's effective ISA. When this 8515 - quirk is disabled, KVM zeroes the vCPU's MSR 8516 - values (with two exceptions, see below), 8517 - i.e. treats the feature MSRs like CPUID 8518 - leaves and gives userspace full control of 8519 - the vCPU model definition. This quirk does 8520 - not affect VMX MSRs CR0/CR4_FIXED1 (0x487 8521 - and 0x489), as KVM does now allow them to 8522 - be set by userspace (KVM sets them based on 8523 - guest CPUID, for safety purposes). 8503 + KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the 8504 + vCPU's MSR_IA32_PERF_CAPABILITIES (0x345), 8505 + MSR_IA32_ARCH_CAPABILITIES (0x10a), 8506 + MSR_PLATFORM_INFO (0xce), and all VMX MSRs 8507 + (0x480..0x492) to the maximal capabilities 8508 + supported by KVM. KVM also sets 8509 + MSR_IA32_UCODE_REV (0x8b) to an arbitrary 8510 + value (which is different for Intel vs. 8511 + AMD). Lastly, when guest CPUID is set (by 8512 + userspace), KVM modifies select VMX MSR 8513 + fields to force consistency between guest 8514 + CPUID and L2's effective ISA. 
When this 8515 + quirk is disabled, KVM zeroes the vCPU's MSR 8516 + values (with two exceptions, see below), 8517 + i.e. treats the feature MSRs like CPUID 8518 + leaves and gives userspace full control of 8519 + the vCPU model definition. This quirk does 8520 + not affect VMX MSRs CR0/CR4_FIXED1 (0x487 8521 + and 0x489), as KVM does now allow them to 8522 + be set by userspace (KVM sets them based on 8523 + guest CPUID, for safety purposes). 8524 8524 8525 - KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores 8526 - guest PAT and forces the effective memory 8527 - type to WB in EPT. The quirk is not available 8528 - on Intel platforms which are incapable of 8529 - safely honoring guest PAT (i.e., without CPU 8530 - self-snoop, KVM always ignores guest PAT and 8531 - forces effective memory type to WB). It is 8532 - also ignored on AMD platforms or, on Intel, 8533 - when a VM has non-coherent DMA devices 8534 - assigned; KVM always honors guest PAT in 8535 - such case. The quirk is needed to avoid 8536 - slowdowns on certain Intel Xeon platforms 8537 - (e.g. ICX, SPR) where self-snoop feature is 8538 - supported but UC is slow enough to cause 8539 - issues with some older guests that use 8540 - UC instead of WC to map the video RAM. 8541 - Userspace can disable the quirk to honor 8542 - guest PAT if it knows that there is no such 8543 - guest software, for example if it does not 8544 - expose a bochs graphics device (which is 8545 - known to have had a buggy driver). 8546 - =================================== ============================================ 8525 + KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores 8526 + guest PAT and forces the effective memory 8527 + type to WB in EPT. The quirk is not available 8528 + on Intel platforms which are incapable of 8529 + safely honoring guest PAT (i.e., without CPU 8530 + self-snoop, KVM always ignores guest PAT and 8531 + forces effective memory type to WB). 
It is 8532 + also ignored on AMD platforms or, on Intel, 8533 + when a VM has non-coherent DMA devices 8534 + assigned; KVM always honors guest PAT in 8535 + such case. The quirk is needed to avoid 8536 + slowdowns on certain Intel Xeon platforms 8537 + (e.g. ICX, SPR) where self-snoop feature is 8538 + supported but UC is slow enough to cause 8539 + issues with some older guests that use 8540 + UC instead of WC to map the video RAM. 8541 + Userspace can disable the quirk to honor 8542 + guest PAT if it knows that there is no such 8543 + guest software, for example if it does not 8544 + expose a bochs graphics device (which is 8545 + known to have had a buggy driver). 8546 + 8547 + KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM By default, KVM relaxes the consistency 8548 + check for GUEST_IA32_DEBUGCTL in vmcs12 8549 + to allow FREEZE_IN_SMM to be set. When 8550 + this quirk is disabled, KVM requires this 8551 + bit to be cleared. Note that the vmcs02 8552 + bit is still completely controlled by the 8553 + host, regardless of the quirk setting. 8554 + ======================================== ================================================ 8547 8555 8548 8556 7.32 KVM_CAP_MAX_VCPU_ID 8549 8557 ------------------------
+2
Documentation/virt/kvm/locking.rst
··· 17 17 18 18 - kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock 19 19 20 + - vcpu->mutex is taken outside kvm->slots_lock and kvm->slots_arch_lock 21 + 20 22 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring 21 23 them together is quite rare. 22 24
+6 -7
MAINTAINERS
··· 4022 4022 ASYMMETRIC KEYS 4023 4023 M: David Howells <dhowells@redhat.com> 4024 4024 M: Lukas Wunner <lukas@wunner.de> 4025 - M: Ignat Korchagin <ignat@cloudflare.com> 4025 + M: Ignat Korchagin <ignat@linux.win> 4026 4026 L: keyrings@vger.kernel.org 4027 4027 L: linux-crypto@vger.kernel.org 4028 4028 S: Maintained ··· 4035 4035 4036 4036 ASYMMETRIC KEYS - ECDSA 4037 4037 M: Lukas Wunner <lukas@wunner.de> 4038 - M: Ignat Korchagin <ignat@cloudflare.com> 4038 + M: Ignat Korchagin <ignat@linux.win> 4039 4039 R: Stefan Berger <stefanb@linux.ibm.com> 4040 4040 L: linux-crypto@vger.kernel.org 4041 4041 S: Maintained ··· 4045 4045 4046 4046 ASYMMETRIC KEYS - GOST 4047 4047 M: Lukas Wunner <lukas@wunner.de> 4048 - M: Ignat Korchagin <ignat@cloudflare.com> 4048 + M: Ignat Korchagin <ignat@linux.win> 4049 4049 L: linux-crypto@vger.kernel.org 4050 4050 S: Odd fixes 4051 4051 F: crypto/ecrdsa* 4052 4052 4053 4053 ASYMMETRIC KEYS - RSA 4054 4054 M: Lukas Wunner <lukas@wunner.de> 4055 - M: Ignat Korchagin <ignat@cloudflare.com> 4055 + M: Ignat Korchagin <ignat@linux.win> 4056 4056 L: linux-crypto@vger.kernel.org 4057 4057 S: Maintained 4058 4058 F: crypto/rsa* ··· 8626 8626 F: include/uapi/drm/lima_drm.h 8627 8627 8628 8628 DRM DRIVERS FOR LOONGSON 8629 - M: Sui Jingfeng <suijingfeng@loongson.cn> 8630 8629 L: dri-devel@lists.freedesktop.org 8631 - S: Supported 8630 + S: Orphan 8632 8631 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 8633 8632 F: drivers/gpu/drm/loongson/ 8634 8633 ··· 21934 21935 21935 21936 RADOS BLOCK DEVICE (RBD) 21936 21937 M: Ilya Dryomov <idryomov@gmail.com> 21937 - R: Dongsheng Yang <dongsheng.yang@easystack.cn> 21938 + R: Dongsheng Yang <dongsheng.yang@linux.dev> 21938 21939 L: ceph-devel@vger.kernel.org 21939 21940 S: Supported 21940 21941 W: http://ceph.com/
+5 -1
Makefile
··· 2 2 VERSION = 7 3 3 PATCHLEVEL = 0 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION* ··· 476 476 export rust_common_flags := --edition=2021 \ 477 477 -Zbinary_dep_depinfo=y \ 478 478 -Astable_features \ 479 + -Aunused_features \ 479 480 -Dnon_ascii_idents \ 480 481 -Dunsafe_op_in_unsafe_fn \ 481 482 -Wmissing_docs \ ··· 1114 1113 # change __FILE__ to the relative path to the source directory 1115 1114 ifdef building_out_of_srctree 1116 1115 KBUILD_CPPFLAGS += -fmacro-prefix-map=$(srcroot)/= 1116 + ifeq ($(call rustc-option-yn, --remap-path-scope=macro),y) 1117 + KBUILD_RUSTFLAGS += --remap-path-prefix=$(srcroot)/= --remap-path-scope=macro 1118 + endif 1117 1119 endif 1118 1120 1119 1121 # include additional Makefiles when needed
-1
arch/arm/configs/multi_v7_defconfig
··· 279 279 CONFIG_TI_CPTS=y 280 280 CONFIG_TI_KEYSTONE_NETCP=y 281 281 CONFIG_TI_KEYSTONE_NETCP_ETHSS=y 282 - CONFIG_TI_PRUSS=m 283 282 CONFIG_TI_PRUETH=m 284 283 CONFIG_XILINX_EMACLITE=y 285 284 CONFIG_SFP=m
+8 -8
arch/arm64/boot/dts/renesas/r8a78000.dtsi
··· 698 698 compatible = "renesas,scif-r8a78000", 699 699 "renesas,rcar-gen5-scif", "renesas,scif"; 700 700 reg = <0 0xc0700000 0 0x40>; 701 - interrupts = <GIC_SPI 4074 IRQ_TYPE_LEVEL_HIGH>; 701 + interrupts = <GIC_ESPI 10 IRQ_TYPE_LEVEL_HIGH>; 702 702 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 703 703 clock-names = "fck", "brg_int", "scif_clk"; 704 704 status = "disabled"; ··· 708 708 compatible = "renesas,scif-r8a78000", 709 709 "renesas,rcar-gen5-scif", "renesas,scif"; 710 710 reg = <0 0xc0704000 0 0x40>; 711 - interrupts = <GIC_SPI 4075 IRQ_TYPE_LEVEL_HIGH>; 711 + interrupts = <GIC_ESPI 11 IRQ_TYPE_LEVEL_HIGH>; 712 712 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 713 713 clock-names = "fck", "brg_int", "scif_clk"; 714 714 status = "disabled"; ··· 718 718 compatible = "renesas,scif-r8a78000", 719 719 "renesas,rcar-gen5-scif", "renesas,scif"; 720 720 reg = <0 0xc0708000 0 0x40>; 721 - interrupts = <GIC_SPI 4076 IRQ_TYPE_LEVEL_HIGH>; 721 + interrupts = <GIC_ESPI 12 IRQ_TYPE_LEVEL_HIGH>; 722 722 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 723 723 clock-names = "fck", "brg_int", "scif_clk"; 724 724 status = "disabled"; ··· 728 728 compatible = "renesas,scif-r8a78000", 729 729 "renesas,rcar-gen5-scif", "renesas,scif"; 730 730 reg = <0 0xc070c000 0 0x40>; 731 - interrupts = <GIC_SPI 4077 IRQ_TYPE_LEVEL_HIGH>; 731 + interrupts = <GIC_ESPI 13 IRQ_TYPE_LEVEL_HIGH>; 732 732 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 733 733 clock-names = "fck", "brg_int", "scif_clk"; 734 734 status = "disabled"; ··· 738 738 compatible = "renesas,hscif-r8a78000", 739 739 "renesas,rcar-gen5-hscif", "renesas,hscif"; 740 740 reg = <0 0xc0710000 0 0x60>; 741 - interrupts = <GIC_SPI 4078 IRQ_TYPE_LEVEL_HIGH>; 741 + interrupts = <GIC_ESPI 14 IRQ_TYPE_LEVEL_HIGH>; 742 742 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 743 743 clock-names = "fck", "brg_int", 
"scif_clk"; 744 744 status = "disabled"; ··· 748 748 compatible = "renesas,hscif-r8a78000", 749 749 "renesas,rcar-gen5-hscif", "renesas,hscif"; 750 750 reg = <0 0xc0714000 0 0x60>; 751 - interrupts = <GIC_SPI 4079 IRQ_TYPE_LEVEL_HIGH>; 751 + interrupts = <GIC_ESPI 15 IRQ_TYPE_LEVEL_HIGH>; 752 752 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 753 753 clock-names = "fck", "brg_int", "scif_clk"; 754 754 status = "disabled"; ··· 758 758 compatible = "renesas,hscif-r8a78000", 759 759 "renesas,rcar-gen5-hscif", "renesas,hscif"; 760 760 reg = <0 0xc0718000 0 0x60>; 761 - interrupts = <GIC_SPI 4080 IRQ_TYPE_LEVEL_HIGH>; 761 + interrupts = <GIC_ESPI 16 IRQ_TYPE_LEVEL_HIGH>; 762 762 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 763 763 clock-names = "fck", "brg_int", "scif_clk"; 764 764 status = "disabled"; ··· 768 768 compatible = "renesas,hscif-r8a78000", 769 769 "renesas,rcar-gen5-hscif", "renesas,hscif"; 770 770 reg = <0 0xc071c000 0 0x60>; 771 - interrupts = <GIC_SPI 4081 IRQ_TYPE_LEVEL_HIGH>; 771 + interrupts = <GIC_ESPI 17 IRQ_TYPE_LEVEL_HIGH>; 772 772 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 773 773 clock-names = "fck", "brg_int", "scif_clk"; 774 774 status = "disabled";
-30
arch/arm64/boot/dts/renesas/r9a09g057.dtsi
··· 581 581 status = "disabled"; 582 582 }; 583 583 584 - wdt0: watchdog@11c00400 { 585 - compatible = "renesas,r9a09g057-wdt"; 586 - reg = <0 0x11c00400 0 0x400>; 587 - clocks = <&cpg CPG_MOD 0x4b>, <&cpg CPG_MOD 0x4c>; 588 - clock-names = "pclk", "oscclk"; 589 - resets = <&cpg 0x75>; 590 - power-domains = <&cpg>; 591 - status = "disabled"; 592 - }; 593 - 594 584 wdt1: watchdog@14400000 { 595 585 compatible = "renesas,r9a09g057-wdt"; 596 586 reg = <0 0x14400000 0 0x400>; 597 587 clocks = <&cpg CPG_MOD 0x4d>, <&cpg CPG_MOD 0x4e>; 598 588 clock-names = "pclk", "oscclk"; 599 589 resets = <&cpg 0x76>; 600 - power-domains = <&cpg>; 601 - status = "disabled"; 602 - }; 603 - 604 - wdt2: watchdog@13000000 { 605 - compatible = "renesas,r9a09g057-wdt"; 606 - reg = <0 0x13000000 0 0x400>; 607 - clocks = <&cpg CPG_MOD 0x4f>, <&cpg CPG_MOD 0x50>; 608 - clock-names = "pclk", "oscclk"; 609 - resets = <&cpg 0x77>; 610 - power-domains = <&cpg>; 611 - status = "disabled"; 612 - }; 613 - 614 - wdt3: watchdog@13000400 { 615 - compatible = "renesas,r9a09g057-wdt"; 616 - reg = <0 0x13000400 0 0x400>; 617 - clocks = <&cpg CPG_MOD 0x51>, <&cpg CPG_MOD 0x52>; 618 - clock-names = "pclk", "oscclk"; 619 - resets = <&cpg 0x78>; 620 590 power-domains = <&cpg>; 621 591 status = "disabled"; 622 592 };
+2 -2
arch/arm64/boot/dts/renesas/r9a09g077.dtsi
··· 974 974 975 975 cpg: clock-controller@80280000 { 976 976 compatible = "renesas,r9a09g077-cpg-mssr"; 977 - reg = <0 0x80280000 0 0x1000>, 978 - <0 0x81280000 0 0x9000>; 977 + reg = <0 0x80280000 0 0x10000>, 978 + <0 0x81280000 0 0x10000>; 979 979 clocks = <&extal_clk>; 980 980 clock-names = "extal"; 981 981 #clock-cells = <2>;
+2 -2
arch/arm64/boot/dts/renesas/r9a09g087.dtsi
··· 977 977 978 978 cpg: clock-controller@80280000 { 979 979 compatible = "renesas,r9a09g087-cpg-mssr"; 980 - reg = <0 0x80280000 0 0x1000>, 981 - <0 0x81280000 0 0x9000>; 980 + reg = <0 0x80280000 0 0x10000>, 981 + <0 0x81280000 0 0x10000>; 982 982 clocks = <&extal_clk>; 983 983 clock-names = "extal"; 984 984 #clock-cells = <2>;
+1 -1
arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
··· 162 162 <100000000>; 163 163 renesas,settings = [ 164 164 80 00 11 19 4c 42 dc 2f 06 7d 20 1a 5f 1e f2 27 165 - 00 40 00 00 00 00 00 00 06 0c 19 02 3f f0 90 86 165 + 00 40 00 00 00 00 00 00 06 0c 19 02 3b f0 90 86 166 166 a0 80 30 30 9c 167 167 ]; 168 168 };
+1
arch/arm64/boot/dts/renesas/rzt2h-n2h-evk-common.dtsi
··· 53 53 regulator-max-microvolt = <3300000>; 54 54 gpios-states = <0>; 55 55 states = <3300000 0>, <1800000 1>; 56 + regulator-ramp-delay = <60>; 56 57 }; 57 58 #endif 58 59
+1
arch/arm64/boot/dts/renesas/rzv2-evk-cn15-sd.dtso
··· 25 25 regulator-max-microvolt = <3300000>; 26 26 gpios-states = <0>; 27 27 states = <3300000 0>, <1800000 1>; 28 + regulator-ramp-delay = <60>; 28 29 }; 29 30 }; 30 31
+23 -14
arch/arm64/crypto/aes-neonbs-glue.c
··· 76 76 unsigned int key_len) 77 77 { 78 78 struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm); 79 - struct crypto_aes_ctx rk; 79 + struct crypto_aes_ctx *rk; 80 80 int err; 81 81 82 - err = aes_expandkey(&rk, in_key, key_len); 82 + rk = kmalloc(sizeof(*rk), GFP_KERNEL); 83 + if (!rk) 84 + return -ENOMEM; 85 + 86 + err = aes_expandkey(rk, in_key, key_len); 83 87 if (err) 84 - return err; 88 + goto out; 85 89 86 90 ctx->rounds = 6 + key_len / 4; 87 91 88 92 scoped_ksimd() 89 - aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds); 90 - 91 - return 0; 93 + aesbs_convert_key(ctx->rk, rk->key_enc, ctx->rounds); 94 + out: 95 + kfree_sensitive(rk); 96 + return err; 92 97 } 93 98 94 99 static int __ecb_crypt(struct skcipher_request *req, ··· 138 133 unsigned int key_len) 139 134 { 140 135 struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); 141 - struct crypto_aes_ctx rk; 136 + struct crypto_aes_ctx *rk; 142 137 int err; 143 138 144 - err = aes_expandkey(&rk, in_key, key_len); 139 + rk = kmalloc(sizeof(*rk), GFP_KERNEL); 140 + if (!rk) 141 + return -ENOMEM; 142 + 143 + err = aes_expandkey(rk, in_key, key_len); 145 144 if (err) 146 - return err; 145 + goto out; 147 146 148 147 ctx->key.rounds = 6 + key_len / 4; 149 148 150 - memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc)); 149 + memcpy(ctx->enc, rk->key_enc, sizeof(ctx->enc)); 151 150 152 151 scoped_ksimd() 153 - aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds); 154 - memzero_explicit(&rk, sizeof(rk)); 155 - 156 - return 0; 152 + aesbs_convert_key(ctx->key.rk, rk->key_enc, ctx->key.rounds); 153 + out: 154 + kfree_sensitive(rk); 155 + return err; 157 156 } 158 157 159 158 static int cbc_encrypt(struct skcipher_request *req)
+3
arch/arm64/include/asm/kvm_host.h
··· 784 784 /* Number of debug breakpoints/watchpoints for this CPU (minus 1) */ 785 785 unsigned int debug_brps; 786 786 unsigned int debug_wrps; 787 + 788 + /* Last vgic_irq part of the AP list recorded in an LR */ 789 + struct vgic_irq *last_lr_irq; 787 790 }; 788 791 789 792 struct kvm_host_psci_config {
+9
arch/arm64/kernel/cpufeature.c
··· 2345 2345 !is_midr_in_range_list(has_vgic_v3)) 2346 2346 return false; 2347 2347 2348 + /* 2349 + * pKVM prevents late onlining of CPUs. This means that whatever 2350 + * state the capability is in after deprivilege cannot be affected 2351 + * by a new CPU booting -- this is garanteed to be a CPU we have 2352 + * already seen, and the cap is therefore unchanged. 2353 + */ 2354 + if (system_capabilities_finalized() && is_protected_kvm_enabled()) 2355 + return cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR); 2356 + 2348 2357 if (is_kernel_in_hyp_mode()) 2349 2358 res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2); 2350 2359 else
-2
arch/arm64/kvm/at.c
··· 1504 1504 fail = true; 1505 1505 } 1506 1506 1507 - isb(); 1508 - 1509 1507 if (!fail) 1510 1508 par = read_sysreg_par(); 1511 1509
+2 -2
arch/arm64/kvm/guest.c
··· 29 29 30 30 #include "trace.h" 31 31 32 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 32 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 33 33 KVM_GENERIC_VM_STATS() 34 34 }; 35 35 ··· 42 42 sizeof(kvm_vm_stats_desc), 43 43 }; 44 44 45 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 45 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 46 46 KVM_GENERIC_VCPU_STATS(), 47 47 STATS_DESC_COUNTER(VCPU, hvc_exit_stat), 48 48 STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
+1 -1
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 518 518 granule = kvm_granule_size(level); 519 519 cur.start = ALIGN_DOWN(addr, granule); 520 520 cur.end = cur.start + granule; 521 - if (!range_included(&cur, range)) 521 + if (!range_included(&cur, range) && level < KVM_PGTABLE_LAST_LEVEL) 522 522 continue; 523 523 *range = cur; 524 524 return 0;
+9 -5
arch/arm64/kvm/mmu.c
··· 1751 1751 1752 1752 force_pte = (max_map_size == PAGE_SIZE); 1753 1753 vma_pagesize = min_t(long, vma_pagesize, max_map_size); 1754 + vma_shift = __ffs(vma_pagesize); 1754 1755 } 1755 1756 1756 1757 /* ··· 1838 1837 if (exec_fault && s2_force_noncacheable) 1839 1838 ret = -ENOEXEC; 1840 1839 1841 - if (ret) { 1842 - kvm_release_page_unused(page); 1843 - return ret; 1844 - } 1840 + if (ret) 1841 + goto out_put_page; 1845 1842 1846 1843 /* 1847 1844 * Guest performs atomic/exclusive operations on memory with unsupported ··· 1849 1850 */ 1850 1851 if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) { 1851 1852 kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu)); 1852 - return 1; 1853 + ret = 1; 1854 + goto out_put_page; 1853 1855 } 1854 1856 1855 1857 if (nested) ··· 1936 1936 mark_page_dirty_in_slot(kvm, memslot, gfn); 1937 1937 1938 1938 return ret != -EAGAIN ? ret : 0; 1939 + 1940 + out_put_page: 1941 + kvm_release_page_unused(page); 1942 + return ret; 1939 1943 } 1940 1944 1941 1945 /* Resolve the access fault by making the page young again. */
+16 -11
arch/arm64/kvm/nested.c
··· 152 152 return 64 - wi->t0sz; 153 153 } 154 154 155 - static int check_base_s2_limits(struct s2_walk_info *wi, 155 + static int check_base_s2_limits(struct kvm_vcpu *vcpu, struct s2_walk_info *wi, 156 156 int level, int input_size, int stride) 157 157 { 158 - int start_size, ia_size; 158 + int start_size, pa_max; 159 159 160 - ia_size = get_ia_size(wi); 160 + pa_max = kvm_get_pa_bits(vcpu->kvm); 161 161 162 162 /* Check translation limits */ 163 163 switch (BIT(wi->pgshift)) { 164 164 case SZ_64K: 165 - if (level == 0 || (level == 1 && ia_size <= 42)) 165 + if (level == 0 || (level == 1 && pa_max <= 42)) 166 166 return -EFAULT; 167 167 break; 168 168 case SZ_16K: 169 - if (level == 0 || (level == 1 && ia_size <= 40)) 169 + if (level == 0 || (level == 1 && pa_max <= 40)) 170 170 return -EFAULT; 171 171 break; 172 172 case SZ_4K: 173 - if (level < 0 || (level == 0 && ia_size <= 42)) 173 + if (level < 0 || (level == 0 && pa_max <= 42)) 174 174 return -EFAULT; 175 175 break; 176 176 } 177 177 178 178 /* Check input size limits */ 179 - if (input_size > ia_size) 179 + if (input_size > pa_max) 180 180 return -EFAULT; 181 181 182 182 /* Check number of entries in starting level table */ ··· 269 269 if (input_size > 48 || input_size < 25) 270 270 return -EFAULT; 271 271 272 - ret = check_base_s2_limits(wi, level, input_size, stride); 273 - if (WARN_ON(ret)) 272 + ret = check_base_s2_limits(vcpu, wi, level, input_size, stride); 273 + if (WARN_ON(ret)) { 274 + out->esr = compute_fsc(0, ESR_ELx_FSC_FAULT); 274 275 return ret; 276 + } 275 277 276 278 base_lower_bound = 3 + input_size - ((3 - level) * stride + 277 279 wi->pgshift); 278 280 base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound); 279 281 280 282 if (check_output_size(wi, base_addr)) { 281 - out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); 283 + /* R_BFHQH */ 284 + out->esr = compute_fsc(0, ESR_ELx_FSC_ADDRSZ); 282 285 return 1; 283 286 } 284 287 ··· 296 293 297 294 paddr = base_addr | index; 298 295 ret 
= read_guest_s2_desc(vcpu, paddr, &desc, wi); 299 - if (ret < 0) 296 + if (ret < 0) { 297 + out->esr = ESR_ELx_FSC_SEA_TTW(level); 300 298 return ret; 299 + } 301 300 302 301 new_desc = desc; 303 302
+17 -17
arch/arm64/kvm/vgic/vgic-init.c
··· 143 143 kvm->arch.vgic.in_kernel = true; 144 144 kvm->arch.vgic.vgic_model = type; 145 145 kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST; 146 - 147 - kvm_for_each_vcpu(i, vcpu, kvm) { 148 - ret = vgic_allocate_private_irqs_locked(vcpu, type); 149 - if (ret) 150 - break; 151 - } 152 - 153 - if (ret) { 154 - kvm_for_each_vcpu(i, vcpu, kvm) { 155 - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 156 - kfree(vgic_cpu->private_irqs); 157 - vgic_cpu->private_irqs = NULL; 158 - } 159 - 160 - goto out_unlock; 161 - } 162 - 163 146 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; 164 147 165 148 aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC; ··· 158 175 159 176 kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0); 160 177 kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1); 178 + 179 + kvm_for_each_vcpu(i, vcpu, kvm) { 180 + ret = vgic_allocate_private_irqs_locked(vcpu, type); 181 + if (ret) 182 + break; 183 + } 184 + 185 + if (ret) { 186 + kvm_for_each_vcpu(i, vcpu, kvm) { 187 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 188 + kfree(vgic_cpu->private_irqs); 189 + vgic_cpu->private_irqs = NULL; 190 + } 191 + 192 + kvm->arch.vgic.vgic_model = 0; 193 + goto out_unlock; 194 + } 161 195 162 196 if (type == KVM_DEV_TYPE_ARM_VGIC_V3) 163 197 kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
+2 -2
arch/arm64/kvm/vgic/vgic-v2.c
··· 115 115 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 116 116 struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2; 117 117 u32 eoicount = FIELD_GET(GICH_HCR_EOICOUNT, cpuif->vgic_hcr); 118 - struct vgic_irq *irq; 118 + struct vgic_irq *irq = *host_data_ptr(last_lr_irq); 119 119 120 120 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 121 121 ··· 123 123 vgic_v2_fold_lr(vcpu, cpuif->vgic_lr[lr]); 124 124 125 125 /* See the GICv3 equivalent for the EOIcount handling rationale */ 126 - list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 126 + list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) { 127 127 u32 lr; 128 128 129 129 if (!eoicount) {
+6 -6
arch/arm64/kvm/vgic/vgic-v3.c
··· 148 148 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 149 149 struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3; 150 150 u32 eoicount = FIELD_GET(ICH_HCR_EL2_EOIcount, cpuif->vgic_hcr); 151 - struct vgic_irq *irq; 151 + struct vgic_irq *irq = *host_data_ptr(last_lr_irq); 152 152 153 153 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 154 154 ··· 158 158 /* 159 159 * EOIMode=0: use EOIcount to emulate deactivation. We are 160 160 * guaranteed to deactivate in reverse order of the activation, so 161 - * just pick one active interrupt after the other in the ap_list, 162 - * and replay the deactivation as if the CPU was doing it. We also 163 - * rely on priority drop to have taken place, and the list to be 164 - * sorted by priority. 161 + * just pick one active interrupt after the other in the tail part 162 + * of the ap_list, past the LRs, and replay the deactivation as if 163 + * the CPU was doing it. We also rely on priority drop to have taken 164 + * place, and the list to be sorted by priority. 165 165 */ 166 - list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 166 + list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) { 167 167 u64 lr; 168 168 169 169 /*
+6
arch/arm64/kvm/vgic/vgic.c
··· 814 814 815 815 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) 816 816 { 817 + if (!*host_data_ptr(last_lr_irq)) 818 + return; 819 + 817 820 if (kvm_vgic_global_state.type == VGIC_V2) 818 821 vgic_v2_fold_lr_state(vcpu); 819 822 else ··· 963 960 if (irqs_outside_lrs(&als)) 964 961 vgic_sort_ap_list(vcpu); 965 962 963 + *host_data_ptr(last_lr_irq) = NULL; 964 + 966 965 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 967 966 scoped_guard(raw_spinlock, &irq->irq_lock) { 968 967 if (likely(vgic_target_oracle(irq) == vcpu)) { 969 968 vgic_populate_lr(vcpu, irq, count++); 969 + *host_data_ptr(last_lr_irq) = irq; 970 970 } 971 971 } 972 972
+3
arch/loongarch/Kconfig
··· 304 304 config AS_HAS_LVZ_EXTENSION 305 305 def_bool $(as-instr,hvcl 0) 306 306 307 + config AS_HAS_SCQ_EXTENSION 308 + def_bool $(as-instr,sc.q \$t0$(comma)\$t1$(comma)\$t2) 309 + 307 310 config CC_HAS_ANNOTATE_TABLEJUMP 308 311 def_bool $(cc-option,-mannotate-tablejump) 309 312
+5
arch/loongarch/include/asm/cmpxchg.h
··· 238 238 arch_cmpxchg((ptr), (o), (n)); \ 239 239 }) 240 240 241 + #ifdef CONFIG_AS_HAS_SCQ_EXTENSION 242 + 241 243 union __u128_halves { 242 244 u128 full; 243 245 struct { ··· 292 290 BUILD_BUG_ON(sizeof(*(ptr)) != 16); \ 293 291 __arch_cmpxchg128(ptr, o, n, ""); \ 294 292 }) 293 + 294 + #endif /* CONFIG_AS_HAS_SCQ_EXTENSION */ 295 + 295 296 #else 296 297 #include <asm-generic/cmpxchg-local.h> 297 298 #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+12 -2
arch/loongarch/include/asm/uaccess.h
··· 253 253 \ 254 254 __get_kernel_common(*((type *)(dst)), sizeof(type), \ 255 255 (__force type *)(src)); \ 256 - if (unlikely(__gu_err)) \ 256 + if (unlikely(__gu_err)) { \ 257 + pr_info("%s: memory access failed, ecode 0x%x\n", \ 258 + __func__, read_csr_excode()); \ 259 + pr_info("%s: the caller is %pS\n", \ 260 + __func__, __builtin_return_address(0)); \ 257 261 goto err_label; \ 262 + } \ 258 263 } while (0) 259 264 260 265 #define __put_kernel_nofault(dst, src, type, err_label) \ ··· 269 264 \ 270 265 __pu_val = *(__force type *)(src); \ 271 266 __put_kernel_common(((type *)(dst)), sizeof(type)); \ 272 - if (unlikely(__pu_err)) \ 267 + if (unlikely(__pu_err)) { \ 268 + pr_info("%s: memory access failed, ecode 0x%x\n", \ 269 + __func__, read_csr_excode()); \ 270 + pr_info("%s: the caller is %pS\n", \ 271 + __func__, __builtin_return_address(0)); \ 273 272 goto err_label; \ 273 + } \ 274 274 } while (0) 275 275 276 276 extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
+25 -6
arch/loongarch/kernel/inst.c
··· 246 246 247 247 if (smp_processor_id() == copy->cpu) { 248 248 ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len); 249 - if (ret) 249 + if (ret) { 250 250 pr_err("%s: operation failed\n", __func__); 251 + return ret; 252 + } 251 253 } 252 254 253 255 flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len); 254 256 255 - return ret; 257 + return 0; 256 258 } 257 259 258 260 int larch_insn_text_copy(void *dst, void *src, size_t len) 259 261 { 260 262 int ret = 0; 263 + int err = 0; 261 264 size_t start, end; 262 265 struct insn_copy copy = { 263 266 .dst = dst, 264 267 .src = src, 265 268 .len = len, 266 - .cpu = smp_processor_id(), 269 + .cpu = raw_smp_processor_id(), 267 270 }; 271 + 272 + /* 273 + * Ensure copy.cpu won't be hot removed before stop_machine. 274 + * If it is removed nobody will really update the text. 275 + */ 276 + lockdep_assert_cpus_held(); 268 277 269 278 start = round_down((size_t)dst, PAGE_SIZE); 270 279 end = round_up((size_t)dst + len, PAGE_SIZE); 271 280 272 - set_memory_rw(start, (end - start) / PAGE_SIZE); 273 - ret = stop_machine(text_copy_cb, &copy, cpu_online_mask); 274 - set_memory_rox(start, (end - start) / PAGE_SIZE); 281 + err = set_memory_rw(start, (end - start) / PAGE_SIZE); 282 + if (err) { 283 + pr_info("%s: set_memory_rw() failed\n", __func__); 284 + return err; 285 + } 286 + 287 + ret = stop_machine_cpuslocked(text_copy_cb, &copy, cpu_online_mask); 288 + 289 + err = set_memory_rox(start, (end - start) / PAGE_SIZE); 290 + if (err) { 291 + pr_info("%s: set_memory_rox() failed\n", __func__); 292 + return err; 293 + } 275 294 276 295 return ret; 277 296 }
+1 -1
arch/loongarch/kvm/vcpu.c
··· 14 14 #define CREATE_TRACE_POINTS 15 15 #include "trace.h" 16 16 17 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 17 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 18 18 KVM_GENERIC_VCPU_STATS(), 19 19 STATS_DESC_COUNTER(VCPU, int_exits), 20 20 STATS_DESC_COUNTER(VCPU, idle_exits),
+3 -3
arch/loongarch/kvm/vm.c
··· 10 10 #include <asm/kvm_eiointc.h> 11 11 #include <asm/kvm_pch_pic.h> 12 12 13 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 13 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 14 14 KVM_GENERIC_VM_STATS(), 15 15 STATS_DESC_ICOUNTER(VM, pages), 16 16 STATS_DESC_ICOUNTER(VM, hugepages), ··· 49 49 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU); 50 50 51 51 /* Enable all PV features by default */ 52 - kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); 53 - kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); 52 + kvm->arch.pv_features |= BIT(KVM_FEATURE_IPI); 53 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); 54 54 if (kvm_pvtime_supported()) { 55 55 kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT); 56 56 kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+11
arch/loongarch/net/bpf_jit.c
··· 1379 1379 { 1380 1380 int ret; 1381 1381 1382 + cpus_read_lock(); 1382 1383 mutex_lock(&text_mutex); 1383 1384 ret = larch_insn_text_copy(dst, src, len); 1384 1385 mutex_unlock(&text_mutex); 1386 + cpus_read_unlock(); 1385 1387 1386 1388 return ret ? ERR_PTR(-EINVAL) : dst; 1387 1389 } ··· 1431 1429 if (ret) 1432 1430 return ret; 1433 1431 1432 + cpus_read_lock(); 1434 1433 mutex_lock(&text_mutex); 1435 1434 if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES)) 1436 1435 ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES); 1437 1436 mutex_unlock(&text_mutex); 1437 + cpus_read_unlock(); 1438 1438 1439 1439 return ret; 1440 1440 } ··· 1454 1450 for (i = 0; i < (len / sizeof(u32)); i++) 1455 1451 inst[i] = INSN_BREAK; 1456 1452 1453 + cpus_read_lock(); 1457 1454 mutex_lock(&text_mutex); 1458 1455 if (larch_insn_text_copy(dst, inst, len)) 1459 1456 ret = -EINVAL; 1460 1457 mutex_unlock(&text_mutex); 1458 + cpus_read_unlock(); 1461 1459 1462 1460 kvfree(inst); 1463 1461 ··· 1572 1566 void arch_free_bpf_trampoline(void *image, unsigned int size) 1573 1567 { 1574 1568 bpf_prog_pack_free(image, size); 1569 + } 1570 + 1571 + int arch_protect_bpf_trampoline(void *image, unsigned int size) 1572 + { 1573 + return 0; 1575 1574 } 1576 1575 1577 1576 /*
+2 -2
arch/mips/kvm/mips.c
··· 38 38 #define VECTORSPACING 0x100 /* for EI/VI mode */ 39 39 #endif 40 40 41 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 41 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 42 42 KVM_GENERIC_VM_STATS() 43 43 }; 44 44 ··· 51 51 sizeof(kvm_vm_stats_desc), 52 52 }; 53 53 54 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 54 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 55 55 KVM_GENERIC_VCPU_STATS(), 56 56 STATS_DESC_COUNTER(VCPU, wait_exits), 57 57 STATS_DESC_COUNTER(VCPU, cache_exits),
+47 -22
arch/powerpc/include/asm/uaccess.h
··· 15 15 #define TASK_SIZE_MAX TASK_SIZE_USER64 16 16 #endif 17 17 18 + /* Threshold above which VMX copy path is used */ 19 + #define VMX_COPY_THRESHOLD 3328 20 + 18 21 #include <asm-generic/access_ok.h> 19 22 20 23 /* ··· 329 326 extern unsigned long __copy_tofrom_user(void __user *to, 330 327 const void __user *from, unsigned long size); 331 328 332 - #ifdef __powerpc64__ 329 + unsigned long __copy_tofrom_user_base(void __user *to, 330 + const void __user *from, unsigned long size); 331 + 332 + unsigned long __copy_tofrom_user_power7_vmx(void __user *to, 333 + const void __user *from, unsigned long size); 334 + 335 + static __always_inline bool will_use_vmx(unsigned long n) 336 + { 337 + return IS_ENABLED(CONFIG_ALTIVEC) && cpu_has_feature(CPU_FTR_VMX_COPY) && 338 + n > VMX_COPY_THRESHOLD; 339 + } 340 + 341 + static __always_inline unsigned long 342 + raw_copy_tofrom_user(void __user *to, const void __user *from, 343 + unsigned long n, unsigned long dir) 344 + { 345 + unsigned long ret; 346 + 347 + if (will_use_vmx(n) && enter_vmx_usercopy()) { 348 + allow_user_access(to, dir); 349 + ret = __copy_tofrom_user_power7_vmx(to, from, n); 350 + prevent_user_access(dir); 351 + exit_vmx_usercopy(); 352 + 353 + if (unlikely(ret)) { 354 + allow_user_access(to, dir); 355 + ret = __copy_tofrom_user_base(to, from, n); 356 + prevent_user_access(dir); 357 + } 358 + return ret; 359 + } 360 + 361 + allow_user_access(to, dir); 362 + ret = __copy_tofrom_user(to, from, n); 363 + prevent_user_access(dir); 364 + return ret; 365 + } 366 + 367 + #ifdef CONFIG_PPC64 333 368 static inline unsigned long 334 369 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) 335 370 { 336 - unsigned long ret; 337 - 338 371 barrier_nospec(); 339 - allow_user_access(to, KUAP_READ_WRITE); 340 - ret = __copy_tofrom_user(to, from, n); 341 - prevent_user_access(KUAP_READ_WRITE); 342 - return ret; 372 + return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE); 343 373 } 344 - #endif 
/* __powerpc64__ */ 374 + #endif /* CONFIG_PPC64 */ 345 375 346 - static inline unsigned long raw_copy_from_user(void *to, 347 - const void __user *from, unsigned long n) 376 + static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) 348 377 { 349 - unsigned long ret; 350 - 351 - allow_user_access(NULL, KUAP_READ); 352 - ret = __copy_tofrom_user((__force void __user *)to, from, n); 353 - prevent_user_access(KUAP_READ); 354 - return ret; 378 + return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ); 355 379 } 356 380 357 381 static inline unsigned long 358 382 raw_copy_to_user(void __user *to, const void *from, unsigned long n) 359 383 { 360 - unsigned long ret; 361 - 362 - allow_user_access(to, KUAP_WRITE); 363 - ret = __copy_tofrom_user(to, (__force const void __user *)from, n); 364 - prevent_user_access(KUAP_WRITE); 365 - return ret; 384 + return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE); 366 385 } 367 386 368 387 unsigned long __arch_clear_user(void __user *addr, unsigned long size);
+1 -1
arch/powerpc/kernel/iommu.c
··· 1159 1159 struct device *dev, 1160 1160 struct iommu_domain *old) 1161 1161 { 1162 - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 1162 + struct iommu_domain *domain = iommu_driver_get_domain_for_dev(dev); 1163 1163 struct iommu_table_group *table_group; 1164 1164 struct iommu_group *grp; 1165 1165
-10
arch/powerpc/kernel/setup-common.c
··· 35 35 #include <linux/of_irq.h> 36 36 #include <linux/hugetlb.h> 37 37 #include <linux/pgtable.h> 38 - #include <asm/kexec.h> 39 38 #include <asm/io.h> 40 39 #include <asm/paca.h> 41 40 #include <asm/processor.h> ··· 993 994 smp_release_cpus(); 994 995 995 996 initmem_init(); 996 - 997 - /* 998 - * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM and 999 - * hugetlb. These must be called after initmem_init(), so that 1000 - * pageblock_order is initialised. 1001 - */ 1002 - fadump_cma_init(); 1003 - kdump_cma_reserve(); 1004 - kvm_cma_reserve(); 1005 997 1006 998 early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); 1007 999
+2 -2
arch/powerpc/kvm/book3s.c
··· 38 38 39 39 /* #define EXIT_DEBUG */ 40 40 41 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 41 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 42 42 KVM_GENERIC_VM_STATS(), 43 43 STATS_DESC_ICOUNTER(VM, num_2M_pages), 44 44 STATS_DESC_ICOUNTER(VM, num_1G_pages) ··· 53 53 sizeof(kvm_vm_stats_desc), 54 54 }; 55 55 56 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 56 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 57 57 KVM_GENERIC_VCPU_STATS(), 58 58 STATS_DESC_COUNTER(VCPU, sum_exits), 59 59 STATS_DESC_COUNTER(VCPU, mmio_exits),
+2 -2
arch/powerpc/kvm/booke.c
··· 36 36 37 37 unsigned long kvmppc_booke_handlers; 38 38 39 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 39 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 40 40 KVM_GENERIC_VM_STATS(), 41 41 STATS_DESC_ICOUNTER(VM, num_2M_pages), 42 42 STATS_DESC_ICOUNTER(VM, num_1G_pages) ··· 51 51 sizeof(kvm_vm_stats_desc), 52 52 }; 53 53 54 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 54 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 55 55 KVM_GENERIC_VCPU_STATS(), 56 56 STATS_DESC_COUNTER(VCPU, sum_exits), 57 57 STATS_DESC_COUNTER(VCPU, mmio_exits),
+1 -5
arch/powerpc/kvm/e500.h
··· 39 39 /* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */ 40 40 #define E500_TLB_MAS2_ATTR (0x7f) 41 41 42 - struct tlbe_ref { 42 + struct tlbe_priv { 43 43 kvm_pfn_t pfn; /* valid only for TLB0, except briefly */ 44 44 unsigned int flags; /* E500_TLB_* */ 45 - }; 46 - 47 - struct tlbe_priv { 48 - struct tlbe_ref ref; 49 45 }; 50 46 51 47 #ifdef CONFIG_KVM_E500V2
+2 -2
arch/powerpc/kvm/e500_mmu.c
··· 920 920 vcpu_e500->gtlb_offset[0] = 0; 921 921 vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; 922 922 923 - vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_ref, 923 + vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_priv, 924 924 vcpu_e500->gtlb_params[0].entries); 925 925 if (!vcpu_e500->gtlb_priv[0]) 926 926 goto free_vcpu; 927 927 928 - vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_ref, 928 + vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_priv, 929 929 vcpu_e500->gtlb_params[1].entries); 930 930 if (!vcpu_e500->gtlb_priv[1]) 931 931 goto free_vcpu;
+44 -47
arch/powerpc/kvm/e500_mmu_host.c
··· 189 189 { 190 190 struct kvm_book3e_206_tlb_entry *gtlbe = 191 191 get_entry(vcpu_e500, tlbsel, esel); 192 - struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; 192 + struct tlbe_priv *tlbe = &vcpu_e500->gtlb_priv[tlbsel][esel]; 193 193 194 194 /* Don't bother with unmapped entries */ 195 - if (!(ref->flags & E500_TLB_VALID)) { 196 - WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), 197 - "%s: flags %x\n", __func__, ref->flags); 195 + if (!(tlbe->flags & E500_TLB_VALID)) { 196 + WARN(tlbe->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), 197 + "%s: flags %x\n", __func__, tlbe->flags); 198 198 WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); 199 199 } 200 200 201 - if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { 201 + if (tlbsel == 1 && tlbe->flags & E500_TLB_BITMAP) { 202 202 u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; 203 203 int hw_tlb_indx; 204 204 unsigned long flags; ··· 216 216 } 217 217 mb(); 218 218 vcpu_e500->g2h_tlb1_map[esel] = 0; 219 - ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); 219 + tlbe->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); 220 220 local_irq_restore(flags); 221 221 } 222 222 223 - if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { 223 + if (tlbsel == 1 && tlbe->flags & E500_TLB_TLB0) { 224 224 /* 225 225 * TLB1 entry is backed by 4k pages. This should happen 226 226 * rarely and is not worth optimizing. Invalidate everything. 
227 227 */ 228 228 kvmppc_e500_tlbil_all(vcpu_e500); 229 - ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); 229 + tlbe->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); 230 230 } 231 231 232 232 /* 233 233 * If TLB entry is still valid then it's a TLB0 entry, and thus 234 234 * backed by at most one host tlbe per shadow pid 235 235 */ 236 - if (ref->flags & E500_TLB_VALID) 236 + if (tlbe->flags & E500_TLB_VALID) 237 237 kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); 238 238 239 239 /* Mark the TLB as not backed by the host anymore */ 240 - ref->flags = 0; 240 + tlbe->flags = 0; 241 241 } 242 242 243 243 static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) ··· 245 245 return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); 246 246 } 247 247 248 - static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, 249 - struct kvm_book3e_206_tlb_entry *gtlbe, 250 - kvm_pfn_t pfn, unsigned int wimg, 251 - bool writable) 248 + static inline void kvmppc_e500_tlbe_setup(struct tlbe_priv *tlbe, 249 + struct kvm_book3e_206_tlb_entry *gtlbe, 250 + kvm_pfn_t pfn, unsigned int wimg, 251 + bool writable) 252 252 { 253 - ref->pfn = pfn; 254 - ref->flags = E500_TLB_VALID; 253 + tlbe->pfn = pfn; 254 + tlbe->flags = E500_TLB_VALID; 255 255 if (writable) 256 - ref->flags |= E500_TLB_WRITABLE; 256 + tlbe->flags |= E500_TLB_WRITABLE; 257 257 258 258 /* Use guest supplied MAS2_G and MAS2_E */ 259 - ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; 259 + tlbe->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; 260 260 } 261 261 262 - static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) 262 + static inline void kvmppc_e500_tlbe_release(struct tlbe_priv *tlbe) 263 263 { 264 - if (ref->flags & E500_TLB_VALID) { 264 + if (tlbe->flags & E500_TLB_VALID) { 265 265 /* FIXME: don't log bogus pfn for TLB1 */ 266 - trace_kvm_booke206_ref_release(ref->pfn, ref->flags); 267 - ref->flags = 0; 266 + trace_kvm_booke206_ref_release(tlbe->pfn, tlbe->flags); 267 + tlbe->flags = 0; 268 268 } 269 269 
} 270 270 ··· 284 284 int i; 285 285 286 286 for (tlbsel = 0; tlbsel <= 1; tlbsel++) { 287 - for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { 288 - struct tlbe_ref *ref = 289 - &vcpu_e500->gtlb_priv[tlbsel][i].ref; 290 - kvmppc_e500_ref_release(ref); 291 - } 287 + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) 288 + kvmppc_e500_tlbe_release(&vcpu_e500->gtlb_priv[tlbsel][i]); 292 289 } 293 290 } 294 291 ··· 301 304 static void kvmppc_e500_setup_stlbe( 302 305 struct kvm_vcpu *vcpu, 303 306 struct kvm_book3e_206_tlb_entry *gtlbe, 304 - int tsize, struct tlbe_ref *ref, u64 gvaddr, 307 + int tsize, struct tlbe_priv *tlbe, u64 gvaddr, 305 308 struct kvm_book3e_206_tlb_entry *stlbe) 306 309 { 307 - kvm_pfn_t pfn = ref->pfn; 310 + kvm_pfn_t pfn = tlbe->pfn; 308 311 u32 pr = vcpu->arch.shared->msr & MSR_PR; 309 - bool writable = !!(ref->flags & E500_TLB_WRITABLE); 312 + bool writable = !!(tlbe->flags & E500_TLB_WRITABLE); 310 313 311 - BUG_ON(!(ref->flags & E500_TLB_VALID)); 314 + BUG_ON(!(tlbe->flags & E500_TLB_VALID)); 312 315 313 316 /* Force IPROT=0 for all guest mappings. 
*/ 314 317 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; 315 - stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); 318 + stlbe->mas2 = (gvaddr & MAS2_EPN) | (tlbe->flags & E500_TLB_MAS2_ATTR); 316 319 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 317 320 e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr); 318 321 } ··· 320 323 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 321 324 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 322 325 int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, 323 - struct tlbe_ref *ref) 326 + struct tlbe_priv *tlbe) 324 327 { 325 328 struct kvm_memory_slot *slot; 326 329 unsigned int psize; ··· 452 455 } 453 456 } 454 457 455 - kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable); 458 + kvmppc_e500_tlbe_setup(tlbe, gtlbe, pfn, wimg, writable); 456 459 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 457 - ref, gvaddr, stlbe); 460 + tlbe, gvaddr, stlbe); 458 461 writable = tlbe_is_writable(stlbe); 459 462 460 463 /* Clear i-cache for new pages */ ··· 471 474 struct kvm_book3e_206_tlb_entry *stlbe) 472 475 { 473 476 struct kvm_book3e_206_tlb_entry *gtlbe; 474 - struct tlbe_ref *ref; 477 + struct tlbe_priv *tlbe; 475 478 int stlbsel = 0; 476 479 int sesel = 0; 477 480 int r; 478 481 479 482 gtlbe = get_entry(vcpu_e500, 0, esel); 480 - ref = &vcpu_e500->gtlb_priv[0][esel].ref; 483 + tlbe = &vcpu_e500->gtlb_priv[0][esel]; 481 484 482 485 r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), 483 486 get_tlb_raddr(gtlbe) >> PAGE_SHIFT, 484 - gtlbe, 0, stlbe, ref); 487 + gtlbe, 0, stlbe, tlbe); 485 488 if (r) 486 489 return r; 487 490 ··· 491 494 } 492 495 493 496 static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, 494 - struct tlbe_ref *ref, 497 + struct tlbe_priv *tlbe, 495 498 int esel) 496 499 { 497 500 unsigned int sesel = vcpu_e500->host_tlb1_nv++; ··· 504 507 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); 505 508 } 
506 509 507 - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; 510 + vcpu_e500->gtlb_priv[1][esel].flags |= E500_TLB_BITMAP; 508 511 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; 509 512 vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; 510 - WARN_ON(!(ref->flags & E500_TLB_VALID)); 513 + WARN_ON(!(tlbe->flags & E500_TLB_VALID)); 511 514 512 515 return sesel; 513 516 } ··· 519 522 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 520 523 struct kvm_book3e_206_tlb_entry *stlbe, int esel) 521 524 { 522 - struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; 525 + struct tlbe_priv *tlbe = &vcpu_e500->gtlb_priv[1][esel]; 523 526 int sesel; 524 527 int r; 525 528 526 529 r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, 527 - ref); 530 + tlbe); 528 531 if (r) 529 532 return r; 530 533 531 534 /* Use TLB0 when we can only map a page with 4k */ 532 535 if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { 533 - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; 536 + vcpu_e500->gtlb_priv[1][esel].flags |= E500_TLB_TLB0; 534 537 write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); 535 538 return 0; 536 539 } 537 540 538 541 /* Otherwise map into TLB1 */ 539 - sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); 542 + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, tlbe, esel); 540 543 write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); 541 544 542 545 return 0; ··· 558 561 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; 559 562 560 563 /* Triggers after clear_tlb_privs or on initial mapping */ 561 - if (!(priv->ref.flags & E500_TLB_VALID)) { 564 + if (!(priv->flags & E500_TLB_VALID)) { 562 565 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); 563 566 } else { 564 567 kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, 565 - &priv->ref, eaddr, &stlbe); 568 + priv, eaddr, &stlbe); 566 569 write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); 567 570 } 568 571 break;
+1
arch/powerpc/lib/copyuser_64.S
··· 562 562 li r5,4096 563 563 b .Ldst_aligned 564 564 EXPORT_SYMBOL(__copy_tofrom_user) 565 + EXPORT_SYMBOL(__copy_tofrom_user_base)
+15 -30
arch/powerpc/lib/copyuser_power7.S
··· 5 5 * 6 6 * Author: Anton Blanchard <anton@au.ibm.com> 7 7 */ 8 + #include <linux/export.h> 8 9 #include <asm/ppc_asm.h> 9 - 10 - #ifndef SELFTEST_CASE 11 - /* 0 == don't use VMX, 1 == use VMX */ 12 - #define SELFTEST_CASE 0 13 - #endif 14 10 15 11 #ifdef __BIG_ENDIAN__ 16 12 #define LVS(VRT,RA,RB) lvsl VRT,RA,RB ··· 43 47 ld r15,STK_REG(R15)(r1) 44 48 ld r14,STK_REG(R14)(r1) 45 49 .Ldo_err3: 46 - bl CFUNC(exit_vmx_usercopy) 50 + ld r6,STK_REG(R31)(r1) /* original destination pointer */ 51 + ld r5,STK_REG(R29)(r1) /* original number of bytes */ 52 + subf r7,r6,r3 /* #bytes copied */ 53 + subf r3,r7,r5 /* #bytes not copied in r3 */ 47 54 ld r0,STACKFRAMESIZE+16(r1) 48 55 mtlr r0 49 - b .Lexit 56 + addi r1,r1,STACKFRAMESIZE 57 + blr 50 58 #endif /* CONFIG_ALTIVEC */ 51 59 52 60 .Ldo_err2: ··· 74 74 75 75 _GLOBAL(__copy_tofrom_user_power7) 76 76 cmpldi r5,16 77 - cmpldi cr1,r5,3328 78 77 79 78 std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 80 79 std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) ··· 81 82 82 83 blt .Lshort_copy 83 84 84 - #ifdef CONFIG_ALTIVEC 85 - test_feature = SELFTEST_CASE 86 - BEGIN_FTR_SECTION 87 - bgt cr1,.Lvmx_copy 88 - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 89 - #endif 90 85 91 86 .Lnonvmx_copy: 92 87 /* Get the source 8B aligned */ ··· 256 263 15: li r3,0 257 264 blr 258 265 259 - .Lunwind_stack_nonvmx_copy: 260 - addi r1,r1,STACKFRAMESIZE 261 - b .Lnonvmx_copy 262 - 263 - .Lvmx_copy: 264 266 #ifdef CONFIG_ALTIVEC 267 + _GLOBAL(__copy_tofrom_user_power7_vmx) 265 268 mflr r0 266 269 std r0,16(r1) 267 270 stdu r1,-STACKFRAMESIZE(r1) 268 - bl CFUNC(enter_vmx_usercopy) 269 - cmpwi cr1,r3,0 270 - ld r0,STACKFRAMESIZE+16(r1) 271 - ld r3,STK_REG(R31)(r1) 272 - ld r4,STK_REG(R30)(r1) 273 - ld r5,STK_REG(R29)(r1) 274 - mtlr r0 275 271 272 + std r3,STK_REG(R31)(r1) 273 + std r5,STK_REG(R29)(r1) 276 274 /* 277 275 * We prefetch both the source and destination using enhanced touch 278 276 * instructions. 
We use a stream ID of 0 for the load side and ··· 283 299 ori r10,r7,1 /* stream=1 */ 284 300 285 301 DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8) 286 - 287 - beq cr1,.Lunwind_stack_nonvmx_copy 288 302 289 303 /* 290 304 * If source and destination are not relatively aligned we use a ··· 460 478 err3; stb r0,0(r3) 461 479 462 480 15: addi r1,r1,STACKFRAMESIZE 463 - b CFUNC(exit_vmx_usercopy) /* tail call optimise */ 481 + li r3,0 482 + blr 464 483 465 484 .Lvmx_unaligned_copy: 466 485 /* Get the destination 16B aligned */ ··· 664 681 err3; stb r0,0(r3) 665 682 666 683 15: addi r1,r1,STACKFRAMESIZE 667 - b CFUNC(exit_vmx_usercopy) /* tail call optimise */ 684 + li r3,0 685 + blr 686 + EXPORT_SYMBOL(__copy_tofrom_user_power7_vmx) 668 687 #endif /* CONFIG_ALTIVEC */
+2
arch/powerpc/lib/vmx-helper.c
··· 27 27 28 28 return 1; 29 29 } 30 + EXPORT_SYMBOL(enter_vmx_usercopy); 30 31 31 32 /* 32 33 * This function must return 0 because we tail call optimise when calling ··· 50 49 set_dec(1); 51 50 return 0; 52 51 } 52 + EXPORT_SYMBOL(exit_vmx_usercopy); 53 53 54 54 int enter_vmx_ops(void) 55 55 {
+14
arch/powerpc/mm/mem.c
··· 30 30 #include <asm/setup.h> 31 31 #include <asm/fixmap.h> 32 32 33 + #include <asm/fadump.h> 34 + #include <asm/kexec.h> 35 + #include <asm/kvm_ppc.h> 36 + 33 37 #include <mm/mmu_decl.h> 34 38 35 39 unsigned long long memory_limit __initdata; ··· 272 268 273 269 void __init arch_mm_preinit(void) 274 270 { 271 + 272 + /* 273 + * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM 274 + * and hugetlb. These must be called after pageblock_order is 275 + * initialised. 276 + */ 277 + fadump_cma_init(); 278 + kdump_cma_reserve(); 279 + kvm_cma_reserve(); 280 + 275 281 /* 276 282 * book3s is limited to 16 page sizes due to encoding this in 277 283 * a 4-bit field for slices.
+5
arch/powerpc/perf/callchain.c
··· 103 103 void 104 104 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 105 105 { 106 + perf_callchain_store(entry, perf_arch_instruction_pointer(regs)); 107 + 108 + if (!current->mm) 109 + return; 110 + 106 111 if (!is_32bit_task()) 107 112 perf_callchain_user_64(entry, regs); 108 113 else
-1
arch/powerpc/perf/callchain_32.c
··· 142 142 next_ip = perf_arch_instruction_pointer(regs); 143 143 lr = regs->link; 144 144 sp = regs->gpr[1]; 145 - perf_callchain_store(entry, next_ip); 146 145 147 146 while (entry->nr < entry->max_stack) { 148 147 fp = (unsigned int __user *) (unsigned long) sp;
-1
arch/powerpc/perf/callchain_64.c
··· 77 77 next_ip = perf_arch_instruction_pointer(regs); 78 78 lr = regs->link; 79 79 sp = regs->gpr[1]; 80 - perf_callchain_store(entry, next_ip); 81 80 82 81 while (entry->nr < entry->max_stack) { 83 82 fp = (unsigned long __user *) sp;
+2
arch/riscv/boot/dts/microchip/mpfs.dtsi
··· 428 428 clocks = <&clkcfg CLK_CAN0>, <&clkcfg CLK_MSSPLL3>; 429 429 interrupt-parent = <&plic>; 430 430 interrupts = <56>; 431 + resets = <&mss_top_sysreg CLK_CAN0>; 431 432 status = "disabled"; 432 433 }; 433 434 ··· 438 437 clocks = <&clkcfg CLK_CAN1>, <&clkcfg CLK_MSSPLL3>; 439 438 interrupt-parent = <&plic>; 440 439 interrupts = <57>; 440 + resets = <&mss_top_sysreg CLK_CAN1>; 441 441 status = "disabled"; 442 442 }; 443 443
+13 -2
arch/riscv/kvm/aia.c
··· 13 13 #include <linux/irqchip/riscv-imsic.h> 14 14 #include <linux/irqdomain.h> 15 15 #include <linux/kvm_host.h> 16 + #include <linux/nospec.h> 16 17 #include <linux/percpu.h> 17 18 #include <linux/spinlock.h> 18 19 #include <asm/cpufeature.h> ··· 183 182 unsigned long *out_val) 184 183 { 185 184 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; 185 + unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long); 186 186 187 - if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long)) 187 + if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) 188 188 return -ENOENT; 189 + if (reg_num >= regs_max) 190 + return -ENOENT; 191 + 192 + reg_num = array_index_nospec(reg_num, regs_max); 189 193 190 194 *out_val = 0; 191 195 if (kvm_riscv_aia_available()) ··· 204 198 unsigned long val) 205 199 { 206 200 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; 201 + unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long); 207 202 208 - if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long)) 203 + if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) 209 204 return -ENOENT; 205 + if (reg_num >= regs_max) 206 + return -ENOENT; 207 + 208 + reg_num = array_index_nospec(reg_num, regs_max); 210 209 211 210 if (kvm_riscv_aia_available()) { 212 211 ((unsigned long *)csr)[reg_num] = val;
+12 -11
arch/riscv/kvm/aia_aplic.c
··· 10 10 #include <linux/irqchip/riscv-aplic.h> 11 11 #include <linux/kvm_host.h> 12 12 #include <linux/math.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/spinlock.h> 14 15 #include <linux/swab.h> 15 16 #include <kvm/iodev.h> ··· 46 45 47 46 if (!irq || aplic->nr_irqs <= irq) 48 47 return 0; 49 - irqd = &aplic->irqs[irq]; 48 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 50 49 51 50 raw_spin_lock_irqsave(&irqd->lock, flags); 52 51 ret = irqd->sourcecfg; ··· 62 61 63 62 if (!irq || aplic->nr_irqs <= irq) 64 63 return; 65 - irqd = &aplic->irqs[irq]; 64 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 66 65 67 66 if (val & APLIC_SOURCECFG_D) 68 67 val = 0; ··· 82 81 83 82 if (!irq || aplic->nr_irqs <= irq) 84 83 return 0; 85 - irqd = &aplic->irqs[irq]; 84 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 86 85 87 86 raw_spin_lock_irqsave(&irqd->lock, flags); 88 87 ret = irqd->target; ··· 98 97 99 98 if (!irq || aplic->nr_irqs <= irq) 100 99 return; 101 - irqd = &aplic->irqs[irq]; 100 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 102 101 103 102 val &= APLIC_TARGET_EIID_MASK | 104 103 (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) | ··· 117 116 118 117 if (!irq || aplic->nr_irqs <= irq) 119 118 return false; 120 - irqd = &aplic->irqs[irq]; 119 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 121 120 122 121 raw_spin_lock_irqsave(&irqd->lock, flags); 123 122 ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? 
true : false; ··· 133 132 134 133 if (!irq || aplic->nr_irqs <= irq) 135 134 return; 136 - irqd = &aplic->irqs[irq]; 135 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 137 136 138 137 raw_spin_lock_irqsave(&irqd->lock, flags); 139 138 ··· 171 170 172 171 if (!irq || aplic->nr_irqs <= irq) 173 172 return false; 174 - irqd = &aplic->irqs[irq]; 173 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 175 174 176 175 raw_spin_lock_irqsave(&irqd->lock, flags); 177 176 ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false; ··· 187 186 188 187 if (!irq || aplic->nr_irqs <= irq) 189 188 return; 190 - irqd = &aplic->irqs[irq]; 189 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 191 190 192 191 raw_spin_lock_irqsave(&irqd->lock, flags); 193 192 if (enabled) ··· 206 205 207 206 if (!irq || aplic->nr_irqs <= irq) 208 207 return false; 209 - irqd = &aplic->irqs[irq]; 208 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 210 209 211 210 raw_spin_lock_irqsave(&irqd->lock, flags); 212 211 ··· 255 254 for (irq = first; irq <= last; irq++) { 256 255 if (!irq || aplic->nr_irqs <= irq) 257 256 continue; 258 - irqd = &aplic->irqs[irq]; 257 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 259 258 260 259 raw_spin_lock_irqsave(&irqd->lock, flags); 261 260 ··· 284 283 285 284 if (!aplic || !source || (aplic->nr_irqs <= source)) 286 285 return -ENODEV; 287 - irqd = &aplic->irqs[source]; 286 + irqd = &aplic->irqs[array_index_nospec(source, aplic->nr_irqs)]; 288 287 ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false; 289 288 290 289 raw_spin_lock_irqsave(&irqd->lock, flags);
+14 -4
arch/riscv/kvm/aia_device.c
··· 11 11 #include <linux/irqchip/riscv-imsic.h> 12 12 #include <linux/kvm_host.h> 13 13 #include <linux/uaccess.h> 14 + #include <linux/cpufeature.h> 14 15 15 16 static int aia_create(struct kvm_device *dev, u32 type) 16 17 { ··· 22 21 23 22 if (irqchip_in_kernel(kvm)) 24 23 return -EEXIST; 24 + 25 + if (!riscv_isa_extension_available(NULL, SSAIA)) 26 + return -ENODEV; 25 27 26 28 ret = -EBUSY; 27 29 if (kvm_trylock_all_vcpus(kvm)) ··· 441 437 442 438 static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 443 439 { 444 - int nr_vcpus; 440 + int nr_vcpus, r = -ENXIO; 445 441 446 442 switch (attr->group) { 447 443 case KVM_DEV_RISCV_AIA_GRP_CONFIG: ··· 470 466 } 471 467 break; 472 468 case KVM_DEV_RISCV_AIA_GRP_APLIC: 473 - return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr); 469 + mutex_lock(&dev->kvm->lock); 470 + r = kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr); 471 + mutex_unlock(&dev->kvm->lock); 472 + break; 474 473 case KVM_DEV_RISCV_AIA_GRP_IMSIC: 475 - return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr); 474 + mutex_lock(&dev->kvm->lock); 475 + r = kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr); 476 + mutex_unlock(&dev->kvm->lock); 477 + break; 476 478 } 477 479 478 - return -ENXIO; 480 + return r; 479 481 } 480 482 481 483 struct kvm_device_ops kvm_riscv_aia_device_ops = {
+4
arch/riscv/kvm/aia_imsic.c
··· 908 908 int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC; 909 909 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; 910 910 911 + /* If IMSIC vCPU state not initialized then forward to user space */ 912 + if (!imsic) 913 + return KVM_INSN_EXIT_TO_USER_SPACE; 914 + 911 915 if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) { 912 916 /* Read pending and enabled interrupt with highest priority */ 913 917 topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
+5 -1
arch/riscv/kvm/mmu.c
··· 245 245 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) 246 246 { 247 247 struct kvm_gstage gstage; 248 + bool mmu_locked; 248 249 249 250 if (!kvm->arch.pgd) 250 251 return false; ··· 254 253 gstage.flags = 0; 255 254 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); 256 255 gstage.pgd = kvm->arch.pgd; 256 + mmu_locked = spin_trylock(&kvm->mmu_lock); 257 257 kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT, 258 258 (range->end - range->start) << PAGE_SHIFT, 259 259 range->may_block); 260 + if (mmu_locked) 261 + spin_unlock(&kvm->mmu_lock); 260 262 return false; 261 263 } 262 264 ··· 539 535 goto out_unlock; 540 536 541 537 /* Check if we are backed by a THP and thus use block mapping if possible */ 542 - if (vma_pagesize == PAGE_SIZE) 538 + if (!logging && (vma_pagesize == PAGE_SIZE)) 543 539 vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa); 544 540 545 541 if (writable) {
+1 -1
arch/riscv/kvm/vcpu.c
··· 24 24 #define CREATE_TRACE_POINTS 25 25 #include "trace.h" 26 26 27 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 27 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 28 28 KVM_GENERIC_VCPU_STATS(), 29 29 STATS_DESC_COUNTER(VCPU, ecall_exit_stat), 30 30 STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+13 -4
arch/riscv/kvm/vcpu_fp.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/err.h> 12 12 #include <linux/kvm_host.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/uaccess.h> 14 15 #include <asm/cpufeature.h> 15 16 ··· 94 93 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) 95 94 reg_val = &cntx->fp.f.fcsr; 96 95 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && 97 - reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) 96 + reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) { 97 + reg_num = array_index_nospec(reg_num, 98 + ARRAY_SIZE(cntx->fp.f.f)); 98 99 reg_val = &cntx->fp.f.f[reg_num]; 99 - else 100 + } else 100 101 return -ENOENT; 101 102 } else if ((rtype == KVM_REG_RISCV_FP_D) && 102 103 riscv_isa_extension_available(vcpu->arch.isa, d)) { ··· 110 107 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { 111 108 if (KVM_REG_SIZE(reg->id) != sizeof(u64)) 112 109 return -EINVAL; 110 + reg_num = array_index_nospec(reg_num, 111 + ARRAY_SIZE(cntx->fp.d.f)); 113 112 reg_val = &cntx->fp.d.f[reg_num]; 114 113 } else 115 114 return -ENOENT; ··· 143 138 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) 144 139 reg_val = &cntx->fp.f.fcsr; 145 140 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && 146 - reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) 141 + reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) { 142 + reg_num = array_index_nospec(reg_num, 143 + ARRAY_SIZE(cntx->fp.f.f)); 147 144 reg_val = &cntx->fp.f.f[reg_num]; 148 - else 145 + } else 149 146 return -ENOENT; 150 147 } else if ((rtype == KVM_REG_RISCV_FP_D) && 151 148 riscv_isa_extension_available(vcpu->arch.isa, d)) { ··· 159 152 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { 160 153 if (KVM_REG_SIZE(reg->id) != sizeof(u64)) 161 154 return -EINVAL; 155 + reg_num = array_index_nospec(reg_num, 156 + ARRAY_SIZE(cntx->fp.d.f)); 162 157 reg_val = &cntx->fp.d.f[reg_num]; 163 158 } else 164 159 return -ENOENT;
+36 -18
arch/riscv/kvm/vcpu_onereg.c
··· 10 10 #include <linux/bitops.h> 11 11 #include <linux/errno.h> 12 12 #include <linux/err.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/uaccess.h> 14 15 #include <linux/kvm_host.h> 15 16 #include <asm/cacheflush.h> ··· 128 127 kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr)) 129 128 return -ENOENT; 130 129 130 + kvm_ext = array_index_nospec(kvm_ext, ARRAY_SIZE(kvm_isa_ext_arr)); 131 131 *guest_ext = kvm_isa_ext_arr[kvm_ext]; 132 132 switch (*guest_ext) { 133 133 case RISCV_ISA_EXT_SMNPM: ··· 445 443 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | 446 444 KVM_REG_SIZE_MASK | 447 445 KVM_REG_RISCV_CORE); 446 + unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long); 448 447 unsigned long reg_val; 449 448 450 449 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) 451 450 return -EINVAL; 452 - if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) 451 + if (reg_num >= regs_max) 453 452 return -ENOENT; 453 + 454 + reg_num = array_index_nospec(reg_num, regs_max); 454 455 455 456 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) 456 457 reg_val = cntx->sepc; ··· 481 476 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | 482 477 KVM_REG_SIZE_MASK | 483 478 KVM_REG_RISCV_CORE); 479 + unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long); 484 480 unsigned long reg_val; 485 481 486 482 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) 487 483 return -EINVAL; 488 - if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) 484 + if (reg_num >= regs_max) 489 485 return -ENOENT; 486 + 487 + reg_num = array_index_nospec(reg_num, regs_max); 490 488 491 489 if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id))) 492 490 return -EFAULT; ··· 515 507 unsigned long *out_val) 516 508 { 517 509 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; 510 + unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long); 518 511 519 - if (reg_num >= sizeof(struct kvm_riscv_csr) / 
sizeof(unsigned long)) 512 + if (reg_num >= regs_max) 520 513 return -ENOENT; 514 + 515 + reg_num = array_index_nospec(reg_num, regs_max); 521 516 522 517 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { 523 518 kvm_riscv_vcpu_flush_interrupts(vcpu); ··· 537 526 unsigned long reg_val) 538 527 { 539 528 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; 529 + unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long); 540 530 541 - if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) 531 + if (reg_num >= regs_max) 542 532 return -ENOENT; 533 + 534 + reg_num = array_index_nospec(reg_num, regs_max); 543 535 544 536 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { 545 537 reg_val &= VSIP_VALID_MASK; ··· 562 548 unsigned long reg_val) 563 549 { 564 550 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr; 551 + unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) / 552 + sizeof(unsigned long); 565 553 566 - if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) / 567 - sizeof(unsigned long)) 568 - return -EINVAL; 554 + if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) 555 + return -ENOENT; 556 + if (reg_num >= regs_max) 557 + return -ENOENT; 558 + 559 + reg_num = array_index_nospec(reg_num, regs_max); 569 560 570 561 ((unsigned long *)csr)[reg_num] = reg_val; 571 562 return 0; ··· 581 562 unsigned long *out_val) 582 563 { 583 564 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr; 565 + unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) / 566 + sizeof(unsigned long); 584 567 585 - if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) / 586 - sizeof(unsigned long)) 587 - return -EINVAL; 568 + if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) 569 + return -ENOENT; 570 + if (reg_num >= regs_max) 571 + return -ENOENT; 572 + 573 + reg_num = array_index_nospec(reg_num, regs_max); 588 574 589 575 *out_val = ((unsigned long *)csr)[reg_num]; 590 576 return 0; ··· 619 595 rc = 
kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val); 620 596 break; 621 597 case KVM_REG_RISCV_CSR_SMSTATEEN: 622 - rc = -EINVAL; 623 - if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) 624 - rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, 625 - &reg_val); 598 + rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, &reg_val); 626 599 break; 627 600 default: 628 601 rc = -ENOENT; ··· 661 640 rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val); 662 641 break; 663 642 case KVM_REG_RISCV_CSR_SMSTATEEN: 664 - rc = -EINVAL; 665 - if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) 666 - rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, 667 - reg_val); 643 + rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val); 668 644 break; 669 645 default: 670 646 rc = -ENOENT;
+12 -4
arch/riscv/kvm/vcpu_pmu.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/err.h> 12 12 #include <linux/kvm_host.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/perf/riscv_pmu.h> 14 15 #include <asm/csr.h> 15 16 #include <asm/kvm_vcpu_sbi.h> ··· 88 87 89 88 static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code) 90 89 { 91 - return hw_event_perf_map[sbi_event_code]; 90 + return hw_event_perf_map[array_index_nospec(sbi_event_code, 91 + SBI_PMU_HW_GENERAL_MAX)]; 92 92 } 93 93 94 94 static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code) ··· 220 218 return -EINVAL; 221 219 } 222 220 221 + cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS); 223 222 pmc = &kvpmu->pmc[cidx]; 224 223 225 224 if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW) ··· 247 244 return -EINVAL; 248 245 } 249 246 247 + cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS); 250 248 pmc = &kvpmu->pmc[cidx]; 251 249 252 250 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { ··· 524 520 { 525 521 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 526 522 527 - if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) { 523 + if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) { 528 524 retdata->err_val = SBI_ERR_INVALID_PARAM; 529 525 return 0; 530 526 } 531 527 528 + cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS); 532 529 retdata->out_val = kvpmu->pmc[cidx].cinfo.value; 533 530 534 531 return 0; ··· 564 559 } 565 560 /* Start the counters that have been configured and requested by the guest */ 566 561 for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { 567 - pmc_index = i + ctr_base; 562 + pmc_index = array_index_nospec(i + ctr_base, 563 + RISCV_KVM_MAX_COUNTERS); 568 564 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) 569 565 continue; 570 566 /* The guest started the counter again. 
Reset the overflow status */ ··· 636 630 637 631 /* Stop the counters that have been configured and requested by the guest */ 638 632 for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { 639 - pmc_index = i + ctr_base; 633 + pmc_index = array_index_nospec(i + ctr_base, 634 + RISCV_KVM_MAX_COUNTERS); 640 635 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) 641 636 continue; 642 637 pmc = &kvpmu->pmc[pmc_index]; ··· 768 761 } 769 762 } 770 763 764 + ctr_idx = array_index_nospec(ctr_idx, RISCV_KVM_MAX_COUNTERS); 771 765 pmc = &kvpmu->pmc[ctr_idx]; 772 766 pmc->idx = ctr_idx; 773 767
+1 -1
arch/riscv/kvm/vm.c
··· 13 13 #include <linux/kvm_host.h> 14 14 #include <asm/kvm_mmu.h> 15 15 16 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 16 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 17 17 KVM_GENERIC_VM_STATS() 18 18 }; 19 19 static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+8 -6
arch/s390/kernel/irq.c
··· 147 147 bool from_idle; 148 148 149 149 from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT); 150 - if (from_idle) { 150 + if (from_idle) 151 151 update_timer_idle(); 152 - regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 153 - } 154 152 155 153 irq_enter_rcu(); 156 154 ··· 174 176 175 177 set_irq_regs(old_regs); 176 178 irqentry_exit(regs, state); 179 + 180 + if (from_idle) 181 + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 177 182 } 178 183 179 184 void noinstr do_ext_irq(struct pt_regs *regs) ··· 186 185 bool from_idle; 187 186 188 187 from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT); 189 - if (from_idle) { 188 + if (from_idle) 190 189 update_timer_idle(); 191 - regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 192 - } 193 190 194 191 irq_enter_rcu(); 195 192 ··· 209 210 irq_exit_rcu(); 210 211 set_irq_regs(old_regs); 211 212 irqentry_exit(regs, state); 213 + 214 + if (from_idle) 215 + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 212 216 } 213 217 214 218 static void show_msi_interrupt(struct seq_file *p, int irq)
+2 -2
arch/s390/kvm/kvm-s390.c
··· 65 65 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \ 66 66 (KVM_MAX_VCPUS + LOCAL_IRQS)) 67 67 68 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 68 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 69 69 KVM_GENERIC_VM_STATS(), 70 70 STATS_DESC_COUNTER(VM, inject_io), 71 71 STATS_DESC_COUNTER(VM, inject_float_mchk), ··· 91 91 sizeof(kvm_vm_stats_desc), 92 92 }; 93 93 94 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 94 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 95 95 KVM_GENERIC_VCPU_STATS(), 96 96 STATS_DESC_COUNTER(VCPU, exit_userspace), 97 97 STATS_DESC_COUNTER(VCPU, exit_null),
+2 -1
arch/x86/include/asm/kvm_host.h
··· 2485 2485 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \ 2486 2486 KVM_X86_QUIRK_SLOT_ZAP_ALL | \ 2487 2487 KVM_X86_QUIRK_STUFF_FEATURE_MSRS | \ 2488 - KVM_X86_QUIRK_IGNORE_GUEST_PAT) 2488 + KVM_X86_QUIRK_IGNORE_GUEST_PAT | \ 2489 + KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM) 2489 2490 2490 2491 #define KVM_X86_CONDITIONAL_QUIRKS \ 2491 2492 (KVM_X86_QUIRK_CD_NW_CLEARED | \
+1
arch/x86/include/uapi/asm/kvm.h
··· 476 476 #define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7) 477 477 #define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8) 478 478 #define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9) 479 + #define KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM (1 << 10) 479 480 480 481 #define KVM_STATE_NESTED_FORMAT_VMX 0 481 482 #define KVM_STATE_NESTED_FORMAT_SVM 1
+6
arch/x86/kernel/apic/apic.c
··· 1894 1894 1895 1895 static inline void try_to_enable_x2apic(int remap_mode) { } 1896 1896 static inline void __x2apic_enable(void) { } 1897 + static inline void __x2apic_disable(void) { } 1897 1898 #endif /* !CONFIG_X86_X2APIC */ 1898 1899 1899 1900 void __init enable_IR_x2apic(void) ··· 2457 2456 if (x2apic_mode) { 2458 2457 __x2apic_enable(); 2459 2458 } else { 2459 + if (x2apic_enabled()) { 2460 + pr_warn_once("x2apic: re-enabled by firmware during resume. Disabling\n"); 2461 + __x2apic_disable(); 2462 + } 2463 + 2460 2464 /* 2461 2465 * Make sure the APICBASE points to the right address 2462 2466 *
+4 -1
arch/x86/kvm/cpuid.c
··· 776 776 #define SYNTHESIZED_F(name) \ 777 777 ({ \ 778 778 kvm_cpu_cap_synthesized |= feature_bit(name); \ 779 - F(name); \ 779 + \ 780 + BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \ 781 + if (boot_cpu_has(X86_FEATURE_##name)) \ 782 + F(name); \ 780 783 }) 781 784 782 785 /*
+5 -4
arch/x86/kvm/hyperv.c
··· 1981 1981 if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) 1982 1982 goto out_flush_all; 1983 1983 1984 - if (is_noncanonical_invlpg_address(entries[i], vcpu)) 1985 - continue; 1986 - 1987 1984 /* 1988 1985 * Lower 12 bits of 'address' encode the number of additional 1989 1986 * pages to flush. 1990 1987 */ 1991 1988 gva = entries[i] & PAGE_MASK; 1992 - for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) 1989 + for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) { 1990 + if (is_noncanonical_invlpg_address(gva + j * PAGE_SIZE, vcpu)) 1991 + continue; 1992 + 1993 1993 kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE); 1994 + } 1994 1995 1995 1996 ++vcpu->stat.tlb_flush; 1996 1997 }
+2 -1
arch/x86/kvm/ioapic.c
··· 321 321 idx = srcu_read_lock(&kvm->irq_srcu); 322 322 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); 323 323 if (gsi != -1) 324 - hlist_for_each_entry_rcu(kimn, &ioapic->mask_notifier_list, link) 324 + hlist_for_each_entry_srcu(kimn, &ioapic->mask_notifier_list, link, 325 + srcu_read_lock_held(&kvm->irq_srcu)) 325 326 if (kimn->irq == gsi) 326 327 kimn->func(kimn, mask); 327 328 srcu_read_unlock(&kvm->irq_srcu, idx);
+6 -3
arch/x86/kvm/svm/avic.c
··· 189 189 struct kvm_vcpu *vcpu = &svm->vcpu; 190 190 191 191 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); 192 - 193 192 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; 194 193 vmcb->control.avic_physical_id |= avic_get_max_physical_id(vcpu); 195 - 196 194 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; 195 + 196 + svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); 197 197 198 198 /* 199 199 * Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR ··· 225 225 226 226 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); 227 227 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; 228 + 229 + if (!sev_es_guest(svm->vcpu.kvm)) 230 + svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 228 231 229 232 /* 230 233 * If running nested and the guest uses its own MSR bitmap, there ··· 371 368 vmcb->control.avic_physical_id = __sme_set(__pa(kvm_svm->avic_physical_id_table)); 372 369 vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE; 373 370 374 - if (kvm_apicv_activated(svm->vcpu.kvm)) 371 + if (kvm_vcpu_apicv_active(&svm->vcpu)) 375 372 avic_activate_vmcb(svm); 376 373 else 377 374 avic_deactivate_vmcb(svm);
+10 -2
arch/x86/kvm/svm/nested.c
··· 418 418 return __nested_vmcb_check_controls(vcpu, ctl); 419 419 } 420 420 421 + int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu) 422 + { 423 + if (!nested_vmcb_check_save(vcpu) || 424 + !nested_vmcb_check_controls(vcpu)) 425 + return -EINVAL; 426 + 427 + return 0; 428 + } 429 + 421 430 /* 422 431 * If a feature is not advertised to L1, clear the corresponding vmcb12 423 432 * intercept. ··· 1037 1028 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); 1038 1029 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); 1039 1030 1040 - if (!nested_vmcb_check_save(vcpu) || 1041 - !nested_vmcb_check_controls(vcpu)) { 1031 + if (nested_svm_check_cached_vmcb12(vcpu) < 0) { 1042 1032 vmcb12->control.exit_code = SVM_EXIT_ERR; 1043 1033 vmcb12->control.exit_info_1 = 0; 1044 1034 vmcb12->control.exit_info_2 = 0;
+11 -6
arch/x86/kvm/svm/svm.c
··· 1077 1077 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); 1078 1078 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); 1079 1079 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); 1080 - if (!kvm_vcpu_apicv_active(vcpu)) 1081 - svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 1080 + svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 1082 1081 1083 1082 set_dr_intercepts(svm); 1084 1083 ··· 1188 1189 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS)) 1189 1190 svm->vmcb->control.erap_ctl |= ERAP_CONTROL_ALLOW_LARGER_RAP; 1190 1191 1191 - if (kvm_vcpu_apicv_active(vcpu)) 1192 + if (enable_apicv && irqchip_in_kernel(vcpu->kvm)) 1192 1193 avic_init_vmcb(svm, vmcb); 1193 1194 1194 1195 if (vnmi) ··· 2673 2674 2674 2675 static int cr8_write_interception(struct kvm_vcpu *vcpu) 2675 2676 { 2677 + u8 cr8_prev = kvm_get_cr8(vcpu); 2676 2678 int r; 2677 2679 2678 - u8 cr8_prev = kvm_get_cr8(vcpu); 2680 + WARN_ON_ONCE(kvm_vcpu_apicv_active(vcpu)); 2681 + 2679 2682 /* instruction emulation calls kvm_set_cr8() */ 2680 2683 r = cr_interception(vcpu); 2681 2684 if (lapic_in_kernel(vcpu)) ··· 4880 4879 vmcb12 = map.hva; 4881 4880 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); 4882 4881 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); 4883 - ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false); 4884 4882 4885 - if (ret) 4883 + if (nested_svm_check_cached_vmcb12(vcpu) < 0) 4886 4884 goto unmap_save; 4887 4885 4886 + if (enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, 4887 + vmcb12, false) != 0) 4888 + goto unmap_save; 4889 + 4890 + ret = 0; 4888 4891 svm->nested.nested_run_pending = 1; 4889 4892 4890 4893 unmap_save:
+1
arch/x86/kvm/svm/svm.h
··· 797 797 798 798 int nested_svm_exit_handled(struct vcpu_svm *svm); 799 799 int nested_svm_check_permissions(struct kvm_vcpu *vcpu); 800 + int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu); 800 801 int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, 801 802 bool has_error_code, u32 error_code); 802 803 int nested_svm_exit_special(struct vcpu_svm *svm);
+45 -16
arch/x86/kvm/vmx/nested.c
··· 3300 3300 if (CC(vmcs12->guest_cr4 & X86_CR4_CET && !(vmcs12->guest_cr0 & X86_CR0_WP))) 3301 3301 return -EINVAL; 3302 3302 3303 - if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 3304 - (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) || 3305 - CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false)))) 3306 - return -EINVAL; 3303 + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 3304 + u64 debugctl = vmcs12->guest_ia32_debugctl; 3305 + 3306 + /* 3307 + * FREEZE_IN_SMM is not virtualized, but allow L1 to set it in 3308 + * vmcs12's DEBUGCTL under a quirk for backwards compatibility. 3309 + * Note that the quirk only relaxes the consistency check. The 3310 + * vmcc02 bit is still under the control of the host. In 3311 + * particular, if a host administrator decides to clear the bit, 3312 + * then L1 has no say in the matter. 3313 + */ 3314 + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM)) 3315 + debugctl &= ~DEBUGCTLMSR_FREEZE_IN_SMM; 3316 + 3317 + if (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) || 3318 + CC(!vmx_is_valid_debugctl(vcpu, debugctl, false))) 3319 + return -EINVAL; 3320 + } 3307 3321 3308 3322 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 3309 3323 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) ··· 6856 6842 free_nested(vcpu); 6857 6843 } 6858 6844 6845 + int nested_vmx_check_restored_vmcs12(struct kvm_vcpu *vcpu) 6846 + { 6847 + enum vm_entry_failure_code ignored; 6848 + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6849 + 6850 + if (nested_cpu_has_shadow_vmcs(vmcs12) && 6851 + vmcs12->vmcs_link_pointer != INVALID_GPA) { 6852 + struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6853 + 6854 + if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6855 + !shadow_vmcs12->hdr.shadow_vmcs) 6856 + return -EINVAL; 6857 + } 6858 + 6859 + if (nested_vmx_check_controls(vcpu, vmcs12) || 6860 + nested_vmx_check_host_state(vcpu, vmcs12) || 6861 + nested_vmx_check_guest_state(vcpu, vmcs12, 
&ignored)) 6862 + return -EINVAL; 6863 + 6864 + return 0; 6865 + } 6866 + 6859 6867 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6860 6868 struct kvm_nested_state __user *user_kvm_nested_state, 6861 6869 struct kvm_nested_state *kvm_state) 6862 6870 { 6863 6871 struct vcpu_vmx *vmx = to_vmx(vcpu); 6864 6872 struct vmcs12 *vmcs12; 6865 - enum vm_entry_failure_code ignored; 6866 6873 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6867 6874 &user_kvm_nested_state->data.vmx[0]; 6868 6875 int ret; ··· 7014 6979 vmx->nested.mtf_pending = 7015 6980 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 7016 6981 7017 - ret = -EINVAL; 7018 6982 if (nested_cpu_has_shadow_vmcs(vmcs12) && 7019 6983 vmcs12->vmcs_link_pointer != INVALID_GPA) { 7020 6984 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 7021 6985 6986 + ret = -EINVAL; 7022 6987 if (kvm_state->size < 7023 6988 sizeof(*kvm_state) + 7024 6989 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 7025 6990 goto error_guest_mode; 7026 6991 6992 + ret = -EFAULT; 7027 6993 if (copy_from_user(shadow_vmcs12, 7028 6994 user_vmx_nested_state->shadow_vmcs12, 7029 - sizeof(*shadow_vmcs12))) { 7030 - ret = -EFAULT; 7031 - goto error_guest_mode; 7032 - } 7033 - 7034 - if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 7035 - !shadow_vmcs12->hdr.shadow_vmcs) 6995 + sizeof(*shadow_vmcs12))) 7036 6996 goto error_guest_mode; 7037 6997 } 7038 6998 ··· 7038 7008 kvm_state->hdr.vmx.preemption_timer_deadline; 7039 7009 } 7040 7010 7041 - if (nested_vmx_check_controls(vcpu, vmcs12) || 7042 - nested_vmx_check_host_state(vcpu, vmcs12) || 7043 - nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 7011 + ret = nested_vmx_check_restored_vmcs12(vcpu); 7012 + if (ret < 0) 7044 7013 goto error_guest_mode; 7045 7014 7046 7015 vmx->nested.dirty_vmcs12 = true;
+1
arch/x86/kvm/vmx/nested.h
··· 22 22 void nested_vmx_hardware_unsetup(void); 23 23 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); 24 24 void nested_vmx_set_vmcs_shadowing_bitmap(void); 25 + int nested_vmx_check_restored_vmcs12(struct kvm_vcpu *vcpu); 25 26 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu); 26 27 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 27 28 bool from_vmentry);
+7 -3
arch/x86/kvm/vmx/vmx.c
··· 1149 1149 } 1150 1150 1151 1151 vmx_add_auto_msr(&m->guest, msr, guest_val, VM_ENTRY_MSR_LOAD_COUNT, kvm); 1152 - vmx_add_auto_msr(&m->guest, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm); 1152 + vmx_add_auto_msr(&m->host, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm); 1153 1153 } 1154 1154 1155 1155 static bool update_transition_efer(struct vcpu_vmx *vmx) ··· 8528 8528 } 8529 8529 8530 8530 if (vmx->nested.smm.guest_mode) { 8531 + /* Triple fault if the state is invalid. */ 8532 + if (nested_vmx_check_restored_vmcs12(vcpu) < 0) 8533 + return 1; 8534 + 8531 8535 ret = nested_vmx_enter_non_root_mode(vcpu, false); 8532 - if (ret) 8533 - return ret; 8536 + if (ret != NVMX_VMENTRY_SUCCESS) 8537 + return 1; 8534 8538 8535 8539 vmx->nested.nested_run_pending = 1; 8536 8540 vmx->nested.smm.guest_mode = false;
+2 -2
arch/x86/kvm/x86.c
··· 243 243 bool __read_mostly enable_device_posted_irqs = true; 244 244 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_device_posted_irqs); 245 245 246 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 246 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 247 247 KVM_GENERIC_VM_STATS(), 248 248 STATS_DESC_COUNTER(VM, mmu_shadow_zapped), 249 249 STATS_DESC_COUNTER(VM, mmu_pte_write), ··· 269 269 sizeof(kvm_vm_stats_desc), 270 270 }; 271 271 272 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 272 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 273 273 KVM_GENERIC_VCPU_STATS(), 274 274 STATS_DESC_COUNTER(VCPU, pf_taken), 275 275 STATS_DESC_COUNTER(VCPU, pf_fixed),
+2 -12
drivers/accel/amdxdna/aie2_ctx.c
··· 165 165 166 166 trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq); 167 167 168 - amdxdna_pm_suspend_put(job->hwctx->client->xdna); 169 168 job->hwctx->priv->completed++; 170 169 dma_fence_signal(fence); 171 170 ··· 289 290 struct dma_fence *fence; 290 291 int ret; 291 292 292 - ret = amdxdna_pm_resume_get(hwctx->client->xdna); 293 - if (ret) 293 + if (!hwctx->priv->mbox_chann) 294 294 return NULL; 295 295 296 - if (!hwctx->priv->mbox_chann) { 297 - amdxdna_pm_suspend_put(hwctx->client->xdna); 298 - return NULL; 299 - } 300 - 301 - if (!mmget_not_zero(job->mm)) { 302 - amdxdna_pm_suspend_put(hwctx->client->xdna); 296 + if (!mmget_not_zero(job->mm)) 303 297 return ERR_PTR(-ESRCH); 304 - } 305 298 306 299 kref_get(&job->refcnt); 307 300 fence = dma_fence_get(job->fence); ··· 324 333 325 334 out: 326 335 if (ret) { 327 - amdxdna_pm_suspend_put(hwctx->client->xdna); 328 336 dma_fence_put(job->fence); 329 337 aie2_job_put(job); 330 338 mmput(job->mm);
+10
drivers/accel/amdxdna/amdxdna_ctx.c
··· 17 17 #include "amdxdna_ctx.h" 18 18 #include "amdxdna_gem.h" 19 19 #include "amdxdna_pci_drv.h" 20 + #include "amdxdna_pm.h" 20 21 21 22 #define MAX_HWCTX_ID 255 22 23 #define MAX_ARG_COUNT 4095 ··· 446 445 void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job) 447 446 { 448 447 trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release"); 448 + amdxdna_pm_suspend_put(job->hwctx->client->xdna); 449 449 amdxdna_arg_bos_put(job); 450 450 amdxdna_gem_put_obj(job->cmd_bo); 451 451 dma_fence_put(job->fence); ··· 482 480 if (ret) { 483 481 XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret); 484 482 goto cmd_put; 483 + } 484 + 485 + ret = amdxdna_pm_resume_get(xdna); 486 + if (ret) { 487 + XDNA_ERR(xdna, "Resume failed, ret %d", ret); 488 + goto put_bos; 485 489 } 486 490 487 491 idx = srcu_read_lock(&client->hwctx_srcu); ··· 530 522 dma_fence_put(job->fence); 531 523 unlock_srcu: 532 524 srcu_read_unlock(&client->hwctx_srcu, idx); 525 + amdxdna_pm_suspend_put(xdna); 526 + put_bos: 533 527 amdxdna_arg_bos_put(job); 534 528 cmd_put: 535 529 amdxdna_gem_put_obj(job->cmd_bo);
-6
drivers/accel/ivpu/ivpu_hw_40xx_reg.h
··· 121 121 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu 122 122 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0) 123 123 124 - #define VPU_40XX_HOST_SS_AON_RETENTION0 0x0003000cu 125 - #define VPU_40XX_HOST_SS_AON_RETENTION1 0x00030010u 126 - #define VPU_40XX_HOST_SS_AON_RETENTION2 0x00030014u 127 - #define VPU_40XX_HOST_SS_AON_RETENTION3 0x00030018u 128 - #define VPU_40XX_HOST_SS_AON_RETENTION4 0x0003001cu 129 - 130 124 #define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u 131 125 #define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0) 132 126 #define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
-1
drivers/accel/ivpu/ivpu_hw_ip.c
··· 931 931 932 932 static int soc_cpu_boot_60xx(struct ivpu_device *vdev) 933 933 { 934 - REGV_WR64(VPU_40XX_HOST_SS_AON_RETENTION1, vdev->fw->mem_bp->vpu_addr); 935 934 soc_cpu_set_entry_point_40xx(vdev, vdev->fw->cold_boot_entry_point); 936 935 937 936 return 0;
+1
drivers/acpi/Kconfig
··· 9 9 menuconfig ACPI 10 10 bool "ACPI (Advanced Configuration and Power Interface) Support" 11 11 depends on ARCH_SUPPORTS_ACPI 12 + select AUXILIARY_BUS 12 13 select PNP 13 14 select NLS 14 15 select CRC32
+1 -1
drivers/acpi/acpi_platform.c
··· 135 135 } 136 136 } 137 137 138 - if (adev->device_type == ACPI_BUS_TYPE_DEVICE && !adev->pnp.type.backlight) { 138 + if (adev->device_type == ACPI_BUS_TYPE_DEVICE) { 139 139 LIST_HEAD(resource_list); 140 140 141 141 count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+8 -7
drivers/acpi/acpi_processor.c
··· 113 113 PCI_ANY_ID, PCI_ANY_ID, NULL); 114 114 if (ide_dev) { 115 115 errata.piix4.bmisx = pci_resource_start(ide_dev, 4); 116 + if (errata.piix4.bmisx) 117 + dev_dbg(&ide_dev->dev, 118 + "Bus master activity detection (BM-IDE) erratum enabled\n"); 119 + 116 120 pci_dev_put(ide_dev); 117 121 } 118 122 ··· 135 131 if (isa_dev) { 136 132 pci_read_config_byte(isa_dev, 0x76, &value1); 137 133 pci_read_config_byte(isa_dev, 0x77, &value2); 138 - if ((value1 & 0x80) || (value2 & 0x80)) 134 + if ((value1 & 0x80) || (value2 & 0x80)) { 139 135 errata.piix4.fdma = 1; 136 + dev_dbg(&isa_dev->dev, 137 + "Type-F DMA livelock erratum (C3 disabled)\n"); 138 + } 140 139 pci_dev_put(isa_dev); 141 140 } 142 141 143 142 break; 144 143 } 145 - 146 - if (ide_dev) 147 - dev_dbg(&ide_dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n"); 148 - 149 - if (isa_dev) 150 - dev_dbg(&isa_dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n"); 151 144 152 145 return 0; 153 146 }
+22 -23
drivers/acpi/acpi_video.c
··· 9 9 10 10 #define pr_fmt(fmt) "ACPI: video: " fmt 11 11 12 + #include <linux/auxiliary_bus.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/module.h> 14 15 #include <linux/init.h> ··· 22 21 #include <linux/sort.h> 23 22 #include <linux/pci.h> 24 23 #include <linux/pci_ids.h> 25 - #include <linux/platform_device.h> 26 24 #include <linux/slab.h> 27 25 #include <linux/dmi.h> 28 26 #include <linux/suspend.h> ··· 77 77 static DEFINE_MUTEX(register_count_mutex); 78 78 static DEFINE_MUTEX(video_list_lock); 79 79 static LIST_HEAD(video_bus_head); 80 - static int acpi_video_bus_probe(struct platform_device *pdev); 81 - static void acpi_video_bus_remove(struct platform_device *pdev); 80 + static int acpi_video_bus_probe(struct auxiliary_device *aux_dev, 81 + const struct auxiliary_device_id *id); 82 + static void acpi_video_bus_remove(struct auxiliary_device *aux); 82 83 static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data); 83 84 84 85 /* ··· 94 93 ACPI_VIDEO_FIRST_LEVEL, /* actual supported levels begin here */ 95 94 }; 96 95 97 - static const struct acpi_device_id video_device_ids[] = { 98 - {ACPI_VIDEO_HID, 0}, 99 - {"", 0}, 96 + static const struct auxiliary_device_id video_bus_auxiliary_id_table[] = { 97 + { .name = "acpi.video_bus" }, 98 + {}, 100 99 }; 101 - MODULE_DEVICE_TABLE(acpi, video_device_ids); 100 + MODULE_DEVICE_TABLE(auxiliary, video_bus_auxiliary_id_table); 102 101 103 - static struct platform_driver acpi_video_bus = { 102 + static struct auxiliary_driver acpi_video_bus = { 104 103 .probe = acpi_video_bus_probe, 105 104 .remove = acpi_video_bus_remove, 106 - .driver = { 107 - .name = "acpi-video", 108 - .acpi_match_table = video_device_ids, 109 - }, 105 + .id_table = video_bus_auxiliary_id_table, 110 106 }; 111 107 112 108 struct acpi_video_bus_flags { ··· 1883 1885 } 1884 1886 1885 1887 static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video, 1886 - struct platform_device *pdev) 1888 + struct device 
*parent) 1887 1889 { 1888 1890 struct input_dev *input; 1889 1891 struct acpi_video_device *dev; ··· 1906 1908 input->phys = video->phys; 1907 1909 input->id.bustype = BUS_HOST; 1908 1910 input->id.product = 0x06; 1909 - input->dev.parent = &pdev->dev; 1911 + input->dev.parent = parent; 1910 1912 input->evbit[0] = BIT(EV_KEY); 1911 1913 set_bit(KEY_SWITCHVIDEOMODE, input->keybit); 1912 1914 set_bit(KEY_VIDEO_NEXT, input->keybit); ··· 1978 1980 1979 1981 static int instance; 1980 1982 1981 - static int acpi_video_bus_probe(struct platform_device *pdev) 1983 + static int acpi_video_bus_probe(struct auxiliary_device *aux_dev, 1984 + const struct auxiliary_device_id *id_unused) 1982 1985 { 1983 - struct acpi_device *device = ACPI_COMPANION(&pdev->dev); 1986 + struct acpi_device *device = ACPI_COMPANION(&aux_dev->dev); 1984 1987 struct acpi_video_bus *video; 1985 1988 bool auto_detect; 1986 1989 int error; ··· 2018 2019 instance++; 2019 2020 } 2020 2021 2021 - platform_set_drvdata(pdev, video); 2022 + auxiliary_set_drvdata(aux_dev, video); 2022 2023 2023 2024 video->device = device; 2024 2025 strscpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); ··· 2067 2068 !auto_detect) 2068 2069 acpi_video_bus_register_backlight(video); 2069 2070 2070 - error = acpi_video_bus_add_notify_handler(video, pdev); 2071 + error = acpi_video_bus_add_notify_handler(video, &aux_dev->dev); 2071 2072 if (error) 2072 2073 goto err_del; 2073 2074 ··· 2095 2096 return error; 2096 2097 } 2097 2098 2098 - static void acpi_video_bus_remove(struct platform_device *pdev) 2099 + static void acpi_video_bus_remove(struct auxiliary_device *aux_dev) 2099 2100 { 2100 - struct acpi_video_bus *video = platform_get_drvdata(pdev); 2101 - struct acpi_device *device = ACPI_COMPANION(&pdev->dev); 2101 + struct acpi_video_bus *video = auxiliary_get_drvdata(aux_dev); 2102 + struct acpi_device *device = ACPI_COMPANION(&aux_dev->dev); 2102 2103 2103 2104 acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY, 2104 
2105 acpi_video_bus_notify); ··· 2162 2163 2163 2164 dmi_check_system(video_dmi_table); 2164 2165 2165 - ret = platform_driver_register(&acpi_video_bus); 2166 + ret = auxiliary_driver_register(&acpi_video_bus); 2166 2167 if (ret) 2167 2168 goto leave; 2168 2169 ··· 2182 2183 { 2183 2184 mutex_lock(&register_count_mutex); 2184 2185 if (register_count) { 2185 - platform_driver_unregister(&acpi_video_bus); 2186 + auxiliary_driver_unregister(&acpi_video_bus); 2186 2187 register_count = 0; 2187 2188 may_report_brightness_keys = false; 2188 2189 }
+1 -1
drivers/acpi/acpica/acpredef.h
··· 451 451 452 452 {{"_DSM", 453 453 METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, 454 - ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) | 454 + ACPI_TYPE_PACKAGE | ACPI_TYPE_ANY) | 455 455 ARG_COUNT_IS_MINIMUM, 456 456 METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */ 457 457
-3
drivers/acpi/bus.c
··· 818 818 if (list_empty(&adev->pnp.ids)) 819 819 return NULL; 820 820 821 - if (adev->pnp.type.backlight) 822 - return adev; 823 - 824 821 return acpi_primary_dev_companion(adev, dev); 825 822 } 826 823
+1 -1
drivers/acpi/osl.c
··· 1681 1681 * Use acpi_os_map_generic_address to pre-map the reset 1682 1682 * register if it's in system memory. 1683 1683 */ 1684 - void *rv; 1684 + void __iomem *rv; 1685 1685 1686 1686 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); 1687 1687 pr_debug("%s: Reset register mapping %s\n", __func__,
+45
drivers/acpi/scan.c
··· 6 6 #define pr_fmt(fmt) "ACPI: " fmt 7 7 8 8 #include <linux/async.h> 9 + #include <linux/auxiliary_bus.h> 9 10 #include <linux/module.h> 10 11 #include <linux/init.h> 11 12 #include <linux/slab.h> ··· 2193 2192 return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p); 2194 2193 } 2195 2194 2195 + static void acpi_video_bus_device_release(struct device *dev) 2196 + { 2197 + struct auxiliary_device *aux_dev = to_auxiliary_dev(dev); 2198 + 2199 + kfree(aux_dev); 2200 + } 2201 + 2202 + static void acpi_create_video_bus_device(struct acpi_device *adev, 2203 + struct acpi_device *parent) 2204 + { 2205 + struct auxiliary_device *aux_dev; 2206 + static unsigned int aux_dev_id; 2207 + 2208 + aux_dev = kzalloc_obj(*aux_dev); 2209 + if (!aux_dev) 2210 + return; 2211 + 2212 + aux_dev->id = aux_dev_id++; 2213 + aux_dev->name = "video_bus"; 2214 + aux_dev->dev.parent = acpi_get_first_physical_node(parent); 2215 + if (!aux_dev->dev.parent) 2216 + goto err; 2217 + 2218 + aux_dev->dev.release = acpi_video_bus_device_release; 2219 + 2220 + if (auxiliary_device_init(aux_dev)) 2221 + goto err; 2222 + 2223 + ACPI_COMPANION_SET(&aux_dev->dev, adev); 2224 + if (__auxiliary_device_add(aux_dev, "acpi")) 2225 + auxiliary_device_uninit(aux_dev); 2226 + 2227 + return; 2228 + 2229 + err: 2230 + kfree(aux_dev); 2231 + } 2232 + 2196 2233 struct acpi_scan_system_dev { 2197 2234 struct list_head node; 2198 2235 struct acpi_device *adev; ··· 2268 2229 sd->adev = device; 2269 2230 list_add_tail(&sd->node, &acpi_scan_system_dev_list); 2270 2231 } 2232 + } else if (device->pnp.type.backlight) { 2233 + struct acpi_device *parent; 2234 + 2235 + parent = acpi_dev_parent(device); 2236 + if (parent) 2237 + acpi_create_video_bus_device(device, parent); 2271 2238 } else { 2272 2239 /* For a regular device object, create a platform device. */ 2273 2240 acpi_create_platform_device(device, NULL);
+64 -20
drivers/android/binder/page_range.rs
··· 142 142 _pin: PhantomPinned, 143 143 } 144 144 145 + // We do not define any ops. For now, used only to check identity of vmas. 146 + static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed(); 147 + 148 + // To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we 149 + // check its vm_ops and private data before using it. 150 + fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> { 151 + // SAFETY: Just reading the vm_ops pointer of any active vma is safe. 152 + let vm_ops = unsafe { (*vma.as_ptr()).vm_ops }; 153 + if !ptr::eq(vm_ops, &BINDER_VM_OPS) { 154 + return None; 155 + } 156 + 157 + // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe. 158 + let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data }; 159 + // The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once 160 + // the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any 161 + // VMA associated with it, so there can't be any false positives due to pointer reuse here. 162 + if !ptr::eq(vm_private_data, owner.cast()) { 163 + return None; 164 + } 165 + 166 + vma.as_mixedmap_vma() 167 + } 168 + 145 169 struct Inner { 146 170 /// Array of pages. 147 171 /// ··· 332 308 inner.size = num_pages; 333 309 inner.vma_addr = vma.start(); 334 310 311 + // This pointer is only used for comparison - it's not dereferenced. 312 + // 313 + // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on 314 + // `vm_private_data`. 315 + unsafe { 316 + (*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>() 317 + }; 318 + 319 + // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on 320 + // `vm_ops`. 
321 + unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS }; 322 + 335 323 Ok(num_pages) 336 324 } 337 325 ··· 435 399 // 436 400 // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a 437 401 // workqueue. 438 - MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?) 439 - .mmap_read_lock() 440 - .vma_lookup(vma_addr) 441 - .ok_or(ESRCH)? 442 - .as_mixedmap_vma() 443 - .ok_or(ESRCH)? 444 - .vm_insert_page(user_page_addr, &new_page) 445 - .inspect_err(|err| { 446 - pr_warn!( 447 - "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}", 448 - user_page_addr, 449 - vma_addr, 450 - i, 451 - err 452 - ) 453 - })?; 402 + let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?); 403 + { 404 + let vma_read; 405 + let mmap_read; 406 + let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) { 407 + vma_read = ret; 408 + check_vma(&vma_read, self) 409 + } else { 410 + mmap_read = mm.mmap_read_lock(); 411 + mmap_read 412 + .vma_lookup(vma_addr) 413 + .and_then(|vma| check_vma(vma, self)) 414 + }; 415 + 416 + match vma { 417 + Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?, 418 + None => return Err(ESRCH), 419 + } 420 + } 454 421 455 422 let inner = self.lock.lock(); 456 423 ··· 706 667 let mmap_read; 707 668 let mm_mutex; 708 669 let vma_addr; 670 + let range_ptr; 709 671 710 672 { 711 673 // CAST: The `list_head` field is first in `PageInfo`. 712 674 let info = item as *mut PageInfo; 713 675 // SAFETY: The `range` field of `PageInfo` is immutable. 714 - let range = unsafe { &*((*info).range) }; 676 + range_ptr = unsafe { (*info).range }; 677 + // SAFETY: The `range` outlives its `PageInfo` values. 678 + let range = unsafe { &*range_ptr }; 715 679 716 680 mm = match range.mm.mmget_not_zero() { 717 681 Some(mm) => MmWithUser::into_mmput_async(mm), ··· 759 717 // SAFETY: The lru lock is locked when this method is called. 
760 718 unsafe { bindings::spin_unlock(&raw mut (*lru).lock) }; 761 719 762 - if let Some(vma) = mmap_read.vma_lookup(vma_addr) { 763 - let user_page_addr = vma_addr + (page_index << PAGE_SHIFT); 764 - vma.zap_page_range_single(user_page_addr, PAGE_SIZE); 720 + if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) { 721 + if let Some(vma) = check_vma(unchecked_vma, range_ptr) { 722 + let user_page_addr = vma_addr + (page_index << PAGE_SHIFT); 723 + vma.zap_page_range_single(user_page_addr, PAGE_SIZE); 724 + } 765 725 } 766 726 767 727 drop(mmap_read);
+2 -1
drivers/android/binder/process.rs
··· 1295 1295 } 1296 1296 1297 1297 pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) { 1298 - if let Some(death) = self.inner.lock().pull_delivered_death(cookie) { 1298 + let death = self.inner.lock().pull_delivered_death(cookie); 1299 + if let Some(death) = death { 1299 1300 death.set_notification_done(thread); 1300 1301 } 1301 1302 }
+33 -2
drivers/android/binder/range_alloc/array.rs
··· 118 118 size: usize, 119 119 is_oneway: bool, 120 120 pid: Pid, 121 - ) -> Result<usize> { 121 + ) -> Result<(usize, bool)> { 122 122 // Compute new value of free_oneway_space, which is set only on success. 123 123 let new_oneway_space = if is_oneway { 124 124 match self.free_oneway_space.checked_sub(size) { ··· 146 146 .ok() 147 147 .unwrap(); 148 148 149 - Ok(insert_at_offset) 149 + // Start detecting spammers once we have less than 20% 150 + // of async space left (which is less than 10% of total 151 + // buffer size). 152 + // 153 + // (This will short-circuit, so `low_oneway_space` is 154 + // only called when necessary.) 155 + let oneway_spam_detected = 156 + is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid); 157 + 158 + Ok((insert_at_offset, oneway_spam_detected)) 159 + } 160 + 161 + /// Find the amount and size of buffers allocated by the current caller. 162 + /// 163 + /// The idea is that once we cross the threshold, whoever is responsible 164 + /// for the low async space is likely to try to send another async transaction, 165 + /// and at some point we'll catch them in the act. This is more efficient 166 + /// than keeping a map per pid. 167 + fn low_oneway_space(&self, calling_pid: Pid) -> bool { 168 + let mut total_alloc_size = 0; 169 + let mut num_buffers = 0; 170 + 171 + // Warn if this pid has more than 50 transactions, or more than 50% of 172 + // async space (which is 25% of total buffer size). Oneway spam is only 173 + // detected when the threshold is exceeded. 174 + for range in &self.ranges { 175 + if range.state.is_oneway() && range.state.pid() == calling_pid { 176 + total_alloc_size += range.size; 177 + num_buffers += 1; 178 + } 179 + } 180 + num_buffers > 50 || total_alloc_size > self.size / 4 150 181 } 151 182 152 183 pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+2 -2
drivers/android/binder/range_alloc/mod.rs
··· 188 188 self.reserve_new(args) 189 189 } 190 190 Impl::Array(array) => { 191 - let offset = 191 + let (offset, oneway_spam_detected) = 192 192 array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?; 193 193 Ok(ReserveNew::Success(ReserveNewSuccess { 194 194 offset, 195 - oneway_spam_detected: false, 195 + oneway_spam_detected, 196 196 _empty_array_alloc: args.empty_array_alloc, 197 197 _new_tree_alloc: args.new_tree_alloc, 198 198 _tree_alloc: args.tree_alloc,
+9 -9
drivers/android/binder/range_alloc/tree.rs
··· 164 164 self.free_oneway_space 165 165 }; 166 166 167 - // Start detecting spammers once we have less than 20% 168 - // of async space left (which is less than 10% of total 169 - // buffer size). 170 - // 171 - // (This will short-circut, so `low_oneway_space` is 172 - // only called when necessary.) 173 - let oneway_spam_detected = 174 - is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid); 175 - 176 167 let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) { 177 168 None => { 178 169 pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size); ··· 193 202 self.tree.insert(tree_node); 194 203 self.free_tree.insert(free_tree_node); 195 204 } 205 + 206 + // Start detecting spammers once we have less than 20% 207 + // of async space left (which is less than 10% of total 208 + // buffer size). 209 + // 210 + // (This will short-circuit, so `low_oneway_space` is 211 + // only called when necessary.) 212 + let oneway_spam_detected = 213 + is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid); 196 214 197 215 Ok((found_off, oneway_spam_detected)) 198 216 }
+6 -11
drivers/android/binder/thread.rs
··· 1015 1015 1016 1016 // Copy offsets if there are any. 1017 1017 if offsets_size > 0 { 1018 - { 1019 - let mut reader = 1020 - UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size) 1021 - .reader(); 1022 - alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?; 1023 - } 1018 + let mut offsets_reader = 1019 + UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size) 1020 + .reader(); 1024 1021 1025 1022 let offsets_start = aligned_data_size; 1026 1023 let offsets_end = aligned_data_size + offsets_size; ··· 1038 1041 .step_by(size_of::<u64>()) 1039 1042 .enumerate() 1040 1043 { 1041 - let offset: usize = view 1042 - .alloc 1043 - .read::<u64>(index_offset)? 1044 - .try_into() 1045 - .map_err(|_| EINVAL)?; 1044 + let offset = offsets_reader.read::<u64>()?; 1045 + view.alloc.write(index_offset, &offset)?; 1046 + let offset: usize = offset.try_into().map_err(|_| EINVAL)?; 1046 1047 1047 1048 if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) { 1048 1049 pr_warn!("Got transaction with invalid offset.");
+1
drivers/base/power/runtime.c
··· 1895 1895 void pm_runtime_remove(struct device *dev) 1896 1896 { 1897 1897 __pm_runtime_disable(dev, false); 1898 + flush_work(&dev->power.work); 1898 1899 pm_runtime_reinit(dev); 1899 1900 } 1900 1901
+12 -4
drivers/block/ublk_drv.c
··· 4443 4443 4444 4444 /* Skip partition scan if disabled by user */ 4445 4445 if (ub->dev_info.flags & UBLK_F_NO_AUTO_PART_SCAN) { 4446 - clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state); 4446 + /* Not clear for unprivileged daemons, see comment above */ 4447 + if (!ub->unprivileged_daemons) 4448 + clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state); 4447 4449 } else { 4448 4450 /* Schedule async partition scan for trusted daemons */ 4449 4451 if (!ub->unprivileged_daemons) ··· 5008 5006 return 0; 5009 5007 } 5010 5008 5011 - static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header) 5009 + static int ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header) 5012 5010 { 5013 5011 struct ublk_param_basic *p = &ub->params.basic; 5014 5012 u64 new_size = header->data[0]; 5013 + int ret = 0; 5015 5014 5016 5015 mutex_lock(&ub->mutex); 5016 + if (!ub->ub_disk) { 5017 + ret = -ENODEV; 5018 + goto out; 5019 + } 5017 5020 p->dev_sectors = new_size; 5018 5021 set_capacity_and_notify(ub->ub_disk, p->dev_sectors); 5022 + out: 5019 5023 mutex_unlock(&ub->mutex); 5024 + return ret; 5020 5025 } 5021 5026 5022 5027 struct count_busy { ··· 5344 5335 ret = ublk_ctrl_end_recovery(ub, &header); 5345 5336 break; 5346 5337 case UBLK_CMD_UPDATE_SIZE: 5347 - ublk_ctrl_set_size(ub, &header); 5348 - ret = 0; 5338 + ret = ublk_ctrl_set_size(ub, &header); 5349 5339 break; 5350 5340 case UBLK_CMD_QUIESCE_DEV: 5351 5341 ret = ublk_ctrl_quiesce_dev(ub, &header);
+2
drivers/bluetooth/btqca.c
··· 787 787 */ 788 788 if (soc_type == QCA_WCN3988) 789 789 rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f); 790 + else if (soc_type == QCA_WCN3998) 791 + rom_ver = ((soc_ver & 0x0000f000) >> 0x07) | (soc_ver & 0x0000000f); 790 792 else 791 793 rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f); 792 794
+2 -2
drivers/cache/ax45mp_cache.c
··· 178 178 179 179 static int __init ax45mp_cache_init(void) 180 180 { 181 - struct device_node *np; 182 181 struct resource res; 183 182 int ret; 184 183 185 - np = of_find_matching_node(NULL, ax45mp_cache_ids); 184 + struct device_node *np __free(device_node) = 185 + of_find_matching_node(NULL, ax45mp_cache_ids); 186 186 if (!of_device_is_available(np)) 187 187 return -ENODEV; 188 188
-10
drivers/cpuidle/cpuidle.c
··· 359 359 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, 360 360 bool *stop_tick) 361 361 { 362 - /* 363 - * If there is only a single idle state (or none), there is nothing 364 - * meaningful for the governor to choose. Skip the governor and 365 - * always use state 0 with the tick running. 366 - */ 367 - if (drv->state_count <= 1) { 368 - *stop_tick = false; 369 - return 0; 370 - } 371 - 372 362 return cpuidle_curr_governor->select(drv, dev, stop_tick); 373 363 } 374 364
+1 -3
drivers/crypto/ccp/sev-dev.c
··· 2408 2408 * in Firmware state on failure. Use snp_reclaim_pages() to 2409 2409 * transition either case back to Hypervisor-owned state. 2410 2410 */ 2411 - if (snp_reclaim_pages(__pa(data), 1, true)) { 2412 - snp_leak_pages(__page_to_pfn(status_page), 1); 2411 + if (snp_reclaim_pages(__pa(data), 1, true)) 2413 2412 return -EFAULT; 2414 - } 2415 2413 } 2416 2414 2417 2415 if (ret)
+7
drivers/crypto/padlock-sha.c
··· 332 332 if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) 333 333 return -ENODEV; 334 334 335 + /* 336 + * Skip family 0x07 and newer used by Zhaoxin processors, 337 + * as the driver's self-tests fail on these CPUs. 338 + */ 339 + if (c->x86 >= 0x07) 340 + return -ENODEV; 341 + 335 342 /* Register the newly added algorithm module if on * 336 343 * VIA Nano processor, or else just do as before */ 337 344 if (c->x86_model < 0x0f) {
+3 -2
drivers/firewire/net.c
··· 257 257 memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len); 258 258 } 259 259 260 - static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) 260 + static int fwnet_header_parse(const struct sk_buff *skb, const struct net_device *dev, 261 + unsigned char *haddr) 261 262 { 262 - memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); 263 + memcpy(haddr, dev->dev_addr, FWNET_ALEN); 263 264 264 265 return FWNET_ALEN; 265 266 }
+4 -4
drivers/firmware/arm_ffa/driver.c
··· 205 205 return 0; 206 206 } 207 207 208 - static int ffa_rxtx_unmap(u16 vm_id) 208 + static int ffa_rxtx_unmap(void) 209 209 { 210 210 ffa_value_t ret; 211 211 212 212 invoke_ffa_fn((ffa_value_t){ 213 - .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0), 213 + .a0 = FFA_RXTX_UNMAP, 214 214 }, &ret); 215 215 216 216 if (ret.a0 == FFA_ERROR) ··· 2097 2097 2098 2098 pr_err("failed to setup partitions\n"); 2099 2099 ffa_notifications_cleanup(); 2100 - ffa_rxtx_unmap(drv_info->vm_id); 2100 + ffa_rxtx_unmap(); 2101 2101 free_pages: 2102 2102 if (drv_info->tx_buffer) 2103 2103 free_pages_exact(drv_info->tx_buffer, rxtx_bufsz); ··· 2112 2112 { 2113 2113 ffa_notifications_cleanup(); 2114 2114 ffa_partitions_cleanup(); 2115 - ffa_rxtx_unmap(drv_info->vm_id); 2115 + ffa_rxtx_unmap(); 2116 2116 free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz); 2117 2117 free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz); 2118 2118 kfree(drv_info);
+2 -2
drivers/firmware/arm_scmi/notify.c
··· 1066 1066 * since at creation time we usually want to have all setup and ready before 1067 1067 * events really start flowing. 1068 1068 * 1069 - * Return: A properly refcounted handler on Success, NULL on Failure 1069 + * Return: A properly refcounted handler on Success, ERR_PTR on Failure 1070 1070 */ 1071 1071 static inline struct scmi_event_handler * 1072 1072 __scmi_event_handler_get_ops(struct scmi_notify_instance *ni, ··· 1113 1113 } 1114 1114 mutex_unlock(&ni->pending_mtx); 1115 1115 1116 - return hndl; 1116 + return hndl ?: ERR_PTR(-ENODEV); 1117 1117 } 1118 1118 1119 1119 static struct scmi_event_handler *
+2 -2
drivers/firmware/arm_scmi/protocols.h
··· 189 189 190 190 /** 191 191 * struct scmi_iterator_state - Iterator current state descriptor 192 - * @desc_index: Starting index for the current mulit-part request. 192 + * @desc_index: Starting index for the current multi-part request. 193 193 * @num_returned: Number of returned items in the last multi-part reply. 194 194 * @num_remaining: Number of remaining items in the multi-part message. 195 195 * @max_resources: Maximum acceptable number of items, configured by the caller 196 196 * depending on the underlying resources that it is querying. 197 197 * @loop_idx: The iterator loop index in the current multi-part reply. 198 - * @rx_len: Size in bytes of the currenly processed message; it can be used by 198 + * @rx_len: Size in bytes of the currently processed message; it can be used by 199 199 * the user of the iterator to verify a reply size. 200 200 * @priv: Optional pointer to some additional state-related private data setup 201 201 * by the caller during the iterations.
+3 -2
drivers/firmware/arm_scpi.c
··· 18 18 19 19 #include <linux/bitmap.h> 20 20 #include <linux/bitfield.h> 21 + #include <linux/cleanup.h> 21 22 #include <linux/device.h> 22 23 #include <linux/err.h> 23 24 #include <linux/export.h> ··· 941 940 int idx = scpi_drvinfo->num_chans; 942 941 struct scpi_chan *pchan = scpi_drvinfo->channels + idx; 943 942 struct mbox_client *cl = &pchan->cl; 944 - struct device_node *shmem = of_parse_phandle(np, "shmem", idx); 943 + struct device_node *shmem __free(device_node) = 944 + of_parse_phandle(np, "shmem", idx); 945 945 946 946 if (!of_match_node(shmem_of_match, shmem)) 947 947 return -ENXIO; 948 948 949 949 ret = of_address_to_resource(shmem, 0, &res); 950 - of_node_put(shmem); 951 950 if (ret) { 952 951 dev_err(dev, "failed to get SCPI payload mem resource\n"); 953 952 return ret;
+18 -6
drivers/firmware/cirrus/cs_dsp.c
··· 1610 1610 region_name); 1611 1611 1612 1612 if (reg) { 1613 + /* 1614 + * Although we expect the underlying bus does not require 1615 + * physically-contiguous buffers, we pessimistically use 1616 + * a temporary buffer instead of trusting that the 1617 + * alignment of region->data is ok. 1618 + */ 1613 1619 region_len = le32_to_cpu(region->len); 1614 1620 if (region_len > buf_len) { 1615 1621 buf_len = round_up(region_len, PAGE_SIZE); 1616 - kfree(buf); 1617 - buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA); 1622 + vfree(buf); 1623 + buf = vmalloc(buf_len); 1618 1624 if (!buf) { 1619 1625 ret = -ENOMEM; 1620 1626 goto out_fw; ··· 1649 1643 1650 1644 ret = 0; 1651 1645 out_fw: 1652 - kfree(buf); 1646 + vfree(buf); 1653 1647 1654 1648 if (ret == -EOVERFLOW) 1655 1649 cs_dsp_err(dsp, "%s: file content overflows file data\n", file); ··· 2337 2331 } 2338 2332 2339 2333 if (reg) { 2334 + /* 2335 + * Although we expect the underlying bus does not require 2336 + * physically-contiguous buffers, we pessimistically use 2337 + * a temporary buffer instead of trusting that the 2338 + * alignment of blk->data is ok. 2339 + */ 2340 2340 region_len = le32_to_cpu(blk->len); 2341 2341 if (region_len > buf_len) { 2342 2342 buf_len = round_up(region_len, PAGE_SIZE); 2343 - kfree(buf); 2344 - buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA); 2343 + vfree(buf); 2344 + buf = vmalloc(buf_len); 2345 2345 if (!buf) { 2346 2346 ret = -ENOMEM; 2347 2347 goto out_fw; ··· 2378 2366 2379 2367 ret = 0; 2380 2368 out_fw: 2381 - kfree(buf); 2369 + vfree(buf); 2382 2370 2383 2371 if (ret == -EOVERFLOW) 2384 2372 cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
+2
drivers/firmware/stratix10-rsu.c
··· 768 768 rsu_async_status_callback); 769 769 if (ret) { 770 770 dev_err(dev, "Error, getting RSU status %i\n", ret); 771 + stratix10_svc_remove_async_client(priv->chan); 771 772 stratix10_svc_free_channel(priv->chan); 773 + return ret; 772 774 } 773 775 774 776 /* get DCMF version from firmware */
+126 -102
drivers/firmware/stratix10-svc.c
··· 37 37 * service layer will return error to FPGA manager when timeout occurs, 38 38 * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. 39 39 */ 40 - #define SVC_NUM_DATA_IN_FIFO 32 40 + #define SVC_NUM_DATA_IN_FIFO 8 41 41 #define SVC_NUM_CHANNEL 4 42 - #define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 42 + #define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 2000 43 43 #define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 44 44 #define BYTE_TO_WORD_SIZE 4 45 45 46 46 /* stratix10 service layer clients */ 47 47 #define STRATIX10_RSU "stratix10-rsu" 48 - #define INTEL_FCS "intel-fcs" 49 48 50 49 /* Maximum number of SDM client IDs. */ 51 50 #define MAX_SDM_CLIENT_IDS 16 ··· 104 105 /** 105 106 * struct stratix10_svc - svc private data 106 107 * @stratix10_svc_rsu: pointer to stratix10 RSU device 107 - * @intel_svc_fcs: pointer to the FCS device 108 108 */ 109 109 struct stratix10_svc { 110 110 struct platform_device *stratix10_svc_rsu; 111 - struct platform_device *intel_svc_fcs; 112 111 }; 113 112 114 113 /** ··· 248 251 * @num_active_client: number of active service client 249 252 * @node: list management 250 253 * @genpool: memory pool pointing to the memory region 251 - * @task: pointer to the thread task which handles SMC or HVC call 252 - * @svc_fifo: a queue for storing service message data 253 254 * @complete_status: state for completion 254 - * @svc_fifo_lock: protect access to service message data queue 255 255 * @invoke_fn: function to issue secure monitor call or hypervisor call 256 256 * @svc: manages the list of client svc drivers 257 + * @sdm_lock: only allows a single command single response to SDM 257 258 * @actrl: async control structure 258 259 * 259 260 * This struct is used to create communication channels for service clients, to ··· 264 269 int num_active_client; 265 270 struct list_head node; 266 271 struct gen_pool *genpool; 267 - struct task_struct *task; 268 - struct kfifo svc_fifo; 269 272 struct completion complete_status; 270 - spinlock_t 
svc_fifo_lock; 271 273 svc_invoke_fn *invoke_fn; 272 274 struct stratix10_svc *svc; 275 + struct mutex sdm_lock; 273 276 struct stratix10_async_ctrl actrl; 274 277 }; 275 278 ··· 276 283 * @ctrl: pointer to service controller which is the provider of this channel 277 284 * @scl: pointer to service client which owns the channel 278 285 * @name: service client name associated with the channel 286 + * @task: pointer to the thread task which handles SMC or HVC call 287 + * @svc_fifo: a queue for storing service message data (separate fifo for every channel) 288 + * @svc_fifo_lock: protect access to service message data queue (locking pending fifo) 279 289 * @lock: protect access to the channel 280 290 * @async_chan: reference to asynchronous channel object for this channel 281 291 * ··· 289 293 struct stratix10_svc_controller *ctrl; 290 294 struct stratix10_svc_client *scl; 291 295 char *name; 296 + struct task_struct *task; 297 + struct kfifo svc_fifo; 298 + spinlock_t svc_fifo_lock; 292 299 spinlock_t lock; 293 300 struct stratix10_async_chan *async_chan; 294 301 }; ··· 526 527 */ 527 528 static int svc_normal_to_secure_thread(void *data) 528 529 { 529 - struct stratix10_svc_controller 530 - *ctrl = (struct stratix10_svc_controller *)data; 531 - struct stratix10_svc_data *pdata; 532 - struct stratix10_svc_cb_data *cbdata; 530 + struct stratix10_svc_chan *chan = (struct stratix10_svc_chan *)data; 531 + struct stratix10_svc_controller *ctrl = chan->ctrl; 532 + struct stratix10_svc_data *pdata = NULL; 533 + struct stratix10_svc_cb_data *cbdata = NULL; 533 534 struct arm_smccc_res res; 534 535 unsigned long a0, a1, a2, a3, a4, a5, a6, a7; 535 536 int ret_fifo = 0; ··· 554 555 a6 = 0; 555 556 a7 = 0; 556 557 557 - pr_debug("smc_hvc_shm_thread is running\n"); 558 + pr_debug("%s: %s: Thread is running!\n", __func__, chan->name); 558 559 559 560 while (!kthread_should_stop()) { 560 - ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo, 561 + ret_fifo = 
kfifo_out_spinlocked(&chan->svc_fifo, 561 562 pdata, sizeof(*pdata), 562 - &ctrl->svc_fifo_lock); 563 + &chan->svc_fifo_lock); 563 564 564 565 if (!ret_fifo) 565 566 continue; ··· 568 569 (unsigned int)pdata->paddr, pdata->command, 569 570 (unsigned int)pdata->size); 570 571 572 + /* SDM can only process one command at a time */ 573 + pr_debug("%s: %s: Thread is waiting for mutex!\n", 574 + __func__, chan->name); 575 + if (mutex_lock_interruptible(&ctrl->sdm_lock)) { 576 + /* item already dequeued; notify client to unblock it */ 577 + cbdata->status = BIT(SVC_STATUS_ERROR); 578 + cbdata->kaddr1 = NULL; 579 + cbdata->kaddr2 = NULL; 580 + cbdata->kaddr3 = NULL; 581 + if (pdata->chan->scl) 582 + pdata->chan->scl->receive_cb(pdata->chan->scl, 583 + cbdata); 584 + break; 585 + } 586 + 571 587 switch (pdata->command) { 572 588 case COMMAND_RECONFIG_DATA_CLAIM: 573 589 svc_thread_cmd_data_claim(ctrl, pdata, cbdata); 590 + mutex_unlock(&ctrl->sdm_lock); 574 591 continue; 575 592 case COMMAND_RECONFIG: 576 593 a0 = INTEL_SIP_SMC_FPGA_CONFIG_START; ··· 715 700 break; 716 701 default: 717 702 pr_warn("it shouldn't happen\n"); 718 - break; 703 + mutex_unlock(&ctrl->sdm_lock); 704 + continue; 719 705 } 720 - pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x", 721 - __func__, 706 + pr_debug("%s: %s: before SMC call -- a0=0x%016x a1=0x%016x", 707 + __func__, chan->name, 722 708 (unsigned int)a0, 723 709 (unsigned int)a1); 724 710 pr_debug(" a2=0x%016x\n", (unsigned int)a2); ··· 728 712 pr_debug(" a5=0x%016x\n", (unsigned int)a5); 729 713 ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res); 730 714 731 - pr_debug("%s: after SMC call -- res.a0=0x%016x", 732 - __func__, (unsigned int)res.a0); 715 + pr_debug("%s: %s: after SMC call -- res.a0=0x%016x", 716 + __func__, chan->name, (unsigned int)res.a0); 733 717 pr_debug(" res.a1=0x%016x, res.a2=0x%016x", 734 718 (unsigned int)res.a1, (unsigned int)res.a2); 735 719 pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3); ··· 744 728 
cbdata->kaddr2 = NULL; 745 729 cbdata->kaddr3 = NULL; 746 730 pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); 731 + mutex_unlock(&ctrl->sdm_lock); 747 732 continue; 748 733 } 749 734 ··· 818 801 break; 819 802 820 803 } 804 + 805 + mutex_unlock(&ctrl->sdm_lock); 821 806 } 822 807 823 808 kfree(cbdata); ··· 1715 1696 if (!p_data) 1716 1697 return -ENOMEM; 1717 1698 1718 - /* first client will create kernel thread */ 1719 - if (!chan->ctrl->task) { 1720 - chan->ctrl->task = 1721 - kthread_run_on_cpu(svc_normal_to_secure_thread, 1722 - (void *)chan->ctrl, 1723 - cpu, "svc_smc_hvc_thread"); 1724 - if (IS_ERR(chan->ctrl->task)) { 1699 + /* first caller creates the per-channel kthread */ 1700 + if (!chan->task) { 1701 + struct task_struct *task; 1702 + 1703 + task = kthread_run_on_cpu(svc_normal_to_secure_thread, 1704 + (void *)chan, 1705 + cpu, "svc_smc_hvc_thread"); 1706 + if (IS_ERR(task)) { 1725 1707 dev_err(chan->ctrl->dev, 1726 1708 "failed to create svc_smc_hvc_thread\n"); 1727 1709 kfree(p_data); 1728 1710 return -EINVAL; 1729 1711 } 1712 + 1713 + spin_lock(&chan->lock); 1714 + if (chan->task) { 1715 + /* another caller won the race; discard our thread */ 1716 + spin_unlock(&chan->lock); 1717 + kthread_stop(task); 1718 + } else { 1719 + chan->task = task; 1720 + spin_unlock(&chan->lock); 1721 + } 1730 1722 } 1731 1723 1732 - pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, 1733 - p_msg->payload, p_msg->command, 1724 + pr_debug("%s: %s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, 1725 + chan->name, p_msg->payload, p_msg->command, 1734 1726 (unsigned int)p_msg->payload_length); 1735 1727 1736 1728 if (list_empty(&svc_data_mem)) { ··· 1777 1747 p_data->arg[2] = p_msg->arg[2]; 1778 1748 p_data->size = p_msg->payload_length; 1779 1749 p_data->chan = chan; 1780 - pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__, 1781 - (unsigned int)p_data->paddr, p_data->command, 1782 - (unsigned int)p_data->size); 1783 - ret = 
kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data, 1750 + pr_debug("%s: %s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", 1751 + __func__, 1752 + chan->name, 1753 + (unsigned int)p_data->paddr, 1754 + p_data->command, 1755 + (unsigned int)p_data->size); 1756 + 1757 + ret = kfifo_in_spinlocked(&chan->svc_fifo, p_data, 1784 1758 sizeof(*p_data), 1785 - &chan->ctrl->svc_fifo_lock); 1759 + &chan->svc_fifo_lock); 1786 1760 1787 1761 kfree(p_data); 1788 1762 ··· 1807 1773 */ 1808 1774 void stratix10_svc_done(struct stratix10_svc_chan *chan) 1809 1775 { 1810 - /* stop thread when thread is running AND only one active client */ 1811 - if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) { 1812 - pr_debug("svc_smc_hvc_shm_thread is stopped\n"); 1813 - kthread_stop(chan->ctrl->task); 1814 - chan->ctrl->task = NULL; 1776 + /* stop thread when thread is running */ 1777 + if (chan->task) { 1778 + pr_debug("%s: %s: svc_smc_hvc_shm_thread is stopping\n", 1779 + __func__, chan->name); 1780 + kthread_stop(chan->task); 1781 + chan->task = NULL; 1815 1782 } 1816 1783 } 1817 1784 EXPORT_SYMBOL_GPL(stratix10_svc_done); ··· 1852 1817 pmem->paddr = pa; 1853 1818 pmem->size = s; 1854 1819 list_add_tail(&pmem->node, &svc_data_mem); 1855 - pr_debug("%s: va=%p, pa=0x%016x\n", __func__, 1856 - pmem->vaddr, (unsigned int)pmem->paddr); 1820 + pr_debug("%s: %s: va=%p, pa=0x%016x\n", __func__, 1821 + chan->name, pmem->vaddr, (unsigned int)pmem->paddr); 1857 1822 1858 1823 return (void *)va; 1859 1824 } ··· 1890 1855 {}, 1891 1856 }; 1892 1857 1858 + static const char * const chan_names[SVC_NUM_CHANNEL] = { 1859 + SVC_CLIENT_FPGA, 1860 + SVC_CLIENT_RSU, 1861 + SVC_CLIENT_FCS, 1862 + SVC_CLIENT_HWMON 1863 + }; 1864 + 1893 1865 static int stratix10_svc_drv_probe(struct platform_device *pdev) 1894 1866 { 1895 1867 struct device *dev = &pdev->dev; ··· 1904 1862 struct stratix10_svc_chan *chans; 1905 1863 struct gen_pool *genpool; 1906 1864 struct stratix10_svc_sh_memory *sh_memory; 1907 - struct 
stratix10_svc *svc; 1865 + struct stratix10_svc *svc = NULL; 1908 1866 1909 1867 svc_invoke_fn *invoke_fn; 1910 1868 size_t fifo_size; 1911 - int ret; 1869 + int ret, i = 0; 1912 1870 1913 1871 /* get SMC or HVC function */ 1914 1872 invoke_fn = get_invoke_func(dev); ··· 1947 1905 controller->num_active_client = 0; 1948 1906 controller->chans = chans; 1949 1907 controller->genpool = genpool; 1950 - controller->task = NULL; 1951 1908 controller->invoke_fn = invoke_fn; 1909 + INIT_LIST_HEAD(&controller->node); 1952 1910 init_completion(&controller->complete_status); 1953 1911 1954 1912 ret = stratix10_svc_async_init(controller); ··· 1959 1917 } 1960 1918 1961 1919 fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; 1962 - ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); 1963 - if (ret) { 1964 - dev_err(dev, "failed to allocate FIFO\n"); 1965 - goto err_async_exit; 1920 + mutex_init(&controller->sdm_lock); 1921 + 1922 + for (i = 0; i < SVC_NUM_CHANNEL; i++) { 1923 + chans[i].scl = NULL; 1924 + chans[i].ctrl = controller; 1925 + chans[i].name = (char *)chan_names[i]; 1926 + spin_lock_init(&chans[i].lock); 1927 + ret = kfifo_alloc(&chans[i].svc_fifo, fifo_size, GFP_KERNEL); 1928 + if (ret) { 1929 + dev_err(dev, "failed to allocate FIFO %d\n", i); 1930 + goto err_free_fifos; 1931 + } 1932 + spin_lock_init(&chans[i].svc_fifo_lock); 1966 1933 } 1967 - spin_lock_init(&controller->svc_fifo_lock); 1968 - 1969 - chans[0].scl = NULL; 1970 - chans[0].ctrl = controller; 1971 - chans[0].name = SVC_CLIENT_FPGA; 1972 - spin_lock_init(&chans[0].lock); 1973 - 1974 - chans[1].scl = NULL; 1975 - chans[1].ctrl = controller; 1976 - chans[1].name = SVC_CLIENT_RSU; 1977 - spin_lock_init(&chans[1].lock); 1978 - 1979 - chans[2].scl = NULL; 1980 - chans[2].ctrl = controller; 1981 - chans[2].name = SVC_CLIENT_FCS; 1982 - spin_lock_init(&chans[2].lock); 1983 - 1984 - chans[3].scl = NULL; 1985 - chans[3].ctrl = controller; 1986 - chans[3].name = SVC_CLIENT_HWMON; 
1987 - spin_lock_init(&chans[3].lock); 1988 1934 1989 1935 list_add_tail(&controller->node, &svc_ctrl); 1990 1936 platform_set_drvdata(pdev, controller); ··· 1981 1951 svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL); 1982 1952 if (!svc) { 1983 1953 ret = -ENOMEM; 1984 - goto err_free_kfifo; 1954 + goto err_free_fifos; 1985 1955 } 1986 1956 controller->svc = svc; 1987 1957 ··· 1989 1959 if (!svc->stratix10_svc_rsu) { 1990 1960 dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU); 1991 1961 ret = -ENOMEM; 1992 - goto err_free_kfifo; 1962 + goto err_free_fifos; 1993 1963 } 1994 1964 1995 1965 ret = platform_device_add(svc->stratix10_svc_rsu); 1996 - if (ret) { 1997 - platform_device_put(svc->stratix10_svc_rsu); 1998 - goto err_free_kfifo; 1999 - } 2000 - 2001 - svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1); 2002 - if (!svc->intel_svc_fcs) { 2003 - dev_err(dev, "failed to allocate %s device\n", INTEL_FCS); 2004 - ret = -ENOMEM; 2005 - goto err_unregister_rsu_dev; 2006 - } 2007 - 2008 - ret = platform_device_add(svc->intel_svc_fcs); 2009 - if (ret) { 2010 - platform_device_put(svc->intel_svc_fcs); 2011 - goto err_unregister_rsu_dev; 2012 - } 1966 + if (ret) 1967 + goto err_put_device; 2013 1968 2014 1969 ret = of_platform_default_populate(dev_of_node(dev), NULL, dev); 2015 1970 if (ret) 2016 - goto err_unregister_fcs_dev; 1971 + goto err_unregister_rsu_dev; 2017 1972 2018 1973 pr_info("Intel Service Layer Driver Initialized\n"); 2019 1974 2020 1975 return 0; 2021 1976 2022 - err_unregister_fcs_dev: 2023 - platform_device_unregister(svc->intel_svc_fcs); 2024 1977 err_unregister_rsu_dev: 2025 1978 platform_device_unregister(svc->stratix10_svc_rsu); 2026 - err_free_kfifo: 2027 - kfifo_free(&controller->svc_fifo); 2028 - err_async_exit: 1979 + goto err_free_fifos; 1980 + err_put_device: 1981 + platform_device_put(svc->stratix10_svc_rsu); 1982 + err_free_fifos: 1983 + /* only remove from list if list_add_tail() was reached */ 1984 + if 
(!list_empty(&controller->node)) 1985 + list_del(&controller->node); 1986 + /* free only the FIFOs that were successfully allocated */ 1987 + while (i--) 1988 + kfifo_free(&chans[i].svc_fifo); 2029 1989 stratix10_svc_async_exit(controller); 2030 1990 err_destroy_pool: 2031 1991 gen_pool_destroy(genpool); 1992 + 2032 1993 return ret; 2033 1994 } 2034 1995 2035 1996 static void stratix10_svc_drv_remove(struct platform_device *pdev) 2036 1997 { 1998 + int i; 2037 1999 struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); 2038 2000 struct stratix10_svc *svc = ctrl->svc; 2039 2001 ··· 2033 2011 2034 2012 of_platform_depopulate(ctrl->dev); 2035 2013 2036 - platform_device_unregister(svc->intel_svc_fcs); 2037 2014 platform_device_unregister(svc->stratix10_svc_rsu); 2038 2015 2039 - kfifo_free(&ctrl->svc_fifo); 2040 - if (ctrl->task) { 2041 - kthread_stop(ctrl->task); 2042 - ctrl->task = NULL; 2016 + for (i = 0; i < SVC_NUM_CHANNEL; i++) { 2017 + if (ctrl->chans[i].task) { 2018 + kthread_stop(ctrl->chans[i].task); 2019 + ctrl->chans[i].task = NULL; 2020 + } 2021 + kfifo_free(&ctrl->chans[i].svc_fifo); 2043 2022 } 2023 + 2044 2024 if (ctrl->genpool) 2045 2025 gen_pool_destroy(ctrl->genpool); 2046 2026 list_del(&ctrl->node);
+4 -3
drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
··· 38 38 /* 39 39 * Table of devices that work with this driver. 40 40 * 41 - * Currently, only one device is known to be used in the 42 - * lpvo_usb_gpib adapter (FTDI 0403:6001). 41 + * Currently, only one device is known to be used in the lpvo_usb_gpib 42 + * adapter (FTDI 0403:6001) but as this device id is already handled by the 43 + * ftdi_sio USB serial driver the LPVO driver must not bind to it by default. 44 + * 43 45 * If your adapter uses a different chip, insert a line 44 46 * in the following table with proper <Vendor-id>, <Product-id>. 45 47 * ··· 52 50 */ 53 51 54 52 static const struct usb_device_id skel_table[] = { 55 - { USB_DEVICE(0x0403, 0x6001) }, 56 53 { } /* Terminating entry */ 57 54 }; 58 55 MODULE_DEVICE_TABLE(usb, skel_table);
+13 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2690 2690 break; 2691 2691 default: 2692 2692 r = amdgpu_discovery_set_ip_blocks(adev); 2693 - if (r) 2693 + if (r) { 2694 + adev->num_ip_blocks = 0; 2694 2695 return r; 2696 + } 2695 2697 break; 2696 2698 } 2697 2699 ··· 3249 3247 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 3250 3248 if (!adev->ip_blocks[i].status.late_initialized) 3251 3249 continue; 3250 + if (!adev->ip_blocks[i].version) 3251 + continue; 3252 3252 /* skip CG for GFX, SDMA on S0ix */ 3253 3253 if (adev->in_s0ix && 3254 3254 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || ··· 3289 3285 for (j = 0; j < adev->num_ip_blocks; j++) { 3290 3286 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 3291 3287 if (!adev->ip_blocks[i].status.late_initialized) 3288 + continue; 3289 + if (!adev->ip_blocks[i].version) 3292 3290 continue; 3293 3291 /* skip PG for GFX, SDMA on S0ix */ 3294 3292 if (adev->in_s0ix && ··· 3499 3493 int i, r; 3500 3494 3501 3495 for (i = 0; i < adev->num_ip_blocks; i++) { 3496 + if (!adev->ip_blocks[i].version) 3497 + continue; 3502 3498 if (!adev->ip_blocks[i].version->funcs->early_fini) 3503 3499 continue; 3504 3500 ··· 3578 3570 if (!adev->ip_blocks[i].status.sw) 3579 3571 continue; 3580 3572 3573 + if (!adev->ip_blocks[i].version) 3574 + continue; 3581 3575 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 3582 3576 amdgpu_ucode_free_bo(adev); 3583 3577 amdgpu_free_static_csa(&adev->virt.csa_obj); ··· 3605 3595 3606 3596 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 3607 3597 if (!adev->ip_blocks[i].status.late_initialized) 3598 + continue; 3599 + if (!adev->ip_blocks[i].version) 3608 3600 continue; 3609 3601 if (adev->ip_blocks[i].version->funcs->late_fini) 3610 3602 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 83 83 { 84 84 struct amdgpu_device *adev = drm_to_adev(dev); 85 85 86 - if (adev == NULL) 86 + if (adev == NULL || !adev->num_ip_blocks) 87 87 return; 88 88 89 89 amdgpu_unregister_gpu_instance(adev);
+8 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 368 368 369 369 struct drm_property *plane_ctm_property; 370 370 /** 371 - * @shaper_lut_property: Plane property to set pre-blending shaper LUT 372 - * that converts color content before 3D LUT. If 373 - * plane_shaper_tf_property != Identity TF, AMD color module will 371 + * @plane_shaper_lut_property: Plane property to set pre-blending 372 + * shaper LUT that converts color content before 3D LUT. 373 + * If plane_shaper_tf_property != Identity TF, AMD color module will 374 374 * combine the user LUT values with pre-defined TF into the LUT 375 375 * parameters to be programmed. 376 376 */ 377 377 struct drm_property *plane_shaper_lut_property; 378 378 /** 379 - * @shaper_lut_size_property: Plane property for the size of 379 + * @plane_shaper_lut_size_property: Plane property for the size of 380 380 * pre-blending shaper LUT as supported by the driver (read-only). 381 381 */ 382 382 struct drm_property *plane_shaper_lut_size_property; ··· 400 400 */ 401 401 struct drm_property *plane_lut3d_property; 402 402 /** 403 - * @plane_degamma_lut_size_property: Plane property to define the max 404 - * size of 3D LUT as supported by the driver (read-only). The max size 405 - * is the max size of one dimension and, therefore, the max number of 406 - * entries for 3D LUT array is the 3D LUT size cubed; 403 + * @plane_lut3d_size_property: Plane property to define the max size 404 + * of 3D LUT as supported by the driver (read-only). The max size is 405 + * the max size of one dimension and, therefore, the max number of 406 + * entries for 3D LUT array is the 3D LUT size cubed. 407 407 */ 408 408 struct drm_property *plane_lut3d_size_property; 409 409 /**
+4 -1
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
··· 731 731 int i; 732 732 struct amdgpu_device *adev = mes->adev; 733 733 union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt; 734 + uint32_t mes_rev = (pipe == AMDGPU_MES_SCHED_PIPE) ? 735 + (mes->sched_version & AMDGPU_MES_VERSION_MASK) : 736 + (mes->kiq_version & AMDGPU_MES_VERSION_MASK); 734 737 735 738 memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt)); 736 739 ··· 788 785 * handling support, other queue will not use the oversubscribe timer. 789 786 * handling mode - 0: disabled; 1: basic version; 2: basic+ version 790 787 */ 791 - mes_set_hw_res_pkt.oversubscription_timer = 50; 788 + mes_set_hw_res_pkt.oversubscription_timer = mes_rev < 0x8b ? 0 : 50; 792 789 mes_set_hw_res_pkt.unmapped_doorbell_handling = 1; 793 790 794 791 if (amdgpu_mes_log_enable) {
+1
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
··· 593 593 p->queue_size)) { 594 594 pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n", 595 595 p->queue_address, p->queue_size); 596 + amdgpu_bo_unreserve(vm->root.bo); 596 597 return -EFAULT; 597 598 } 598 599
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
··· 38 38 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ 39 39 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\ 40 40 SR(DISPCLK_FREQ_CHANGE_CNTL),\ 41 - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL) 41 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 42 + SR(MICROSECOND_TIME_BASE_DIV),\ 43 + SR(MILLISECOND_TIME_BASE_DIV),\ 44 + SR(DCCG_GATE_DISABLE_CNTL),\ 45 + SR(DCCG_GATE_DISABLE_CNTL2) 42 46 43 47 #define DCCG_REG_LIST_DCN2() \ 44 48 DCCG_COMMON_REG_LIST_DCN_BASE(),\
+20 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
··· 96 96 dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; 97 97 } 98 98 99 + /* 100 + * On DCN21 S0i3 resume, BIOS programs MICROSECOND_TIME_BASE_DIV to 101 + * 0x00120464 as a marker that golden init has already been done. 102 + * dcn21_s0i3_golden_init_wa() reads this marker later in bios_golden_init() 103 + * to decide whether to skip golden init. 104 + * 105 + * dccg2_init() unconditionally overwrites MICROSECOND_TIME_BASE_DIV to 106 + * 0x00120264, destroying the marker before it can be read. 107 + * 108 + * Guard the call: if the S0i3 marker is present, skip dccg2_init() so the 109 + * WA can function correctly. bios_golden_init() will handle init in that case. 110 + */ 111 + static void dccg21_init(struct dccg *dccg) 112 + { 113 + if (dccg2_is_s0i3_golden_init_wa_done(dccg)) 114 + return; 115 + 116 + dccg2_init(dccg); 117 + } 99 118 100 119 static const struct dccg_funcs dccg21_funcs = { 101 120 .update_dpp_dto = dccg21_update_dpp_dto, ··· 122 103 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 123 104 .otg_add_pixel = dccg2_otg_add_pixel, 124 105 .otg_drop_pixel = dccg2_otg_drop_pixel, 125 - .dccg_init = dccg2_init, 106 + .dccg_init = dccg21_init, 126 107 .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 127 108 .allow_clock_gating = dccg2_allow_clock_gating, 128 109 .enable_memory_low_power = dccg2_enable_memory_low_power,
+7 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
··· 34 34 DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ 35 35 DCCG_SRII(DTO_PARAM, DPPCLK, 2),\ 36 36 DCCG_SRII(DTO_PARAM, DPPCLK, 3),\ 37 - SR(REFCLK_CNTL) 37 + SR(REFCLK_CNTL),\ 38 + SR(DISPCLK_FREQ_CHANGE_CNTL),\ 39 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 40 + SR(MICROSECOND_TIME_BASE_DIV),\ 41 + SR(MILLISECOND_TIME_BASE_DIV),\ 42 + SR(DCCG_GATE_DISABLE_CNTL),\ 43 + SR(DCCG_GATE_DISABLE_CNTL2) 38 44 39 45 #define DCCG_MASK_SH_LIST_DCN301(mask_sh) \ 40 46 DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+4 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
··· 64 64 SR(DSCCLK1_DTO_PARAM),\ 65 65 SR(DSCCLK2_DTO_PARAM),\ 66 66 SR(DSCCLK_DTO_CTRL),\ 67 + SR(DCCG_GATE_DISABLE_CNTL),\ 67 68 SR(DCCG_GATE_DISABLE_CNTL2),\ 68 69 SR(DCCG_GATE_DISABLE_CNTL3),\ 69 - SR(HDMISTREAMCLK0_DTO_PARAM) 70 + SR(HDMISTREAMCLK0_DTO_PARAM),\ 71 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 72 + SR(MICROSECOND_TIME_BASE_DIV) 70 73 71 74 72 75 #define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
+4 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
··· 70 70 SR(DSCCLK2_DTO_PARAM),\ 71 71 SR(DSCCLK3_DTO_PARAM),\ 72 72 SR(DSCCLK_DTO_CTRL),\ 73 + SR(DCCG_GATE_DISABLE_CNTL),\ 73 74 SR(DCCG_GATE_DISABLE_CNTL2),\ 74 75 SR(DCCG_GATE_DISABLE_CNTL3),\ 75 76 SR(HDMISTREAMCLK0_DTO_PARAM),\ 76 77 SR(OTG_PIXEL_RATE_DIV),\ 77 - SR(DTBCLK_P_CNTL) 78 + SR(DTBCLK_P_CNTL),\ 79 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 80 + SR(MICROSECOND_TIME_BASE_DIV) 78 81 79 82 #define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \ 80 83 DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 2222 2222 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | 2223 2223 BIT(PP_OD_FEATURE_UCLK_BIT) | 2224 2224 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | 2225 - BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2225 + BIT(PP_OD_FEATURE_FAN_CURVE_BIT) | 2226 + BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2226 2227 res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table); 2227 2228 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 2228 2229 if (res == 0)
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 2224 2224 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | 2225 2225 BIT(PP_OD_FEATURE_UCLK_BIT) | 2226 2226 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | 2227 - BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2227 + BIT(PP_OD_FEATURE_FAN_CURVE_BIT) | 2228 + BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2228 2229 res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table); 2229 2230 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 2230 2231 if (res == 0)
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 2311 2311 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | 2312 2312 BIT(PP_OD_FEATURE_UCLK_BIT) | 2313 2313 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | 2314 - BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2314 + BIT(PP_OD_FEATURE_FAN_CURVE_BIT) | 2315 + BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2315 2316 res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table); 2316 2317 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 2317 2318 if (res == 0)
+7 -6
drivers/gpu/drm/bridge/ti-sn65dsi83.c
··· 351 351 * DSI_CLK = mode clock * bpp / dsi_data_lanes / 2 352 352 * the 2 is there because the bus is DDR. 353 353 */ 354 - return DIV_ROUND_UP(clamp((unsigned int)mode->clock * 355 - mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) / 356 - ctx->dsi->lanes / 2, 40000U, 500000U), 5000U); 354 + return clamp((unsigned int)mode->clock * 355 + mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) / 356 + ctx->dsi->lanes / 2, 40000U, 500000U) / 5000U; 357 357 } 358 358 359 359 static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx) ··· 517 517 struct drm_atomic_state *state) 518 518 { 519 519 struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); 520 + const unsigned int dual_factor = ctx->lvds_dual_link ? 2 : 1; 520 521 const struct drm_bridge_state *bridge_state; 521 522 const struct drm_crtc_state *crtc_state; 522 523 const struct drm_display_mode *mode; ··· 654 653 /* 32 + 1 pixel clock to ensure proper operation */ 655 654 le16val = cpu_to_le16(32 + 1); 656 655 regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &le16val, 2); 657 - le16val = cpu_to_le16(mode->hsync_end - mode->hsync_start); 656 + le16val = cpu_to_le16((mode->hsync_end - mode->hsync_start) / dual_factor); 658 657 regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW, 659 658 &le16val, 2); 660 659 le16val = cpu_to_le16(mode->vsync_end - mode->vsync_start); 661 660 regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW, 662 661 &le16val, 2); 663 662 regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH, 664 - mode->htotal - mode->hsync_end); 663 + (mode->htotal - mode->hsync_end) / dual_factor); 665 664 regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH, 666 665 mode->vtotal - mode->vsync_end); 667 666 regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH, 668 - mode->hsync_start - mode->hdisplay); 667 + (mode->hsync_start - mode->hdisplay) / dual_factor); 669 668 regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH, 670 669 mode->vsync_start - 
mode->vdisplay); 671 670 regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
+8 -1
drivers/gpu/drm/gud/gud_drv.c
··· 339 339 } 340 340 341 341 static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = { 342 - .atomic_check = drm_crtc_helper_atomic_check 342 + .atomic_check = drm_crtc_helper_atomic_check, 343 + .atomic_enable = gud_crtc_atomic_enable, 344 + .atomic_disable = gud_crtc_atomic_disable, 343 345 }; 344 346 345 347 static const struct drm_crtc_funcs gud_crtc_funcs = { ··· 364 362 .disable_plane = drm_atomic_helper_disable_plane, 365 363 .destroy = drm_plane_cleanup, 366 364 DRM_GEM_SHADOW_PLANE_FUNCS, 365 + }; 366 + 367 + static const struct drm_mode_config_helper_funcs gud_mode_config_helpers = { 368 + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, 367 369 }; 368 370 369 371 static const struct drm_mode_config_funcs gud_mode_config_funcs = { ··· 505 499 drm->mode_config.min_height = le32_to_cpu(desc.min_height); 506 500 drm->mode_config.max_height = le32_to_cpu(desc.max_height); 507 501 drm->mode_config.funcs = &gud_mode_config_funcs; 502 + drm->mode_config.helper_private = &gud_mode_config_helpers; 508 503 509 504 /* Format init */ 510 505 formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
+4
drivers/gpu/drm/gud/gud_internal.h
··· 62 62 63 63 void gud_clear_damage(struct gud_device *gdrm); 64 64 void gud_flush_work(struct work_struct *work); 65 + void gud_crtc_atomic_enable(struct drm_crtc *crtc, 66 + struct drm_atomic_state *state); 67 + void gud_crtc_atomic_disable(struct drm_crtc *crtc, 68 + struct drm_atomic_state *state); 65 69 int gud_plane_atomic_check(struct drm_plane *plane, 66 70 struct drm_atomic_state *state); 67 71 void gud_plane_atomic_update(struct drm_plane *plane,
+36 -18
drivers/gpu/drm/gud/gud_pipe.c
··· 580 580 return ret; 581 581 } 582 582 583 + void gud_crtc_atomic_enable(struct drm_crtc *crtc, 584 + struct drm_atomic_state *state) 585 + { 586 + struct drm_device *drm = crtc->dev; 587 + struct gud_device *gdrm = to_gud_device(drm); 588 + int idx; 589 + 590 + if (!drm_dev_enter(drm, &idx)) 591 + return; 592 + 593 + gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1); 594 + gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0); 595 + gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 1); 596 + 597 + drm_dev_exit(idx); 598 + } 599 + 600 + void gud_crtc_atomic_disable(struct drm_crtc *crtc, 601 + struct drm_atomic_state *state) 602 + { 603 + struct drm_device *drm = crtc->dev; 604 + struct gud_device *gdrm = to_gud_device(drm); 605 + int idx; 606 + 607 + if (!drm_dev_enter(drm, &idx)) 608 + return; 609 + 610 + gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 0); 611 + gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0); 612 + 613 + drm_dev_exit(idx); 614 + } 615 + 583 616 void gud_plane_atomic_update(struct drm_plane *plane, 584 617 struct drm_atomic_state *atomic_state) 585 618 { ··· 640 607 mutex_unlock(&gdrm->damage_lock); 641 608 } 642 609 643 - if (!drm_dev_enter(drm, &idx)) 610 + if (!crtc || !drm_dev_enter(drm, &idx)) 644 611 return; 645 - 646 - if (!old_state->fb) 647 - gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1); 648 - 649 - if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed)) 650 - gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0); 651 - 652 - if (crtc->state->active_changed) 653 - gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active); 654 - 655 - if (!fb) 656 - goto ctrl_disable; 657 612 658 613 ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 659 614 if (ret) 660 - goto ctrl_disable; 615 + goto out; 661 616 662 617 drm_atomic_helper_damage_iter_init(&iter, old_state, new_state); 663 618 drm_atomic_for_each_plane_damage(&iter, &damage) ··· 653 632 654 633 drm_gem_fb_end_cpu_access(fb, 
DMA_FROM_DEVICE); 655 634 656 - ctrl_disable: 657 - if (!crtc->state->enable) 658 - gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0); 659 - 635 + out: 660 636 drm_dev_exit(idx); 661 637 }
-6
drivers/gpu/drm/i915/display/intel_alpm.c
··· 43 43 44 44 void intel_alpm_init(struct intel_dp *intel_dp) 45 45 { 46 - u8 dpcd; 47 - 48 - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &dpcd) < 0) 49 - return; 50 - 51 - intel_dp->alpm_dpcd = dpcd; 52 46 mutex_init(&intel_dp->alpm.lock); 53 47 } 54 48
-1
drivers/gpu/drm/i915/display/intel_display.c
··· 1614 1614 } 1615 1615 1616 1616 intel_set_transcoder_timings(crtc_state); 1617 - intel_vrr_set_transcoder_timings(crtc_state); 1618 1617 1619 1618 if (cpu_transcoder != TRANSCODER_EDP) 1620 1619 intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
+7
drivers/gpu/drm/i915/display/intel_dp.c
··· 4577 4577 intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 4578 4578 { 4579 4579 struct intel_display *display = to_intel_display(intel_dp); 4580 + int ret; 4580 4581 4581 4582 /* this function is meant to be called only once */ 4582 4583 drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); ··· 4616 4615 * available (such as HDR backlight controls) 4617 4616 */ 4618 4617 intel_dp_init_source_oui(intel_dp); 4618 + 4619 + /* Read the ALPM DPCD caps */ 4620 + ret = drm_dp_dpcd_read_byte(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 4621 + &intel_dp->alpm_dpcd); 4622 + if (ret < 0) 4623 + return false; 4619 4624 4620 4625 /* 4621 4626 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
+48 -12
drivers/gpu/drm/i915/display/intel_psr.c
··· 2619 2619 2620 2620 intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 2621 2621 crtc_state->pipe_srcsz_early_tpt); 2622 + 2623 + if (!crtc_state->dsc.compression_enable) 2624 + return; 2625 + 2626 + intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state, 2627 + drm_rect_height(&crtc_state->psr2_su_area)); 2622 2628 } 2623 2629 2624 2630 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, ··· 2695 2689 overlap_damage_area->y2 = damage_area->y2; 2696 2690 } 2697 2691 2698 - static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) 2692 + static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) 2699 2693 { 2700 2694 struct intel_display *display = to_intel_display(crtc_state); 2701 2695 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2702 2696 u16 y_alignment; 2697 + bool su_area_changed = false; 2703 2698 2704 2699 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */ 2705 2700 if (crtc_state->dsc.compression_enable && ··· 2709 2702 else 2710 2703 y_alignment = crtc_state->su_y_granularity; 2711 2704 2712 - crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment; 2713 - if (crtc_state->psr2_su_area.y2 % y_alignment) 2705 + if (crtc_state->psr2_su_area.y1 % y_alignment) { 2706 + crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment; 2707 + su_area_changed = true; 2708 + } 2709 + 2710 + if (crtc_state->psr2_su_area.y2 % y_alignment) { 2714 2711 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 / 2715 2712 y_alignment) + 1) * y_alignment; 2713 + su_area_changed = true; 2714 + } 2715 + 2716 + return su_area_changed; 2716 2717 } 2717 2718 2718 2719 /* ··· 2854 2839 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 2855 2840 struct intel_plane_state *new_plane_state, *old_plane_state; 2856 2841 struct intel_plane *plane; 2857 - bool full_update = false, 
cursor_in_su_area = false; 2842 + bool full_update = false, su_area_changed; 2858 2843 int i, ret; 2859 2844 2860 2845 if (!crtc_state->enable_psr2_sel_fetch) ··· 2961 2946 if (ret) 2962 2947 return ret; 2963 2948 2964 - /* 2965 - * Adjust su area to cover cursor fully as necessary (early 2966 - * transport). This needs to be done after 2967 - * drm_atomic_add_affected_planes to ensure visible cursor is added into 2968 - * affected planes even when cursor is not updated by itself. 2969 - */ 2970 - intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area); 2949 + do { 2950 + bool cursor_in_su_area; 2971 2951 2972 - intel_psr2_sel_fetch_pipe_alignment(crtc_state); 2952 + /* 2953 + * Adjust su area to cover cursor fully as necessary 2954 + * (early transport). This needs to be done after 2955 + * drm_atomic_add_affected_planes to ensure visible 2956 + * cursor is added into affected planes even when 2957 + * cursor is not updated by itself. 2958 + */ 2959 + intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area); 2960 + 2961 + su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state); 2962 + 2963 + /* 2964 + * If the cursor was outside the SU area before 2965 + * alignment, the alignment step (which only expands 2966 + * SU) may pull the cursor partially inside, so we 2967 + * must run ET alignment again to fully cover it. But 2968 + * if the cursor was already fully inside before 2969 + * alignment, expanding the SU area won't change that, 2970 + * so no further work is needed. 
2971 + */ 2972 + if (cursor_in_su_area) 2973 + break; 2974 + } while (su_area_changed); 2973 2975 2974 2976 /* 2975 2977 * Now that we have the pipe damaged area check if it intersect with ··· 3046 3014 } 3047 3015 3048 3016 skip_sel_fetch_set_loop: 3017 + if (full_update) 3018 + clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src, 3019 + &crtc_state->pipe_src); 3020 + 3049 3021 psr2_man_trk_ctl_calc(crtc_state, full_update); 3050 3022 crtc_state->pipe_srcsz_early_tpt = 3051 3023 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
+23
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 767 767 sizeof(dp_dsc_pps_sdp)); 768 768 } 769 769 770 + void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder, 771 + const struct intel_crtc_state *crtc_state, int su_lines) 772 + { 773 + struct intel_display *display = to_intel_display(crtc_state); 774 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 775 + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 776 + enum pipe pipe = crtc->pipe; 777 + int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state); 778 + int slice_row_per_frame = su_lines / vdsc_cfg->slice_height; 779 + u32 val; 780 + 781 + drm_WARN_ON_ONCE(display->drm, su_lines % vdsc_cfg->slice_height); 782 + drm_WARN_ON_ONCE(display->drm, vdsc_instances_per_pipe > 2); 783 + 784 + val = DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(slice_row_per_frame); 785 + val |= DSC_SUPS0_SU_PIC_HEIGHT(su_lines); 786 + 787 + intel_de_write_dsb(display, dsb, LNL_DSC0_SU_PARAMETER_SET_0(pipe), val); 788 + 789 + if (vdsc_instances_per_pipe == 2) 790 + intel_de_write_dsb(display, dsb, LNL_DSC1_SU_PARAMETER_SET_0(pipe), val); 791 + } 792 + 770 793 static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder) 771 794 { 772 795 return is_pipe_dsc(crtc, cpu_transcoder) ?
+3
drivers/gpu/drm/i915/display/intel_vdsc.h
··· 13 13 enum transcoder; 14 14 struct intel_crtc; 15 15 struct intel_crtc_state; 16 + struct intel_dsb; 16 17 struct intel_encoder; 17 18 18 19 bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state); ··· 32 31 const struct intel_crtc_state *crtc_state); 33 32 void intel_dsc_dp_pps_write(struct intel_encoder *encoder, 34 33 const struct intel_crtc_state *crtc_state); 34 + void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder, 35 + const struct intel_crtc_state *crtc_state, int su_lines); 35 36 void intel_vdsc_state_dump(struct drm_printer *p, int indent, 36 37 const struct intel_crtc_state *crtc_state); 37 38 int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
+12
drivers/gpu/drm/i915/display/intel_vdsc_regs.h
··· 196 196 #define DSC_PPS18_NSL_BPG_OFFSET(offset) REG_FIELD_PREP(DSC_PPS18_NSL_BPG_OFFSET_MASK, offset) 197 197 #define DSC_PPS18_SL_OFFSET_ADJ(offset) REG_FIELD_PREP(DSC_PPS18_SL_OFFSET_ADJ_MASK, offset) 198 198 199 + #define _LNL_DSC0_SU_PARAMETER_SET_0_PA 0x78064 200 + #define _LNL_DSC1_SU_PARAMETER_SET_0_PA 0x78164 201 + #define _LNL_DSC0_SU_PARAMETER_SET_0_PB 0x78264 202 + #define _LNL_DSC1_SU_PARAMETER_SET_0_PB 0x78364 203 + #define LNL_DSC0_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC0_SU_PARAMETER_SET_0_PA, _LNL_DSC0_SU_PARAMETER_SET_0_PB) 204 + #define LNL_DSC1_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC1_SU_PARAMETER_SET_0_PA, _LNL_DSC1_SU_PARAMETER_SET_0_PB) 205 + 206 + #define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK REG_GENMASK(31, 20) 207 + #define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(rows) REG_FIELD_PREP(DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK, (rows)) 208 + #define DSC_SUPS0_SU_PIC_HEIGHT_MASK REG_GENMASK(15, 0) 209 + #define DSC_SUPS0_SU_PIC_HEIGHT(h) REG_FIELD_PREP(DSC_SUPS0_SU_PIC_HEIGHT_MASK, (h)) 210 + 199 211 /* Icelake Rate Control Buffer Threshold Registers */ 200 212 #define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) 201 213 #define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+14
drivers/gpu/drm/i915/display/intel_vrr.c
··· 598 598 return; 599 599 600 600 /* 601 + * Bspec says: 602 + * "(note: VRR needs to be programmed after 603 + * TRANS_DDI_FUNC_CTL and before TRANS_CONF)." 604 + * 605 + * In practice it turns out that ICL can hang if 606 + * TRANS_VRR_VMAX/FLIPLINE are written before 607 + * enabling TRANS_DDI_FUNC_CTL. 608 + */ 609 + drm_WARN_ON(display->drm, 610 + !(intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE)); 611 + 612 + /* 601 613 * This bit seems to have two meanings depending on the platform: 602 614 * TGL: generate VRR "safe window" for DSB vblank waits 603 615 * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR ··· 950 938 void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state) 951 939 { 952 940 struct intel_display *display = to_intel_display(crtc_state); 941 + 942 + intel_vrr_set_transcoder_timings(crtc_state); 953 943 954 944 if (!intel_vrr_possible(crtc_state)) 955 945 return;
+9 -3
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
··· 153 153 } 154 154 } while (1); 155 155 156 - nr_pages = min_t(unsigned long, 157 - folio_nr_pages(folio), page_count - i); 156 + nr_pages = min_array(((unsigned long[]) { 157 + folio_nr_pages(folio), 158 + page_count - i, 159 + max_segment / PAGE_SIZE, 160 + }), 3); 161 + 158 162 if (!i || 159 163 sg->length >= max_segment || 160 164 folio_pfn(folio) != next_pfn) { ··· 168 164 st->nents++; 169 165 sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0); 170 166 } else { 171 - /* XXX: could overflow? */ 167 + nr_pages = min_t(unsigned long, nr_pages, 168 + (max_segment - sg->length) / PAGE_SIZE); 169 + 172 170 sg->length += nr_pages * PAGE_SIZE; 173 171 } 174 172 next_pfn = folio_pfn(folio) + nr_pages;
+1 -1
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
··· 78 78 { 79 79 struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); 80 80 81 - dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base, 81 + dma_free_attrs(mmu->dev, TABLE_SIZE + 32, gpummu->table, gpummu->pt_base, 82 82 DMA_ATTR_FORCE_CONTIGUOUS); 83 83 84 84 kfree(gpummu);
+1 -2
drivers/gpu/drm/msm/adreno/a6xx_catalog.c
··· 1759 1759 A6XX_PROTECT_NORDWR(0x27c06, 0x0000), 1760 1760 }; 1761 1761 1762 - DECLARE_ADRENO_PROTECT(x285_protect, 64); 1762 + DECLARE_ADRENO_PROTECT(x285_protect, 15); 1763 1763 1764 1764 static const struct adreno_reglist_pipe a840_nonctxt_regs[] = { 1765 1765 { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) }, ··· 1966 1966 BUILD_BUG_ON(a660_protect.count > a660_protect.count_max); 1967 1967 BUILD_BUG_ON(a690_protect.count > a690_protect.count_max); 1968 1968 BUILD_BUG_ON(a730_protect.count > a730_protect.count_max); 1969 - BUILD_BUG_ON(a840_protect.count > a840_protect.count_max); 1970 1969 }
+12 -2
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
··· 310 310 hbb = cfg->highest_bank_bit - 13; 311 311 hbb_hi = hbb >> 2; 312 312 hbb_lo = hbb & 3; 313 - a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5); 314 - a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5); 313 + 314 + a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, 315 + hbb << 5 | 316 + level3_swizzling_dis << 4 | 317 + level2_swizzling_dis << 3); 318 + 319 + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, 320 + hbb << 5 | 321 + level3_swizzling_dis << 4 | 322 + level2_swizzling_dis << 3); 315 323 316 324 a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CCU_NC_MODE_CNTL, 317 325 yuvnotcomptofc << 6 | 326 + level3_swizzling_dis << 5 | 327 + level2_swizzling_dis << 4 | 318 328 hbb_hi << 3 | 319 329 hbb_lo << 1); 320 330
+1
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 302 302 { .compatible = "qcom,kgsl-3d0" }, 303 303 {} 304 304 }; 305 + MODULE_DEVICE_TABLE(of, dt_match); 305 306 306 307 static int adreno_runtime_resume(struct device *dev) 307 308 {
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
··· 133 133 static const struct dpu_lm_cfg sc8280xp_lm[] = { 134 134 { 135 135 .name = "lm_0", .id = LM_0, 136 - .base = 0x44000, .len = 0x320, 136 + .base = 0x44000, .len = 0x400, 137 137 .features = MIXER_MSM8998_MASK, 138 138 .sblk = &sdm845_lm_sblk, 139 139 .lm_pair = LM_1, ··· 141 141 .dspp = DSPP_0, 142 142 }, { 143 143 .name = "lm_1", .id = LM_1, 144 - .base = 0x45000, .len = 0x320, 144 + .base = 0x45000, .len = 0x400, 145 145 .features = MIXER_MSM8998_MASK, 146 146 .sblk = &sdm845_lm_sblk, 147 147 .lm_pair = LM_0, ··· 149 149 .dspp = DSPP_1, 150 150 }, { 151 151 .name = "lm_2", .id = LM_2, 152 - .base = 0x46000, .len = 0x320, 152 + .base = 0x46000, .len = 0x400, 153 153 .features = MIXER_MSM8998_MASK, 154 154 .sblk = &sdm845_lm_sblk, 155 155 .lm_pair = LM_3, ··· 157 157 .dspp = DSPP_2, 158 158 }, { 159 159 .name = "lm_3", .id = LM_3, 160 - .base = 0x47000, .len = 0x320, 160 + .base = 0x47000, .len = 0x400, 161 161 .features = MIXER_MSM8998_MASK, 162 162 .sblk = &sdm845_lm_sblk, 163 163 .lm_pair = LM_2, ··· 165 165 .dspp = DSPP_3, 166 166 }, { 167 167 .name = "lm_4", .id = LM_4, 168 - .base = 0x48000, .len = 0x320, 168 + .base = 0x48000, .len = 0x400, 169 169 .features = MIXER_MSM8998_MASK, 170 170 .sblk = &sdm845_lm_sblk, 171 171 .lm_pair = LM_5, 172 172 .pingpong = PINGPONG_4, 173 173 }, { 174 174 .name = "lm_5", .id = LM_5, 175 - .base = 0x49000, .len = 0x320, 175 + .base = 0x49000, .len = 0x400, 176 176 .features = MIXER_MSM8998_MASK, 177 177 .sblk = &sdm845_lm_sblk, 178 178 .lm_pair = LM_4,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
··· 134 134 static const struct dpu_lm_cfg sm8450_lm[] = { 135 135 { 136 136 .name = "lm_0", .id = LM_0, 137 - .base = 0x44000, .len = 0x320, 137 + .base = 0x44000, .len = 0x400, 138 138 .features = MIXER_MSM8998_MASK, 139 139 .sblk = &sdm845_lm_sblk, 140 140 .lm_pair = LM_1, ··· 142 142 .dspp = DSPP_0, 143 143 }, { 144 144 .name = "lm_1", .id = LM_1, 145 - .base = 0x45000, .len = 0x320, 145 + .base = 0x45000, .len = 0x400, 146 146 .features = MIXER_MSM8998_MASK, 147 147 .sblk = &sdm845_lm_sblk, 148 148 .lm_pair = LM_0, ··· 150 150 .dspp = DSPP_1, 151 151 }, { 152 152 .name = "lm_2", .id = LM_2, 153 - .base = 0x46000, .len = 0x320, 153 + .base = 0x46000, .len = 0x400, 154 154 .features = MIXER_MSM8998_MASK, 155 155 .sblk = &sdm845_lm_sblk, 156 156 .lm_pair = LM_3, ··· 158 158 .dspp = DSPP_2, 159 159 }, { 160 160 .name = "lm_3", .id = LM_3, 161 - .base = 0x47000, .len = 0x320, 161 + .base = 0x47000, .len = 0x400, 162 162 .features = MIXER_MSM8998_MASK, 163 163 .sblk = &sdm845_lm_sblk, 164 164 .lm_pair = LM_2, ··· 166 166 .dspp = DSPP_3, 167 167 }, { 168 168 .name = "lm_4", .id = LM_4, 169 - .base = 0x48000, .len = 0x320, 169 + .base = 0x48000, .len = 0x400, 170 170 .features = MIXER_MSM8998_MASK, 171 171 .sblk = &sdm845_lm_sblk, 172 172 .lm_pair = LM_5, 173 173 .pingpong = PINGPONG_4, 174 174 }, { 175 175 .name = "lm_5", .id = LM_5, 176 - .base = 0x49000, .len = 0x320, 176 + .base = 0x49000, .len = 0x400, 177 177 .features = MIXER_MSM8998_MASK, 178 178 .sblk = &sdm845_lm_sblk, 179 179 .lm_pair = LM_4,
+2 -2
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
··· 366 366 .type = INTF_NONE, 367 367 .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ 368 368 .prog_fetch_lines_worst_case = 24, 369 - .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), 370 - .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), 369 + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), 370 + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), 371 371 }, { 372 372 .name = "intf_7", .id = INTF_7, 373 373 .base = 0x3b000, .len = 0x280,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
··· 131 131 static const struct dpu_lm_cfg sm8550_lm[] = { 132 132 { 133 133 .name = "lm_0", .id = LM_0, 134 - .base = 0x44000, .len = 0x320, 134 + .base = 0x44000, .len = 0x400, 135 135 .features = MIXER_MSM8998_MASK, 136 136 .sblk = &sdm845_lm_sblk, 137 137 .lm_pair = LM_1, ··· 139 139 .dspp = DSPP_0, 140 140 }, { 141 141 .name = "lm_1", .id = LM_1, 142 - .base = 0x45000, .len = 0x320, 142 + .base = 0x45000, .len = 0x400, 143 143 .features = MIXER_MSM8998_MASK, 144 144 .sblk = &sdm845_lm_sblk, 145 145 .lm_pair = LM_0, ··· 147 147 .dspp = DSPP_1, 148 148 }, { 149 149 .name = "lm_2", .id = LM_2, 150 - .base = 0x46000, .len = 0x320, 150 + .base = 0x46000, .len = 0x400, 151 151 .features = MIXER_MSM8998_MASK, 152 152 .sblk = &sdm845_lm_sblk, 153 153 .lm_pair = LM_3, ··· 155 155 .dspp = DSPP_2, 156 156 }, { 157 157 .name = "lm_3", .id = LM_3, 158 - .base = 0x47000, .len = 0x320, 158 + .base = 0x47000, .len = 0x400, 159 159 .features = MIXER_MSM8998_MASK, 160 160 .sblk = &sdm845_lm_sblk, 161 161 .lm_pair = LM_2, ··· 163 163 .dspp = DSPP_3, 164 164 }, { 165 165 .name = "lm_4", .id = LM_4, 166 - .base = 0x48000, .len = 0x320, 166 + .base = 0x48000, .len = 0x400, 167 167 .features = MIXER_MSM8998_MASK, 168 168 .sblk = &sdm845_lm_sblk, 169 169 .lm_pair = LM_5, 170 170 .pingpong = PINGPONG_4, 171 171 }, { 172 172 .name = "lm_5", .id = LM_5, 173 - .base = 0x49000, .len = 0x320, 173 + .base = 0x49000, .len = 0x400, 174 174 .features = MIXER_MSM8998_MASK, 175 175 .sblk = &sdm845_lm_sblk, 176 176 .lm_pair = LM_4,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
··· 131 131 static const struct dpu_lm_cfg sar2130p_lm[] = { 132 132 { 133 133 .name = "lm_0", .id = LM_0, 134 - .base = 0x44000, .len = 0x320, 134 + .base = 0x44000, .len = 0x400, 135 135 .features = MIXER_MSM8998_MASK, 136 136 .sblk = &sdm845_lm_sblk, 137 137 .lm_pair = LM_1, ··· 139 139 .dspp = DSPP_0, 140 140 }, { 141 141 .name = "lm_1", .id = LM_1, 142 - .base = 0x45000, .len = 0x320, 142 + .base = 0x45000, .len = 0x400, 143 143 .features = MIXER_MSM8998_MASK, 144 144 .sblk = &sdm845_lm_sblk, 145 145 .lm_pair = LM_0, ··· 147 147 .dspp = DSPP_1, 148 148 }, { 149 149 .name = "lm_2", .id = LM_2, 150 - .base = 0x46000, .len = 0x320, 150 + .base = 0x46000, .len = 0x400, 151 151 .features = MIXER_MSM8998_MASK, 152 152 .sblk = &sdm845_lm_sblk, 153 153 .lm_pair = LM_3, ··· 155 155 .dspp = DSPP_2, 156 156 }, { 157 157 .name = "lm_3", .id = LM_3, 158 - .base = 0x47000, .len = 0x320, 158 + .base = 0x47000, .len = 0x400, 159 159 .features = MIXER_MSM8998_MASK, 160 160 .sblk = &sdm845_lm_sblk, 161 161 .lm_pair = LM_2, ··· 163 163 .dspp = DSPP_3, 164 164 }, { 165 165 .name = "lm_4", .id = LM_4, 166 - .base = 0x48000, .len = 0x320, 166 + .base = 0x48000, .len = 0x400, 167 167 .features = MIXER_MSM8998_MASK, 168 168 .sblk = &sdm845_lm_sblk, 169 169 .lm_pair = LM_5, 170 170 .pingpong = PINGPONG_4, 171 171 }, { 172 172 .name = "lm_5", .id = LM_5, 173 - .base = 0x49000, .len = 0x320, 173 + .base = 0x49000, .len = 0x400, 174 174 .features = MIXER_MSM8998_MASK, 175 175 .sblk = &sdm845_lm_sblk, 176 176 .lm_pair = LM_4,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
··· 130 130 static const struct dpu_lm_cfg x1e80100_lm[] = { 131 131 { 132 132 .name = "lm_0", .id = LM_0, 133 - .base = 0x44000, .len = 0x320, 133 + .base = 0x44000, .len = 0x400, 134 134 .features = MIXER_MSM8998_MASK, 135 135 .sblk = &sdm845_lm_sblk, 136 136 .lm_pair = LM_1, ··· 138 138 .dspp = DSPP_0, 139 139 }, { 140 140 .name = "lm_1", .id = LM_1, 141 - .base = 0x45000, .len = 0x320, 141 + .base = 0x45000, .len = 0x400, 142 142 .features = MIXER_MSM8998_MASK, 143 143 .sblk = &sdm845_lm_sblk, 144 144 .lm_pair = LM_0, ··· 146 146 .dspp = DSPP_1, 147 147 }, { 148 148 .name = "lm_2", .id = LM_2, 149 - .base = 0x46000, .len = 0x320, 149 + .base = 0x46000, .len = 0x400, 150 150 .features = MIXER_MSM8998_MASK, 151 151 .sblk = &sdm845_lm_sblk, 152 152 .lm_pair = LM_3, ··· 154 154 .dspp = DSPP_2, 155 155 }, { 156 156 .name = "lm_3", .id = LM_3, 157 - .base = 0x47000, .len = 0x320, 157 + .base = 0x47000, .len = 0x400, 158 158 .features = MIXER_MSM8998_MASK, 159 159 .sblk = &sdm845_lm_sblk, 160 160 .lm_pair = LM_2, ··· 162 162 .dspp = DSPP_3, 163 163 }, { 164 164 .name = "lm_4", .id = LM_4, 165 - .base = 0x48000, .len = 0x320, 165 + .base = 0x48000, .len = 0x400, 166 166 .features = MIXER_MSM8998_MASK, 167 167 .sblk = &sdm845_lm_sblk, 168 168 .lm_pair = LM_5, 169 169 .pingpong = PINGPONG_4, 170 170 }, { 171 171 .name = "lm_5", .id = LM_5, 172 - .base = 0x49000, .len = 0x320, 172 + .base = 0x49000, .len = 0x400, 173 173 .features = MIXER_MSM8998_MASK, 174 174 .sblk = &sdm845_lm_sblk, 175 175 .lm_pair = LM_4,
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
··· 89 89 base = ctx->cap->sblk->gc.base; 90 90 91 91 if (!base) { 92 - DRM_ERROR("invalid ctx %pK gc base\n", ctx); 92 + DRM_ERROR("invalid ctx %p gc base\n", ctx); 93 93 return; 94 94 } 95 95
+3 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp_v13.c
··· 156 156 u8 color; 157 157 u32 lr_pe[4], tb_pe[4]; 158 158 const u32 bytemask = 0xff; 159 - u32 offset = ctx->cap->sblk->sspp_rec0_blk.base; 159 + u32 offset; 160 160 161 161 if (!ctx || !pe_ext) 162 162 return; 163 + 164 + offset = ctx->cap->sblk->sspp_rec0_blk.base; 163 165 164 166 c = &ctx->hw; 165 167 /* program SW pixel extension override for all pipes*/
+14 -38
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 350 350 return true; 351 351 } 352 352 353 - static bool dpu_rm_find_lms(struct dpu_rm *rm, 354 - struct dpu_global_state *global_state, 355 - uint32_t crtc_id, bool skip_dspp, 356 - struct msm_display_topology *topology, 357 - int *lm_idx, int *pp_idx, int *dspp_idx) 353 + static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 354 + struct dpu_global_state *global_state, 355 + uint32_t crtc_id, 356 + struct msm_display_topology *topology) 358 357 359 358 { 359 + int lm_idx[MAX_BLOCKS]; 360 + int pp_idx[MAX_BLOCKS]; 361 + int dspp_idx[MAX_BLOCKS] = {0}; 360 362 int i, lm_count = 0; 363 + 364 + if (!topology->num_lm) { 365 + DPU_ERROR("zero LMs in topology\n"); 366 + return -EINVAL; 367 + } 361 368 362 369 /* Find a primary mixer */ 363 370 for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) && 364 371 lm_count < topology->num_lm; i++) { 365 372 if (!rm->mixer_blks[i]) 366 373 continue; 367 - 368 - if (skip_dspp && to_dpu_hw_mixer(rm->mixer_blks[i])->cap->dspp) { 369 - DPU_DEBUG("Skipping LM_%d, skipping LMs with DSPPs\n", i); 370 - continue; 371 - } 372 374 373 375 /* 374 376 * Reset lm_count to an even index. 
This will drop the previous ··· 410 408 } 411 409 } 412 410 413 - return lm_count == topology->num_lm; 414 - } 415 - 416 - static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 417 - struct dpu_global_state *global_state, 418 - uint32_t crtc_id, 419 - struct msm_display_topology *topology) 420 - 421 - { 422 - int lm_idx[MAX_BLOCKS]; 423 - int pp_idx[MAX_BLOCKS]; 424 - int dspp_idx[MAX_BLOCKS] = {0}; 425 - int i; 426 - bool found; 427 - 428 - if (!topology->num_lm) { 429 - DPU_ERROR("zero LMs in topology\n"); 430 - return -EINVAL; 431 - } 432 - 433 - /* Try using non-DSPP LM blocks first */ 434 - found = dpu_rm_find_lms(rm, global_state, crtc_id, !topology->num_dspp, 435 - topology, lm_idx, pp_idx, dspp_idx); 436 - if (!found && !topology->num_dspp) 437 - found = dpu_rm_find_lms(rm, global_state, crtc_id, false, 438 - topology, lm_idx, pp_idx, dspp_idx); 439 - if (!found) { 411 + if (lm_count != topology->num_lm) { 440 412 DPU_DEBUG("unable to find appropriate mixers\n"); 441 413 return -ENAVAIL; 442 414 } 443 415 444 - for (i = 0; i < topology->num_lm; i++) { 416 + for (i = 0; i < lm_count; i++) { 445 417 global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id; 446 418 global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id; 447 419 global_state->dspp_to_crtc_id[dspp_idx[i]] =
+31 -12
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 584 584 * FIXME: Reconsider this if/when CMD mode handling is rewritten to use 585 585 * transfer time and data overhead as a starting point of the calculations. 586 586 */ 587 - static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode, 588 - const struct drm_dsc_config *dsc) 587 + static unsigned long 588 + dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode, 589 + const struct drm_dsc_config *dsc, 590 + bool is_bonded_dsi) 589 591 { 590 - int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc), 591 - dsc->bits_per_component * 3); 592 + int hdisplay, new_hdisplay, new_htotal; 592 593 593 - int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; 594 + /* 595 + * For bonded DSI, split hdisplay across two links and round up each 596 + * half separately, passing the full hdisplay would only round up once. 597 + * This also aligns with the hdisplay we program later in 598 + * dsi_timing_setup() 599 + */ 600 + hdisplay = mode->hdisplay; 601 + if (is_bonded_dsi) 602 + hdisplay /= 2; 603 + 604 + new_hdisplay = DIV_ROUND_UP(hdisplay * drm_dsc_get_bpp_int(dsc), 605 + dsc->bits_per_component * 3); 606 + 607 + if (is_bonded_dsi) 608 + new_hdisplay *= 2; 609 + 610 + new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; 594 611 595 612 return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal); 596 613 } ··· 620 603 pclk_rate = mode->clock * 1000u; 621 604 622 605 if (dsc) 623 - pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc); 606 + pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc, is_bonded_dsi); 624 607 625 608 /* 626 609 * For bonded DSI mode, the current DRM mode has the complete width of the ··· 1010 993 1011 994 if (msm_host->dsc) { 1012 995 struct drm_dsc_config *dsc = msm_host->dsc; 1013 - u32 bytes_per_pclk; 996 + u32 bits_per_pclk; 1014 997 1015 998 /* update dsc params with timing params */ 1016 999 if (!dsc || !mode->hdisplay || !mode->vdisplay) { ··· 1032 1015 1033 
1016 /* 1034 1017 * DPU sends 3 bytes per pclk cycle to DSI. If widebus is 1035 - * enabled, bus width is extended to 6 bytes. 1018 + * enabled, MDP always sends out 48-bit compressed data per 1019 + * pclk and on average, DSI consumes an amount of compressed 1020 + * data equivalent to the uncompressed pixel depth per pclk. 1036 1021 * 1037 1022 * Calculate the number of pclks needed to transmit one line of 1038 1023 * the compressed data. ··· 1046 1027 * unused anyway. 1047 1028 */ 1048 1029 h_total -= hdisplay; 1049 - if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1050 - bytes_per_pclk = 6; 1030 + if (wide_bus_enabled) 1031 + bits_per_pclk = mipi_dsi_pixel_format_to_bpp(msm_host->format); 1051 1032 else 1052 - bytes_per_pclk = 3; 1033 + bits_per_pclk = 24; 1053 1034 1054 - hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk); 1035 + hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc) * 8, bits_per_pclk); 1055 1036 1056 1037 h_total += hdisplay; 1057 1038 ha_end = ha_start + hdisplay;
+11 -11
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
··· 51 51 #define DSI_PHY_7NM_QUIRK_V4_3 BIT(3) 52 52 /* Hardware is V5.2 */ 53 53 #define DSI_PHY_7NM_QUIRK_V5_2 BIT(4) 54 - /* Hardware is V7.0 */ 55 - #define DSI_PHY_7NM_QUIRK_V7_0 BIT(5) 54 + /* Hardware is V7.2 */ 55 + #define DSI_PHY_7NM_QUIRK_V7_2 BIT(5) 56 56 57 57 struct dsi_pll_config { 58 58 bool enable_ssc; ··· 143 143 144 144 if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) { 145 145 config->pll_clock_inverters = 0x28; 146 - } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 146 + } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 147 147 if (pll_freq < 163000000ULL) 148 148 config->pll_clock_inverters = 0xa0; 149 149 else if (pll_freq < 175000000ULL) ··· 284 284 } 285 285 286 286 if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 287 - (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 287 + (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 288 288 if (pll->vco_current_rate < 1557000000ULL) 289 289 vco_config_1 = 0x08; 290 290 else ··· 699 699 case MSM_DSI_PHY_MASTER: 700 700 pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX]; 701 701 /* v7.0: Enable ATB_EN0 and alternate clock output to external phy */ 702 - if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) 702 + if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2) 703 703 writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5); 704 704 break; 705 705 case MSM_DSI_PHY_SLAVE: ··· 987 987 /* Request for REFGEN READY */ 988 988 if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) || 989 989 (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 990 - (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 990 + (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 991 991 writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10); 992 992 udelay(500); 993 993 } ··· 1021 1021 lane_ctrl0 = 0x1f; 1022 1022 } 1023 1023 1024 - if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 1024 + if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 1025 1025 if (phy->cphy_mode) { 1026 1026 /* TODO: different 
for second phy */ 1027 1027 vreg_ctrl_0 = 0x57; ··· 1097 1097 1098 1098 /* program CMN_CTRL_4 for minor_ver 2 chipsets*/ 1099 1099 if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 1100 - (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) || 1100 + (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2) || 1101 1101 (readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20) 1102 1102 writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4); 1103 1103 ··· 1213 1213 /* Turn off REFGEN Vote */ 1214 1214 if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) || 1215 1215 (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 1216 - (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 1216 + (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 1217 1217 writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10); 1218 1218 wmb(); 1219 1219 /* Delay to ensure HW removes vote before PHY shut down */ ··· 1502 1502 #endif 1503 1503 .io_start = { 0xae95000, 0xae97000 }, 1504 1504 .num_dsi_phy = 2, 1505 - .quirks = DSI_PHY_7NM_QUIRK_V7_0, 1505 + .quirks = DSI_PHY_7NM_QUIRK_V7_2, 1506 1506 }; 1507 1507 1508 1508 const struct msm_dsi_phy_cfg dsi_phy_3nm_kaanapali_cfgs = { ··· 1525 1525 #endif 1526 1526 .io_start = { 0x9ac1000, 0x9ac4000 }, 1527 1527 .num_dsi_phy = 2, 1528 - .quirks = DSI_PHY_7NM_QUIRK_V7_0, 1528 + .quirks = DSI_PHY_7NM_QUIRK_V7_2, 1529 1529 };
+6 -9
drivers/gpu/drm/sitronix/st7586.c
··· 347 347 if (ret) 348 348 return ret; 349 349 350 + /* 351 + * Override value set by mipi_dbi_spi_init(). This driver is a bit 352 + * non-standard, so best to set it explicitly here. 353 + */ 354 + dbi->write_memory_bpw = 8; 355 + 350 356 /* Cannot read from this controller via SPI */ 351 357 dbi->read_commands = NULL; 352 358 ··· 361 355 &st7586_mode, rotation, bufsize); 362 356 if (ret) 363 357 return ret; 364 - 365 - /* 366 - * we are using 8-bit data, so we are not actually swapping anything, 367 - * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the 368 - * right thing and not use 16-bit transfers (which results in swapped 369 - * bytes on little-endian systems and causes out of order data to be 370 - * sent to the display). 371 - */ 372 - dbi->swap_bytes = true; 373 358 374 359 drm_mode_config_reset(drm); 375 360
+24 -22
drivers/gpu/nova-core/gsp.rs
··· 47 47 unsafe impl<const NUM_ENTRIES: usize> AsBytes for PteArray<NUM_ENTRIES> {} 48 48 49 49 impl<const NUM_PAGES: usize> PteArray<NUM_PAGES> { 50 - /// Creates a new page table array mapping `NUM_PAGES` GSP pages starting at address `start`. 51 - fn new(start: DmaAddress) -> Result<Self> { 52 - let mut ptes = [0u64; NUM_PAGES]; 53 - for (i, pte) in ptes.iter_mut().enumerate() { 54 - *pte = start 55 - .checked_add(num::usize_as_u64(i) << GSP_PAGE_SHIFT) 56 - .ok_or(EOVERFLOW)?; 57 - } 58 - 59 - Ok(Self(ptes)) 50 + /// Returns the page table entry for `index`, for a mapping starting at `start`. 51 + // TODO: Replace with `IoView` projection once available. 52 + fn entry(start: DmaAddress, index: usize) -> Result<u64> { 53 + start 54 + .checked_add(num::usize_as_u64(index) << GSP_PAGE_SHIFT) 55 + .ok_or(EOVERFLOW) 60 56 } 61 57 } 62 58 ··· 82 86 NUM_PAGES * GSP_PAGE_SIZE, 83 87 GFP_KERNEL | __GFP_ZERO, 84 88 )?); 85 - let ptes = PteArray::<NUM_PAGES>::new(obj.0.dma_handle())?; 89 + 90 + let start_addr = obj.0.dma_handle(); 86 91 87 92 // SAFETY: `obj` has just been created and we are its sole user. 88 - unsafe { 89 - // Copy the self-mapping PTE at the expected location. 93 + let pte_region = unsafe { 90 94 obj.0 91 - .as_slice_mut(size_of::<u64>(), size_of_val(&ptes))? 92 - .copy_from_slice(ptes.as_bytes()) 95 + .as_slice_mut(size_of::<u64>(), NUM_PAGES * size_of::<u64>())? 93 96 }; 97 + 98 + // Write values one by one to avoid an on-stack instance of `PteArray`. 
99 + for (i, chunk) in pte_region.chunks_exact_mut(size_of::<u64>()).enumerate() { 100 + let pte_value = PteArray::<0>::entry(start_addr, i)?; 101 + 102 + chunk.copy_from_slice(&pte_value.to_ne_bytes()); 103 + } 94 104 95 105 Ok(obj) 96 106 } ··· 145 143 // _kgspInitLibosLoggingStructures (allocates memory for buffers) 146 144 // kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array) 147 145 dma_write!( 148 - libos[0] = LibosMemoryRegionInitArgument::new("LOGINIT", &loginit.0) 149 - )?; 146 + libos, [0]?, LibosMemoryRegionInitArgument::new("LOGINIT", &loginit.0) 147 + ); 150 148 dma_write!( 151 - libos[1] = LibosMemoryRegionInitArgument::new("LOGINTR", &logintr.0) 152 - )?; 153 - dma_write!(libos[2] = LibosMemoryRegionInitArgument::new("LOGRM", &logrm.0))?; 154 - dma_write!(rmargs[0].inner = fw::GspArgumentsCached::new(cmdq))?; 155 - dma_write!(libos[3] = LibosMemoryRegionInitArgument::new("RMARGS", rmargs))?; 149 + libos, [1]?, LibosMemoryRegionInitArgument::new("LOGINTR", &logintr.0) 150 + ); 151 + dma_write!(libos, [2]?, LibosMemoryRegionInitArgument::new("LOGRM", &logrm.0)); 152 + dma_write!(rmargs, [0]?.inner, fw::GspArgumentsCached::new(cmdq)); 153 + dma_write!(libos, [3]?, LibosMemoryRegionInitArgument::new("RMARGS", rmargs)); 156 154 }, 157 155 })) 158 156 })
+1 -1
drivers/gpu/nova-core/gsp/boot.rs
··· 157 157 158 158 let wpr_meta = 159 159 CoherentAllocation::<GspFwWprMeta>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?; 160 - dma_write!(wpr_meta[0] = GspFwWprMeta::new(&gsp_fw, &fb_layout))?; 160 + dma_write!(wpr_meta, [0]?, GspFwWprMeta::new(&gsp_fw, &fb_layout)); 161 161 162 162 self.cmdq 163 163 .send_command(bar, commands::SetSystemInfo::new(pdev))?;
+33 -60
drivers/gpu/nova-core/gsp/cmdq.rs
··· 2 2 3 3 use core::{ 4 4 cmp, 5 - mem, 6 - sync::atomic::{ 7 - fence, 8 - Ordering, // 9 - }, // 5 + mem, // 10 6 }; 11 7 12 8 use kernel::{ ··· 142 146 #[repr(C)] 143 147 // There is no struct defined for this in the open-gpu-kernel-source headers. 144 148 // Instead it is defined by code in `GspMsgQueuesInit()`. 145 - struct Msgq { 149 + // TODO: Revert to private once `IoView` projections replace the `gsp_mem` module. 150 + pub(super) struct Msgq { 146 151 /// Header for sending messages, including the write pointer. 147 - tx: MsgqTxHeader, 152 + pub(super) tx: MsgqTxHeader, 148 153 /// Header for receiving messages, including the read pointer. 149 - rx: MsgqRxHeader, 154 + pub(super) rx: MsgqRxHeader, 150 155 /// The message queue proper. 151 156 msgq: MsgqData, 152 157 } 153 158 154 159 /// Structure shared between the driver and the GSP and containing the command and message queues. 155 160 #[repr(C)] 156 - struct GspMem { 161 + // TODO: Revert to private once `IoView` projections replace the `gsp_mem` module. 162 + pub(super) struct GspMem { 157 163 /// Self-mapping page table entries. 158 - ptes: PteArray<{ GSP_PAGE_SIZE / size_of::<u64>() }>, 164 + ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>, 159 165 /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the 160 166 /// write and read pointers that the CPU updates. 161 167 /// 162 168 /// This member is read-only for the GSP. 163 - cpuq: Msgq, 169 + pub(super) cpuq: Msgq, 164 170 /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the 165 171 /// write and read pointers that the GSP updates. 166 172 /// 167 173 /// This member is read-only for the driver. 
168 - gspq: Msgq, 174 + pub(super) gspq: Msgq, 175 + } 176 + 177 + impl GspMem { 178 + const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>(); 169 179 } 170 180 171 181 // SAFETY: These structs don't meet the no-padding requirements of AsBytes but ··· 203 201 204 202 let gsp_mem = 205 203 CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?; 206 - dma_write!(gsp_mem[0].ptes = PteArray::new(gsp_mem.dma_handle())?)?; 207 - dma_write!(gsp_mem[0].cpuq.tx = MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES))?; 208 - dma_write!(gsp_mem[0].cpuq.rx = MsgqRxHeader::new())?; 204 + 205 + let start = gsp_mem.dma_handle(); 206 + // Write values one by one to avoid an on-stack instance of `PteArray`. 207 + for i in 0..GspMem::PTE_ARRAY_SIZE { 208 + dma_write!(gsp_mem, [0]?.ptes.0[i], PteArray::<0>::entry(start, i)?); 209 + } 210 + 211 + dma_write!( 212 + gsp_mem, 213 + [0]?.cpuq.tx, 214 + MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES) 215 + ); 216 + dma_write!(gsp_mem, [0]?.cpuq.rx, MsgqRxHeader::new()); 209 217 210 218 Ok(Self(gsp_mem)) 211 219 } ··· 329 317 // 330 318 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 331 319 fn gsp_write_ptr(&self) -> u32 { 332 - let gsp_mem = self.0.start_ptr(); 333 - 334 - // SAFETY: 335 - // - The 'CoherentAllocation' contains at least one object. 336 - // - By the invariants of `CoherentAllocation` the pointer is valid. 337 - (unsafe { (*gsp_mem).gspq.tx.write_ptr() } % MSGQ_NUM_PAGES) 320 + super::fw::gsp_mem::gsp_write_ptr(&self.0) 338 321 } 339 322 340 323 // Returns the index of the memory page the GSP will read the next command from. ··· 338 331 // 339 332 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 340 333 fn gsp_read_ptr(&self) -> u32 { 341 - let gsp_mem = self.0.start_ptr(); 342 - 343 - // SAFETY: 344 - // - The 'CoherentAllocation' contains at least one object. 345 - // - By the invariants of `CoherentAllocation` the pointer is valid. 
346 - (unsafe { (*gsp_mem).gspq.rx.read_ptr() } % MSGQ_NUM_PAGES) 334 + super::fw::gsp_mem::gsp_read_ptr(&self.0) 347 335 } 348 336 349 337 // Returns the index of the memory page the CPU can read the next message from. ··· 347 345 // 348 346 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 349 347 fn cpu_read_ptr(&self) -> u32 { 350 - let gsp_mem = self.0.start_ptr(); 351 - 352 - // SAFETY: 353 - // - The ['CoherentAllocation'] contains at least one object. 354 - // - By the invariants of CoherentAllocation the pointer is valid. 355 - (unsafe { (*gsp_mem).cpuq.rx.read_ptr() } % MSGQ_NUM_PAGES) 348 + super::fw::gsp_mem::cpu_read_ptr(&self.0) 356 349 } 357 350 358 351 // Informs the GSP that it can send `elem_count` new pages into the message queue. 359 352 fn advance_cpu_read_ptr(&mut self, elem_count: u32) { 360 - let rptr = self.cpu_read_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES; 361 - 362 - // Ensure read pointer is properly ordered. 363 - fence(Ordering::SeqCst); 364 - 365 - let gsp_mem = self.0.start_ptr_mut(); 366 - 367 - // SAFETY: 368 - // - The 'CoherentAllocation' contains at least one object. 369 - // - By the invariants of `CoherentAllocation` the pointer is valid. 370 - unsafe { (*gsp_mem).cpuq.rx.set_read_ptr(rptr) }; 353 + super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count) 371 354 } 372 355 373 356 // Returns the index of the memory page the CPU can write the next command to. ··· 361 374 // 362 375 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 363 376 fn cpu_write_ptr(&self) -> u32 { 364 - let gsp_mem = self.0.start_ptr(); 365 - 366 - // SAFETY: 367 - // - The 'CoherentAllocation' contains at least one object. 368 - // - By the invariants of `CoherentAllocation` the pointer is valid. 369 - (unsafe { (*gsp_mem).cpuq.tx.write_ptr() } % MSGQ_NUM_PAGES) 377 + super::fw::gsp_mem::cpu_write_ptr(&self.0) 370 378 } 371 379 372 380 // Informs the GSP that it can process `elem_count` new pages from the command queue. 
373 381 fn advance_cpu_write_ptr(&mut self, elem_count: u32) { 374 - let wptr = self.cpu_write_ptr().wrapping_add(elem_count) & MSGQ_NUM_PAGES; 375 - let gsp_mem = self.0.start_ptr_mut(); 376 - 377 - // SAFETY: 378 - // - The 'CoherentAllocation' contains at least one object. 379 - // - By the invariants of `CoherentAllocation` the pointer is valid. 380 - unsafe { (*gsp_mem).cpuq.tx.set_write_ptr(wptr) }; 381 - 382 - // Ensure all command data is visible before triggering the GSP read. 383 - fence(Ordering::SeqCst); 382 + super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count) 384 383 } 385 384 } 386 385
+69 -32
drivers/gpu/nova-core/gsp/fw.rs
··· 40 40 }, 41 41 }; 42 42 43 + // TODO: Replace with `IoView` projections once available; the `unwrap()` calls go away once we 44 + // switch to the new `dma::Coherent` API. 45 + pub(super) mod gsp_mem { 46 + use core::sync::atomic::{ 47 + fence, 48 + Ordering, // 49 + }; 50 + 51 + use kernel::{ 52 + dma::CoherentAllocation, 53 + dma_read, 54 + dma_write, 55 + prelude::*, // 56 + }; 57 + 58 + use crate::gsp::cmdq::{ 59 + GspMem, 60 + MSGQ_NUM_PAGES, // 61 + }; 62 + 63 + pub(in crate::gsp) fn gsp_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 64 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 65 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap() 66 + } 67 + 68 + pub(in crate::gsp) fn gsp_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 69 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 70 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap() 71 + } 72 + 73 + pub(in crate::gsp) fn cpu_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 74 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 75 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap() 76 + } 77 + 78 + pub(in crate::gsp) fn advance_cpu_read_ptr(qs: &CoherentAllocation<GspMem>, count: u32) { 79 + let rptr = cpu_read_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES; 80 + 81 + // Ensure read pointer is properly ordered. 82 + fence(Ordering::SeqCst); 83 + 84 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 85 + || -> Result { 86 + dma_write!(qs, [0]?.cpuq.rx.0.readPtr, rptr); 87 + Ok(()) 88 + }() 89 + .unwrap() 90 + } 91 + 92 + pub(in crate::gsp) fn cpu_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 93 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 
94 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap() 95 + } 96 + 97 + pub(in crate::gsp) fn advance_cpu_write_ptr(qs: &CoherentAllocation<GspMem>, count: u32) { 98 + let wptr = cpu_write_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES; 99 + 100 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 101 + || -> Result { 102 + dma_write!(qs, [0]?.cpuq.tx.0.writePtr, wptr); 103 + Ok(()) 104 + }() 105 + .unwrap(); 106 + 107 + // Ensure all command data is visible before triggering the GSP read. 108 + fence(Ordering::SeqCst); 109 + } 110 + } 111 + 43 112 /// Empty type to group methods related to heap parameters for running the GSP firmware. 44 113 enum GspFwHeapParams {} 45 114 ··· 777 708 entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(), 778 709 }) 779 710 } 780 - 781 - /// Returns the value of the write pointer for this queue. 782 - pub(crate) fn write_ptr(&self) -> u32 { 783 - let ptr = core::ptr::from_ref(&self.0.writePtr); 784 - 785 - // SAFETY: `ptr` is a valid pointer to a `u32`. 786 - unsafe { ptr.read_volatile() } 787 - } 788 - 789 - /// Sets the value of the write pointer for this queue. 790 - pub(crate) fn set_write_ptr(&mut self, val: u32) { 791 - let ptr = core::ptr::from_mut(&mut self.0.writePtr); 792 - 793 - // SAFETY: `ptr` is a valid pointer to a `u32`. 794 - unsafe { ptr.write_volatile(val) } 795 - } 796 711 } 797 712 798 713 // SAFETY: Padding is explicit and does not contain uninitialized data. ··· 791 738 /// Creates a new RX queue header. 792 739 pub(crate) fn new() -> Self { 793 740 Self(Default::default()) 794 - } 795 - 796 - /// Returns the value of the read pointer for this queue. 797 - pub(crate) fn read_ptr(&self) -> u32 { 798 - let ptr = core::ptr::from_ref(&self.0.readPtr); 799 - 800 - // SAFETY: `ptr` is a valid pointer to a `u32`. 801 - unsafe { ptr.read_volatile() } 802 - } 803 - 804 - /// Sets the value of the read pointer for this queue. 
805 - pub(crate) fn set_read_ptr(&mut self, val: u32) { 806 - let ptr = core::ptr::from_mut(&mut self.0.readPtr); 807 - 808 - // SAFETY: `ptr` is a valid pointer to a `u32`. 809 - unsafe { ptr.write_volatile(val) } 810 741 } 811 742 } 812 743
+2
drivers/hid/bpf/hid_bpf_dispatch.c
··· 444 444 (u64)(long)ctx, 445 445 true); /* prevent infinite recursions */ 446 446 447 + if (ret > size) 448 + ret = size; 447 449 if (ret > 0) 448 450 memcpy(buf, dma_data, ret); 449 451
+3 -2
drivers/hid/hid-appletb-kbd.c
··· 476 476 return 0; 477 477 } 478 478 479 - static int appletb_kbd_reset_resume(struct hid_device *hdev) 479 + static int appletb_kbd_resume(struct hid_device *hdev) 480 480 { 481 481 struct appletb_kbd *kbd = hid_get_drvdata(hdev); 482 482 ··· 500 500 .event = appletb_kbd_hid_event, 501 501 .input_configured = appletb_kbd_input_configured, 502 502 .suspend = pm_ptr(appletb_kbd_suspend), 503 - .reset_resume = pm_ptr(appletb_kbd_reset_resume), 503 + .resume = pm_ptr(appletb_kbd_resume), 504 + .reset_resume = pm_ptr(appletb_kbd_resume), 504 505 .driver.dev_groups = appletb_kbd_groups, 505 506 }; 506 507 module_hid_driver(appletb_kbd_hid_driver);
+3
drivers/hid/hid-asus.c
··· 1498 1498 USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X), 1499 1499 QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD }, 1500 1500 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 1501 + USB_DEVICE_ID_ASUSTEK_XGM_2022), 1502 + }, 1503 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 1501 1504 USB_DEVICE_ID_ASUSTEK_XGM_2023), 1502 1505 }, 1503 1506 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+4 -3
drivers/hid/hid-core.c
··· 2057 2057 rsize = max_buffer_size; 2058 2058 2059 2059 if (csize < rsize) { 2060 - dbg_hid("report %d is too short, (%d < %d)\n", report->id, 2061 - csize, rsize); 2062 - memset(cdata + csize, 0, rsize - csize); 2060 + hid_warn_ratelimited(hid, "Event data for report %d was too short (%d vs %d)\n", 2061 + report->id, rsize, csize); 2062 + ret = -EINVAL; 2063 + goto out; 2063 2064 } 2064 2065 2065 2066 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
+1 -2
drivers/hid/hid-ids.h
··· 229 229 #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c 230 230 #define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD 0x196b 231 231 #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869 232 + #define USB_DEVICE_ID_ASUSTEK_XGM_2022 0x1970 232 233 #define USB_DEVICE_ID_ASUSTEK_XGM_2023 0x1a9a 233 234 234 235 #define USB_VENDOR_ID_ATEN 0x0557 ··· 455 454 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 456 455 #define USB_DEVICE_ID_HP_X2 0x074d 457 456 #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 458 - #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 459 - #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 460 457 #define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81 461 458 462 459 #define USB_VENDOR_ID_ELECOM 0x056e
+11 -7
drivers/hid/hid-input.c
··· 354 354 #define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */ 355 355 #define HID_BATTERY_QUIRK_IGNORE (1 << 2) /* completely ignore the battery */ 356 356 #define HID_BATTERY_QUIRK_AVOID_QUERY (1 << 3) /* do not query the battery */ 357 + #define HID_BATTERY_QUIRK_DYNAMIC (1 << 4) /* report present only after life signs */ 357 358 358 359 static const struct hid_device_id hid_battery_quirks[] = { 359 360 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, ··· 387 386 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 388 387 USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD), 389 388 HID_BATTERY_QUIRK_IGNORE }, 390 - { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), 391 - HID_BATTERY_QUIRK_IGNORE }, 392 - { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), 393 - HID_BATTERY_QUIRK_IGNORE }, 394 389 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L), 395 390 HID_BATTERY_QUIRK_AVOID_QUERY }, 396 391 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW), ··· 399 402 * Elan HID touchscreens seem to all report a non present battery, 400 403 * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C and USB HID devices. 
401 404 */ 402 - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, 403 - { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, 405 + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC }, 406 + { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC }, 404 407 {} 405 408 }; 406 409 ··· 457 460 int ret = 0; 458 461 459 462 switch (prop) { 460 - case POWER_SUPPLY_PROP_PRESENT: 461 463 case POWER_SUPPLY_PROP_ONLINE: 462 464 val->intval = 1; 465 + break; 466 + 467 + case POWER_SUPPLY_PROP_PRESENT: 468 + val->intval = dev->battery_present; 463 469 break; 464 470 465 471 case POWER_SUPPLY_PROP_CAPACITY: ··· 577 577 if (quirks & HID_BATTERY_QUIRK_AVOID_QUERY) 578 578 dev->battery_avoid_query = true; 579 579 580 + dev->battery_present = (quirks & HID_BATTERY_QUIRK_DYNAMIC) ? false : true; 581 + 580 582 dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); 581 583 if (IS_ERR(dev->battery)) { 582 584 error = PTR_ERR(dev->battery); ··· 634 632 return; 635 633 636 634 if (hidinput_update_battery_charge_status(dev, usage, value)) { 635 + dev->battery_present = true; 637 636 power_supply_changed(dev->battery); 638 637 return; 639 638 } ··· 650 647 if (dev->battery_status != HID_BATTERY_REPORTED || 651 648 capacity != dev->battery_capacity || 652 649 ktime_after(ktime_get_coarse(), dev->battery_ratelimit_time)) { 650 + dev->battery_present = true; 653 651 dev->battery_capacity = capacity; 654 652 dev->battery_status = HID_BATTERY_REPORTED; 655 653 dev->battery_ratelimit_time =
+5 -1
drivers/hid/hid-logitech-hidpp.c
··· 4487 4487 if (!ret) 4488 4488 ret = hidpp_ff_init(hidpp, &data); 4489 4489 4490 - if (ret) 4490 + if (ret) { 4491 4491 hid_warn(hidpp->hid_dev, 4492 4492 "Unable to initialize force feedback support, errno %d\n", 4493 4493 ret); 4494 + ret = 0; 4495 + } 4494 4496 } 4495 4497 4496 4498 /* ··· 4670 4668 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb038) }, 4671 4669 { /* Slim Solar+ K980 Keyboard over Bluetooth */ 4672 4670 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb391) }, 4671 + { /* MX Master 4 mouse over Bluetooth */ 4672 + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb042) }, 4673 4673 {} 4674 4674 }; 4675 4675
+7
drivers/hid/hid-multitouch.c
··· 526 526 dev_warn(&hdev->dev, "failed to fetch feature %d\n", 527 527 report->id); 528 528 } else { 529 + /* The report ID in the request and the response should match */ 530 + if (report->id != buf[0]) { 531 + hid_err(hdev, "Returned feature report did not match the request\n"); 532 + goto free; 533 + } 534 + 529 535 ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf, 530 536 size, 0); 531 537 if (ret) 532 538 dev_warn(&hdev->dev, "failed to report feature\n"); 533 539 } 534 540 541 + free: 535 542 kfree(buf); 536 543 } 537 544
+1
drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
··· 127 127 hid->product = le16_to_cpu(qcdev->dev_desc.product_id); 128 128 snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quicki2c-hid", 129 129 hid->vendor, hid->product); 130 + strscpy(hid->phys, dev_name(qcdev->dev), sizeof(hid->phys)); 130 131 131 132 ret = hid_add_device(hid); 132 133 if (ret) {
+1
drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
··· 118 118 hid->product = le16_to_cpu(qsdev->dev_desc.product_id); 119 119 snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quickspi-hid", 120 120 hid->vendor, hid->product); 121 + strscpy(hid->phys, dev_name(qsdev->dev), sizeof(hid->phys)); 121 122 122 123 ret = hid_add_device(hid); 123 124 if (ret) {
+10
drivers/hid/wacom_wac.c
··· 1208 1208 1209 1209 switch (data[0]) { 1210 1210 case 0x04: 1211 + if (len < 32) { 1212 + dev_warn(wacom->pen_input->dev.parent, 1213 + "Report 0x04 too short: %zu bytes\n", len); 1214 + break; 1215 + } 1211 1216 wacom_intuos_bt_process_data(wacom, data + i); 1212 1217 i += 10; 1213 1218 fallthrough; 1214 1219 case 0x03: 1220 + if (i == 1 && len < 22) { 1221 + dev_warn(wacom->pen_input->dev.parent, 1222 + "Report 0x03 too short: %zu bytes\n", len); 1223 + break; 1224 + } 1215 1225 wacom_intuos_bt_process_data(wacom, data + i); 1216 1226 i += 10; 1217 1227 wacom_intuos_bt_process_data(wacom, data + i);
+2 -4
drivers/hwmon/Kconfig
··· 1493 1493 1494 1494 config SENSORS_LM75 1495 1495 tristate "National Semiconductor LM75 and compatibles" 1496 - depends on I2C 1497 - depends on I3C || !I3C 1496 + depends on I3C_OR_I2C 1498 1497 select REGMAP_I2C 1499 1498 select REGMAP_I3C if I3C 1500 1499 help ··· 2381 2382 2382 2383 config SENSORS_TMP108 2383 2384 tristate "Texas Instruments TMP108" 2384 - depends on I2C 2385 - depends on I3C || !I3C 2385 + depends on I3C_OR_I2C 2386 2386 select REGMAP_I2C 2387 2387 select REGMAP_I3C if I3C 2388 2388 help
+12
drivers/i3c/Kconfig
··· 22 22 if I3C 23 23 source "drivers/i3c/master/Kconfig" 24 24 endif # I3C 25 + 26 + config I3C_OR_I2C 27 + tristate 28 + default m if I3C=m 29 + default I2C 30 + help 31 + Device drivers using module_i3c_i2c_driver() can use either 32 + i2c or i3c hosts, but cannot be built-in for the kernel when 33 + CONFIG_I3C=m. 34 + 35 + Add 'depends on I2C_OR_I3C' in Kconfig for those drivers to 36 + get the correct dependencies.
+4 -2
drivers/i3c/master/dw-i3c-master.c
··· 1024 1024 master->free_pos &= ~BIT(pos); 1025 1025 } 1026 1026 1027 - writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 1027 + writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr) | DEV_ADDR_TABLE_SIR_REJECT, 1028 1028 master->regs + 1029 1029 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1030 1030 ··· 1053 1053 master->free_pos &= ~BIT(pos); 1054 1054 i3c_dev_set_master_data(dev, data); 1055 1055 1056 - writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr), 1056 + writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr) | DEV_ADDR_TABLE_SIR_REJECT, 1057 1057 master->regs + 1058 1058 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1059 1059 ··· 1659 1659 pm_runtime_get_noresume(&pdev->dev); 1660 1660 1661 1661 INIT_WORK(&master->hj_work, dw_i3c_hj_work); 1662 + 1663 + device_set_of_node_from_dev(&master->base.i2c.dev, &pdev->dev); 1662 1664 ret = i3c_master_register(&master->base, &pdev->dev, 1663 1665 &dw_mipi_i3c_ops, false); 1664 1666 if (ret)
+1
drivers/i3c/master/mipi-i3c-hci/cmd.h
··· 17 17 #define CMD_0_TOC W0_BIT_(31) 18 18 #define CMD_0_ROC W0_BIT_(30) 19 19 #define CMD_0_ATTR W0_MASK(2, 0) 20 + #define CMD_0_TID W0_MASK(6, 3) 20 21 21 22 /* 22 23 * Response Descriptor Structure
+3 -5
drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
··· 331 331 CMD_A0_ROC | CMD_A0_TOC; 332 332 xfer->cmd_desc[1] = 0; 333 333 xfer->completion = &done; 334 - hci->io->queue_xfer(hci, xfer, 1); 335 - if (!wait_for_completion_timeout(&done, HZ) && 336 - hci->io->dequeue_xfer(hci, xfer, 1)) { 337 - ret = -ETIME; 334 + xfer->timeout = HZ; 335 + ret = i3c_hci_process_xfer(hci, xfer, 1); 336 + if (ret) 338 337 break; 339 - } 340 338 if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER || 341 339 RESP_STATUS(xfer->response) == RESP_ERR_NACK) && 342 340 RESP_DATA_LENGTH(xfer->response) == 1) {
+3 -5
drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
··· 253 253 xfer[0].rnw = true; 254 254 xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8); 255 255 xfer[1].completion = &done; 256 + xfer[1].timeout = HZ; 256 257 257 258 for (;;) { 258 259 ret = i3c_master_get_free_addr(&hci->master, next_addr); ··· 273 272 CMD_A0_ASSIGN_ADDRESS(next_addr) | 274 273 CMD_A0_ROC | 275 274 CMD_A0_TOC; 276 - hci->io->queue_xfer(hci, xfer, 2); 277 - if (!wait_for_completion_timeout(&done, HZ) && 278 - hci->io->dequeue_xfer(hci, xfer, 2)) { 279 - ret = -ETIME; 275 + ret = i3c_hci_process_xfer(hci, xfer, 2); 276 + if (ret) 280 277 break; 281 - } 282 278 if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) { 283 279 ret = 0; /* no more devices to be assigned */ 284 280 break;
+90 -53
drivers/i3c/master/mipi-i3c-hci/core.c
··· 152 152 if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD) 153 153 amd_set_resp_buf_thld(hci); 154 154 155 - reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE); 155 + scoped_guard(spinlock_irqsave, &hci->lock) 156 + hci->irq_inactive = false; 157 + 158 + /* Enable bus with Hot-Join disabled */ 159 + reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL); 156 160 dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL)); 157 161 158 162 return 0; ··· 181 177 return ret; 182 178 } 183 179 180 + static int i3c_hci_software_reset(struct i3c_hci *hci) 181 + { 182 + u32 regval; 183 + int ret; 184 + 185 + /* 186 + * SOFT_RST must be clear before we write to it. 187 + * Then we must wait until it clears again. 188 + */ 189 + ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 190 + !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 191 + if (ret) { 192 + dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__); 193 + return ret; 194 + } 195 + 196 + reg_write(RESET_CONTROL, SOFT_RST); 197 + 198 + ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 199 + !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 200 + if (ret) { 201 + dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__); 202 + return ret; 203 + } 204 + 205 + return 0; 206 + } 207 + 184 208 void i3c_hci_sync_irq_inactive(struct i3c_hci *hci) 185 209 { 186 210 struct platform_device *pdev = to_platform_device(hci->master.dev.parent); 187 211 int irq = platform_get_irq(pdev, 0); 188 212 189 213 reg_write(INTR_SIGNAL_ENABLE, 0x0); 190 - hci->irq_inactive = true; 191 214 synchronize_irq(irq); 215 + scoped_guard(spinlock_irqsave, &hci->lock) 216 + hci->irq_inactive = true; 192 217 } 193 218 194 219 static void i3c_hci_bus_cleanup(struct i3c_master_controller *m) 195 220 { 196 221 struct i3c_hci *hci = to_i3c_hci(m); 197 222 198 - i3c_hci_bus_disable(hci); 223 + if (i3c_hci_bus_disable(hci)) 224 + i3c_hci_software_reset(hci); 199 225 hci->io->cleanup(hci); 200 226 } 201 227 ··· 244 210 
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci) 245 211 { 246 212 reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0)); 213 + } 214 + 215 + int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n) 216 + { 217 + struct completion *done = xfer[n - 1].completion; 218 + unsigned long timeout = xfer[n - 1].timeout; 219 + int ret; 220 + 221 + ret = hci->io->queue_xfer(hci, xfer, n); 222 + if (ret) 223 + return ret; 224 + 225 + if (!wait_for_completion_timeout(done, timeout)) { 226 + if (hci->io->dequeue_xfer(hci, xfer, n)) { 227 + dev_err(&hci->master.dev, "%s: timeout error\n", __func__); 228 + return -ETIMEDOUT; 229 + } 230 + return 0; 231 + } 232 + 233 + if (hci->io->handle_error) { 234 + bool error = false; 235 + 236 + for (int i = 0; i < n && !error; i++) 237 + error = RESP_STATUS(xfer[i].response); 238 + if (error) 239 + return hci->io->handle_error(hci, xfer, n); 240 + } 241 + 242 + return 0; 247 243 } 248 244 249 245 static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m, ··· 316 252 last = i - 1; 317 253 xfer[last].cmd_desc[0] |= CMD_0_TOC; 318 254 xfer[last].completion = &done; 255 + xfer[last].timeout = HZ; 319 256 320 257 if (prefixed) 321 258 xfer--; 322 259 323 - ret = hci->io->queue_xfer(hci, xfer, nxfers); 260 + ret = i3c_hci_process_xfer(hci, xfer, nxfers); 324 261 if (ret) 325 262 goto out; 326 - if (!wait_for_completion_timeout(&done, HZ) && 327 - hci->io->dequeue_xfer(hci, xfer, nxfers)) { 328 - ret = -ETIME; 329 - goto out; 330 - } 331 263 for (i = prefixed; i < nxfers; i++) { 332 264 if (ccc->rnw) 333 265 ccc->dests[i - prefixed].payload.len = ··· 394 334 last = i - 1; 395 335 xfer[last].cmd_desc[0] |= CMD_0_TOC; 396 336 xfer[last].completion = &done; 337 + xfer[last].timeout = HZ; 397 338 398 - ret = hci->io->queue_xfer(hci, xfer, nxfers); 339 + ret = i3c_hci_process_xfer(hci, xfer, nxfers); 399 340 if (ret) 400 341 goto out; 401 - if (!wait_for_completion_timeout(&done, HZ) && 402 - hci->io->dequeue_xfer(hci, 
xfer, nxfers)) { 403 - ret = -ETIME; 404 - goto out; 405 - } 406 342 for (i = 0; i < nxfers; i++) { 407 343 if (i3c_xfers[i].rnw) 408 344 i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response); ··· 438 382 last = i - 1; 439 383 xfer[last].cmd_desc[0] |= CMD_0_TOC; 440 384 xfer[last].completion = &done; 385 + xfer[last].timeout = m->i2c.timeout; 441 386 442 - ret = hci->io->queue_xfer(hci, xfer, nxfers); 387 + ret = i3c_hci_process_xfer(hci, xfer, nxfers); 443 388 if (ret) 444 389 goto out; 445 - if (!wait_for_completion_timeout(&done, m->i2c.timeout) && 446 - hci->io->dequeue_xfer(hci, xfer, nxfers)) { 447 - ret = -ETIME; 448 - goto out; 449 - } 450 390 for (i = 0; i < nxfers; i++) { 451 391 if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) { 452 392 ret = -EIO; ··· 618 566 irqreturn_t result = IRQ_NONE; 619 567 u32 val; 620 568 569 + guard(spinlock)(&hci->lock); 570 + 621 571 /* 622 572 * The IRQ can be shared, so the handler may be called when the IRQ is 623 573 * due to a different device. That could happen when runtime suspended, ··· 653 599 result = IRQ_HANDLED; 654 600 655 601 return result; 656 - } 657 - 658 - static int i3c_hci_software_reset(struct i3c_hci *hci) 659 - { 660 - u32 regval; 661 - int ret; 662 - 663 - /* 664 - * SOFT_RST must be clear before we write to it. 665 - * Then we must wait until it clears again. 
666 - */ 667 - ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 668 - !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 669 - if (ret) { 670 - dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__); 671 - return ret; 672 - } 673 - 674 - reg_write(RESET_CONTROL, SOFT_RST); 675 - 676 - ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 677 - !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 678 - if (ret) { 679 - dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__); 680 - return ret; 681 - } 682 - 683 - return 0; 684 602 } 685 603 686 604 static inline bool is_version_1_1_or_newer(struct i3c_hci *hci) ··· 765 739 int ret; 766 740 767 741 ret = i3c_hci_bus_disable(hci); 768 - if (ret) 742 + if (ret) { 743 + /* Fall back to software reset to disable the bus */ 744 + ret = i3c_hci_software_reset(hci); 745 + i3c_hci_sync_irq_inactive(hci); 769 746 return ret; 747 + } 770 748 771 749 hci->io->suspend(hci); 772 750 ··· 790 760 791 761 mipi_i3c_hci_dat_v1.restore(hci); 792 762 793 - hci->irq_inactive = false; 794 - 795 763 hci->io->resume(hci); 796 764 797 - reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE); 765 + scoped_guard(spinlock_irqsave, &hci->lock) 766 + hci->irq_inactive = false; 767 + 768 + /* Enable bus with Hot-Join disabled */ 769 + reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL); 798 770 799 771 return 0; 800 772 } ··· 956 924 if (!hci) 957 925 return -ENOMEM; 958 926 927 + spin_lock_init(&hci->lock); 928 + mutex_init(&hci->control_mutex); 929 + 959 930 /* 960 931 * Multi-bus instances share the same MMIO address range, but not 961 932 * necessarily in separate contiguous sub-ranges. To avoid overlapping ··· 984 949 ret = i3c_hci_init(hci); 985 950 if (ret) 986 951 return ret; 952 + 953 + hci->irq_inactive = true; 987 954 988 955 irq = platform_get_irq(pdev, 0); 989 956 ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
+83 -73
drivers/i3c/master/mipi-i3c-hci/dma.c
··· 129 129 dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma; 130 130 unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total; 131 131 unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz; 132 - unsigned int done_ptr, ibi_chunk_ptr; 132 + unsigned int done_ptr, ibi_chunk_ptr, xfer_space; 133 133 struct hci_xfer **src_xfers; 134 - spinlock_t lock; 135 134 struct completion op_done; 136 135 }; 137 136 ··· 260 261 261 262 rh->done_ptr = 0; 262 263 rh->ibi_chunk_ptr = 0; 264 + rh->xfer_space = rh->xfer_entries; 263 265 } 264 266 265 267 static void hci_dma_init_rings(struct i3c_hci *hci) ··· 344 344 goto err_out; 345 345 rh = &rings->headers[i]; 346 346 rh->regs = hci->base_regs + offset; 347 - spin_lock_init(&rh->lock); 348 347 init_completion(&rh->op_done); 349 348 350 349 rh->xfer_entries = XFER_RING_ENTRIES; ··· 438 439 } 439 440 } 440 441 442 + static struct i3c_dma *hci_dma_map_xfer(struct device *dev, struct hci_xfer *xfer) 443 + { 444 + enum dma_data_direction dir = xfer->rnw ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; 445 + bool need_bounce = device_iommu_mapped(dev) && xfer->rnw && (xfer->data_len & 3); 446 + 447 + return i3c_master_dma_map_single(dev, xfer->data, xfer->data_len, need_bounce, dir); 448 + } 449 + 450 + static int hci_dma_map_xfer_list(struct i3c_hci *hci, struct device *dev, 451 + struct hci_xfer *xfer_list, int n) 452 + { 453 + for (int i = 0; i < n; i++) { 454 + struct hci_xfer *xfer = xfer_list + i; 455 + 456 + if (!xfer->data) 457 + continue; 458 + 459 + xfer->dma = hci_dma_map_xfer(dev, xfer); 460 + if (!xfer->dma) { 461 + hci_dma_unmap_xfer(hci, xfer_list, i); 462 + return -ENOMEM; 463 + } 464 + } 465 + 466 + return 0; 467 + } 468 + 441 469 static int hci_dma_queue_xfer(struct i3c_hci *hci, 442 470 struct hci_xfer *xfer_list, int n) 443 471 { 444 472 struct hci_rings_data *rings = hci->io_data; 445 473 struct hci_rh_data *rh; 446 474 unsigned int i, ring, enqueue_ptr; 447 - u32 op1_val, op2_val; 475 + u32 op1_val; 476 + int ret; 477 + 478 + ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n); 479 + if (ret) 480 + return ret; 448 481 449 482 /* For now we only use ring 0 */ 450 483 ring = 0; 451 484 rh = &rings->headers[ring]; 485 + 486 + spin_lock_irq(&hci->lock); 487 + 488 + if (n > rh->xfer_space) { 489 + spin_unlock_irq(&hci->lock); 490 + hci_dma_unmap_xfer(hci, xfer_list, n); 491 + return -EBUSY; 492 + } 452 493 453 494 op1_val = rh_reg_read(RING_OPERATION1); 454 495 enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val); 455 496 for (i = 0; i < n; i++) { 456 497 struct hci_xfer *xfer = xfer_list + i; 457 498 u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr; 458 - enum dma_data_direction dir = xfer->rnw ? 
DMA_FROM_DEVICE : 459 - DMA_TO_DEVICE; 460 - bool need_bounce; 461 499 462 500 /* store cmd descriptor */ 463 501 *ring_data++ = xfer->cmd_desc[0]; ··· 513 477 514 478 /* 2nd and 3rd words of Data Buffer Descriptor Structure */ 515 479 if (xfer->data) { 516 - need_bounce = device_iommu_mapped(rings->sysdev) && 517 - xfer->rnw && 518 - xfer->data_len != ALIGN(xfer->data_len, 4); 519 - xfer->dma = i3c_master_dma_map_single(rings->sysdev, 520 - xfer->data, 521 - xfer->data_len, 522 - need_bounce, 523 - dir); 524 - if (!xfer->dma) { 525 - hci_dma_unmap_xfer(hci, xfer_list, i); 526 - return -ENOMEM; 527 - } 528 480 *ring_data++ = lower_32_bits(xfer->dma->addr); 529 481 *ring_data++ = upper_32_bits(xfer->dma->addr); 530 482 } else { ··· 527 503 xfer->ring_entry = enqueue_ptr; 528 504 529 505 enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries; 530 - 531 - /* 532 - * We may update the hardware view of the enqueue pointer 533 - * only if we didn't reach its dequeue pointer. 534 - */ 535 - op2_val = rh_reg_read(RING_OPERATION2); 536 - if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) { 537 - /* the ring is full */ 538 - hci_dma_unmap_xfer(hci, xfer_list, i + 1); 539 - return -EBUSY; 540 - } 541 506 } 542 507 543 - /* take care to update the hardware enqueue pointer atomically */ 544 - spin_lock_irq(&rh->lock); 545 - op1_val = rh_reg_read(RING_OPERATION1); 508 + rh->xfer_space -= n; 509 + 546 510 op1_val &= ~RING_OP1_CR_ENQ_PTR; 547 511 op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr); 548 512 rh_reg_write(RING_OPERATION1, op1_val); 549 - spin_unlock_irq(&rh->lock); 513 + spin_unlock_irq(&hci->lock); 550 514 551 515 return 0; 552 516 } ··· 546 534 struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number]; 547 535 unsigned int i; 548 536 bool did_unqueue = false; 537 + u32 ring_status; 549 538 550 - /* stop the ring */ 551 - rh_reg_write(RING_CONTROL, RING_CTRL_ABORT); 552 - if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) { 553 - /* 554 - * 
We're deep in it if ever this condition is ever met. 555 - * Hardware might still be writing to memory, etc. 556 - */ 557 - dev_crit(&hci->master.dev, "unable to abort the ring\n"); 558 - WARN_ON(1); 539 + guard(mutex)(&hci->control_mutex); 540 + 541 + ring_status = rh_reg_read(RING_STATUS); 542 + if (ring_status & RING_STATUS_RUNNING) { 543 + /* stop the ring */ 544 + reinit_completion(&rh->op_done); 545 + rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_ABORT); 546 + wait_for_completion_timeout(&rh->op_done, HZ); 547 + ring_status = rh_reg_read(RING_STATUS); 548 + if (ring_status & RING_STATUS_RUNNING) { 549 + /* 550 + * We're deep in it if ever this condition is ever met. 551 + * Hardware might still be writing to memory, etc. 552 + */ 553 + dev_crit(&hci->master.dev, "unable to abort the ring\n"); 554 + WARN_ON(1); 555 + } 559 556 } 557 + 558 + spin_lock_irq(&hci->lock); 560 559 561 560 for (i = 0; i < n; i++) { 562 561 struct hci_xfer *xfer = xfer_list + i; ··· 582 559 u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx; 583 560 584 561 /* store no-op cmd descriptor */ 585 - *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7); 562 + *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7) | FIELD_PREP(CMD_0_TID, xfer->cmd_tid); 586 563 *ring_data++ = 0; 587 564 if (hci->cmd == &mipi_i3c_hci_cmd_v2) { 588 565 *ring_data++ = 0; ··· 600 577 } 601 578 602 579 /* restart the ring */ 580 + mipi_i3c_hci_resume(hci); 603 581 rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE); 582 + rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP); 583 + 584 + spin_unlock_irq(&hci->lock); 604 585 605 586 return did_unqueue; 587 + } 588 + 589 + static int hci_dma_handle_error(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n) 590 + { 591 + return hci_dma_dequeue_xfer(hci, xfer_list, n) ? 
-EIO : 0; 606 592 } 607 593 608 594 static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh) 609 595 { 610 596 u32 op1_val, op2_val, resp, *ring_resp; 611 597 unsigned int tid, done_ptr = rh->done_ptr; 598 + unsigned int done_cnt = 0; 612 599 struct hci_xfer *xfer; 613 600 614 601 for (;;) { ··· 636 603 dev_dbg(&hci->master.dev, "orphaned ring entry"); 637 604 } else { 638 605 hci_dma_unmap_xfer(hci, xfer, 1); 606 + rh->src_xfers[done_ptr] = NULL; 639 607 xfer->ring_entry = -1; 640 608 xfer->response = resp; 641 609 if (tid != xfer->cmd_tid) { ··· 651 617 652 618 done_ptr = (done_ptr + 1) % rh->xfer_entries; 653 619 rh->done_ptr = done_ptr; 620 + done_cnt += 1; 654 621 } 655 622 656 - /* take care to update the software dequeue pointer atomically */ 657 - spin_lock(&rh->lock); 623 + rh->xfer_space += done_cnt; 658 624 op1_val = rh_reg_read(RING_OPERATION1); 659 625 op1_val &= ~RING_OP1_CR_SW_DEQ_PTR; 660 626 op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr); 661 627 rh_reg_write(RING_OPERATION1, op1_val); 662 - spin_unlock(&rh->lock); 663 628 } 664 629 665 630 static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev, ··· 838 805 i3c_master_queue_ibi(dev, slot); 839 806 840 807 done: 841 - /* take care to update the ibi dequeue pointer atomically */ 842 - spin_lock(&rh->lock); 843 808 op1_val = rh_reg_read(RING_OPERATION1); 844 809 op1_val &= ~RING_OP1_IBI_DEQ_PTR; 845 810 op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr); 846 811 rh_reg_write(RING_OPERATION1, op1_val); 847 - spin_unlock(&rh->lock); 848 812 849 813 /* update the chunk pointer */ 850 814 rh->ibi_chunk_ptr += ibi_chunks; ··· 875 845 hci_dma_xfer_done(hci, rh); 876 846 if (status & INTR_RING_OP) 877 847 complete(&rh->op_done); 878 - 879 - if (status & INTR_TRANSFER_ABORT) { 880 - u32 ring_status; 881 - 882 - dev_notice_ratelimited(&hci->master.dev, 883 - "Ring %d: Transfer Aborted\n", i); 884 - mipi_i3c_hci_resume(hci); 885 - ring_status = 
rh_reg_read(RING_STATUS); 886 - if (!(ring_status & RING_STATUS_RUNNING) && 887 - status & INTR_TRANSFER_COMPLETION && 888 - status & INTR_TRANSFER_ERR) { 889 - /* 890 - * Ring stop followed by run is an Intel 891 - * specific required quirk after resuming the 892 - * halted controller. Do it only when the ring 893 - * is not in running state after a transfer 894 - * error. 895 - */ 896 - rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE); 897 - rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | 898 - RING_CTRL_RUN_STOP); 899 - } 900 - } 848 + if (status & INTR_TRANSFER_ABORT) 849 + dev_dbg(&hci->master.dev, "Ring %d: Transfer Aborted\n", i); 901 850 if (status & INTR_IBI_RING_FULL) 902 851 dev_err_ratelimited(&hci->master.dev, 903 852 "Ring %d: IBI Ring Full Condition\n", i); ··· 892 883 .cleanup = hci_dma_cleanup, 893 884 .queue_xfer = hci_dma_queue_xfer, 894 885 .dequeue_xfer = hci_dma_dequeue_xfer, 886 + .handle_error = hci_dma_handle_error, 895 887 .irq_handler = hci_dma_irq_handler, 896 888 .request_ibi = hci_dma_request_ibi, 897 889 .free_ibi = hci_dma_free_ibi,
+5
drivers/i3c/master/mipi-i3c-hci/hci.h
··· 50 50 const struct hci_io_ops *io; 51 51 void *io_data; 52 52 const struct hci_cmd_ops *cmd; 53 + spinlock_t lock; 54 + struct mutex control_mutex; 53 55 atomic_t next_cmd_tid; 54 56 bool irq_inactive; 55 57 u32 caps; ··· 89 87 unsigned int data_len; 90 88 unsigned int cmd_tid; 91 89 struct completion *completion; 90 + unsigned long timeout; 92 91 union { 93 92 struct { 94 93 /* PIO specific */ ··· 123 120 bool (*irq_handler)(struct i3c_hci *hci); 124 121 int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 125 122 bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 123 + int (*handle_error)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 126 124 int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev, 127 125 const struct i3c_ibi_setup *req); 128 126 void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev); ··· 158 154 void amd_set_od_pp_timing(struct i3c_hci *hci); 159 155 void amd_set_resp_buf_thld(struct i3c_hci *hci); 160 156 void i3c_hci_sync_irq_inactive(struct i3c_hci *hci); 157 + int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 161 158 162 159 #endif
+5 -11
drivers/i3c/master/mipi-i3c-hci/pio.c
··· 123 123 }; 124 124 125 125 struct hci_pio_data { 126 - spinlock_t lock; 127 126 struct hci_xfer *curr_xfer, *xfer_queue; 128 127 struct hci_xfer *curr_rx, *rx_queue; 129 128 struct hci_xfer *curr_tx, *tx_queue; ··· 211 212 return -ENOMEM; 212 213 213 214 hci->io_data = pio; 214 - spin_lock_init(&pio->lock); 215 215 216 216 __hci_pio_init(hci, &size_val); 217 217 ··· 629 631 xfer[i].data_left = xfer[i].data_len; 630 632 } 631 633 632 - spin_lock_irq(&pio->lock); 634 + spin_lock_irq(&hci->lock); 633 635 prev_queue_tail = pio->xfer_queue; 634 636 pio->xfer_queue = &xfer[n - 1]; 635 637 if (pio->curr_xfer) { ··· 643 645 pio_reg_read(INTR_STATUS), 644 646 pio_reg_read(INTR_SIGNAL_ENABLE)); 645 647 } 646 - spin_unlock_irq(&pio->lock); 648 + spin_unlock_irq(&hci->lock); 647 649 return 0; 648 650 } 649 651 ··· 714 716 struct hci_pio_data *pio = hci->io_data; 715 717 int ret; 716 718 717 - spin_lock_irq(&pio->lock); 719 + spin_lock_irq(&hci->lock); 718 720 dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n, 719 721 pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE)); 720 722 dev_dbg(&hci->master.dev, "main_status = %#x/%#x", 721 723 readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28)); 722 724 723 725 ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n); 724 - spin_unlock_irq(&pio->lock); 726 + spin_unlock_irq(&hci->lock); 725 727 return ret; 726 728 } 727 729 ··· 1014 1016 struct hci_pio_data *pio = hci->io_data; 1015 1017 u32 status; 1016 1018 1017 - spin_lock(&pio->lock); 1018 1019 status = pio_reg_read(INTR_STATUS); 1019 1020 dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x", 1020 1021 status, pio->enabled_irqs); 1021 1022 status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS; 1022 - if (!status) { 1023 - spin_unlock(&pio->lock); 1023 + if (!status) 1024 1024 return false; 1025 - } 1026 1025 1027 1026 if (status & STAT_IBI_STATUS_THLD) 1028 1027 hci_pio_process_ibi(hci, pio); ··· 1053 1058 pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs); 1054 
1059 dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x", 1055 1060 pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE)); 1056 - spin_unlock(&pio->lock); 1057 1061 return true; 1058 1062 } 1059 1063
+11 -2
drivers/iio/adc/ad7768-1.c
··· 531 531 return ret; 532 532 } 533 533 534 - static void ad7768_fill_scale_tbl(struct iio_dev *dev) 534 + static int ad7768_fill_scale_tbl(struct iio_dev *dev) 535 535 { 536 536 struct ad7768_state *st = iio_priv(dev); 537 537 const struct iio_scan_type *scan_type; ··· 541 541 u64 tmp2; 542 542 543 543 scan_type = iio_get_current_scan_type(dev, &dev->channels[0]); 544 + if (IS_ERR(scan_type)) { 545 + dev_err(&st->spi->dev, "Failed to get scan type.\n"); 546 + return PTR_ERR(scan_type); 547 + } 548 + 544 549 if (scan_type->sign == 's') 545 550 val2 = scan_type->realbits - 1; 546 551 else ··· 570 565 st->scale_tbl[i][0] = tmp0; /* Integer part */ 571 566 st->scale_tbl[i][1] = abs(tmp1); /* Fractional part */ 572 567 } 568 + 569 + return 0; 573 570 } 574 571 575 572 static int ad7768_set_sinc3_dec_rate(struct ad7768_state *st, ··· 676 669 } 677 670 678 671 /* Update scale table: scale values vary according to the precision */ 679 - ad7768_fill_scale_tbl(dev); 672 + ret = ad7768_fill_scale_tbl(dev); 673 + if (ret) 674 + return ret; 680 675 681 676 ad7768_fill_samp_freq_tbl(st); 682 677
+1 -1
drivers/iio/chemical/bme680_core.c
··· 613 613 * + heater duration 614 614 */ 615 615 int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press + 616 - data->oversampling_humid) * 1936) + (477 * 4) + 616 + data->oversampling_humid) * 1963) + (477 * 4) + 617 617 (477 * 5) + 1000 + (data->heater_dur * 1000); 618 618 619 619 fsleep(wait_eoc_us);
+1 -1
drivers/iio/chemical/sps30_i2c.c
··· 171 171 if (!sps30_i2c_meas_ready(state)) 172 172 return -ETIMEDOUT; 173 173 174 - return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(num) * num); 174 + return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(*meas) * num); 175 175 } 176 176 177 177 static int sps30_i2c_clean_fan(struct sps30_state *state)
+1 -1
drivers/iio/chemical/sps30_serial.c
··· 303 303 if (msleep_interruptible(1000)) 304 304 return -EINTR; 305 305 306 - ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(num)); 306 + ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(*meas)); 307 307 if (ret < 0) 308 308 return ret; 309 309 /* if measurements aren't ready sensor returns empty frame */
+1 -1
drivers/iio/dac/ds4424.c
··· 140 140 141 141 switch (mask) { 142 142 case IIO_CHAN_INFO_RAW: 143 - if (val < S8_MIN || val > S8_MAX) 143 + if (val <= S8_MIN || val > S8_MAX) 144 144 return -EINVAL; 145 145 146 146 if (val > 0) {
+1 -1
drivers/iio/frequency/adf4377.c
··· 508 508 return ret; 509 509 510 510 return regmap_read_poll_timeout(st->regmap, 0x0, read_val, 511 - !(read_val & (ADF4377_0000_SOFT_RESET_R_MSK | 511 + !(read_val & (ADF4377_0000_SOFT_RESET_MSK | 512 512 ADF4377_0000_SOFT_RESET_R_MSK)), 200, 200 * 100); 513 513 } 514 514
+13 -5
drivers/iio/gyro/mpu3050-core.c
··· 322 322 } 323 323 case IIO_CHAN_INFO_RAW: 324 324 /* Resume device */ 325 - pm_runtime_get_sync(mpu3050->dev); 325 + ret = pm_runtime_resume_and_get(mpu3050->dev); 326 + if (ret) 327 + return ret; 326 328 mutex_lock(&mpu3050->lock); 327 329 328 330 ret = mpu3050_set_8khz_samplerate(mpu3050); ··· 649 647 static int mpu3050_buffer_preenable(struct iio_dev *indio_dev) 650 648 { 651 649 struct mpu3050 *mpu3050 = iio_priv(indio_dev); 650 + int ret; 652 651 653 - pm_runtime_get_sync(mpu3050->dev); 652 + ret = pm_runtime_resume_and_get(mpu3050->dev); 653 + if (ret) 654 + return ret; 654 655 655 656 /* Unless we have OUR trigger active, run at full speed */ 656 - if (!mpu3050->hw_irq_trigger) 657 - return mpu3050_set_8khz_samplerate(mpu3050); 657 + if (!mpu3050->hw_irq_trigger) { 658 + ret = mpu3050_set_8khz_samplerate(mpu3050); 659 + if (ret) 660 + pm_runtime_put_autosuspend(mpu3050->dev); 661 + } 658 662 659 - return 0; 663 + return ret; 660 664 } 661 665 662 666 static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
+1 -2
drivers/iio/gyro/mpu3050-i2c.c
··· 19 19 struct mpu3050 *mpu3050 = i2c_mux_priv(mux); 20 20 21 21 /* Just power up the device, that is all that is needed */ 22 - pm_runtime_get_sync(mpu3050->dev); 23 - return 0; 22 + return pm_runtime_resume_and_get(mpu3050->dev); 24 23 } 25 24 26 25 static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+1 -1
drivers/iio/imu/adis.c
··· 526 526 527 527 adis->spi = spi; 528 528 adis->data = data; 529 - if (!adis->ops->write && !adis->ops->read && !adis->ops->reset) 529 + if (!adis->ops) 530 530 adis->ops = &adis_default_ops; 531 531 else if (!adis->ops->write || !adis->ops->read || !adis->ops->reset) 532 532 return -EINVAL;
+2
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
··· 651 651 return -EINVAL; 652 652 653 653 conf.odr = inv_icm42600_accel_odr_conv[idx / 2]; 654 + if (conf.odr == st->conf.accel.odr) 655 + return 0; 654 656 655 657 pm_runtime_get_sync(dev); 656 658 mutex_lock(&st->lock);
+4
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
··· 371 371 static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) 372 372 { 373 373 struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); 374 + struct inv_icm42600_sensor_state *sensor_st = iio_priv(indio_dev); 375 + struct inv_sensors_timestamp *ts = &sensor_st->ts; 374 376 struct device *dev = regmap_get_device(st->map); 375 377 unsigned int sensor; 376 378 unsigned int *watermark; ··· 393 391 } 394 392 395 393 mutex_lock(&st->lock); 394 + 395 + inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); 396 396 397 397 ret = inv_icm42600_buffer_set_fifo_en(st, st->fifo.en & ~sensor); 398 398 if (ret)
+2
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
··· 358 358 return -EINVAL; 359 359 360 360 conf.odr = inv_icm42600_gyro_odr_conv[idx / 2]; 361 + if (conf.odr == st->conf.gyro.odr) 362 + return 0; 361 363 362 364 pm_runtime_get_sync(dev); 363 365 mutex_lock(&st->lock);
+1 -1
drivers/iio/imu/inv_icm45600/inv_icm45600.h
··· 205 205 #define INV_ICM45600_SPI_SLEW_RATE_38NS 0 206 206 207 207 #define INV_ICM45600_REG_INT1_CONFIG2 0x0018 208 - #define INV_ICM45600_INT1_CONFIG2_PUSH_PULL BIT(2) 208 + #define INV_ICM45600_INT1_CONFIG2_OPEN_DRAIN BIT(2) 209 209 #define INV_ICM45600_INT1_CONFIG2_LATCHED BIT(1) 210 210 #define INV_ICM45600_INT1_CONFIG2_ACTIVE_HIGH BIT(0) 211 211 #define INV_ICM45600_INT1_CONFIG2_ACTIVE_LOW 0x00
+8 -3
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
··· 637 637 break; 638 638 } 639 639 640 - if (!open_drain) 641 - val |= INV_ICM45600_INT1_CONFIG2_PUSH_PULL; 640 + if (open_drain) 641 + val |= INV_ICM45600_INT1_CONFIG2_OPEN_DRAIN; 642 642 643 643 ret = regmap_write(st->map, INV_ICM45600_REG_INT1_CONFIG2, val); 644 644 if (ret) ··· 744 744 */ 745 745 fsleep(5 * USEC_PER_MSEC); 746 746 747 + /* set pm_runtime active early for disable vddio resource cleanup */ 748 + ret = pm_runtime_set_active(dev); 749 + if (ret) 750 + return ret; 751 + 747 752 ret = inv_icm45600_enable_regulator_vddio(st); 748 753 if (ret) 749 754 return ret; ··· 781 776 if (ret) 782 777 return ret; 783 778 784 - ret = devm_pm_runtime_set_active_enabled(dev); 779 + ret = devm_pm_runtime_enable(dev); 785 780 if (ret) 786 781 return ret; 787 782
+8
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 1943 1943 irq_type); 1944 1944 return -EINVAL; 1945 1945 } 1946 + 1947 + /* 1948 + * Acking interrupts by status register does not work reliably 1949 + * but seem to work when this bit is set. 1950 + */ 1951 + if (st->chip_type == INV_MPU9150) 1952 + st->irq_mask |= INV_MPU6050_INT_RD_CLEAR; 1953 + 1946 1954 device_set_wakeup_capable(dev, true); 1947 1955 1948 1956 st->vdd_supply = devm_regulator_get(dev, "vdd");
+2
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
··· 390 390 /* enable level triggering */ 391 391 #define INV_MPU6050_LATCH_INT_EN 0x20 392 392 #define INV_MPU6050_BIT_BYPASS_EN 0x2 393 + /* allow acking interrupts by any register read */ 394 + #define INV_MPU6050_INT_RD_CLEAR 0x10 393 395 394 396 /* Allowed timestamp period jitter in percent */ 395 397 #define INV_MPU6050_TS_PERIOD_JITTER 4
+4 -1
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
··· 248 248 switch (st->chip_type) { 249 249 case INV_MPU6000: 250 250 case INV_MPU6050: 251 - case INV_MPU9150: 252 251 /* 253 252 * WoM is not supported and interrupt status read seems to be broken for 254 253 * some chips. Since data ready is the only interrupt, bypass interrupt ··· 256 257 wom_bits = 0; 257 258 int_status = INV_MPU6050_BIT_RAW_DATA_RDY_INT; 258 259 goto data_ready_interrupt; 260 + case INV_MPU9150: 261 + /* IRQ needs to be acked */ 262 + wom_bits = 0; 263 + break; 259 264 case INV_MPU6500: 260 265 case INV_MPU6515: 261 266 case INV_MPU6880:
+4 -2
drivers/iio/industrialio-buffer.c
··· 228 228 written = 0; 229 229 add_wait_queue(&rb->pollq, &wait); 230 230 do { 231 - if (!indio_dev->info) 232 - return -ENODEV; 231 + if (!indio_dev->info) { 232 + ret = -ENODEV; 233 + break; 234 + } 233 235 234 236 if (!iio_buffer_space_available(rb)) { 235 237 if (signal_pending(current)) {
+1 -1
drivers/iio/light/bh1780.c
··· 109 109 case IIO_LIGHT: 110 110 pm_runtime_get_sync(&bh1780->client->dev); 111 111 value = bh1780_read_word(bh1780, BH1780_REG_DLOW); 112 + pm_runtime_put_autosuspend(&bh1780->client->dev); 112 113 if (value < 0) 113 114 return value; 114 - pm_runtime_put_autosuspend(&bh1780->client->dev); 115 115 *val = value; 116 116 117 117 return IIO_VAL_INT;
+1 -2
drivers/iio/magnetometer/Kconfig
··· 143 143 tristate "MEMSIC MMC5633 3-axis magnetic sensor" 144 144 select REGMAP_I2C 145 145 select REGMAP_I3C if I3C 146 - depends on I2C 147 - depends on I3C || !I3C 146 + depends on I3C_OR_I2C 148 147 help 149 148 Say yes here to build support for the MEMSIC MMC5633 3-axis 150 149 magnetic sensor.
+1 -1
drivers/iio/magnetometer/tlv493d.c
··· 171 171 switch (ch) { 172 172 case TLV493D_AXIS_X: 173 173 val = FIELD_GET(TLV493D_BX_MAG_X_AXIS_MSB, b[TLV493D_RD_REG_BX]) << 4 | 174 - FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]) >> 4; 174 + FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]); 175 175 break; 176 176 case TLV493D_AXIS_Y: 177 177 val = FIELD_GET(TLV493D_BY_MAG_Y_AXIS_MSB, b[TLV493D_RD_REG_BY]) << 4 |
+1 -1
drivers/iio/potentiometer/mcp4131.c
··· 221 221 222 222 mutex_lock(&data->lock); 223 223 224 - data->buf[0] = address << MCP4131_WIPER_SHIFT; 224 + data->buf[0] = address; 225 225 data->buf[0] |= MCP4131_WRITE | (val >> 8); 226 226 data->buf[1] = val & 0xFF; /* 8 bits here */ 227 227
+4 -2
drivers/iio/proximity/hx9023s.c
··· 719 719 struct device *dev = regmap_get_device(data->regmap); 720 720 unsigned int i, period_ms; 721 721 722 + if (!val && !val2) 723 + return -EINVAL; 724 + 722 725 period_ms = div_u64(NANO, (val * MEGA + val2)); 723 726 724 727 for (i = 0; i < ARRAY_SIZE(hx9023s_samp_freq_table); i++) { ··· 1037 1034 if (!bin) 1038 1035 return -ENOMEM; 1039 1036 1040 - memcpy(bin->data, fw->data, fw->size); 1041 - 1042 1037 bin->fw_size = fw->size; 1038 + memcpy(bin->data, fw->data, bin->fw_size); 1043 1039 bin->fw_ver = bin->data[FW_VER_OFFSET]; 1044 1040 bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET); 1045 1041
+17 -4
drivers/irqchip/irq-riscv-aplic-main.c
··· 116 116 .ops = &aplic_syscore_ops, 117 117 }; 118 118 119 + static bool aplic_syscore_registered __ro_after_init; 120 + 121 + static void aplic_syscore_init(void) 122 + { 123 + if (!aplic_syscore_registered) { 124 + register_syscore(&aplic_syscore); 125 + aplic_syscore_registered = true; 126 + } 127 + } 128 + 119 129 static int aplic_pm_notifier(struct notifier_block *nb, unsigned long action, void *data) 120 130 { 121 131 struct aplic_priv *priv = container_of(nb, struct aplic_priv, genpd_nb); ··· 382 372 rc = aplic_msi_setup(dev, regs); 383 373 else 384 374 rc = aplic_direct_setup(dev, regs); 385 - if (rc) 375 + 376 + if (rc) { 386 377 dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n", 387 378 msi_mode ? "MSI" : "direct"); 388 - else 389 - register_syscore(&aplic_syscore); 379 + return rc; 380 + } 381 + 382 + aplic_syscore_init(); 390 383 391 384 #ifdef CONFIG_ACPI 392 385 if (!acpi_disabled) 393 386 acpi_dev_clear_dependencies(ACPI_COMPANION(dev)); 394 387 #endif 395 388 396 - return rc; 389 + return 0; 397 390 } 398 391 399 392 static const struct of_device_id aplic_match[] = {
+1 -2
drivers/misc/amd-sbi/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config AMD_SBRMI_I2C 3 3 tristate "AMD side band RMI support" 4 - depends on I2C 4 + depends on I3C_OR_I2C 5 5 depends on ARM || ARM64 || COMPILE_TEST 6 6 select REGMAP_I2C 7 - depends on I3C || !I3C 8 7 select REGMAP_I3C if I3C 9 8 help 10 9 Side band RMI over I2C/I3C support for AMD out of band management.
+11 -5
drivers/net/bonding/bond_debugfs.c
··· 34 34 for (; hash_index != RLB_NULL_INDEX; 35 35 hash_index = client_info->used_next) { 36 36 client_info = &(bond_info->rx_hashtbl[hash_index]); 37 - seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", 38 - &client_info->ip_src, 39 - &client_info->ip_dst, 40 - &client_info->mac_dst, 41 - client_info->slave->dev->name); 37 + if (client_info->slave) 38 + seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", 39 + &client_info->ip_src, 40 + &client_info->ip_dst, 41 + &client_info->mac_dst, 42 + client_info->slave->dev->name); 43 + else 44 + seq_printf(m, "%-15pI4 %-15pI4 %-17pM (none)\n", 45 + &client_info->ip_src, 46 + &client_info->ip_dst, 47 + &client_info->mac_dst); 42 48 } 43 49 44 50 spin_unlock_bh(&bond->mode_lock);
+5 -3
drivers/net/bonding/bond_main.c
··· 1530 1530 return ret; 1531 1531 } 1532 1532 1533 - static int bond_header_parse(const struct sk_buff *skb, unsigned char *haddr) 1533 + static int bond_header_parse(const struct sk_buff *skb, 1534 + const struct net_device *dev, 1535 + unsigned char *haddr) 1534 1536 { 1535 - struct bonding *bond = netdev_priv(skb->dev); 1537 + struct bonding *bond = netdev_priv(dev); 1536 1538 const struct header_ops *slave_ops; 1537 1539 struct slave *slave; 1538 1540 int ret = 0; ··· 1544 1542 if (slave) { 1545 1543 slave_ops = READ_ONCE(slave->dev->header_ops); 1546 1544 if (slave_ops && slave_ops->parse) 1547 - ret = slave_ops->parse(skb, haddr); 1545 + ret = slave_ops->parse(skb, slave->dev, haddr); 1548 1546 } 1549 1547 rcu_read_unlock(); 1550 1548 return ret;
+6 -2
drivers/net/dsa/bcm_sf2.c
··· 980 980 ret = bcm_sf2_sw_rst(priv); 981 981 if (ret) { 982 982 pr_err("%s: failed to software reset switch\n", __func__); 983 + if (!priv->wol_ports_mask) 984 + clk_disable_unprepare(priv->clk); 983 985 return ret; 984 986 } 985 987 986 988 bcm_sf2_crossbar_setup(priv); 987 989 988 990 ret = bcm_sf2_cfp_resume(ds); 989 - if (ret) 991 + if (ret) { 992 + if (!priv->wol_ports_mask) 993 + clk_disable_unprepare(priv->clk); 990 994 return ret; 991 - 995 + } 992 996 if (priv->hw_params.num_gphy == 1) 993 997 bcm_sf2_gphy_enable_set(ds, true); 994 998
-1
drivers/net/ethernet/airoha/airoha_eth.c
··· 3093 3093 if (!port) 3094 3094 continue; 3095 3095 3096 - airoha_dev_stop(port->dev); 3097 3096 unregister_netdev(port->dev); 3098 3097 airoha_metadata_dst_free(port); 3099 3098 }
+2
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 2929 2929 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1); 2930 2930 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2); 2931 2931 2932 + if (type >= ARRAY_SIZE(bp->bs_trace)) 2933 + goto async_event_process_exit; 2932 2934 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset); 2933 2935 goto async_event_process_exit; 2934 2936 }
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 2146 2146 }; 2147 2147 2148 2148 #define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc) 2149 - #define BNXT_TRACE_MAX 11 2149 + #define BNXT_TRACE_MAX (DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE + 1) 2150 2150 2151 2151 struct bnxt_bs_trace_info { 2152 2152 u8 *magic_byte;
+1 -1
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
··· 123 123 while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) 124 124 & RBUF_STATUS_WOL)) { 125 125 retries++; 126 - if (retries > 5) { 126 + if (retries > 50) { 127 127 netdev_crit(dev, "polling wol mode timeout\n"); 128 128 return -ETIMEDOUT; 129 129 }
+11
drivers/net/ethernet/broadcom/tg3.c
··· 17029 17029 return err; 17030 17030 } 17031 17031 17032 + static int tg3_is_default_mac_address(u8 *addr) 17033 + { 17034 + static const u8 default_mac_address[ETH_ALEN] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x00 }; 17035 + 17036 + return ether_addr_equal(default_mac_address, addr); 17037 + } 17038 + 17032 17039 static int tg3_get_device_address(struct tg3 *tp, u8 *addr) 17033 17040 { 17034 17041 u32 hi, lo, mac_offset; ··· 17109 17102 17110 17103 if (!is_valid_ether_addr(addr)) 17111 17104 return -EINVAL; 17105 + 17106 + if (tg3_is_default_mac_address(addr)) 17107 + return device_get_mac_address(&tp->pdev->dev, addr); 17108 + 17112 17109 return 0; 17113 17110 } 17114 17111
+22 -4
drivers/net/ethernet/cadence/macb_main.c
··· 2812 2812 desc->ctrl = 0; 2813 2813 } 2814 2814 2815 + static void gem_init_rx_ring(struct macb_queue *queue) 2816 + { 2817 + queue->rx_tail = 0; 2818 + queue->rx_prepared_head = 0; 2819 + 2820 + gem_rx_refill(queue); 2821 + } 2822 + 2815 2823 static void gem_init_rings(struct macb *bp) 2816 2824 { 2817 2825 struct macb_queue *queue; ··· 2837 2829 queue->tx_head = 0; 2838 2830 queue->tx_tail = 0; 2839 2831 2840 - queue->rx_tail = 0; 2841 - queue->rx_prepared_head = 0; 2842 - 2843 - gem_rx_refill(queue); 2832 + gem_init_rx_ring(queue); 2844 2833 } 2845 2834 2846 2835 macb_init_tieoff(bp); ··· 4123 4118 { 4124 4119 struct macb *bp = netdev_priv(netdev); 4125 4120 int ret; 4121 + 4122 + if (!(netdev->hw_features & NETIF_F_NTUPLE)) 4123 + return -EOPNOTSUPP; 4126 4124 4127 4125 switch (cmd->cmd) { 4128 4126 case ETHTOOL_SRXCLSRLINS: ··· 6102 6094 rtnl_unlock(); 6103 6095 } 6104 6096 6097 + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) 6098 + macb_init_buffers(bp); 6099 + 6105 6100 for (q = 0, queue = bp->queues; q < bp->num_queues; 6106 6101 ++q, ++queue) { 6102 + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { 6103 + if (macb_is_gem(bp)) 6104 + gem_init_rx_ring(queue); 6105 + else 6106 + macb_init_rx_ring(queue); 6107 + } 6108 + 6107 6109 napi_enable(&queue->napi_rx); 6108 6110 napi_enable(&queue->napi_tx); 6109 6111 }
+3 -1
drivers/net/ethernet/cadence/macb_ptp.c
··· 357 357 { 358 358 struct macb *bp = netdev_priv(ndev); 359 359 360 - if (bp->ptp_clock) 360 + if (bp->ptp_clock) { 361 361 ptp_clock_unregister(bp->ptp_clock); 362 + bp->ptp_clock = NULL; 363 + } 362 364 363 365 gem_ptp_clear_timer(bp); 364 366
+6 -3
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 757 757 adapter->num_vlan_filters++; 758 758 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); 759 759 } else if (f->state == IAVF_VLAN_REMOVE) { 760 - /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed. 761 - * We can safely only change the state here. 760 + /* Re-add the filter since we cannot tell whether the 761 + * pending delete has already been processed by the PF. 762 + * A duplicate add is harmless. 762 763 */ 763 - f->state = IAVF_VLAN_ACTIVE; 764 + f->state = IAVF_VLAN_ADD; 765 + iavf_schedule_aq_request(adapter, 766 + IAVF_FLAG_AQ_ADD_VLAN_FILTER); 764 767 } 765 768 766 769 clearout:
+2
drivers/net/ethernet/intel/igc/igc.h
··· 781 781 struct kernel_hwtstamp_config *config, 782 782 struct netlink_ext_ack *extack); 783 783 void igc_ptp_tx_hang(struct igc_adapter *adapter); 784 + void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter, 785 + u16 queue_id); 784 786 void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); 785 787 void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); 786 788
+9 -5
drivers/net/ethernet/intel/igc/igc_main.c
··· 264 264 /* reset next_to_use and next_to_clean */ 265 265 tx_ring->next_to_use = 0; 266 266 tx_ring->next_to_clean = 0; 267 + 268 + /* Clear any lingering XSK TX timestamp requests */ 269 + if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { 270 + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 271 + 272 + igc_ptp_clear_xsk_tx_tstamp_queue(adapter, tx_ring->queue_index); 273 + } 267 274 } 268 275 269 276 /** ··· 1737 1730 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 1738 1731 * in order to meet this minimum size requirement. 1739 1732 */ 1740 - if (skb->len < 17) { 1741 - if (skb_padto(skb, 17)) 1742 - return NETDEV_TX_OK; 1743 - skb->len = 17; 1744 - } 1733 + if (skb_put_padto(skb, 17)) 1734 + return NETDEV_TX_OK; 1745 1735 1746 1736 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); 1747 1737 }
+33
drivers/net/ethernet/intel/igc/igc_ptp.c
··· 577 577 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); 578 578 } 579 579 580 + /** 581 + * igc_ptp_clear_xsk_tx_tstamp_queue - Clear pending XSK TX timestamps for a queue 582 + * @adapter: Board private structure 583 + * @queue_id: TX queue index to clear timestamps for 584 + * 585 + * Iterates over all TX timestamp registers and releases any pending 586 + * timestamp requests associated with the given TX queue. This is 587 + * called when an XDP pool is being disabled to ensure no stale 588 + * timestamp references remain. 589 + */ 590 + void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter, u16 queue_id) 591 + { 592 + unsigned long flags; 593 + int i; 594 + 595 + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); 596 + 597 + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { 598 + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; 599 + 600 + if (tstamp->buffer_type != IGC_TX_BUFFER_TYPE_XSK) 601 + continue; 602 + if (tstamp->xsk_queue_index != queue_id) 603 + continue; 604 + if (!tstamp->xsk_tx_buffer) 605 + continue; 606 + 607 + igc_ptp_free_tx_buffer(adapter, tstamp); 608 + } 609 + 610 + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); 611 + } 612 + 580 613 static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) 581 614 { 582 615 struct igc_hw *hw = &adapter->hw;
+36 -13
drivers/net/ethernet/intel/libie/fwlog.c
··· 433 433 module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry); 434 434 if (module < 0) { 435 435 dev_info(dev, "unknown module\n"); 436 - return -EINVAL; 436 + count = -EINVAL; 437 + goto free_cmd_buf; 437 438 } 438 439 439 440 cnt = sscanf(cmd_buf, "%s", user_val); 440 - if (cnt != 1) 441 - return -EINVAL; 441 + if (cnt != 1) { 442 + count = -EINVAL; 443 + goto free_cmd_buf; 444 + } 442 445 443 446 log_level = sysfs_match_string(libie_fwlog_level_string, user_val); 444 447 if (log_level < 0) { 445 448 dev_info(dev, "unknown log level '%s'\n", user_val); 446 - return -EINVAL; 449 + count = -EINVAL; 450 + goto free_cmd_buf; 447 451 } 448 452 449 453 if (module != LIBIE_AQC_FW_LOG_ID_MAX) { ··· 461 457 for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++) 462 458 fwlog->cfg.module_entries[i].log_level = log_level; 463 459 } 460 + 461 + free_cmd_buf: 462 + kfree(cmd_buf); 464 463 465 464 return count; 466 465 } ··· 522 515 return PTR_ERR(cmd_buf); 523 516 524 517 ret = sscanf(cmd_buf, "%s", user_val); 525 - if (ret != 1) 526 - return -EINVAL; 518 + if (ret != 1) { 519 + count = -EINVAL; 520 + goto free_cmd_buf; 521 + } 527 522 528 523 ret = kstrtos16(user_val, 0, &nr_messages); 529 - if (ret) 530 - return ret; 524 + if (ret) { 525 + count = ret; 526 + goto free_cmd_buf; 527 + } 531 528 532 529 if (nr_messages < LIBIE_AQC_FW_LOG_MIN_RESOLUTION || 533 530 nr_messages > LIBIE_AQC_FW_LOG_MAX_RESOLUTION) { 534 531 dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n", 535 532 nr_messages, LIBIE_AQC_FW_LOG_MIN_RESOLUTION, 536 533 LIBIE_AQC_FW_LOG_MAX_RESOLUTION); 537 - return -EINVAL; 534 + count = -EINVAL; 535 + goto free_cmd_buf; 538 536 } 539 537 540 538 fwlog->cfg.log_resolution = nr_messages; 539 + 540 + free_cmd_buf: 541 + kfree(cmd_buf); 541 542 542 543 return count; 543 544 } ··· 603 588 return PTR_ERR(cmd_buf); 604 589 605 590 ret = sscanf(cmd_buf, "%s", user_val); 606 - if (ret != 1) 607 - return -EINVAL; 591 + if (ret != 
1) { 592 + ret = -EINVAL; 593 + goto free_cmd_buf; 594 + } 608 595 609 596 ret = kstrtobool(user_val, &enable); 610 597 if (ret) ··· 641 624 */ 642 625 if (WARN_ON(ret != (ssize_t)count && ret >= 0)) 643 626 ret = -EIO; 627 + free_cmd_buf: 628 + kfree(cmd_buf); 644 629 645 630 return ret; 646 631 } ··· 701 682 return PTR_ERR(cmd_buf); 702 683 703 684 ret = sscanf(cmd_buf, "%s", user_val); 704 - if (ret != 1) 705 - return -EINVAL; 685 + if (ret != 1) { 686 + ret = -EINVAL; 687 + goto free_cmd_buf; 688 + } 706 689 707 690 index = sysfs_match_string(libie_fwlog_log_size, user_val); 708 691 if (index < 0) { ··· 733 712 */ 734 713 if (WARN_ON(ret != (ssize_t)count && ret >= 0)) 735 714 ret = -EIO; 715 + free_cmd_buf: 716 + kfree(cmd_buf); 736 717 737 718 return ret; 738 719 }
+2 -2
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 5016 5016 if (priv->percpu_pools) 5017 5017 numbufs = port->nrxqs * 2; 5018 5018 5019 - if (change_percpu) 5019 + if (change_percpu && priv->global_tx_fc) 5020 5020 mvpp2_bm_pool_update_priv_fc(priv, false); 5021 5021 5022 5022 for (i = 0; i < numbufs; i++) ··· 5041 5041 mvpp2_open(port->dev); 5042 5042 } 5043 5043 5044 - if (change_percpu) 5044 + if (change_percpu && priv->global_tx_fc) 5045 5045 mvpp2_bm_pool_update_priv_fc(priv, true); 5046 5046 5047 5047 return 0;
+1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 287 287 struct mlx5e_ipsec_dwork *dwork; 288 288 struct mlx5e_ipsec_limits limits; 289 289 u32 rx_mapped_id; 290 + u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)]; 290 291 }; 291 292 292 293 struct mlx5_accel_pol_xfrm_attrs {
+23 -29
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
··· 310 310 mlx5e_ipsec_aso_query(sa_entry, data); 311 311 } 312 312 313 - static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, 314 - u32 mode_param) 313 + static void 314 + mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, 315 + u32 mode_param, 316 + struct mlx5_accel_esp_xfrm_attrs *attrs) 315 317 { 316 - struct mlx5_accel_esp_xfrm_attrs attrs = {}; 317 318 struct mlx5_wqe_aso_ctrl_seg data = {}; 318 319 319 320 if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) { ··· 324 323 sa_entry->esn_state.overlap = 1; 325 324 } 326 325 327 - mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); 328 - 329 - /* It is safe to execute the modify below unlocked since the only flows 330 - * that could affect this HW object, are create, destroy and this work. 331 - * 332 - * Creation flow can't co-exist with this modify work, the destruction 333 - * flow would cancel this work, and this work is a single entity that 334 - * can't conflict with it self. 335 - */ 336 - spin_unlock_bh(&sa_entry->x->lock); 337 - mlx5_accel_esp_modify_xfrm(sa_entry, &attrs); 338 - spin_lock_bh(&sa_entry->x->lock); 326 + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, attrs); 339 327 340 328 data.data_offset_condition_operand = 341 329 MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET; ··· 360 370 static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry) 361 371 { 362 372 struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; 363 - struct mlx5e_ipsec *ipsec = sa_entry->ipsec; 364 - struct mlx5e_ipsec_aso *aso = ipsec->aso; 365 373 bool soft_arm, hard_arm; 366 374 u64 hard_cnt; 367 375 368 376 lockdep_assert_held(&sa_entry->x->lock); 369 377 370 - soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm); 371 - hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm); 378 + soft_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, soft_lft_arm); 379 + hard_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, hard_lft_arm); 372 380 if (!soft_arm && !hard_arm) 373 381 /* It is not 
lifetime event */ 374 382 return; 375 383 376 - hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt); 384 + hard_cnt = MLX5_GET(ipsec_aso, sa_entry->ctx, remove_flow_pkt_cnt); 377 385 if (!hard_cnt || hard_arm) { 378 386 /* It is possible to see packet counter equal to zero without 379 387 * hard limit event armed. Such situation can be if packet ··· 441 453 struct mlx5e_ipsec_work *work = 442 454 container_of(_work, struct mlx5e_ipsec_work, work); 443 455 struct mlx5e_ipsec_sa_entry *sa_entry = work->data; 456 + struct mlx5_accel_esp_xfrm_attrs tmp = {}; 444 457 struct mlx5_accel_esp_xfrm_attrs *attrs; 445 - struct mlx5e_ipsec_aso *aso; 458 + bool need_modify = false; 446 459 int ret; 447 460 448 - aso = sa_entry->ipsec->aso; 449 461 attrs = &sa_entry->attrs; 450 462 451 463 spin_lock_bh(&sa_entry->x->lock); ··· 453 465 if (ret) 454 466 goto unlock; 455 467 456 - if (attrs->replay_esn.trigger && 457 - !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) { 458 - u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter); 459 - 460 - mlx5e_ipsec_update_esn_state(sa_entry, mode_param); 461 - } 462 - 463 468 if (attrs->lft.soft_packet_limit != XFRM_INF) 464 469 mlx5e_ipsec_handle_limits(sa_entry); 465 470 471 + if (attrs->replay_esn.trigger && 472 + !MLX5_GET(ipsec_aso, sa_entry->ctx, esn_event_arm)) { 473 + u32 mode_param = MLX5_GET(ipsec_aso, sa_entry->ctx, 474 + mode_parameter); 475 + 476 + mlx5e_ipsec_update_esn_state(sa_entry, mode_param, &tmp); 477 + need_modify = true; 478 + } 479 + 466 480 unlock: 467 481 spin_unlock_bh(&sa_entry->x->lock); 482 + if (need_modify) 483 + mlx5_accel_esp_modify_xfrm(sa_entry, &tmp); 468 484 kfree(work); 469 485 } 470 486 ··· 621 629 /* We are in atomic context */ 622 630 udelay(10); 623 631 } while (ret && time_is_after_jiffies(expires)); 632 + if (!ret) 633 + memcpy(sa_entry->ctx, aso->ctx, MLX5_ST_SZ_BYTES(ipsec_aso)); 624 634 spin_unlock_bh(&aso->lock); 625 635 return ret; 626 636 }
+9 -14
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 1489 1489 return err; 1490 1490 } 1491 1491 1492 - static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) 1492 + static u32 mlx5_esw_qos_lag_link_speed_get(struct mlx5_core_dev *mdev, 1493 + bool take_rtnl) 1493 1494 { 1494 1495 struct ethtool_link_ksettings lksettings; 1495 1496 struct net_device *slave, *master; 1496 1497 u32 speed = SPEED_UNKNOWN; 1497 1498 1498 - /* Lock ensures a stable reference to master and slave netdevice 1499 - * while port speed of master is queried. 1500 - */ 1501 - ASSERT_RTNL(); 1502 - 1503 1499 slave = mlx5_uplink_netdev_get(mdev); 1504 1500 if (!slave) 1505 1501 goto out; 1506 1502 1503 + if (take_rtnl) 1504 + rtnl_lock(); 1507 1505 master = netdev_master_upper_dev_get(slave); 1508 1506 if (master && !__ethtool_get_link_ksettings(master, &lksettings)) 1509 1507 speed = lksettings.base.speed; 1508 + if (take_rtnl) 1509 + rtnl_unlock(); 1510 1510 1511 1511 out: 1512 1512 mlx5_uplink_netdev_put(mdev, slave); ··· 1514 1514 } 1515 1515 1516 1516 static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max, 1517 - bool hold_rtnl_lock, struct netlink_ext_ack *extack) 1517 + bool take_rtnl, 1518 + struct netlink_ext_ack *extack) 1518 1519 { 1519 1520 int err; 1520 1521 1521 1522 if (!mlx5_lag_is_active(mdev)) 1522 1523 goto skip_lag; 1523 1524 1524 - if (hold_rtnl_lock) 1525 - rtnl_lock(); 1526 - 1527 - *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev); 1528 - 1529 - if (hold_rtnl_lock) 1530 - rtnl_unlock(); 1525 + *link_speed_max = mlx5_esw_qos_lag_link_speed_get(mdev, take_rtnl); 1531 1526 1532 1527 if (*link_speed_max != (u32)SPEED_UNKNOWN) 1533 1528 return 0;
+3 -3
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 814 814 gc->max_num_cqs = 0; 815 815 } 816 816 817 - kfree(hwc->caller_ctx); 818 - hwc->caller_ctx = NULL; 819 - 820 817 if (hwc->txq) 821 818 mana_hwc_destroy_wq(hwc, hwc->txq); 822 819 ··· 822 825 823 826 if (hwc->cq) 824 827 mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq); 828 + 829 + kfree(hwc->caller_ctx); 830 + hwc->caller_ctx = NULL; 825 831 826 832 mana_gd_free_res_map(&hwc->inflight_msg_res); 827 833
+5
drivers/net/ethernet/ti/icssg/icssg_common.c
··· 1075 1075 xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); 1076 1076 1077 1077 *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len); 1078 + if (*xdp_state == ICSSG_XDP_CONSUMED) { 1079 + page_pool_recycle_direct(pool, page); 1080 + goto requeue; 1081 + } 1082 + 1078 1083 if (*xdp_state != ICSSG_XDP_PASS) 1079 1084 goto requeue; 1080 1085 headroom = xdp.data - xdp.data_hard_start;
+4 -1
drivers/net/netdevsim/netdev.c
··· 109 109 int ret; 110 110 111 111 ret = __dev_forward_skb(rx_dev, skb); 112 - if (ret) 112 + if (ret) { 113 + if (psp_ext) 114 + __skb_ext_put(psp_ext); 113 115 return ret; 116 + } 114 117 115 118 nsim_psp_handle_ext(skb, psp_ext); 116 119
+6 -6
drivers/net/usb/aqc111.c
··· 1395 1395 aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, 1396 1396 SFR_MEDIUM_STATUS_MODE, 2, &reg16); 1397 1397 1398 - aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0, 1399 - WOL_CFG_SIZE, &wol_cfg); 1400 - aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1401 - &aqc111_data->phy_cfg); 1398 + aqc111_write_cmd_nopm(dev, AQ_WOL_CFG, 0, 0, 1399 + WOL_CFG_SIZE, &wol_cfg); 1400 + aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, 1401 + &aqc111_data->phy_cfg); 1402 1402 } else { 1403 1403 aqc111_data->phy_cfg |= AQ_LOW_POWER; 1404 - aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1405 - &aqc111_data->phy_cfg); 1404 + aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, 1405 + &aqc111_data->phy_cfg); 1406 1406 1407 1407 /* Disable RX path */ 1408 1408 aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
+6 -4
drivers/net/usb/cdc_ncm.c
··· 1656 1656 struct usbnet *dev = netdev_priv(skb_in->dev); 1657 1657 struct usb_cdc_ncm_ndp16 *ndp16; 1658 1658 int ret = -EINVAL; 1659 + size_t ndp_len; 1659 1660 1660 1661 if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) { 1661 1662 netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n", ··· 1676 1675 sizeof(struct usb_cdc_ncm_dpe16)); 1677 1676 ret--; /* we process NDP entries except for the last one */ 1678 1677 1679 - if ((sizeof(struct usb_cdc_ncm_ndp16) + 1680 - ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) { 1678 + ndp_len = struct_size_t(struct usb_cdc_ncm_ndp16, dpe16, ret); 1679 + if (ndpoffset + ndp_len > skb_in->len) { 1681 1680 netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret); 1682 1681 ret = -EINVAL; 1683 1682 } ··· 1693 1692 struct usbnet *dev = netdev_priv(skb_in->dev); 1694 1693 struct usb_cdc_ncm_ndp32 *ndp32; 1695 1694 int ret = -EINVAL; 1695 + size_t ndp_len; 1696 1696 1697 1697 if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) { 1698 1698 netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n", ··· 1713 1711 sizeof(struct usb_cdc_ncm_dpe32)); 1714 1712 ret--; /* we process NDP entries except for the last one */ 1715 1713 1716 - if ((sizeof(struct usb_cdc_ncm_ndp32) + 1717 - ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) { 1714 + ndp_len = struct_size_t(struct usb_cdc_ncm_ndp32, dpe32, ret); 1715 + if (ndpoffset + ndp_len > skb_in->len) { 1718 1716 netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret); 1719 1717 ret = -EINVAL; 1720 1718 }
+2 -4
drivers/net/wireless/ath/ath9k/channel.c
··· 1006 1006 skb_set_queue_mapping(skb, IEEE80211_AC_VO); 1007 1007 1008 1008 if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL)) 1009 - goto error; 1009 + return; 1010 1010 1011 1011 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; 1012 1012 if (ath_tx_start(sc->hw, skb, &txctl)) ··· 1119 1119 1120 1120 skb->priority = 7; 1121 1121 skb_set_queue_mapping(skb, IEEE80211_AC_VO); 1122 - if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { 1123 - dev_kfree_skb_any(skb); 1122 + if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) 1124 1123 return false; 1125 - } 1126 1124 break; 1127 1125 default: 1128 1126 return false;
+1 -3
drivers/net/wireless/mediatek/mt76/scan.c
··· 63 63 64 64 rcu_read_lock(); 65 65 66 - if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) { 67 - ieee80211_free_txskb(phy->hw, skb); 66 + if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) 68 67 goto out; 69 - } 70 68 71 69 info = IEEE80211_SKB_CB(skb); 72 70 if (req->no_cck)
+1 -1
drivers/net/wireless/ti/wlcore/tx.c
··· 210 210 if (skb_headroom(skb) < (total_len - skb->len) && 211 211 pskb_expand_head(skb, (total_len - skb->len), 0, GFP_ATOMIC)) { 212 212 wl1271_free_tx_id(wl, id); 213 - return -EAGAIN; 213 + return -ENOMEM; 214 214 } 215 215 desc = skb_push(skb, total_len - skb->len); 216 216
+1 -2
drivers/net/wireless/virtual/mac80211_hwsim.c
··· 3157 3157 hwsim->tmp_chan->band, 3158 3158 NULL)) { 3159 3159 rcu_read_unlock(); 3160 - kfree_skb(probe); 3161 3160 continue; 3162 3161 } 3163 3162 ··· 6676 6677 if (info->attrs[HWSIM_ATTR_PMSR_SUPPORT]) { 6677 6678 struct cfg80211_pmsr_capabilities *pmsr_capa; 6678 6679 6679 - pmsr_capa = kmalloc_obj(*pmsr_capa); 6680 + pmsr_capa = kzalloc_obj(*pmsr_capa); 6680 6681 if (!pmsr_capa) { 6681 6682 ret = -ENOMEM; 6682 6683 goto out_free;
+2 -2
drivers/nfc/nxp-nci/i2c.c
··· 47 47 { 48 48 struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id; 49 49 50 - gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); 51 - gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); 50 + gpiod_set_value_cansleep(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); 51 + gpiod_set_value_cansleep(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); 52 52 usleep_range(10000, 15000); 53 53 54 54 if (mode == NXP_NCI_MODE_COLD)
+3 -2
drivers/nvdimm/bus.c
··· 486 486 static void nd_async_device_register(void *d, async_cookie_t cookie) 487 487 { 488 488 struct device *dev = d; 489 + struct device *parent = dev->parent; 489 490 490 491 if (device_add(dev) != 0) { 491 492 dev_err(dev, "%s: failed\n", __func__); 492 493 put_device(dev); 493 494 } 494 495 put_device(dev); 495 - if (dev->parent) 496 - put_device(dev->parent); 496 + if (parent) 497 + put_device(parent); 497 498 } 498 499 499 500 static void nd_async_device_unregister(void *d, async_cookie_t cookie)
+1 -2
drivers/nvme/host/core.c
··· 4834 4834 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4835 4835 const struct blk_mq_ops *ops, unsigned int cmd_size) 4836 4836 { 4837 - struct queue_limits lim = {}; 4838 4837 int ret; 4839 4838 4840 4839 memset(set, 0, sizeof(*set)); ··· 4860 4861 if (ctrl->admin_q) 4861 4862 blk_put_queue(ctrl->admin_q); 4862 4863 4863 - ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); 4864 + ctrl->admin_q = blk_mq_alloc_queue(set, NULL, NULL); 4864 4865 if (IS_ERR(ctrl->admin_q)) { 4865 4866 ret = PTR_ERR(ctrl->admin_q); 4866 4867 goto out_free_tagset;
+5 -3
drivers/nvme/host/pci.c
··· 544 544 /* Free memory and continue on */ 545 545 nvme_dbbuf_dma_free(dev); 546 546 547 - for (i = 1; i <= dev->online_queues; i++) 547 + for (i = 1; i < dev->online_queues; i++) 548 548 nvme_dbbuf_free(&dev->queues[i]); 549 549 } 550 550 } ··· 1625 1625 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1626 1626 { 1627 1627 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1628 + int irq; 1628 1629 1629 1630 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1630 1631 1631 - disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1632 + irq = pci_irq_vector(pdev, nvmeq->cq_vector); 1633 + disable_irq(irq); 1632 1634 spin_lock(&nvmeq->cq_poll_lock); 1633 1635 nvme_poll_cq(nvmeq, NULL); 1634 1636 spin_unlock(&nvmeq->cq_poll_lock); 1635 - enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1637 + enable_irq(irq); 1636 1638 } 1637 1639 1638 1640 static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+1 -1
drivers/nvme/target/admin-cmd.c
··· 1585 1585 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; 1586 1586 mutex_unlock(&ctrl->lock); 1587 1587 1588 - queue_work(nvmet_wq, &ctrl->async_event_work); 1588 + queue_work(nvmet_aen_wq, &ctrl->async_event_work); 1589 1589 } 1590 1590 1591 1591 void nvmet_execute_keep_alive(struct nvmet_req *req)
+12 -2
drivers/nvme/target/core.c
··· 27 27 28 28 struct workqueue_struct *nvmet_wq; 29 29 EXPORT_SYMBOL_GPL(nvmet_wq); 30 + struct workqueue_struct *nvmet_aen_wq; 31 + EXPORT_SYMBOL_GPL(nvmet_aen_wq); 30 32 31 33 /* 32 34 * This read/write semaphore is used to synchronize access to configuration ··· 208 206 list_add_tail(&aen->entry, &ctrl->async_events); 209 207 mutex_unlock(&ctrl->lock); 210 208 211 - queue_work(nvmet_wq, &ctrl->async_event_work); 209 + queue_work(nvmet_aen_wq, &ctrl->async_event_work); 212 210 } 213 211 214 212 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) ··· 1958 1956 if (!nvmet_wq) 1959 1957 goto out_free_buffered_work_queue; 1960 1958 1959 + nvmet_aen_wq = alloc_workqueue("nvmet-aen-wq", 1960 + WQ_MEM_RECLAIM | WQ_UNBOUND, 0); 1961 + if (!nvmet_aen_wq) 1962 + goto out_free_nvmet_work_queue; 1963 + 1961 1964 error = nvmet_init_debugfs(); 1962 1965 if (error) 1963 - goto out_free_nvmet_work_queue; 1966 + goto out_free_nvmet_aen_work_queue; 1964 1967 1965 1968 error = nvmet_init_discovery(); 1966 1969 if (error) ··· 1981 1974 nvmet_exit_discovery(); 1982 1975 out_exit_debugfs: 1983 1976 nvmet_exit_debugfs(); 1977 + out_free_nvmet_aen_work_queue: 1978 + destroy_workqueue(nvmet_aen_wq); 1984 1979 out_free_nvmet_work_queue: 1985 1980 destroy_workqueue(nvmet_wq); 1986 1981 out_free_buffered_work_queue: ··· 2000 1991 nvmet_exit_discovery(); 2001 1992 nvmet_exit_debugfs(); 2002 1993 ida_destroy(&cntlid_ida); 1994 + destroy_workqueue(nvmet_aen_wq); 2003 1995 destroy_workqueue(nvmet_wq); 2004 1996 destroy_workqueue(buffered_io_wq); 2005 1997 destroy_workqueue(zbd_wq);
+1
drivers/nvme/target/nvmet.h
··· 501 501 extern struct workqueue_struct *buffered_io_wq; 502 502 extern struct workqueue_struct *zbd_wq; 503 503 extern struct workqueue_struct *nvmet_wq; 504 + extern struct workqueue_struct *nvmet_aen_wq; 504 505 505 506 static inline void nvmet_set_result(struct nvmet_req *req, u32 result) 506 507 {
+1
drivers/nvme/target/rdma.c
··· 2087 2087 mutex_unlock(&nvmet_rdma_queue_mutex); 2088 2088 2089 2089 flush_workqueue(nvmet_wq); 2090 + flush_workqueue(nvmet_aen_wq); 2090 2091 } 2091 2092 2092 2093 static struct ib_client nvmet_rdma_ib_client = {
+1 -1
drivers/power/sequencing/pwrseq-pcie-m2.c
··· 109 109 if (!ctx) 110 110 return -ENOMEM; 111 111 112 - ctx->of_node = of_node_get(dev->of_node); 112 + ctx->of_node = dev_of_node(dev); 113 113 ctx->pdata = device_get_match_data(dev); 114 114 if (!ctx->pdata) 115 115 return dev_err_probe(dev, -ENODEV,
+10 -4
drivers/regulator/pca9450-regulator.c
··· 1293 1293 struct regulator_dev *ldo5; 1294 1294 struct pca9450 *pca9450; 1295 1295 unsigned int device_id, i; 1296 + const char *type_name; 1296 1297 int ret; 1297 1298 1298 1299 pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL); ··· 1304 1303 case PCA9450_TYPE_PCA9450A: 1305 1304 regulator_desc = pca9450a_regulators; 1306 1305 pca9450->rcnt = ARRAY_SIZE(pca9450a_regulators); 1306 + type_name = "pca9450a"; 1307 1307 break; 1308 1308 case PCA9450_TYPE_PCA9450BC: 1309 1309 regulator_desc = pca9450bc_regulators; 1310 1310 pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators); 1311 + type_name = "pca9450bc"; 1311 1312 break; 1312 1313 case PCA9450_TYPE_PCA9451A: 1314 + regulator_desc = pca9451a_regulators; 1315 + pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators); 1316 + type_name = "pca9451a"; 1317 + break; 1313 1318 case PCA9450_TYPE_PCA9452: 1314 1319 regulator_desc = pca9451a_regulators; 1315 1320 pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators); 1321 + type_name = "pca9452"; 1316 1322 break; 1317 1323 default: 1318 1324 dev_err(&i2c->dev, "Unknown device type"); ··· 1377 1369 if (pca9450->irq) { 1378 1370 ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL, 1379 1371 pca9450_irq_handler, 1380 - (IRQF_TRIGGER_FALLING | IRQF_ONESHOT), 1372 + (IRQF_TRIGGER_LOW | IRQF_ONESHOT), 1381 1373 "pca9450-irq", pca9450); 1382 1374 if (ret != 0) 1383 1375 return dev_err_probe(pca9450->dev, ret, "Failed to request IRQ: %d\n", ··· 1421 1413 pca9450_i2c_restart_handler, pca9450)) 1422 1414 dev_warn(&i2c->dev, "Failed to register restart handler\n"); 1423 1415 1424 - dev_info(&i2c->dev, "%s probed.\n", 1425 - type == PCA9450_TYPE_PCA9450A ? "pca9450a" : 1426 - (type == PCA9450_TYPE_PCA9451A ? "pca9451a" : "pca9450bc")); 1416 + dev_info(&i2c->dev, "%s probed.\n", type_name); 1427 1417 1428 1418 return 0; 1429 1419 }
+3
drivers/reset/reset-rzg2l-usbphy-ctrl.c
··· 136 136 { 137 137 u32 val = power_on ? 0 : 1; 138 138 139 + if (!pwrrdy) 140 + return 0; 141 + 139 142 /* The initialization path guarantees that the mask is 1 bit long. */ 140 143 return regmap_field_update_bits(pwrrdy, 1, val); 141 144 }
+16
drivers/s390/block/dasd_eckd.c
··· 6135 6135 static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid, 6136 6136 char *sec_busid) 6137 6137 { 6138 + struct dasd_eckd_private *prim_priv, *sec_priv; 6138 6139 struct dasd_device *primary, *secondary; 6139 6140 struct dasd_copy_relation *copy; 6140 6141 struct dasd_block *block; ··· 6155 6154 secondary = copy_relation_find_device(copy, sec_busid); 6156 6155 if (!secondary) 6157 6156 return DASD_COPYPAIRSWAP_SECONDARY; 6157 + 6158 + prim_priv = primary->private; 6159 + sec_priv = secondary->private; 6158 6160 6159 6161 /* 6160 6162 * usually the device should be quiesced for swap ··· 6185 6181 dev_name(&primary->cdev->dev), 6186 6182 dev_name(&secondary->cdev->dev), rc); 6187 6183 } 6184 + 6185 + if (primary->stopped & DASD_STOPPED_QUIESCE) { 6186 + dasd_device_set_stop_bits(secondary, DASD_STOPPED_QUIESCE); 6187 + dasd_device_remove_stop_bits(primary, DASD_STOPPED_QUIESCE); 6188 + } 6189 + 6190 + /* 6191 + * The secondary device never got through format detection, but since it 6192 + * is a copy of the primary device, the format is exactly the same; 6193 + * therefore, the detected layout can simply be copied. 6194 + */ 6195 + sec_priv->uses_cdl = prim_priv->uses_cdl; 6188 6196 6189 6197 /* re-enable device */ 6190 6198 dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
+7 -5
drivers/s390/crypto/zcrypt_ccamisc.c
··· 1639 1639 1640 1640 memset(ci, 0, sizeof(*ci)); 1641 1641 1642 - /* get first info from zcrypt device driver about this apqn */ 1643 - rc = zcrypt_device_status_ext(cardnr, domain, &devstat); 1644 - if (rc) 1645 - return rc; 1646 - ci->hwtype = devstat.hwtype; 1642 + /* if specific domain given, fetch status and hw info for this apqn */ 1643 + if (domain != AUTOSEL_DOM) { 1644 + rc = zcrypt_device_status_ext(cardnr, domain, &devstat); 1645 + if (rc) 1646 + return rc; 1647 + ci->hwtype = devstat.hwtype; 1648 + } 1647 1649 1648 1650 /* 1649 1651 * Prep memory for rule array and var array use.
+1 -2
drivers/s390/crypto/zcrypt_cex4.c
··· 85 85 86 86 memset(&ci, 0, sizeof(ci)); 87 87 88 - if (ap_domain_index >= 0) 89 - cca_get_info(ac->id, ap_domain_index, &ci, 0); 88 + cca_get_info(ac->id, AUTOSEL_DOM, &ci, 0); 90 89 91 90 return sysfs_emit(buf, "%s\n", ci.serial); 92 91 }
+1 -1
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 2578 2578 shost->transportt = hisi_sas_stt; 2579 2579 shost->max_id = HISI_SAS_MAX_DEVICES; 2580 2580 shost->max_lun = ~0; 2581 - shost->max_channel = 1; 2581 + shost->max_channel = 0; 2582 2582 shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; 2583 2583 if (hisi_hba->hw->slot_index_alloc) { 2584 2584 shost->can_queue = HISI_SAS_MAX_COMMANDS;
+1 -1
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 4993 4993 shost->transportt = hisi_sas_stt; 4994 4994 shost->max_id = HISI_SAS_MAX_DEVICES; 4995 4995 shost->max_lun = ~0; 4996 - shost->max_channel = 1; 4996 + shost->max_channel = 0; 4997 4997 shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; 4998 4998 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 4999 4999 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
-2
drivers/scsi/qla2xxx/qla_iocb.c
··· 2751 2751 if (!elsio->u.els_logo.els_logo_pyld) { 2752 2752 /* ref: INIT */ 2753 2753 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2754 - qla2x00_free_fcport(fcport); 2755 2754 return QLA_FUNCTION_FAILED; 2756 2755 } 2757 2756 ··· 2775 2776 if (rval != QLA_SUCCESS) { 2776 2777 /* ref: INIT */ 2777 2778 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2778 - qla2x00_free_fcport(fcport); 2779 2779 return QLA_FUNCTION_FAILED; 2780 2780 } 2781 2781
+2 -6
drivers/scsi/scsi_scan.c
··· 360 360 * default device queue depth to figure out sbitmap shift 361 361 * since we use this queue depth most of times. 362 362 */ 363 - if (scsi_realloc_sdev_budget_map(sdev, depth)) { 364 - kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); 365 - put_device(&starget->dev); 366 - kfree(sdev); 367 - goto out; 368 - } 363 + if (scsi_realloc_sdev_budget_map(sdev, depth)) 364 + goto out_device_destroy; 369 365 370 366 scsi_change_queue_depth(sdev, depth); 371 367
+22 -2
drivers/soc/fsl/qbman/qman.c
··· 1827 1827 1828 1828 void qman_destroy_fq(struct qman_fq *fq) 1829 1829 { 1830 + int leaked; 1831 + 1830 1832 /* 1831 1833 * We don't need to lock the FQ as it is a pre-condition that the FQ be 1832 1834 * quiesced. Instead, run some checks. ··· 1836 1834 switch (fq->state) { 1837 1835 case qman_fq_state_parked: 1838 1836 case qman_fq_state_oos: 1839 - if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) 1840 - qman_release_fqid(fq->fqid); 1837 + /* 1838 + * There's a race condition here on releasing the fqid, 1839 + * setting the fq_table to NULL, and freeing the fqid. 1840 + * To prevent it, this order should be respected: 1841 + */ 1842 + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) { 1843 + leaked = qman_shutdown_fq(fq->fqid); 1844 + if (leaked) 1845 + pr_debug("FQID %d leaked\n", fq->fqid); 1846 + } 1841 1847 1842 1848 DPAA_ASSERT(fq_table[fq->idx]); 1843 1849 fq_table[fq->idx] = NULL; 1850 + 1851 + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID) && !leaked) { 1852 + /* 1853 + * fq_table[fq->idx] should be set to null before 1854 + * freeing fq->fqid otherwise it could by allocated by 1855 + * qman_alloc_fqid() while still being !NULL 1856 + */ 1857 + smp_wmb(); 1858 + gen_pool_free(qm_fqalloc, fq->fqid | DPAA_GENALLOC_OFF, 1); 1859 + } 1844 1860 return; 1845 1861 default: 1846 1862 break;
+2 -2
drivers/soc/fsl/qe/qmc.c
··· 1790 1790 return -EINVAL; 1791 1791 qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0)); 1792 1792 qmc->dpram = devm_ioremap_resource(qmc->dev, res); 1793 - if (IS_ERR(qmc->scc_pram)) 1794 - return PTR_ERR(qmc->scc_pram); 1793 + if (IS_ERR(qmc->dpram)) 1794 + return PTR_ERR(qmc->dpram); 1795 1795 1796 1796 return 0; 1797 1797 }
+9 -4
drivers/soc/microchip/mpfs-sys-controller.c
··· 142 142 143 143 sys_controller->flash = of_get_mtd_device_by_node(np); 144 144 of_node_put(np); 145 - if (IS_ERR(sys_controller->flash)) 146 - return dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n"); 145 + if (IS_ERR(sys_controller->flash)) { 146 + ret = dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n"); 147 + goto out_free; 148 + } 147 149 148 150 no_flash: 149 151 sys_controller->client.dev = dev; ··· 157 155 if (IS_ERR(sys_controller->chan)) { 158 156 ret = dev_err_probe(dev, PTR_ERR(sys_controller->chan), 159 157 "Failed to get mbox channel\n"); 160 - kfree(sys_controller); 161 - return ret; 158 + goto out_free; 162 159 } 163 160 164 161 init_completion(&sys_controller->c); ··· 175 174 dev_info(&pdev->dev, "Registered MPFS system controller\n"); 176 175 177 176 return 0; 177 + 178 + out_free: 179 + kfree(sys_controller); 180 + return ret; 178 181 } 179 182 180 183 static void mpfs_sys_controller_remove(struct platform_device *pdev)
+1
drivers/soc/rockchip/grf.c
··· 231 231 grf = syscon_node_to_regmap(np); 232 232 if (IS_ERR(grf)) { 233 233 pr_err("%s: could not get grf syscon\n", __func__); 234 + of_node_put(np); 234 235 return PTR_ERR(grf); 235 236 } 236 237
+2 -3
drivers/spi/spi-amlogic-spifc-a4.c
··· 411 411 ret = dma_mapping_error(sfc->dev, sfc->daddr); 412 412 if (ret) { 413 413 dev_err(sfc->dev, "DMA mapping error\n"); 414 - goto out_map_data; 414 + return ret; 415 415 } 416 416 417 417 cmd = CMD_DATA_ADDRL(sfc->daddr); ··· 429 429 ret = dma_mapping_error(sfc->dev, sfc->iaddr); 430 430 if (ret) { 431 431 dev_err(sfc->dev, "DMA mapping error\n"); 432 - dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir); 433 432 goto out_map_data; 434 433 } 435 434 ··· 447 448 return 0; 448 449 449 450 out_map_info: 450 - dma_unmap_single(sfc->dev, sfc->iaddr, datalen, dir); 451 + dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir); 451 452 out_map_data: 452 453 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir); 453 454
+16 -22
drivers/spi/spi-atcspi200.c
··· 195 195 if (op->addr.buswidth > 1) 196 196 tc |= TRANS_ADDR_FMT; 197 197 if (op->data.nbytes) { 198 - tc |= TRANS_DUAL_QUAD(ffs(op->data.buswidth) - 1); 198 + unsigned int width_code; 199 + 200 + width_code = ffs(op->data.buswidth) - 1; 201 + if (unlikely(width_code > 3)) { 202 + WARN_ON_ONCE(1); 203 + width_code = 0; 204 + } 205 + tc |= TRANS_DUAL_QUAD(width_code); 206 + 199 207 if (op->data.dir == SPI_MEM_DATA_IN) { 200 208 if (op->dummy.nbytes) 201 209 tc |= TRANS_MODE_DMY_READ | ··· 505 497 506 498 static int atcspi_configure_dma(struct atcspi_dev *spi) 507 499 { 508 - struct dma_chan *dma_chan; 509 - int ret = 0; 500 + spi->host->dma_rx = devm_dma_request_chan(spi->dev, "rx"); 501 + if (IS_ERR(spi->host->dma_rx)) 502 + return PTR_ERR(spi->host->dma_rx); 510 503 511 - dma_chan = devm_dma_request_chan(spi->dev, "rx"); 512 - if (IS_ERR(dma_chan)) { 513 - ret = PTR_ERR(dma_chan); 514 - goto err_exit; 515 - } 516 - spi->host->dma_rx = dma_chan; 504 + spi->host->dma_tx = devm_dma_request_chan(spi->dev, "tx"); 505 + if (IS_ERR(spi->host->dma_tx)) 506 + return PTR_ERR(spi->host->dma_tx); 517 507 518 - dma_chan = devm_dma_request_chan(spi->dev, "tx"); 519 - if (IS_ERR(dma_chan)) { 520 - ret = PTR_ERR(dma_chan); 521 - goto free_rx; 522 - } 523 - spi->host->dma_tx = dma_chan; 524 508 init_completion(&spi->dma_completion); 525 509 526 - return ret; 527 - 528 - free_rx: 529 - dma_release_channel(spi->host->dma_rx); 530 - spi->host->dma_rx = NULL; 531 - err_exit: 532 - return ret; 510 + return 0; 533 511 } 534 512 535 513 static int atcspi_enable_clk(struct atcspi_dev *spi)
+6
drivers/spi/spi-cadence-quadspi.c
··· 76 76 u8 cs; 77 77 }; 78 78 79 + static const struct clk_bulk_data cqspi_clks[CLK_QSPI_NUM] = { 80 + [CLK_QSPI_APB] = { .id = "apb" }, 81 + [CLK_QSPI_AHB] = { .id = "ahb" }, 82 + }; 83 + 79 84 struct cqspi_st { 80 85 struct platform_device *pdev; 81 86 struct spi_controller *host; ··· 1828 1823 } 1829 1824 1830 1825 /* Obtain QSPI clocks. */ 1826 + memcpy(&cqspi->clks, &cqspi_clks, sizeof(cqspi->clks)); 1831 1827 ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks); 1832 1828 if (ret) 1833 1829 return dev_err_probe(dev, ret, "Failed to get clocks\n");
+1
drivers/spi/spi-intel-pci.c
··· 96 96 { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, 97 97 { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info }, 98 98 { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info }, 99 + { PCI_VDEVICE(INTEL, 0xd323), (unsigned long)&cnl_info }, 99 100 { PCI_VDEVICE(INTEL, 0xe323), (unsigned long)&cnl_info }, 100 101 { PCI_VDEVICE(INTEL, 0xe423), (unsigned long)&cnl_info }, 101 102 { },
+1 -1
drivers/spi/spi-rockchip-sfc.c
··· 711 711 } 712 712 } 713 713 714 - ret = devm_spi_register_controller(dev, host); 714 + ret = spi_register_controller(host); 715 715 if (ret) 716 716 goto err_register; 717 717
+10 -5
drivers/staging/rtl8723bs/core/rtw_ieee80211.c
··· 186 186 187 187 cnt = 0; 188 188 189 - while (cnt < in_len) { 189 + while (cnt + 2 <= in_len) { 190 + u8 ie_len = in_ie[cnt + 1]; 191 + 192 + if (cnt + 2 + ie_len > in_len) 193 + break; 194 + 190 195 if (eid == in_ie[cnt] 191 - && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) { 196 + && (!oui || (ie_len >= oui_len && !memcmp(&in_ie[cnt + 2], oui, oui_len)))) { 192 197 target_ie = &in_ie[cnt]; 193 198 194 199 if (ie) 195 - memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2); 200 + memcpy(ie, &in_ie[cnt], ie_len + 2); 196 201 197 202 if (ielen) 198 - *ielen = in_ie[cnt+1]+2; 203 + *ielen = ie_len + 2; 199 204 200 205 break; 201 206 } 202 - cnt += in_ie[cnt+1]+2; /* goto next */ 207 + cnt += ie_len + 2; /* goto next */ 203 208 } 204 209 205 210 return target_ie;
+4 -1
drivers/staging/rtl8723bs/core/rtw_mlme.c
··· 1988 1988 while (i < in_len) { 1989 1989 ielength = initial_out_len; 1990 1990 1991 - if (in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02 && i + 5 < in_len) { /* WMM element ID and OUI */ 1991 + if (i + 5 < in_len && 1992 + in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && 1993 + in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && 1994 + in_ie[i + 5] == 0x02) { 1992 1995 for (j = i; j < i + 9; j++) { 1993 1996 out_ie[ielength] = in_ie[j]; 1994 1997 ielength++;
+1
drivers/staging/sm750fb/sm750.c
··· 1123 1123 1124 1124 iounmap(sm750_dev->pvReg); 1125 1125 iounmap(sm750_dev->pvMem); 1126 + pci_release_region(pdev, 1); 1126 1127 kfree(g_settings); 1127 1128 } 1128 1129
+11 -11
drivers/staging/sm750fb/sm750_hw.c
··· 36 36 37 37 pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start); 38 38 39 - /* 40 - * reserve the vidreg space of smi adaptor 41 - * if you do this, you need to add release region code 42 - * in lynxfb_remove, or memory will not be mapped again 43 - * successfully 44 - */ 39 + /* reserve the vidreg space of smi adaptor */ 45 40 ret = pci_request_region(pdev, 1, "sm750fb"); 46 41 if (ret) { 47 42 pr_err("Can not request PCI regions.\n"); 48 - goto exit; 43 + return ret; 49 44 } 50 45 51 46 /* now map mmio and vidmem */ ··· 49 54 if (!sm750_dev->pvReg) { 50 55 pr_err("mmio failed\n"); 51 56 ret = -EFAULT; 52 - goto exit; 57 + goto err_release_region; 53 58 } 54 59 pr_info("mmio virtual addr = %p\n", sm750_dev->pvReg); 55 60 ··· 74 79 sm750_dev->pvMem = 75 80 ioremap_wc(sm750_dev->vidmem_start, sm750_dev->vidmem_size); 76 81 if (!sm750_dev->pvMem) { 77 - iounmap(sm750_dev->pvReg); 78 82 pr_err("Map video memory failed\n"); 79 83 ret = -EFAULT; 80 - goto exit; 84 + goto err_unmap_reg; 81 85 } 82 86 pr_info("video memory vaddr = %p\n", sm750_dev->pvMem); 83 - exit: 87 + 88 + return 0; 89 + 90 + err_unmap_reg: 91 + iounmap(sm750_dev->pvReg); 92 + err_release_region: 93 + pci_release_region(pdev, 1); 84 94 return ret; 85 95 } 86 96
-27
drivers/tee/tee_shm.c
··· 23 23 struct page *page; 24 24 }; 25 25 26 - static void shm_put_kernel_pages(struct page **pages, size_t page_count) 27 - { 28 - size_t n; 29 - 30 - for (n = 0; n < page_count; n++) 31 - put_page(pages[n]); 32 - } 33 - 34 - static void shm_get_kernel_pages(struct page **pages, size_t page_count) 35 - { 36 - size_t n; 37 - 38 - for (n = 0; n < page_count; n++) 39 - get_page(pages[n]); 40 - } 41 - 42 26 static void release_registered_pages(struct tee_shm *shm) 43 27 { 44 28 if (shm->pages) { 45 29 if (shm->flags & TEE_SHM_USER_MAPPED) 46 30 unpin_user_pages(shm->pages, shm->num_pages); 47 - else 48 - shm_put_kernel_pages(shm->pages, shm->num_pages); 49 31 50 32 kfree(shm->pages); 51 33 } ··· 459 477 goto err_put_shm_pages; 460 478 } 461 479 462 - /* 463 - * iov_iter_extract_kvec_pages does not get reference on the pages, 464 - * get a reference on them. 465 - */ 466 - if (iov_iter_is_kvec(iter)) 467 - shm_get_kernel_pages(shm->pages, num_pages); 468 - 469 480 shm->offset = off; 470 481 shm->size = len; 471 482 shm->num_pages = num_pages; ··· 474 499 err_put_shm_pages: 475 500 if (!iov_iter_is_kvec(iter)) 476 501 unpin_user_pages(shm->pages, shm->num_pages); 477 - else 478 - shm_put_kernel_pages(shm->pages, shm->num_pages); 479 502 err_free_shm_pages: 480 503 kfree(shm->pages); 481 504 err_free_shm:
+1 -1
drivers/ufs/core/ufshcd.c
··· 10066 10066 } 10067 10067 10068 10068 flush_work(&hba->eeh_work); 10069 + cancel_delayed_work_sync(&hba->ufs_rtc_update_work); 10069 10070 10070 10071 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 10071 10072 if (ret) ··· 10121 10120 if (ret) 10122 10121 goto set_link_active; 10123 10122 10124 - cancel_delayed_work_sync(&hba->ufs_rtc_update_work); 10125 10123 goto out; 10126 10124 10127 10125 set_link_active:
+5
drivers/usb/class/cdc-acm.c
··· 1379 1379 acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities; 1380 1380 if (quirks & NO_CAP_LINE) 1381 1381 acm->ctrl_caps &= ~USB_CDC_CAP_LINE; 1382 + if (quirks & MISSING_CAP_BRK) 1383 + acm->ctrl_caps |= USB_CDC_CAP_BRK; 1382 1384 acm->ctrlsize = ctrlsize; 1383 1385 acm->readsize = readsize; 1384 1386 acm->rx_buflimit = num_rx_buf; ··· 2003 2001 { USB_DEVICE(0x32a7, 0x0000), 2004 2002 .driver_info = IGNORE_DEVICE, 2005 2003 }, 2004 + 2005 + /* CH343 supports CAP_BRK, but doesn't advertise it */ 2006 + { USB_DEVICE(0x1a86, 0x55d3), .driver_info = MISSING_CAP_BRK, }, 2006 2007 2007 2008 /* control interfaces without any protocol set */ 2008 2009 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+1
drivers/usb/class/cdc-acm.h
··· 113 113 #define CLEAR_HALT_CONDITIONS BIT(5) 114 114 #define SEND_ZERO_PACKET BIT(6) 115 115 #define DISABLE_ECHO BIT(7) 116 + #define MISSING_CAP_BRK BIT(8)
+3 -1
drivers/usb/class/cdc-wdm.c
··· 225 225 /* we may already be in overflow */ 226 226 if (!test_bit(WDM_OVERFLOW, &desc->flags)) { 227 227 memmove(desc->ubuf + desc->length, desc->inbuf, length); 228 - desc->length += length; 228 + smp_wmb(); /* against wdm_read() */ 229 + WRITE_ONCE(desc->length, desc->length + length); 229 230 } 230 231 } 231 232 skip_error: ··· 534 533 return -ERESTARTSYS; 535 534 536 535 cntr = READ_ONCE(desc->length); 536 + smp_rmb(); /* against wdm_in_callback() */ 537 537 if (cntr == 0) { 538 538 desc->read = 0; 539 539 retry:
+3 -3
drivers/usb/class/usbtmc.c
··· 727 727 buffer[1] = data->bTag; 728 728 buffer[2] = ~data->bTag; 729 729 730 - retval = usb_bulk_msg(data->usb_dev, 730 + retval = usb_bulk_msg_killable(data->usb_dev, 731 731 usb_sndbulkpipe(data->usb_dev, 732 732 data->bulk_out), 733 733 buffer, USBTMC_HEADER_SIZE, ··· 1347 1347 buffer[11] = 0; /* Reserved */ 1348 1348 1349 1349 /* Send bulk URB */ 1350 - retval = usb_bulk_msg(data->usb_dev, 1350 + retval = usb_bulk_msg_killable(data->usb_dev, 1351 1351 usb_sndbulkpipe(data->usb_dev, 1352 1352 data->bulk_out), 1353 1353 buffer, USBTMC_HEADER_SIZE, ··· 1419 1419 actual = 0; 1420 1420 1421 1421 /* Send bulk URB */ 1422 - retval = usb_bulk_msg(data->usb_dev, 1422 + retval = usb_bulk_msg_killable(data->usb_dev, 1423 1423 usb_rcvbulkpipe(data->usb_dev, 1424 1424 data->bulk_in), 1425 1425 buffer, bufsize, &actual,
+5 -1
drivers/usb/core/config.c
··· 927 927 dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG; 928 928 } 929 929 930 - if (ncfg < 1) { 930 + if (ncfg < 1 && dev->quirks & USB_QUIRK_FORCE_ONE_CONFIG) { 931 + dev_info(ddev, "Device claims zero configurations, forcing to 1\n"); 932 + dev->descriptor.bNumConfigurations = 1; 933 + ncfg = 1; 934 + } else if (ncfg < 1) { 931 935 dev_err(ddev, "no configurations\n"); 932 936 return -EINVAL; 933 937 }
+79 -21
drivers/usb/core/message.c
··· 42 42 43 43 44 44 /* 45 - * Starts urb and waits for completion or timeout. Note that this call 46 - * is NOT interruptible. Many device driver i/o requests should be 47 - * interruptible and therefore these drivers should implement their 48 - * own interruptible routines. 45 + * Starts urb and waits for completion or timeout. 46 + * Whether or not the wait is killable depends on the flag passed in. 47 + * For example, compare usb_bulk_msg() and usb_bulk_msg_killable(). 48 + * 49 + * For non-killable waits, we enforce a maximum limit on the timeout value. 49 50 */ 50 - static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) 51 + static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length, 52 + bool killable) 51 53 { 52 54 struct api_context ctx; 53 55 unsigned long expire; 54 56 int retval; 57 + long rc; 55 58 56 59 init_completion(&ctx.done); 57 60 urb->context = &ctx; ··· 63 60 if (unlikely(retval)) 64 61 goto out; 65 62 66 - expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; 67 - if (!wait_for_completion_timeout(&ctx.done, expire)) { 63 + if (!killable && (timeout <= 0 || timeout > USB_MAX_SYNCHRONOUS_TIMEOUT)) 64 + timeout = USB_MAX_SYNCHRONOUS_TIMEOUT; 65 + expire = (timeout > 0) ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; 66 + if (killable) 67 + rc = wait_for_completion_killable_timeout(&ctx.done, expire); 68 + else 69 + rc = wait_for_completion_timeout(&ctx.done, expire); 70 + if (rc <= 0) { 68 71 usb_kill_urb(urb); 69 - retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status); 72 + if (ctx.status != -ENOENT) 73 + retval = ctx.status; 74 + else if (rc == 0) 75 + retval = -ETIMEDOUT; 76 + else 77 + retval = rc; 70 78 71 79 dev_dbg(&urb->dev->dev, 72 - "%s timed out on ep%d%s len=%u/%u\n", 80 + "%s timed out or killed on ep%d%s len=%u/%u\n", 73 81 current->comm, 74 82 usb_endpoint_num(&urb->ep->desc), 75 83 usb_urb_dir_in(urb) ? 
"in" : "out", ··· 114 100 usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, 115 101 len, usb_api_blocking_completion, NULL); 116 102 117 - retv = usb_start_wait_urb(urb, timeout, &length); 103 + retv = usb_start_wait_urb(urb, timeout, &length, false); 118 104 if (retv < 0) 119 105 return retv; 120 106 else ··· 131 117 * @index: USB message index value 132 118 * @data: pointer to the data to send 133 119 * @size: length in bytes of the data to send 134 - * @timeout: time in msecs to wait for the message to complete before timing 135 - * out (if 0 the wait is forever) 120 + * @timeout: time in msecs to wait for the message to complete before timing out 136 121 * 137 122 * Context: task context, might sleep. 138 123 * ··· 186 173 * @index: USB message index value 187 174 * @driver_data: pointer to the data to send 188 175 * @size: length in bytes of the data to send 189 - * @timeout: time in msecs to wait for the message to complete before timing 190 - * out (if 0 the wait is forever) 176 + * @timeout: time in msecs to wait for the message to complete before timing out 191 177 * @memflags: the flags for memory allocation for buffers 192 178 * 193 179 * Context: !in_interrupt () ··· 244 232 * @index: USB message index value 245 233 * @driver_data: pointer to the data to be filled in by the message 246 234 * @size: length in bytes of the data to be received 247 - * @timeout: time in msecs to wait for the message to complete before timing 248 - * out (if 0 the wait is forever) 235 + * @timeout: time in msecs to wait for the message to complete before timing out 249 236 * @memflags: the flags for memory allocation for buffers 250 237 * 251 238 * Context: !in_interrupt () ··· 315 304 * @len: length in bytes of the data to send 316 305 * @actual_length: pointer to a location to put the actual length transferred 317 306 * in bytes 318 - * @timeout: time in msecs to wait for the message to complete before 319 - * timing out (if 0 the wait is forever) 307 + 
* @timeout: time in msecs to wait for the message to complete before timing out 320 308 * 321 309 * Context: task context, might sleep. 322 310 * ··· 347 337 * @len: length in bytes of the data to send 348 338 * @actual_length: pointer to a location to put the actual length transferred 349 339 * in bytes 350 - * @timeout: time in msecs to wait for the message to complete before 351 - * timing out (if 0 the wait is forever) 340 + * @timeout: time in msecs to wait for the message to complete before timing out 352 341 * 353 342 * Context: task context, might sleep. 354 343 * ··· 394 385 usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, 395 386 usb_api_blocking_completion, NULL); 396 387 397 - return usb_start_wait_urb(urb, timeout, actual_length); 388 + return usb_start_wait_urb(urb, timeout, actual_length, false); 398 389 } 399 390 EXPORT_SYMBOL_GPL(usb_bulk_msg); 391 + 392 + /** 393 + * usb_bulk_msg_killable - Builds a bulk urb, sends it off and waits for completion in a killable state 394 + * @usb_dev: pointer to the usb device to send the message to 395 + * @pipe: endpoint "pipe" to send the message to 396 + * @data: pointer to the data to send 397 + * @len: length in bytes of the data to send 398 + * @actual_length: pointer to a location to put the actual length transferred 399 + * in bytes 400 + * @timeout: time in msecs to wait for the message to complete before 401 + * timing out (if <= 0, the wait is as long as possible) 402 + * 403 + * Context: task context, might sleep. 404 + * 405 + * This function is just like usb_blk_msg(), except that it waits in a 406 + * killable state and there is no limit on the timeout length. 407 + * 408 + * Return: 409 + * If successful, 0. Otherwise a negative error number. The number of actual 410 + * bytes transferred will be stored in the @actual_length parameter. 
411 + * 412 + */ 413 + int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe, 414 + void *data, int len, int *actual_length, int timeout) 415 + { 416 + struct urb *urb; 417 + struct usb_host_endpoint *ep; 418 + 419 + ep = usb_pipe_endpoint(usb_dev, pipe); 420 + if (!ep || len < 0) 421 + return -EINVAL; 422 + 423 + urb = usb_alloc_urb(0, GFP_KERNEL); 424 + if (!urb) 425 + return -ENOMEM; 426 + 427 + if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == 428 + USB_ENDPOINT_XFER_INT) { 429 + pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); 430 + usb_fill_int_urb(urb, usb_dev, pipe, data, len, 431 + usb_api_blocking_completion, NULL, 432 + ep->desc.bInterval); 433 + } else 434 + usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, 435 + usb_api_blocking_completion, NULL); 436 + 437 + return usb_start_wait_urb(urb, timeout, actual_length, true); 438 + } 439 + EXPORT_SYMBOL_GPL(usb_bulk_msg_killable); 400 440 401 441 /*-------------------------------------------------------------------*/ 402 442
+1 -7
drivers/usb/core/phy.c
··· 200 200 list_for_each_entry(roothub_entry, head, list) { 201 201 err = phy_set_mode(roothub_entry->phy, mode); 202 202 if (err) 203 - goto err_out; 203 + return err; 204 204 } 205 205 206 206 return 0; 207 - 208 - err_out: 209 - list_for_each_entry_continue_reverse(roothub_entry, head, list) 210 - phy_power_off(roothub_entry->phy); 211 - 212 - return err; 213 207 } 214 208 EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode); 215 209
+21
drivers/usb/core/quirks.c
··· 140 140 case 'p': 141 141 flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT; 142 142 break; 143 + case 'q': 144 + flags |= USB_QUIRK_FORCE_ONE_CONFIG; 143 145 /* Ignore unrecognized flag characters */ 144 146 } 145 147 } ··· 208 206 209 207 /* HP v222w 16GB Mini USB Drive */ 210 208 { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT }, 209 + 210 + /* Huawei 4G LTE module ME906S */ 211 + { USB_DEVICE(0x03f0, 0xa31d), .driver_info = 212 + USB_QUIRK_DISCONNECT_SUSPEND }, 211 213 212 214 /* Creative SB Audigy 2 NX */ 213 215 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, ··· 382 376 /* SanDisk Extreme 55AE */ 383 377 { USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM }, 384 378 379 + /* Avermedia Live Gamer Ultra 2.1 (GC553G2) - BOS descriptor fetch hangs at SuperSpeed Plus */ 380 + { USB_DEVICE(0x07ca, 0x2553), .driver_info = USB_QUIRK_NO_BOS }, 381 + 385 382 /* Realforce 87U Keyboard */ 386 383 { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, 387 384 ··· 444 435 /* ASUS Base Station(T100) */ 445 436 { USB_DEVICE(0x0b05, 0x17e0), .driver_info = 446 437 USB_QUIRK_IGNORE_REMOTE_WAKEUP }, 438 + 439 + /* ASUS TUF 4K PRO - BOS descriptor fetch hangs at SuperSpeed Plus */ 440 + { USB_DEVICE(0x0b05, 0x1ab9), .driver_info = USB_QUIRK_NO_BOS }, 447 441 448 442 /* Realtek Semiconductor Corp. 
Mass Storage Device (Multicard Reader)*/ 449 443 { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, ··· 576 564 577 565 { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM }, 578 566 567 + /* UGREEN 35871 - BOS descriptor fetch hangs at SuperSpeed Plus */ 568 + { USB_DEVICE(0x2b89, 0x5871), .driver_info = USB_QUIRK_NO_BOS }, 569 + 579 570 /* APTIV AUTOMOTIVE HUB */ 580 571 { USB_DEVICE(0x2c48, 0x0132), .driver_info = 581 572 USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT }, ··· 589 574 /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */ 590 575 { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM }, 591 576 577 + /* ezcap401 - BOS descriptor fetch hangs at SuperSpeed Plus */ 578 + { USB_DEVICE(0x32ed, 0x0401), .driver_info = USB_QUIRK_NO_BOS }, 579 + 592 580 /* DELL USB GEN2 */ 593 581 { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME }, 594 582 595 583 /* VCOM device */ 596 584 { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, 585 + 586 + /* Noji-MCS SmartCard Reader */ 587 + { USB_DEVICE(0x5131, 0x2007), .driver_info = USB_QUIRK_FORCE_ONE_CONFIG }, 597 588 598 589 /* INTEL VALUE SSD */ 599 590 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+2
drivers/usb/dwc3/dwc3-pci.c
··· 56 56 #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e 57 57 #define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0 58 58 #define PCI_DEVICE_ID_INTEL_RPL 0xa70e 59 + #define PCI_DEVICE_ID_INTEL_NVLH 0xd37f 59 60 #define PCI_DEVICE_ID_INTEL_PTLH 0xe332 60 61 #define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e 61 62 #define PCI_DEVICE_ID_INTEL_PTLU 0xe432 ··· 448 447 { PCI_DEVICE_DATA(INTEL, CNPH, &dwc3_pci_intel_swnode) }, 449 448 { PCI_DEVICE_DATA(INTEL, CNPV, &dwc3_pci_intel_swnode) }, 450 449 { PCI_DEVICE_DATA(INTEL, RPL, &dwc3_pci_intel_swnode) }, 450 + { PCI_DEVICE_DATA(INTEL, NVLH, &dwc3_pci_intel_swnode) }, 451 451 { PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) }, 452 452 { PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) }, 453 453 { PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) },
+4
drivers/usb/gadget/function/f_hid.c
··· 1207 1207 if (!hidg->interval_user_set) { 1208 1208 hidg_fs_in_ep_desc.bInterval = 10; 1209 1209 hidg_hs_in_ep_desc.bInterval = 4; 1210 + hidg_ss_in_ep_desc.bInterval = 4; 1210 1211 } else { 1211 1212 hidg_fs_in_ep_desc.bInterval = hidg->interval; 1212 1213 hidg_hs_in_ep_desc.bInterval = hidg->interval; 1214 + hidg_ss_in_ep_desc.bInterval = hidg->interval; 1213 1215 } 1214 1216 1215 1217 hidg_ss_out_comp_desc.wBytesPerInterval = ··· 1241 1239 if (!hidg->interval_user_set) { 1242 1240 hidg_fs_out_ep_desc.bInterval = 10; 1243 1241 hidg_hs_out_ep_desc.bInterval = 4; 1242 + hidg_ss_out_ep_desc.bInterval = 4; 1244 1243 } else { 1245 1244 hidg_fs_out_ep_desc.bInterval = hidg->interval; 1246 1245 hidg_hs_out_ep_desc.bInterval = hidg->interval; 1246 + hidg_ss_out_ep_desc.bInterval = hidg->interval; 1247 1247 } 1248 1248 status = usb_assign_descriptors(f, 1249 1249 hidg_fs_descriptors_intout,
+10 -2
drivers/usb/gadget/function/f_mass_storage.c
··· 180 180 #include <linux/kthread.h> 181 181 #include <linux/sched/signal.h> 182 182 #include <linux/limits.h> 183 + #include <linux/overflow.h> 183 184 #include <linux/pagemap.h> 184 185 #include <linux/rwsem.h> 185 186 #include <linux/slab.h> ··· 1854 1853 int cmnd_size, enum data_direction data_dir, 1855 1854 unsigned int mask, int needs_medium, const char *name) 1856 1855 { 1857 - if (common->curlun) 1858 - common->data_size_from_cmnd <<= common->curlun->blkbits; 1856 + if (common->curlun) { 1857 + if (check_shl_overflow(common->data_size_from_cmnd, 1858 + common->curlun->blkbits, 1859 + &common->data_size_from_cmnd)) { 1860 + common->phase_error = 1; 1861 + return -EINVAL; 1862 + } 1863 + } 1864 + 1859 1865 return check_command(common, cmnd_size, data_dir, 1860 1866 mask, needs_medium, name); 1861 1867 }
+79 -65
drivers/usb/gadget/function/f_ncm.c
··· 83 83 return container_of(f, struct f_ncm, port.func); 84 84 } 85 85 86 - static inline struct f_ncm_opts *func_to_ncm_opts(struct usb_function *f) 87 - { 88 - return container_of(f->fi, struct f_ncm_opts, func_inst); 89 - } 90 - 91 86 /*-------------------------------------------------------------------------*/ 92 87 93 88 /* ··· 859 864 static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 860 865 { 861 866 struct f_ncm *ncm = func_to_ncm(f); 862 - struct f_ncm_opts *opts = func_to_ncm_opts(f); 863 867 struct usb_composite_dev *cdev = f->config->cdev; 864 868 865 869 /* Control interface has only altsetting 0 */ ··· 881 887 if (alt > 1) 882 888 goto fail; 883 889 884 - scoped_guard(mutex, &opts->lock) 885 - if (opts->net) { 886 - DBG(cdev, "reset ncm\n"); 887 - opts->net = NULL; 888 - gether_disconnect(&ncm->port); 889 - ncm_reset_values(ncm); 890 - } 890 + if (ncm->netdev) { 891 + DBG(cdev, "reset ncm\n"); 892 + ncm->netdev = NULL; 893 + gether_disconnect(&ncm->port); 894 + ncm_reset_values(ncm); 895 + } 891 896 892 897 /* 893 898 * CDC Network only sends data in non-default altsettings. 
··· 919 926 net = gether_connect(&ncm->port); 920 927 if (IS_ERR(net)) 921 928 return PTR_ERR(net); 922 - scoped_guard(mutex, &opts->lock) 923 - opts->net = net; 929 + ncm->netdev = net; 924 930 } 925 931 926 932 spin_lock(&ncm->lock); ··· 1366 1374 static void ncm_disable(struct usb_function *f) 1367 1375 { 1368 1376 struct f_ncm *ncm = func_to_ncm(f); 1369 - struct f_ncm_opts *opts = func_to_ncm_opts(f); 1370 1377 struct usb_composite_dev *cdev = f->config->cdev; 1371 1378 1372 1379 DBG(cdev, "ncm deactivated\n"); 1373 1380 1374 - scoped_guard(mutex, &opts->lock) 1375 - if (opts->net) { 1376 - opts->net = NULL; 1377 - gether_disconnect(&ncm->port); 1378 - } 1381 + if (ncm->netdev) { 1382 + ncm->netdev = NULL; 1383 + gether_disconnect(&ncm->port); 1384 + } 1379 1385 1380 1386 if (ncm->notify->enabled) { 1381 1387 usb_ep_disable(ncm->notify); ··· 1433 1443 { 1434 1444 struct usb_composite_dev *cdev = c->cdev; 1435 1445 struct f_ncm *ncm = func_to_ncm(f); 1436 - struct f_ncm_opts *ncm_opts = func_to_ncm_opts(f); 1437 1446 struct usb_string *us; 1438 1447 int status = 0; 1439 1448 struct usb_ep *ep; 1449 + struct f_ncm_opts *ncm_opts; 1440 1450 1441 1451 struct usb_os_desc_table *os_desc_table __free(kfree) = NULL; 1442 - struct net_device *netdev __free(free_gether_netdev) = NULL; 1452 + struct net_device *net __free(detach_gadget) = NULL; 1443 1453 struct usb_request *request __free(free_usb_request) = NULL; 1444 1454 1445 1455 if (!can_support_ecm(cdev->gadget)) 1446 1456 return -EINVAL; 1457 + 1458 + ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst); 1447 1459 1448 1460 if (cdev->use_os_string) { 1449 1461 os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL); ··· 1453 1461 return -ENOMEM; 1454 1462 } 1455 1463 1456 - netdev = gether_setup_default(); 1457 - if (IS_ERR(netdev)) 1458 - return -ENOMEM; 1464 + scoped_guard(mutex, &ncm_opts->lock) 1465 + if (ncm_opts->bind_count == 0) { 1466 + if (!device_is_registered(&ncm_opts->net->dev)) { 1467 + 
ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN); 1468 + gether_set_gadget(ncm_opts->net, cdev->gadget); 1469 + status = gether_register_netdev(ncm_opts->net); 1470 + } else 1471 + status = gether_attach_gadget(ncm_opts->net, cdev->gadget); 1459 1472 1460 - scoped_guard(mutex, &ncm_opts->lock) { 1461 - gether_apply_opts(netdev, &ncm_opts->net_opts); 1462 - netdev->mtu = ncm_opts->max_segment_size - ETH_HLEN; 1463 - } 1473 + if (status) 1474 + return status; 1475 + net = ncm_opts->net; 1476 + } 1464 1477 1465 - gether_set_gadget(netdev, cdev->gadget); 1466 - status = gether_register_netdev(netdev); 1467 - if (status) 1468 - return status; 1469 - 1470 - /* export host's Ethernet address in CDC format */ 1471 - status = gether_get_host_addr_cdc(netdev, ncm->ethaddr, 1472 - sizeof(ncm->ethaddr)); 1473 - if (status < 12) 1474 - return -EINVAL; 1475 - ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr; 1478 + ncm_string_defs[1].s = ncm->ethaddr; 1476 1479 1477 1480 us = usb_gstrings_attach(cdev, ncm_strings, 1478 1481 ARRAY_SIZE(ncm_string_defs)); ··· 1565 1578 f->os_desc_n = 1; 1566 1579 } 1567 1580 ncm->notify_req = no_free_ptr(request); 1568 - ncm->netdev = no_free_ptr(netdev); 1569 - ncm->port.ioport = netdev_priv(ncm->netdev); 1581 + 1582 + ncm_opts->bind_count++; 1583 + retain_and_null_ptr(net); 1570 1584 1571 1585 DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n", 1572 1586 ncm->port.in_ep->name, ncm->port.out_ep->name, ··· 1582 1594 } 1583 1595 1584 1596 /* f_ncm_item_ops */ 1585 - USB_ETHER_OPTS_ITEM(ncm); 1597 + USB_ETHERNET_CONFIGFS_ITEM(ncm); 1586 1598 1587 1599 /* f_ncm_opts_dev_addr */ 1588 - USB_ETHER_OPTS_ATTR_DEV_ADDR(ncm); 1600 + USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm); 1589 1601 1590 1602 /* f_ncm_opts_host_addr */ 1591 - USB_ETHER_OPTS_ATTR_HOST_ADDR(ncm); 1603 + USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm); 1592 1604 1593 1605 /* f_ncm_opts_qmult */ 1594 - USB_ETHER_OPTS_ATTR_QMULT(ncm); 1606 + 
USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm); 1595 1607 1596 1608 /* f_ncm_opts_ifname */ 1597 - USB_ETHER_OPTS_ATTR_IFNAME(ncm); 1609 + USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm); 1598 1610 1599 1611 static ssize_t ncm_opts_max_segment_size_show(struct config_item *item, 1600 1612 char *page) ··· 1660 1672 struct f_ncm_opts *opts; 1661 1673 1662 1674 opts = container_of(f, struct f_ncm_opts, func_inst); 1675 + if (device_is_registered(&opts->net->dev)) 1676 + gether_cleanup(netdev_priv(opts->net)); 1677 + else 1678 + free_netdev(opts->net); 1663 1679 kfree(opts->ncm_interf_group); 1664 1680 kfree(opts); 1665 1681 } 1666 1682 1667 1683 static struct usb_function_instance *ncm_alloc_inst(void) 1668 1684 { 1669 - struct usb_function_instance *ret; 1685 + struct f_ncm_opts *opts; 1670 1686 struct usb_os_desc *descs[1]; 1671 1687 char *names[1]; 1672 1688 struct config_group *ncm_interf_group; 1673 1689 1674 - struct f_ncm_opts *opts __free(kfree) = kzalloc_obj(*opts); 1690 + opts = kzalloc_obj(*opts); 1675 1691 if (!opts) 1676 1692 return ERR_PTR(-ENOMEM); 1677 - 1678 - opts->net = NULL; 1679 1693 opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id; 1680 - gether_setup_opts_default(&opts->net_opts, "usb"); 1681 1694 1682 1695 mutex_init(&opts->lock); 1683 1696 opts->func_inst.free_func_inst = ncm_free_inst; 1697 + opts->net = gether_setup_default(); 1698 + if (IS_ERR(opts->net)) { 1699 + struct net_device *net = opts->net; 1700 + kfree(opts); 1701 + return ERR_CAST(net); 1702 + } 1684 1703 opts->max_segment_size = ETH_FRAME_LEN; 1685 1704 INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop); 1686 1705 ··· 1698 1703 ncm_interf_group = 1699 1704 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, 1700 1705 names, THIS_MODULE); 1701 - if (IS_ERR(ncm_interf_group)) 1706 + if (IS_ERR(ncm_interf_group)) { 1707 + ncm_free_inst(&opts->func_inst); 1702 1708 return ERR_CAST(ncm_interf_group); 1709 + } 1703 1710 opts->ncm_interf_group = ncm_interf_group; 1704 1711 1705 
- ret = &opts->func_inst; 1706 - retain_and_null_ptr(opts); 1707 - return ret; 1712 + return &opts->func_inst; 1708 1713 } 1709 1714 1710 1715 static void ncm_free(struct usb_function *f) 1711 1716 { 1712 - struct f_ncm_opts *opts = func_to_ncm_opts(f); 1717 + struct f_ncm *ncm; 1718 + struct f_ncm_opts *opts; 1713 1719 1714 - scoped_guard(mutex, &opts->lock) 1715 - opts->refcnt--; 1716 - kfree(func_to_ncm(f)); 1720 + ncm = func_to_ncm(f); 1721 + opts = container_of(f->fi, struct f_ncm_opts, func_inst); 1722 + kfree(ncm); 1723 + mutex_lock(&opts->lock); 1724 + opts->refcnt--; 1725 + mutex_unlock(&opts->lock); 1717 1726 } 1718 1727 1719 1728 static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) 1720 1729 { 1721 1730 struct f_ncm *ncm = func_to_ncm(f); 1731 + struct f_ncm_opts *ncm_opts; 1722 1732 1723 1733 DBG(c->cdev, "ncm unbind\n"); 1734 + 1735 + ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst); 1724 1736 1725 1737 hrtimer_cancel(&ncm->task_timer); 1726 1738 ··· 1745 1743 kfree(ncm->notify_req->buf); 1746 1744 usb_ep_free_request(ncm->notify, ncm->notify_req); 1747 1745 1748 - ncm->port.ioport = NULL; 1749 - gether_cleanup(netdev_priv(ncm->netdev)); 1746 + ncm_opts->bind_count--; 1747 + if (ncm_opts->bind_count == 0) 1748 + gether_detach_gadget(ncm_opts->net); 1750 1749 } 1751 1750 1752 1751 static struct usb_function *ncm_alloc(struct usb_function_instance *fi) 1753 1752 { 1754 1753 struct f_ncm *ncm; 1755 1754 struct f_ncm_opts *opts; 1755 + int status; 1756 1756 1757 1757 /* allocate and initialize one new instance */ 1758 1758 ncm = kzalloc(sizeof(*ncm), GFP_KERNEL); ··· 1762 1758 return ERR_PTR(-ENOMEM); 1763 1759 1764 1760 opts = container_of(fi, struct f_ncm_opts, func_inst); 1761 + mutex_lock(&opts->lock); 1762 + opts->refcnt++; 1765 1763 1766 - scoped_guard(mutex, &opts->lock) 1767 - opts->refcnt++; 1764 + /* export host's Ethernet address in CDC format */ 1765 + status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr, 
1766 + sizeof(ncm->ethaddr)); 1767 + if (status < 12) { /* strlen("01234567890a") */ 1768 + kfree(ncm); 1769 + mutex_unlock(&opts->lock); 1770 + return ERR_PTR(-EINVAL); 1771 + } 1768 1772 1769 1773 spin_lock_init(&ncm->lock); 1770 1774 ncm_reset_values(ncm); 1775 + ncm->port.ioport = netdev_priv(opts->net); 1776 + mutex_unlock(&opts->lock); 1771 1777 ncm->port.is_fixed = true; 1772 1778 ncm->port.supports_multi_frame = true; 1773 1779
+14
drivers/usb/gadget/function/f_tcm.c
··· 1222 1222 se_cmd = &cmd->se_cmd; 1223 1223 tpg = cmd->fu->tpg; 1224 1224 tv_nexus = tpg->tpg_nexus; 1225 + if (!tv_nexus) { 1226 + struct usb_gadget *gadget = fuas_to_gadget(cmd->fu); 1227 + 1228 + dev_err(&gadget->dev, "Missing nexus, ignoring command\n"); 1229 + return; 1230 + } 1231 + 1225 1232 dir = get_cmd_dir(cmd->cmd_buf); 1226 1233 if (dir < 0) 1227 1234 goto out; ··· 1490 1483 se_cmd = &cmd->se_cmd; 1491 1484 tpg = cmd->fu->tpg; 1492 1485 tv_nexus = tpg->tpg_nexus; 1486 + if (!tv_nexus) { 1487 + struct usb_gadget *gadget = fuas_to_gadget(cmd->fu); 1488 + 1489 + dev_err(&gadget->dev, "Missing nexus, ignoring command\n"); 1490 + return; 1491 + } 1492 + 1493 1493 dir = get_cmd_dir(cmd->cmd_buf); 1494 1494 if (dir < 0) 1495 1495 goto out;
+22 -45
drivers/usb/gadget/function/u_ether.c
··· 897 897 } 898 898 EXPORT_SYMBOL_GPL(gether_set_gadget); 899 899 900 + int gether_attach_gadget(struct net_device *net, struct usb_gadget *g) 901 + { 902 + int ret; 903 + 904 + ret = device_move(&net->dev, &g->dev, DPM_ORDER_DEV_AFTER_PARENT); 905 + if (ret) 906 + return ret; 907 + 908 + gether_set_gadget(net, g); 909 + return 0; 910 + } 911 + EXPORT_SYMBOL_GPL(gether_attach_gadget); 912 + 913 + void gether_detach_gadget(struct net_device *net) 914 + { 915 + struct eth_dev *dev = netdev_priv(net); 916 + 917 + device_move(&net->dev, NULL, DPM_ORDER_NONE); 918 + dev->gadget = NULL; 919 + } 920 + EXPORT_SYMBOL_GPL(gether_detach_gadget); 921 + 900 922 int gether_set_dev_addr(struct net_device *net, const char *dev_addr) 901 923 { 902 924 struct eth_dev *dev; ··· 1062 1040 } 1063 1041 EXPORT_SYMBOL_GPL(gether_set_ifname); 1064 1042 1065 - void gether_setup_opts_default(struct gether_opts *opts, const char *name) 1066 - { 1067 - opts->qmult = QMULT_DEFAULT; 1068 - snprintf(opts->name, sizeof(opts->name), "%s%%d", name); 1069 - eth_random_addr(opts->dev_mac); 1070 - opts->addr_assign_type = NET_ADDR_RANDOM; 1071 - eth_random_addr(opts->host_mac); 1072 - } 1073 - EXPORT_SYMBOL_GPL(gether_setup_opts_default); 1074 - 1075 - void gether_apply_opts(struct net_device *net, struct gether_opts *opts) 1076 - { 1077 - struct eth_dev *dev = netdev_priv(net); 1078 - 1079 - dev->qmult = opts->qmult; 1080 - 1081 - if (opts->ifname_set) { 1082 - strscpy(net->name, opts->name, sizeof(net->name)); 1083 - dev->ifname_set = true; 1084 - } 1085 - 1086 - memcpy(dev->host_mac, opts->host_mac, sizeof(dev->host_mac)); 1087 - 1088 - if (opts->addr_assign_type == NET_ADDR_SET) { 1089 - memcpy(dev->dev_mac, opts->dev_mac, sizeof(dev->dev_mac)); 1090 - net->addr_assign_type = opts->addr_assign_type; 1091 - } 1092 - } 1093 - EXPORT_SYMBOL_GPL(gether_apply_opts); 1094 - 1095 1043 void gether_suspend(struct gether *link) 1096 1044 { 1097 1045 struct eth_dev *dev = link->ioport; ··· 1117 1125 
free_netdev(dev->net); 1118 1126 } 1119 1127 EXPORT_SYMBOL_GPL(gether_cleanup); 1120 - 1121 - void gether_unregister_free_netdev(struct net_device *net) 1122 - { 1123 - if (!net) 1124 - return; 1125 - 1126 - struct eth_dev *dev = netdev_priv(net); 1127 - 1128 - if (net->reg_state == NETREG_REGISTERED) { 1129 - unregister_netdev(net); 1130 - flush_work(&dev->work); 1131 - } 1132 - free_netdev(net); 1133 - } 1134 - EXPORT_SYMBOL_GPL(gether_unregister_free_netdev); 1135 1128 1136 1129 /** 1137 1130 * gether_connect - notify network layer that USB link is active
+26 -30
drivers/usb/gadget/function/u_ether.h
··· 38 38 39 39 struct eth_dev; 40 40 41 - /** 42 - * struct gether_opts - Options for Ethernet gadget function instances 43 - * @name: Pattern for the network interface name (e.g., "usb%d"). 44 - * Used to generate the net device name. 45 - * @qmult: Queue length multiplier for high/super speed. 46 - * @host_mac: The MAC address to be used by the host side. 47 - * @dev_mac: The MAC address to be used by the device side. 48 - * @ifname_set: True if the interface name pattern has been set by userspace. 49 - * @addr_assign_type: The method used for assigning the device MAC address 50 - * (e.g., NET_ADDR_RANDOM, NET_ADDR_SET). 51 - * 52 - * This structure caches network-related settings provided through configfs 53 - * before the net_device is fully instantiated. This allows for early 54 - * configuration while deferring net_device allocation until the function 55 - * is bound. 56 - */ 57 - struct gether_opts { 58 - char name[IFNAMSIZ]; 59 - unsigned int qmult; 60 - u8 host_mac[ETH_ALEN]; 61 - u8 dev_mac[ETH_ALEN]; 62 - bool ifname_set; 63 - unsigned char addr_assign_type; 64 - }; 65 - 66 41 /* 67 42 * This represents the USB side of an "ethernet" link, managed by a USB 68 43 * function which provides control and (maybe) framing. Two functions ··· 151 176 void gether_set_gadget(struct net_device *net, struct usb_gadget *g); 152 177 153 178 /** 179 + * gether_attach_gadget - Reparent net_device to the gadget device. 180 + * @net: The network device to reparent. 181 + * @g: The target USB gadget device to parent to. 182 + * 183 + * This function moves the network device to be a child of the USB gadget 184 + * device in the device hierarchy. This is typically done when the function 185 + * is bound to a configuration. 186 + * 187 + * Returns 0 on success, or a negative error code on failure. 188 + */ 189 + int gether_attach_gadget(struct net_device *net, struct usb_gadget *g); 190 + 191 + /** 192 + * gether_detach_gadget - Detach net_device from its gadget parent. 
193 + * @net: The network device to detach. 194 + * 195 + * This function moves the network device to be a child of the virtual 196 + * devices parent, effectively detaching it from the USB gadget device 197 + * hierarchy. This is typically done when the function is unbound 198 + * from a configuration but the instance is not yet freed. 199 + */ 200 + void gether_detach_gadget(struct net_device *net); 201 + 202 + DEFINE_FREE(detach_gadget, struct net_device *, if (_T) gether_detach_gadget(_T)) 203 + 204 + /** 154 205 * gether_set_dev_addr - initialize an ethernet-over-usb link with eth address 155 206 * @net: device representing this link 156 207 * @dev_addr: eth address of this device ··· 284 283 int gether_set_ifname(struct net_device *net, const char *name, int len); 285 284 286 285 void gether_cleanup(struct eth_dev *dev); 287 - void gether_unregister_free_netdev(struct net_device *net); 288 - DEFINE_FREE(free_gether_netdev, struct net_device *, gether_unregister_free_netdev(_T)); 289 - 290 - void gether_setup_opts_default(struct gether_opts *opts, const char *name); 291 - void gether_apply_opts(struct net_device *net, struct gether_opts *opts); 292 286 293 287 void gether_suspend(struct gether *link); 294 288 void gether_resume(struct gether *link);
-177
drivers/usb/gadget/function/u_ether_configfs.h
··· 13 13 #ifndef __U_ETHER_CONFIGFS_H 14 14 #define __U_ETHER_CONFIGFS_H 15 15 16 - #include <linux/cleanup.h> 17 - #include <linux/hex.h> 18 - #include <linux/if_ether.h> 19 - #include <linux/mutex.h> 20 - #include <linux/netdevice.h> 21 - #include <linux/rtnetlink.h> 22 - 23 16 #define USB_ETHERNET_CONFIGFS_ITEM(_f_) \ 24 17 static void _f_##_attr_release(struct config_item *item) \ 25 18 { \ ··· 196 203 } \ 197 204 \ 198 205 CONFIGFS_ATTR(_f_##_opts_, _n_) 199 - 200 - #define USB_ETHER_OPTS_ITEM(_f_) \ 201 - static void _f_##_attr_release(struct config_item *item) \ 202 - { \ 203 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 204 - \ 205 - usb_put_function_instance(&opts->func_inst); \ 206 - } \ 207 - \ 208 - static struct configfs_item_operations _f_##_item_ops = { \ 209 - .release = _f_##_attr_release, \ 210 - } 211 - 212 - #define USB_ETHER_OPTS_ATTR_DEV_ADDR(_f_) \ 213 - static ssize_t _f_##_opts_dev_addr_show(struct config_item *item, \ 214 - char *page) \ 215 - { \ 216 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 217 - \ 218 - guard(mutex)(&opts->lock); \ 219 - return sysfs_emit(page, "%pM\n", opts->net_opts.dev_mac); \ 220 - } \ 221 - \ 222 - static ssize_t _f_##_opts_dev_addr_store(struct config_item *item, \ 223 - const char *page, size_t len) \ 224 - { \ 225 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 226 - u8 new_addr[ETH_ALEN]; \ 227 - const char *p = page; \ 228 - \ 229 - guard(mutex)(&opts->lock); \ 230 - if (opts->refcnt) \ 231 - return -EBUSY; \ 232 - \ 233 - for (int i = 0; i < ETH_ALEN; i++) { \ 234 - unsigned char num; \ 235 - if ((*p == '.') || (*p == ':')) \ 236 - p++; \ 237 - num = hex_to_bin(*p++) << 4; \ 238 - num |= hex_to_bin(*p++); \ 239 - new_addr[i] = num; \ 240 - } \ 241 - if (!is_valid_ether_addr(new_addr)) \ 242 - return -EINVAL; \ 243 - memcpy(opts->net_opts.dev_mac, new_addr, ETH_ALEN); \ 244 - opts->net_opts.addr_assign_type = NET_ADDR_SET; \ 245 - return len; \ 246 - } \ 247 - \ 248 - 
CONFIGFS_ATTR(_f_##_opts_, dev_addr) 249 - 250 - #define USB_ETHER_OPTS_ATTR_HOST_ADDR(_f_) \ 251 - static ssize_t _f_##_opts_host_addr_show(struct config_item *item, \ 252 - char *page) \ 253 - { \ 254 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 255 - \ 256 - guard(mutex)(&opts->lock); \ 257 - return sysfs_emit(page, "%pM\n", opts->net_opts.host_mac); \ 258 - } \ 259 - \ 260 - static ssize_t _f_##_opts_host_addr_store(struct config_item *item, \ 261 - const char *page, size_t len) \ 262 - { \ 263 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 264 - u8 new_addr[ETH_ALEN]; \ 265 - const char *p = page; \ 266 - \ 267 - guard(mutex)(&opts->lock); \ 268 - if (opts->refcnt) \ 269 - return -EBUSY; \ 270 - \ 271 - for (int i = 0; i < ETH_ALEN; i++) { \ 272 - unsigned char num; \ 273 - if ((*p == '.') || (*p == ':')) \ 274 - p++; \ 275 - num = hex_to_bin(*p++) << 4; \ 276 - num |= hex_to_bin(*p++); \ 277 - new_addr[i] = num; \ 278 - } \ 279 - if (!is_valid_ether_addr(new_addr)) \ 280 - return -EINVAL; \ 281 - memcpy(opts->net_opts.host_mac, new_addr, ETH_ALEN); \ 282 - return len; \ 283 - } \ 284 - \ 285 - CONFIGFS_ATTR(_f_##_opts_, host_addr) 286 - 287 - #define USB_ETHER_OPTS_ATTR_QMULT(_f_) \ 288 - static ssize_t _f_##_opts_qmult_show(struct config_item *item, \ 289 - char *page) \ 290 - { \ 291 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 292 - \ 293 - guard(mutex)(&opts->lock); \ 294 - return sysfs_emit(page, "%u\n", opts->net_opts.qmult); \ 295 - } \ 296 - \ 297 - static ssize_t _f_##_opts_qmult_store(struct config_item *item, \ 298 - const char *page, size_t len) \ 299 - { \ 300 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 301 - u32 val; \ 302 - int ret; \ 303 - \ 304 - guard(mutex)(&opts->lock); \ 305 - if (opts->refcnt) \ 306 - return -EBUSY; \ 307 - \ 308 - ret = kstrtou32(page, 0, &val); \ 309 - if (ret) \ 310 - return ret; \ 311 - \ 312 - opts->net_opts.qmult = val; \ 313 - return len; \ 314 - } \ 315 - \ 
316 - CONFIGFS_ATTR(_f_##_opts_, qmult) 317 - 318 - #define USB_ETHER_OPTS_ATTR_IFNAME(_f_) \ 319 - static ssize_t _f_##_opts_ifname_show(struct config_item *item, \ 320 - char *page) \ 321 - { \ 322 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 323 - const char *name; \ 324 - \ 325 - guard(mutex)(&opts->lock); \ 326 - rtnl_lock(); \ 327 - if (opts->net_opts.ifname_set) \ 328 - name = opts->net_opts.name; \ 329 - else if (opts->net) \ 330 - name = netdev_name(opts->net); \ 331 - else \ 332 - name = "(inactive net_device)"; \ 333 - rtnl_unlock(); \ 334 - return sysfs_emit(page, "%s\n", name); \ 335 - } \ 336 - \ 337 - static ssize_t _f_##_opts_ifname_store(struct config_item *item, \ 338 - const char *page, size_t len) \ 339 - { \ 340 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 341 - char tmp[IFNAMSIZ]; \ 342 - const char *p; \ 343 - size_t c_len = len; \ 344 - \ 345 - if (c_len > 0 && page[c_len - 1] == '\n') \ 346 - c_len--; \ 347 - \ 348 - if (c_len >= sizeof(tmp)) \ 349 - return -E2BIG; \ 350 - \ 351 - strscpy(tmp, page, c_len + 1); \ 352 - if (!dev_valid_name(tmp)) \ 353 - return -EINVAL; \ 354 - \ 355 - /* Require exactly one %d */ \ 356 - p = strchr(tmp, '%'); \ 357 - if (!p || p[1] != 'd' || strchr(p + 2, '%')) \ 358 - return -EINVAL; \ 359 - \ 360 - guard(mutex)(&opts->lock); \ 361 - if (opts->refcnt) \ 362 - return -EBUSY; \ 363 - strscpy(opts->net_opts.name, tmp, sizeof(opts->net_opts.name)); \ 364 - opts->net_opts.ifname_set = true; \ 365 - return len; \ 366 - } \ 367 - \ 368 - CONFIGFS_ATTR(_f_##_opts_, ifname) 369 206 370 207 #endif /* __U_ETHER_CONFIGFS_H */
+1 -3
drivers/usb/gadget/function/u_ncm.h
··· 15 15 16 16 #include <linux/usb/composite.h> 17 17 18 - #include "u_ether.h" 19 - 20 18 struct f_ncm_opts { 21 19 struct usb_function_instance func_inst; 22 20 struct net_device *net; 21 + int bind_count; 23 22 24 - struct gether_opts net_opts; 25 23 struct config_group *ncm_interf_group; 26 24 struct usb_os_desc ncm_os_desc; 27 25 char ncm_ext_compat_id[16];
+1 -1
drivers/usb/gadget/function/uvc_video.c
··· 513 513 return; 514 514 } 515 515 516 - interval_duration = 2 << (video->ep->desc->bInterval - 1); 516 + interval_duration = 1 << (video->ep->desc->bInterval - 1); 517 517 if (cdev->gadget->speed < USB_SPEED_HIGH) 518 518 interval_duration *= 10000; 519 519 else
+9 -1
drivers/usb/host/xhci-debugfs.c
··· 386 386 static int xhci_portli_show(struct seq_file *s, void *unused) 387 387 { 388 388 struct xhci_port *port = s->private; 389 - struct xhci_hcd *xhci = hcd_to_xhci(port->rhub->hcd); 389 + struct xhci_hcd *xhci; 390 390 u32 portli; 391 391 392 392 portli = readl(&port->port_reg->portli); 393 + 394 + /* port without protocol capability isn't added to a roothub */ 395 + if (!port->rhub) { 396 + seq_printf(s, "0x%08x\n", portli); 397 + return 0; 398 + } 399 + 400 + xhci = hcd_to_xhci(port->rhub->hcd); 393 401 394 402 /* PORTLI fields are valid if port is a USB3 or eUSB2V2 port */ 395 403 if (port->rhub == &xhci->usb3_rhub)
+1
drivers/usb/host/xhci-ring.c
··· 3195 3195 3196 3196 if (status & STS_HCE) { 3197 3197 xhci_warn(xhci, "WARNING: Host Controller Error\n"); 3198 + xhci_halt(xhci); 3198 3199 goto out; 3199 3200 } 3200 3201
+2 -2
drivers/usb/host/xhci.c
··· 4146 4146 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 4147 4147 (xhci->xhc_state & XHCI_STATE_HALTED)) { 4148 4148 spin_unlock_irqrestore(&xhci->lock, flags); 4149 - kfree(command); 4149 + xhci_free_command(xhci, command); 4150 4150 return -ENODEV; 4151 4151 } 4152 4152 ··· 4154 4154 slot_id); 4155 4155 if (ret) { 4156 4156 spin_unlock_irqrestore(&xhci->lock, flags); 4157 - kfree(command); 4157 + xhci_free_command(xhci, command); 4158 4158 return ret; 4159 4159 } 4160 4160 xhci_ring_cmd_db(xhci);
+4 -2
drivers/usb/image/mdc800.c
··· 707 707 if (signal_pending (current)) 708 708 { 709 709 mutex_unlock(&mdc800->io_lock); 710 - return -EINTR; 710 + return len == left ? -EINTR : len-left; 711 711 } 712 712 713 713 sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left; ··· 730 730 mutex_unlock(&mdc800->io_lock); 731 731 return len-left; 732 732 } 733 - wait_event_timeout(mdc800->download_wait, 733 + retval = wait_event_timeout(mdc800->download_wait, 734 734 mdc800->downloaded, 735 735 msecs_to_jiffies(TO_DOWNLOAD_GET_READY)); 736 + if (!retval) 737 + usb_kill_urb(mdc800->download_urb); 736 738 mdc800->downloaded = 0; 737 739 if (mdc800->download_urb->status != 0) 738 740 {
+1 -1
drivers/usb/misc/uss720.c
··· 736 736 ret = get_1284_register(pp, 0, &reg, GFP_KERNEL); 737 737 dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg); 738 738 if (ret < 0) 739 - return ret; 739 + goto probe_abort; 740 740 741 741 ret = usb_find_last_int_in_endpoint(interface, &epd); 742 742 if (!ret) {
+1 -1
drivers/usb/misc/yurex.c
··· 272 272 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, 273 273 dev, 1); 274 274 dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 275 + dev->bbu = -1; 275 276 if (usb_submit_urb(dev->urb, GFP_KERNEL)) { 276 277 retval = -EIO; 277 278 dev_err(&interface->dev, "Could not submitting URB\n"); ··· 281 280 282 281 /* save our data pointer in this interface device */ 283 282 usb_set_intfdata(interface, dev); 284 - dev->bbu = -1; 285 283 286 284 /* we can register the device now, as it is ready */ 287 285 retval = usb_register_dev(interface, &yurex_class);
+9
drivers/usb/renesas_usbhs/common.c
··· 815 815 816 816 usbhs_platform_call(priv, hardware_exit, pdev); 817 817 reset_control_assert(priv->rsts); 818 + 819 + /* 820 + * Explicitly free the IRQ to ensure the interrupt handler is 821 + * disabled and synchronized before freeing resources. 822 + * devm_free_irq() calls free_irq() which waits for any running 823 + * ISR to complete, preventing UAF. 824 + */ 825 + devm_free_irq(&pdev->dev, priv->irq, priv); 826 + 818 827 usbhs_mod_remove(priv); 819 828 usbhs_fifo_remove(priv); 820 829 usbhs_pipe_remove(priv);
+6 -1
drivers/usb/roles/class.c
··· 139 139 static struct usb_role_switch * 140 140 usb_role_switch_is_parent(struct fwnode_handle *fwnode) 141 141 { 142 - struct fwnode_handle *parent = fwnode_get_parent(fwnode); 142 + struct fwnode_handle *parent; 143 143 struct device *dev; 144 + 145 + if (!fwnode_device_is_compatible(fwnode, "usb-b-connector")) 146 + return NULL; 147 + 148 + parent = fwnode_get_parent(fwnode); 144 149 145 150 if (!fwnode_property_present(parent, "usb-role-switch")) { 146 151 fwnode_handle_put(parent);
+6 -1
drivers/usb/typec/altmodes/displayport.c
··· 100 100 { 101 101 u8 pin_assign = 0; 102 102 u32 conf; 103 + u32 signal; 103 104 104 105 /* DP Signalling */ 105 - conf = (dp->data.conf & DP_CONF_SIGNALLING_MASK) >> DP_CONF_SIGNALLING_SHIFT; 106 + signal = DP_CAP_DP_SIGNALLING(dp->port->vdo) & DP_CAP_DP_SIGNALLING(dp->alt->vdo); 107 + if (dp->plug_prime) 108 + signal &= DP_CAP_DP_SIGNALLING(dp->plug_prime->vdo); 109 + 110 + conf = signal << DP_CONF_SIGNALLING_SHIFT; 106 111 107 112 switch (con) { 108 113 case DP_STATUS_CON_DISABLED:
+1 -1
drivers/usb/typec/tcpm/tcpm.c
··· 7890 7890 port->partner_desc.identity = &port->partner_ident; 7891 7891 7892 7892 port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); 7893 - if (IS_ERR_OR_NULL(port->role_sw)) 7893 + if (!port->role_sw) 7894 7894 port->role_sw = usb_role_switch_get(port->dev); 7895 7895 if (IS_ERR(port->role_sw)) { 7896 7896 err = PTR_ERR(port->role_sw);
+6 -1
fs/btrfs/disk-io.c
··· 3594 3594 } 3595 3595 } 3596 3596 3597 - btrfs_zoned_reserve_data_reloc_bg(fs_info); 3598 3597 btrfs_free_zone_cache(fs_info); 3599 3598 3600 3599 btrfs_check_active_zone_reservation(fs_info); ··· 3620 3621 ret = PTR_ERR(fs_info->transaction_kthread); 3621 3622 goto fail_cleaner; 3622 3623 } 3624 + 3625 + /* 3626 + * Starts a transaction, must be called after the transaction kthread 3627 + * is initialized. 3628 + */ 3629 + btrfs_zoned_reserve_data_reloc_bg(fs_info); 3623 3630 3624 3631 ret = btrfs_read_qgroup_config(fs_info); 3625 3632 if (ret)
+1
fs/btrfs/extent_io.c
··· 4507 4507 */ 4508 4508 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { 4509 4509 spin_unlock(&eb->refs_lock); 4510 + rcu_read_lock(); 4510 4511 break; 4511 4512 } 4512 4513
+19
fs/btrfs/inode.c
··· 6612 6612 int ret; 6613 6613 bool xa_reserved = false; 6614 6614 6615 + if (!args->orphan && !args->subvol) { 6616 + /* 6617 + * Before anything else, check if we can add the name to the 6618 + * parent directory. We want to avoid a dir item overflow in 6619 + * case we have an existing dir item due to existing name 6620 + * hash collisions. We do this check here before we call 6621 + * btrfs_add_link() down below so that we can avoid a 6622 + * transaction abort (which could be exploited by malicious 6623 + * users). 6624 + * 6625 + * For subvolumes we already do this in btrfs_mksubvol(). 6626 + */ 6627 + ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, 6628 + btrfs_ino(BTRFS_I(dir)), 6629 + name); 6630 + if (ret < 0) 6631 + return ret; 6632 + } 6633 + 6615 6634 path = btrfs_alloc_path(); 6616 6635 if (!path) 6617 6636 return -ENOMEM;
+28 -4
fs/btrfs/ioctl.c
··· 672 672 goto out; 673 673 } 674 674 675 + /* 676 + * Subvolumes have orphans cleaned on first dentry lookup. A new 677 + * subvolume cannot have any orphans, so we should set the bit before we 678 + * add the subvolume dentry to the dentry cache, so that it is in the 679 + * same state as a subvolume after first lookup. 680 + */ 681 + set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &new_root->state); 675 682 d_instantiate_new(dentry, new_inode_args.inode); 676 683 new_inode_args.inode = NULL; 677 684 ··· 3859 3852 goto out; 3860 3853 } 3861 3854 3855 + received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid, 3856 + BTRFS_UUID_SIZE); 3857 + 3858 + /* 3859 + * Before we attempt to add the new received uuid, check if we have room 3860 + * for it in case there's already an item. If the size of the existing 3861 + * item plus this root's ID (u64) exceeds the maximum item size, we can 3862 + * return here without the need to abort a transaction. If we don't do 3863 + * this check, the btrfs_uuid_tree_add() call below would fail with 3864 + * -EOVERFLOW and result in a transaction abort. Malicious users could 3865 + * exploit this to turn the fs into RO mode. 
3866 + */ 3867 + if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) { 3868 + ret = btrfs_uuid_tree_check_overflow(fs_info, sa->uuid, 3869 + BTRFS_UUID_KEY_RECEIVED_SUBVOL); 3870 + if (ret < 0) 3871 + goto out; 3872 + } 3873 + 3862 3874 /* 3863 3875 * 1 - root item 3864 3876 * 2 - uuid items (received uuid + subvol uuid) ··· 3893 3867 sa->rtime.sec = ct.tv_sec; 3894 3868 sa->rtime.nsec = ct.tv_nsec; 3895 3869 3896 - received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid, 3897 - BTRFS_UUID_SIZE); 3898 3870 if (received_uuid_changed && 3899 3871 !btrfs_is_empty_uuid(root_item->received_uuid)) { 3900 3872 ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid, 3901 3873 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 3902 3874 btrfs_root_id(root)); 3903 3875 if (unlikely(ret && ret != -ENOENT)) { 3904 - btrfs_abort_transaction(trans, ret); 3905 3876 btrfs_end_transaction(trans); 3906 3877 goto out; 3907 3878 } ··· 3913 3890 3914 3891 ret = btrfs_update_root(trans, fs_info->tree_root, 3915 3892 &root->root_key, &root->root_item); 3916 - if (ret < 0) { 3893 + if (unlikely(ret < 0)) { 3894 + btrfs_abort_transaction(trans, ret); 3917 3895 btrfs_end_transaction(trans); 3918 3896 goto out; 3919 3897 }
-3
fs/btrfs/messages.h
··· 31 31 #define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \ 32 32 btrfs_no_printk(fs_info, fmt, ##args) 33 33 34 - #define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \ 35 - btrfs_no_printk(fs_info, fmt, ##args) 36 - 37 34 #define btrfs_printk_rl_in_rcu(fs_info, level, fmt, args...) \ 38 35 btrfs_no_printk(fs_info, fmt, ##args) 39 36
+10
fs/btrfs/print-tree.c
··· 38 38 { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" }, 39 39 { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }, 40 40 { BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" }, 41 + { BTRFS_REMAP_TREE_OBJECTID, "REMAP_TREE" }, 41 42 }; 42 43 43 44 const char *btrfs_root_name(const struct btrfs_key *key, char *buf) ··· 416 415 [BTRFS_UUID_KEY_SUBVOL] = "UUID_KEY_SUBVOL", 417 416 [BTRFS_UUID_KEY_RECEIVED_SUBVOL] = "UUID_KEY_RECEIVED_SUBVOL", 418 417 [BTRFS_RAID_STRIPE_KEY] = "RAID_STRIPE", 418 + [BTRFS_IDENTITY_REMAP_KEY] = "IDENTITY_REMAP", 419 + [BTRFS_REMAP_KEY] = "REMAP", 420 + [BTRFS_REMAP_BACKREF_KEY] = "REMAP_BACKREF", 419 421 }; 420 422 421 423 if (key->type == 0 && key->objectid == BTRFS_FREE_SPACE_OBJECTID) ··· 439 435 struct btrfs_extent_data_ref *dref; 440 436 struct btrfs_shared_data_ref *sref; 441 437 struct btrfs_dev_extent *dev_extent; 438 + struct btrfs_remap_item *remap; 442 439 struct btrfs_key key; 443 440 444 441 if (!l) ··· 573 568 case BTRFS_RAID_STRIPE_KEY: 574 569 print_raid_stripe_key(l, btrfs_item_size(l, i), 575 570 btrfs_item_ptr(l, i, struct btrfs_stripe_extent)); 571 + break; 572 + case BTRFS_REMAP_KEY: 573 + case BTRFS_REMAP_BACKREF_KEY: 574 + remap = btrfs_item_ptr(l, i, struct btrfs_remap_item); 575 + pr_info("\t\taddress %llu\n", btrfs_remap_address(l, remap)); 576 576 break; 577 577 } 578 578 }
+2
fs/btrfs/relocation.c
··· 4399 4399 4400 4400 leaf = path->nodes[0]; 4401 4401 } 4402 + 4403 + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4402 4404 } 4403 4405 4404 4406 remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item);
+4 -1
fs/btrfs/space-info.c
··· 2194 2194 if (!btrfs_should_periodic_reclaim(space_info)) 2195 2195 continue; 2196 2196 for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) { 2197 - if (do_reclaim_sweep(space_info, raid)) 2197 + if (do_reclaim_sweep(space_info, raid)) { 2198 + spin_lock(&space_info->lock); 2198 2199 btrfs_set_periodic_reclaim_ready(space_info, false); 2200 + spin_unlock(&space_info->lock); 2201 + } 2199 2202 } 2200 2203 } 2201 2204 }
+16
fs/btrfs/transaction.c
··· 1905 1905 ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid, 1906 1906 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 1907 1907 objectid); 1908 + /* 1909 + * We are creating of lot of snapshots of the same root that was 1910 + * received (has a received UUID) and reached a leaf's limit for 1911 + * an item. We can safely ignore this and avoid a transaction 1912 + * abort. A deletion of this snapshot will still work since we 1913 + * ignore if an item with a BTRFS_UUID_KEY_RECEIVED_SUBVOL key 1914 + * is missing (see btrfs_delete_subvolume()). Send/receive will 1915 + * work too since it peeks the first root id from the existing 1916 + * item (it could peek any), and in case it's missing it 1917 + * falls back to search by BTRFS_UUID_KEY_SUBVOL keys. 1918 + * Creation of a snapshot does not require CAP_SYS_ADMIN, so 1919 + * we don't want users triggering transaction aborts, either 1920 + * intentionally or not. 1921 + */ 1922 + if (ret == -EOVERFLOW) 1923 + ret = 0; 1908 1924 if (unlikely(ret && ret != -EEXIST)) { 1909 1925 btrfs_abort_transaction(trans, ret); 1910 1926 goto fail;
+1 -1
fs/btrfs/tree-checker.c
··· 1284 1284 } 1285 1285 if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) { 1286 1286 generic_err(leaf, slot, 1287 - "invalid root level, have %u expect [0, %u]", 1287 + "invalid root drop_level, have %u expect [0, %u]", 1288 1288 btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1); 1289 1289 return -EUCLEAN; 1290 1290 }
+6
fs/btrfs/tree-log.c
··· 6195 6195 struct btrfs_root *root, 6196 6196 struct btrfs_log_ctx *ctx) 6197 6197 { 6198 + const bool orig_log_new_dentries = ctx->log_new_dentries; 6198 6199 int ret = 0; 6199 6200 6200 6201 /* ··· 6257 6256 * dir index key range logged for the directory. So we 6258 6257 * must make sure the deletion is recorded. 6259 6258 */ 6259 + ctx->log_new_dentries = false; 6260 6260 ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx); 6261 + if (!ret && ctx->log_new_dentries) 6262 + ret = log_new_dir_dentries(trans, inode, ctx); 6263 + 6261 6264 btrfs_add_delayed_iput(inode); 6262 6265 if (ret) 6263 6266 break; ··· 6296 6291 break; 6297 6292 } 6298 6293 6294 + ctx->log_new_dentries = orig_log_new_dentries; 6299 6295 ctx->logging_conflict_inodes = false; 6300 6296 if (ret) 6301 6297 free_conflicting_inodes(ctx);
+38
fs/btrfs/uuid-tree.c
··· 199 199 return 0; 200 200 } 201 201 202 + /* 203 + * Check if we can add one root ID to a UUID key. 204 + * If the key does not yet exist, we can, otherwise only if the extended item 205 + * does not exceed the maximum item size permitted by the leaf size. 206 + * 207 + * Returns 0 on success, negative value on error. 208 + */ 209 + int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info, 210 + const u8 *uuid, u8 type) 211 + { 212 + BTRFS_PATH_AUTO_FREE(path); 213 + int ret; 214 + u32 item_size; 215 + struct btrfs_key key; 216 + 217 + if (WARN_ON_ONCE(!fs_info->uuid_root)) 218 + return -EINVAL; 219 + 220 + path = btrfs_alloc_path(); 221 + if (!path) 222 + return -ENOMEM; 223 + 224 + btrfs_uuid_to_key(uuid, type, &key); 225 + ret = btrfs_search_slot(NULL, fs_info->uuid_root, &key, path, 0, 0); 226 + if (ret < 0) 227 + return ret; 228 + if (ret > 0) 229 + return 0; 230 + 231 + item_size = btrfs_item_size(path->nodes[0], path->slots[0]); 232 + 233 + if (sizeof(struct btrfs_item) + item_size + sizeof(u64) > 234 + BTRFS_LEAF_DATA_SIZE(fs_info)) 235 + return -EOVERFLOW; 236 + 237 + return 0; 238 + } 239 + 202 240 static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type, 203 241 u64 subid) 204 242 {
+2
fs/btrfs/uuid-tree.h
··· 12 12 u64 subid); 13 13 int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type, 14 14 u64 subid); 15 + int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info, 16 + const u8 *uuid, u8 type); 15 17 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info); 16 18 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info); 17 19 int btrfs_uuid_scan_kthread(void *data);
+1 -1
fs/btrfs/volumes.c
··· 3587 3587 3588 3588 /* step one, relocate all the extents inside this chunk */ 3589 3589 btrfs_scrub_pause(fs_info); 3590 - ret = btrfs_relocate_block_group(fs_info, chunk_offset, true); 3590 + ret = btrfs_relocate_block_group(fs_info, chunk_offset, verbose); 3591 3591 btrfs_scrub_continue(fs_info); 3592 3592 if (ret) { 3593 3593 /*
+4 -2
fs/btrfs/zoned.c
··· 337 337 if (!btrfs_fs_incompat(fs_info, ZONED)) 338 338 return 0; 339 339 340 - mutex_lock(&fs_devices->device_list_mutex); 340 + /* 341 + * No need to take the device_list mutex here, we're still in the mount 342 + * path and devices cannot be added to or removed from the list yet. 343 + */ 341 344 list_for_each_entry(device, &fs_devices->devices, dev_list) { 342 345 /* We can skip reading of zone info for missing devices */ 343 346 if (!device->bdev) ··· 350 347 if (ret) 351 348 break; 352 349 } 353 - mutex_unlock(&fs_devices->device_list_mutex); 354 350 355 351 return ret; 356 352 }
-1
fs/ceph/addr.c
··· 1326 1326 continue; 1327 1327 } else if (rc == -E2BIG) { 1328 1328 folio_unlock(folio); 1329 - ceph_wbc->fbatch.folios[i] = NULL; 1330 1329 break; 1331 1330 } 1332 1331
+2 -2
fs/ceph/debugfs.c
··· 79 79 if (req->r_inode) { 80 80 seq_printf(s, " #%llx", ceph_ino(req->r_inode)); 81 81 } else if (req->r_dentry) { 82 - struct ceph_path_info path_info; 82 + struct ceph_path_info path_info = {0}; 83 83 path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0); 84 84 if (IS_ERR(path)) 85 85 path = NULL; ··· 98 98 } 99 99 100 100 if (req->r_old_dentry) { 101 - struct ceph_path_info path_info; 101 + struct ceph_path_info path_info = {0}; 102 102 path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0); 103 103 if (IS_ERR(path)) 104 104 path = NULL;
+15 -2
fs/ceph/dir.c
··· 1339 1339 struct ceph_client *cl = fsc->client; 1340 1340 struct ceph_mds_client *mdsc = fsc->mdsc; 1341 1341 struct inode *inode = d_inode(dentry); 1342 + struct ceph_inode_info *ci = ceph_inode(inode); 1342 1343 struct ceph_mds_request *req; 1343 1344 bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS); 1344 1345 struct dentry *dn; ··· 1364 1363 if (!dn) { 1365 1364 try_async = false; 1366 1365 } else { 1367 - struct ceph_path_info path_info; 1366 + struct ceph_path_info path_info = {0}; 1368 1367 path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0); 1369 1368 if (IS_ERR(path)) { 1370 1369 try_async = false; ··· 1425 1424 * We have enough caps, so we assume that the unlink 1426 1425 * will succeed. Fix up the target inode and dcache. 1427 1426 */ 1428 - drop_nlink(inode); 1427 + 1428 + /* 1429 + * Protect the i_nlink update with i_ceph_lock 1430 + * to prevent racing against ceph_fill_inode() 1431 + * handling our completion on a worker thread 1432 + * and don't decrement if i_nlink has already 1433 + * been updated to zero by this completion. 1434 + */ 1435 + spin_lock(&ci->i_ceph_lock); 1436 + if (inode->i_nlink > 0) 1437 + drop_nlink(inode); 1438 + spin_unlock(&ci->i_ceph_lock); 1439 + 1429 1440 d_delete(dentry); 1430 1441 } else { 1431 1442 spin_lock(&fsc->async_unlink_conflict_lock);
+2 -2
fs/ceph/file.c
··· 397 397 if (!dentry) { 398 398 do_sync = true; 399 399 } else { 400 - struct ceph_path_info path_info; 400 + struct ceph_path_info path_info = {0}; 401 401 path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0); 402 402 if (IS_ERR(path)) { 403 403 do_sync = true; ··· 807 807 if (!dn) { 808 808 try_async = false; 809 809 } else { 810 - struct ceph_path_info path_info; 810 + struct ceph_path_info path_info = {0}; 811 811 path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0); 812 812 if (IS_ERR(path)) { 813 813 try_async = false;
+1 -1
fs/ceph/inode.c
··· 2551 2551 if (!dentry) { 2552 2552 do_sync = true; 2553 2553 } else { 2554 - struct ceph_path_info path_info; 2554 + struct ceph_path_info path_info = {0}; 2555 2555 path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0); 2556 2556 if (IS_ERR(path)) { 2557 2557 do_sync = true;
+3
fs/ceph/mds_client.c
··· 2768 2768 if (ret < 0) { 2769 2769 dput(parent); 2770 2770 dput(cur); 2771 + __putname(path); 2771 2772 return ERR_PTR(ret); 2772 2773 } 2773 2774 ··· 2778 2777 if (len < 0) { 2779 2778 dput(parent); 2780 2779 dput(cur); 2780 + __putname(path); 2781 2781 return ERR_PTR(len); 2782 2782 } 2783 2783 } ··· 2815 2813 * cannot ever succeed. Creating paths that long is 2816 2814 * possible with Ceph, but Linux cannot use them. 2817 2815 */ 2816 + __putname(path); 2818 2817 return ERR_PTR(-ENAMETOOLONG); 2819 2818 } 2820 2819
+2 -1
fs/nfs/Kconfig
··· 87 87 space programs which can be found in the Linux nfs-utils package, 88 88 available from http://linux-nfs.org/. 89 89 90 - If unsure, say Y. 90 + If unsure, say N. 91 91 92 92 config NFS_SWAP 93 93 bool "Provide swap over NFS support" ··· 100 100 config NFS_V4_0 101 101 bool "NFS client support for NFSv4.0" 102 102 depends on NFS_V4 103 + default y 103 104 help 104 105 This option enables support for minor version 0 of the NFSv4 protocol 105 106 (RFC 3530) in the kernel's NFS client.
+6 -1
fs/nfs/nfs3proc.c
··· 392 392 if (status != 0) 393 393 goto out_release_acls; 394 394 395 - if (d_alias) 395 + if (d_alias) { 396 + if (d_is_dir(d_alias)) { 397 + status = -EISDIR; 398 + goto out_dput; 399 + } 396 400 dentry = d_alias; 401 + } 397 402 398 403 /* When we created the file with exclusive semantics, make 399 404 * sure we set the attributes afterwards. */
+54 -9
fs/nfsd/export.c
··· 36 36 * second map contains a reference to the entry in the first map. 37 37 */ 38 38 39 + static struct workqueue_struct *nfsd_export_wq; 40 + 39 41 #define EXPKEY_HASHBITS 8 40 42 #define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS) 41 43 #define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) 42 44 43 - static void expkey_put(struct kref *ref) 45 + static void expkey_release(struct work_struct *work) 44 46 { 45 - struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); 47 + struct svc_expkey *key = container_of(to_rcu_work(work), 48 + struct svc_expkey, ek_rwork); 46 49 47 50 if (test_bit(CACHE_VALID, &key->h.flags) && 48 51 !test_bit(CACHE_NEGATIVE, &key->h.flags)) 49 52 path_put(&key->ek_path); 50 53 auth_domain_put(key->ek_client); 51 - kfree_rcu(key, ek_rcu); 54 + kfree(key); 55 + } 56 + 57 + static void expkey_put(struct kref *ref) 58 + { 59 + struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); 60 + 61 + INIT_RCU_WORK(&key->ek_rwork, expkey_release); 62 + queue_rcu_work(nfsd_export_wq, &key->ek_rwork); 52 63 } 53 64 54 65 static int expkey_upcall(struct cache_detail *cd, struct cache_head *h) ··· 364 353 EXP_STATS_COUNTERS_NUM); 365 354 } 366 355 367 - static void svc_export_release(struct rcu_head *rcu_head) 356 + static void svc_export_release(struct work_struct *work) 368 357 { 369 - struct svc_export *exp = container_of(rcu_head, struct svc_export, 370 - ex_rcu); 358 + struct svc_export *exp = container_of(to_rcu_work(work), 359 + struct svc_export, ex_rwork); 371 360 361 + path_put(&exp->ex_path); 362 + auth_domain_put(exp->ex_client); 372 363 nfsd4_fslocs_free(&exp->ex_fslocs); 373 364 export_stats_destroy(exp->ex_stats); 374 365 kfree(exp->ex_stats); ··· 382 369 { 383 370 struct svc_export *exp = container_of(ref, struct svc_export, h.ref); 384 371 385 - path_put(&exp->ex_path); 386 - auth_domain_put(exp->ex_client); 387 - call_rcu(&exp->ex_rcu, svc_export_release); 372 + INIT_RCU_WORK(&exp->ex_rwork, svc_export_release); 373 + 
queue_rcu_work(nfsd_export_wq, &exp->ex_rwork); 388 374 } 389 375 390 376 static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h) ··· 1491 1479 .show = e_show, 1492 1480 }; 1493 1481 1482 + /** 1483 + * nfsd_export_wq_init - allocate the export release workqueue 1484 + * 1485 + * Called once at module load. The workqueue runs deferred svc_export and 1486 + * svc_expkey release work scheduled by queue_rcu_work() in the cache put 1487 + * callbacks. 1488 + * 1489 + * Return values: 1490 + * %0: workqueue allocated 1491 + * %-ENOMEM: allocation failed 1492 + */ 1493 + int nfsd_export_wq_init(void) 1494 + { 1495 + nfsd_export_wq = alloc_workqueue("nfsd_export", WQ_UNBOUND, 0); 1496 + if (!nfsd_export_wq) 1497 + return -ENOMEM; 1498 + return 0; 1499 + } 1500 + 1501 + /** 1502 + * nfsd_export_wq_shutdown - drain and free the export release workqueue 1503 + * 1504 + * Called once at module unload. Per-namespace teardown in 1505 + * nfsd_export_shutdown() has already drained all deferred work. 1506 + */ 1507 + void nfsd_export_wq_shutdown(void) 1508 + { 1509 + destroy_workqueue(nfsd_export_wq); 1510 + } 1511 + 1494 1512 /* 1495 1513 * Initialize the exports module. 1496 1514 */ ··· 1582 1540 1583 1541 cache_unregister_net(nn->svc_expkey_cache, net); 1584 1542 cache_unregister_net(nn->svc_export_cache, net); 1543 + /* Drain deferred export and expkey release work. */ 1544 + rcu_barrier(); 1545 + flush_workqueue(nfsd_export_wq); 1585 1546 cache_destroy_net(nn->svc_expkey_cache, net); 1586 1547 cache_destroy_net(nn->svc_export_cache, net); 1587 1548 svcauth_unix_purge(net);
+5 -2
fs/nfsd/export.h
··· 7 7 8 8 #include <linux/sunrpc/cache.h> 9 9 #include <linux/percpu_counter.h> 10 + #include <linux/workqueue.h> 10 11 #include <uapi/linux/nfsd/export.h> 11 12 #include <linux/nfs4.h> 12 13 ··· 76 75 u32 ex_layout_types; 77 76 struct nfsd4_deviceid_map *ex_devid_map; 78 77 struct cache_detail *cd; 79 - struct rcu_head ex_rcu; 78 + struct rcu_work ex_rwork; 80 79 unsigned long ex_xprtsec_modes; 81 80 struct export_stats *ex_stats; 82 81 }; ··· 93 92 u32 ek_fsid[6]; 94 93 95 94 struct path ek_path; 96 - struct rcu_head ek_rcu; 95 + struct rcu_work ek_rwork; 97 96 }; 98 97 99 98 #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) ··· 111 110 /* 112 111 * Function declarations 113 112 */ 113 + int nfsd_export_wq_init(void); 114 + void nfsd_export_wq_shutdown(void); 114 115 int nfsd_export_init(struct net *); 115 116 void nfsd_export_shutdown(struct net *); 116 117 void nfsd_export_flush(struct net *);
+7 -2
fs/nfsd/nfs4xdr.c
··· 6281 6281 int len = xdr->buf->len - (op_status_offset + XDR_UNIT); 6282 6282 6283 6283 so->so_replay.rp_status = op->status; 6284 - so->so_replay.rp_buflen = len; 6285 - read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT, 6284 + if (len <= NFSD4_REPLAY_ISIZE) { 6285 + so->so_replay.rp_buflen = len; 6286 + read_bytes_from_xdr_buf(xdr->buf, 6287 + op_status_offset + XDR_UNIT, 6286 6288 so->so_replay.rp_buf, len); 6289 + } else { 6290 + so->so_replay.rp_buflen = 0; 6291 + } 6287 6292 } 6288 6293 status: 6289 6294 op->status = nfsd4_map_status(op->status,
+19 -3
fs/nfsd/nfsctl.c
··· 149 149 150 150 seq = file->private_data; 151 151 seq->private = nn->svc_export_cache; 152 + get_net(net); 152 153 return 0; 154 + } 155 + 156 + static int exports_release(struct inode *inode, struct file *file) 157 + { 158 + struct seq_file *seq = file->private_data; 159 + struct cache_detail *cd = seq->private; 160 + 161 + put_net(cd->net); 162 + return seq_release(inode, file); 153 163 } 154 164 155 165 static int exports_nfsd_open(struct inode *inode, struct file *file) ··· 171 161 .open = exports_nfsd_open, 172 162 .read = seq_read, 173 163 .llseek = seq_lseek, 174 - .release = seq_release, 164 + .release = exports_release, 175 165 }; 176 166 177 167 static int export_features_show(struct seq_file *m, void *v) ··· 1386 1376 .proc_open = exports_proc_open, 1387 1377 .proc_read = seq_read, 1388 1378 .proc_lseek = seq_lseek, 1389 - .proc_release = seq_release, 1379 + .proc_release = exports_release, 1390 1380 }; 1391 1381 1392 1382 static int create_proc_exports_entry(void) ··· 2269 2259 if (retval) 2270 2260 goto out_free_pnfs; 2271 2261 nfsd_lockd_init(); /* lockd->nfsd callbacks */ 2262 + retval = nfsd_export_wq_init(); 2263 + if (retval) 2264 + goto out_free_lockd; 2272 2265 retval = register_pernet_subsys(&nfsd_net_ops); 2273 2266 if (retval < 0) 2274 - goto out_free_lockd; 2267 + goto out_free_export_wq; 2275 2268 retval = register_cld_notifier(); 2276 2269 if (retval) 2277 2270 goto out_free_subsys; ··· 2303 2290 unregister_cld_notifier(); 2304 2291 out_free_subsys: 2305 2292 unregister_pernet_subsys(&nfsd_net_ops); 2293 + out_free_export_wq: 2294 + nfsd_export_wq_shutdown(); 2306 2295 out_free_lockd: 2307 2296 nfsd_lockd_shutdown(); 2308 2297 nfsd_drc_slab_free(); ··· 2325 2310 nfsd4_destroy_laundry_wq(); 2326 2311 unregister_cld_notifier(); 2327 2312 unregister_pernet_subsys(&nfsd_net_ops); 2313 + nfsd_export_wq_shutdown(); 2328 2314 nfsd_drc_slab_free(); 2329 2315 nfsd_lockd_shutdown(); 2330 2316 nfsd4_free_slabs();
+12 -5
fs/nfsd/state.h
··· 541 541 struct xdr_netobj cr_princhash; 542 542 }; 543 543 544 - /* A reasonable value for REPLAY_ISIZE was estimated as follows: 545 - * The OPEN response, typically the largest, requires 546 - * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) + 547 - * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) + 548 - * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes 544 + /* 545 + * REPLAY_ISIZE is sized for an OPEN response with delegation: 546 + * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 547 + * 8(verifier) + 4(deleg. type) + 8(deleg. stateid) + 548 + * 4(deleg. recall flag) + 20(deleg. space limit) + 549 + * ~32(deleg. ace) = 112 bytes 550 + * 551 + * Some responses can exceed this. A LOCK denial includes the conflicting 552 + * lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses 553 + * larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is 554 + * saved. Enlarging this constant increases the size of every 555 + * nfs4_stateowner. 549 556 */ 550 557 551 558 #define NFSD4_REPLAY_ISIZE 112
+1 -1
fs/smb/client/cifsacl.c
··· 1489 1489 struct cifsFileInfo *open_file = NULL; 1490 1490 1491 1491 if (inode) 1492 - open_file = find_readable_file(CIFS_I(inode), true); 1492 + open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY); 1493 1493 if (!open_file) 1494 1494 return get_cifs_acl_by_path(cifs_sb, path, pacllen, info); 1495 1495
+1 -1
fs/smb/client/cifsfs.c
··· 1269 1269 struct cifsFileInfo *writeable_srcfile; 1270 1270 int rc = -EINVAL; 1271 1271 1272 - writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY); 1272 + writeable_srcfile = find_writable_file(src_cifsi, FIND_FSUID_ONLY); 1273 1273 if (writeable_srcfile) { 1274 1274 if (src_tcon->ses->server->ops->set_file_size) 1275 1275 rc = src_tcon->ses->server->ops->set_file_size(
+17 -6
fs/smb/client/cifsglob.h
··· 20 20 #include <linux/utsname.h> 21 21 #include <linux/sched/mm.h> 22 22 #include <linux/netfs.h> 23 + #include <linux/fcntl.h> 23 24 #include "cifs_fs_sb.h" 24 25 #include "cifsacl.h" 25 26 #include <crypto/internal/hash.h> ··· 1885 1884 } 1886 1885 1887 1886 1888 - /* cifs_get_writable_file() flags */ 1889 - enum cifs_writable_file_flags { 1890 - FIND_WR_ANY = 0U, 1891 - FIND_WR_FSUID_ONLY = (1U << 0), 1892 - FIND_WR_WITH_DELETE = (1U << 1), 1893 - FIND_WR_NO_PENDING_DELETE = (1U << 2), 1887 + enum cifs_find_flags { 1888 + FIND_ANY = 0U, 1889 + FIND_FSUID_ONLY = (1U << 0), 1890 + FIND_WITH_DELETE = (1U << 1), 1891 + FIND_NO_PENDING_DELETE = (1U << 2), 1892 + FIND_OPEN_FLAGS = (1U << 3), 1894 1893 }; 1895 1894 1896 1895 #define MID_FREE 0 ··· 2374 2373 static inline bool cifs_forced_shutdown(const struct cifs_sb_info *sbi) 2375 2374 { 2376 2375 return cifs_sb_flags(sbi) & CIFS_MOUNT_SHUTDOWN; 2376 + } 2377 + 2378 + static inline int cifs_open_create_options(unsigned int oflags, int opts) 2379 + { 2380 + /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 2381 + if (oflags & O_SYNC) 2382 + opts |= CREATE_WRITE_THROUGH; 2383 + if (oflags & O_DIRECT) 2384 + opts |= CREATE_NO_BUFFER; 2385 + return opts; 2377 2386 } 2378 2387 2379 2388 #endif /* _CIFS_GLOB_H */
+22 -4
fs/smb/client/cifsproto.h
··· 138 138 ssize_t result); 139 139 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, 140 140 int flags); 141 - int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, 142 - struct cifsFileInfo **ret_file); 141 + int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, 142 + unsigned int find_flags, unsigned int open_flags, 143 + struct cifsFileInfo **ret_file); 143 144 int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, int flags, 144 145 struct cifsFileInfo **ret_file); 145 - struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 146 - bool fsuid_only); 146 + struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode, 147 + unsigned int find_flags, 148 + unsigned int open_flags); 147 149 int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, 148 150 struct cifsFileInfo **ret_file); 149 151 int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode, ··· 596 594 sg_set_page(&sgtable->sgl[sgtable->nents++], 597 595 virt_to_page((void *)addr), buflen, off); 598 596 } 597 + } 598 + 599 + static inline int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, 600 + unsigned int find_flags, 601 + struct cifsFileInfo **ret_file) 602 + { 603 + find_flags &= ~FIND_OPEN_FLAGS; 604 + return __cifs_get_writable_file(cifs_inode, find_flags, 0, ret_file); 605 + } 606 + 607 + static inline struct cifsFileInfo * 608 + find_readable_file(struct cifsInodeInfo *cinode, unsigned int find_flags) 609 + { 610 + find_flags &= ~FIND_OPEN_FLAGS; 611 + find_flags |= FIND_NO_PENDING_DELETE; 612 + return __find_readable_file(cinode, find_flags, 0); 599 613 } 600 614 601 615 #endif /* _CIFSPROTO_H */
+2 -2
fs/smb/client/dir.c
··· 187 187 const char *full_path; 188 188 void *page = alloc_dentry_path(); 189 189 struct inode *newinode = NULL; 190 - unsigned int sbflags; 190 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 191 191 int disposition; 192 192 struct TCP_Server_Info *server = tcon->ses->server; 193 193 struct cifs_open_parms oparms; ··· 308 308 goto out; 309 309 } 310 310 311 + create_options |= cifs_open_create_options(oflags, create_options); 311 312 /* 312 313 * if we're not using unix extensions, see if we need to set 313 314 * ATTR_READONLY on the create call ··· 368 367 * If Open reported that we actually created a file then we now have to 369 368 * set the mode if possible. 370 369 */ 371 - sbflags = cifs_sb_flags(cifs_sb); 372 370 if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) { 373 371 struct cifs_unix_set_info_args args = { 374 372 .mode = mode,
+69 -60
fs/smb/client/file.c
··· 255 255 struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq); 256 256 int ret; 257 257 258 - ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile); 258 + ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile); 259 259 if (ret) { 260 260 cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret); 261 261 return; ··· 584 584 *********************************************************************/ 585 585 586 586 disposition = cifs_get_disposition(f_flags); 587 - 588 587 /* BB pass O_SYNC flag through on file attributes .. BB */ 589 - 590 - /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 591 - if (f_flags & O_SYNC) 592 - create_options |= CREATE_WRITE_THROUGH; 593 - 594 - if (f_flags & O_DIRECT) 595 - create_options |= CREATE_NO_BUFFER; 588 + create_options |= cifs_open_create_options(f_flags, create_options); 596 589 597 590 retry_open: 598 591 oparms = (struct cifs_open_parms) { ··· 956 963 return tcon->ses->server->ops->flush(xid, tcon, 957 964 &cfile->fid); 958 965 } 959 - rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile); 966 + rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile); 960 967 if (!rc) { 961 968 tcon = tlink_tcon(cfile->tlink); 962 969 rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid); ··· 981 988 return -ERESTARTSYS; 982 989 mapping_set_error(inode->i_mapping, rc); 983 990 984 - cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY); 991 + cfile = find_writable_file(cinode, FIND_FSUID_ONLY); 985 992 rc = cifs_file_flush(xid, inode, cfile); 986 993 if (!rc) { 987 994 if (cfile) { ··· 1061 1068 1062 1069 /* Get the cached handle as SMB2 close is deferred */ 1063 1070 if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) { 1064 - rc = cifs_get_writable_path(tcon, full_path, 1065 - FIND_WR_FSUID_ONLY | 1066 - FIND_WR_NO_PENDING_DELETE, 1067 - &cfile); 1071 + rc = __cifs_get_writable_file(CIFS_I(inode), 1072 + FIND_FSUID_ONLY | 1073 + 
FIND_NO_PENDING_DELETE | 1074 + FIND_OPEN_FLAGS, 1075 + file->f_flags, &cfile); 1068 1076 } else { 1069 - rc = cifs_get_readable_path(tcon, full_path, &cfile); 1077 + cfile = __find_readable_file(CIFS_I(inode), 1078 + FIND_NO_PENDING_DELETE | 1079 + FIND_OPEN_FLAGS, 1080 + file->f_flags); 1081 + rc = cfile ? 0 : -ENOENT; 1070 1082 } 1071 1083 if (rc == 0) { 1072 - unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC); 1073 - unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC); 1074 - 1075 - if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) && 1076 - (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) { 1077 - file->private_data = cfile; 1078 - spin_lock(&CIFS_I(inode)->deferred_lock); 1079 - cifs_del_deferred_close(cfile); 1080 - spin_unlock(&CIFS_I(inode)->deferred_lock); 1081 - goto use_cache; 1082 - } 1083 - _cifsFileInfo_put(cfile, true, false); 1084 - } else { 1085 - /* hard link on the defeered close file */ 1086 - rc = cifs_get_hardlink_path(tcon, inode, file); 1087 - if (rc) 1088 - cifs_close_deferred_file(CIFS_I(inode)); 1084 + file->private_data = cfile; 1085 + spin_lock(&CIFS_I(inode)->deferred_lock); 1086 + cifs_del_deferred_close(cfile); 1087 + spin_unlock(&CIFS_I(inode)->deferred_lock); 1088 + goto use_cache; 1089 1089 } 1090 + /* hard link on the deferred close file */ 1091 + rc = cifs_get_hardlink_path(tcon, inode, file); 1092 + if (rc) 1093 + cifs_close_deferred_file(CIFS_I(inode)); 1090 1094 1091 1095 if (server->oplocks) 1092 1096 oplock = REQ_OPLOCK; ··· 1304 1314 rdwr_for_fscache = 1; 1305 1315 1306 1316 desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache); 1307 - 1308 - /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 1309 - if (cfile->f_flags & O_SYNC) 1310 - create_options |= CREATE_WRITE_THROUGH; 1311 - 1312 - if (cfile->f_flags & O_DIRECT) 1313 - create_options |= CREATE_NO_BUFFER; 1317 + create_options |= cifs_open_create_options(cfile->f_flags, 
1318 + create_options); 1314 1319 1315 1320 if (server->ops->get_lease_key) 1316 1321 server->ops->get_lease_key(inode, &cfile->fid); ··· 2509 2524 netfs_write_subrequest_terminated(&wdata->subreq, result); 2510 2525 } 2511 2526 2512 - struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 2513 - bool fsuid_only) 2527 + static bool open_flags_match(struct cifsInodeInfo *cinode, 2528 + unsigned int oflags, unsigned int cflags) 2529 + { 2530 + struct inode *inode = &cinode->netfs.inode; 2531 + int crw = 0, orw = 0; 2532 + 2533 + oflags &= ~(O_CREAT | O_EXCL | O_TRUNC); 2534 + cflags &= ~(O_CREAT | O_EXCL | O_TRUNC); 2535 + 2536 + if (cifs_fscache_enabled(inode)) { 2537 + if (OPEN_FMODE(cflags) & FMODE_WRITE) 2538 + crw = 1; 2539 + if (OPEN_FMODE(oflags) & FMODE_WRITE) 2540 + orw = 1; 2541 + } 2542 + if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw)) 2543 + return false; 2544 + 2545 + return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT)); 2546 + } 2547 + 2548 + struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode, 2549 + unsigned int find_flags, 2550 + unsigned int open_flags) 2514 2551 { 2515 2552 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode); 2553 + bool fsuid_only = find_flags & FIND_FSUID_ONLY; 2516 2554 struct cifsFileInfo *open_file = NULL; 2517 2555 2518 2556 /* only filter by fsuid on multiuser mounts */ ··· 2548 2540 have a close pending, we go through the whole list */ 2549 2541 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 2550 2542 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) 2543 + continue; 2544 + if ((find_flags & FIND_NO_PENDING_DELETE) && 2545 + open_file->status_file_deleted) 2546 + continue; 2547 + if ((find_flags & FIND_OPEN_FLAGS) && 2548 + !open_flags_match(cifs_inode, open_flags, 2549 + open_file->f_flags)) 2551 2550 continue; 2552 2551 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { 2553 2552 if 
((!open_file->invalidHandle)) { ··· 2574 2559 } 2575 2560 2576 2561 /* Return -EBADF if no handle is found and general rc otherwise */ 2577 - int 2578 - cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, 2579 - struct cifsFileInfo **ret_file) 2562 + int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, 2563 + unsigned int find_flags, unsigned int open_flags, 2564 + struct cifsFileInfo **ret_file) 2580 2565 { 2581 2566 struct cifsFileInfo *open_file, *inv_file = NULL; 2582 2567 struct cifs_sb_info *cifs_sb; 2583 2568 bool any_available = false; 2584 2569 int rc = -EBADF; 2585 2570 unsigned int refind = 0; 2586 - bool fsuid_only = flags & FIND_WR_FSUID_ONLY; 2587 - bool with_delete = flags & FIND_WR_WITH_DELETE; 2571 + bool fsuid_only = find_flags & FIND_FSUID_ONLY; 2572 + bool with_delete = find_flags & FIND_WITH_DELETE; 2588 2573 *ret_file = NULL; 2589 2574 2590 2575 /* ··· 2618 2603 continue; 2619 2604 if (with_delete && !(open_file->fid.access & DELETE)) 2620 2605 continue; 2621 - if ((flags & FIND_WR_NO_PENDING_DELETE) && 2606 + if ((find_flags & FIND_NO_PENDING_DELETE) && 2622 2607 open_file->status_file_deleted) 2608 + continue; 2609 + if ((find_flags & FIND_OPEN_FLAGS) && 2610 + !open_flags_match(cifs_inode, open_flags, 2611 + open_file->f_flags)) 2623 2612 continue; 2624 2613 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { 2625 2614 if (!open_file->invalidHandle) { ··· 2741 2722 cinode = CIFS_I(d_inode(cfile->dentry)); 2742 2723 spin_unlock(&tcon->open_file_lock); 2743 2724 free_dentry_path(page); 2744 - *ret_file = find_readable_file(cinode, 0); 2745 - if (*ret_file) { 2746 - spin_lock(&cinode->open_file_lock); 2747 - if ((*ret_file)->status_file_deleted) { 2748 - spin_unlock(&cinode->open_file_lock); 2749 - cifsFileInfo_put(*ret_file); 2750 - *ret_file = NULL; 2751 - } else { 2752 - spin_unlock(&cinode->open_file_lock); 2753 - } 2754 - } 2725 + *ret_file = find_readable_file(cinode, FIND_ANY); 2755 2726 return *ret_file ? 
0 : -ENOENT; 2756 2727 } 2757 2728 ··· 2813 2804 } 2814 2805 2815 2806 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 2816 - smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 2807 + smbfile = find_writable_file(CIFS_I(inode), FIND_ANY); 2817 2808 if (smbfile) { 2818 2809 rc = server->ops->flush(xid, tcon, &smbfile->fid); 2819 2810 cifsFileInfo_put(smbfile);
+1 -1
fs/smb/client/fs_context.c
··· 1997 1997 ctx->backupuid_specified = false; /* no backup intent for a user */ 1998 1998 ctx->backupgid_specified = false; /* no backup intent for a group */ 1999 1999 2000 - ctx->retrans = 1; 2000 + ctx->retrans = 0; 2001 2001 ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT; 2002 2002 ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT; 2003 2003 ctx->nonativesocket = 0;
+3 -3
fs/smb/client/inode.c
··· 2997 2997 } 2998 2998 } 2999 2999 3000 - cfile = find_readable_file(cifs_i, false); 3000 + cfile = find_readable_file(cifs_i, FIND_ANY); 3001 3001 if (cfile == NULL) 3002 3002 return -EINVAL; 3003 3003 ··· 3050 3050 size, false); 3051 3051 cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc); 3052 3052 } else { 3053 - open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3053 + open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY); 3054 3054 if (open_file) { 3055 3055 tcon = tlink_tcon(open_file->tlink); 3056 3056 server = tcon->ses->server; ··· 3219 3219 open_file->fid.netfid, 3220 3220 open_file->pid); 3221 3221 } else { 3222 - open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3222 + open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY); 3223 3223 if (open_file) { 3224 3224 pTcon = tlink_tcon(open_file->tlink); 3225 3225 rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args,
+1 -1
fs/smb/client/smb1ops.c
··· 960 960 struct cifs_tcon *tcon; 961 961 962 962 /* if the file is already open for write, just use that fileid */ 963 - open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY); 963 + open_file = find_writable_file(cinode, FIND_FSUID_ONLY); 964 964 965 965 if (open_file) { 966 966 fid.netfid = open_file->fid.netfid;
+10 -12
fs/smb/client/smb2inode.c
··· 1156 1156 cifs_i = CIFS_I(inode); 1157 1157 dosattrs = cifs_i->cifsAttrs | ATTR_READONLY; 1158 1158 data.Attributes = cpu_to_le32(dosattrs); 1159 - cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile); 1159 + cifs_get_writable_path(tcon, name, FIND_ANY, &cfile); 1160 1160 oparms = CIFS_OPARMS(cifs_sb, tcon, name, FILE_WRITE_ATTRIBUTES, 1161 1161 FILE_CREATE, CREATE_NOT_FILE, ACL_NO_MODE); 1162 1162 tmprc = smb2_compound_op(xid, tcon, cifs_sb, name, ··· 1336 1336 __u32 co = file_create_options(source_dentry); 1337 1337 1338 1338 drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb); 1339 - cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile); 1339 + cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile); 1340 1340 1341 1341 int rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, 1342 1342 co, DELETE, SMB2_OP_RENAME, cfile, source_dentry); 1343 1343 if (rc == -EINVAL) { 1344 1344 cifs_dbg(FYI, "invalid lease key, resending request without lease"); 1345 - cifs_get_writable_path(tcon, from_name, 1346 - FIND_WR_WITH_DELETE, &cfile); 1345 + cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile); 1347 1346 rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, 1348 1347 co, DELETE, SMB2_OP_RENAME, cfile, NULL); 1349 1348 } ··· 1376 1377 1377 1378 in_iov.iov_base = &eof; 1378 1379 in_iov.iov_len = sizeof(eof); 1379 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1380 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1380 1381 1381 1382 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_DATA, 1382 1383 FILE_OPEN, 0, ACL_NO_MODE); ··· 1386 1387 cfile, NULL, NULL, dentry); 1387 1388 if (rc == -EINVAL) { 1388 1389 cifs_dbg(FYI, "invalid lease key, resending request without lease"); 1389 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1390 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1390 1391 rc = smb2_compound_op(xid, tcon, cifs_sb, 1391 1392 
full_path, &oparms, &in_iov, 1392 1393 &(int){SMB2_OP_SET_EOF}, 1, ··· 1416 1417 (buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) { 1417 1418 if (buf->Attributes == 0) 1418 1419 goto out; /* would be a no op, no sense sending this */ 1419 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1420 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1420 1421 } 1421 1422 1422 1423 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES, ··· 1475 1476 1476 1477 if (tcon->posix_extensions) { 1477 1478 cmds[1] = SMB2_OP_POSIX_QUERY_INFO; 1478 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1479 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1479 1480 rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, 1480 1481 in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL); 1481 1482 if (!rc) { ··· 1484 1485 } 1485 1486 } else { 1486 1487 cmds[1] = SMB2_OP_QUERY_INFO; 1487 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1488 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1488 1489 rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, 1489 1490 in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL); 1490 1491 if (!rc) { ··· 1635 1636 iov[1].iov_base = utf16_path; 1636 1637 iov[1].iov_len = sizeof(*utf16_path) * UniStrlen((wchar_t *)utf16_path); 1637 1638 1638 - cifs_get_writable_path(tcon, full_path, FIND_WR_WITH_DELETE, &cfile); 1639 + cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile); 1639 1640 rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov, 1640 1641 cmds, num_cmds, cfile, NULL, NULL, dentry); 1641 1642 if (rc == -EINVAL) { 1642 1643 cifs_dbg(FYI, "invalid lease key, resending request without lease\n"); 1643 - cifs_get_writable_path(tcon, full_path, 1644 - FIND_WR_WITH_DELETE, &cfile); 1644 + cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile); 1645 1645 rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov, 
1646 1646 cmds, num_cmds, cfile, NULL, NULL, NULL); 1647 1647 }
+6 -3
fs/smb/client/smb2maperror.c
··· 109 109 } 110 110 111 111 #if IS_ENABLED(CONFIG_SMB_KUNIT_TESTS) 112 + #define EXPORT_SYMBOL_FOR_SMB_TEST(sym) \ 113 + EXPORT_SYMBOL_FOR_MODULES(sym, "smb2maperror_test") 114 + 112 115 /* Previous prototype for eliminating the build warning. */ 113 116 const struct status_to_posix_error *smb2_get_err_map_test(__u32 smb2_status); 114 117 ··· 119 116 { 120 117 return smb2_get_err_map(smb2_status); 121 118 } 122 - EXPORT_SYMBOL_GPL(smb2_get_err_map_test); 119 + EXPORT_SYMBOL_FOR_SMB_TEST(smb2_get_err_map_test); 123 120 124 121 const struct status_to_posix_error *smb2_error_map_table_test = smb2_error_map_table; 125 - EXPORT_SYMBOL_GPL(smb2_error_map_table_test); 122 + EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_table_test); 126 123 127 124 unsigned int smb2_error_map_num = ARRAY_SIZE(smb2_error_map_table); 128 - EXPORT_SYMBOL_GPL(smb2_error_map_num); 125 + EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_num); 129 126 #endif
+14 -4
fs/smb/client/smb2ops.c
··· 628 628 struct smb_sockaddr_in6 *p6; 629 629 struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL; 630 630 struct cifs_server_iface tmp_iface; 631 + __be16 port; 631 632 ssize_t bytes_left; 632 633 size_t next = 0; 633 634 int nb_iface = 0; ··· 663 662 goto out; 664 663 } 665 664 665 + spin_lock(&ses->server->srv_lock); 666 + if (ses->server->dstaddr.ss_family == AF_INET) 667 + port = ((struct sockaddr_in *)&ses->server->dstaddr)->sin_port; 668 + else if (ses->server->dstaddr.ss_family == AF_INET6) 669 + port = ((struct sockaddr_in6 *)&ses->server->dstaddr)->sin6_port; 670 + else 671 + port = cpu_to_be16(CIFS_PORT); 672 + spin_unlock(&ses->server->srv_lock); 673 + 666 674 while (bytes_left >= (ssize_t)sizeof(*p)) { 667 675 memset(&tmp_iface, 0, sizeof(tmp_iface)); 668 676 /* default to 1Gbps when link speed is unset */ ··· 692 682 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4); 693 683 694 684 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */ 695 - addr4->sin_port = cpu_to_be16(CIFS_PORT); 685 + addr4->sin_port = port; 696 686 697 687 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__, 698 688 &addr4->sin_addr); ··· 706 696 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */ 707 697 addr6->sin6_flowinfo = 0; 708 698 addr6->sin6_scope_id = 0; 709 - addr6->sin6_port = cpu_to_be16(CIFS_PORT); 699 + addr6->sin6_port = port; 710 700 711 701 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__, 712 702 &addr6->sin6_addr); ··· 3362 3352 struct cifsFileInfo *open_file = NULL; 3363 3353 3364 3354 if (inode && !(info & SACL_SECINFO)) 3365 - open_file = find_readable_file(CIFS_I(inode), true); 3355 + open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY); 3366 3356 if (!open_file || (info & SACL_SECINFO)) 3367 3357 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info); 3368 3358 ··· 3908 3898 * some servers (Windows2016) will not reflect recent writes in 3909 3899 * QUERY_ALLOCATED_RANGES until SMB2_flush is called. 
3910 3900 */ 3911 - wrcfile = find_writable_file(cifsi, FIND_WR_ANY); 3901 + wrcfile = find_writable_file(cifsi, FIND_ANY); 3912 3902 if (wrcfile) { 3913 3903 filemap_write_and_wait(inode->i_mapping); 3914 3904 smb2_flush_file(xid, tcon, &wrcfile->fid);
+4 -1
fs/smb/client/smb2pdu.c
··· 5307 5307 5308 5308 memset(&rqst, 0, sizeof(struct smb_rqst)); 5309 5309 rqst.rq_iov = iov; 5310 - rqst.rq_nvec = n_vec + 1; 5310 + /* iov[0] is the SMB header; move payload to rq_iter for encryption safety */ 5311 + rqst.rq_nvec = 1; 5312 + iov_iter_kvec(&rqst.rq_iter, ITER_SOURCE, &iov[1], n_vec, 5313 + io_parms->length); 5311 5314 5312 5315 if (retries) { 5313 5316 /* Back-off before retry */
+2 -6
fs/xfs/libxfs/xfs_da_btree.c
··· 2716 2716 * larger one that needs to be free by the caller. 2717 2717 */ 2718 2718 if (nirecs > 1) { 2719 - map = kzalloc(nirecs * sizeof(struct xfs_buf_map), 2720 - GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); 2721 - if (!map) { 2722 - error = -ENOMEM; 2723 - goto out_free_irecs; 2724 - } 2719 + map = kcalloc(nirecs, sizeof(struct xfs_buf_map), 2720 + GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); 2725 2721 *mapp = map; 2726 2722 } 2727 2723
+1 -1
fs/xfs/libxfs/xfs_defer.c
··· 809 809 810 810 /* Paused items cannot absorb more work */ 811 811 if (dfp->dfp_flags & XFS_DEFER_PAUSED) 812 - return NULL; 812 + return false; 813 813 814 814 /* Already full? */ 815 815 if (ops->max_items && dfp->dfp_count >= ops->max_items)
+1 -1
fs/xfs/xfs_bmap_item.c
··· 245 245 struct xfs_bmap_intent *ba = bi_entry(a); 246 246 struct xfs_bmap_intent *bb = bi_entry(b); 247 247 248 - return ba->bi_owner->i_ino - bb->bi_owner->i_ino; 248 + return cmp_int(ba->bi_owner->i_ino, bb->bi_owner->i_ino); 249 249 } 250 250 251 251 /* Log bmap updates in the intent item. */
+7 -1
fs/xfs/xfs_dquot.c
··· 1439 1439 return 0; 1440 1440 1441 1441 out_abort: 1442 + /* 1443 + * Shut down the log before removing the dquot item from the AIL. 1444 + * Otherwise, the log tail may advance past this item's LSN while 1445 + * log writes are still in progress, making these unflushed changes 1446 + * unrecoverable on the next mount. 1447 + */ 1448 + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1442 1449 dqp->q_flags &= ~XFS_DQFLAG_DIRTY; 1443 1450 xfs_trans_ail_delete(lip, 0); 1444 - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1445 1451 xfs_dqfunlock(dqp); 1446 1452 return error; 1447 1453 }
+10 -7
fs/xfs/xfs_healthmon.c
··· 141 141 hm->mount_cookie = DETACHED_MOUNT_COOKIE; 142 142 spin_unlock(&xfs_healthmon_lock); 143 143 144 + /* 145 + * Wake up any readers that might remain. This can happen if unmount 146 + * races with the healthmon fd owner entering ->read_iter, having 147 + * already emptied the event queue. 148 + * 149 + * In the ->release case there shouldn't be any readers because the 150 + * only users of the waiter are read and poll. 151 + */ 152 + wake_up_all(&hm->wait); 153 + 144 154 trace_xfs_healthmon_detach(hm); 145 155 xfs_healthmon_put(hm); 146 156 } ··· 1037 1027 * process can create another health monitor file. 1038 1028 */ 1039 1029 xfs_healthmon_detach(hm); 1040 - 1041 - /* 1042 - * Wake up any readers that might be left. There shouldn't be any 1043 - * because the only users of the waiter are read and poll. 1044 - */ 1045 - wake_up_all(&hm->wait); 1046 - 1047 1030 xfs_healthmon_put(hm); 1048 1031 return 0; 1049 1032 }
-1
fs/xfs/xfs_icache.c
··· 159 159 ASSERT(!test_bit(XFS_LI_IN_AIL, 160 160 &ip->i_itemp->ili_item.li_flags)); 161 161 xfs_inode_item_destroy(ip); 162 - ip->i_itemp = NULL; 163 162 } 164 163 165 164 kmem_cache_free(xfs_inode_cache, ip);
+2
fs/xfs/xfs_log.c
··· 1357 1357 1358 1358 if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1) 1359 1359 log->l_iclog_roundoff = mp->m_sb.sb_logsunit; 1360 + else if (mp->m_sb.sb_logsectsize > 0) 1361 + log->l_iclog_roundoff = mp->m_sb.sb_logsectsize; 1360 1362 else 1361 1363 log->l_iclog_roundoff = BBSIZE; 1362 1364
-2
fs/xfs/xfs_zone_gc.c
··· 96 96 */ 97 97 xfs_fsblock_t old_startblock; 98 98 xfs_daddr_t new_daddr; 99 - struct xfs_zone_scratch *scratch; 100 99 101 100 /* Are we writing to a sequential write required zone? */ 102 101 bool is_seq; ··· 778 779 ihold(VFS_I(chunk->ip)); 779 780 split_chunk->ip = chunk->ip; 780 781 split_chunk->is_seq = chunk->is_seq; 781 - split_chunk->scratch = chunk->scratch; 782 782 split_chunk->offset = chunk->offset; 783 783 split_chunk->len = split_len; 784 784 split_chunk->old_startblock = chunk->old_startblock;
+3 -1
include/linux/build_bug.h
··· 32 32 /** 33 33 * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied 34 34 * error message. 35 - * @condition: the condition which the compiler should know is false. 35 + * @cond: the condition which the compiler should know is false. 36 + * @msg: build-time error message 36 37 * 37 38 * See BUILD_BUG_ON for description. 38 39 */ ··· 61 60 62 61 /** 63 62 * static_assert - check integer constant expression at build time 63 + * @expr: expression to be checked 64 64 * 65 65 * static_assert() is a wrapper for the C11 _Static_assert, with a 66 66 * little macro magic to make the message optional (defaulting to the
+2 -1
include/linux/etherdevice.h
··· 42 42 43 43 int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, 44 44 const void *daddr, const void *saddr, unsigned len); 45 - int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 45 + int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, 46 + unsigned char *haddr); 46 47 int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, 47 48 __be16 type); 48 49 void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+4 -4
include/linux/firmware/intel/stratix10-svc-client.h
··· 68 68 * timeout value used in Stratix10 FPGA manager driver. 69 69 * timeout value used in RSU driver 70 70 */ 71 - #define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300 72 - #define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720 73 - #define SVC_RSU_REQUEST_TIMEOUT_MS 300 71 + #define SVC_RECONFIG_REQUEST_TIMEOUT_MS 5000 72 + #define SVC_RECONFIG_BUFFER_TIMEOUT_MS 5000 73 + #define SVC_RSU_REQUEST_TIMEOUT_MS 2000 74 74 #define SVC_FCS_REQUEST_TIMEOUT_MS 2000 75 75 #define SVC_COMPLETED_TIMEOUT_MS 30000 76 - #define SVC_HWMON_REQUEST_TIMEOUT_MS 300 76 + #define SVC_HWMON_REQUEST_TIMEOUT_MS 2000 77 77 78 78 struct stratix10_svc_chan; 79 79
+1
include/linux/hid.h
··· 682 682 __s32 battery_charge_status; 683 683 enum hid_battery_status battery_status; 684 684 bool battery_avoid_query; 685 + bool battery_present; 685 686 ktime_t battery_ratelimit_time; 686 687 #endif 687 688
+2 -1
include/linux/if_ether.h
··· 40 40 return (struct ethhdr *)skb_inner_mac_header(skb); 41 41 } 42 42 43 - int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 43 + int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, 44 + unsigned char *haddr); 44 45 45 46 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); 46 47
+1
include/linux/io_uring_types.h
··· 388 388 * regularly bounce b/w CPUs. 389 389 */ 390 390 struct { 391 + struct io_rings __rcu *rings_rcu; 391 392 struct llist_head work_llist; 392 393 struct llist_head retry_llist; 393 394 unsigned long check_cq;
+35 -48
include/linux/kvm_host.h
··· 1940 1940 1941 1941 struct kvm_stat_data { 1942 1942 struct kvm *kvm; 1943 - const struct _kvm_stats_desc *desc; 1943 + const struct kvm_stats_desc *desc; 1944 1944 enum kvm_stat_kind kind; 1945 1945 }; 1946 1946 1947 - struct _kvm_stats_desc { 1948 - struct kvm_stats_desc desc; 1949 - char name[KVM_STATS_NAME_SIZE]; 1950 - }; 1951 - 1952 - #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ 1953 - .flags = type | unit | base | \ 1954 - BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ 1955 - BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ 1956 - BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ 1957 - .exponent = exp, \ 1958 - .size = sz, \ 1947 + #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ 1948 + .flags = type | unit | base | \ 1949 + BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ 1950 + BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ 1951 + BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ 1952 + .exponent = exp, \ 1953 + .size = sz, \ 1959 1954 .bucket_size = bsz 1960 1955 1961 - #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1962 - { \ 1963 - { \ 1964 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1965 - .offset = offsetof(struct kvm_vm_stat, generic.stat) \ 1966 - }, \ 1967 - .name = #stat, \ 1968 - } 1969 - #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1970 - { \ 1971 - { \ 1972 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1973 - .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ 1974 - }, \ 1975 - .name = #stat, \ 1976 - } 1977 - #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1978 - { \ 1979 - { \ 1980 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1981 - .offset = offsetof(struct kvm_vm_stat, stat) \ 1982 - }, \ 1983 - .name = #stat, \ 1984 - } 1985 - #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1986 - { \ 1987 - { \ 1988 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1989 - .offset = 
offsetof(struct kvm_vcpu_stat, stat) \ 1990 - }, \ 1991 - .name = #stat, \ 1992 - } 1956 + #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1957 + { \ 1958 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1959 + .offset = offsetof(struct kvm_vm_stat, generic.stat), \ 1960 + .name = #stat, \ 1961 + } 1962 + #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1963 + { \ 1964 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1965 + .offset = offsetof(struct kvm_vcpu_stat, generic.stat), \ 1966 + .name = #stat, \ 1967 + } 1968 + #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1969 + { \ 1970 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1971 + .offset = offsetof(struct kvm_vm_stat, stat), \ 1972 + .name = #stat, \ 1973 + } 1974 + #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1975 + { \ 1976 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1977 + .offset = offsetof(struct kvm_vcpu_stat, stat), \ 1978 + .name = #stat, \ 1979 + } 1993 1980 /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ 1994 1981 #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ 1995 1982 SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) ··· 2053 2066 STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) 2054 2067 2055 2068 ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, 2056 - const struct _kvm_stats_desc *desc, 2069 + const struct kvm_stats_desc *desc, 2057 2070 void *stats, size_t size_stats, 2058 2071 char __user *user_buffer, size_t size, loff_t *offset); 2059 2072 ··· 2098 2111 2099 2112 2100 2113 extern const struct kvm_stats_header kvm_vm_stats_header; 2101 - extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; 2114 + extern const struct kvm_stats_desc kvm_vm_stats_desc[]; 2102 2115 extern const struct kvm_stats_header kvm_vcpu_stats_header; 2103 - extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; 2116 + extern const struct kvm_stats_desc 
kvm_vcpu_stats_desc[]; 2104 2117 2105 2118 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) 2106 2119 {
+5 -4
include/linux/netdevice.h
··· 311 311 int (*create) (struct sk_buff *skb, struct net_device *dev, 312 312 unsigned short type, const void *daddr, 313 313 const void *saddr, unsigned int len); 314 - int (*parse)(const struct sk_buff *skb, unsigned char *haddr); 314 + int (*parse)(const struct sk_buff *skb, 315 + const struct net_device *dev, 316 + unsigned char *haddr); 315 317 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 316 318 void (*cache_update)(struct hh_cache *hh, 317 319 const struct net_device *dev, ··· 2157 2155 unsigned long state; 2158 2156 unsigned int flags; 2159 2157 unsigned short hard_header_len; 2158 + enum netdev_stat_type pcpu_stat_type:8; 2160 2159 netdev_features_t features; 2161 2160 struct inet6_dev __rcu *ip6_ptr; 2162 2161 __cacheline_group_end(net_device_read_txrx); ··· 2406 2403 /* mid-layer private */ 2407 2404 void *ml_priv; 2408 2405 enum netdev_ml_priv_type ml_priv_type; 2409 - 2410 - enum netdev_stat_type pcpu_stat_type:8; 2411 2406 2412 2407 #if IS_ENABLED(CONFIG_GARP) 2413 2408 struct garp_port __rcu *garp_port; ··· 3447 3446 3448 3447 if (!dev->header_ops || !dev->header_ops->parse) 3449 3448 return 0; 3450 - return dev->header_ops->parse(skb, haddr); 3449 + return dev->header_ops->parse(skb, dev, haddr); 3451 3450 } 3452 3451 3453 3452 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
+1 -1
include/linux/nvme-auth.h
··· 11 11 struct nvme_dhchap_key { 12 12 size_t len; 13 13 u8 hash; 14 - u8 key[]; 14 + u8 key[] __counted_by(len); 15 15 }; 16 16 17 17 u32 nvme_auth_get_seqnum(void);
+5 -1
include/linux/rseq_types.h
··· 133 133 * @active: MM CID is active for the task 134 134 * @cid: The CID associated to the task either permanently or 135 135 * borrowed from the CPU 136 + * @node: Queued in the per MM MMCID list 136 137 */ 137 138 struct sched_mm_cid { 138 139 unsigned int active; 139 140 unsigned int cid; 141 + struct hlist_node node; 140 142 }; 141 143 142 144 /** ··· 159 157 * @work: Regular work to handle the affinity mode change case 160 158 * @lock: Spinlock to protect against affinity setting which can't take @mutex 161 159 * @mutex: Mutex to serialize forks and exits related to this mm 160 + * @user_list: List of the MM CID users of a MM 162 161 * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map 163 162 * is growth only. 164 163 * @users: The number of tasks sharing this MM. Separate from mm::mm_users ··· 180 177 181 178 raw_spinlock_t lock; 182 179 struct mutex mutex; 180 + struct hlist_head user_list; 183 181 184 182 /* Low frequency modified */ 185 183 unsigned int nr_cpus_allowed; 186 184 unsigned int users; 187 185 unsigned int pcpu_thrs; 188 186 unsigned int update_deferred; 189 - }____cacheline_aligned_in_smp; 187 + } ____cacheline_aligned; 190 188 #else /* CONFIG_SCHED_MM_CID */ 191 189 struct mm_mm_cid { }; 192 190 struct sched_mm_cid { };
-2
include/linux/sched.h
··· 2354 2354 #ifdef CONFIG_SCHED_MM_CID 2355 2355 void sched_mm_cid_before_execve(struct task_struct *t); 2356 2356 void sched_mm_cid_after_execve(struct task_struct *t); 2357 - void sched_mm_cid_fork(struct task_struct *t); 2358 2357 void sched_mm_cid_exit(struct task_struct *t); 2359 2358 static __always_inline int task_mm_cid(struct task_struct *t) 2360 2359 { ··· 2362 2363 #else 2363 2364 static inline void sched_mm_cid_before_execve(struct task_struct *t) { } 2364 2365 static inline void sched_mm_cid_after_execve(struct task_struct *t) { } 2365 - static inline void sched_mm_cid_fork(struct task_struct *t) { } 2366 2366 static inline void sched_mm_cid_exit(struct task_struct *t) { } 2367 2367 static __always_inline int task_mm_cid(struct task_struct *t) 2368 2368 {
+6 -2
include/linux/usb.h
··· 1862 1862 * SYNCHRONOUS CALL SUPPORT * 1863 1863 *-------------------------------------------------------------------*/ 1864 1864 1865 + /* Maximum value allowed for timeout in synchronous routines below */ 1866 + #define USB_MAX_SYNCHRONOUS_TIMEOUT 60000 /* ms */ 1867 + 1865 1868 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, 1866 1869 __u8 request, __u8 requesttype, __u16 value, __u16 index, 1867 1870 void *data, __u16 size, int timeout); 1868 1871 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, 1869 1872 void *data, int len, int *actual_length, int timeout); 1870 1873 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, 1871 - void *data, int len, int *actual_length, 1872 - int timeout); 1874 + void *data, int len, int *actual_length, int timeout); 1875 + extern int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe, 1876 + void *data, int len, int *actual_length, int timeout); 1873 1877 1874 1878 /* wrappers around usb_control_msg() for the most common standard requests */ 1875 1879 int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
+3
include/linux/usb/quirks.h
··· 78 78 /* skip BOS descriptor request */ 79 79 #define USB_QUIRK_NO_BOS BIT(17) 80 80 81 + /* Device claims zero configurations, forcing to 1 */ 82 + #define USB_QUIRK_FORCE_ONE_CONFIG BIT(18) 83 + 81 84 #endif /* __LINUX_USB_QUIRKS_H */
+22 -6
include/net/ip_tunnels.h
··· 665 665 static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) 666 666 { 667 667 if (pkt_len > 0) { 668 - struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); 668 + if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) { 669 + struct pcpu_dstats *dstats = get_cpu_ptr(dev->dstats); 669 670 670 - u64_stats_update_begin(&tstats->syncp); 671 - u64_stats_add(&tstats->tx_bytes, pkt_len); 672 - u64_stats_inc(&tstats->tx_packets); 673 - u64_stats_update_end(&tstats->syncp); 674 - put_cpu_ptr(tstats); 671 + u64_stats_update_begin(&dstats->syncp); 672 + u64_stats_add(&dstats->tx_bytes, pkt_len); 673 + u64_stats_inc(&dstats->tx_packets); 674 + u64_stats_update_end(&dstats->syncp); 675 + put_cpu_ptr(dstats); 676 + return; 677 + } 678 + if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) { 679 + struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); 680 + 681 + u64_stats_update_begin(&tstats->syncp); 682 + u64_stats_add(&tstats->tx_bytes, pkt_len); 683 + u64_stats_inc(&tstats->tx_packets); 684 + u64_stats_update_end(&tstats->syncp); 685 + put_cpu_ptr(tstats); 686 + return; 687 + } 688 + pr_err_once("iptunnel_xmit_stats pcpu_stat_type=%d\n", 689 + dev->pcpu_stat_type); 690 + WARN_ON_ONCE(1); 675 691 return; 676 692 } 677 693
+3 -1
include/net/mac80211.h
··· 7411 7411 * @band: the band to transmit on 7412 7412 * @sta: optional pointer to get the station to send the frame to 7413 7413 * 7414 - * Return: %true if the skb was prepared, %false otherwise 7414 + * Return: %true if the skb was prepared, %false otherwise. 7415 + * On failure, the skb is freed by this function; callers must not 7416 + * free it again. 7415 7417 * 7416 7418 * Note: must be called under RCU lock 7417 7419 */
+2 -4
include/net/netfilter/nf_tables.h
··· 266 266 unsigned char data[]; 267 267 }; 268 268 269 - #define NFT_SET_ELEM_INTERNAL_LAST 0x1 270 - 271 269 /* placeholder structure for opaque set element backend representation. */ 272 270 struct nft_elem_priv { }; 273 271 ··· 275 277 * @key: element key 276 278 * @key_end: closing element key 277 279 * @data: element data 278 - * @flags: flags 279 280 * @priv: element private data and extensions 280 281 */ 281 282 struct nft_set_elem { ··· 290 293 u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; 291 294 struct nft_data val; 292 295 } data; 293 - u32 flags; 294 296 struct nft_elem_priv *priv; 295 297 }; 296 298 ··· 861 865 u64 timeout, u64 expiration, gfp_t gfp); 862 866 int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, 863 867 struct nft_expr *expr_array[]); 868 + void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, 869 + struct nft_set_elem_expr *elem_expr); 864 870 void nft_set_elem_destroy(const struct nft_set *set, 865 871 const struct nft_elem_priv *elem_priv, 866 872 bool destroy_expr);
+33
include/net/sch_generic.h
··· 719 719 void qdisc_put(struct Qdisc *qdisc); 720 720 void qdisc_put_unlocked(struct Qdisc *qdisc); 721 721 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len); 722 + 723 + static inline void dev_reset_queue(struct net_device *dev, 724 + struct netdev_queue *dev_queue, 725 + void *_unused) 726 + { 727 + struct Qdisc *qdisc; 728 + bool nolock; 729 + 730 + qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 731 + if (!qdisc) 732 + return; 733 + 734 + nolock = qdisc->flags & TCQ_F_NOLOCK; 735 + 736 + if (nolock) 737 + spin_lock_bh(&qdisc->seqlock); 738 + spin_lock_bh(qdisc_lock(qdisc)); 739 + 740 + qdisc_reset(qdisc); 741 + 742 + spin_unlock_bh(qdisc_lock(qdisc)); 743 + if (nolock) { 744 + clear_bit(__QDISC_STATE_MISSED, &qdisc->state); 745 + clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); 746 + spin_unlock_bh(&qdisc->seqlock); 747 + } 748 + } 749 + 722 750 #ifdef CONFIG_NET_SCHED 723 751 int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type, 724 752 void *type_data); ··· 1473 1445 struct mini_Qdisc __rcu **p_miniq); 1474 1446 void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, 1475 1447 struct tcf_block *block); 1448 + 1449 + static inline bool mini_qdisc_pair_inited(struct mini_Qdisc_pair *miniqp) 1450 + { 1451 + return !!miniqp->p_miniq; 1452 + } 1476 1453 1477 1454 void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx); 1478 1455
+1 -1
include/net/udp_tunnel.h
··· 52 52 static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, 53 53 struct socket **sockp) 54 54 { 55 - return 0; 55 + return -EPFNOSUPPORT; 56 56 } 57 57 #endif 58 58
+8
include/uapi/linux/kvm.h
··· 14 14 #include <linux/ioctl.h> 15 15 #include <asm/kvm.h> 16 16 17 + #ifdef __KERNEL__ 18 + #include <linux/kvm_types.h> 19 + #endif 20 + 17 21 #define KVM_API_VERSION 12 18 22 19 23 /* ··· 1605 1601 __u16 size; 1606 1602 __u32 offset; 1607 1603 __u32 bucket_size; 1604 + #ifdef __KERNEL__ 1605 + char name[KVM_STATS_NAME_SIZE]; 1606 + #else 1608 1607 char name[]; 1608 + #endif 1609 1609 }; 1610 1610 1611 1611 #define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+1 -1
io_uring/bpf_filter.c
··· 85 85 do { 86 86 if (filter == &dummy_filter) 87 87 return -EACCES; 88 - ret = bpf_prog_run(filter->prog, &bpf_ctx); 88 + ret = bpf_prog_run_pin_on_cpu(filter->prog, &bpf_ctx); 89 89 if (!ret) 90 90 return -EACCES; 91 91 filter = filter->next;
+7 -3
io_uring/eventfd.c
··· 76 76 { 77 77 bool skip = false; 78 78 struct io_ev_fd *ev_fd; 79 - 80 - if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) 81 - return; 79 + struct io_rings *rings; 82 80 83 81 guard(rcu)(); 82 + 83 + rings = rcu_dereference(ctx->rings_rcu); 84 + if (!rings) 85 + return; 86 + if (READ_ONCE(rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) 87 + return; 84 88 ev_fd = rcu_dereference(ctx->io_ev_fd); 85 89 /* 86 90 * Check again if ev_fd exists in case an io_eventfd_unregister call
+3 -1
io_uring/io_uring.c
··· 1745 1745 * well as 2 contiguous entries. 1746 1746 */ 1747 1747 if (!(ctx->flags & IORING_SETUP_SQE_MIXED) || *left < 2 || 1748 - !(ctx->cached_sq_head & (ctx->sq_entries - 1))) 1748 + (unsigned)(sqe - ctx->sq_sqes) >= ctx->sq_entries - 1) 1749 1749 return io_init_fail_req(req, -EINVAL); 1750 1750 /* 1751 1751 * A 128b operation on a mixed SQ uses two entries, so we have ··· 2066 2066 io_free_region(ctx->user, &ctx->sq_region); 2067 2067 io_free_region(ctx->user, &ctx->ring_region); 2068 2068 ctx->rings = NULL; 2069 + RCU_INIT_POINTER(ctx->rings_rcu, NULL); 2069 2070 ctx->sq_sqes = NULL; 2070 2071 } 2071 2072 ··· 2704 2703 if (ret) 2705 2704 return ret; 2706 2705 ctx->rings = rings = io_region_get_ptr(&ctx->ring_region); 2706 + rcu_assign_pointer(ctx->rings_rcu, rings); 2707 2707 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) 2708 2708 ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset); 2709 2709
+11 -2
io_uring/kbuf.c
··· 111 111 112 112 buf = req->kbuf; 113 113 bl = io_buffer_get_list(ctx, buf->bgid); 114 - list_add(&buf->list, &bl->buf_list); 115 - bl->nbufs++; 114 + /* 115 + * If the buffer list was upgraded to a ring-based one, or removed, 116 + * while the request was in-flight in io-wq, drop it. 117 + */ 118 + if (bl && !(bl->flags & IOBL_BUF_RING)) { 119 + list_add(&buf->list, &bl->buf_list); 120 + bl->nbufs++; 121 + } else { 122 + kfree(buf); 123 + } 116 124 req->flags &= ~REQ_F_BUFFER_SELECTED; 125 + req->kbuf = NULL; 117 126 118 127 io_ring_submit_unlock(ctx, issue_flags); 119 128 return true;
+13 -2
io_uring/register.c
··· 202 202 return -EPERM; 203 203 /* 204 204 * Similar to seccomp, disallow setting a filter if task_no_new_privs 205 - * is true and we're not CAP_SYS_ADMIN. 205 + * is false and we're not CAP_SYS_ADMIN. 206 206 */ 207 207 if (!task_no_new_privs(current) && 208 208 !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) ··· 238 238 239 239 /* 240 240 * Similar to seccomp, disallow setting a filter if task_no_new_privs 241 - * is true and we're not CAP_SYS_ADMIN. 241 + * is false and we're not CAP_SYS_ADMIN. 242 242 */ 243 243 if (!task_no_new_privs(current) && 244 244 !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) ··· 633 633 ctx->sq_entries = p->sq_entries; 634 634 ctx->cq_entries = p->cq_entries; 635 635 636 + /* 637 + * Just mark any flag we may have missed and that the application 638 + * should act on unconditionally. Worst case it'll be an extra 639 + * syscall. 640 + */ 641 + atomic_or(IORING_SQ_TASKRUN | IORING_SQ_NEED_WAKEUP, &n.rings->sq_flags); 636 642 ctx->rings = n.rings; 643 + rcu_assign_pointer(ctx->rings_rcu, n.rings); 644 + 637 645 ctx->sq_sqes = n.sq_sqes; 638 646 swap_old(ctx, o, n, ring_region); 639 647 swap_old(ctx, o, n, sq_region); ··· 650 642 out: 651 643 spin_unlock(&ctx->completion_lock); 652 644 mutex_unlock(&ctx->mmap_lock); 645 + /* Wait for concurrent io_ctx_mark_taskrun() */ 646 + if (to_free == &o) 647 + synchronize_rcu_expedited(); 653 648 io_register_free_rings(ctx, to_free); 654 649 655 650 if (ctx->sq_data)
+20 -2
io_uring/tw.c
··· 152 152 WARN_ON_ONCE(ret); 153 153 } 154 154 155 + /* 156 + * Sets IORING_SQ_TASKRUN in the sq_flags shared with userspace, using the 157 + * RCU protected rings pointer to be safe against concurrent ring resizing. 158 + */ 159 + static void io_ctx_mark_taskrun(struct io_ring_ctx *ctx) 160 + { 161 + lockdep_assert_in_rcu_read_lock(); 162 + 163 + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) { 164 + struct io_rings *rings = rcu_dereference(ctx->rings_rcu); 165 + 166 + atomic_or(IORING_SQ_TASKRUN, &rings->sq_flags); 167 + } 168 + } 169 + 155 170 void io_req_local_work_add(struct io_kiocb *req, unsigned flags) 156 171 { 157 172 struct io_ring_ctx *ctx = req->ctx; ··· 221 206 */ 222 207 223 208 if (!head) { 224 - if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) 225 - atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); 209 + io_ctx_mark_taskrun(ctx); 226 210 if (ctx->has_evfd) 227 211 io_eventfd_signal(ctx, false); 228 212 } ··· 245 231 if (!llist_add(&req->io_task_work.node, &tctx->task_list)) 246 232 return; 247 233 234 + /* 235 + * Doesn't need to use ->rings_rcu, as resizing isn't supported for 236 + * !DEFER_TASKRUN. 237 + */ 248 238 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) 249 239 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); 250 240
+6
kernel/cgroup/cgroup.c
··· 5109 5109 return; 5110 5110 5111 5111 task = list_entry(it->task_pos, struct task_struct, cg_list); 5112 + /* 5113 + * Hide tasks that are exiting but not yet removed. Keep zombie 5114 + * leaders with live threads visible. 5115 + */ 5116 + if ((task->flags & PF_EXITING) && !atomic_read(&task->signal->live)) 5117 + goto repeat; 5112 5118 5113 5119 if (it->flags & CSS_TASK_ITER_PROCS) { 5114 5120 /* if PROCS, skip over tasks which aren't group leaders */
+31 -28
kernel/cgroup/cpuset.c
··· 879 879 /* 880 880 * Cgroup v2 doesn't support domain attributes, just set all of them 881 881 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a 882 - * subset of HK_TYPE_DOMAIN housekeeping CPUs. 882 + * subset of HK_TYPE_DOMAIN_BOOT housekeeping CPUs. 883 883 */ 884 884 for (i = 0; i < ndoms; i++) { 885 885 /* ··· 888 888 */ 889 889 if (!csa || csa[i] == &top_cpuset) 890 890 cpumask_and(doms[i], top_cpuset.effective_cpus, 891 - housekeeping_cpumask(HK_TYPE_DOMAIN)); 891 + housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)); 892 892 else 893 893 cpumask_copy(doms[i], csa[i]->effective_cpus); 894 894 if (dattr) ··· 1329 1329 } 1330 1330 1331 1331 /* 1332 - * update_hk_sched_domains - Update HK cpumasks & rebuild sched domains 1332 + * cpuset_update_sd_hk_unlock - Rebuild sched domains, update HK & unlock 1333 1333 * 1334 - * Update housekeeping cpumasks and rebuild sched domains if necessary. 1335 - * This should be called at the end of cpuset or hotplug actions. 1334 + * Update housekeeping cpumasks and rebuild sched domains if necessary and 1335 + * then do a cpuset_full_unlock(). 1336 + * This should be called at the end of cpuset operation. 
1336 1337 */ 1337 - static void update_hk_sched_domains(void) 1338 + static void cpuset_update_sd_hk_unlock(void) 1339 + __releases(&cpuset_mutex) 1340 + __releases(&cpuset_top_mutex) 1338 1341 { 1342 + /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */ 1343 + if (force_sd_rebuild) 1344 + rebuild_sched_domains_locked(); 1345 + 1339 1346 if (update_housekeeping) { 1340 - /* Updating HK cpumasks implies rebuild sched domains */ 1341 1347 update_housekeeping = false; 1342 - force_sd_rebuild = true; 1343 1348 cpumask_copy(isolated_hk_cpus, isolated_cpus); 1344 1349 1345 1350 /* ··· 1355 1350 mutex_unlock(&cpuset_mutex); 1356 1351 cpus_read_unlock(); 1357 1352 WARN_ON_ONCE(housekeeping_update(isolated_hk_cpus)); 1358 - cpus_read_lock(); 1359 - mutex_lock(&cpuset_mutex); 1353 + mutex_unlock(&cpuset_top_mutex); 1354 + } else { 1355 + cpuset_full_unlock(); 1360 1356 } 1361 - /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */ 1362 - if (force_sd_rebuild) 1363 - rebuild_sched_domains_locked(); 1364 1357 } 1365 1358 1366 1359 /* 1367 - * Work function to invoke update_hk_sched_domains() 1360 + * Work function to invoke cpuset_update_sd_hk_unlock() 1368 1361 */ 1369 1362 static void hk_sd_workfn(struct work_struct *work) 1370 1363 { 1371 1364 cpuset_full_lock(); 1372 - update_hk_sched_domains(); 1373 - cpuset_full_unlock(); 1365 + cpuset_update_sd_hk_unlock(); 1374 1366 } 1375 1367 1376 1368 /** ··· 3232 3230 3233 3231 free_cpuset(trialcs); 3234 3232 out_unlock: 3235 - update_hk_sched_domains(); 3236 - cpuset_full_unlock(); 3233 + cpuset_update_sd_hk_unlock(); 3237 3234 if (of_cft(of)->private == FILE_MEMLIST) 3238 3235 schedule_flush_migrate_mm(); 3239 3236 return retval ?: nbytes; ··· 3339 3338 cpuset_full_lock(); 3340 3339 if (is_cpuset_online(cs)) 3341 3340 retval = update_prstate(cs, val); 3342 - update_hk_sched_domains(); 3343 - cpuset_full_unlock(); 3341 + cpuset_update_sd_hk_unlock(); 3344 3342 return retval ?: nbytes; 3345 
3343 } 3346 3344 ··· 3513 3513 /* Reset valid partition back to member */ 3514 3514 if (is_partition_valid(cs)) 3515 3515 update_prstate(cs, PRS_MEMBER); 3516 - update_hk_sched_domains(); 3517 - cpuset_full_unlock(); 3516 + cpuset_update_sd_hk_unlock(); 3518 3517 } 3519 3518 3520 3519 static void cpuset_css_free(struct cgroup_subsys_state *css) ··· 3922 3923 rcu_read_unlock(); 3923 3924 } 3924 3925 3925 - 3926 3926 /* 3927 - * Queue a work to call housekeeping_update() & rebuild_sched_domains() 3928 - * There will be a slight delay before the HK_TYPE_DOMAIN housekeeping 3929 - * cpumask can correctly reflect what is in isolated_cpus. 3927 + * rebuild_sched_domains() will always be called directly if needed 3928 + * to make sure that newly added or removed CPU will be reflected in 3929 + * the sched domains. However, if isolated partition invalidation 3930 + * or recreation is being done (update_housekeeping set), a work item 3931 + * will be queued to call housekeeping_update() to update the 3932 + * corresponding housekeeping cpumasks after some slight delay. 3930 3933 * 3931 3934 * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that 3932 3935 * is still pending. Before the pending bit is cleared, the work data ··· 3937 3936 * previously queued work. Since hk_sd_workfn() doesn't use the work 3938 3937 * item at all, this is not a problem. 3939 3938 */ 3940 - if (update_housekeeping || force_sd_rebuild) 3941 - queue_work(system_unbound_wq, &hk_sd_work); 3939 + if (force_sd_rebuild) 3940 + rebuild_sched_domains_cpuslocked(); 3941 + if (update_housekeeping) 3942 + queue_work(system_dfl_wq, &hk_sd_work); 3942 3943 3943 3944 free_tmpmasks(ptmp); 3944 3945 }
+2 -2
kernel/crash_dump_dm_crypt.c
··· 168 168 169 169 memcpy(dm_key->data, ukp->data, ukp->datalen); 170 170 dm_key->key_size = ukp->datalen; 171 - kexec_dprintk("Get dm crypt key (size=%u) %s: %8ph\n", dm_key->key_size, 172 - dm_key->key_desc, dm_key->data); 171 + kexec_dprintk("Get dm crypt key (size=%u) %s\n", dm_key->key_size, 172 + dm_key->key_desc); 173 173 174 174 out: 175 175 up_read(&key->sem);
+1 -2
kernel/fork.c
··· 1000 1000 #ifdef CONFIG_SCHED_MM_CID 1001 1001 tsk->mm_cid.cid = MM_CID_UNSET; 1002 1002 tsk->mm_cid.active = 0; 1003 + INIT_HLIST_NODE(&tsk->mm_cid.node); 1003 1004 #endif 1004 1005 return tsk; 1005 1006 ··· 1587 1586 1588 1587 tsk->mm = mm; 1589 1588 tsk->active_mm = mm; 1590 - sched_mm_cid_fork(tsk); 1591 1589 return 0; 1592 1590 } 1593 1591 ··· 2498 2498 exit_nsproxy_namespaces(p); 2499 2499 bad_fork_cleanup_mm: 2500 2500 if (p->mm) { 2501 - sched_mm_cid_exit(p); 2502 2501 mm_clear_owner(p->mm, p); 2503 2502 mmput(p->mm); 2504 2503 }
+6 -2
kernel/kprobes.c
··· 1144 1144 lockdep_assert_held(&kprobe_mutex); 1145 1145 1146 1146 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); 1147 - if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret)) 1147 + if (ret < 0) 1148 1148 return ret; 1149 1149 1150 1150 if (*cnt == 0) { 1151 1151 ret = register_ftrace_function(ops); 1152 - if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) { 1152 + if (ret < 0) { 1153 1153 /* 1154 1154 * At this point, sinec ops is not registered, we should be sefe from 1155 1155 * registering empty filter. ··· 1178 1178 int ret; 1179 1179 1180 1180 lockdep_assert_held(&kprobe_mutex); 1181 + if (unlikely(kprobe_ftrace_disabled)) { 1182 + /* Now ftrace is disabled forever, disarm is already done. */ 1183 + return 0; 1184 + } 1181 1185 1182 1186 if (*cnt == 1) { 1183 1187 ret = unregister_ftrace_function(ops);
+29 -52
kernel/sched/core.c
··· 4729 4729 scx_cancel_fork(p); 4730 4730 } 4731 4731 4732 + static void sched_mm_cid_fork(struct task_struct *t); 4733 + 4732 4734 void sched_post_fork(struct task_struct *p) 4733 4735 { 4736 + sched_mm_cid_fork(p); 4734 4737 uclamp_post_fork(p); 4735 4738 scx_post_fork(p); 4736 4739 } ··· 10620 10617 } 10621 10618 } 10622 10619 10623 - static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm) 10620 + static void mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm) 10624 10621 { 10625 10622 /* Remote access to mm::mm_cid::pcpu requires rq_lock */ 10626 10623 guard(task_rq_lock)(t); 10627 - /* If the task is not active it is not in the users count */ 10628 - if (!t->mm_cid.active) 10629 - return false; 10630 10624 if (cid_on_task(t->mm_cid.cid)) { 10631 10625 /* If running on the CPU, put the CID in transit mode, otherwise drop it */ 10632 10626 if (task_rq(t)->curr == t) ··· 10631 10631 else 10632 10632 mm_unset_cid_on_task(t); 10633 10633 } 10634 - return true; 10635 - } 10636 - 10637 - static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm) 10638 - { 10639 - struct task_struct *p, *t; 10640 - unsigned int users; 10641 - 10642 - /* 10643 - * This can obviously race with a concurrent affinity change, which 10644 - * increases the number of allowed CPUs for this mm, but that does 10645 - * not affect the mode and only changes the CID constraints. A 10646 - * possible switch back to per task mode happens either in the 10647 - * deferred handler function or in the next fork()/exit(). 10648 - * 10649 - * The caller has already transferred. The newly incoming task is 10650 - * already accounted for, but not yet visible. 
10651 - */ 10652 - users = mm->mm_cid.users - 2; 10653 - if (!users) 10654 - return; 10655 - 10656 - guard(rcu)(); 10657 - for_other_threads(current, t) { 10658 - if (mm_cid_fixup_task_to_cpu(t, mm)) 10659 - users--; 10660 - } 10661 - 10662 - if (!users) 10663 - return; 10664 - 10665 - /* Happens only for VM_CLONE processes. */ 10666 - for_each_process_thread(p, t) { 10667 - if (t == current || t->mm != mm) 10668 - continue; 10669 - if (mm_cid_fixup_task_to_cpu(t, mm)) { 10670 - if (--users == 0) 10671 - return; 10672 - } 10673 - } 10674 10634 } 10675 10635 10676 10636 static void mm_cid_fixup_tasks_to_cpus(void) 10677 10637 { 10678 10638 struct mm_struct *mm = current->mm; 10639 + struct task_struct *t; 10679 10640 10680 - mm_cid_do_fixup_tasks_to_cpus(mm); 10641 + lockdep_assert_held(&mm->mm_cid.mutex); 10642 + 10643 + hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) { 10644 + /* Current has already transferred before invoking the fixup. */ 10645 + if (t != current) 10646 + mm_cid_fixup_task_to_cpu(t, mm); 10647 + } 10648 + 10681 10649 mm_cid_complete_transit(mm, MM_CID_ONCPU); 10682 10650 } 10683 10651 10684 10652 static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm) 10685 10653 { 10654 + lockdep_assert_held(&mm->mm_cid.lock); 10655 + 10686 10656 t->mm_cid.active = 1; 10657 + hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list); 10687 10658 mm->mm_cid.users++; 10688 10659 return mm_update_max_cids(mm); 10689 10660 } 10690 10661 10691 - void sched_mm_cid_fork(struct task_struct *t) 10662 + static void sched_mm_cid_fork(struct task_struct *t) 10692 10663 { 10693 10664 struct mm_struct *mm = t->mm; 10694 10665 bool percpu; 10695 10666 10696 - WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET); 10667 + if (!mm) 10668 + return; 10669 + 10670 + WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET); 10697 10671 10698 10672 guard(mutex)(&mm->mm_cid.mutex); 10699 10673 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { ··· 10706 10732 10707 
10733 static bool sched_mm_cid_remove_user(struct task_struct *t) 10708 10734 { 10735 + lockdep_assert_held(&t->mm->mm_cid.lock); 10736 + 10709 10737 t->mm_cid.active = 0; 10710 - scoped_guard(preempt) { 10711 - /* Clear the transition bit */ 10712 - t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid); 10713 - mm_unset_cid_on_task(t); 10714 - } 10738 + /* Clear the transition bit */ 10739 + t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid); 10740 + mm_unset_cid_on_task(t); 10741 + hlist_del_init(&t->mm_cid.node); 10715 10742 t->mm->mm_cid.users--; 10716 10743 return mm_update_max_cids(t->mm); 10717 10744 } ··· 10855 10880 mutex_init(&mm->mm_cid.mutex); 10856 10881 mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work); 10857 10882 INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn); 10883 + INIT_HLIST_HEAD(&mm->mm_cid.user_list); 10858 10884 cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); 10859 10885 bitmap_zero(mm_cidmask(mm), num_possible_cpus()); 10860 10886 } 10861 10887 #else /* CONFIG_SCHED_MM_CID */ 10862 10888 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { } 10889 + static inline void sched_mm_cid_fork(struct task_struct *t) { } 10863 10890 #endif /* !CONFIG_SCHED_MM_CID */ 10864 10891 10865 10892 static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx);
+11 -11
kernel/sched/ext.c
··· 1103 1103 } 1104 1104 1105 1105 /* seq records the order tasks are queued, used by BPF DSQ iterator */ 1106 - dsq->seq++; 1106 + WRITE_ONCE(dsq->seq, dsq->seq + 1); 1107 1107 p->scx.dsq_seq = dsq->seq; 1108 1108 1109 1109 dsq_mod_nr(dsq, 1); ··· 1470 1470 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 1471 1471 } 1472 1472 1473 - static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) 1473 + static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags) 1474 1474 { 1475 1475 struct scx_sched *sch = scx_root; 1476 1476 int sticky_cpu = p->scx.sticky_cpu; 1477 + u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags; 1477 1478 1478 1479 if (enq_flags & ENQUEUE_WAKEUP) 1479 1480 rq->scx.flags |= SCX_RQ_IN_WAKEUP; 1480 - 1481 - enq_flags |= rq->scx.extra_enq_flags; 1482 1481 1483 1482 if (sticky_cpu >= 0) 1484 1483 p->scx.sticky_cpu = -1; ··· 3907 3908 * consider offloading iff the total queued duration is over the 3908 3909 * threshold. 3909 3910 */ 3910 - min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV; 3911 - if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us)) 3911 + min_delta_us = READ_ONCE(scx_bypass_lb_intv_us) / SCX_BYPASS_LB_MIN_DELTA_DIV; 3912 + if (delta < DIV_ROUND_UP(min_delta_us, READ_ONCE(scx_slice_bypass_us))) 3912 3913 return 0; 3913 3914 3914 3915 raw_spin_rq_lock_irq(rq); ··· 4136 4137 WARN_ON_ONCE(scx_bypass_depth <= 0); 4137 4138 if (scx_bypass_depth != 1) 4138 4139 goto unlock; 4139 - WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC); 4140 + WRITE_ONCE(scx_slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC); 4140 4141 bypass_timestamp = ktime_get_ns(); 4141 4142 if (sch) 4142 4143 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); ··· 5258 5259 if (!READ_ONCE(helper)) { 5259 5260 mutex_lock(&helper_mutex); 5260 5261 if (!helper) { 5261 - helper = kthread_run_worker(0, "scx_enable_helper"); 5262 - if (IS_ERR_OR_NULL(helper)) { 5263 - helper = NULL; 5262 
+ struct kthread_worker *w = 5263 + kthread_run_worker(0, "scx_enable_helper"); 5264 + if (IS_ERR_OR_NULL(w)) { 5264 5265 mutex_unlock(&helper_mutex); 5265 5266 return -ENOMEM; 5266 5267 } 5267 - sched_set_fifo(helper->task); 5268 + sched_set_fifo(w->task); 5269 + WRITE_ONCE(helper, w); 5268 5270 } 5269 5271 mutex_unlock(&helper_mutex); 5270 5272 }
+98 -16
kernel/sched/ext_internal.h
··· 1035 1035 }; 1036 1036 1037 1037 /* 1038 - * sched_ext_entity->ops_state 1038 + * Task Ownership State Machine (sched_ext_entity->ops_state) 1039 1039 * 1040 - * Used to track the task ownership between the SCX core and the BPF scheduler. 1041 - * State transitions look as follows: 1040 + * The sched_ext core uses this state machine to track task ownership 1041 + * between the SCX core and the BPF scheduler. This allows the BPF 1042 + * scheduler to dispatch tasks without strict ordering requirements, while 1043 + * the SCX core safely rejects invalid dispatches. 1042 1044 * 1043 - * NONE -> QUEUEING -> QUEUED -> DISPATCHING 1044 - * ^ | | 1045 - * | v v 1046 - * \-------------------------------/ 1045 + * State Transitions 1047 1046 * 1048 - * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call 1049 - * sites for explanations on the conditions being waited upon and why they are 1050 - * safe. Transitions out of them into NONE or QUEUED must store_release and the 1051 - * waiters should load_acquire. 1047 + * .------------> NONE (owned by SCX core) 1048 + * | | ^ 1049 + * | enqueue | | direct dispatch 1050 + * | v | 1051 + * | QUEUEING -------' 1052 + * | | 1053 + * | enqueue | 1054 + * | completes | 1055 + * | v 1056 + * | QUEUED (owned by BPF scheduler) 1057 + * | | 1058 + * | dispatch | 1059 + * | | 1060 + * | v 1061 + * | DISPATCHING 1062 + * | | 1063 + * | dispatch | 1064 + * | completes | 1065 + * `---------------' 1052 1066 * 1053 - * Tracking scx_ops_state enables sched_ext core to reliably determine whether 1054 - * any given task can be dispatched by the BPF scheduler at all times and thus 1055 - * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler 1056 - * to try to dispatch any task anytime regardless of its state as the SCX core 1057 - * can safely reject invalid dispatches. 1067 + * State Descriptions 1068 + * 1069 + * - %SCX_OPSS_NONE: 1070 + * Task is owned by the SCX core. 
It's either on a run queue, running, 1071 + * or being manipulated by the core scheduler. The BPF scheduler has no 1072 + * claim on this task. 1073 + * 1074 + * - %SCX_OPSS_QUEUEING: 1075 + * Transitional state while transferring a task from the SCX core to 1076 + * the BPF scheduler. The task's rq lock is held during this state. 1077 + * Since QUEUEING is both entered and exited under the rq lock, dequeue 1078 + * can never observe this state (it would be a BUG). When finishing a 1079 + * dispatch, if the task is still in %SCX_OPSS_QUEUEING the completion 1080 + * path busy-waits for it to leave this state (via wait_ops_state()) 1081 + * before retrying. 1082 + * 1083 + * - %SCX_OPSS_QUEUED: 1084 + * Task is owned by the BPF scheduler. It's on a DSQ (dispatch queue) 1085 + * and the BPF scheduler is responsible for dispatching it. A QSEQ 1086 + * (queue sequence number) is embedded in this state to detect 1087 + * dispatch/dequeue races: if a task is dequeued and re-enqueued, the 1088 + * QSEQ changes and any in-flight dispatch operations targeting the old 1089 + * QSEQ are safely ignored. 1090 + * 1091 + * - %SCX_OPSS_DISPATCHING: 1092 + * Transitional state while transferring a task from the BPF scheduler 1093 + * back to the SCX core. This state indicates the BPF scheduler has 1094 + * selected the task for execution. When dequeue needs to take the task 1095 + * off a DSQ and it is still in %SCX_OPSS_DISPATCHING, the dequeue path 1096 + * busy-waits for it to leave this state (via wait_ops_state()) before 1097 + * proceeding. Exits to %SCX_OPSS_NONE when dispatch completes. 1098 + * 1099 + * Memory Ordering 1100 + * 1101 + * Transitions out of %SCX_OPSS_QUEUEING and %SCX_OPSS_DISPATCHING into 1102 + * %SCX_OPSS_NONE or %SCX_OPSS_QUEUED must use atomic_long_set_release() 1103 + * and waiters must use atomic_long_read_acquire(). This ensures proper 1104 + * synchronization between concurrent operations. 
1105 + * 1106 + * Cross-CPU Task Migration 1107 + * 1108 + * When moving a task in the %SCX_OPSS_DISPATCHING state, we can't simply 1109 + * grab the target CPU's rq lock because a concurrent dequeue might be 1110 + * waiting on %SCX_OPSS_DISPATCHING while holding the source rq lock 1111 + * (deadlock). 1112 + * 1113 + * The sched_ext core uses a "lock dancing" protocol coordinated by 1114 + * p->scx.holding_cpu. When moving a task to a different rq: 1115 + * 1116 + * 1. Verify task can be moved (CPU affinity, migration_disabled, etc.) 1117 + * 2. Set p->scx.holding_cpu to the current CPU 1118 + * 3. Set task state to %SCX_OPSS_NONE; dequeue waits while DISPATCHING 1119 + * is set, so clearing DISPATCHING first prevents the circular wait 1120 + * (safe to lock the rq we need) 1121 + * 4. Unlock the current CPU's rq 1122 + * 5. Lock src_rq (where the task currently lives) 1123 + * 6. Verify p->scx.holding_cpu == current CPU, if not, dequeue won the 1124 + * race (dequeue clears holding_cpu to -1 when it takes the task), in 1125 + * this case migration is aborted 1126 + * 7. If src_rq == dst_rq: clear holding_cpu and enqueue directly 1127 + * into dst_rq's local DSQ (no lock swap needed) 1128 + * 8. Otherwise: call move_remote_task_to_local_dsq(), which releases 1129 + * src_rq, locks dst_rq, and performs the deactivate/activate 1130 + * migration cycle (dst_rq is held on return) 1131 + * 9. Unlock dst_rq and re-lock the current CPU's rq to restore 1132 + * the lock state expected by the caller 1133 + * 1134 + * If any verification fails, abort the migration. 1135 + * 1136 + * This state tracking allows the BPF scheduler to try to dispatch any task 1137 + * at any time regardless of its state. The SCX core can safely 1138 + * reject/ignore invalid dispatches, simplifying the BPF scheduler 1139 + * implementation. 1058 1140 */ 1059 1141 enum scx_ops_state { 1060 1142 SCX_OPSS_NONE, /* owned by the SCX core */
+30 -9
kernel/sched/idle.c
··· 161 161 return cpuidle_enter(drv, dev, next_state); 162 162 } 163 163 164 + static void idle_call_stop_or_retain_tick(bool stop_tick) 165 + { 166 + if (stop_tick || tick_nohz_tick_stopped()) 167 + tick_nohz_idle_stop_tick(); 168 + else 169 + tick_nohz_idle_retain_tick(); 170 + } 171 + 164 172 /** 165 173 * cpuidle_idle_call - the main idle function 166 174 * ··· 178 170 * set, and it returns with polling set. If it ever stops polling, it 179 171 * must clear the polling bit. 180 172 */ 181 - static void cpuidle_idle_call(void) 173 + static void cpuidle_idle_call(bool stop_tick) 182 174 { 183 175 struct cpuidle_device *dev = cpuidle_get_device(); 184 176 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); ··· 194 186 } 195 187 196 188 if (cpuidle_not_available(drv, dev)) { 197 - tick_nohz_idle_stop_tick(); 189 + idle_call_stop_or_retain_tick(stop_tick); 198 190 199 191 default_idle_call(); 200 192 goto exit_idle; ··· 229 221 230 222 next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns); 231 223 call_cpuidle(drv, dev, next_state); 232 - } else { 233 - bool stop_tick = true; 224 + } else if (drv->state_count > 1) { 225 + /* 226 + * stop_tick is expected to be true by default by cpuidle 227 + * governors, which allows them to select idle states with 228 + * target residency above the tick period length. 229 + */ 230 + stop_tick = true; 234 231 235 232 /* 236 233 * Ask the cpuidle framework to choose a convenient idle state. 
237 234 */ 238 235 next_state = cpuidle_select(drv, dev, &stop_tick); 239 236 240 - if (stop_tick || tick_nohz_tick_stopped()) 241 - tick_nohz_idle_stop_tick(); 242 - else 243 - tick_nohz_idle_retain_tick(); 237 + idle_call_stop_or_retain_tick(stop_tick); 244 238 245 239 entered_state = call_cpuidle(drv, dev, next_state); 246 240 /* 247 241 * Give the governor an opportunity to reflect on the outcome 248 242 */ 249 243 cpuidle_reflect(dev, entered_state); 244 + } else { 245 + idle_call_stop_or_retain_tick(stop_tick); 246 + 247 + /* 248 + * If there is only a single idle state (or none), there is 249 + * nothing meaningful for the governor to choose. Skip the 250 + * governor and always use state 0. 251 + */ 252 + call_cpuidle(drv, dev, 0); 250 253 } 251 254 252 255 exit_idle: ··· 278 259 static void do_idle(void) 279 260 { 280 261 int cpu = smp_processor_id(); 262 + bool got_tick = false; 281 263 282 264 /* 283 265 * Check if we need to update blocked load ··· 349 329 tick_nohz_idle_restart_tick(); 350 330 cpu_idle_poll(); 351 331 } else { 352 - cpuidle_idle_call(); 332 + cpuidle_idle_call(got_tick); 353 333 } 334 + got_tick = tick_nohz_idle_got_tick(); 354 335 arch_cpu_idle_exit(); 355 336 } 356 337
+1 -1
kernel/time/time.c
··· 697 697 * 698 698 * Return: jiffies_64 value converted to 64-bit "clock_t" (CLOCKS_PER_SEC) 699 699 */ 700 - u64 jiffies_64_to_clock_t(u64 x) 700 + notrace u64 jiffies_64_to_clock_t(u64 x) 701 701 { 702 702 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 703 703 # if HZ < USER_HZ
+28 -27
kernel/workqueue.c
··· 190 190 int id; /* I: pool ID */ 191 191 unsigned int flags; /* L: flags */ 192 192 193 - unsigned long watchdog_ts; /* L: watchdog timestamp */ 193 + unsigned long last_progress_ts; /* L: last forward progress timestamp */ 194 194 bool cpu_stall; /* WD: stalled cpu bound pool */ 195 195 196 196 /* ··· 1697 1697 WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); 1698 1698 trace_workqueue_activate_work(work); 1699 1699 if (list_empty(&pwq->pool->worklist)) 1700 - pwq->pool->watchdog_ts = jiffies; 1700 + pwq->pool->last_progress_ts = jiffies; 1701 1701 move_linked_works(work, &pwq->pool->worklist, NULL); 1702 1702 __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); 1703 1703 } ··· 2348 2348 */ 2349 2349 if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) { 2350 2350 if (list_empty(&pool->worklist)) 2351 - pool->watchdog_ts = jiffies; 2351 + pool->last_progress_ts = jiffies; 2352 2352 2353 2353 trace_workqueue_activate_work(work); 2354 2354 insert_work(pwq, work, &pool->worklist, work_flags); ··· 3204 3204 worker->current_pwq = pwq; 3205 3205 if (worker->task) 3206 3206 worker->current_at = worker->task->se.sum_exec_runtime; 3207 + worker->current_start = jiffies; 3207 3208 work_data = *work_data_bits(work); 3208 3209 worker->current_color = get_work_color(work_data); 3209 3210 ··· 3353 3352 while ((work = list_first_entry_or_null(&worker->scheduled, 3354 3353 struct work_struct, entry))) { 3355 3354 if (first) { 3356 - worker->pool->watchdog_ts = jiffies; 3355 + worker->pool->last_progress_ts = jiffies; 3357 3356 first = false; 3358 3357 } 3359 3358 process_one_work(worker, work); ··· 4851 4850 pool->cpu = -1; 4852 4851 pool->node = NUMA_NO_NODE; 4853 4852 pool->flags |= POOL_DISASSOCIATED; 4854 - pool->watchdog_ts = jiffies; 4853 + pool->last_progress_ts = jiffies; 4855 4854 INIT_LIST_HEAD(&pool->worklist); 4856 4855 INIT_LIST_HEAD(&pool->idle_list); 4857 4856 hash_init(pool->busy_hash); ··· 6275 6274 { 6276 6275 struct worker_pool *pool = worker->pool; 
6277 6276 6278 - if (pool->flags & WQ_BH) 6277 + if (pool->flags & POOL_BH) 6279 6278 pr_cont("bh%s", 6280 6279 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); 6281 6280 else ··· 6360 6359 pr_cont(" %s", comma ? "," : ""); 6361 6360 pr_cont_worker_id(worker); 6362 6361 pr_cont(":%ps", worker->current_func); 6362 + pr_cont(" for %us", 6363 + jiffies_to_msecs(jiffies - worker->current_start) / 1000); 6363 6364 list_for_each_entry(work, &worker->scheduled, entry) 6364 6365 pr_cont_work(false, work, &pcws); 6365 6366 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); ··· 6465 6462 6466 6463 /* How long the first pending work is waiting for a worker. */ 6467 6464 if (!list_empty(&pool->worklist)) 6468 - hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 6465 + hung = jiffies_to_msecs(jiffies - pool->last_progress_ts) / 1000; 6469 6466 6470 6467 /* 6471 6468 * Defer printing to avoid deadlocks in console drivers that ··· 7583 7580 7584 7581 /* 7585 7582 * Show workers that might prevent the processing of pending work items. 7586 - * The only candidates are CPU-bound workers in the running state. 7587 - * Pending work items should be handled by another idle worker 7588 - * in all other situations. 7583 + * A busy worker that is not running on the CPU (e.g. sleeping in 7584 + * wait_event_idle() with PF_WQ_WORKER cleared) can stall the pool just as 7585 + * effectively as a CPU-bound one, so dump every in-flight worker. 
7589 7586 */ 7590 - static void show_cpu_pool_hog(struct worker_pool *pool) 7587 + static void show_cpu_pool_busy_workers(struct worker_pool *pool) 7591 7588 { 7592 7589 struct worker *worker; 7593 7590 unsigned long irq_flags; ··· 7596 7593 raw_spin_lock_irqsave(&pool->lock, irq_flags); 7597 7594 7598 7595 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 7599 - if (task_is_running(worker->task)) { 7600 - /* 7601 - * Defer printing to avoid deadlocks in console 7602 - * drivers that queue work while holding locks 7603 - * also taken in their write paths. 7604 - */ 7605 - printk_deferred_enter(); 7596 + /* 7597 + * Defer printing to avoid deadlocks in console 7598 + * drivers that queue work while holding locks 7599 + * also taken in their write paths. 7600 + */ 7601 + printk_deferred_enter(); 7606 7602 7607 - pr_info("pool %d:\n", pool->id); 7608 - sched_show_task(worker->task); 7603 + pr_info("pool %d:\n", pool->id); 7604 + sched_show_task(worker->task); 7609 7605 7610 - printk_deferred_exit(); 7611 - } 7606 + printk_deferred_exit(); 7612 7607 } 7613 7608 7614 7609 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); 7615 7610 } 7616 7611 7617 - static void show_cpu_pools_hogs(void) 7612 + static void show_cpu_pools_busy_workers(void) 7618 7613 { 7619 7614 struct worker_pool *pool; 7620 7615 int pi; 7621 7616 7622 - pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 7617 + pr_info("Showing backtraces of busy workers in stalled worker pools:\n"); 7623 7618 7624 7619 rcu_read_lock(); 7625 7620 7626 7621 for_each_pool(pool, pi) { 7627 7622 if (pool->cpu_stall) 7628 - show_cpu_pool_hog(pool); 7623 + show_cpu_pool_busy_workers(pool); 7629 7624 7630 7625 } 7631 7626 ··· 7692 7691 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 7693 7692 else 7694 7693 touched = READ_ONCE(wq_watchdog_touched); 7695 - pool_ts = READ_ONCE(pool->watchdog_ts); 7694 + pool_ts = READ_ONCE(pool->last_progress_ts); 7696 7695 7697 7696 if 
(time_after(pool_ts, touched)) 7698 7697 ts = pool_ts; ··· 7720 7719 show_all_workqueues(); 7721 7720 7722 7721 if (cpu_pool_stall) 7723 - show_cpu_pools_hogs(); 7722 + show_cpu_pools_busy_workers(); 7724 7723 7725 7724 if (lockup_detected) 7726 7725 panic_on_wq_watchdog(max_stall_time);
+1
kernel/workqueue_internal.h
··· 32 32 work_func_t current_func; /* K: function */ 33 33 struct pool_workqueue *current_pwq; /* K: pwq */ 34 34 u64 current_at; /* K: runtime at start or last wakeup */ 35 + unsigned long current_start; /* K: start time of current work item */ 35 36 unsigned int current_color; /* K: color */ 36 37 37 38 int sleeping; /* S: is worker sleeping? */
+3 -3
lib/bootconfig.c
··· 316 316 depth ? "." : ""); 317 317 if (ret < 0) 318 318 return ret; 319 - if (ret > size) { 319 + if (ret >= size) { 320 320 size = 0; 321 321 } else { 322 322 size -= ret; ··· 532 532 static int __init __xbc_open_brace(char *p) 533 533 { 534 534 /* Push the last key as open brace */ 535 - open_brace[brace_index++] = xbc_node_index(last_parent); 536 535 if (brace_index >= XBC_DEPTH_MAX) 537 536 return xbc_parse_error("Exceed max depth of braces", p); 537 + open_brace[brace_index++] = xbc_node_index(last_parent); 538 538 539 539 return 0; 540 540 } ··· 802 802 803 803 /* Brace closing */ 804 804 if (brace_index) { 805 - n = &xbc_nodes[open_brace[brace_index]]; 805 + n = &xbc_nodes[open_brace[brace_index - 1]]; 806 806 return xbc_parse_error("Brace is not closed", 807 807 xbc_node_get_data(n)); 808 808 }
+3
lib/crypto/Makefile
··· 55 55 libaes-$(CONFIG_X86) += x86/aes-aesni.o 56 56 endif # CONFIG_CRYPTO_LIB_AES_ARCH 57 57 58 + # clean-files must be defined unconditionally 59 + clean-files += powerpc/aesp8-ppc.S 60 + 58 61 ################################################################################ 59 62 60 63 obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o
+2 -1
mm/huge_memory.c
··· 2797 2797 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma); 2798 2798 } else { 2799 2799 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); 2800 - _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot); 2800 + _dst_pmd = move_soft_dirty_pmd(src_pmdval); 2801 + _dst_pmd = clear_uffd_wp_pmd(_dst_pmd); 2801 2802 } 2802 2803 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd); 2803 2804
+17 -4
mm/rmap.c
··· 1955 1955 if (userfaultfd_wp(vma)) 1956 1956 return 1; 1957 1957 1958 - return folio_pte_batch(folio, pvmw->pte, pte, max_nr); 1958 + /* 1959 + * If unmap fails, we need to restore the ptes. To avoid accidentally 1960 + * upgrading write permissions for ptes that were not originally 1961 + * writable, and to avoid losing the soft-dirty bit, use the 1962 + * appropriate FPB flags. 1963 + */ 1964 + return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr, 1965 + FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY); 1959 1966 } 1960 1967 1961 1968 /* ··· 2450 2443 __maybe_unused pmd_t pmdval; 2451 2444 2452 2445 if (flags & TTU_SPLIT_HUGE_PMD) { 2446 + /* 2447 + * split_huge_pmd_locked() might leave the 2448 + * folio mapped through PTEs. Retry the walk 2449 + * so we can detect this scenario and properly 2450 + * abort the walk. 2451 + */ 2453 2452 split_huge_pmd_locked(vma, pvmw.address, 2454 2453 pvmw.pmd, true); 2455 - ret = false; 2456 - page_vma_mapped_walk_done(&pvmw); 2457 - break; 2454 + flags &= ~TTU_SPLIT_HUGE_PMD; 2455 + page_vma_mapped_walk_restart(&pvmw); 2456 + continue; 2458 2457 } 2459 2458 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2460 2459 pmdval = pmdp_get(pvmw.pmd);
+4 -7
mm/slub.c
··· 2119 2119 size_t sz = sizeof(struct slabobj_ext) * slab->objects; 2120 2120 struct kmem_cache *obj_exts_cache; 2121 2121 2122 - /* 2123 - * slabobj_ext array for KMALLOC_CGROUP allocations 2124 - * are served from KMALLOC_NORMAL caches. 2125 - */ 2126 - if (!mem_alloc_profiling_enabled()) 2127 - return sz; 2128 - 2129 2122 if (sz > KMALLOC_MAX_CACHE_SIZE) 2130 2123 return sz; 2131 2124 ··· 2790 2797 if (s->flags & SLAB_KMALLOC) 2791 2798 mark_obj_codetag_empty(sheaf); 2792 2799 2800 + VM_WARN_ON_ONCE(sheaf->size > 0); 2793 2801 kfree(sheaf); 2794 2802 2795 2803 stat(s, SHEAF_FREE); ··· 2822 2828 return 0; 2823 2829 } 2824 2830 2831 + static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf); 2825 2832 2826 2833 static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp) 2827 2834 { ··· 2832 2837 return NULL; 2833 2838 2834 2839 if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) { 2840 + sheaf_flush_unused(s, sheaf); 2835 2841 free_empty_sheaf(s, sheaf); 2836 2842 return NULL; 2837 2843 } ··· 4619 4623 * we must be very low on memory so don't bother 4620 4624 * with the barn 4621 4625 */ 4626 + sheaf_flush_unused(s, empty); 4622 4627 free_empty_sheaf(s, empty); 4623 4628 } 4624 4629 } else {
+47 -25
net/atm/lec.c
··· 154 154 /* 0x01 is topology change */ 155 155 156 156 priv = netdev_priv(dev); 157 - atm_force_charge(priv->lecd, skb2->truesize); 158 - sk = sk_atm(priv->lecd); 159 - skb_queue_tail(&sk->sk_receive_queue, skb2); 160 - sk->sk_data_ready(sk); 157 + struct atm_vcc *vcc; 158 + 159 + rcu_read_lock(); 160 + vcc = rcu_dereference(priv->lecd); 161 + if (vcc) { 162 + atm_force_charge(vcc, skb2->truesize); 163 + sk = sk_atm(vcc); 164 + skb_queue_tail(&sk->sk_receive_queue, skb2); 165 + sk->sk_data_ready(sk); 166 + } else { 167 + dev_kfree_skb(skb2); 168 + } 169 + rcu_read_unlock(); 161 170 } 162 171 } 163 172 #endif /* IS_ENABLED(CONFIG_BRIDGE) */ ··· 225 216 int is_rdesc; 226 217 227 218 pr_debug("called\n"); 228 - if (!priv->lecd) { 219 + if (!rcu_access_pointer(priv->lecd)) { 229 220 pr_info("%s:No lecd attached\n", dev->name); 230 221 dev->stats.tx_errors++; 231 222 netif_stop_queue(dev); ··· 458 449 break; 459 450 skb2->len = sizeof(struct atmlec_msg); 460 451 skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg)); 461 - atm_force_charge(priv->lecd, skb2->truesize); 462 - sk = sk_atm(priv->lecd); 463 - skb_queue_tail(&sk->sk_receive_queue, skb2); 464 - sk->sk_data_ready(sk); 452 + struct atm_vcc *vcc; 453 + 454 + rcu_read_lock(); 455 + vcc = rcu_dereference(priv->lecd); 456 + if (vcc) { 457 + atm_force_charge(vcc, skb2->truesize); 458 + sk = sk_atm(vcc); 459 + skb_queue_tail(&sk->sk_receive_queue, skb2); 460 + sk->sk_data_ready(sk); 461 + } else { 462 + dev_kfree_skb(skb2); 463 + } 464 + rcu_read_unlock(); 465 465 } 466 466 } 467 467 #endif /* IS_ENABLED(CONFIG_BRIDGE) */ ··· 486 468 487 469 static void lec_atm_close(struct atm_vcc *vcc) 488 470 { 489 - struct sk_buff *skb; 490 471 struct net_device *dev = (struct net_device *)vcc->proto_data; 491 472 struct lec_priv *priv = netdev_priv(dev); 492 473 493 - priv->lecd = NULL; 474 + rcu_assign_pointer(priv->lecd, NULL); 475 + synchronize_rcu(); 494 476 /* Do something needful? 
*/ 495 477 496 478 netif_stop_queue(dev); 497 479 lec_arp_destroy(priv); 498 - 499 - if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 500 - pr_info("%s closing with messages pending\n", dev->name); 501 - while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) { 502 - atm_return(vcc, skb->truesize); 503 - dev_kfree_skb(skb); 504 - } 505 480 506 481 pr_info("%s: Shut down!\n", dev->name); 507 482 module_put(THIS_MODULE); ··· 521 510 const unsigned char *mac_addr, const unsigned char *atm_addr, 522 511 struct sk_buff *data) 523 512 { 513 + struct atm_vcc *vcc; 524 514 struct sock *sk; 525 515 struct sk_buff *skb; 526 516 struct atmlec_msg *mesg; 527 517 528 - if (!priv || !priv->lecd) 518 + if (!priv || !rcu_access_pointer(priv->lecd)) 529 519 return -1; 520 + 530 521 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 531 522 if (!skb) 532 523 return -1; ··· 545 532 if (atm_addr) 546 533 memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN); 547 534 548 - atm_force_charge(priv->lecd, skb->truesize); 549 - sk = sk_atm(priv->lecd); 535 + rcu_read_lock(); 536 + vcc = rcu_dereference(priv->lecd); 537 + if (!vcc) { 538 + rcu_read_unlock(); 539 + kfree_skb(skb); 540 + return -1; 541 + } 542 + 543 + atm_force_charge(vcc, skb->truesize); 544 + sk = sk_atm(vcc); 550 545 skb_queue_tail(&sk->sk_receive_queue, skb); 551 546 sk->sk_data_ready(sk); 552 547 553 548 if (data != NULL) { 554 549 pr_debug("about to send %d bytes of data\n", data->len); 555 - atm_force_charge(priv->lecd, data->truesize); 550 + atm_force_charge(vcc, data->truesize); 556 551 skb_queue_tail(&sk->sk_receive_queue, data); 557 552 sk->sk_data_ready(sk); 558 553 } 559 554 555 + rcu_read_unlock(); 560 556 return 0; 561 557 } 562 558 ··· 640 618 641 619 atm_return(vcc, skb->truesize); 642 620 if (*(__be16 *) skb->data == htons(priv->lecid) || 643 - !priv->lecd || !(dev->flags & IFF_UP)) { 621 + !rcu_access_pointer(priv->lecd) || !(dev->flags & IFF_UP)) { 644 622 /* 645 623 * Probably looping back, 
or if lecd is missing, 646 624 * lecd has gone down ··· 775 753 priv = netdev_priv(dev_lec[i]); 776 754 } else { 777 755 priv = netdev_priv(dev_lec[i]); 778 - if (priv->lecd) 756 + if (rcu_access_pointer(priv->lecd)) 779 757 return -EADDRINUSE; 780 758 } 781 759 lec_arp_init(priv); 782 760 priv->itfnum = i; /* LANE2 addition */ 783 - priv->lecd = vcc; 761 + rcu_assign_pointer(priv->lecd, vcc); 784 762 vcc->dev = &lecatm_dev; 785 763 vcc_insert_socket(sk_atm(vcc)); 786 764
+1 -1
net/atm/lec.h
··· 91 91 */ 92 92 spinlock_t lec_arp_lock; 93 93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ 94 - struct atm_vcc *lecd; 94 + struct atm_vcc __rcu *lecd; 95 95 struct delayed_work lec_arp_work; /* C10 */ 96 96 unsigned int maximum_unknown_frame_count; 97 97 /*
+3
net/batman-adv/bat_iv_ogm.c
··· 473 473 if (aggregated_bytes > max_bytes) 474 474 return false; 475 475 476 + if (skb_tailroom(forw_packet->skb) < packet_len) 477 + return false; 478 + 476 479 if (packet_num >= BATADV_MAX_AGGREGATION_PACKETS) 477 480 return false; 478 481
+2 -2
net/bluetooth/hci_conn.c
··· 1944 1944 return false; 1945 1945 1946 1946 done: 1947 + conn->iso_qos = *qos; 1948 + 1947 1949 if (hci_cmd_sync_queue(hdev, set_cig_params_sync, 1948 1950 UINT_PTR(qos->ucast.cig), NULL) < 0) 1949 1951 return false; ··· 2015 2013 } 2016 2014 2017 2015 hci_conn_hold(cis); 2018 - 2019 - cis->iso_qos = *qos; 2020 2016 cis->state = BT_BOUND; 2021 2017 2022 2018 return cis;
+1 -1
net/bluetooth/hci_sync.c
··· 6627 6627 * state. 6628 6628 */ 6629 6629 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { 6630 - hci_scan_disable_sync(hdev); 6631 6630 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); 6631 + hci_scan_disable_sync(hdev); 6632 6632 } 6633 6633 6634 6634 /* Update random address, but set require_privacy to false so
+14 -2
net/bluetooth/hidp/core.c
··· 986 986 skb_queue_purge(&session->intr_transmit); 987 987 fput(session->intr_sock->file); 988 988 fput(session->ctrl_sock->file); 989 - l2cap_conn_put(session->conn); 989 + if (session->conn) 990 + l2cap_conn_put(session->conn); 990 991 kfree(session); 991 992 } 992 993 ··· 1165 1164 1166 1165 down_write(&hidp_session_sem); 1167 1166 1167 + /* Drop L2CAP reference immediately to indicate that 1168 + * l2cap_unregister_user() shall not be called as it is already 1169 + * considered removed. 1170 + */ 1171 + if (session->conn) { 1172 + l2cap_conn_put(session->conn); 1173 + session->conn = NULL; 1174 + } 1175 + 1168 1176 hidp_session_terminate(session); 1169 1177 1170 1178 cancel_work_sync(&session->dev_init); ··· 1311 1301 * Instead, this call has the same semantics as if user-space tried to 1312 1302 * delete the session. 1313 1303 */ 1314 - l2cap_unregister_user(session->conn, &session->user); 1304 + if (session->conn) 1305 + l2cap_unregister_user(session->conn, &session->user); 1306 + 1315 1307 hidp_session_put(session); 1316 1308 1317 1309 module_put_and_kthread_exit(0);
+31 -20
net/bluetooth/l2cap_core.c
··· 1678 1678 1679 1679 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user) 1680 1680 { 1681 - struct hci_dev *hdev = conn->hcon->hdev; 1682 1681 int ret; 1683 1682 1684 1683 /* We need to check whether l2cap_conn is registered. If it is not, we 1685 - * must not register the l2cap_user. l2cap_conn_del() is unregisters 1686 - * l2cap_conn objects, but doesn't provide its own locking. Instead, it 1687 - * relies on the parent hci_conn object to be locked. This itself relies 1688 - * on the hci_dev object to be locked. So we must lock the hci device 1689 - * here, too. */ 1684 + * must not register the l2cap_user. l2cap_conn_del() unregisters 1685 + * l2cap_conn objects under conn->lock, and we use the same lock here 1686 + * to protect access to conn->users and conn->hchan. 1687 + */ 1690 1688 1691 - hci_dev_lock(hdev); 1689 + mutex_lock(&conn->lock); 1692 1690 1693 1691 if (!list_empty(&user->list)) { 1694 1692 ret = -EINVAL; ··· 1707 1709 ret = 0; 1708 1710 1709 1711 out_unlock: 1710 - hci_dev_unlock(hdev); 1712 + mutex_unlock(&conn->lock); 1711 1713 return ret; 1712 1714 } 1713 1715 EXPORT_SYMBOL(l2cap_register_user); 1714 1716 1715 1717 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user) 1716 1718 { 1717 - struct hci_dev *hdev = conn->hcon->hdev; 1718 - 1719 - hci_dev_lock(hdev); 1719 + mutex_lock(&conn->lock); 1720 1720 1721 1721 if (list_empty(&user->list)) 1722 1722 goto out_unlock; ··· 1723 1727 user->remove(conn, user); 1724 1728 1725 1729 out_unlock: 1726 - hci_dev_unlock(hdev); 1730 + mutex_unlock(&conn->lock); 1727 1731 } 1728 1732 EXPORT_SYMBOL(l2cap_unregister_user); 1729 1733 ··· 4612 4616 4613 4617 switch (type) { 4614 4618 case L2CAP_IT_FEAT_MASK: 4615 - conn->feat_mask = get_unaligned_le32(rsp->data); 4619 + if (cmd_len >= sizeof(*rsp) + sizeof(u32)) 4620 + conn->feat_mask = get_unaligned_le32(rsp->data); 4616 4621 4617 4622 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 4618 4623 struct l2cap_info_req 
req; ··· 4632 4635 break; 4633 4636 4634 4637 case L2CAP_IT_FIXED_CHAN: 4635 - conn->remote_fixed_chan = rsp->data[0]; 4638 + if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0])) 4639 + conn->remote_fixed_chan = rsp->data[0]; 4636 4640 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 4637 4641 conn->info_ident = 0; 4638 4642 ··· 5057 5059 u16 mtu, mps; 5058 5060 __le16 psm; 5059 5061 u8 result, rsp_len = 0; 5060 - int i, num_scid; 5062 + int i, num_scid = 0; 5061 5063 bool defer = false; 5062 5064 5063 5065 if (!enable_ecred) ··· 5066 5068 memset(pdu, 0, sizeof(*pdu)); 5067 5069 5068 5070 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { 5071 + result = L2CAP_CR_LE_INVALID_PARAMS; 5072 + goto response; 5073 + } 5074 + 5075 + /* Check if there are no pending channels with the same ident */ 5076 + __l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer, 5077 + &num_scid); 5078 + if (num_scid) { 5069 5079 result = L2CAP_CR_LE_INVALID_PARAMS; 5070 5080 goto response; 5071 5081 } ··· 5430 5424 u8 *data) 5431 5425 { 5432 5426 struct l2cap_chan *chan, *tmp; 5433 - struct l2cap_ecred_conn_rsp *rsp = (void *) data; 5427 + struct l2cap_ecred_reconf_rsp *rsp = (void *)data; 5434 5428 u16 result; 5435 5429 5436 5430 if (cmd_len < sizeof(*rsp)) ··· 5438 5432 5439 5433 result = __le16_to_cpu(rsp->result); 5440 5434 5441 - BT_DBG("result 0x%4.4x", rsp->result); 5435 + BT_DBG("result 0x%4.4x", result); 5442 5436 5443 5437 if (!result) 5444 5438 return 0; ··· 6668 6662 return -ENOBUFS; 6669 6663 } 6670 6664 6671 - if (chan->imtu < skb->len) { 6672 - BT_ERR("Too big LE L2CAP PDU"); 6665 + if (skb->len > chan->imtu) { 6666 + BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len, 6667 + chan->imtu); 6668 + l2cap_send_disconn_req(chan, ECONNRESET); 6673 6669 return -ENOBUFS; 6674 6670 } 6675 6671 ··· 6697 6689 sdu_len, skb->len, chan->imtu); 6698 6690 6699 6691 if (sdu_len > chan->imtu) { 6700 - BT_ERR("Too big LE L2CAP SDU length received"); 6692 + 
BT_ERR("Too big LE L2CAP SDU length: len %u > %u", 6693 + skb->len, sdu_len); 6694 + l2cap_send_disconn_req(chan, ECONNRESET); 6701 6695 err = -EMSGSIZE; 6702 6696 goto failed; 6703 6697 } ··· 6735 6725 6736 6726 if (chan->sdu->len + skb->len > chan->sdu_len) { 6737 6727 BT_ERR("Too much LE L2CAP data received"); 6728 + l2cap_send_disconn_req(chan, ECONNRESET); 6738 6729 err = -EINVAL; 6739 6730 goto failed; 6740 6731 }
+2 -5
net/bluetooth/mgmt.c
··· 2195 2195 sk = cmd->sk; 2196 2196 2197 2197 if (status) { 2198 - mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 2199 - status); 2200 - mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true, 2201 - cmd_status_rsp, &status); 2198 + mgmt_cmd_status(cmd->sk, hdev->id, cmd->opcode, status); 2202 2199 goto done; 2203 2200 } 2204 2201 ··· 5374 5377 5375 5378 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, 5376 5379 mgmt_status(status), &rp, sizeof(rp)); 5377 - mgmt_pending_remove(cmd); 5380 + mgmt_pending_free(cmd); 5378 5381 5379 5382 hci_dev_unlock(hdev); 5380 5383 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
+1 -1
net/bluetooth/smp.c
··· 2743 2743 if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) && 2744 2744 !crypto_memneq(key, smp->local_pk, 64)) { 2745 2745 bt_dev_err(hdev, "Remote and local public keys are identical"); 2746 - return SMP_UNSPECIFIED; 2746 + return SMP_DHKEY_CHECK_FAILED; 2747 2747 } 2748 2748 2749 2749 memcpy(smp->remote_pk, key, 64);
+2 -2
net/bridge/br_cfm.c
··· 576 576 577 577 /* Empty and free peer MEP list */ 578 578 hlist_for_each_entry_safe(peer_mep, n_store, &mep->peer_mep_list, head) { 579 - cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork); 579 + disable_delayed_work_sync(&peer_mep->ccm_rx_dwork); 580 580 hlist_del_rcu(&peer_mep->head); 581 581 kfree_rcu(peer_mep, rcu); 582 582 } ··· 732 732 return -ENOENT; 733 733 } 734 734 735 - cc_peer_disable(peer_mep); 735 + disable_delayed_work_sync(&peer_mep->ccm_rx_dwork); 736 736 737 737 hlist_del_rcu(&peer_mep->head); 738 738 kfree_rcu(peer_mep, rcu);
+4 -2
net/ceph/auth.c
··· 205 205 s32 result; 206 206 u64 global_id; 207 207 void *payload, *payload_end; 208 - int payload_len; 208 + u32 payload_len; 209 209 char *result_msg; 210 - int result_msg_len; 210 + u32 result_msg_len; 211 211 int ret = -EINVAL; 212 212 213 213 mutex_lock(&ac->mutex); ··· 217 217 result = ceph_decode_32(&p); 218 218 global_id = ceph_decode_64(&p); 219 219 payload_len = ceph_decode_32(&p); 220 + ceph_decode_need(&p, end, payload_len, bad); 220 221 payload = p; 221 222 p += payload_len; 222 223 ceph_decode_need(&p, end, sizeof(u32), bad); 223 224 result_msg_len = ceph_decode_32(&p); 225 + ceph_decode_need(&p, end, result_msg_len, bad); 224 226 result_msg = p; 225 227 p += result_msg_len; 226 228 if (p != end)
+21 -10
net/ceph/messenger_v2.c
··· 392 392 int head_len; 393 393 int rem_len; 394 394 395 - BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN); 395 + BUG_ON(ctrl_len < 1 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN); 396 396 397 397 if (secure) { 398 398 head_len = CEPH_PREAMBLE_SECURE_LEN; ··· 401 401 head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN; 402 402 } 403 403 } else { 404 - head_len = CEPH_PREAMBLE_PLAIN_LEN; 405 - if (ctrl_len) 406 - head_len += ctrl_len + CEPH_CRC_LEN; 404 + head_len = CEPH_PREAMBLE_PLAIN_LEN + ctrl_len + CEPH_CRC_LEN; 407 405 } 408 406 return head_len; 409 407 } ··· 526 528 desc->fd_aligns[i] = ceph_decode_16(&p); 527 529 } 528 530 529 - if (desc->fd_lens[0] < 0 || 531 + /* 532 + * This would fire for FRAME_TAG_WAIT (it has one empty 533 + * segment), but we should never get it as client. 534 + */ 535 + if (desc->fd_lens[0] < 1 || 530 536 desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) { 531 537 pr_err("bad control segment length %d\n", desc->fd_lens[0]); 532 538 return -EINVAL; 533 539 } 540 + 534 541 if (desc->fd_lens[1] < 0 || 535 542 desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) { 536 543 pr_err("bad front segment length %d\n", desc->fd_lens[1]); ··· 552 549 return -EINVAL; 553 550 } 554 551 555 - /* 556 - * This would fire for FRAME_TAG_WAIT (it has one empty 557 - * segment), but we should never get it as client. 
558 - */ 559 552 if (!desc->fd_lens[desc->fd_seg_cnt - 1]) { 560 553 pr_err("last segment empty, segment count %d\n", 561 554 desc->fd_seg_cnt); ··· 2832 2833 void *p, void *end) 2833 2834 { 2834 2835 struct ceph_frame_desc *desc = &con->v2.in_desc; 2835 - struct ceph_msg_header2 *hdr2 = p; 2836 + struct ceph_msg_header2 *hdr2; 2836 2837 struct ceph_msg_header hdr; 2837 2838 int skip; 2838 2839 int ret; 2839 2840 u64 seq; 2841 + 2842 + ceph_decode_need(&p, end, sizeof(*hdr2), bad); 2843 + hdr2 = p; 2840 2844 2841 2845 /* verify seq# */ 2842 2846 seq = le64_to_cpu(hdr2->seq); ··· 2871 2869 WARN_ON(!con->in_msg); 2872 2870 WARN_ON(con->in_msg->con != con); 2873 2871 return 1; 2872 + 2873 + bad: 2874 + pr_err("failed to decode message header\n"); 2875 + return -EINVAL; 2874 2876 } 2875 2877 2876 2878 static int process_message(struct ceph_connection *con) ··· 2903 2897 2904 2898 if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE) 2905 2899 return process_control(con, p, end); 2900 + 2901 + if (con->state != CEPH_CON_S_OPEN) { 2902 + con->error_msg = "protocol error, unexpected message"; 2903 + return -EINVAL; 2904 + } 2906 2905 2907 2906 ret = process_message_header(con, p, end); 2908 2907 if (ret < 0)
+3 -3
net/ceph/mon_client.c
··· 72 72 struct ceph_monmap *monmap = NULL; 73 73 struct ceph_fsid fsid; 74 74 u32 struct_len; 75 - int blob_len; 76 - int num_mon; 75 + u32 blob_len; 76 + u32 num_mon; 77 77 u8 struct_v; 78 78 u32 epoch; 79 79 int ret; ··· 112 112 } 113 113 ceph_decode_32_safe(p, end, num_mon, e_inval); 114 114 115 - dout("%s fsid %pU epoch %u num_mon %d\n", __func__, &fsid, epoch, 115 + dout("%s fsid %pU epoch %u num_mon %u\n", __func__, &fsid, epoch, 116 116 num_mon); 117 117 if (num_mon > CEPH_MAX_MON) 118 118 goto e_inval;
+3 -6
net/ethernet/eth.c
··· 193 193 } 194 194 EXPORT_SYMBOL(eth_type_trans); 195 195 196 - /** 197 - * eth_header_parse - extract hardware address from packet 198 - * @skb: packet to extract header from 199 - * @haddr: destination buffer 200 - */ 201 - int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr) 196 + int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, 197 + unsigned char *haddr) 202 198 { 203 199 const struct ethhdr *eth = eth_hdr(skb); 200 + 204 201 memcpy(haddr, eth->h_source, ETH_ALEN); 205 202 return ETH_ALEN; 206 203 }
+3 -1
net/ipv4/icmp.c
··· 1078 1078 1079 1079 static bool icmp_tag_validation(int proto) 1080 1080 { 1081 + const struct net_protocol *ipprot; 1081 1082 bool ok; 1082 1083 1083 1084 rcu_read_lock(); 1084 - ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation; 1085 + ipprot = rcu_dereference(inet_protos[proto]); 1086 + ok = ipprot ? ipprot->icmp_strict_tag_validation : false; 1085 1087 rcu_read_unlock(); 1086 1088 return ok; 1087 1089 }
+2 -1
net/ipv4/ip_gre.c
··· 919 919 return -(t->hlen + sizeof(*iph)); 920 920 } 921 921 922 - static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) 922 + static int ipgre_header_parse(const struct sk_buff *skb, const struct net_device *dev, 923 + unsigned char *haddr) 923 924 { 924 925 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb); 925 926 memcpy(haddr, &iph->saddr, 4);
+4
net/ipv6/exthdrs.c
··· 379 379 hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); 380 380 381 381 idev = __in6_dev_get(skb->dev); 382 + if (!idev) { 383 + kfree_skb(skb); 384 + return -1; 385 + } 382 386 383 387 accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled), 384 388 READ_ONCE(idev->cnf.seg6_enabled));
+2
net/ipv6/seg6_hmac.c
··· 184 184 int require_hmac; 185 185 186 186 idev = __in6_dev_get(skb->dev); 187 + if (!idev) 188 + return false; 187 189 188 190 srh = (struct ipv6_sr_hdr *)skb_transport_header(skb); 189 191
+6 -6
net/mac80211/cfg.c
··· 1897 1897 1898 1898 __sta_info_flush(sdata, true, link_id, NULL); 1899 1899 1900 - ieee80211_remove_link_keys(link, &keys); 1901 - if (!list_empty(&keys)) { 1902 - synchronize_net(); 1903 - ieee80211_free_key_list(local, &keys); 1904 - } 1905 - 1906 1900 ieee80211_stop_mbssid(sdata); 1907 1901 RCU_INIT_POINTER(link_conf->tx_bss_conf, NULL); 1908 1902 ··· 1907 1913 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1908 1914 ieee80211_link_info_change_notify(sdata, link, 1909 1915 BSS_CHANGED_BEACON_ENABLED); 1916 + 1917 + ieee80211_remove_link_keys(link, &keys); 1918 + if (!list_empty(&keys)) { 1919 + synchronize_net(); 1920 + ieee80211_free_key_list(local, &keys); 1921 + } 1910 1922 1911 1923 if (sdata->wdev.links[link_id].cac_started) { 1912 1924 chandef = link_conf->chanreq.oper;
+4 -2
net/mac80211/chan.c
··· 582 582 rcu_read_lock(); 583 583 list_for_each_entry_rcu(sta, &local->sta_list, 584 584 list) { 585 - struct ieee80211_sub_if_data *sdata = sta->sdata; 585 + struct ieee80211_sub_if_data *sdata; 586 586 enum ieee80211_sta_rx_bandwidth new_sta_bw; 587 587 unsigned int link_id; 588 588 589 589 if (!ieee80211_sdata_running(sta->sdata)) 590 590 continue; 591 591 592 - for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) { 592 + sdata = get_bss_sdata(sta->sdata); 593 + 594 + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { 593 595 struct ieee80211_link_data *link = 594 596 rcu_dereference(sdata->link[link_id]); 595 597 struct ieee80211_bss_conf *link_conf;
+5 -9
net/mac80211/debugfs.c
··· 320 320 static ssize_t aql_enable_write(struct file *file, const char __user *user_buf, 321 321 size_t count, loff_t *ppos) 322 322 { 323 - bool aql_disabled = static_key_false(&aql_disable.key); 324 323 char buf[3]; 325 324 size_t len; 326 325 ··· 334 335 if (len > 0 && buf[len - 1] == '\n') 335 336 buf[len - 1] = 0; 336 337 337 - if (buf[0] == '0' && buf[1] == '\0') { 338 - if (!aql_disabled) 339 - static_branch_inc(&aql_disable); 340 - } else if (buf[0] == '1' && buf[1] == '\0') { 341 - if (aql_disabled) 342 - static_branch_dec(&aql_disable); 343 - } else { 338 + if (buf[0] == '0' && buf[1] == '\0') 339 + static_branch_enable(&aql_disable); 340 + else if (buf[0] == '1' && buf[1] == '\0') 341 + static_branch_disable(&aql_disable); 342 + else 344 343 return -EINVAL; 345 - } 346 344 347 345 return count; 348 346 }
+3
net/mac80211/mesh.c
··· 78 78 * - MDA enabled 79 79 * - Power management control on fc 80 80 */ 81 + if (!ie->mesh_config) 82 + return false; 83 + 81 84 if (!(ifmsh->mesh_id_len == ie->mesh_id_len && 82 85 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && 83 86 (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
+5 -2
net/mac80211/sta_info.c
··· 2782 2782 } 2783 2783 2784 2784 link_sinfo->inactive_time = 2785 - jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, link_id)); 2785 + jiffies_delta_to_msecs(jiffies - 2786 + ieee80211_sta_last_active(sta, 2787 + link_id)); 2786 2788 2787 2789 if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 2788 2790 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { ··· 3017 3015 sinfo->connected_time = ktime_get_seconds() - sta->last_connected; 3018 3016 sinfo->assoc_at = sta->assoc_at; 3019 3017 sinfo->inactive_time = 3020 - jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, -1)); 3018 + jiffies_delta_to_msecs(jiffies - 3019 + ieee80211_sta_last_active(sta, -1)); 3021 3020 3022 3021 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 3023 3022 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
+1 -1
net/mac80211/tdls.c
··· 1444 1444 } 1445 1445 1446 1446 sta = sta_info_get(sdata, peer); 1447 - if (!sta) 1447 + if (!sta || !sta->sta.tdls) 1448 1448 return -ENOLINK; 1449 1449 1450 1450 iee80211_tdls_recalc_chanctx(sdata, sta);
+3 -1
net/mac80211/tx.c
··· 1896 1896 struct ieee80211_tx_data tx; 1897 1897 struct sk_buff *skb2; 1898 1898 1899 - if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) 1899 + if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) { 1900 + kfree_skb(skb); 1900 1901 return false; 1902 + } 1901 1903 1902 1904 info->band = band; 1903 1905 info->control.vif = vif;
+3 -1
net/mac802154/iface.c
··· 469 469 } 470 470 471 471 static int 472 - mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) 472 + mac802154_header_parse(const struct sk_buff *skb, 473 + const struct net_device *dev, 474 + unsigned char *haddr) 473 475 { 474 476 struct ieee802154_hdr hdr; 475 477
+1
net/mpls/af_mpls.c
··· 2851 2851 rtnl_af_unregister(&mpls_af_ops); 2852 2852 out_unregister_dev_type: 2853 2853 dev_remove_pack(&mpls_packet_type); 2854 + unregister_netdevice_notifier(&mpls_dev_notifier); 2854 2855 out_unregister_pernet: 2855 2856 unregister_pernet_subsys(&mpls_net_ops); 2856 2857 goto out;
+1 -1
net/mptcp/pm_kernel.c
··· 838 838 static int mptcp_pm_nl_create_listen_socket(struct sock *sk, 839 839 struct mptcp_pm_addr_entry *entry) 840 840 { 841 - bool is_ipv6 = sk->sk_family == AF_INET6; 841 + bool is_ipv6 = entry->addr.family == AF_INET6; 842 842 int addrlen = sizeof(struct sockaddr_in); 843 843 struct sockaddr_storage addr; 844 844 struct sock *newsk, *ssk;
+1 -1
net/netfilter/nf_bpf_link.c
··· 170 170 171 171 static const struct bpf_link_ops bpf_nf_link_lops = { 172 172 .release = bpf_nf_link_release, 173 - .dealloc = bpf_nf_link_dealloc, 173 + .dealloc_deferred = bpf_nf_link_dealloc, 174 174 .detach = bpf_nf_link_detach, 175 175 .show_fdinfo = bpf_nf_link_show_info, 176 176 .fill_link_info = bpf_nf_link_fill_link_info,
+4
net/netfilter/nf_conntrack_h323_asn1.c
··· 331 331 if (nf_h323_error_boundary(bs, 0, 2)) 332 332 return H323_ERROR_BOUND; 333 333 len = get_bits(bs, 2) + 1; 334 + if (nf_h323_error_boundary(bs, len, 0)) 335 + return H323_ERROR_BOUND; 334 336 BYTE_ALIGN(bs); 335 337 if (base && (f->attr & DECODE)) { /* timeToLive */ 336 338 unsigned int v = get_uint(bs, len) + f->lb; ··· 924 922 break; 925 923 p++; 926 924 len--; 925 + if (len <= 0) 926 + break; 927 927 return DecodeH323_UserInformation(buf, p, len, 928 928 &q931->UUIE); 929 929 }
+26 -2
net/netfilter/nf_conntrack_netlink.c
··· 3210 3210 { 3211 3211 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3212 3212 struct nf_conn *ct = cb->data; 3213 - struct nf_conn_help *help = nfct_help(ct); 3213 + struct nf_conn_help *help; 3214 3214 u_int8_t l3proto = nfmsg->nfgen_family; 3215 3215 unsigned long last_id = cb->args[1]; 3216 3216 struct nf_conntrack_expect *exp; 3217 3217 3218 3218 if (cb->args[0]) 3219 + return 0; 3220 + 3221 + help = nfct_help(ct); 3222 + if (!help) 3219 3223 return 0; 3220 3224 3221 3225 rcu_read_lock(); ··· 3251 3247 return skb->len; 3252 3248 } 3253 3249 3250 + static int ctnetlink_dump_exp_ct_start(struct netlink_callback *cb) 3251 + { 3252 + struct nf_conn *ct = cb->data; 3253 + 3254 + if (!refcount_inc_not_zero(&ct->ct_general.use)) 3255 + return -ENOENT; 3256 + return 0; 3257 + } 3258 + 3259 + static int ctnetlink_dump_exp_ct_done(struct netlink_callback *cb) 3260 + { 3261 + struct nf_conn *ct = cb->data; 3262 + 3263 + if (ct) 3264 + nf_ct_put(ct); 3265 + return 0; 3266 + } 3267 + 3254 3268 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, 3255 3269 struct sk_buff *skb, 3256 3270 const struct nlmsghdr *nlh, ··· 3284 3262 struct nf_conntrack_zone zone; 3285 3263 struct netlink_dump_control c = { 3286 3264 .dump = ctnetlink_exp_ct_dump_table, 3265 + .start = ctnetlink_dump_exp_ct_start, 3266 + .done = ctnetlink_dump_exp_ct_done, 3287 3267 }; 3288 3268 3289 3269 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, ··· 3487 3463 3488 3464 #if IS_ENABLED(CONFIG_NF_NAT) 3489 3465 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { 3490 - [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 }, 3466 + [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY), 3491 3467 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED }, 3492 3468 }; 3493 3469 #endif
+2 -1
net/netfilter/nf_conntrack_proto_sctp.c
··· 582 582 } 583 583 584 584 static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = { 585 - [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 }, 585 + [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8, 586 + SCTP_CONNTRACK_HEARTBEAT_SENT), 586 587 [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 }, 587 588 [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 }, 588 589 };
+5 -1
net/netfilter/nf_conntrack_sip.c
··· 1534 1534 { 1535 1535 struct tcphdr *th, _tcph; 1536 1536 unsigned int dataoff, datalen; 1537 - unsigned int matchoff, matchlen, clen; 1537 + unsigned int matchoff, matchlen; 1538 1538 unsigned int msglen, origlen; 1539 1539 const char *dptr, *end; 1540 1540 s16 diff, tdiff = 0; 1541 1541 int ret = NF_ACCEPT; 1542 + unsigned long clen; 1542 1543 bool term; 1543 1544 1544 1545 if (ctinfo != IP_CT_ESTABLISHED && ··· 1572 1571 1573 1572 clen = simple_strtoul(dptr + matchoff, (char **)&end, 10); 1574 1573 if (dptr + matchoff == end) 1574 + break; 1575 + 1576 + if (clen > datalen) 1575 1577 break; 1576 1578 1577 1579 term = false;
+1
net/netfilter/nf_flow_table_ip.c
··· 738 738 switch (tuple->encap[i].proto) { 739 739 case htons(ETH_P_8021Q): 740 740 case htons(ETH_P_8021AD): 741 + skb_reset_mac_header(skb); 741 742 if (skb_vlan_push(skb, tuple->encap[i].proto, 742 743 tuple->encap[i].id) < 0) 743 744 return -1;
+7 -19
net/netfilter/nf_tables_api.c
··· 6686 6686 } 6687 6687 } 6688 6688 6689 - static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, 6690 - struct nft_set_elem_expr *elem_expr) 6689 + void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, 6690 + struct nft_set_elem_expr *elem_expr) 6691 6691 { 6692 6692 struct nft_expr *expr; 6693 6693 u32 size; ··· 7096 7096 } 7097 7097 7098 7098 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, 7099 - const struct nlattr *attr, u32 nlmsg_flags, 7100 - bool last) 7099 + const struct nlattr *attr, u32 nlmsg_flags) 7101 7100 { 7102 7101 struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {}; 7103 7102 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; ··· 7383 7384 if (flags) 7384 7385 *nft_set_ext_flags(ext) = flags; 7385 7386 7386 - if (last) 7387 - elem.flags = NFT_SET_ELEM_INTERNAL_LAST; 7388 - else 7389 - elem.flags = 0; 7390 - 7391 7387 if (obj) 7392 7388 *nft_set_ext_obj(ext) = obj; 7393 7389 ··· 7547 7553 nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); 7548 7554 7549 7555 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 7550 - err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags, 7551 - nla_is_last(attr, rem)); 7556 + err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags); 7552 7557 if (err < 0) { 7553 7558 NL_SET_BAD_ATTR(extack, attr); 7554 7559 return err; ··· 7671 7678 } 7672 7679 7673 7680 static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, 7674 - const struct nlattr *attr, bool last) 7681 + const struct nlattr *attr) 7675 7682 { 7676 7683 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; 7677 7684 struct nft_set_ext_tmpl tmpl; ··· 7738 7745 ext = nft_set_elem_ext(set, elem.priv); 7739 7746 if (flags) 7740 7747 *nft_set_ext_flags(ext) = flags; 7741 - 7742 - if (last) 7743 - elem.flags = NFT_SET_ELEM_INTERNAL_LAST; 7744 - else 7745 - elem.flags = 0; 7746 7748 7747 7749 trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set); 7748 7750 if (trans == NULL) ··· 7889 
7901 return nft_set_flush(&ctx, set, genmask); 7890 7902 7891 7903 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 7892 - err = nft_del_setelem(&ctx, set, attr, 7893 - nla_is_last(attr, rem)); 7904 + err = nft_del_setelem(&ctx, set, attr); 7894 7905 if (err == -ENOENT && 7895 7906 NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSETELEM) 7896 7907 continue; ··· 9142 9155 return 0; 9143 9156 9144 9157 err_flowtable_hooks: 9158 + synchronize_rcu(); 9145 9159 nft_trans_destroy(trans); 9146 9160 err_flowtable_trans: 9147 9161 nft_hooks_destroy(&flowtable->hook_list);
+4
net/netfilter/nft_ct.c
··· 23 23 #include <net/netfilter/nf_conntrack_l4proto.h> 24 24 #include <net/netfilter/nf_conntrack_expect.h> 25 25 #include <net/netfilter/nf_conntrack_seqadj.h> 26 + #include "nf_internals.h" 26 27 27 28 struct nft_ct_helper_obj { 28 29 struct nf_conntrack_helper *helper4; ··· 544 543 #endif 545 544 #ifdef CONFIG_NF_CONNTRACK_ZONES 546 545 case NFT_CT_ZONE: 546 + nf_queue_nf_hook_drop(ctx->net); 547 547 mutex_lock(&nft_ct_pcpu_mutex); 548 548 if (--nft_ct_pcpu_template_refcnt == 0) 549 549 nft_ct_tmpl_put_pcpu(); ··· 971 969 struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 972 970 struct nf_ct_timeout *timeout = priv->timeout; 973 971 972 + nf_queue_nf_hook_drop(ctx->net); 974 973 nf_ct_untimeout(ctx->net, timeout); 975 974 nf_ct_netns_put(ctx->net, ctx->family); 976 975 kfree(priv->timeout); ··· 1104 1101 { 1105 1102 struct nft_ct_helper_obj *priv = nft_obj_data(obj); 1106 1103 1104 + nf_queue_nf_hook_drop(ctx->net); 1107 1105 if (priv->helper4) 1108 1106 nf_conntrack_helper_put(priv->helper4); 1109 1107 if (priv->helper6)
+9 -1
net/netfilter/nft_dynset.c
··· 30 30 const struct nft_set_ext *ext) 31 31 { 32 32 struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext); 33 + struct nft_ctx ctx = { 34 + .net = read_pnet(&priv->set->net), 35 + .family = priv->set->table->family, 36 + }; 33 37 struct nft_expr *expr; 34 38 int i; 35 39 36 40 for (i = 0; i < priv->num_exprs; i++) { 37 41 expr = nft_setelem_expr_at(elem_expr, elem_expr->size); 38 42 if (nft_expr_clone(expr, priv->expr_array[i], GFP_ATOMIC) < 0) 39 - return -1; 43 + goto err_out; 40 44 41 45 elem_expr->size += priv->expr_array[i]->ops->size; 42 46 } 43 47 44 48 return 0; 49 + err_out: 50 + nft_set_elem_expr_destroy(&ctx, elem_expr); 51 + 52 + return -1; 45 53 } 46 54 47 55 struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
+10 -61
net/netfilter/nft_set_rbtree.c
··· 304 304 priv->start_rbe_cookie = (unsigned long)rbe; 305 305 } 306 306 307 - static void nft_rbtree_set_start_cookie_open(struct nft_rbtree *priv, 308 - const struct nft_rbtree_elem *rbe, 309 - unsigned long open_interval) 310 - { 311 - priv->start_rbe_cookie = (unsigned long)rbe | open_interval; 312 - } 313 - 314 - #define NFT_RBTREE_OPEN_INTERVAL 1UL 315 - 316 307 static bool nft_rbtree_cmp_start_cookie(struct nft_rbtree *priv, 317 308 const struct nft_rbtree_elem *rbe) 318 309 { 319 - return (priv->start_rbe_cookie & ~NFT_RBTREE_OPEN_INTERVAL) == (unsigned long)rbe; 310 + return priv->start_rbe_cookie == (unsigned long)rbe; 320 311 } 321 312 322 313 static bool nft_rbtree_insert_same_interval(const struct net *net, ··· 337 346 338 347 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, 339 348 struct nft_rbtree_elem *new, 340 - struct nft_elem_priv **elem_priv, u64 tstamp, bool last) 349 + struct nft_elem_priv **elem_priv, u64 tstamp) 341 350 { 342 351 struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL, *rbe_prev; 343 352 struct rb_node *node, *next, *parent, **p, *first = NULL; 344 353 struct nft_rbtree *priv = nft_set_priv(set); 345 354 u8 cur_genmask = nft_genmask_cur(net); 346 355 u8 genmask = nft_genmask_next(net); 347 - unsigned long open_interval = 0; 348 356 int d; 349 357 350 358 /* Descend the tree to search for an existing element greater than the ··· 449 459 } 450 460 } 451 461 452 - if (nft_rbtree_interval_null(set, new)) { 462 + if (nft_rbtree_interval_null(set, new)) 453 463 priv->start_rbe_cookie = 0; 454 - } else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie) { 455 - if (nft_set_is_anonymous(set)) { 456 - priv->start_rbe_cookie = 0; 457 - } else if (priv->start_rbe_cookie & NFT_RBTREE_OPEN_INTERVAL) { 458 - /* Previous element is an open interval that partially 459 - * overlaps with an existing non-open interval. 
460 - */ 461 - return -ENOTEMPTY; 462 - } 463 - } 464 + else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie) 465 + priv->start_rbe_cookie = 0; 464 466 465 467 /* - new start element matching existing start element: full overlap 466 468 * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given. ··· 460 478 if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) && 461 479 nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) { 462 480 *elem_priv = &rbe_ge->priv; 463 - 464 - /* - Corner case: new start element of open interval (which 465 - * comes as last element in the batch) overlaps the start of 466 - * an existing interval with an end element: partial overlap. 467 - */ 468 - node = rb_first(&priv->root); 469 - rbe = __nft_rbtree_next_active(node, genmask); 470 - if (rbe && nft_rbtree_interval_end(rbe)) { 471 - rbe = nft_rbtree_next_active(rbe, genmask); 472 - if (rbe && 473 - nft_rbtree_interval_start(rbe) && 474 - !nft_rbtree_cmp(set, new, rbe)) { 475 - if (last) 476 - return -ENOTEMPTY; 477 - 478 - /* Maybe open interval? */ 479 - open_interval = NFT_RBTREE_OPEN_INTERVAL; 480 - } 481 - } 482 - nft_rbtree_set_start_cookie_open(priv, rbe_ge, open_interval); 483 - 481 + nft_rbtree_set_start_cookie(priv, rbe_ge); 484 482 return -EEXIST; 485 483 } 486 484 ··· 513 551 */ 514 552 if (rbe_ge && 515 553 nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new)) 516 - return -ENOTEMPTY; 517 - 518 - /* - start element overlaps an open interval but end element is new: 519 - * partial overlap, reported as -ENOEMPTY. 
520 - */ 521 - if (!rbe_ge && priv->start_rbe_cookie && nft_rbtree_interval_end(new)) 522 554 return -ENOTEMPTY; 523 555 524 556 /* Accepted element: pick insertion point depending on key value */ ··· 624 668 struct nft_elem_priv **elem_priv) 625 669 { 626 670 struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem->priv); 627 - bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST); 628 671 struct nft_rbtree *priv = nft_set_priv(set); 629 672 u64 tstamp = nft_net_tstamp(net); 630 673 int err; ··· 640 685 cond_resched(); 641 686 642 687 write_lock(&priv->lock); 643 - err = __nft_rbtree_insert(net, set, rbe, elem_priv, tstamp, last); 688 + err = __nft_rbtree_insert(net, set, rbe, elem_priv, tstamp); 644 689 write_unlock(&priv->lock); 645 - 646 - if (nft_rbtree_interval_end(rbe)) 647 - priv->start_rbe_cookie = 0; 648 - 649 690 } while (err == -EAGAIN); 650 691 651 692 return err; ··· 724 773 const struct nft_set_elem *elem) 725 774 { 726 775 struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv); 727 - bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST); 728 776 struct nft_rbtree *priv = nft_set_priv(set); 729 777 const struct rb_node *parent = priv->root.rb_node; 730 778 u8 genmask = nft_genmask_next(net); ··· 764 814 continue; 765 815 } 766 816 767 - if (nft_rbtree_interval_start(rbe)) { 768 - if (!last) 769 - nft_rbtree_set_start_cookie(priv, rbe); 770 - } else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe)) 817 + if (nft_rbtree_interval_start(rbe)) 818 + nft_rbtree_set_start_cookie(priv, rbe); 819 + else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe)) 771 820 return NULL; 772 821 773 822 nft_rbtree_flush(net, set, &rbe->priv);
+4
net/netfilter/xt_CT.c
··· 16 16 #include <net/netfilter/nf_conntrack_ecache.h> 17 17 #include <net/netfilter/nf_conntrack_timeout.h> 18 18 #include <net/netfilter/nf_conntrack_zones.h> 19 + #include "nf_internals.h" 19 20 20 21 static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) 21 22 { ··· 284 283 struct nf_conn_help *help; 285 284 286 285 if (ct) { 286 + if (info->helper[0] || info->timeout[0]) 287 + nf_queue_nf_hook_drop(par->net); 288 + 287 289 help = nfct_help(ct); 288 290 xt_ct_put_helper(help); 289 291
+2 -2
net/netfilter/xt_time.c
··· 222 222 223 223 localtime_2(&current_time, stamp); 224 224 225 - if (!(info->weekdays_match & (1 << current_time.weekday))) 225 + if (!(info->weekdays_match & (1U << current_time.weekday))) 226 226 return false; 227 227 228 228 /* Do not spend time computing monthday if all days match anyway */ 229 229 if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) { 230 230 localtime_3(&current_time, stamp); 231 - if (!(info->monthdays_match & (1 << current_time.monthday))) 231 + if (!(info->monthdays_match & (1U << current_time.monthday))) 232 232 return false; 233 233 } 234 234
+4 -1
net/phonet/af_phonet.c
··· 129 129 return 1; 130 130 } 131 131 132 - static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr) 132 + static int pn_header_parse(const struct sk_buff *skb, 133 + const struct net_device *dev, 134 + unsigned char *haddr) 133 135 { 134 136 const u8 *media = skb_mac_header(skb); 137 + 135 138 *haddr = *media; 136 139 return 1; 137 140 }
+5
net/rose/af_rose.c
··· 811 811 goto out_release; 812 812 } 813 813 814 + if (sk->sk_state == TCP_SYN_SENT) { 815 + err = -EALREADY; 816 + goto out_release; 817 + } 818 + 814 819 sk->sk_state = TCP_CLOSE; 815 820 sock->state = SS_UNCONNECTED; 816 821
-27
net/sched/sch_generic.c
··· 1313 1313 } 1314 1314 } 1315 1315 1316 - static void dev_reset_queue(struct net_device *dev, 1317 - struct netdev_queue *dev_queue, 1318 - void *_unused) 1319 - { 1320 - struct Qdisc *qdisc; 1321 - bool nolock; 1322 - 1323 - qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1324 - if (!qdisc) 1325 - return; 1326 - 1327 - nolock = qdisc->flags & TCQ_F_NOLOCK; 1328 - 1329 - if (nolock) 1330 - spin_lock_bh(&qdisc->seqlock); 1331 - spin_lock_bh(qdisc_lock(qdisc)); 1332 - 1333 - qdisc_reset(qdisc); 1334 - 1335 - spin_unlock_bh(qdisc_lock(qdisc)); 1336 - if (nolock) { 1337 - clear_bit(__QDISC_STATE_MISSED, &qdisc->state); 1338 - clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); 1339 - spin_unlock_bh(&qdisc->seqlock); 1340 - } 1341 - } 1342 - 1343 1316 static bool some_qdisc_is_busy(struct net_device *dev) 1344 1317 { 1345 1318 unsigned int i;
+8 -6
net/sched/sch_ingress.c
··· 113 113 { 114 114 struct ingress_sched_data *q = qdisc_priv(sch); 115 115 struct net_device *dev = qdisc_dev(sch); 116 - struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress); 116 + struct bpf_mprog_entry *entry; 117 117 118 118 if (sch->parent != TC_H_INGRESS) 119 119 return; 120 120 121 121 tcf_block_put_ext(q->block, sch, &q->block_info); 122 122 123 - if (entry) { 123 + if (mini_qdisc_pair_inited(&q->miniqp)) { 124 + entry = rtnl_dereference(dev->tcx_ingress); 124 125 tcx_miniq_dec(entry); 125 126 if (!tcx_entry_is_active(entry)) { 126 127 tcx_entry_update(dev, NULL, true); ··· 291 290 292 291 static void clsact_destroy(struct Qdisc *sch) 293 292 { 293 + struct bpf_mprog_entry *ingress_entry, *egress_entry; 294 294 struct clsact_sched_data *q = qdisc_priv(sch); 295 295 struct net_device *dev = qdisc_dev(sch); 296 - struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress); 297 - struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress); 298 296 299 297 if (sch->parent != TC_H_CLSACT) 300 298 return; ··· 301 301 tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); 302 302 tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); 303 303 304 - if (ingress_entry) { 304 + if (mini_qdisc_pair_inited(&q->miniqp_ingress)) { 305 + ingress_entry = rtnl_dereference(dev->tcx_ingress); 305 306 tcx_miniq_dec(ingress_entry); 306 307 if (!tcx_entry_is_active(ingress_entry)) { 307 308 tcx_entry_update(dev, NULL, true); ··· 310 309 } 311 310 } 312 311 313 - if (egress_entry) { 312 + if (mini_qdisc_pair_inited(&q->miniqp_egress)) { 313 + egress_entry = rtnl_dereference(dev->tcx_egress); 314 314 tcx_miniq_dec(egress_entry); 315 315 if (!tcx_entry_is_active(egress_entry)) { 316 316 tcx_entry_update(dev, NULL, false);
+2 -5
net/sched/sch_teql.c
··· 146 146 master->slaves = NEXT_SLAVE(q); 147 147 if (q == master->slaves) { 148 148 struct netdev_queue *txq; 149 - spinlock_t *root_lock; 150 149 151 150 txq = netdev_get_tx_queue(master->dev, 0); 152 151 master->slaves = NULL; 153 152 154 - root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); 155 - spin_lock_bh(root_lock); 156 - qdisc_reset(rtnl_dereference(txq->qdisc)); 157 - spin_unlock_bh(root_lock); 153 + dev_reset_queue(master->dev, 154 + txq, NULL); 158 155 } 159 156 } 160 157 skb_queue_purge(&dat->q);
+94 -66
net/shaper/shaper.c
··· 36 36 return &((struct net_shaper_nl_ctx *)ctx)->binding; 37 37 } 38 38 39 - static void net_shaper_lock(struct net_shaper_binding *binding) 40 - { 41 - switch (binding->type) { 42 - case NET_SHAPER_BINDING_TYPE_NETDEV: 43 - netdev_lock(binding->netdev); 44 - break; 45 - } 46 - } 47 - 48 - static void net_shaper_unlock(struct net_shaper_binding *binding) 49 - { 50 - switch (binding->type) { 51 - case NET_SHAPER_BINDING_TYPE_NETDEV: 52 - netdev_unlock(binding->netdev); 53 - break; 54 - } 55 - } 56 - 57 39 static struct net_shaper_hierarchy * 58 40 net_shaper_hierarchy(struct net_shaper_binding *binding) 59 41 { 60 42 /* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */ 61 43 if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV) 44 + return READ_ONCE(binding->netdev->net_shaper_hierarchy); 45 + 46 + /* No other type supported yet. */ 47 + return NULL; 48 + } 49 + 50 + static struct net_shaper_hierarchy * 51 + net_shaper_hierarchy_rcu(struct net_shaper_binding *binding) 52 + { 53 + /* Readers look up the device and take a ref, then take RCU lock 54 + * later at which point netdev may have been unregistered and flushed. 55 + * READ_ONCE() pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. 56 + */ 57 + if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV && 58 + READ_ONCE(binding->netdev->reg_state) <= NETREG_REGISTERED) 62 59 return READ_ONCE(binding->netdev->net_shaper_hierarchy); 63 60 64 61 /* No other type supported yet. */ ··· 201 204 return 0; 202 205 } 203 206 207 + /* Like net_shaper_ctx_setup(), but for "write" handlers (never for dumps!) 208 + * Acquires the lock protecting the hierarchy (instance lock for netdev). 
209 + */ 210 + static int net_shaper_ctx_setup_lock(const struct genl_info *info, int type, 211 + struct net_shaper_nl_ctx *ctx) 212 + { 213 + struct net *ns = genl_info_net(info); 214 + struct net_device *dev; 215 + int ifindex; 216 + 217 + if (GENL_REQ_ATTR_CHECK(info, type)) 218 + return -EINVAL; 219 + 220 + ifindex = nla_get_u32(info->attrs[type]); 221 + dev = netdev_get_by_index_lock(ns, ifindex); 222 + if (!dev) { 223 + NL_SET_BAD_ATTR(info->extack, info->attrs[type]); 224 + return -ENOENT; 225 + } 226 + 227 + if (!dev->netdev_ops->net_shaper_ops) { 228 + NL_SET_BAD_ATTR(info->extack, info->attrs[type]); 229 + netdev_unlock(dev); 230 + return -EOPNOTSUPP; 231 + } 232 + 233 + ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV; 234 + ctx->binding.netdev = dev; 235 + return 0; 236 + } 237 + 204 238 static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx) 205 239 { 206 240 if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV) 207 241 netdev_put(ctx->binding.netdev, &ctx->dev_tracker); 242 + } 243 + 244 + static void net_shaper_ctx_cleanup_unlock(struct net_shaper_nl_ctx *ctx) 245 + { 246 + if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV) 247 + netdev_unlock(ctx->binding.netdev); 208 248 } 209 249 210 250 static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle) ··· 285 251 net_shaper_lookup(struct net_shaper_binding *binding, 286 252 const struct net_shaper_handle *handle) 287 253 { 288 - struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); 289 254 u32 index = net_shaper_handle_to_index(handle); 255 + struct net_shaper_hierarchy *hierarchy; 290 256 257 + hierarchy = net_shaper_hierarchy_rcu(binding); 291 258 if (!hierarchy || xa_get_mark(&hierarchy->shapers, index, 292 259 NET_SHAPER_NOT_VALID)) 293 260 return NULL; ··· 297 262 } 298 263 299 264 /* Allocate on demand the per device shaper's hierarchy container. 
300 - * Called under the net shaper lock 265 + * Called under the lock protecting the hierarchy (instance lock for netdev) 301 266 */ 302 267 static struct net_shaper_hierarchy * 303 268 net_shaper_hierarchy_setup(struct net_shaper_binding *binding) ··· 716 681 net_shaper_generic_post(info); 717 682 } 718 683 684 + int net_shaper_nl_pre_doit_write(const struct genl_split_ops *ops, 685 + struct sk_buff *skb, struct genl_info *info) 686 + { 687 + struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx; 688 + 689 + BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx)); 690 + 691 + return net_shaper_ctx_setup_lock(info, NET_SHAPER_A_IFINDEX, ctx); 692 + } 693 + 694 + void net_shaper_nl_post_doit_write(const struct genl_split_ops *ops, 695 + struct sk_buff *skb, struct genl_info *info) 696 + { 697 + net_shaper_ctx_cleanup_unlock((struct net_shaper_nl_ctx *)info->ctx); 698 + } 699 + 719 700 int net_shaper_nl_pre_dumpit(struct netlink_callback *cb) 720 701 { 721 702 struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; ··· 829 778 830 779 /* Don't error out dumps performed before any set operation. 
*/ 831 780 binding = net_shaper_binding_from_ctx(ctx); 832 - hierarchy = net_shaper_hierarchy(binding); 833 - if (!hierarchy) 834 - return 0; 835 781 836 782 rcu_read_lock(); 783 + hierarchy = net_shaper_hierarchy_rcu(binding); 784 + if (!hierarchy) 785 + goto out_unlock; 786 + 837 787 for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index, 838 788 U32_MAX, XA_PRESENT)); ctx->start_index++) { 839 789 ret = net_shaper_fill_one(skb, binding, shaper, info); 840 790 if (ret) 841 791 break; 842 792 } 793 + out_unlock: 843 794 rcu_read_unlock(); 844 795 845 796 return ret; ··· 859 806 860 807 binding = net_shaper_binding_from_ctx(info->ctx); 861 808 862 - net_shaper_lock(binding); 863 809 ret = net_shaper_parse_info(binding, info->attrs, info, &shaper, 864 810 &exists); 865 811 if (ret) 866 - goto unlock; 812 + return ret; 867 813 868 814 if (!exists) 869 815 net_shaper_default_parent(&shaper.handle, &shaper.parent); 870 816 871 817 hierarchy = net_shaper_hierarchy_setup(binding); 872 - if (!hierarchy) { 873 - ret = -ENOMEM; 874 - goto unlock; 875 - } 818 + if (!hierarchy) 819 + return -ENOMEM; 876 820 877 821 /* The 'set' operation can't create node-scope shapers. 
*/ 878 822 handle = shaper.handle; 879 823 if (handle.scope == NET_SHAPER_SCOPE_NODE && 880 - !net_shaper_lookup(binding, &handle)) { 881 - ret = -ENOENT; 882 - goto unlock; 883 - } 824 + !net_shaper_lookup(binding, &handle)) 825 + return -ENOENT; 884 826 885 827 ret = net_shaper_pre_insert(binding, &handle, info->extack); 886 828 if (ret) 887 - goto unlock; 829 + return ret; 888 830 889 831 ops = net_shaper_ops(binding); 890 832 ret = ops->set(binding, &shaper, info->extack); 891 833 if (ret) { 892 834 net_shaper_rollback(binding); 893 - goto unlock; 835 + return ret; 894 836 } 895 837 896 838 net_shaper_commit(binding, 1, &shaper); 897 839 898 - unlock: 899 - net_shaper_unlock(binding); 900 - return ret; 840 + return 0; 901 841 } 902 842 903 843 static int __net_shaper_delete(struct net_shaper_binding *binding, ··· 1118 1072 1119 1073 binding = net_shaper_binding_from_ctx(info->ctx); 1120 1074 1121 - net_shaper_lock(binding); 1122 1075 ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info, 1123 1076 &handle); 1124 1077 if (ret) 1125 - goto unlock; 1078 + return ret; 1126 1079 1127 1080 hierarchy = net_shaper_hierarchy(binding); 1128 - if (!hierarchy) { 1129 - ret = -ENOENT; 1130 - goto unlock; 1131 - } 1081 + if (!hierarchy) 1082 + return -ENOENT; 1132 1083 1133 1084 shaper = net_shaper_lookup(binding, &handle); 1134 - if (!shaper) { 1135 - ret = -ENOENT; 1136 - goto unlock; 1137 - } 1085 + if (!shaper) 1086 + return -ENOENT; 1138 1087 1139 1088 if (handle.scope == NET_SHAPER_SCOPE_NODE) { 1140 1089 ret = net_shaper_pre_del_node(binding, shaper, info->extack); 1141 1090 if (ret) 1142 - goto unlock; 1091 + return ret; 1143 1092 } 1144 1093 1145 - ret = __net_shaper_delete(binding, shaper, info->extack); 1146 - 1147 - unlock: 1148 - net_shaper_unlock(binding); 1149 - return ret; 1094 + return __net_shaper_delete(binding, shaper, info->extack); 1150 1095 } 1151 1096 1152 1097 static int net_shaper_group_send_reply(struct net_shaper_binding *binding, 
··· 1186 1149 if (!net_shaper_ops(binding)->group) 1187 1150 return -EOPNOTSUPP; 1188 1151 1189 - net_shaper_lock(binding); 1190 1152 leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES); 1191 1153 if (!leaves_count) { 1192 1154 NL_SET_BAD_ATTR(info->extack, 1193 1155 info->attrs[NET_SHAPER_A_LEAVES]); 1194 - ret = -EINVAL; 1195 - goto unlock; 1156 + return -EINVAL; 1196 1157 } 1197 1158 1198 1159 leaves = kcalloc(leaves_count, sizeof(struct net_shaper) + 1199 1160 sizeof(struct net_shaper *), GFP_KERNEL); 1200 - if (!leaves) { 1201 - ret = -ENOMEM; 1202 - goto unlock; 1203 - } 1161 + if (!leaves) 1162 + return -ENOMEM; 1204 1163 old_nodes = (void *)&leaves[leaves_count]; 1205 1164 1206 1165 ret = net_shaper_parse_node(binding, info->attrs, info, &node); ··· 1273 1240 1274 1241 free_leaves: 1275 1242 kfree(leaves); 1276 - 1277 - unlock: 1278 - net_shaper_unlock(binding); 1279 1243 return ret; 1280 1244 1281 1245 free_msg: ··· 1382 1352 if (!hierarchy) 1383 1353 return; 1384 1354 1385 - net_shaper_lock(binding); 1386 1355 xa_lock(&hierarchy->shapers); 1387 1356 xa_for_each(&hierarchy->shapers, index, cur) { 1388 1357 __xa_erase(&hierarchy->shapers, index); 1389 1358 kfree(cur); 1390 1359 } 1391 1360 xa_unlock(&hierarchy->shapers); 1392 - net_shaper_unlock(binding); 1393 1361 1394 1362 kfree(hierarchy); 1395 1363 }
+6 -6
net/shaper/shaper_nl_gen.c
··· 99 99 }, 100 100 { 101 101 .cmd = NET_SHAPER_CMD_SET, 102 - .pre_doit = net_shaper_nl_pre_doit, 102 + .pre_doit = net_shaper_nl_pre_doit_write, 103 103 .doit = net_shaper_nl_set_doit, 104 - .post_doit = net_shaper_nl_post_doit, 104 + .post_doit = net_shaper_nl_post_doit_write, 105 105 .policy = net_shaper_set_nl_policy, 106 106 .maxattr = NET_SHAPER_A_IFINDEX, 107 107 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 108 108 }, 109 109 { 110 110 .cmd = NET_SHAPER_CMD_DELETE, 111 - .pre_doit = net_shaper_nl_pre_doit, 111 + .pre_doit = net_shaper_nl_pre_doit_write, 112 112 .doit = net_shaper_nl_delete_doit, 113 - .post_doit = net_shaper_nl_post_doit, 113 + .post_doit = net_shaper_nl_post_doit_write, 114 114 .policy = net_shaper_delete_nl_policy, 115 115 .maxattr = NET_SHAPER_A_IFINDEX, 116 116 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 117 117 }, 118 118 { 119 119 .cmd = NET_SHAPER_CMD_GROUP, 120 - .pre_doit = net_shaper_nl_pre_doit, 120 + .pre_doit = net_shaper_nl_pre_doit_write, 121 121 .doit = net_shaper_nl_group_doit, 122 - .post_doit = net_shaper_nl_post_doit, 122 + .post_doit = net_shaper_nl_post_doit_write, 123 123 .policy = net_shaper_group_nl_policy, 124 124 .maxattr = NET_SHAPER_A_LEAVES, 125 125 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+5
net/shaper/shaper_nl_gen.h
··· 18 18 19 19 int net_shaper_nl_pre_doit(const struct genl_split_ops *ops, 20 20 struct sk_buff *skb, struct genl_info *info); 21 + int net_shaper_nl_pre_doit_write(const struct genl_split_ops *ops, 22 + struct sk_buff *skb, struct genl_info *info); 21 23 int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops, 22 24 struct sk_buff *skb, struct genl_info *info); 23 25 void 24 26 net_shaper_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, 25 27 struct genl_info *info); 28 + void 29 + net_shaper_nl_post_doit_write(const struct genl_split_ops *ops, 30 + struct sk_buff *skb, struct genl_info *info); 26 31 void 27 32 net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops, 28 33 struct sk_buff *skb, struct genl_info *info);
+17 -6
net/smc/af_smc.c
··· 131 131 struct smc_sock *smc; 132 132 struct sock *child; 133 133 134 - smc = smc_clcsock_user_data(sk); 134 + rcu_read_lock(); 135 + smc = smc_clcsock_user_data_rcu(sk); 136 + if (!smc || !refcount_inc_not_zero(&smc->sk.sk_refcnt)) { 137 + rcu_read_unlock(); 138 + smc = NULL; 139 + goto drop; 140 + } 141 + rcu_read_unlock(); 135 142 136 143 if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) > 137 144 sk->sk_max_ack_backlog) ··· 160 153 if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops) 161 154 inet_csk(child)->icsk_af_ops = smc->ori_af_ops; 162 155 } 156 + sock_put(&smc->sk); 163 157 return child; 164 158 165 159 drop: 166 160 dst_release(dst); 167 161 tcp_listendrop(sk); 162 + if (smc) 163 + sock_put(&smc->sk); 168 164 return NULL; 169 165 } 170 166 ··· 264 254 struct sock *clcsk = smc->clcsock->sk; 265 255 266 256 write_lock_bh(&clcsk->sk_callback_lock); 267 - clcsk->sk_user_data = NULL; 257 + rcu_assign_sk_user_data(clcsk, NULL); 268 258 269 259 smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change); 270 260 smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready); ··· 912 902 struct sock *clcsk = smc->clcsock->sk; 913 903 914 904 write_lock_bh(&clcsk->sk_callback_lock); 915 - clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); 905 + __rcu_assign_sk_user_data_with_flags(clcsk, smc, SK_USER_DATA_NOCOPY); 916 906 917 907 smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change, 918 908 &smc->clcsk_state_change); ··· 2675 2665 * smc-specific sk_data_ready function 2676 2666 */ 2677 2667 write_lock_bh(&smc->clcsock->sk->sk_callback_lock); 2678 - smc->clcsock->sk->sk_user_data = 2679 - (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); 2668 + __rcu_assign_sk_user_data_with_flags(smc->clcsock->sk, smc, 2669 + SK_USER_DATA_NOCOPY); 2680 2670 smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready, 2681 2671 smc_clcsock_data_ready, &smc->clcsk_data_ready); 2682 2672 
write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); ··· 2697 2687 write_lock_bh(&smc->clcsock->sk->sk_callback_lock); 2698 2688 smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready, 2699 2689 &smc->clcsk_data_ready); 2700 - smc->clcsock->sk->sk_user_data = NULL; 2690 + rcu_assign_sk_user_data(smc->clcsock->sk, NULL); 2701 2691 write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); 2702 2692 goto out; 2703 2693 } 2694 + sock_set_flag(sk, SOCK_RCU_FREE); 2704 2695 sk->sk_max_ack_backlog = backlog; 2705 2696 sk->sk_ack_backlog = 0; 2706 2697 sk->sk_state = SMC_LISTEN;
+5
net/smc/smc.h
··· 346 346 ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY); 347 347 } 348 348 349 + static inline struct smc_sock *smc_clcsock_user_data_rcu(const struct sock *clcsk) 350 + { 351 + return (struct smc_sock *)rcu_dereference_sk_user_data(clcsk); 352 + } 353 + 349 354 /* save target_cb in saved_cb, and replace target_cb with new_cb */ 350 355 static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *), 351 356 void (*new_cb)(struct sock *),
+1 -1
net/smc/smc_close.c
··· 218 218 write_lock_bh(&smc->clcsock->sk->sk_callback_lock); 219 219 smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready, 220 220 &smc->clcsk_data_ready); 221 - smc->clcsock->sk->sk_user_data = NULL; 221 + rcu_assign_sk_user_data(smc->clcsock->sk, NULL); 222 222 write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); 223 223 rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); 224 224 }
+21 -5
net/sunrpc/cache.c
··· 1062 1062 struct cache_reader *rp = filp->private_data; 1063 1063 1064 1064 if (rp) { 1065 + struct cache_request *rq = NULL; 1066 + 1065 1067 spin_lock(&queue_lock); 1066 1068 if (rp->offset) { 1067 1069 struct cache_queue *cq; 1068 - for (cq= &rp->q; &cq->list != &cd->queue; 1069 - cq = list_entry(cq->list.next, struct cache_queue, list)) 1070 + for (cq = &rp->q; &cq->list != &cd->queue; 1071 + cq = list_entry(cq->list.next, 1072 + struct cache_queue, list)) 1070 1073 if (!cq->reader) { 1071 - container_of(cq, struct cache_request, q) 1072 - ->readers--; 1074 + struct cache_request *cr = 1075 + container_of(cq, 1076 + struct cache_request, q); 1077 + cr->readers--; 1078 + if (cr->readers == 0 && 1079 + !test_bit(CACHE_PENDING, 1080 + &cr->item->flags)) { 1081 + list_del(&cr->q.list); 1082 + rq = cr; 1083 + } 1073 1084 break; 1074 1085 } 1075 1086 rp->offset = 0; ··· 1088 1077 list_del(&rp->q.list); 1089 1078 spin_unlock(&queue_lock); 1090 1079 1080 + if (rq) { 1081 + cache_put(rq->item, cd); 1082 + kfree(rq->buf); 1083 + kfree(rq); 1084 + } 1085 + 1091 1086 filp->private_data = NULL; 1092 1087 kfree(rp); 1093 - 1094 1088 } 1095 1089 if (filp->f_mode & FMODE_WRITE) { 1096 1090 atomic_dec(&cd->writers);
+4 -3
net/sunrpc/xprtrdma/verbs.c
··· 1362 1362 needed += RPCRDMA_MAX_RECV_BATCH; 1363 1363 1364 1364 if (atomic_inc_return(&ep->re_receiving) > 1) 1365 - goto out; 1365 + goto out_dec; 1366 1366 1367 1367 /* fast path: all needed reps can be found on the free list */ 1368 1368 wr = NULL; ··· 1385 1385 ++count; 1386 1386 } 1387 1387 if (!wr) 1388 - goto out; 1388 + goto out_dec; 1389 1389 1390 1390 rc = ib_post_recv(ep->re_id->qp, wr, 1391 1391 (const struct ib_recv_wr **)&bad_wr); ··· 1400 1400 --count; 1401 1401 } 1402 1402 } 1403 + 1404 + out_dec: 1403 1405 if (atomic_dec_return(&ep->re_receiving) > 0) 1404 1406 complete(&ep->re_done); 1405 - 1406 1407 out: 1407 1408 trace_xprtrdma_post_recvs(r_xprt, count); 1408 1409 ep->re_receive_count += count;
+2
net/unix/af_unix.c
··· 1958 1958 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) 1959 1959 { 1960 1960 scm->fp = scm_fp_dup(UNIXCB(skb).fp); 1961 + 1962 + unix_peek_fpl(scm->fp); 1961 1963 } 1962 1964 1963 1965 static void unix_destruct_scm(struct sk_buff *skb)
+1
net/unix/af_unix.h
··· 29 29 void unix_update_edges(struct unix_sock *receiver); 30 30 int unix_prepare_fpl(struct scm_fp_list *fpl); 31 31 void unix_destroy_fpl(struct scm_fp_list *fpl); 32 + void unix_peek_fpl(struct scm_fp_list *fpl); 32 33 void unix_schedule_gc(struct user_struct *user); 33 34 34 35 /* SOCK_DIAG */
+51 -28
net/unix/garbage.c
··· 318 318 unix_free_vertices(fpl); 319 319 } 320 320 321 + static bool gc_in_progress; 322 + static seqcount_t unix_peek_seq = SEQCNT_ZERO(unix_peek_seq); 323 + 324 + void unix_peek_fpl(struct scm_fp_list *fpl) 325 + { 326 + static DEFINE_SPINLOCK(unix_peek_lock); 327 + 328 + if (!fpl || !fpl->count_unix) 329 + return; 330 + 331 + if (!READ_ONCE(gc_in_progress)) 332 + return; 333 + 334 + /* Invalidate the final refcnt check in unix_vertex_dead(). */ 335 + spin_lock(&unix_peek_lock); 336 + raw_write_seqcount_barrier(&unix_peek_seq); 337 + spin_unlock(&unix_peek_lock); 338 + } 339 + 321 340 static bool unix_vertex_dead(struct unix_vertex *vertex) 322 341 { 323 342 struct unix_edge *edge; ··· 368 349 return false; 369 350 370 351 return true; 352 + } 353 + 354 + static LIST_HEAD(unix_visited_vertices); 355 + static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2; 356 + 357 + static bool unix_scc_dead(struct list_head *scc, bool fast) 358 + { 359 + struct unix_vertex *vertex; 360 + bool scc_dead = true; 361 + unsigned int seq; 362 + 363 + seq = read_seqcount_begin(&unix_peek_seq); 364 + 365 + list_for_each_entry_reverse(vertex, scc, scc_entry) { 366 + /* Don't restart DFS from this vertex. */ 367 + list_move_tail(&vertex->entry, &unix_visited_vertices); 368 + 369 + /* Mark vertex as off-stack for __unix_walk_scc(). */ 370 + if (!fast) 371 + vertex->index = unix_vertex_grouped_index; 372 + 373 + if (scc_dead) 374 + scc_dead = unix_vertex_dead(vertex); 375 + } 376 + 377 + /* If MSG_PEEK intervened, defer this SCC to the next round. 
*/ 378 + if (read_seqcount_retry(&unix_peek_seq, seq)) 379 + return false; 380 + 381 + return scc_dead; 371 382 } 372 383 373 384 static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist) ··· 452 403 453 404 return false; 454 405 } 455 - 456 - static LIST_HEAD(unix_visited_vertices); 457 - static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2; 458 406 459 407 static unsigned long __unix_walk_scc(struct unix_vertex *vertex, 460 408 unsigned long *last_index, ··· 520 474 } 521 475 522 476 if (vertex->index == vertex->scc_index) { 523 - struct unix_vertex *v; 524 477 struct list_head scc; 525 - bool scc_dead = true; 526 478 527 479 /* SCC finalised. 528 480 * ··· 529 485 */ 530 486 __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry); 531 487 532 - list_for_each_entry_reverse(v, &scc, scc_entry) { 533 - /* Don't restart DFS from this vertex in unix_walk_scc(). */ 534 - list_move_tail(&v->entry, &unix_visited_vertices); 535 - 536 - /* Mark vertex as off-stack. 
*/ 537 - v->index = unix_vertex_grouped_index; 538 - 539 - if (scc_dead) 540 - scc_dead = unix_vertex_dead(v); 541 - } 542 - 543 - if (scc_dead) { 488 + if (unix_scc_dead(&scc, false)) { 544 489 unix_collect_skb(&scc, hitlist); 545 490 } else { 546 491 if (unix_vertex_max_scc_index < vertex->scc_index) ··· 583 550 while (!list_empty(&unix_unvisited_vertices)) { 584 551 struct unix_vertex *vertex; 585 552 struct list_head scc; 586 - bool scc_dead = true; 587 553 588 554 vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry); 589 555 list_add(&scc, &vertex->scc_entry); 590 556 591 - list_for_each_entry_reverse(vertex, &scc, scc_entry) { 592 - list_move_tail(&vertex->entry, &unix_visited_vertices); 593 - 594 - if (scc_dead) 595 - scc_dead = unix_vertex_dead(vertex); 596 - } 597 - 598 - if (scc_dead) { 557 + if (unix_scc_dead(&scc, true)) { 599 558 cyclic_sccs--; 600 559 unix_collect_skb(&scc, hitlist); 601 560 } ··· 601 576 WRITE_ONCE(unix_graph_state, 602 577 cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC); 603 578 } 604 - 605 - static bool gc_in_progress; 606 579 607 580 static void unix_gc(struct work_struct *work) 608 581 {
+1
net/wireless/pmsr.c
··· 665 665 } 666 666 spin_unlock_bh(&wdev->pmsr_lock); 667 667 668 + cancel_work_sync(&wdev->pmsr_free_wk); 668 669 if (found) 669 670 cfg80211_pmsr_process_abort(wdev); 670 671
+5 -6
rust/Makefile
··· 148 148 quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $< 149 149 cmd_rustdoc = \ 150 150 OBJTREE=$(abspath $(objtree)) \ 151 - $(RUSTDOC) $(filter-out $(skip_flags) --remap-path-prefix=%,$(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \ 151 + $(RUSTDOC) $(filter-out $(skip_flags) --remap-path-prefix=% --remap-path-scope=%, \ 152 + $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \ 152 153 $(rustc_target_flags) -L$(objtree)/$(obj) \ 153 154 -Zunstable-options --generate-link-to-definition \ 154 155 --output $(rustdoc_output) \ ··· 335 334 rm -rf $(objtree)/$(obj)/test/doctests/kernel; \ 336 335 mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \ 337 336 OBJTREE=$(abspath $(objtree)) \ 338 - $(RUSTDOC) --test $(filter-out --remap-path-prefix=%,$(rust_flags)) \ 337 + $(RUSTDOC) --test $(filter-out --remap-path-prefix=% --remap-path-scope=%,$(rust_flags)) \ 339 338 -L$(objtree)/$(obj) --extern ffi --extern pin_init \ 340 339 --extern kernel --extern build_error --extern macros \ 341 340 --extern bindings --extern uapi \ ··· 527 526 cmd_rustc_procmacrolibrary = \ 528 527 $(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \ 529 528 $(filter-out $(skip_flags),$(rust_common_flags) $(rustc_target_flags)) \ 530 - --emit=dep-info,link --crate-type rlib -O \ 529 + --emit=dep-info=$(depfile) --emit=link=$@ --crate-type rlib -O \ 531 530 --out-dir $(objtree)/$(obj) -L$(objtree)/$(obj) \ 532 - --crate-name $(patsubst lib%.rlib,%,$(notdir $@)) $<; \ 533 - mv $(objtree)/$(obj)/$(patsubst lib%.rlib,%,$(notdir $@)).d $(depfile); \ 534 - sed -i '/^\#/d' $(depfile) 531 + --crate-name $(patsubst lib%.rlib,%,$(notdir $@)) $< 535 532 536 533 $(obj)/libproc_macro2.rlib: private skip_clippy = 1 537 534 $(obj)/libproc_macro2.rlib: private rustc_target_flags = $(proc_macro2-flags)
+1
rust/kernel/cpufreq.rs
··· 401 401 /// ``` 402 402 /// use kernel::cpufreq::{DEFAULT_TRANSITION_LATENCY_NS, Policy}; 403 403 /// 404 + /// #[allow(clippy::double_parens, reason = "False positive before 1.92.0")] 404 405 /// fn update_policy(policy: &mut Policy) { 405 406 /// policy 406 407 /// .set_dvfs_possible_from_any_cpu(true)
+50 -64
rust/kernel/dma.rs
··· 461 461 self.count * core::mem::size_of::<T>() 462 462 } 463 463 464 + /// Returns the raw pointer to the allocated region in the CPU's virtual address space. 465 + #[inline] 466 + pub fn as_ptr(&self) -> *const [T] { 467 + core::ptr::slice_from_raw_parts(self.cpu_addr.as_ptr(), self.count) 468 + } 469 + 470 + /// Returns the raw pointer to the allocated region in the CPU's virtual address space as 471 + /// a mutable pointer. 472 + #[inline] 473 + pub fn as_mut_ptr(&self) -> *mut [T] { 474 + core::ptr::slice_from_raw_parts_mut(self.cpu_addr.as_ptr(), self.count) 475 + } 476 + 464 477 /// Returns the base address to the allocated region in the CPU's virtual address space. 465 478 pub fn start_ptr(&self) -> *const T { 466 479 self.cpu_addr.as_ptr() ··· 594 581 Ok(()) 595 582 } 596 583 597 - /// Returns a pointer to an element from the region with bounds checking. `offset` is in 598 - /// units of `T`, not the number of bytes. 599 - /// 600 - /// Public but hidden since it should only be used from [`dma_read`] and [`dma_write`] macros. 601 - #[doc(hidden)] 602 - pub fn item_from_index(&self, offset: usize) -> Result<*mut T> { 603 - if offset >= self.count { 604 - return Err(EINVAL); 605 - } 606 - // SAFETY: 607 - // - The pointer is valid due to type invariant on `CoherentAllocation` 608 - // and we've just checked that the range and index is within bounds. 609 - // - `offset` can't overflow since it is smaller than `self.count` and we've checked 610 - // that `self.count` won't overflow early in the constructor. 611 - Ok(unsafe { self.cpu_addr.as_ptr().add(offset) }) 612 - } 613 - 614 584 /// Reads the value of `field` and ensures that its type is [`FromBytes`]. 615 585 /// 616 586 /// # Safety ··· 666 670 667 671 /// Reads a field of an item from an allocated region of structs. 
668 672 /// 673 + /// The syntax is of the form `kernel::dma_read!(dma, proj)` where `dma` is an expression evaluating 674 + /// to a [`CoherentAllocation`] and `proj` is a [projection specification](kernel::ptr::project!). 675 + /// 669 676 /// # Examples 670 677 /// 671 678 /// ``` ··· 683 684 /// unsafe impl kernel::transmute::AsBytes for MyStruct{}; 684 685 /// 685 686 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result { 686 - /// let whole = kernel::dma_read!(alloc[2]); 687 - /// let field = kernel::dma_read!(alloc[1].field); 687 + /// let whole = kernel::dma_read!(alloc, [2]?); 688 + /// let field = kernel::dma_read!(alloc, [1]?.field); 688 689 /// # Ok::<(), Error>(()) } 689 690 /// ``` 690 691 #[macro_export] 691 692 macro_rules! dma_read { 692 - ($dma:expr, $idx: expr, $($field:tt)*) => {{ 693 - (|| -> ::core::result::Result<_, $crate::error::Error> { 694 - let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?; 695 - // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be 696 - // dereferenced. The compiler also further validates the expression on whether `field` 697 - // is a member of `item` when expanded by the macro. 698 - unsafe { 699 - let ptr_field = ::core::ptr::addr_of!((*item) $($field)*); 700 - ::core::result::Result::Ok( 701 - $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field) 702 - ) 703 - } 704 - })() 693 + ($dma:expr, $($proj:tt)*) => {{ 694 + let dma = &$dma; 695 + let ptr = $crate::ptr::project!( 696 + $crate::dma::CoherentAllocation::as_ptr(dma), $($proj)* 697 + ); 698 + // SAFETY: The pointer created by the projection is within the DMA region. 
699 + unsafe { $crate::dma::CoherentAllocation::field_read(dma, ptr) } 705 700 }}; 706 - ($dma:ident [ $idx:expr ] $($field:tt)* ) => { 707 - $crate::dma_read!($dma, $idx, $($field)*) 708 - }; 709 - ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => { 710 - $crate::dma_read!($($dma).*, $idx, $($field)*) 711 - }; 712 701 } 713 702 714 703 /// Writes to a field of an item from an allocated region of structs. 704 + /// 705 + /// The syntax is of the form `kernel::dma_write!(dma, proj, val)` where `dma` is an expression 706 + /// evaluating to a [`CoherentAllocation`], `proj` is a 707 + /// [projection specification](kernel::ptr::project!), and `val` is the value to be written to the 708 + /// projected location. 715 709 /// 716 710 /// # Examples 717 711 /// ··· 720 728 /// unsafe impl kernel::transmute::AsBytes for MyStruct{}; 721 729 /// 722 730 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result { 723 - /// kernel::dma_write!(alloc[2].member = 0xf); 724 - /// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf }); 731 + /// kernel::dma_write!(alloc, [2]?.member, 0xf); 732 + /// kernel::dma_write!(alloc, [1]?, MyStruct { member: 0xf }); 725 733 /// # Ok::<(), Error>(()) } 726 734 /// ``` 727 735 #[macro_export] 728 736 macro_rules! dma_write { 729 - ($dma:ident [ $idx:expr ] $($field:tt)*) => {{ 730 - $crate::dma_write!($dma, $idx, $($field)*) 737 + (@parse [$dma:expr] [$($proj:tt)*] [, $val:expr]) => {{ 738 + let dma = &$dma; 739 + let ptr = $crate::ptr::project!( 740 + mut $crate::dma::CoherentAllocation::as_mut_ptr(dma), $($proj)* 741 + ); 742 + let val = $val; 743 + // SAFETY: The pointer created by the projection is within the DMA region. 
744 + unsafe { $crate::dma::CoherentAllocation::field_write(dma, ptr, val) } 731 745 }}; 732 - ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{ 733 - $crate::dma_write!($($dma).*, $idx, $($field)*) 734 - }}; 735 - ($dma:expr, $idx: expr, = $val:expr) => { 736 - (|| -> ::core::result::Result<_, $crate::error::Error> { 737 - let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?; 738 - // SAFETY: `item_from_index` ensures that `item` is always a valid item. 739 - unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) } 740 - ::core::result::Result::Ok(()) 741 - })() 746 + (@parse [$dma:expr] [$($proj:tt)*] [.$field:tt $($rest:tt)*]) => { 747 + $crate::dma_write!(@parse [$dma] [$($proj)* .$field] [$($rest)*]) 742 748 }; 743 - ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => { 744 - (|| -> ::core::result::Result<_, $crate::error::Error> { 745 - let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?; 746 - // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be 747 - // dereferenced. The compiler also further validates the expression on whether `field` 748 - // is a member of `item` when expanded by the macro. 749 - unsafe { 750 - let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*); 751 - $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val) 752 - } 753 - ::core::result::Result::Ok(()) 754 - })() 749 + (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr]? $($rest:tt)*]) => { 750 + $crate::dma_write!(@parse [$dma] [$($proj)* [$index]?] [$($rest)*]) 751 + }; 752 + (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr] $($rest:tt)*]) => { 753 + $crate::dma_write!(@parse [$dma] [$($proj)* [$index]] [$($rest)*]) 754 + }; 755 + ($dma:expr, $($rest:tt)*) => { 756 + $crate::dma_write!(@parse [$dma] [] [$($rest)*]) 755 757 }; 756 758 }
+4
rust/kernel/lib.rs
··· 20 20 #![feature(generic_nonzero)] 21 21 #![feature(inline_const)] 22 22 #![feature(pointer_is_aligned)] 23 + #![feature(slice_ptr_len)] 23 24 // 24 25 // Stable since Rust 1.80.0. 25 26 #![feature(slice_flatten)] ··· 37 36 #![feature(const_option)] 38 37 #![feature(const_ptr_write)] 39 38 #![feature(const_refs_to_cell)] 39 + // 40 + // Stable since Rust 1.84.0. 41 + #![feature(strict_provenance)] 40 42 // 41 43 // Expected to become stable. 42 44 #![feature(arbitrary_self_types)]
+29 -1
rust/kernel/ptr.rs
··· 2 2 3 3 //! Types and functions to work with pointers and addresses. 4 4 5 - use core::mem::align_of; 5 + pub mod projection; 6 + pub use crate::project_pointer as project; 7 + 8 + use core::mem::{ 9 + align_of, 10 + size_of, // 11 + }; 6 12 use core::num::NonZero; 7 13 8 14 /// Type representing an alignment, which is always a power of two. ··· 231 225 } 232 226 233 227 impl_alignable_uint!(u8, u16, u32, u64, usize); 228 + 229 + /// Trait to represent compile-time known size information. 230 + /// 231 + /// This is a generalization of [`size_of`] that works for dynamically sized types. 232 + pub trait KnownSize { 233 + /// Get the size of an object of this type in bytes, with the metadata of the given pointer. 234 + fn size(p: *const Self) -> usize; 235 + } 236 + 237 + impl<T> KnownSize for T { 238 + #[inline(always)] 239 + fn size(_: *const Self) -> usize { 240 + size_of::<T>() 241 + } 242 + } 243 + 244 + impl<T> KnownSize for [T] { 245 + #[inline(always)] 246 + fn size(p: *const Self) -> usize { 247 + p.len() * size_of::<T>() 248 + } 249 + }
+305
rust/kernel/ptr/projection.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! Infrastructure for handling projections. 4 + 5 + use core::{ 6 + mem::MaybeUninit, 7 + ops::Deref, // 8 + }; 9 + 10 + use crate::prelude::*; 11 + 12 + /// Error raised when a projection is attempted on an array or slice out of bounds. 13 + pub struct OutOfBound; 14 + 15 + impl From<OutOfBound> for Error { 16 + #[inline(always)] 17 + fn from(_: OutOfBound) -> Self { 18 + ERANGE 19 + } 20 + } 21 + 22 + /// A helper trait to perform index projection. 23 + /// 24 + /// This is similar to [`core::slice::SliceIndex`], but operates on raw pointers safely and 25 + /// fallibly. 26 + /// 27 + /// # Safety 28 + /// 29 + /// The implementation of `index` and `get` (if [`Some`] is returned) must ensure that, if provided 30 + /// input pointer `slice` and returned pointer `output`, then: 31 + /// - `output` has the same provenance as `slice`; 32 + /// - `output.byte_offset_from(slice)` is between 0 to 33 + /// `KnownSize::size(slice) - KnownSize::size(output)`. 34 + /// 35 + /// This means that if the input pointer is valid, then pointer returned by `get` or `index` is 36 + /// also valid. 37 + #[diagnostic::on_unimplemented(message = "`{Self}` cannot be used to index `{T}`")] 38 + #[doc(hidden)] 39 + pub unsafe trait ProjectIndex<T: ?Sized>: Sized { 40 + type Output: ?Sized; 41 + 42 + /// Returns an index-projected pointer, if in bounds. 43 + fn get(self, slice: *mut T) -> Option<*mut Self::Output>; 44 + 45 + /// Returns an index-projected pointer; fail the build if it cannot be proved to be in bounds. 46 + #[inline(always)] 47 + fn index(self, slice: *mut T) -> *mut Self::Output { 48 + Self::get(self, slice).unwrap_or_else(|| build_error!()) 49 + } 50 + } 51 + 52 + // Forward array impl to slice impl. 53 + // 54 + // SAFETY: Safety requirement guaranteed by the forwarded impl. 
55 + unsafe impl<T, I, const N: usize> ProjectIndex<[T; N]> for I 56 + where 57 + I: ProjectIndex<[T]>, 58 + { 59 + type Output = <I as ProjectIndex<[T]>>::Output; 60 + 61 + #[inline(always)] 62 + fn get(self, slice: *mut [T; N]) -> Option<*mut Self::Output> { 63 + <I as ProjectIndex<[T]>>::get(self, slice) 64 + } 65 + 66 + #[inline(always)] 67 + fn index(self, slice: *mut [T; N]) -> *mut Self::Output { 68 + <I as ProjectIndex<[T]>>::index(self, slice) 69 + } 70 + } 71 + 72 + // SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to 73 + // not exceed the required bound. 74 + unsafe impl<T> ProjectIndex<[T]> for usize { 75 + type Output = T; 76 + 77 + #[inline(always)] 78 + fn get(self, slice: *mut [T]) -> Option<*mut T> { 79 + if self >= slice.len() { 80 + None 81 + } else { 82 + Some(slice.cast::<T>().wrapping_add(self)) 83 + } 84 + } 85 + } 86 + 87 + // SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to 88 + // not exceed the required bound. 89 + unsafe impl<T> ProjectIndex<[T]> for core::ops::Range<usize> { 90 + type Output = [T]; 91 + 92 + #[inline(always)] 93 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 94 + let new_len = self.end.checked_sub(self.start)?; 95 + if self.end > slice.len() { 96 + return None; 97 + } 98 + Some(core::ptr::slice_from_raw_parts_mut( 99 + slice.cast::<T>().wrapping_add(self.start), 100 + new_len, 101 + )) 102 + } 103 + } 104 + 105 + // SAFETY: Safety requirement guaranteed by the forwarded impl. 106 + unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeTo<usize> { 107 + type Output = [T]; 108 + 109 + #[inline(always)] 110 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 111 + (0..self.end).get(slice) 112 + } 113 + } 114 + 115 + // SAFETY: Safety requirement guaranteed by the forwarded impl. 
116 + unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFrom<usize> { 117 + type Output = [T]; 118 + 119 + #[inline(always)] 120 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 121 + (self.start..slice.len()).get(slice) 122 + } 123 + } 124 + 125 + // SAFETY: `get` returned the pointer as is, so it always has the same provenance and offset of 0. 126 + unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFull { 127 + type Output = [T]; 128 + 129 + #[inline(always)] 130 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 131 + Some(slice) 132 + } 133 + } 134 + 135 + /// A helper trait to perform field projection. 136 + /// 137 + /// This trait has a `DEREF` generic parameter so it can be implemented twice for types that 138 + /// implement [`Deref`]. This will cause an ambiguity error and thus block [`Deref`] types being 139 + /// used as base of projection, as they can inject unsoundness. Users therefore must not specify 140 + /// `DEREF` and should always leave it to be inferred. 141 + /// 142 + /// # Safety 143 + /// 144 + /// `proj` may only invoke `f` with a valid allocation, as the documentation of [`Self::proj`] 145 + /// describes. 146 + #[doc(hidden)] 147 + pub unsafe trait ProjectField<const DEREF: bool> { 148 + /// Project a pointer to a type to a pointer of a field. 149 + /// 150 + /// `f` may only be invoked with a valid allocation so it can safely obtain raw pointers to 151 + /// fields using `&raw mut`. 152 + /// 153 + /// This is needed because `base` might not point to a valid allocation, while `&raw mut` 154 + /// requires pointers to be in bounds of a valid allocation. 155 + /// 156 + /// # Safety 157 + /// 158 + /// `f` must return a pointer in bounds of the provided pointer. 159 + unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F; 160 + } 161 + 162 + // NOTE: in theory, this API should work for `T: ?Sized` and `F: ?Sized`, too. 
However, we cannot 163 + // currently support that as we need to obtain a valid allocation that `&raw const` can operate on. 164 + // 165 + // SAFETY: `proj` invokes `f` with valid allocation. 166 + unsafe impl<T> ProjectField<false> for T { 167 + #[inline(always)] 168 + unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F { 169 + // Create a valid allocation to start projection, as `base` is not necessarily so. The 170 + // memory is never actually used so it will be optimized out, so it should work even for 171 + // very large `T` (`memoffset` crate also relies on this). To be extra certain, we also 172 + // annotate `f` closure with `#[inline(always)]` in the macro. 173 + let mut place = MaybeUninit::uninit(); 174 + let place_base = place.as_mut_ptr(); 175 + let field = f(place_base); 176 + // SAFETY: `field` is in bounds from `base` per safety requirement. 177 + let offset = unsafe { field.byte_offset_from(place_base) }; 178 + // Use `wrapping_byte_offset` as `base` does not need to be of valid allocation. 179 + base.wrapping_byte_offset(offset).cast() 180 + } 181 + } 182 + 183 + // SAFETY: Vacuously satisfied. 184 + unsafe impl<T: Deref> ProjectField<true> for T { 185 + #[inline(always)] 186 + unsafe fn proj<F>(_: *mut Self, _: impl FnOnce(*mut Self) -> *mut F) -> *mut F { 187 + build_error!("this function is a guard against `Deref` impl and is never invoked"); 188 + } 189 + } 190 + 191 + /// Create a projection from a raw pointer. 192 + /// 193 + /// The projected pointer is within the memory region marked by the input pointer. There is no 194 + /// requirement that the input raw pointer needs to be valid, so this macro may be used for 195 + /// projecting pointers outside normal address space, e.g. I/O pointers. However, if the input 196 + /// pointer is valid, the projected pointer is also valid. 197 + /// 198 + /// Supported projections include field projections and index projections. 
199 + /// It is not allowed to project into types that implement custom [`Deref`] or 200 + /// [`Index`](core::ops::Index). 201 + /// 202 + /// The macro has basic syntax of `kernel::ptr::project!(ptr, projection)`, where `ptr` is an 203 + /// expression that evaluates to a raw pointer which serves as the base of projection. `projection` 204 + /// can be a projection expression of form `.field` (normally identifier, or numeral in case of 205 + /// tuple structs) or of form `[index]`. 206 + /// 207 + /// If a mutable pointer is needed, the macro input can be prefixed with the `mut` keyword, i.e. 208 + /// `kernel::ptr::project!(mut ptr, projection)`. By default, a const pointer is created. 209 + /// 210 + /// `ptr::project!` macro can perform both fallible indexing and build-time checked indexing. 211 + /// `[index]` form performs build-time bounds checking; if compiler fails to prove `[index]` is in 212 + /// bounds, compilation will fail. `[index]?` can be used to perform runtime bounds checking; 213 + /// `OutOfBound` error is raised via `?` if the index is out of bounds. 214 + /// 215 + /// # Examples 216 + /// 217 + /// Field projections are performed with `.field_name`: 218 + /// 219 + /// ``` 220 + /// struct MyStruct { field: u32, } 221 + /// let ptr: *const MyStruct = core::ptr::dangling(); 222 + /// let field_ptr: *const u32 = kernel::ptr::project!(ptr, .field); 223 + /// 224 + /// struct MyTupleStruct(u32, u32); 225 + /// 226 + /// fn proj(ptr: *const MyTupleStruct) { 227 + /// let field_ptr: *const u32 = kernel::ptr::project!(ptr, .1); 228 + /// } 229 + /// ``` 230 + /// 231 + /// Index projections are performed with `[index]`: 232 + /// 233 + /// ``` 234 + /// fn proj(ptr: *const [u8; 32]) -> Result { 235 + /// let field_ptr: *const u8 = kernel::ptr::project!(ptr, [1]); 236 + /// // The following invocation, if uncommented, would fail the build. 
237 + /// // 238 + /// // kernel::ptr::project!(ptr, [128]); 239 + /// 240 + /// // This will raise an `OutOfBound` error (which is convertible to `ERANGE`). 241 + /// kernel::ptr::project!(ptr, [128]?); 242 + /// Ok(()) 243 + /// } 244 + /// ``` 245 + /// 246 + /// If you need to match on the error instead of propagate, put the invocation inside a closure: 247 + /// 248 + /// ``` 249 + /// let ptr: *const [u8; 32] = core::ptr::dangling(); 250 + /// let field_ptr: Result<*const u8> = (|| -> Result<_> { 251 + /// Ok(kernel::ptr::project!(ptr, [128]?)) 252 + /// })(); 253 + /// assert!(field_ptr.is_err()); 254 + /// ``` 255 + /// 256 + /// For mutable pointers, put `mut` as the first token in macro invocation. 257 + /// 258 + /// ``` 259 + /// let ptr: *mut [(u8, u16); 32] = core::ptr::dangling_mut(); 260 + /// let field_ptr: *mut u16 = kernel::ptr::project!(mut ptr, [1].1); 261 + /// ``` 262 + #[macro_export] 263 + macro_rules! project_pointer { 264 + (@gen $ptr:ident, ) => {}; 265 + // Field projection. `$field` needs to be `tt` to support tuple index like `.0`. 266 + (@gen $ptr:ident, .$field:tt $($rest:tt)*) => { 267 + // SAFETY: The provided closure always returns an in-bounds pointer. 268 + let $ptr = unsafe { 269 + $crate::ptr::projection::ProjectField::proj($ptr, #[inline(always)] |ptr| { 270 + // Check unaligned field. Not all users (e.g. DMA) can handle unaligned 271 + // projections. 272 + if false { 273 + let _ = &(*ptr).$field; 274 + } 275 + // SAFETY: `$field` is in bounds, and no implicit `Deref` is possible (if the 276 + // type implements `Deref`, Rust cannot infer the generic parameter `DEREF`). 277 + &raw mut (*ptr).$field 278 + }) 279 + }; 280 + $crate::ptr::project!(@gen $ptr, $($rest)*) 281 + }; 282 + // Fallible index projection. 283 + (@gen $ptr:ident, [$index:expr]? 
$($rest:tt)*) => { 284 + let $ptr = $crate::ptr::projection::ProjectIndex::get($index, $ptr) 285 + .ok_or($crate::ptr::projection::OutOfBound)?; 286 + $crate::ptr::project!(@gen $ptr, $($rest)*) 287 + }; 288 + // Build-time checked index projection. 289 + (@gen $ptr:ident, [$index:expr] $($rest:tt)*) => { 290 + let $ptr = $crate::ptr::projection::ProjectIndex::index($index, $ptr); 291 + $crate::ptr::project!(@gen $ptr, $($rest)*) 292 + }; 293 + (mut $ptr:expr, $($proj:tt)*) => {{ 294 + let ptr: *mut _ = $ptr; 295 + $crate::ptr::project!(@gen ptr, $($proj)*); 296 + ptr 297 + }}; 298 + ($ptr:expr, $($proj:tt)*) => {{ 299 + let ptr = <*const _>::cast_mut($ptr); 300 + // We currently always project using mutable pointer, as it is not decided whether `&raw 301 + // const` allows the resulting pointer to be mutated (see documentation of `addr_of!`). 302 + $crate::ptr::project!(@gen ptr, $($proj)*); 303 + ptr.cast_const() 304 + }}; 305 + }
+2 -2
rust/kernel/str.rs
··· 664 664 /// 665 665 /// * The first byte of `buffer` is always zero. 666 666 /// * The length of `buffer` is at least 1. 667 - pub(crate) struct NullTerminatedFormatter<'a> { 667 + pub struct NullTerminatedFormatter<'a> { 668 668 buffer: &'a mut [u8], 669 669 } 670 670 671 671 impl<'a> NullTerminatedFormatter<'a> { 672 672 /// Create a new [`Self`] instance. 673 - pub(crate) fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> { 673 + pub fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> { 674 674 *(buffer.first_mut()?) = 0; 675 675 676 676 // INVARIANT:
+23 -46
rust/pin-init/internal/src/init.rs
··· 62 62 63 63 enum InitializerAttribute { 64 64 DefaultError(DefaultErrorAttribute), 65 - DisableInitializedFieldAccess, 66 65 } 67 66 68 67 struct DefaultErrorAttribute { ··· 85 86 let error = error.map_or_else( 86 87 || { 87 88 if let Some(default_error) = attrs.iter().fold(None, |acc, attr| { 89 + #[expect(irrefutable_let_patterns)] 88 90 if let InitializerAttribute::DefaultError(DefaultErrorAttribute { ty }) = attr { 89 91 Some(ty.clone()) 90 92 } else { ··· 145 145 }; 146 146 // `mixed_site` ensures that the data is not accessible to the user-controlled code. 147 147 let data = Ident::new("__data", Span::mixed_site()); 148 - let init_fields = init_fields( 149 - &fields, 150 - pinned, 151 - !attrs 152 - .iter() 153 - .any(|attr| matches!(attr, InitializerAttribute::DisableInitializedFieldAccess)), 154 - &data, 155 - &slot, 156 - ); 148 + let init_fields = init_fields(&fields, pinned, &data, &slot); 157 149 let field_check = make_field_check(&fields, init_kind, &path); 158 150 Ok(quote! {{ 159 - // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return 160 - // type and shadow it later when we insert the arbitrary user code. That way there will be 161 - // no possibility of returning without `unsafe`. 162 - struct __InitOk; 163 - 164 151 // Get the data about fields from the supplied type. 165 152 // SAFETY: TODO 166 153 let #data = unsafe { ··· 157 170 #path::#get_data() 158 171 }; 159 172 // Ensure that `#data` really is of type `#data` and help with type inference: 160 - let init = ::pin_init::__internal::#data_trait::make_closure::<_, __InitOk, #error>( 173 + let init = ::pin_init::__internal::#data_trait::make_closure::<_, #error>( 161 174 #data, 162 175 move |slot| { 163 - { 164 - // Shadow the structure so it cannot be used to return early. 
165 - struct __InitOk; 166 - #zeroable_check 167 - #this 168 - #init_fields 169 - #field_check 170 - } 171 - Ok(__InitOk) 176 + #zeroable_check 177 + #this 178 + #init_fields 179 + #field_check 180 + // SAFETY: we are the `init!` macro that is allowed to call this. 181 + Ok(unsafe { ::pin_init::__internal::InitOk::new() }) 172 182 } 173 183 ); 174 184 let init = move |slot| -> ::core::result::Result<(), #error> { ··· 220 236 fn init_fields( 221 237 fields: &Punctuated<InitializerField, Token![,]>, 222 238 pinned: bool, 223 - generate_initialized_accessors: bool, 224 239 data: &Ident, 225 240 slot: &Ident, 226 241 ) -> TokenStream { ··· 243 260 }); 244 261 // Again span for better diagnostics 245 262 let write = quote_spanned!(ident.span()=> ::core::ptr::write); 263 + // NOTE: the field accessor ensures that the initialized field is properly aligned. 264 + // Unaligned fields will cause the compiler to emit E0793. We do not support 265 + // unaligned fields since `Init::__init` requires an aligned pointer; the call to 266 + // `ptr::write` below has the same requirement. 246 267 let accessor = if pinned { 247 268 let project_ident = format_ident!("__project_{ident}"); 248 269 quote! { ··· 259 272 unsafe { &mut (*#slot).#ident } 260 273 } 261 274 }; 262 - let accessor = generate_initialized_accessors.then(|| { 263 - quote! { 264 - #(#cfgs)* 265 - #[allow(unused_variables)] 266 - let #ident = #accessor; 267 - } 268 - }); 269 275 quote! { 270 276 #(#attrs)* 271 277 { ··· 266 286 // SAFETY: TODO 267 287 unsafe { #write(::core::ptr::addr_of_mut!((*#slot).#ident), #value_ident) }; 268 288 } 269 - #accessor 289 + #(#cfgs)* 290 + #[allow(unused_variables)] 291 + let #ident = #accessor; 270 292 } 271 293 } 272 294 InitializerKind::Init { ident, value, .. } => { 273 295 // Again span for better diagnostics 274 296 let init = format_ident!("init", span = value.span()); 297 + // NOTE: the field accessor ensures that the initialized field is properly aligned. 
298 + // Unaligned fields will cause the compiler to emit E0793. We do not support 299 + // unaligned fields since `Init::__init` requires an aligned pointer; the call to 300 + // `ptr::write` below has the same requirement. 275 301 let (value_init, accessor) = if pinned { 276 302 let project_ident = format_ident!("__project_{ident}"); 277 303 ( ··· 312 326 }, 313 327 ) 314 328 }; 315 - let accessor = generate_initialized_accessors.then(|| { 316 - quote! { 317 - #(#cfgs)* 318 - #[allow(unused_variables)] 319 - let #ident = #accessor; 320 - } 321 - }); 322 329 quote! { 323 330 #(#attrs)* 324 331 { 325 332 let #init = #value; 326 333 #value_init 327 334 } 328 - #accessor 335 + #(#cfgs)* 336 + #[allow(unused_variables)] 337 + let #ident = #accessor; 329 338 } 330 339 } 331 340 InitializerKind::Code { block: value, .. } => quote! { ··· 447 466 if a.path().is_ident("default_error") { 448 467 a.parse_args::<DefaultErrorAttribute>() 449 468 .map(InitializerAttribute::DefaultError) 450 - } else if a.path().is_ident("disable_initialized_field_access") { 451 - a.meta 452 - .require_path_only() 453 - .map(|_| InitializerAttribute::DisableInitializedFieldAccess) 454 469 } else { 455 470 Err(syn::Error::new_spanned(a, "unknown initializer attribute")) 456 471 }
+24 -4
rust/pin-init/src/__internal.rs
··· 46 46 } 47 47 } 48 48 49 + /// Token type to signify successful initialization. 50 + /// 51 + /// Can only be constructed via the unsafe [`Self::new`] function. The initializer macros use this 52 + /// token type to prevent returning `Ok` from an initializer without initializing all fields. 53 + pub struct InitOk(()); 54 + 55 + impl InitOk { 56 + /// Creates a new token. 57 + /// 58 + /// # Safety 59 + /// 60 + /// This function may only be called from the `init!` macro in `../internal/src/init.rs`. 61 + #[inline(always)] 62 + pub unsafe fn new() -> Self { 63 + Self(()) 64 + } 65 + } 66 + 49 67 /// This trait is only implemented via the `#[pin_data]` proc-macro. It is used to facilitate 50 68 /// the pin projections within the initializers. 51 69 /// ··· 86 68 type Datee: ?Sized + HasPinData; 87 69 88 70 /// Type inference helper function. 89 - fn make_closure<F, O, E>(self, f: F) -> F 71 + #[inline(always)] 72 + fn make_closure<F, E>(self, f: F) -> F 90 73 where 91 - F: FnOnce(*mut Self::Datee) -> Result<O, E>, 74 + F: FnOnce(*mut Self::Datee) -> Result<InitOk, E>, 92 75 { 93 76 f 94 77 } ··· 117 98 type Datee: ?Sized + HasInitData; 118 99 119 100 /// Type inference helper function. 120 - fn make_closure<F, O, E>(self, f: F) -> F 101 + #[inline(always)] 102 + fn make_closure<F, E>(self, f: F) -> F 121 103 where 122 - F: FnOnce(*mut Self::Datee) -> Result<O, E>, 104 + F: FnOnce(*mut Self::Datee) -> Result<InitOk, E>, 123 105 { 124 106 f 125 107 }
+16 -14
samples/rust/rust_dma.rs
··· 68 68 CoherentAllocation::alloc_coherent(pdev.as_ref(), TEST_VALUES.len(), GFP_KERNEL)?; 69 69 70 70 for (i, value) in TEST_VALUES.into_iter().enumerate() { 71 - kernel::dma_write!(ca[i] = MyStruct::new(value.0, value.1))?; 71 + kernel::dma_write!(ca, [i]?, MyStruct::new(value.0, value.1)); 72 72 } 73 73 74 74 let size = 4 * page::PAGE_SIZE; ··· 85 85 } 86 86 } 87 87 88 + impl DmaSampleDriver { 89 + fn check_dma(&self) -> Result { 90 + for (i, value) in TEST_VALUES.into_iter().enumerate() { 91 + let val0 = kernel::dma_read!(self.ca, [i]?.h); 92 + let val1 = kernel::dma_read!(self.ca, [i]?.b); 93 + 94 + assert_eq!(val0, value.0); 95 + assert_eq!(val1, value.1); 96 + } 97 + 98 + Ok(()) 99 + } 100 + } 101 + 88 102 #[pinned_drop] 89 103 impl PinnedDrop for DmaSampleDriver { 90 104 fn drop(self: Pin<&mut Self>) { 91 105 dev_info!(self.pdev, "Unload DMA test driver.\n"); 92 106 93 - for (i, value) in TEST_VALUES.into_iter().enumerate() { 94 - let val0 = kernel::dma_read!(self.ca[i].h); 95 - let val1 = kernel::dma_read!(self.ca[i].b); 96 - assert!(val0.is_ok()); 97 - assert!(val1.is_ok()); 98 - 99 - if let Ok(val0) = val0 { 100 - assert_eq!(val0, value.0); 101 - } 102 - if let Ok(val1) = val1 { 103 - assert_eq!(val1, value.1); 104 - } 105 - } 107 + assert!(self.check_dma().is_ok()); 106 108 107 109 for (i, entry) in self.sgt.iter().enumerate() { 108 110 dev_info!(
+1
samples/workqueue/stall_detector/Makefile
··· 1 + obj-m += wq_stall.o
+98
samples/workqueue/stall_detector/wq_stall.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * wq_stall - Test module for the workqueue stall detector. 4 + * 5 + * Deliberately creates a workqueue stall so the watchdog fires and 6 + * prints diagnostic output. Useful for verifying that the stall 7 + * detector correctly identifies stuck workers and produces useful 8 + * backtraces. 9 + * 10 + * The stall is triggered by clearing PF_WQ_WORKER before sleeping, 11 + * which hides the worker from the concurrency manager. A second 12 + * work item queued on the same pool then sits in the worklist with 13 + * no worker available to process it. 14 + * 15 + * After ~30s the workqueue watchdog fires: 16 + * BUG: workqueue lockup - pool cpus=N ... 17 + * 18 + * Build: 19 + * make -C <kernel tree> M=samples/workqueue/stall_detector modules 20 + * 21 + * Copyright (c) 2026 Meta Platforms, Inc. and affiliates. 22 + * Copyright (c) 2026 Breno Leitao <leitao@debian.org> 23 + */ 24 + 25 + #include <linux/module.h> 26 + #include <linux/workqueue.h> 27 + #include <linux/wait.h> 28 + #include <linux/atomic.h> 29 + #include <linux/sched.h> 30 + 31 + static DECLARE_WAIT_QUEUE_HEAD(stall_wq_head); 32 + static atomic_t wake_condition = ATOMIC_INIT(0); 33 + static struct work_struct stall_work1; 34 + static struct work_struct stall_work2; 35 + 36 + static void stall_work2_fn(struct work_struct *work) 37 + { 38 + pr_info("wq_stall: second work item finally ran\n"); 39 + } 40 + 41 + static void stall_work1_fn(struct work_struct *work) 42 + { 43 + pr_info("wq_stall: first work item running on cpu %d\n", 44 + raw_smp_processor_id()); 45 + 46 + /* 47 + * Queue second item while we're still counted as running 48 + * (pool->nr_running > 0). Since schedule_work() on a per-CPU 49 + * workqueue targets raw_smp_processor_id(), item 2 lands on the 50 + * same pool. __queue_work -> kick_pool -> need_more_worker() 51 + * sees nr_running > 0 and does NOT wake a new worker. 
52 + */ 53 + schedule_work(&stall_work2); 54 + 55 + /* 56 + * Hide from the workqueue concurrency manager. Without 57 + * PF_WQ_WORKER, schedule() won't call wq_worker_sleeping(), 58 + * so nr_running is never decremented and no replacement 59 + * worker is created. Item 2 stays stuck in pool->worklist. 60 + */ 61 + current->flags &= ~PF_WQ_WORKER; 62 + 63 + pr_info("wq_stall: entering wait_event_idle (PF_WQ_WORKER cleared)\n"); 64 + pr_info("wq_stall: expect 'BUG: workqueue lockup' in ~30-60s\n"); 65 + wait_event_idle(stall_wq_head, atomic_read(&wake_condition) != 0); 66 + 67 + /* Restore so process_one_work() cleanup works correctly */ 68 + current->flags |= PF_WQ_WORKER; 69 + pr_info("wq_stall: woke up, PF_WQ_WORKER restored\n"); 70 + } 71 + 72 + static int __init wq_stall_init(void) 73 + { 74 + pr_info("wq_stall: loading\n"); 75 + 76 + INIT_WORK(&stall_work1, stall_work1_fn); 77 + INIT_WORK(&stall_work2, stall_work2_fn); 78 + schedule_work(&stall_work1); 79 + 80 + return 0; 81 + } 82 + 83 + static void __exit wq_stall_exit(void) 84 + { 85 + pr_info("wq_stall: unloading\n"); 86 + atomic_set(&wake_condition, 1); 87 + wake_up(&stall_wq_head); 88 + flush_work(&stall_work1); 89 + flush_work(&stall_work2); 90 + pr_info("wq_stall: all work flushed, module unloaded\n"); 91 + } 92 + 93 + module_init(wq_stall_init); 94 + module_exit(wq_stall_exit); 95 + 96 + MODULE_LICENSE("GPL"); 97 + MODULE_DESCRIPTION("Reproduce workqueue stall caused by PF_WQ_WORKER misuse"); 98 + MODULE_AUTHOR("Breno Leitao <leitao@debian.org>");
+3 -1
scripts/Makefile.build
··· 310 310 311 311 # The features in this list are the ones allowed for non-`rust/` code. 312 312 # 313 + # - Stable since Rust 1.79.0: `feature(slice_ptr_len)`. 313 314 # - Stable since Rust 1.81.0: `feature(lint_reasons)`. 314 315 # - Stable since Rust 1.82.0: `feature(asm_const)`, 315 316 # `feature(offset_of_nested)`, `feature(raw_ref_op)`. 317 + # - Stable since Rust 1.84.0: `feature(strict_provenance)`. 316 318 # - Stable since Rust 1.87.0: `feature(asm_goto)`. 317 319 # - Expected to become stable: `feature(arbitrary_self_types)`. 318 320 # - To be determined: `feature(used_with_arg)`. 319 321 # 320 322 # Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on 321 323 # the unstable features in use. 322 - rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,offset_of_nested,raw_ref_op,used_with_arg 324 + rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,offset_of_nested,raw_ref_op,slice_ptr_len,strict_provenance,used_with_arg 323 325 324 326 # `--out-dir` is required to avoid temporaries being created by `rustc` in the 325 327 # current working directory, which may be not accessible in the out-of-tree
+16 -3
sound/core/pcm_native.c
··· 2144 2144 for (;;) { 2145 2145 long tout; 2146 2146 struct snd_pcm_runtime *to_check; 2147 + unsigned int drain_rate; 2148 + snd_pcm_uframes_t drain_bufsz; 2149 + bool drain_no_period_wakeup; 2150 + 2147 2151 if (signal_pending(current)) { 2148 2152 result = -ERESTARTSYS; 2149 2153 break; ··· 2167 2163 snd_pcm_group_unref(group, substream); 2168 2164 if (!to_check) 2169 2165 break; /* all drained */ 2166 + /* 2167 + * Cache the runtime fields needed after unlock. 2168 + * A concurrent close() on the linked stream may free 2169 + * its runtime via snd_pcm_detach_substream() once we 2170 + * release the stream lock below. 2171 + */ 2172 + drain_no_period_wakeup = to_check->no_period_wakeup; 2173 + drain_rate = to_check->rate; 2174 + drain_bufsz = to_check->buffer_size; 2170 2175 init_waitqueue_entry(&wait, current); 2171 2176 set_current_state(TASK_INTERRUPTIBLE); 2172 2177 add_wait_queue(&to_check->sleep, &wait); 2173 2178 snd_pcm_stream_unlock_irq(substream); 2174 - if (runtime->no_period_wakeup) 2179 + if (drain_no_period_wakeup) 2175 2180 tout = MAX_SCHEDULE_TIMEOUT; 2176 2181 else { 2177 2182 tout = 100; 2178 - if (runtime->rate) { 2179 - long t = runtime->buffer_size * 1100 / runtime->rate; 2183 + if (drain_rate) { 2184 + long t = drain_bufsz * 1100 / drain_rate; 2180 2185 tout = max(t, tout); 2181 2186 } 2182 2187 tout = msecs_to_jiffies(tout);
+3
sound/hda/codecs/realtek/alc269.c
··· 6940 6940 SND_PCI_QUIRK(0x103c, 0x89da, "HP Spectre x360 14t-ea100", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), 6941 6941 SND_PCI_QUIRK(0x103c, 0x89e7, "HP Elite x2 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 6942 6942 SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED), 6943 + SND_PCI_QUIRK(0x103c, 0x8a1f, "HP Laptop 14s-dr5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), 6943 6944 SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), 6944 6945 SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), 6945 6946 SND_PCI_QUIRK(0x103c, 0x8a26, "HP Victus 16-d1xxx (MB 8A26)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), ··· 7274 7273 SND_PCI_QUIRK(0x1043, 0x1e93, "ASUS ExpertBook B9403CVAR", ALC294_FIXUP_ASUS_HPE), 7275 7274 SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C), 7276 7275 SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2), 7276 + HDA_CODEC_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1), 7277 7277 SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), 7278 7278 SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), 7279 7279 SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), ··· 7495 7493 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 7496 7494 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 7497 7495 SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 7496 + SND_PCI_QUIRK(0x17aa, 0x2288, "Thinkpad X390", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), 7498 7497 SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7499 7498 SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7500 7499 SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", 
ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+9
sound/hda/codecs/realtek/alc662.c
··· 313 313 ALC897_FIXUP_HEADSET_MIC_PIN2, 314 314 ALC897_FIXUP_UNIS_H3C_X500S, 315 315 ALC897_FIXUP_HEADSET_MIC_PIN3, 316 + ALC897_FIXUP_H610M_HP_PIN, 316 317 }; 317 318 318 319 static const struct hda_fixup alc662_fixups[] = { ··· 767 766 { } 768 767 }, 769 768 }, 769 + [ALC897_FIXUP_H610M_HP_PIN] = { 770 + .type = HDA_FIXUP_PINS, 771 + .v.pins = (const struct hda_pintbl[]) { 772 + { 0x19, 0x0321403f }, /* HP out */ 773 + { } 774 + }, 775 + }, 770 776 }; 771 777 772 778 static const struct hda_quirk alc662_fixup_tbl[] = { ··· 823 815 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), 824 816 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), 825 817 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), 818 + SND_PCI_QUIRK(0x1458, 0xa194, "H610M H V2 DDR4", ALC897_FIXUP_H610M_HP_PIN), 826 819 SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE), 827 820 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS), 828 821 SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
+14 -4
sound/soc/amd/acp/acp-mach-common.c
··· 127 127 if (drvdata->hs_codec_id != RT5682) 128 128 return -EINVAL; 129 129 130 - drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk"); 131 - drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk"); 130 + drvdata->wclk = devm_clk_get(component->dev, "rt5682-dai-wclk"); 131 + if (IS_ERR(drvdata->wclk)) 132 + return PTR_ERR(drvdata->wclk); 133 + 134 + drvdata->bclk = devm_clk_get(component->dev, "rt5682-dai-bclk"); 135 + if (IS_ERR(drvdata->bclk)) 136 + return PTR_ERR(drvdata->bclk); 132 137 133 138 ret = snd_soc_dapm_new_controls(dapm, rt5682_widgets, 134 139 ARRAY_SIZE(rt5682_widgets)); ··· 375 370 return -EINVAL; 376 371 377 372 if (!drvdata->soc_mclk) { 378 - drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk"); 379 - drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk"); 373 + drvdata->wclk = devm_clk_get(component->dev, "rt5682-dai-wclk"); 374 + if (IS_ERR(drvdata->wclk)) 375 + return PTR_ERR(drvdata->wclk); 376 + 377 + drvdata->bclk = devm_clk_get(component->dev, "rt5682-dai-bclk"); 378 + if (IS_ERR(drvdata->bclk)) 379 + return PTR_ERR(drvdata->bclk); 380 380 } 381 381 382 382 ret = snd_soc_dapm_new_controls(dapm, rt5682s_widgets,
+7 -2
sound/soc/amd/acp3x-rt5682-max9836.c
··· 94 94 return ret; 95 95 } 96 96 97 - rt5682_dai_wclk = clk_get(component->dev, "rt5682-dai-wclk"); 98 - rt5682_dai_bclk = clk_get(component->dev, "rt5682-dai-bclk"); 97 + rt5682_dai_wclk = devm_clk_get(component->dev, "rt5682-dai-wclk"); 98 + if (IS_ERR(rt5682_dai_wclk)) 99 + return PTR_ERR(rt5682_dai_wclk); 100 + 101 + rt5682_dai_bclk = devm_clk_get(component->dev, "rt5682-dai-bclk"); 102 + if (IS_ERR(rt5682_dai_bclk)) 103 + return PTR_ERR(rt5682_dai_bclk); 99 104 100 105 ret = snd_soc_card_jack_new_pins(card, "Headset Jack", 101 106 SND_JACK_HEADSET |
+1 -1
sound/soc/codecs/rt1011.c
··· 1047 1047 struct snd_ctl_elem_value *ucontrol) 1048 1048 { 1049 1049 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); 1050 - struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_to_dapm(kcontrol); 1050 + struct snd_soc_dapm_context *dapm = snd_soc_component_to_dapm(component); 1051 1051 struct rt1011_priv *rt1011 = 1052 1052 snd_soc_component_get_drvdata(component); 1053 1053
+8 -4
sound/soc/generic/simple-card-utils.c
··· 1038 1038 else 1039 1039 port = np; 1040 1040 1041 - struct device_node *ports __free(device_node) = of_get_parent(port); 1042 - struct device_node *top __free(device_node) = of_get_parent(ports); 1043 - struct device_node *ports0 __free(device_node) = of_get_child_by_name(top, "ports"); 1041 + struct device_node *ports __free(device_node) = of_get_parent(port); 1042 + const char *at = strchr(kbasename(ports->full_name), '@'); 1044 1043 1045 - return ports0 == ports; 1044 + /* 1045 + * Since child iteration order may differ 1046 + * between a base DT and DT overlays, 1047 + * string match "ports" or "ports@0" in the node name instead. 1048 + */ 1049 + return !at || !strcmp(at, "@0"); 1046 1050 } 1047 1051 EXPORT_SYMBOL_GPL(graph_util_is_ports0); 1048 1052
+1
sound/soc/qcom/qdsp6/q6apm-dai.c
··· 838 838 .ack = q6apm_dai_ack, 839 839 .compress_ops = &q6apm_dai_compress_ops, 840 840 .use_dai_pcm_id = true, 841 + .remove_order = SND_SOC_COMP_ORDER_EARLY, 841 842 }; 842 843 843 844 static int q6apm_dai_probe(struct platform_device *pdev)
+1
sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
··· 278 278 .of_xlate_dai_name = q6dsp_audio_ports_of_xlate_dai_name, 279 279 .be_pcm_base = AUDIOREACH_BE_PCM_BASE, 280 280 .use_dai_pcm_id = true, 281 + .remove_order = SND_SOC_COMP_ORDER_FIRST, 281 282 }; 282 283 283 284 static int q6apm_lpass_dai_dev_probe(struct platform_device *pdev)
+1
sound/soc/qcom/qdsp6/q6apm.c
··· 715 715 .name = APM_AUDIO_DRV_NAME, 716 716 .probe = q6apm_audio_probe, 717 717 .remove = q6apm_audio_remove, 718 + .remove_order = SND_SOC_COMP_ORDER_LAST, 718 719 }; 719 720 720 721 static int apm_probe(gpr_device_t *gdev)
+8 -3
sound/soc/soc-core.c
··· 462 462 463 463 list_del(&rtd->list); 464 464 465 - if (delayed_work_pending(&rtd->delayed_work)) 466 - flush_delayed_work(&rtd->delayed_work); 465 + flush_delayed_work(&rtd->delayed_work); 467 466 snd_soc_pcm_component_free(rtd); 468 467 469 468 /* ··· 1863 1864 1864 1865 /* 1865 1866 * Check if a DMI field is valid, i.e. not containing any string 1866 - * in the black list. 1867 + * in the black list and not the empty string. 1867 1868 */ 1868 1869 static int is_dmi_valid(const char *field) 1869 1870 { 1870 1871 int i = 0; 1872 + 1873 + if (!field[0]) 1874 + return 0; 1871 1875 1872 1876 while (dmi_blacklist[i]) { 1873 1877 if (strstr(field, dmi_blacklist[i])) ··· 2124 2122 for_each_card_rtds(card, rtd) 2125 2123 if (rtd->initialized) 2126 2124 snd_soc_link_exit(rtd); 2125 + /* flush delayed work before removing DAIs and DAPM widgets */ 2126 + snd_soc_flush_all_delayed_work(card); 2127 + 2127 2128 /* remove and free each DAI */ 2128 2129 soc_remove_link_dais(card); 2129 2130 soc_remove_link_components(card);
+11
sound/soc/tegra/tegra_audio_graph_card.c
··· 231 231 .plla_out0_rates[x11_RATE] = 45158400, 232 232 }; 233 233 234 + static const struct tegra_audio_cdata tegra238_data = { 235 + /* PLLA */ 236 + .plla_rates[x8_RATE] = 1277952000, 237 + .plla_rates[x11_RATE] = 1264435200, 238 + /* PLLA_OUT0 */ 239 + .plla_out0_rates[x8_RATE] = 49152000, 240 + .plla_out0_rates[x11_RATE] = 45158400, 241 + }; 242 + 234 243 static const struct tegra_audio_cdata tegra264_data = { 235 244 /* PLLA1 */ 236 245 .plla_rates[x8_RATE] = 983040000, ··· 254 245 .data = &tegra210_data }, 255 246 { .compatible = "nvidia,tegra186-audio-graph-card", 256 247 .data = &tegra186_data }, 248 + { .compatible = "nvidia,tegra238-audio-graph-card", 249 + .data = &tegra238_data }, 257 250 { .compatible = "nvidia,tegra264-audio-graph-card", 258 251 .data = &tegra264_data }, 259 252 {},
+2
sound/usb/mixer_scarlett2.c
··· 8251 8251 8252 8252 if (desc->bInterfaceClass != 255) 8253 8253 continue; 8254 + if (desc->bNumEndpoints < 1) 8255 + continue; 8254 8256 8255 8257 epd = get_endpoint(intf->altsetting, 0); 8256 8258 private->bInterfaceNumber = desc->bInterfaceNumber;
+2
sound/usb/quirks.c
··· 2243 2243 QUIRK_FLAG_IFACE_DELAY | QUIRK_FLAG_FORCE_IFACE_RESET), 2244 2244 DEVICE_FLG(0x0661, 0x0883, /* iBasso DC04 Ultra */ 2245 2245 QUIRK_FLAG_DSD_RAW), 2246 + DEVICE_FLG(0x0666, 0x0880, /* SPACETOUCH USB Audio */ 2247 + QUIRK_FLAG_FORCE_IFACE_RESET | QUIRK_FLAG_IFACE_DELAY), 2246 2248 DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */ 2247 2249 QUIRK_FLAG_IGNORE_CTL_ERROR), 2248 2250 DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+1 -1
tools/arch/x86/include/asm/amd/ibs.h
··· 110 110 __u64 ld_op:1, /* 0: load op */ 111 111 st_op:1, /* 1: store op */ 112 112 dc_l1tlb_miss:1, /* 2: data cache L1TLB miss */ 113 - dc_l2tlb_miss:1, /* 3: data cache L2TLB hit in 2M page */ 113 + dc_l2tlb_miss:1, /* 3: data cache L2TLB miss in 2M page */ 114 114 dc_l1tlb_hit_2m:1, /* 4: data cache L1TLB hit in 2M page */ 115 115 dc_l1tlb_hit_1g:1, /* 5: data cache L1TLB hit in 1G page */ 116 116 dc_l2tlb_hit_2m:1, /* 6: data cache L2TLB hit in 2M page */
+3 -1
tools/arch/x86/include/asm/cpufeatures.h
··· 84 84 #define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */ 85 85 #define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */ 86 86 #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */ 87 - #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */ 87 + #define X86_FEATURE_SYSFAST32 ( 3*32+15) /* sysenter/syscall in IA32 userspace */ 88 88 #define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */ 89 89 #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */ 90 90 #define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */ ··· 326 326 #define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */ 327 327 #define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */ 328 328 #define X86_FEATURE_LAM (12*32+26) /* "lam" Linear Address Masking */ 329 + #define X86_FEATURE_MOVRS (12*32+31) /* MOVRS instructions */ 329 330 330 331 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ 331 332 #define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */ ··· 473 472 #define X86_FEATURE_GP_ON_USER_CPUID (20*32+17) /* User CPUID faulting */ 474 473 475 474 #define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */ 475 + #define X86_FEATURE_ERAPS (20*32+24) /* Enhanced Return Address Predictor Security */ 476 476 #define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */ 477 477 #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */ 478 478 #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
+6
tools/arch/x86/include/asm/msr-index.h
··· 263 263 #define MSR_SNOOP_RSP_0 0x00001328 264 264 #define MSR_SNOOP_RSP_1 0x00001329 265 265 266 + #define MSR_OMR_0 0x000003e0 267 + #define MSR_OMR_1 0x000003e1 268 + #define MSR_OMR_2 0x000003e2 269 + #define MSR_OMR_3 0x000003e3 270 + 266 271 #define MSR_LBR_SELECT 0x000001c8 267 272 #define MSR_LBR_TOS 0x000001c9 268 273 ··· 1224 1219 #define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e 1225 1220 #define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f 1226 1221 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 1222 + #define MSR_CORE_PERF_GLOBAL_STATUS_SET 0x00000391 1227 1223 1228 1224 #define MSR_PERF_METRICS 0x00000329 1229 1225
+6 -2
tools/arch/x86/include/uapi/asm/kvm.h
··· 503 503 #define KVM_X86_GRP_SEV 1 504 504 # define KVM_X86_SEV_VMSA_FEATURES 0 505 505 # define KVM_X86_SNP_POLICY_BITS 1 506 + # define KVM_X86_SEV_SNP_REQ_CERTS 2 506 507 507 508 struct kvm_vmx_nested_state_data { 508 509 __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; ··· 744 743 KVM_SEV_SNP_LAUNCH_START = 100, 745 744 KVM_SEV_SNP_LAUNCH_UPDATE, 746 745 KVM_SEV_SNP_LAUNCH_FINISH, 746 + KVM_SEV_SNP_ENABLE_REQ_CERTS, 747 747 748 748 KVM_SEV_NR_MAX, 749 749 }; ··· 916 914 __u64 pad1[4]; 917 915 }; 918 916 919 - #define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0) 920 - #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1) 917 + #define KVM_X2APIC_API_USE_32BIT_IDS _BITULL(0) 918 + #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK _BITULL(1) 919 + #define KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST _BITULL(2) 920 + #define KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST _BITULL(3) 921 921 922 922 struct kvm_hyperv_eventfd { 923 923 __u32 conn_id;
+4
tools/bootconfig/samples/bad-non-closed-brace.bconf
··· 1 + foo { 2 + bar { 3 + buz 4 + }
+19
tools/bootconfig/samples/bad-over-max-brace.bconf
··· 1 + key1 { 2 + key2 { 3 + key3 { 4 + key4 { 5 + key5 { 6 + key6 { 7 + key7 { 8 + key8 { 9 + key9 { 10 + key10 { 11 + key11 { 12 + key12 { 13 + key13 { 14 + key14 { 15 + key15 { 16 + key16 { 17 + key17 { 18 + }}}}}}}}}}}}}}}}} 19 +
+1
tools/bootconfig/samples/exp-good-nested-brace.bconf
··· 1 + key1.key2.key3.key4.key5.key6.key7.key8.key9.key10.key11.key12.key13.key14.key15.key16;
+18
tools/bootconfig/samples/good-nested-brace.bconf
··· 1 + key1 { 2 + key2 { 3 + key3 { 4 + key4 { 5 + key5 { 6 + key6 { 7 + key7 { 8 + key8 { 9 + key9 { 10 + key10 { 11 + key11 { 12 + key12 { 13 + key13 { 14 + key14 { 15 + key15 { 16 + key16 { 17 + }}}}}}}}}}}}}}}} 18 +
+9
tools/bootconfig/test-bootconfig.sh
··· 171 171 xfail grep -q 'val[[:space:]]' $OUTFILE 172 172 xpass grep -q 'val2[[:space:]]' $OUTFILE 173 173 174 + echo "Showing correct line:column of no closing brace" 175 + cat > $TEMPCONF << EOF 176 + foo { 177 + bar { 178 + } 179 + EOF 180 + $BOOTCONF -a $TEMPCONF $INITRD 2> $OUTFILE 181 + xpass grep -q "1:1" $OUTFILE 182 + 174 183 echo "=== expected failure cases ===" 175 184 for i in samples/bad-* ; do 176 185 xfail $BOOTCONF -a $i $INITRD
+9
tools/build/Build.include
··· 99 99 cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXXFLAGS_$(basetarget).o) $(CXXFLAGS_$(obj)) 100 100 101 101 ### 102 + # Rust flags to be used on rule definition, includes: 103 + # - global $(RUST_FLAGS) 104 + # - per target Rust flags 105 + # - per object Rust flags 106 + rust_flags_1 = $(RUST_FLAGS) $(RUST_FLAGS_$(basetarget).o) $(RUST_FLAGS_$(obj)) 107 + rust_flags_2 = $(filter-out $(RUST_FLAGS_REMOVE_$(basetarget).o), $(rust_flags_1)) 108 + rust_flags = $(filter-out $(RUST_FLAGS_REMOVE_$(obj)), $(rust_flags_2)) 109 + 110 + ### 102 111 ## HOSTCC C flags 103 112 104 113 host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
+4 -2
tools/build/Makefile.build
··· 70 70 # If there's nothing to link, create empty $@ object. 71 71 quiet_cmd_ld_multi = LD $@ 72 72 cmd_ld_multi = $(if $(strip $(obj-y)),\ 73 - $(LD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@) 73 + printf "$(objprefix)%s " $(patsubst $(objprefix)%,%,$(filter $(obj-y),$^)) | \ 74 + xargs $(LD) -r -o $@,rm -f $@; $(AR) rcs $@) 74 75 75 76 quiet_cmd_host_ld_multi = HOSTLD $@ 76 77 cmd_host_ld_multi = $(if $(strip $(obj-y)),\ 77 - $(HOSTLD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@) 78 + printf "$(objprefix)%s " $(patsubst $(objprefix)%,%,$(filter $(obj-y),$^)) | \ 79 + xargs $(HOSTLD) -r -o $@,rm -f $@; $(HOSTAR) rcs $@) 78 80 79 81 rust_common_cmd = \ 80 82 $(RUSTC) $(rust_flags) \
-24
tools/include/linux/coresight-pmu.h
··· 22 22 #define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2)) 23 23 24 24 /* 25 - * Below are the definition of bit offsets for perf option, and works as 26 - * arbitrary values for all ETM versions. 27 - * 28 - * Most of them are orignally from ETMv3.5/PTM's ETMCR config, therefore, 29 - * ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and 30 - * directly use below macros as config bits. 31 - */ 32 - #define ETM_OPT_BRANCH_BROADCAST 8 33 - #define ETM_OPT_CYCACC 12 34 - #define ETM_OPT_CTXTID 14 35 - #define ETM_OPT_CTXTID2 15 36 - #define ETM_OPT_TS 28 37 - #define ETM_OPT_RETSTK 29 38 - 39 - /* ETMv4 CONFIGR programming bits for the ETM OPTs */ 40 - #define ETM4_CFG_BIT_BB 3 41 - #define ETM4_CFG_BIT_CYCACC 4 42 - #define ETM4_CFG_BIT_CTXTID 6 43 - #define ETM4_CFG_BIT_VMID 7 44 - #define ETM4_CFG_BIT_TS 11 45 - #define ETM4_CFG_BIT_RETSTK 12 46 - #define ETM4_CFG_BIT_VMID_OPT 15 47 - 48 - /* 49 25 * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload. 50 26 * Used to associate a CPU with the CoreSight Trace ID. 51 27 * [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
+7 -2
tools/include/linux/gfp_types.h
··· 139 139 * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. 140 140 * 141 141 * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension. 142 + * mark_obj_codetag_empty() should be called upon freeing for objects allocated 143 + * with this flag to indicate that their NULL tags are expected and normal. 142 144 */ 143 145 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) 144 146 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) ··· 311 309 * 312 310 * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower 313 311 * watermark is applied to allow access to "atomic reserves". 314 - * The current implementation doesn't support NMI and few other strict 315 - * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT. 312 + * The current implementation doesn't support NMI, nor contexts that disable 313 + * preemption under PREEMPT_RT. This includes raw_spin_lock() and plain 314 + * preempt_disable() - see "Memory allocation" in 315 + * Documentation/core-api/real-time/differences.rst for more info. 316 316 * 317 317 * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires 318 318 * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. ··· 325 321 * %GFP_NOWAIT is for kernel allocations that should not stall for direct 326 322 * reclaim, start physical IO or use any filesystem callback. It is very 327 323 * likely to fail to allocate memory, even for very small allocations. 324 + * The same restrictions on calling contexts apply as for %GFP_ATOMIC. 328 325 * 329 326 * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages 330 327 * that do not require the starting of any physical IO.
+4 -1
tools/include/uapi/asm-generic/unistd.h
··· 860 860 #define __NR_listns 470 861 861 __SYSCALL(__NR_listns, sys_listns) 862 862 863 + #define __NR_rseq_slice_yield 471 864 + __SYSCALL(__NR_rseq_slice_yield, sys_rseq_slice_yield) 865 + 863 866 #undef __NR_syscalls 864 - #define __NR_syscalls 471 867 + #define __NR_syscalls 472 865 868 866 869 /* 867 870 * 32 bit systems traditionally used different
+23 -1
tools/include/uapi/linux/kvm.h
··· 135 135 } u; 136 136 }; 137 137 138 + struct kvm_exit_snp_req_certs { 139 + __u64 gpa; 140 + __u64 npages; 141 + __u64 ret; 142 + }; 143 + 138 144 #define KVM_S390_GET_SKEYS_NONE 1 139 145 #define KVM_S390_SKEYS_MAX 1048576 140 146 ··· 186 180 #define KVM_EXIT_MEMORY_FAULT 39 187 181 #define KVM_EXIT_TDX 40 188 182 #define KVM_EXIT_ARM_SEA 41 183 + #define KVM_EXIT_ARM_LDST64B 42 184 + #define KVM_EXIT_SNP_REQ_CERTS 43 189 185 190 186 /* For KVM_EXIT_INTERNAL_ERROR */ 191 187 /* Emulate instruction failed. */ ··· 410 402 } eoi; 411 403 /* KVM_EXIT_HYPERV */ 412 404 struct kvm_hyperv_exit hyperv; 413 - /* KVM_EXIT_ARM_NISV */ 405 + /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */ 414 406 struct { 415 407 __u64 esr_iss; 416 408 __u64 fault_ipa; ··· 490 482 __u64 gva; 491 483 __u64 gpa; 492 484 } arm_sea; 485 + /* KVM_EXIT_SNP_REQ_CERTS */ 486 + struct kvm_exit_snp_req_certs snp_req_certs; 493 487 /* Fix the size of the union. */ 494 488 char padding[256]; 495 489 }; ··· 984 974 #define KVM_CAP_GUEST_MEMFD_FLAGS 244 985 975 #define KVM_CAP_ARM_SEA_TO_USER 245 986 976 #define KVM_CAP_S390_USER_OPEREXEC 246 977 + #define KVM_CAP_S390_KEYOP 247 987 978 988 979 struct kvm_irq_routing_irqchip { 989 980 __u32 irqchip; ··· 1230 1219 __s32 tablefd; 1231 1220 }; 1232 1221 1222 + #define KVM_S390_KEYOP_ISKE 0x01 1223 + #define KVM_S390_KEYOP_RRBE 0x02 1224 + #define KVM_S390_KEYOP_SSKE 0x03 1225 + struct kvm_s390_keyop { 1226 + __u64 guest_addr; 1227 + __u8 key; 1228 + __u8 operation; 1229 + __u8 pad[6]; 1230 + }; 1231 + 1233 1232 /* 1234 1233 * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns 1235 1234 * a vcpu fd. 
··· 1259 1238 #define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping) 1260 1239 #define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping) 1261 1240 #define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long) 1241 + #define KVM_S390_KEYOP _IOWR(KVMIO, 0x53, struct kvm_s390_keyop) 1262 1242 1263 1243 /* Device model IOC */ 1264 1244 #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
+1 -1
tools/include/uapi/linux/perf_event.h
··· 1396 1396 #define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */ 1397 1397 #define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */ 1398 1398 #define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */ 1399 - #define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */ 1399 + #define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */ 1400 1400 #define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */ 1401 1401 #define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */ 1402 1402 #define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
+1 -1
tools/objtool/Makefile
··· 13 13 14 14 ifeq ($(ARCH_HAS_KLP),y) 15 15 HAVE_XXHASH = $(shell printf "$(pound)include <xxhash.h>\nXXH3_state_t *state;int main() {}" | \ 16 - $(HOSTCC) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n) 16 + $(HOSTCC) $(HOSTCFLAGS) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n) 17 17 ifeq ($(HAVE_XXHASH),y) 18 18 BUILD_KLP := y 19 19 LIBXXHASH_CFLAGS := $(shell $(HOSTPKG_CONFIG) libxxhash --cflags 2>/dev/null) \
+26 -42
tools/objtool/arch/x86/decode.c
··· 395 395 if (!rex_w) 396 396 break; 397 397 398 - if (modrm_reg == CFI_SP) { 399 - 400 - if (mod_is_reg()) { 401 - /* mov %rsp, reg */ 402 - ADD_OP(op) { 403 - op->src.type = OP_SRC_REG; 404 - op->src.reg = CFI_SP; 405 - op->dest.type = OP_DEST_REG; 406 - op->dest.reg = modrm_rm; 407 - } 408 - break; 409 - 410 - } else { 411 - /* skip RIP relative displacement */ 412 - if (is_RIP()) 413 - break; 414 - 415 - /* skip nontrivial SIB */ 416 - if (have_SIB()) { 417 - modrm_rm = sib_base; 418 - if (sib_index != CFI_SP) 419 - break; 420 - } 421 - 422 - /* mov %rsp, disp(%reg) */ 423 - ADD_OP(op) { 424 - op->src.type = OP_SRC_REG; 425 - op->src.reg = CFI_SP; 426 - op->dest.type = OP_DEST_REG_INDIRECT; 427 - op->dest.reg = modrm_rm; 428 - op->dest.offset = ins.displacement.value; 429 - } 430 - break; 431 - } 432 - 433 - break; 434 - } 435 - 436 - if (rm_is_reg(CFI_SP)) { 437 - 438 - /* mov reg, %rsp */ 398 + if (mod_is_reg()) { 399 + /* mov reg, reg */ 439 400 ADD_OP(op) { 440 401 op->src.type = OP_SRC_REG; 441 402 op->src.reg = modrm_reg; 442 403 op->dest.type = OP_DEST_REG; 443 - op->dest.reg = CFI_SP; 404 + op->dest.reg = modrm_rm; 405 + } 406 + break; 407 + } 408 + 409 + /* skip RIP relative displacement */ 410 + if (is_RIP()) 411 + break; 412 + 413 + /* skip nontrivial SIB */ 414 + if (have_SIB()) { 415 + modrm_rm = sib_base; 416 + if (sib_index != CFI_SP) 417 + break; 418 + } 419 + 420 + /* mov %rsp, disp(%reg) */ 421 + if (modrm_reg == CFI_SP) { 422 + ADD_OP(op) { 423 + op->src.type = OP_SRC_REG; 424 + op->src.reg = CFI_SP; 425 + op->dest.type = OP_DEST_REG_INDIRECT; 426 + op->dest.reg = modrm_rm; 427 + op->dest.offset = ins.displacement.value; 444 428 } 445 429 break; 446 430 }
+19 -5
tools/objtool/check.c
··· 3000 3000 cfi->stack_size += 8; 3001 3001 } 3002 3002 3003 + else if (cfi->vals[op->src.reg].base == CFI_CFA) { 3004 + /* 3005 + * Clang RSP musical chairs: 3006 + * 3007 + * mov %rsp, %rdx [handled above] 3008 + * ... 3009 + * mov %rdx, %rbx [handled here] 3010 + * ... 3011 + * mov %rbx, %rsp [handled above] 3012 + */ 3013 + cfi->vals[op->dest.reg].base = CFI_CFA; 3014 + cfi->vals[op->dest.reg].offset = cfi->vals[op->src.reg].offset; 3015 + } 3016 + 3003 3017 3004 3018 break; 3005 3019 ··· 3748 3734 static int validate_branch(struct objtool_file *file, struct symbol *func, 3749 3735 struct instruction *insn, struct insn_state state); 3750 3736 static int do_validate_branch(struct objtool_file *file, struct symbol *func, 3751 - struct instruction *insn, struct insn_state state); 3737 + struct instruction *insn, struct insn_state *state); 3752 3738 3753 3739 static int validate_insn(struct objtool_file *file, struct symbol *func, 3754 3740 struct instruction *insn, struct insn_state *statep, ··· 4013 3999 * tools/objtool/Documentation/objtool.txt. 
4014 4000 */ 4015 4001 static int do_validate_branch(struct objtool_file *file, struct symbol *func, 4016 - struct instruction *insn, struct insn_state state) 4002 + struct instruction *insn, struct insn_state *state) 4017 4003 { 4018 4004 struct instruction *next_insn, *prev_insn = NULL; 4019 4005 bool dead_end; ··· 4044 4030 return 1; 4045 4031 } 4046 4032 4047 - ret = validate_insn(file, func, insn, &state, prev_insn, next_insn, 4033 + ret = validate_insn(file, func, insn, state, prev_insn, next_insn, 4048 4034 &dead_end); 4049 4035 4050 4036 if (!insn->trace) { ··· 4055 4041 } 4056 4042 4057 4043 if (!dead_end && !next_insn) { 4058 - if (state.cfi.cfa.base == CFI_UNDEFINED) 4044 + if (state->cfi.cfa.base == CFI_UNDEFINED) 4059 4045 return 0; 4060 4046 if (file->ignore_unreachables) 4061 4047 return 0; ··· 4080 4066 int ret; 4081 4067 4082 4068 trace_depth_inc(); 4083 - ret = do_validate_branch(file, func, insn, state); 4069 + ret = do_validate_branch(file, func, insn, &state); 4084 4070 trace_depth_dec(); 4085 4071 4086 4072 return ret;
+1 -1
tools/objtool/elf.c
··· 1375 1375 memcpy(sec->data->d_buf, data, size); 1376 1376 1377 1377 sec->data->d_size = size; 1378 - sec->data->d_align = 1; 1378 + sec->data->d_align = sec->sh.sh_addralign; 1379 1379 1380 1380 offset = ALIGN(sec->sh.sh_size, sec->sh.sh_addralign); 1381 1381 sec->sh.sh_size = offset + size;
+1 -1
tools/objtool/include/objtool/warn.h
··· 107 107 #define ERROR_ELF(format, ...) __WARN_ELF(ERROR_STR, format, ##__VA_ARGS__) 108 108 #define ERROR_GLIBC(format, ...) __WARN_GLIBC(ERROR_STR, format, ##__VA_ARGS__) 109 109 #define ERROR_FUNC(sec, offset, format, ...) __WARN_FUNC(ERROR_STR, sec, offset, format, ##__VA_ARGS__) 110 - #define ERROR_INSN(insn, format, ...) WARN_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__) 110 + #define ERROR_INSN(insn, format, ...) ERROR_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__) 111 111 112 112 extern bool debug; 113 113 extern int indent;
+26 -13
tools/objtool/klp-diff.c
··· 1334 1334 * be applied after static branch/call init, resulting in code corruption. 1335 1335 * 1336 1336 * Validate a special section entry to avoid that. Note that an inert 1337 - * tracepoint is harmless enough, in that case just skip the entry and print a 1338 - * warning. Otherwise, return an error. 1337 + * tracepoint or pr_debug() is harmless enough, in that case just skip the 1338 + * entry and print a warning. Otherwise, return an error. 1339 1339 * 1340 - * This is only a temporary limitation which will be fixed when livepatch adds 1341 - * support for submodules: fully self-contained modules which are embedded in 1342 - * the top-level livepatch module's data and which can be loaded on demand when 1343 - * their corresponding to-be-patched module gets loaded. Then klp relocs can 1344 - * be retired. 1340 + * TODO: This is only a temporary limitation which will be fixed when livepatch 1341 + * adds support for submodules: fully self-contained modules which are embedded 1342 + * in the top-level livepatch module's data and which can be loaded on demand 1343 + * when their corresponding to-be-patched module gets loaded. Then klp relocs 1344 + * can be retired. 
1345 1345 * 1346 1346 * Return: 1347 1347 * -1: error: validation failed 1348 - * 1: warning: tracepoint skipped 1348 + * 1: warning: disabled tracepoint or pr_debug() 1349 1349 * 0: success 1350 1350 */ 1351 1351 static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym) 1352 1352 { 1353 1353 bool static_branch = !strcmp(sym->sec->name, "__jump_table"); 1354 1354 bool static_call = !strcmp(sym->sec->name, ".static_call_sites"); 1355 - struct symbol *code_sym = NULL; 1355 + const char *code_sym = NULL; 1356 1356 unsigned long code_offset = 0; 1357 1357 struct reloc *reloc; 1358 1358 int ret = 0; ··· 1364 1364 const char *sym_modname; 1365 1365 struct export *export; 1366 1366 1367 + if (convert_reloc_sym(e->patched, reloc)) 1368 + continue; 1369 + 1367 1370 /* Static branch/call keys are always STT_OBJECT */ 1368 1371 if (reloc->sym->type != STT_OBJECT) { 1369 1372 1370 1373 /* Save code location which can be printed below */ 1371 1374 if (reloc->sym->type == STT_FUNC && !code_sym) { 1372 - code_sym = reloc->sym; 1375 + code_sym = reloc->sym->name; 1373 1376 code_offset = reloc_addend(reloc); 1374 1377 } 1375 1378 ··· 1395 1392 if (!strcmp(sym_modname, "vmlinux")) 1396 1393 continue; 1397 1394 1395 + if (!code_sym) 1396 + code_sym = "<unknown>"; 1397 + 1398 1398 if (static_branch) { 1399 1399 if (strstarts(reloc->sym->name, "__tracepoint_")) { 1400 1400 WARN("%s: disabling unsupported tracepoint %s", 1401 - code_sym->name, reloc->sym->name + 13); 1401 + code_sym, reloc->sym->name + 13); 1402 + ret = 1; 1403 + continue; 1404 + } 1405 + 1406 + if (strstr(reloc->sym->name, "__UNIQUE_ID_ddebug_")) { 1407 + WARN("%s: disabling unsupported pr_debug()", 1408 + code_sym); 1402 1409 ret = 1; 1403 1410 continue; 1404 1411 } 1405 1412 1406 1413 ERROR("%s+0x%lx: unsupported static branch key %s. 
Use static_key_enabled() instead", 1407 - code_sym->name, code_offset, reloc->sym->name); 1414 + code_sym, code_offset, reloc->sym->name); 1408 1415 return -1; 1409 1416 } 1410 1417 ··· 1425 1412 } 1426 1413 1427 1414 ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead", 1428 - code_sym->name, code_offset, reloc->sym->name); 1415 + code_sym, code_offset, reloc->sym->name); 1429 1416 return -1; 1430 1417 } 1431 1418
+18
tools/perf/Makefile.config
··· 1163 1163 CFLAGS += -DHAVE_RUST_SUPPORT 1164 1164 $(call detected,CONFIG_RUST_SUPPORT) 1165 1165 endif 1166 + 1167 + ifneq ($(CROSS_COMPILE),) 1168 + RUST_TARGET_FLAGS_arm := arm-unknown-linux-gnueabi 1169 + RUST_TARGET_FLAGS_arm64 := aarch64-unknown-linux-gnu 1170 + RUST_TARGET_FLAGS_m68k := m68k-unknown-linux-gnu 1171 + RUST_TARGET_FLAGS_mips := mipsel-unknown-linux-gnu 1172 + RUST_TARGET_FLAGS_powerpc := powerpc64le-unknown-linux-gnu 1173 + RUST_TARGET_FLAGS_riscv := riscv64gc-unknown-linux-gnu 1174 + RUST_TARGET_FLAGS_s390 := s390x-unknown-linux-gnu 1175 + RUST_TARGET_FLAGS_x86 := x86_64-unknown-linux-gnu 1176 + RUST_TARGET_FLAGS_x86_64 := x86_64-unknown-linux-gnu 1177 + 1178 + ifeq ($(RUST_TARGET_FLAGS_$(ARCH)),) 1179 + $(error Unknown rust cross compilation architecture $(ARCH)) 1180 + endif 1181 + 1182 + RUST_FLAGS += --target=$(RUST_TARGET_FLAGS_$(ARCH)) 1183 + endif 1166 1184 endif 1167 1185 1168 1186 # Among the variables below, these:
+1 -1
tools/perf/Makefile.perf
··· 274 274 PYLINT := $(shell which pylint 2> /dev/null) 275 275 endif 276 276 277 - export srctree OUTPUT RM CC CXX RUSTC LD AR CFLAGS CXXFLAGS V BISON FLEX AWK 277 + export srctree OUTPUT RM CC CXX RUSTC LD AR CFLAGS CXXFLAGS RUST_FLAGS V BISON FLEX AWK 278 278 export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK MYPY PYLINT 279 279 280 280 include $(srctree)/tools/build/Makefile.include
+1
tools/perf/arch/arm/entry/syscalls/syscall.tbl
··· 485 485 468 common file_getattr sys_file_getattr 486 486 469 common file_setattr sys_file_setattr 487 487 470 common listns sys_listns 488 + 471 common rseq_slice_yield sys_rseq_slice_yield
-14
tools/perf/arch/arm/util/cs-etm.c
··· 68 68 69 69 enum cs_etm_version { CS_NOT_PRESENT, CS_ETMV3, CS_ETMV4, CS_ETE }; 70 70 71 - /* ETMv4 CONFIGR register bits */ 72 - #define TRCCONFIGR_BB BIT(3) 73 - #define TRCCONFIGR_CCI BIT(4) 74 - #define TRCCONFIGR_CID BIT(6) 75 - #define TRCCONFIGR_VMID BIT(7) 76 - #define TRCCONFIGR_TS BIT(11) 77 - #define TRCCONFIGR_RS BIT(12) 78 - #define TRCCONFIGR_VMIDOPT BIT(15) 79 - 80 - /* ETMv3 ETMCR register bits */ 81 - #define ETMCR_CYC_ACC BIT(12) 82 - #define ETMCR_TIMESTAMP_EN BIT(28) 83 - #define ETMCR_RETURN_STACK BIT(29) 84 - 85 71 static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu); 86 72 static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val); 87 73 static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);
+1
tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
··· 385 385 468 n64 file_getattr sys_file_getattr 386 386 469 n64 file_setattr sys_file_setattr 387 387 470 n64 listns sys_listns 388 + 471 n64 rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
··· 561 561 468 common file_getattr sys_file_getattr 562 562 469 common file_setattr sys_file_setattr 563 563 470 common listns sys_listns 564 + 471 nospu rseq_slice_yield sys_rseq_slice_yield
+392 -467
tools/perf/arch/s390/entry/syscalls/syscall.tbl
··· 3 3 # System call table for s390 4 4 # 5 5 # Format: 6 + # <nr> <abi> <syscall> <entry> 6 7 # 7 - # <nr> <abi> <syscall> <entry-64bit> <compat-entry> 8 - # 9 - # where <abi> can be common, 64, or 32 8 + # <abi> is always common. 10 9 11 - 1 common exit sys_exit sys_exit 12 - 2 common fork sys_fork sys_fork 13 - 3 common read sys_read compat_sys_s390_read 14 - 4 common write sys_write compat_sys_s390_write 15 - 5 common open sys_open compat_sys_open 16 - 6 common close sys_close sys_close 17 - 7 common restart_syscall sys_restart_syscall sys_restart_syscall 18 - 8 common creat sys_creat sys_creat 19 - 9 common link sys_link sys_link 20 - 10 common unlink sys_unlink sys_unlink 21 - 11 common execve sys_execve compat_sys_execve 22 - 12 common chdir sys_chdir sys_chdir 23 - 13 32 time - sys_time32 24 - 14 common mknod sys_mknod sys_mknod 25 - 15 common chmod sys_chmod sys_chmod 26 - 16 32 lchown - sys_lchown16 27 - 19 common lseek sys_lseek compat_sys_lseek 28 - 20 common getpid sys_getpid sys_getpid 29 - 21 common mount sys_mount sys_mount 30 - 22 common umount sys_oldumount sys_oldumount 31 - 23 32 setuid - sys_setuid16 32 - 24 32 getuid - sys_getuid16 33 - 25 32 stime - sys_stime32 34 - 26 common ptrace sys_ptrace compat_sys_ptrace 35 - 27 common alarm sys_alarm sys_alarm 36 - 29 common pause sys_pause sys_pause 37 - 30 common utime sys_utime sys_utime32 38 - 33 common access sys_access sys_access 39 - 34 common nice sys_nice sys_nice 40 - 36 common sync sys_sync sys_sync 41 - 37 common kill sys_kill sys_kill 42 - 38 common rename sys_rename sys_rename 43 - 39 common mkdir sys_mkdir sys_mkdir 44 - 40 common rmdir sys_rmdir sys_rmdir 45 - 41 common dup sys_dup sys_dup 46 - 42 common pipe sys_pipe sys_pipe 47 - 43 common times sys_times compat_sys_times 48 - 45 common brk sys_brk sys_brk 49 - 46 32 setgid - sys_setgid16 50 - 47 32 getgid - sys_getgid16 51 - 48 common signal sys_signal sys_signal 52 - 49 32 geteuid - sys_geteuid16 53 - 50 32 getegid - sys_getegid16 
54 - 51 common acct sys_acct sys_acct 55 - 52 common umount2 sys_umount sys_umount 56 - 54 common ioctl sys_ioctl compat_sys_ioctl 57 - 55 common fcntl sys_fcntl compat_sys_fcntl 58 - 57 common setpgid sys_setpgid sys_setpgid 59 - 60 common umask sys_umask sys_umask 60 - 61 common chroot sys_chroot sys_chroot 61 - 62 common ustat sys_ustat compat_sys_ustat 62 - 63 common dup2 sys_dup2 sys_dup2 63 - 64 common getppid sys_getppid sys_getppid 64 - 65 common getpgrp sys_getpgrp sys_getpgrp 65 - 66 common setsid sys_setsid sys_setsid 66 - 67 common sigaction sys_sigaction compat_sys_sigaction 67 - 70 32 setreuid - sys_setreuid16 68 - 71 32 setregid - sys_setregid16 69 - 72 common sigsuspend sys_sigsuspend sys_sigsuspend 70 - 73 common sigpending sys_sigpending compat_sys_sigpending 71 - 74 common sethostname sys_sethostname sys_sethostname 72 - 75 common setrlimit sys_setrlimit compat_sys_setrlimit 73 - 76 32 getrlimit - compat_sys_old_getrlimit 74 - 77 common getrusage sys_getrusage compat_sys_getrusage 75 - 78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday 76 - 79 common settimeofday sys_settimeofday compat_sys_settimeofday 77 - 80 32 getgroups - sys_getgroups16 78 - 81 32 setgroups - sys_setgroups16 79 - 83 common symlink sys_symlink sys_symlink 80 - 85 common readlink sys_readlink sys_readlink 81 - 86 common uselib sys_uselib sys_uselib 82 - 87 common swapon sys_swapon sys_swapon 83 - 88 common reboot sys_reboot sys_reboot 84 - 89 common readdir - compat_sys_old_readdir 85 - 90 common mmap sys_old_mmap compat_sys_s390_old_mmap 86 - 91 common munmap sys_munmap sys_munmap 87 - 92 common truncate sys_truncate compat_sys_truncate 88 - 93 common ftruncate sys_ftruncate compat_sys_ftruncate 89 - 94 common fchmod sys_fchmod sys_fchmod 90 - 95 32 fchown - sys_fchown16 91 - 96 common getpriority sys_getpriority sys_getpriority 92 - 97 common setpriority sys_setpriority sys_setpriority 93 - 99 common statfs sys_statfs compat_sys_statfs 94 - 100 common fstatfs 
sys_fstatfs compat_sys_fstatfs 95 - 101 32 ioperm - - 96 - 102 common socketcall sys_socketcall compat_sys_socketcall 97 - 103 common syslog sys_syslog sys_syslog 98 - 104 common setitimer sys_setitimer compat_sys_setitimer 99 - 105 common getitimer sys_getitimer compat_sys_getitimer 100 - 106 common stat sys_newstat compat_sys_newstat 101 - 107 common lstat sys_newlstat compat_sys_newlstat 102 - 108 common fstat sys_newfstat compat_sys_newfstat 103 - 110 common lookup_dcookie - - 104 - 111 common vhangup sys_vhangup sys_vhangup 105 - 112 common idle - - 106 - 114 common wait4 sys_wait4 compat_sys_wait4 107 - 115 common swapoff sys_swapoff sys_swapoff 108 - 116 common sysinfo sys_sysinfo compat_sys_sysinfo 109 - 117 common ipc sys_s390_ipc compat_sys_s390_ipc 110 - 118 common fsync sys_fsync sys_fsync 111 - 119 common sigreturn sys_sigreturn compat_sys_sigreturn 112 - 120 common clone sys_clone sys_clone 113 - 121 common setdomainname sys_setdomainname sys_setdomainname 114 - 122 common uname sys_newuname sys_newuname 115 - 124 common adjtimex sys_adjtimex sys_adjtimex_time32 116 - 125 common mprotect sys_mprotect sys_mprotect 117 - 126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask 118 - 127 common create_module - - 119 - 128 common init_module sys_init_module sys_init_module 120 - 129 common delete_module sys_delete_module sys_delete_module 121 - 130 common get_kernel_syms - - 122 - 131 common quotactl sys_quotactl sys_quotactl 123 - 132 common getpgid sys_getpgid sys_getpgid 124 - 133 common fchdir sys_fchdir sys_fchdir 125 - 134 common bdflush sys_ni_syscall sys_ni_syscall 126 - 135 common sysfs sys_sysfs sys_sysfs 127 - 136 common personality sys_s390_personality sys_s390_personality 128 - 137 common afs_syscall - - 129 - 138 32 setfsuid - sys_setfsuid16 130 - 139 32 setfsgid - sys_setfsgid16 131 - 140 32 _llseek - sys_llseek 132 - 141 common getdents sys_getdents compat_sys_getdents 133 - 142 32 _newselect - compat_sys_select 134 - 142 64 select 
sys_select - 135 - 143 common flock sys_flock sys_flock 136 - 144 common msync sys_msync sys_msync 137 - 145 common readv sys_readv sys_readv 138 - 146 common writev sys_writev sys_writev 139 - 147 common getsid sys_getsid sys_getsid 140 - 148 common fdatasync sys_fdatasync sys_fdatasync 141 - 149 common _sysctl - - 142 - 150 common mlock sys_mlock sys_mlock 143 - 151 common munlock sys_munlock sys_munlock 144 - 152 common mlockall sys_mlockall sys_mlockall 145 - 153 common munlockall sys_munlockall sys_munlockall 146 - 154 common sched_setparam sys_sched_setparam sys_sched_setparam 147 - 155 common sched_getparam sys_sched_getparam sys_sched_getparam 148 - 156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler 149 - 157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler 150 - 158 common sched_yield sys_sched_yield sys_sched_yield 151 - 159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max 152 - 160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min 153 - 161 common sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32 154 - 162 common nanosleep sys_nanosleep sys_nanosleep_time32 155 - 163 common mremap sys_mremap sys_mremap 156 - 164 32 setresuid - sys_setresuid16 157 - 165 32 getresuid - sys_getresuid16 158 - 167 common query_module - - 159 - 168 common poll sys_poll sys_poll 160 - 169 common nfsservctl - - 161 - 170 32 setresgid - sys_setresgid16 162 - 171 32 getresgid - sys_getresgid16 163 - 172 common prctl sys_prctl sys_prctl 164 - 173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn 165 - 174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 166 - 175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 167 - 176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending 168 - 177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32 169 - 178 common 
rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 170 - 179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 171 - 180 common pread64 sys_pread64 compat_sys_s390_pread64 172 - 181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64 173 - 182 32 chown - sys_chown16 174 - 183 common getcwd sys_getcwd sys_getcwd 175 - 184 common capget sys_capget sys_capget 176 - 185 common capset sys_capset sys_capset 177 - 186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack 178 - 187 common sendfile sys_sendfile64 compat_sys_sendfile 179 - 188 common getpmsg - - 180 - 189 common putpmsg - - 181 - 190 common vfork sys_vfork sys_vfork 182 - 191 32 ugetrlimit - compat_sys_getrlimit 183 - 191 64 getrlimit sys_getrlimit - 184 - 192 32 mmap2 - compat_sys_s390_mmap2 185 - 193 32 truncate64 - compat_sys_s390_truncate64 186 - 194 32 ftruncate64 - compat_sys_s390_ftruncate64 187 - 195 32 stat64 - compat_sys_s390_stat64 188 - 196 32 lstat64 - compat_sys_s390_lstat64 189 - 197 32 fstat64 - compat_sys_s390_fstat64 190 - 198 32 lchown32 - sys_lchown 191 - 198 64 lchown sys_lchown - 192 - 199 32 getuid32 - sys_getuid 193 - 199 64 getuid sys_getuid - 194 - 200 32 getgid32 - sys_getgid 195 - 200 64 getgid sys_getgid - 196 - 201 32 geteuid32 - sys_geteuid 197 - 201 64 geteuid sys_geteuid - 198 - 202 32 getegid32 - sys_getegid 199 - 202 64 getegid sys_getegid - 200 - 203 32 setreuid32 - sys_setreuid 201 - 203 64 setreuid sys_setreuid - 202 - 204 32 setregid32 - sys_setregid 203 - 204 64 setregid sys_setregid - 204 - 205 32 getgroups32 - sys_getgroups 205 - 205 64 getgroups sys_getgroups - 206 - 206 32 setgroups32 - sys_setgroups 207 - 206 64 setgroups sys_setgroups - 208 - 207 32 fchown32 - sys_fchown 209 - 207 64 fchown sys_fchown - 210 - 208 32 setresuid32 - sys_setresuid 211 - 208 64 setresuid sys_setresuid - 212 - 209 32 getresuid32 - sys_getresuid 213 - 209 64 getresuid sys_getresuid - 214 - 210 32 setresgid32 - sys_setresgid 215 - 210 64 setresgid 
sys_setresgid - 216 - 211 32 getresgid32 - sys_getresgid 217 - 211 64 getresgid sys_getresgid - 218 - 212 32 chown32 - sys_chown 219 - 212 64 chown sys_chown - 220 - 213 32 setuid32 - sys_setuid 221 - 213 64 setuid sys_setuid - 222 - 214 32 setgid32 - sys_setgid 223 - 214 64 setgid sys_setgid - 224 - 215 32 setfsuid32 - sys_setfsuid 225 - 215 64 setfsuid sys_setfsuid - 226 - 216 32 setfsgid32 - sys_setfsgid 227 - 216 64 setfsgid sys_setfsgid - 228 - 217 common pivot_root sys_pivot_root sys_pivot_root 229 - 218 common mincore sys_mincore sys_mincore 230 - 219 common madvise sys_madvise sys_madvise 231 - 220 common getdents64 sys_getdents64 sys_getdents64 232 - 221 32 fcntl64 - compat_sys_fcntl64 233 - 222 common readahead sys_readahead compat_sys_s390_readahead 234 - 223 32 sendfile64 - compat_sys_sendfile64 235 - 224 common setxattr sys_setxattr sys_setxattr 236 - 225 common lsetxattr sys_lsetxattr sys_lsetxattr 237 - 226 common fsetxattr sys_fsetxattr sys_fsetxattr 238 - 227 common getxattr sys_getxattr sys_getxattr 239 - 228 common lgetxattr sys_lgetxattr sys_lgetxattr 240 - 229 common fgetxattr sys_fgetxattr sys_fgetxattr 241 - 230 common listxattr sys_listxattr sys_listxattr 242 - 231 common llistxattr sys_llistxattr sys_llistxattr 243 - 232 common flistxattr sys_flistxattr sys_flistxattr 244 - 233 common removexattr sys_removexattr sys_removexattr 245 - 234 common lremovexattr sys_lremovexattr sys_lremovexattr 246 - 235 common fremovexattr sys_fremovexattr sys_fremovexattr 247 - 236 common gettid sys_gettid sys_gettid 248 - 237 common tkill sys_tkill sys_tkill 249 - 238 common futex sys_futex sys_futex_time32 250 - 239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 251 - 240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity 252 - 241 common tgkill sys_tgkill sys_tgkill 253 - 243 common io_setup sys_io_setup compat_sys_io_setup 254 - 244 common io_destroy sys_io_destroy sys_io_destroy 255 - 245 common 
io_getevents sys_io_getevents sys_io_getevents_time32 256 - 246 common io_submit sys_io_submit compat_sys_io_submit 257 - 247 common io_cancel sys_io_cancel sys_io_cancel 258 - 248 common exit_group sys_exit_group sys_exit_group 259 - 249 common epoll_create sys_epoll_create sys_epoll_create 260 - 250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl 261 - 251 common epoll_wait sys_epoll_wait sys_epoll_wait 262 - 252 common set_tid_address sys_set_tid_address sys_set_tid_address 263 - 253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64 264 - 254 common timer_create sys_timer_create compat_sys_timer_create 265 - 255 common timer_settime sys_timer_settime sys_timer_settime32 266 - 256 common timer_gettime sys_timer_gettime sys_timer_gettime32 267 - 257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun 268 - 258 common timer_delete sys_timer_delete sys_timer_delete 269 - 259 common clock_settime sys_clock_settime sys_clock_settime32 270 - 260 common clock_gettime sys_clock_gettime sys_clock_gettime32 271 - 261 common clock_getres sys_clock_getres sys_clock_getres_time32 272 - 262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32 273 - 264 32 fadvise64_64 - compat_sys_s390_fadvise64_64 274 - 265 common statfs64 sys_statfs64 compat_sys_statfs64 275 - 266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 276 - 267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages 277 - 268 common mbind sys_mbind sys_mbind 278 - 269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy 279 - 270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy 280 - 271 common mq_open sys_mq_open compat_sys_mq_open 281 - 272 common mq_unlink sys_mq_unlink sys_mq_unlink 282 - 273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32 283 - 274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32 284 - 275 common mq_notify sys_mq_notify compat_sys_mq_notify 285 - 276 common mq_getsetattr sys_mq_getsetattr 
compat_sys_mq_getsetattr 286 - 277 common kexec_load sys_kexec_load compat_sys_kexec_load 287 - 278 common add_key sys_add_key sys_add_key 288 - 279 common request_key sys_request_key sys_request_key 289 - 280 common keyctl sys_keyctl compat_sys_keyctl 290 - 281 common waitid sys_waitid compat_sys_waitid 291 - 282 common ioprio_set sys_ioprio_set sys_ioprio_set 292 - 283 common ioprio_get sys_ioprio_get sys_ioprio_get 293 - 284 common inotify_init sys_inotify_init sys_inotify_init 294 - 285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch 295 - 286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch 296 - 287 common migrate_pages sys_migrate_pages sys_migrate_pages 297 - 288 common openat sys_openat compat_sys_openat 298 - 289 common mkdirat sys_mkdirat sys_mkdirat 299 - 290 common mknodat sys_mknodat sys_mknodat 300 - 291 common fchownat sys_fchownat sys_fchownat 301 - 292 common futimesat sys_futimesat sys_futimesat_time32 302 - 293 32 fstatat64 - compat_sys_s390_fstatat64 303 - 293 64 newfstatat sys_newfstatat - 304 - 294 common unlinkat sys_unlinkat sys_unlinkat 305 - 295 common renameat sys_renameat sys_renameat 306 - 296 common linkat sys_linkat sys_linkat 307 - 297 common symlinkat sys_symlinkat sys_symlinkat 308 - 298 common readlinkat sys_readlinkat sys_readlinkat 309 - 299 common fchmodat sys_fchmodat sys_fchmodat 310 - 300 common faccessat sys_faccessat sys_faccessat 311 - 301 common pselect6 sys_pselect6 compat_sys_pselect6_time32 312 - 302 common ppoll sys_ppoll compat_sys_ppoll_time32 313 - 303 common unshare sys_unshare sys_unshare 314 - 304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 315 - 305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list 316 - 306 common splice sys_splice sys_splice 317 - 307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range 318 - 308 common tee sys_tee sys_tee 319 - 309 common vmsplice sys_vmsplice sys_vmsplice 320 - 310 
common move_pages sys_move_pages sys_move_pages 321 - 311 common getcpu sys_getcpu sys_getcpu 322 - 312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 323 - 313 common utimes sys_utimes sys_utimes_time32 324 - 314 common fallocate sys_fallocate compat_sys_s390_fallocate 325 - 315 common utimensat sys_utimensat sys_utimensat_time32 326 - 316 common signalfd sys_signalfd compat_sys_signalfd 327 - 317 common timerfd - - 328 - 318 common eventfd sys_eventfd sys_eventfd 329 - 319 common timerfd_create sys_timerfd_create sys_timerfd_create 330 - 320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32 331 - 321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32 332 - 322 common signalfd4 sys_signalfd4 compat_sys_signalfd4 333 - 323 common eventfd2 sys_eventfd2 sys_eventfd2 334 - 324 common inotify_init1 sys_inotify_init1 sys_inotify_init1 335 - 325 common pipe2 sys_pipe2 sys_pipe2 336 - 326 common dup3 sys_dup3 sys_dup3 337 - 327 common epoll_create1 sys_epoll_create1 sys_epoll_create1 338 - 328 common preadv sys_preadv compat_sys_preadv 339 - 329 common pwritev sys_pwritev compat_sys_pwritev 340 - 330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 341 - 331 common perf_event_open sys_perf_event_open sys_perf_event_open 342 - 332 common fanotify_init sys_fanotify_init sys_fanotify_init 343 - 333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 344 - 334 common prlimit64 sys_prlimit64 sys_prlimit64 345 - 335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at 346 - 336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at 347 - 337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32 348 - 338 common syncfs sys_syncfs sys_syncfs 349 - 339 common setns sys_setns sys_setns 350 - 340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv 351 - 341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev 352 - 342 common 
s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr 353 - 343 common kcmp sys_kcmp sys_kcmp 354 - 344 common finit_module sys_finit_module sys_finit_module 355 - 345 common sched_setattr sys_sched_setattr sys_sched_setattr 356 - 346 common sched_getattr sys_sched_getattr sys_sched_getattr 357 - 347 common renameat2 sys_renameat2 sys_renameat2 358 - 348 common seccomp sys_seccomp sys_seccomp 359 - 349 common getrandom sys_getrandom sys_getrandom 360 - 350 common memfd_create sys_memfd_create sys_memfd_create 361 - 351 common bpf sys_bpf sys_bpf 362 - 352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write 363 - 353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read 364 - 354 common execveat sys_execveat compat_sys_execveat 365 - 355 common userfaultfd sys_userfaultfd sys_userfaultfd 366 - 356 common membarrier sys_membarrier sys_membarrier 367 - 357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32 368 - 358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 369 - 359 common socket sys_socket sys_socket 370 - 360 common socketpair sys_socketpair sys_socketpair 371 - 361 common bind sys_bind sys_bind 372 - 362 common connect sys_connect sys_connect 373 - 363 common listen sys_listen sys_listen 374 - 364 common accept4 sys_accept4 sys_accept4 375 - 365 common getsockopt sys_getsockopt sys_getsockopt 376 - 366 common setsockopt sys_setsockopt sys_setsockopt 377 - 367 common getsockname sys_getsockname sys_getsockname 378 - 368 common getpeername sys_getpeername sys_getpeername 379 - 369 common sendto sys_sendto sys_sendto 380 - 370 common sendmsg sys_sendmsg compat_sys_sendmsg 381 - 371 common recvfrom sys_recvfrom compat_sys_recvfrom 382 - 372 common recvmsg sys_recvmsg compat_sys_recvmsg 383 - 373 common shutdown sys_shutdown sys_shutdown 384 - 374 common mlock2 sys_mlock2 sys_mlock2 385 - 375 common copy_file_range sys_copy_file_range sys_copy_file_range 386 - 376 common preadv2 sys_preadv2 
compat_sys_preadv2 387 - 377 common pwritev2 sys_pwritev2 compat_sys_pwritev2 388 - 378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage 389 - 379 common statx sys_statx sys_statx 390 - 380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi 391 - 381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load 392 - 382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents 393 - 383 common rseq sys_rseq sys_rseq 394 - 384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect 395 - 385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc 396 - 386 common pkey_free sys_pkey_free sys_pkey_free 10 + 1 common exit sys_exit 11 + 2 common fork sys_fork 12 + 3 common read sys_read 13 + 4 common write sys_write 14 + 5 common open sys_open 15 + 6 common close sys_close 16 + 7 common restart_syscall sys_restart_syscall 17 + 8 common creat sys_creat 18 + 9 common link sys_link 19 + 10 common unlink sys_unlink 20 + 11 common execve sys_execve 21 + 12 common chdir sys_chdir 22 + 14 common mknod sys_mknod 23 + 15 common chmod sys_chmod 24 + 19 common lseek sys_lseek 25 + 20 common getpid sys_getpid 26 + 21 common mount sys_mount 27 + 22 common umount sys_oldumount 28 + 26 common ptrace sys_ptrace 29 + 27 common alarm sys_alarm 30 + 29 common pause sys_pause 31 + 30 common utime sys_utime 32 + 33 common access sys_access 33 + 34 common nice sys_nice 34 + 36 common sync sys_sync 35 + 37 common kill sys_kill 36 + 38 common rename sys_rename 37 + 39 common mkdir sys_mkdir 38 + 40 common rmdir sys_rmdir 39 + 41 common dup sys_dup 40 + 42 common pipe sys_pipe 41 + 43 common times sys_times 42 + 45 common brk sys_brk 43 + 48 common signal sys_signal 44 + 51 common acct sys_acct 45 + 52 common umount2 sys_umount 46 + 54 common ioctl sys_ioctl 47 + 55 common fcntl sys_fcntl 48 + 57 common setpgid sys_setpgid 49 + 60 common umask sys_umask 50 + 61 common chroot sys_chroot 51 + 62 common ustat sys_ustat 52 + 63 common dup2 sys_dup2 53 + 64 common getppid 
sys_getppid 54 + 65 common getpgrp sys_getpgrp 55 + 66 common setsid sys_setsid 56 + 67 common sigaction sys_sigaction 57 + 72 common sigsuspend sys_sigsuspend 58 + 73 common sigpending sys_sigpending 59 + 74 common sethostname sys_sethostname 60 + 75 common setrlimit sys_setrlimit 61 + 77 common getrusage sys_getrusage 62 + 78 common gettimeofday sys_gettimeofday 63 + 79 common settimeofday sys_settimeofday 64 + 83 common symlink sys_symlink 65 + 85 common readlink sys_readlink 66 + 86 common uselib sys_uselib 67 + 87 common swapon sys_swapon 68 + 88 common reboot sys_reboot 69 + 89 common readdir sys_ni_syscall 70 + 90 common mmap sys_old_mmap 71 + 91 common munmap sys_munmap 72 + 92 common truncate sys_truncate 73 + 93 common ftruncate sys_ftruncate 74 + 94 common fchmod sys_fchmod 75 + 96 common getpriority sys_getpriority 76 + 97 common setpriority sys_setpriority 77 + 99 common statfs sys_statfs 78 + 100 common fstatfs sys_fstatfs 79 + 102 common socketcall sys_socketcall 80 + 103 common syslog sys_syslog 81 + 104 common setitimer sys_setitimer 82 + 105 common getitimer sys_getitimer 83 + 106 common stat sys_newstat 84 + 107 common lstat sys_newlstat 85 + 108 common fstat sys_newfstat 86 + 110 common lookup_dcookie sys_ni_syscall 87 + 111 common vhangup sys_vhangup 88 + 112 common idle sys_ni_syscall 89 + 114 common wait4 sys_wait4 90 + 115 common swapoff sys_swapoff 91 + 116 common sysinfo sys_sysinfo 92 + 117 common ipc sys_s390_ipc 93 + 118 common fsync sys_fsync 94 + 119 common sigreturn sys_sigreturn 95 + 120 common clone sys_clone 96 + 121 common setdomainname sys_setdomainname 97 + 122 common uname sys_newuname 98 + 124 common adjtimex sys_adjtimex 99 + 125 common mprotect sys_mprotect 100 + 126 common sigprocmask sys_sigprocmask 101 + 127 common create_module sys_ni_syscall 102 + 128 common init_module sys_init_module 103 + 129 common delete_module sys_delete_module 104 + 130 common get_kernel_syms sys_ni_syscall 105 + 131 common quotactl sys_quotactl 
106 + 132 common getpgid sys_getpgid 107 + 133 common fchdir sys_fchdir 108 + 134 common bdflush sys_ni_syscall 109 + 135 common sysfs sys_sysfs 110 + 136 common personality sys_s390_personality 111 + 137 common afs_syscall sys_ni_syscall 112 + 141 common getdents sys_getdents 113 + 142 common select sys_select 114 + 143 common flock sys_flock 115 + 144 common msync sys_msync 116 + 145 common readv sys_readv 117 + 146 common writev sys_writev 118 + 147 common getsid sys_getsid 119 + 148 common fdatasync sys_fdatasync 120 + 149 common _sysctl sys_ni_syscall 121 + 150 common mlock sys_mlock 122 + 151 common munlock sys_munlock 123 + 152 common mlockall sys_mlockall 124 + 153 common munlockall sys_munlockall 125 + 154 common sched_setparam sys_sched_setparam 126 + 155 common sched_getparam sys_sched_getparam 127 + 156 common sched_setscheduler sys_sched_setscheduler 128 + 157 common sched_getscheduler sys_sched_getscheduler 129 + 158 common sched_yield sys_sched_yield 130 + 159 common sched_get_priority_max sys_sched_get_priority_max 131 + 160 common sched_get_priority_min sys_sched_get_priority_min 132 + 161 common sched_rr_get_interval sys_sched_rr_get_interval 133 + 162 common nanosleep sys_nanosleep 134 + 163 common mremap sys_mremap 135 + 167 common query_module sys_ni_syscall 136 + 168 common poll sys_poll 137 + 169 common nfsservctl sys_ni_syscall 138 + 172 common prctl sys_prctl 139 + 173 common rt_sigreturn sys_rt_sigreturn 140 + 174 common rt_sigaction sys_rt_sigaction 141 + 175 common rt_sigprocmask sys_rt_sigprocmask 142 + 176 common rt_sigpending sys_rt_sigpending 143 + 177 common rt_sigtimedwait sys_rt_sigtimedwait 144 + 178 common rt_sigqueueinfo sys_rt_sigqueueinfo 145 + 179 common rt_sigsuspend sys_rt_sigsuspend 146 + 180 common pread64 sys_pread64 147 + 181 common pwrite64 sys_pwrite64 148 + 183 common getcwd sys_getcwd 149 + 184 common capget sys_capget 150 + 185 common capset sys_capset 151 + 186 common sigaltstack sys_sigaltstack 152 + 187 common 
sendfile sys_sendfile64 153 + 188 common getpmsg sys_ni_syscall 154 + 189 common putpmsg sys_ni_syscall 155 + 190 common vfork sys_vfork 156 + 191 common getrlimit sys_getrlimit 157 + 198 common lchown sys_lchown 158 + 199 common getuid sys_getuid 159 + 200 common getgid sys_getgid 160 + 201 common geteuid sys_geteuid 161 + 202 common getegid sys_getegid 162 + 203 common setreuid sys_setreuid 163 + 204 common setregid sys_setregid 164 + 205 common getgroups sys_getgroups 165 + 206 common setgroups sys_setgroups 166 + 207 common fchown sys_fchown 167 + 208 common setresuid sys_setresuid 168 + 209 common getresuid sys_getresuid 169 + 210 common setresgid sys_setresgid 170 + 211 common getresgid sys_getresgid 171 + 212 common chown sys_chown 172 + 213 common setuid sys_setuid 173 + 214 common setgid sys_setgid 174 + 215 common setfsuid sys_setfsuid 175 + 216 common setfsgid sys_setfsgid 176 + 217 common pivot_root sys_pivot_root 177 + 218 common mincore sys_mincore 178 + 219 common madvise sys_madvise 179 + 220 common getdents64 sys_getdents64 180 + 222 common readahead sys_readahead 181 + 224 common setxattr sys_setxattr 182 + 225 common lsetxattr sys_lsetxattr 183 + 226 common fsetxattr sys_fsetxattr 184 + 227 common getxattr sys_getxattr 185 + 228 common lgetxattr sys_lgetxattr 186 + 229 common fgetxattr sys_fgetxattr 187 + 230 common listxattr sys_listxattr 188 + 231 common llistxattr sys_llistxattr 189 + 232 common flistxattr sys_flistxattr 190 + 233 common removexattr sys_removexattr 191 + 234 common lremovexattr sys_lremovexattr 192 + 235 common fremovexattr sys_fremovexattr 193 + 236 common gettid sys_gettid 194 + 237 common tkill sys_tkill 195 + 238 common futex sys_futex 196 + 239 common sched_setaffinity sys_sched_setaffinity 197 + 240 common sched_getaffinity sys_sched_getaffinity 198 + 241 common tgkill sys_tgkill 199 + 243 common io_setup sys_io_setup 200 + 244 common io_destroy sys_io_destroy 201 + 245 common io_getevents sys_io_getevents 202 + 246 
common io_submit sys_io_submit 203 + 247 common io_cancel sys_io_cancel 204 + 248 common exit_group sys_exit_group 205 + 249 common epoll_create sys_epoll_create 206 + 250 common epoll_ctl sys_epoll_ctl 207 + 251 common epoll_wait sys_epoll_wait 208 + 252 common set_tid_address sys_set_tid_address 209 + 253 common fadvise64 sys_fadvise64_64 210 + 254 common timer_create sys_timer_create 211 + 255 common timer_settime sys_timer_settime 212 + 256 common timer_gettime sys_timer_gettime 213 + 257 common timer_getoverrun sys_timer_getoverrun 214 + 258 common timer_delete sys_timer_delete 215 + 259 common clock_settime sys_clock_settime 216 + 260 common clock_gettime sys_clock_gettime 217 + 261 common clock_getres sys_clock_getres 218 + 262 common clock_nanosleep sys_clock_nanosleep 219 + 265 common statfs64 sys_statfs64 220 + 266 common fstatfs64 sys_fstatfs64 221 + 267 common remap_file_pages sys_remap_file_pages 222 + 268 common mbind sys_mbind 223 + 269 common get_mempolicy sys_get_mempolicy 224 + 270 common set_mempolicy sys_set_mempolicy 225 + 271 common mq_open sys_mq_open 226 + 272 common mq_unlink sys_mq_unlink 227 + 273 common mq_timedsend sys_mq_timedsend 228 + 274 common mq_timedreceive sys_mq_timedreceive 229 + 275 common mq_notify sys_mq_notify 230 + 276 common mq_getsetattr sys_mq_getsetattr 231 + 277 common kexec_load sys_kexec_load 232 + 278 common add_key sys_add_key 233 + 279 common request_key sys_request_key 234 + 280 common keyctl sys_keyctl 235 + 281 common waitid sys_waitid 236 + 282 common ioprio_set sys_ioprio_set 237 + 283 common ioprio_get sys_ioprio_get 238 + 284 common inotify_init sys_inotify_init 239 + 285 common inotify_add_watch sys_inotify_add_watch 240 + 286 common inotify_rm_watch sys_inotify_rm_watch 241 + 287 common migrate_pages sys_migrate_pages 242 + 288 common openat sys_openat 243 + 289 common mkdirat sys_mkdirat 244 + 290 common mknodat sys_mknodat 245 + 291 common fchownat sys_fchownat 246 + 292 common futimesat sys_futimesat 
247 + 293 common newfstatat sys_newfstatat 248 + 294 common unlinkat sys_unlinkat 249 + 295 common renameat sys_renameat 250 + 296 common linkat sys_linkat 251 + 297 common symlinkat sys_symlinkat 252 + 298 common readlinkat sys_readlinkat 253 + 299 common fchmodat sys_fchmodat 254 + 300 common faccessat sys_faccessat 255 + 301 common pselect6 sys_pselect6 256 + 302 common ppoll sys_ppoll 257 + 303 common unshare sys_unshare 258 + 304 common set_robust_list sys_set_robust_list 259 + 305 common get_robust_list sys_get_robust_list 260 + 306 common splice sys_splice 261 + 307 common sync_file_range sys_sync_file_range 262 + 308 common tee sys_tee 263 + 309 common vmsplice sys_vmsplice 264 + 310 common move_pages sys_move_pages 265 + 311 common getcpu sys_getcpu 266 + 312 common epoll_pwait sys_epoll_pwait 267 + 313 common utimes sys_utimes 268 + 314 common fallocate sys_fallocate 269 + 315 common utimensat sys_utimensat 270 + 316 common signalfd sys_signalfd 271 + 317 common timerfd sys_ni_syscall 272 + 318 common eventfd sys_eventfd 273 + 319 common timerfd_create sys_timerfd_create 274 + 320 common timerfd_settime sys_timerfd_settime 275 + 321 common timerfd_gettime sys_timerfd_gettime 276 + 322 common signalfd4 sys_signalfd4 277 + 323 common eventfd2 sys_eventfd2 278 + 324 common inotify_init1 sys_inotify_init1 279 + 325 common pipe2 sys_pipe2 280 + 326 common dup3 sys_dup3 281 + 327 common epoll_create1 sys_epoll_create1 282 + 328 common preadv sys_preadv 283 + 329 common pwritev sys_pwritev 284 + 330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 285 + 331 common perf_event_open sys_perf_event_open 286 + 332 common fanotify_init sys_fanotify_init 287 + 333 common fanotify_mark sys_fanotify_mark 288 + 334 common prlimit64 sys_prlimit64 289 + 335 common name_to_handle_at sys_name_to_handle_at 290 + 336 common open_by_handle_at sys_open_by_handle_at 291 + 337 common clock_adjtime sys_clock_adjtime 292 + 338 common syncfs sys_syncfs 293 + 339 common setns sys_setns 
294 + 340 common process_vm_readv sys_process_vm_readv 295 + 341 common process_vm_writev sys_process_vm_writev 296 + 342 common s390_runtime_instr sys_s390_runtime_instr 297 + 343 common kcmp sys_kcmp 298 + 344 common finit_module sys_finit_module 299 + 345 common sched_setattr sys_sched_setattr 300 + 346 common sched_getattr sys_sched_getattr 301 + 347 common renameat2 sys_renameat2 302 + 348 common seccomp sys_seccomp 303 + 349 common getrandom sys_getrandom 304 + 350 common memfd_create sys_memfd_create 305 + 351 common bpf sys_bpf 306 + 352 common s390_pci_mmio_write sys_s390_pci_mmio_write 307 + 353 common s390_pci_mmio_read sys_s390_pci_mmio_read 308 + 354 common execveat sys_execveat 309 + 355 common userfaultfd sys_userfaultfd 310 + 356 common membarrier sys_membarrier 311 + 357 common recvmmsg sys_recvmmsg 312 + 358 common sendmmsg sys_sendmmsg 313 + 359 common socket sys_socket 314 + 360 common socketpair sys_socketpair 315 + 361 common bind sys_bind 316 + 362 common connect sys_connect 317 + 363 common listen sys_listen 318 + 364 common accept4 sys_accept4 319 + 365 common getsockopt sys_getsockopt 320 + 366 common setsockopt sys_setsockopt 321 + 367 common getsockname sys_getsockname 322 + 368 common getpeername sys_getpeername 323 + 369 common sendto sys_sendto 324 + 370 common sendmsg sys_sendmsg 325 + 371 common recvfrom sys_recvfrom 326 + 372 common recvmsg sys_recvmsg 327 + 373 common shutdown sys_shutdown 328 + 374 common mlock2 sys_mlock2 329 + 375 common copy_file_range sys_copy_file_range 330 + 376 common preadv2 sys_preadv2 331 + 377 common pwritev2 sys_pwritev2 332 + 378 common s390_guarded_storage sys_s390_guarded_storage 333 + 379 common statx sys_statx 334 + 380 common s390_sthyi sys_s390_sthyi 335 + 381 common kexec_file_load sys_kexec_file_load 336 + 382 common io_pgetevents sys_io_pgetevents 337 + 383 common rseq sys_rseq 338 + 384 common pkey_mprotect sys_pkey_mprotect 339 + 385 common pkey_alloc sys_pkey_alloc 340 + 386 common 
pkey_free sys_pkey_free 397 341 # room for arch specific syscalls 398 - 392 64 semtimedop sys_semtimedop - 399 - 393 common semget sys_semget sys_semget 400 - 394 common semctl sys_semctl compat_sys_semctl 401 - 395 common shmget sys_shmget sys_shmget 402 - 396 common shmctl sys_shmctl compat_sys_shmctl 403 - 397 common shmat sys_shmat compat_sys_shmat 404 - 398 common shmdt sys_shmdt sys_shmdt 405 - 399 common msgget sys_msgget sys_msgget 406 - 400 common msgsnd sys_msgsnd compat_sys_msgsnd 407 - 401 common msgrcv sys_msgrcv compat_sys_msgrcv 408 - 402 common msgctl sys_msgctl compat_sys_msgctl 409 - 403 32 clock_gettime64 - sys_clock_gettime 410 - 404 32 clock_settime64 - sys_clock_settime 411 - 405 32 clock_adjtime64 - sys_clock_adjtime 412 - 406 32 clock_getres_time64 - sys_clock_getres 413 - 407 32 clock_nanosleep_time64 - sys_clock_nanosleep 414 - 408 32 timer_gettime64 - sys_timer_gettime 415 - 409 32 timer_settime64 - sys_timer_settime 416 - 410 32 timerfd_gettime64 - sys_timerfd_gettime 417 - 411 32 timerfd_settime64 - sys_timerfd_settime 418 - 412 32 utimensat_time64 - sys_utimensat 419 - 413 32 pselect6_time64 - compat_sys_pselect6_time64 420 - 414 32 ppoll_time64 - compat_sys_ppoll_time64 421 - 416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64 422 - 417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64 423 - 418 32 mq_timedsend_time64 - sys_mq_timedsend 424 - 419 32 mq_timedreceive_time64 - sys_mq_timedreceive 425 - 420 32 semtimedop_time64 - sys_semtimedop 426 - 421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64 427 - 422 32 futex_time64 - sys_futex 428 - 423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval 429 - 424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal 430 - 425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup 431 - 426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter 432 - 427 common io_uring_register sys_io_uring_register sys_io_uring_register 433 - 428 common 
open_tree sys_open_tree sys_open_tree 434 - 429 common move_mount sys_move_mount sys_move_mount 435 - 430 common fsopen sys_fsopen sys_fsopen 436 - 431 common fsconfig sys_fsconfig sys_fsconfig 437 - 432 common fsmount sys_fsmount sys_fsmount 438 - 433 common fspick sys_fspick sys_fspick 439 - 434 common pidfd_open sys_pidfd_open sys_pidfd_open 440 - 435 common clone3 sys_clone3 sys_clone3 441 - 436 common close_range sys_close_range sys_close_range 442 - 437 common openat2 sys_openat2 sys_openat2 443 - 438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd 444 - 439 common faccessat2 sys_faccessat2 sys_faccessat2 445 - 440 common process_madvise sys_process_madvise sys_process_madvise 446 - 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 447 - 442 common mount_setattr sys_mount_setattr sys_mount_setattr 448 - 443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd 449 - 444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset 450 - 445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule 451 - 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self 452 - 447 common memfd_secret sys_memfd_secret sys_memfd_secret 453 - 448 common process_mrelease sys_process_mrelease sys_process_mrelease 454 - 449 common futex_waitv sys_futex_waitv sys_futex_waitv 455 - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node 456 - 451 common cachestat sys_cachestat sys_cachestat 457 - 452 common fchmodat2 sys_fchmodat2 sys_fchmodat2 458 - 453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack 459 - 454 common futex_wake sys_futex_wake sys_futex_wake 460 - 455 common futex_wait sys_futex_wait sys_futex_wait 461 - 456 common futex_requeue sys_futex_requeue sys_futex_requeue 462 - 457 common statmount sys_statmount sys_statmount 463 - 458 common listmount sys_listmount sys_listmount 464 - 459 common lsm_get_self_attr sys_lsm_get_self_attr 
sys_lsm_get_self_attr 465 - 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr 466 - 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules 467 - 462 common mseal sys_mseal sys_mseal 468 - 463 common setxattrat sys_setxattrat sys_setxattrat 469 - 464 common getxattrat sys_getxattrat sys_getxattrat 470 - 465 common listxattrat sys_listxattrat sys_listxattrat 471 - 466 common removexattrat sys_removexattrat sys_removexattrat 472 - 467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr 473 - 468 common file_getattr sys_file_getattr sys_file_getattr 474 - 469 common file_setattr sys_file_setattr sys_file_setattr 475 - 470 common listns sys_listns sys_listns 342 + 392 common semtimedop sys_semtimedop 343 + 393 common semget sys_semget 344 + 394 common semctl sys_semctl 345 + 395 common shmget sys_shmget 346 + 396 common shmctl sys_shmctl 347 + 397 common shmat sys_shmat 348 + 398 common shmdt sys_shmdt 349 + 399 common msgget sys_msgget 350 + 400 common msgsnd sys_msgsnd 351 + 401 common msgrcv sys_msgrcv 352 + 402 common msgctl sys_msgctl 353 + 424 common pidfd_send_signal sys_pidfd_send_signal 354 + 425 common io_uring_setup sys_io_uring_setup 355 + 426 common io_uring_enter sys_io_uring_enter 356 + 427 common io_uring_register sys_io_uring_register 357 + 428 common open_tree sys_open_tree 358 + 429 common move_mount sys_move_mount 359 + 430 common fsopen sys_fsopen 360 + 431 common fsconfig sys_fsconfig 361 + 432 common fsmount sys_fsmount 362 + 433 common fspick sys_fspick 363 + 434 common pidfd_open sys_pidfd_open 364 + 435 common clone3 sys_clone3 365 + 436 common close_range sys_close_range 366 + 437 common openat2 sys_openat2 367 + 438 common pidfd_getfd sys_pidfd_getfd 368 + 439 common faccessat2 sys_faccessat2 369 + 440 common process_madvise sys_process_madvise 370 + 441 common epoll_pwait2 sys_epoll_pwait2 371 + 442 common mount_setattr sys_mount_setattr 372 + 443 common quotactl_fd sys_quotactl_fd 373 + 444 
common landlock_create_ruleset sys_landlock_create_ruleset 374 + 445 common landlock_add_rule sys_landlock_add_rule 375 + 446 common landlock_restrict_self sys_landlock_restrict_self 376 + 447 common memfd_secret sys_memfd_secret 377 + 448 common process_mrelease sys_process_mrelease 378 + 449 common futex_waitv sys_futex_waitv 379 + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node 380 + 451 common cachestat sys_cachestat 381 + 452 common fchmodat2 sys_fchmodat2 382 + 453 common map_shadow_stack sys_map_shadow_stack 383 + 454 common futex_wake sys_futex_wake 384 + 455 common futex_wait sys_futex_wait 385 + 456 common futex_requeue sys_futex_requeue 386 + 457 common statmount sys_statmount 387 + 458 common listmount sys_listmount 388 + 459 common lsm_get_self_attr sys_lsm_get_self_attr 389 + 460 common lsm_set_self_attr sys_lsm_set_self_attr 390 + 461 common lsm_list_modules sys_lsm_list_modules 391 + 462 common mseal sys_mseal 392 + 463 common setxattrat sys_setxattrat 393 + 464 common getxattrat sys_getxattrat 394 + 465 common listxattrat sys_listxattrat 395 + 466 common removexattrat sys_removexattrat 396 + 467 common open_tree_attr sys_open_tree_attr 397 + 468 common file_getattr sys_file_getattr 398 + 469 common file_setattr sys_file_setattr 399 + 470 common listns sys_listns 400 + 471 common rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/sh/entry/syscalls/syscall.tbl
··· 474 474 468 common file_getattr sys_file_getattr 475 475 469 common file_setattr sys_file_setattr 476 476 470 common listns sys_listns 477 + 471 common rseq_slice_yield sys_rseq_slice_yield
+2 -1
tools/perf/arch/sparc/entry/syscalls/syscall.tbl
··· 480 480 432 common fsmount sys_fsmount 481 481 433 common fspick sys_fspick 482 482 434 common pidfd_open sys_pidfd_open 483 - # 435 reserved for clone3 483 + 435 common clone3 __sys_clone3 484 484 436 common close_range sys_close_range 485 485 437 common openat2 sys_openat2 486 486 438 common pidfd_getfd sys_pidfd_getfd ··· 516 516 468 common file_getattr sys_file_getattr 517 517 469 common file_setattr sys_file_setattr 518 518 470 common listns sys_listns 519 + 471 common rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
··· 476 476 468 i386 file_getattr sys_file_getattr 477 477 469 i386 file_setattr sys_file_setattr 478 478 470 i386 listns sys_listns 479 + 471 i386 rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
··· 395 395 468 common file_getattr sys_file_getattr 396 396 469 common file_setattr sys_file_setattr 397 397 470 common listns sys_listns 398 + 471 common rseq_slice_yield sys_rseq_slice_yield 398 399 399 400 # 400 401 # Due to a historical design error, certain syscalls are numbered differently
+1
tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
··· 441 441 468 common file_getattr sys_file_getattr 442 442 469 common file_setattr sys_file_setattr 443 443 470 common listns sys_listns 444 + 471 common rseq_slice_yield sys_rseq_slice_yield
+7 -2
tools/perf/builtin-ftrace.c
··· 18 18 #include <poll.h> 19 19 #include <ctype.h> 20 20 #include <linux/capability.h> 21 + #include <linux/err.h> 21 22 #include <linux/string.h> 22 23 #include <sys/stat.h> 23 24 ··· 1210 1209 ftrace->graph_verbose = 0; 1211 1210 1212 1211 ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL); 1213 - if (ftrace->profile_hash == NULL) 1214 - return -ENOMEM; 1212 + if (IS_ERR(ftrace->profile_hash)) { 1213 + int err = PTR_ERR(ftrace->profile_hash); 1214 + 1215 + ftrace->profile_hash = NULL; 1216 + return err; 1217 + } 1215 1218 1216 1219 return 0; 1217 1220 }
+2 -1
tools/perf/pmu-events/Build
··· 214 214 quiet_cmd_rm = RM $^ 215 215 216 216 prune_orphans: $(ORPHAN_FILES) 217 - $(Q)$(call echo-cmd,rm)rm -f $^ 217 + # The list of files can be long. Use xargs to prevent issues. 218 + $(Q)$(call echo-cmd,rm)echo "$^" | xargs rm -f 218 219 219 220 JEVENTS_DEPS += prune_orphans 220 221 endif
+1
tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
··· 77 77 */ 78 78 #define IRQ_WORK_VECTOR 0xf6 79 79 80 + /* IRQ vector for PMIs when running a guest with a mediated PMU. */ 80 81 #define PERF_GUEST_MEDIATED_PMI_VECTOR 0xf5 81 82 82 83 #define DEFERRED_ERROR_VECTOR 0xf4
+1
tools/perf/trace/beauty/include/uapi/linux/fs.h
··· 253 253 #define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ 254 254 #define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */ 255 255 #define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */ 256 + #define FS_XFLAG_VERITY 0x00020000 /* fs-verity enabled */ 256 257 #define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ 257 258 258 259 /* the read-only stuff doesn't really belong here, but any other place is
+11 -2
tools/perf/trace/beauty/include/uapi/linux/mount.h
··· 61 61 /* 62 62 * open_tree() flags. 63 63 */ 64 - #define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */ 64 + #define OPEN_TREE_CLONE (1 << 0) /* Clone the target tree and attach the clone */ 65 + #define OPEN_TREE_NAMESPACE (1 << 1) /* Clone the target tree into a new mount namespace */ 65 66 #define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */ 66 67 67 68 /* ··· 198 197 */ 199 198 struct mnt_id_req { 200 199 __u32 size; 201 - __u32 mnt_ns_fd; 200 + union { 201 + __u32 mnt_ns_fd; 202 + __u32 mnt_fd; 203 + }; 202 204 __u64 mnt_id; 203 205 __u64 param; 204 206 __u64 mnt_ns_id; ··· 235 231 */ 236 232 #define LSMT_ROOT 0xffffffffffffffff /* root mount */ 237 233 #define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */ 234 + 235 + /* 236 + * @flag bits for statmount(2) 237 + */ 238 + #define STATMOUNT_BY_FD 0x00000001U /* want mountinfo for given fd */ 238 239 239 240 #endif /* _UAPI_LINUX_MOUNT_H */
+37
tools/perf/trace/beauty/include/uapi/linux/prctl.h
··· 386 386 # define PR_FUTEX_HASH_SET_SLOTS 1 387 387 # define PR_FUTEX_HASH_GET_SLOTS 2 388 388 389 + /* RSEQ time slice extensions */ 390 + #define PR_RSEQ_SLICE_EXTENSION 79 391 + # define PR_RSEQ_SLICE_EXTENSION_GET 1 392 + # define PR_RSEQ_SLICE_EXTENSION_SET 2 393 + /* 394 + * Bits for RSEQ_SLICE_EXTENSION_GET/SET 395 + * PR_RSEQ_SLICE_EXT_ENABLE: Enable 396 + */ 397 + # define PR_RSEQ_SLICE_EXT_ENABLE 0x01 398 + 399 + /* 400 + * Get the current indirect branch tracking configuration for the current 401 + * thread, this will be the value configured via PR_SET_INDIR_BR_LP_STATUS. 402 + */ 403 + #define PR_GET_INDIR_BR_LP_STATUS 80 404 + 405 + /* 406 + * Set the indirect branch tracking configuration. PR_INDIR_BR_LP_ENABLE will 407 + * enable cpu feature for user thread, to track all indirect branches and ensure 408 + * they land on arch defined landing pad instruction. 409 + * x86 - If enabled, an indirect branch must land on an ENDBRANCH instruction. 410 + * arch64 - If enabled, an indirect branch must land on a BTI instruction. 411 + * riscv - If enabled, an indirect branch must land on an lpad instruction. 412 + * PR_INDIR_BR_LP_DISABLE will disable feature for user thread and indirect 413 + * branches will no more be tracked by cpu to land on arch defined landing pad 414 + * instruction. 415 + */ 416 + #define PR_SET_INDIR_BR_LP_STATUS 81 417 + # define PR_INDIR_BR_LP_ENABLE (1UL << 0) 418 + 419 + /* 420 + * Prevent further changes to the specified indirect branch tracking 421 + * configuration. All bits may be locked via this call, including 422 + * undefined bits. 423 + */ 424 + #define PR_LOCK_INDIR_BR_LP_STATUS 82 425 + 389 426 #endif /* _LINUX_PRCTL_H */
+1 -1
tools/perf/util/annotate-arch/annotate-loongarch.c
··· 93 93 start = map__unmap_ip(map, sym->start); 94 94 end = map__unmap_ip(map, sym->end); 95 95 96 - ops->target.outside = target.addr < start || target.addr > end; 96 + ops->target.outside = target.addr < start || target.addr >= end; 97 97 98 98 if (maps__find_ams(thread__maps(ms->thread), &target) == 0 && 99 99 map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
+4 -1
tools/perf/util/annotate.c
··· 44 44 #include "strbuf.h" 45 45 #include <regex.h> 46 46 #include <linux/bitops.h> 47 + #include <linux/err.h> 47 48 #include <linux/kernel.h> 48 49 #include <linux/string.h> 49 50 #include <linux/zalloc.h> ··· 138 137 return -1; 139 138 140 139 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL); 141 - if (src->samples == NULL) 140 + if (IS_ERR(src->samples)) { 142 141 zfree(&src->histograms); 142 + src->samples = NULL; 143 + } 143 144 144 145 return src->histograms ? 0 : -1; 145 146 }
+1 -1
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
··· 549 549 /* 550 550 * Process the PE_CONTEXT packets if we have a valid contextID or VMID. 551 551 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2 552 - * as VMID, Bit ETM_OPT_CTXTID2 is set in this case. 552 + * as VMID, Format attribute 'contextid2' is set in this case. 553 553 */ 554 554 switch (cs_etm__get_pid_fmt(etmq)) { 555 555 case CS_ETM_PIDFMT_CTXTID:
+13 -23
tools/perf/util/cs-etm.c
··· 194 194 * CS_ETM_PIDFMT_CTXTID2: CONTEXTIDR_EL2 is traced. 195 195 * CS_ETM_PIDFMT_NONE: No context IDs 196 196 * 197 - * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2 197 + * It's possible that the two format attributes 'contextid1' and 'contextid2' 198 198 * are enabled at the same time when the session runs on an EL2 kernel. 199 199 * This means the CONTEXTIDR_EL1 and CONTEXTIDR_EL2 both will be 200 200 * recorded in the trace data, the tool will selectively use ··· 210 210 if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) { 211 211 val = metadata[CS_ETM_ETMCR]; 212 212 /* CONTEXTIDR is traced */ 213 - if (val & BIT(ETM_OPT_CTXTID)) 213 + if (val & ETMCR_CTXTID) 214 214 return CS_ETM_PIDFMT_CTXTID; 215 215 } else { 216 216 val = metadata[CS_ETMV4_TRCCONFIGR]; 217 217 /* CONTEXTIDR_EL2 is traced */ 218 - if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT))) 218 + if (val & (TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT)) 219 219 return CS_ETM_PIDFMT_CTXTID2; 220 220 /* CONTEXTIDR_EL1 is traced */ 221 - else if (val & BIT(ETM4_CFG_BIT_CTXTID)) 221 + else if (val & TRCCONFIGR_CID) 222 222 return CS_ETM_PIDFMT_CTXTID; 223 223 } 224 224 ··· 2914 2914 return 0; 2915 2915 } 2916 2916 2917 - static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm) 2917 + static void cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm) 2918 2918 { 2919 - struct evsel *evsel; 2920 - struct evlist *evlist = etm->session->evlist; 2919 + /* Take first ETM as all options will be the same for all ETMs */ 2920 + u64 *metadata = etm->metadata[0]; 2921 2921 2922 2922 /* Override timeless mode with user input from --itrace=Z */ 2923 2923 if (etm->synth_opts.timeless_decoding) { 2924 2924 etm->timeless_decoding = true; 2925 - return 0; 2925 + return; 2926 2926 } 2927 2927 2928 - /* 2929 - * Find the cs_etm evsel and look at what its timestamp setting was 2930 - */ 2931 - evlist__for_each_entry(evlist, evsel) 2932 - if 
(cs_etm__evsel_is_auxtrace(etm->session, evsel)) { 2933 - etm->timeless_decoding = 2934 - !(evsel->core.attr.config & BIT(ETM_OPT_TS)); 2935 - return 0; 2936 - } 2937 - 2938 - pr_err("CS ETM: Couldn't find ETM evsel\n"); 2939 - return -EINVAL; 2928 + if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) 2929 + etm->timeless_decoding = !(metadata[CS_ETM_ETMCR] & ETMCR_TIMESTAMP_EN); 2930 + else 2931 + etm->timeless_decoding = !(metadata[CS_ETMV4_TRCCONFIGR] & TRCCONFIGR_TS); 2940 2932 } 2941 2933 2942 2934 /* ··· 3491 3499 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace; 3492 3500 session->auxtrace = &etm->auxtrace; 3493 3501 3494 - err = cs_etm__setup_timeless_decoding(etm); 3495 - if (err) 3496 - return err; 3502 + cs_etm__setup_timeless_decoding(etm); 3497 3503 3498 3504 etm->tc.time_shift = tc->time_shift; 3499 3505 etm->tc.time_mult = tc->time_mult;
+15
tools/perf/util/cs-etm.h
··· 230 230 /* CoreSight trace ID is currently the bottom 7 bits of the value */ 231 231 #define CORESIGHT_TRACE_ID_VAL_MASK GENMASK(6, 0) 232 232 233 + /* ETMv4 CONFIGR register bits */ 234 + #define TRCCONFIGR_BB BIT(3) 235 + #define TRCCONFIGR_CCI BIT(4) 236 + #define TRCCONFIGR_CID BIT(6) 237 + #define TRCCONFIGR_VMID BIT(7) 238 + #define TRCCONFIGR_TS BIT(11) 239 + #define TRCCONFIGR_RS BIT(12) 240 + #define TRCCONFIGR_VMIDOPT BIT(15) 241 + 242 + /* ETMv3 ETMCR register bits */ 243 + #define ETMCR_CYC_ACC BIT(12) 244 + #define ETMCR_CTXTID BIT(14) 245 + #define ETMCR_TIMESTAMP_EN BIT(28) 246 + #define ETMCR_RETURN_STACK BIT(29) 247 + 233 248 int cs_etm__process_auxtrace_info(union perf_event *event, 234 249 struct perf_session *session); 235 250 void cs_etm_get_default_config(const struct perf_pmu *pmu, struct perf_event_attr *attr);
+1 -1
tools/perf/util/disasm.c
··· 384 384 start = map__unmap_ip(map, sym->start); 385 385 end = map__unmap_ip(map, sym->end); 386 386 387 - ops->target.outside = target.addr < start || target.addr > end; 387 + ops->target.outside = target.addr < start || target.addr >= end; 388 388 389 389 /* 390 390 * FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
+5
tools/perf/util/synthetic-events.c
··· 703 703 704 704 memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); 705 705 706 + /* Clear stale build ID from previous module iteration */ 707 + event->mmap2.header.misc &= ~PERF_RECORD_MISC_MMAP_BUILD_ID; 708 + memset(event->mmap2.build_id, 0, sizeof(event->mmap2.build_id)); 709 + event->mmap2.build_id_size = 0; 710 + 706 711 perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false); 707 712 } else { 708 713 size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
+5
tools/power/cpupower/cpupower-service.conf
··· 30 30 # its policy for the relative importance of performance versus energy savings to 31 31 # the processor. See man CPUPOWER-SET(1) for additional details 32 32 #PERF_BIAS= 33 + 34 + # Set the Energy Performance Preference 35 + # Available options can be read from 36 + # /sys/devices/system/cpu/cpufreq/policy0/energy_performance_available_preferences 37 + #EPP=
+6
tools/power/cpupower/cpupower.sh
··· 23 23 cpupower set -b "$PERF_BIAS" > /dev/null || ESTATUS=1 24 24 fi 25 25 26 + # apply Energy Performance Preference 27 + if test -n "$EPP" 28 + then 29 + cpupower set -e "$EPP" > /dev/null || ESTATUS=1 30 + fi 31 + 26 32 exit $ESTATUS
+5 -1
tools/power/cpupower/utils/cpupower-set.c
··· 124 124 } 125 125 126 126 if (params.turbo_boost) { 127 - ret = cpupower_set_turbo_boost(turbo_boost); 127 + if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) 128 + ret = cpupower_set_intel_turbo_boost(turbo_boost); 129 + else 130 + ret = cpupower_set_generic_turbo_boost(turbo_boost); 131 + 128 132 if (ret) 129 133 fprintf(stderr, "Error setting turbo-boost\n"); 130 134 }
+4 -1
tools/power/cpupower/utils/helpers/helpers.h
··· 104 104 /* cpuid and cpuinfo helpers **************************/ 105 105 106 106 int cpufreq_has_generic_boost_support(bool *active); 107 - int cpupower_set_turbo_boost(int turbo_boost); 107 + int cpupower_set_generic_turbo_boost(int turbo_boost); 108 108 109 109 /* X86 ONLY ****************************************/ 110 110 #if defined(__i386__) || defined(__x86_64__) ··· 143 143 144 144 int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, 145 145 int *active, int *states); 146 + int cpupower_set_intel_turbo_boost(int turbo_boost); 146 147 147 148 /* AMD P-State stuff **************************/ 148 149 bool cpupower_amd_pstate_enabled(void); ··· 189 188 190 189 static inline int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, 191 190 int *active, int *states) 191 + { return -1; } 192 + static inline int cpupower_set_intel_turbo_boost(int turbo_boost) 192 193 { return -1; } 193 194 194 195 static inline bool cpupower_amd_pstate_enabled(void)
+39 -2
tools/power/cpupower/utils/helpers/misc.c
··· 19 19 { 20 20 int ret; 21 21 unsigned long long val; 22 + char linebuf[MAX_LINE_LEN]; 23 + char path[SYSFS_PATH_MAX]; 24 + char *endp; 22 25 23 26 *support = *active = *states = 0; 24 27 ··· 45 42 } 46 43 } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATE) { 47 44 amd_pstate_boost_init(cpu, support, active); 48 - } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA) 45 + } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA) { 49 46 *support = *active = 1; 47 + 48 + snprintf(path, sizeof(path), PATH_TO_CPU "intel_pstate/no_turbo"); 49 + 50 + if (!is_valid_path(path)) 51 + return 0; 52 + 53 + if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0) 54 + return -1; 55 + 56 + val = strtol(linebuf, &endp, 0); 57 + if (endp == linebuf || errno == ERANGE) 58 + return -1; 59 + 60 + *active = !val; 61 + } 62 + return 0; 63 + } 64 + 65 + int cpupower_set_intel_turbo_boost(int turbo_boost) 66 + { 67 + char path[SYSFS_PATH_MAX]; 68 + char linebuf[2] = {}; 69 + 70 + snprintf(path, sizeof(path), PATH_TO_CPU "intel_pstate/no_turbo"); 71 + 72 + /* Fallback to generic solution when intel_pstate driver not running */ 73 + if (!is_valid_path(path)) 74 + return cpupower_set_generic_turbo_boost(turbo_boost); 75 + 76 + snprintf(linebuf, sizeof(linebuf), "%d", !turbo_boost); 77 + 78 + if (cpupower_write_sysfs(path, linebuf, 2) <= 0) 79 + return -1; 80 + 50 81 return 0; 51 82 } 52 83 ··· 311 274 } 312 275 } 313 276 314 - int cpupower_set_turbo_boost(int turbo_boost) 277 + int cpupower_set_generic_turbo_boost(int turbo_boost) 315 278 { 316 279 char path[SYSFS_PATH_MAX]; 317 280 char linebuf[2] = {};
+2 -2
tools/power/cpupower/utils/powercap-info.c
··· 38 38 printf(" (%s)\n", mode ? "enabled" : "disabled"); 39 39 40 40 if (zone->has_power_uw) 41 - printf(_("%sPower can be monitored in micro Jules\n"), 41 + printf(_("%sPower can be monitored in micro Watts\n"), 42 42 pr_prefix); 43 43 44 44 if (zone->has_energy_uj) 45 - printf(_("%sPower can be monitored in micro Watts\n"), 45 + printf(_("%sPower can be monitored in micro Jules\n"), 46 46 pr_prefix); 47 47 48 48 printf("\n");
+1
tools/scripts/syscall.tbl
··· 411 411 468 common file_getattr sys_file_getattr 412 412 469 common file_setattr sys_file_setattr 413 413 470 common listns sys_listns 414 + 471 common rseq_slice_yield sys_rseq_slice_yield
+12
tools/testing/selftests/hid/progs/hid_bpf_helpers.h
··· 6 6 #define __HID_BPF_HELPERS_H 7 7 8 8 /* "undefine" structs and enums in vmlinux.h, because we "override" them below */ 9 + #define bpf_wq bpf_wq___not_used 9 10 #define hid_bpf_ctx hid_bpf_ctx___not_used 10 11 #define hid_bpf_ops hid_bpf_ops___not_used 12 + #define hid_device hid_device___not_used 11 13 #define hid_report_type hid_report_type___not_used 12 14 #define hid_class_request hid_class_request___not_used 13 15 #define hid_bpf_attach_flags hid_bpf_attach_flags___not_used ··· 29 27 30 28 #include "vmlinux.h" 31 29 30 + #undef bpf_wq 32 31 #undef hid_bpf_ctx 33 32 #undef hid_bpf_ops 33 + #undef hid_device 34 34 #undef hid_report_type 35 35 #undef hid_class_request 36 36 #undef hid_bpf_attach_flags ··· 57 53 HID_FEATURE_REPORT = 2, 58 54 59 55 HID_REPORT_TYPES, 56 + }; 57 + 58 + struct hid_device { 59 + unsigned int id; 60 + } __attribute__((preserve_access_index)); 61 + 62 + struct bpf_wq { 63 + __u64 __opaque[2]; 60 64 }; 61 65 62 66 struct hid_bpf_ctx {
+1
tools/testing/selftests/kvm/Makefile.kvm
··· 71 71 TEST_GEN_PROGS_x86 += x86/cr4_cpuid_sync_test 72 72 TEST_GEN_PROGS_x86 += x86/dirty_log_page_splitting_test 73 73 TEST_GEN_PROGS_x86 += x86/feature_msrs_test 74 + TEST_GEN_PROGS_x86 += x86/evmcs_smm_controls_test 74 75 TEST_GEN_PROGS_x86 += x86/exit_on_emulation_failure_test 75 76 TEST_GEN_PROGS_x86 += x86/fastops_test 76 77 TEST_GEN_PROGS_x86 += x86/fix_hypercall_test
+1 -1
tools/testing/selftests/kvm/guest_memfd_test.c
··· 80 80 { 81 81 const unsigned long nodemask_0 = 1; /* nid: 0 */ 82 82 unsigned long nodemask = 0; 83 - unsigned long maxnode = 8; 83 + unsigned long maxnode = BITS_PER_TYPE(nodemask); 84 84 int policy; 85 85 char *mem; 86 86 int ret;
+23
tools/testing/selftests/kvm/include/x86/processor.h
··· 557 557 return cr0; 558 558 } 559 559 560 + static inline void set_cr0(uint64_t val) 561 + { 562 + __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory"); 563 + } 564 + 560 565 static inline uint64_t get_cr3(void) 561 566 { 562 567 uint64_t cr3; ··· 569 564 __asm__ __volatile__("mov %%cr3, %[cr3]" 570 565 : /* output */ [cr3]"=r"(cr3)); 571 566 return cr3; 567 + } 568 + 569 + static inline void set_cr3(uint64_t val) 570 + { 571 + __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory"); 572 572 } 573 573 574 574 static inline uint64_t get_cr4(void) ··· 588 578 static inline void set_cr4(uint64_t val) 589 579 { 590 580 __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory"); 581 + } 582 + 583 + static inline uint64_t get_cr8(void) 584 + { 585 + uint64_t cr8; 586 + 587 + __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8)); 588 + return cr8; 589 + } 590 + 591 + static inline void set_cr8(uint64_t val) 592 + { 593 + __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory"); 591 594 } 592 595 593 596 static inline void set_idt(const struct desc_ptr *idt_desc)
+17
tools/testing/selftests/kvm/include/x86/smm.h
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #ifndef SELFTEST_KVM_SMM_H 3 + #define SELFTEST_KVM_SMM_H 4 + 5 + #include "kvm_util.h" 6 + 7 + #define SMRAM_SIZE 65536 8 + #define SMRAM_MEMSLOT ((1 << 16) | 1) 9 + #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) 10 + 11 + void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 12 + uint64_t smram_gpa, 13 + const void *smi_handler, size_t handler_size); 14 + 15 + void inject_smi(struct kvm_vcpu *vcpu); 16 + 17 + #endif /* SELFTEST_KVM_SMM_H */
+26
tools/testing/selftests/kvm/lib/x86/processor.c
··· 8 8 #include "kvm_util.h" 9 9 #include "pmu.h" 10 10 #include "processor.h" 11 + #include "smm.h" 11 12 #include "svm_util.h" 12 13 #include "sev.h" 13 14 #include "vmx.h" ··· 1444 1443 bool kvm_arch_has_default_irqchip(void) 1445 1444 { 1446 1445 return true; 1446 + } 1447 + 1448 + void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 1449 + uint64_t smram_gpa, 1450 + const void *smi_handler, size_t handler_size) 1451 + { 1452 + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa, 1453 + SMRAM_MEMSLOT, SMRAM_PAGES, 0); 1454 + TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, smram_gpa, 1455 + SMRAM_MEMSLOT) == smram_gpa, 1456 + "Could not allocate guest physical addresses for SMRAM"); 1457 + 1458 + memset(addr_gpa2hva(vm, smram_gpa), 0x0, SMRAM_SIZE); 1459 + memcpy(addr_gpa2hva(vm, smram_gpa) + 0x8000, smi_handler, handler_size); 1460 + vcpu_set_msr(vcpu, MSR_IA32_SMBASE, smram_gpa); 1461 + } 1462 + 1463 + void inject_smi(struct kvm_vcpu *vcpu) 1464 + { 1465 + struct kvm_vcpu_events events; 1466 + 1467 + vcpu_events_get(vcpu, &events); 1468 + events.smi.pending = 1; 1469 + events.flags |= KVM_VCPUEVENT_VALID_SMM; 1470 + vcpu_events_set(vcpu, &events); 1447 1471 }
+150
tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2026, Red Hat, Inc. 4 + * 5 + * Test that vmx_leave_smm() validates vmcs12 controls before re-entering 6 + * nested guest mode on RSM. 7 + */ 8 + #include <fcntl.h> 9 + #include <stdio.h> 10 + #include <stdlib.h> 11 + #include <string.h> 12 + #include <sys/ioctl.h> 13 + 14 + #include "test_util.h" 15 + #include "kvm_util.h" 16 + #include "smm.h" 17 + #include "hyperv.h" 18 + #include "vmx.h" 19 + 20 + #define SMRAM_GPA 0x1000000 21 + #define SMRAM_STAGE 0xfe 22 + 23 + #define SYNC_PORT 0xe 24 + 25 + #define STR(x) #x 26 + #define XSTR(s) STR(s) 27 + 28 + /* 29 + * SMI handler: runs in real-address mode. 30 + * Reports SMRAM_STAGE via port IO, then does RSM. 31 + */ 32 + static uint8_t smi_handler[] = { 33 + 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 34 + 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 35 + 0x0f, 0xaa, /* rsm */ 36 + }; 37 + 38 + static inline void sync_with_host(uint64_t phase) 39 + { 40 + asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n" 41 + : "+a" (phase)); 42 + } 43 + 44 + static void l2_guest_code(void) 45 + { 46 + sync_with_host(1); 47 + 48 + /* After SMI+RSM with invalid controls, we should not reach here. 
*/ 49 + vmcall(); 50 + } 51 + 52 + static void guest_code(struct vmx_pages *vmx_pages, 53 + struct hyperv_test_pages *hv_pages) 54 + { 55 + #define L2_GUEST_STACK_SIZE 64 56 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 57 + 58 + /* Set up Hyper-V enlightenments and eVMCS */ 59 + wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); 60 + enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist); 61 + evmcs_enable(); 62 + 63 + GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 64 + GUEST_ASSERT(load_evmcs(hv_pages)); 65 + prepare_vmcs(vmx_pages, l2_guest_code, 66 + &l2_guest_stack[L2_GUEST_STACK_SIZE]); 67 + 68 + GUEST_ASSERT(!vmlaunch()); 69 + 70 + /* L2 exits via vmcall if test fails */ 71 + sync_with_host(2); 72 + } 73 + 74 + int main(int argc, char *argv[]) 75 + { 76 + vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; 77 + struct hyperv_test_pages *hv; 78 + struct hv_enlightened_vmcs *evmcs; 79 + struct kvm_vcpu *vcpu; 80 + struct kvm_vm *vm; 81 + struct kvm_regs regs; 82 + int stage_reported; 83 + 84 + TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX)); 85 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE)); 86 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)); 87 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM)); 88 + 89 + vm = vm_create_with_one_vcpu(&vcpu, guest_code); 90 + 91 + setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler)); 92 + 93 + vcpu_set_hv_cpuid(vcpu); 94 + vcpu_enable_evmcs(vcpu); 95 + vcpu_alloc_vmx(vm, &vmx_pages_gva); 96 + hv = vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva); 97 + vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva); 98 + 99 + vcpu_run(vcpu); 100 + 101 + /* L2 is running and syncs with host. */ 102 + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); 103 + vcpu_regs_get(vcpu, &regs); 104 + stage_reported = regs.rax & 0xff; 105 + TEST_ASSERT(stage_reported == 1, 106 + "Expected stage 1, got %d", stage_reported); 107 + 108 + /* Inject SMI while L2 is running. 
*/ 109 + inject_smi(vcpu); 110 + vcpu_run(vcpu); 111 + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); 112 + vcpu_regs_get(vcpu, &regs); 113 + stage_reported = regs.rax & 0xff; 114 + TEST_ASSERT(stage_reported == SMRAM_STAGE, 115 + "Expected SMM handler stage %#x, got %#x", 116 + SMRAM_STAGE, stage_reported); 117 + 118 + /* 119 + * Guest is now paused in the SMI handler, about to execute RSM. 120 + * Hack the eVMCS page to set-up invalid pin-based execution 121 + * control (PIN_BASED_VIRTUAL_NMIS without PIN_BASED_NMI_EXITING). 122 + */ 123 + evmcs = hv->enlightened_vmcs_hva; 124 + evmcs->pin_based_vm_exec_control |= PIN_BASED_VIRTUAL_NMIS; 125 + evmcs->hv_clean_fields = 0; 126 + 127 + /* 128 + * Trigger copy_enlightened_to_vmcs12() via KVM_GET_NESTED_STATE, 129 + * copying the invalid pin_based_vm_exec_control into cached_vmcs12. 130 + */ 131 + union { 132 + struct kvm_nested_state state; 133 + char state_[16384]; 134 + } nested_state_buf; 135 + 136 + memset(&nested_state_buf, 0, sizeof(nested_state_buf)); 137 + nested_state_buf.state.size = sizeof(nested_state_buf); 138 + vcpu_nested_state_get(vcpu, &nested_state_buf.state); 139 + 140 + /* 141 + * Resume the guest. The SMI handler executes RSM, which calls 142 + * vmx_leave_smm(). nested_vmx_check_controls() should detect 143 + * VIRTUAL_NMIS without NMI_EXITING and cause a triple fault. 144 + */ 145 + vcpu_run(vcpu); 146 + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN); 147 + 148 + kvm_vm_free(vm); 149 + return 0; 150 + }
+30
tools/testing/selftests/kvm/x86/sev_smoke_test.c
··· 13 13 #include "linux/psp-sev.h" 14 14 #include "sev.h" 15 15 16 + static void guest_sev_test_msr(uint32_t msr) 17 + { 18 + uint64_t val = rdmsr(msr); 19 + 20 + wrmsr(msr, val); 21 + GUEST_ASSERT(val == rdmsr(msr)); 22 + } 23 + 24 + #define guest_sev_test_reg(reg) \ 25 + do { \ 26 + uint64_t val = get_##reg(); \ 27 + \ 28 + set_##reg(val); \ 29 + GUEST_ASSERT(val == get_##reg()); \ 30 + } while (0) 31 + 32 + static void guest_sev_test_regs(void) 33 + { 34 + guest_sev_test_msr(MSR_EFER); 35 + guest_sev_test_reg(cr0); 36 + guest_sev_test_reg(cr3); 37 + guest_sev_test_reg(cr4); 38 + guest_sev_test_reg(cr8); 39 + } 16 40 17 41 #define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM) 18 42 ··· 48 24 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED); 49 25 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED); 50 26 27 + guest_sev_test_regs(); 28 + 51 29 wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ); 52 30 vmgexit(); 53 31 } ··· 59 33 /* TODO: Check CPUID after GHCB-based hypercall support is added. */ 60 34 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED); 61 35 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED); 36 + 37 + guest_sev_test_regs(); 62 38 63 39 /* 64 40 * TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply ··· 74 46 { 75 47 GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV)); 76 48 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED); 49 + 50 + guest_sev_test_regs(); 77 51 78 52 GUEST_DONE(); 79 53 }
+2 -25
tools/testing/selftests/kvm/x86/smm_test.c
··· 14 14 #include "test_util.h" 15 15 16 16 #include "kvm_util.h" 17 + #include "smm.h" 17 18 18 19 #include "vmx.h" 19 20 #include "svm_util.h" 20 21 21 - #define SMRAM_SIZE 65536 22 - #define SMRAM_MEMSLOT ((1 << 16) | 1) 23 - #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) 24 22 #define SMRAM_GPA 0x1000000 25 23 #define SMRAM_STAGE 0xfe 26 24 ··· 111 113 sync_with_host(DONE); 112 114 } 113 115 114 - void inject_smi(struct kvm_vcpu *vcpu) 115 - { 116 - struct kvm_vcpu_events events; 117 - 118 - vcpu_events_get(vcpu, &events); 119 - 120 - events.smi.pending = 1; 121 - events.flags |= KVM_VCPUEVENT_VALID_SMM; 122 - 123 - vcpu_events_set(vcpu, &events); 124 - } 125 - 126 116 int main(int argc, char *argv[]) 127 117 { 128 118 vm_vaddr_t nested_gva = 0; ··· 126 140 /* Create VM */ 127 141 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 128 142 129 - vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA, 130 - SMRAM_MEMSLOT, SMRAM_PAGES, 0); 131 - TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT) 132 - == SMRAM_GPA, "could not allocate guest physical addresses?"); 133 - 134 - memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE); 135 - memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler, 136 - sizeof(smi_handler)); 137 - 138 - vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA); 143 + setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler)); 139 144 140 145 if (kvm_has_cap(KVM_CAP_NESTED_STATE)) { 141 146 if (kvm_cpu_has(X86_FEATURE_SVM))
+2 -2
tools/testing/selftests/powerpc/copyloops/.gitignore
··· 2 2 copyuser_64_t0 3 3 copyuser_64_t1 4 4 copyuser_64_t2 5 - copyuser_p7_t0 6 - copyuser_p7_t1 5 + copyuser_p7 6 + copyuser_p7_vmx 7 7 memcpy_64_t0 8 8 memcpy_64_t1 9 9 memcpy_64_t2
+8 -3
tools/testing/selftests/powerpc/copyloops/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \ 3 - copyuser_p7_t0 copyuser_p7_t1 \ 3 + copyuser_p7 copyuser_p7_vmx \ 4 4 memcpy_64_t0 memcpy_64_t1 memcpy_64_t2 \ 5 5 memcpy_p7_t0 memcpy_p7_t1 copy_mc_64 \ 6 6 copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2 \ ··· 28 28 -D SELFTEST_CASE=$(subst copyuser_64_t,,$(notdir $@)) \ 29 29 -o $@ $^ 30 30 31 - $(OUTPUT)/copyuser_p7_t%: copyuser_power7.S $(EXTRA_SOURCES) 31 + $(OUTPUT)/copyuser_p7: copyuser_power7.S $(EXTRA_SOURCES) 32 32 $(CC) $(CPPFLAGS) $(CFLAGS) \ 33 33 -D COPY_LOOP=test___copy_tofrom_user_power7 \ 34 - -D SELFTEST_CASE=$(subst copyuser_p7_t,,$(notdir $@)) \ 34 + -o $@ $^ 35 + 36 + $(OUTPUT)/copyuser_p7_vmx: copyuser_power7.S $(EXTRA_SOURCES) ../utils.c 37 + $(CC) $(CPPFLAGS) $(CFLAGS) \ 38 + -D COPY_LOOP=test___copy_tofrom_user_power7_vmx \ 39 + -D VMX_TEST \ 35 40 -o $@ $^ 36 41 37 42 # Strictly speaking, we only need the memcpy_64 test cases for big-endian
-8
tools/testing/selftests/powerpc/copyloops/stubs.S
··· 1 1 #include <asm/ppc_asm.h> 2 2 3 - FUNC_START(enter_vmx_usercopy) 4 - li r3,1 5 - blr 6 - 7 - FUNC_START(exit_vmx_usercopy) 8 - li r3,0 9 - blr 10 - 11 3 FUNC_START(enter_vmx_ops) 12 4 li r3,1 13 5 blr
+14 -1
tools/testing/selftests/powerpc/copyloops/validate.c
··· 12 12 #define BUFLEN (MAX_LEN+MAX_OFFSET+2*MIN_REDZONE) 13 13 #define POISON 0xa5 14 14 15 + #ifdef VMX_TEST 16 + #define VMX_COPY_THRESHOLD 3328 17 + #endif 18 + 15 19 unsigned long COPY_LOOP(void *to, const void *from, unsigned long size); 16 20 17 21 static void do_one(char *src, char *dst, unsigned long src_off, ··· 85 81 /* Fill with sequential bytes */ 86 82 for (i = 0; i < BUFLEN; i++) 87 83 fill[i] = i & 0xff; 88 - 84 + #ifdef VMX_TEST 85 + /* Force sizes above kernel VMX threshold (3328) */ 86 + for (len = VMX_COPY_THRESHOLD + 1; len < MAX_LEN; len++) { 87 + #else 89 88 for (len = 1; len < MAX_LEN; len++) { 89 + #endif 90 90 for (src_off = 0; src_off < MAX_OFFSET; src_off++) { 91 91 for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) { 92 92 do_one(src, dst, src_off, dst_off, len, ··· 104 96 105 97 int main(void) 106 98 { 99 + #ifdef VMX_TEST 100 + /* Skip if Altivec not present */ 101 + SKIP_IF_MSG(!have_hwcap(PPC_FEATURE_HAS_ALTIVEC), "ALTIVEC not supported"); 102 + #endif 103 + 107 104 return test_harness(test_copy_loop, str(COPY_LOOP)); 108 105 }
+2 -2
tools/testing/selftests/sched_ext/util.c
··· 60 60 char buf[64]; 61 61 int ret; 62 62 63 - ret = sprintf(buf, "%lu", val); 63 + ret = sprintf(buf, "%ld", val); 64 64 if (ret < 0) 65 65 return ret; 66 66 67 - if (write_text(path, buf, sizeof(buf)) <= 0) 67 + if (write_text(path, buf, ret) <= 0) 68 68 return -1; 69 69 70 70 return 0;
+1 -1
virt/kvm/binary_stats.c
··· 50 50 * Return: the number of bytes that has been successfully read 51 51 */ 52 52 ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, 53 - const struct _kvm_stats_desc *desc, 53 + const struct kvm_stats_desc *desc, 54 54 void *stats, size_t size_stats, 55 55 char __user *user_buffer, size_t size, loff_t *offset) 56 56 {
+10 -10
virt/kvm/kvm_main.c
··· 973 973 kvm_free_memslot(kvm, memslot); 974 974 } 975 975 976 - static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 976 + static umode_t kvm_stats_debugfs_mode(const struct kvm_stats_desc *desc) 977 977 { 978 - switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 978 + switch (desc->flags & KVM_STATS_TYPE_MASK) { 979 979 case KVM_STATS_TYPE_INSTANT: 980 980 return 0444; 981 981 case KVM_STATS_TYPE_CUMULATIVE: ··· 1010 1010 struct dentry *dent; 1011 1011 char dir_name[ITOA_MAX_LEN * 2]; 1012 1012 struct kvm_stat_data *stat_data; 1013 - const struct _kvm_stats_desc *pdesc; 1013 + const struct kvm_stats_desc *pdesc; 1014 1014 int i, ret = -ENOMEM; 1015 1015 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1016 1016 kvm_vcpu_stats_header.num_desc; ··· 6171 6171 switch (stat_data->kind) { 6172 6172 case KVM_STAT_VM: 6173 6173 r = kvm_get_stat_per_vm(stat_data->kvm, 6174 - stat_data->desc->desc.offset, val); 6174 + stat_data->desc->offset, val); 6175 6175 break; 6176 6176 case KVM_STAT_VCPU: 6177 6177 r = kvm_get_stat_per_vcpu(stat_data->kvm, 6178 - stat_data->desc->desc.offset, val); 6178 + stat_data->desc->offset, val); 6179 6179 break; 6180 6180 } 6181 6181 ··· 6193 6193 switch (stat_data->kind) { 6194 6194 case KVM_STAT_VM: 6195 6195 r = kvm_clear_stat_per_vm(stat_data->kvm, 6196 - stat_data->desc->desc.offset); 6196 + stat_data->desc->offset); 6197 6197 break; 6198 6198 case KVM_STAT_VCPU: 6199 6199 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 6200 - stat_data->desc->desc.offset); 6200 + stat_data->desc->offset); 6201 6201 break; 6202 6202 } 6203 6203 ··· 6345 6345 static void kvm_init_debug(void) 6346 6346 { 6347 6347 const struct file_operations *fops; 6348 - const struct _kvm_stats_desc *pdesc; 6348 + const struct kvm_stats_desc *pdesc; 6349 6349 int i; 6350 6350 6351 6351 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); ··· 6358 6358 fops = &vm_stat_readonly_fops; 6359 6359 debugfs_create_file(pdesc->name, 
kvm_stats_debugfs_mode(pdesc), 6360 6360 kvm_debugfs_dir, 6361 - (void *)(long)pdesc->desc.offset, fops); 6361 + (void *)(long)pdesc->offset, fops); 6362 6362 } 6363 6363 6364 6364 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { ··· 6369 6369 fops = &vcpu_stat_readonly_fops; 6370 6370 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 6371 6371 kvm_debugfs_dir, 6372 - (void *)(long)pdesc->desc.offset, fops); 6372 + (void *)(long)pdesc->offset, fops); 6373 6373 } 6374 6374 } 6375 6375