Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ipa/ipa_interrupt.c
drivers/net/ipa/ipa_interrupt.h
9ec9b2a30853 ("net: ipa: disable ipa interrupt during suspend")
8e461e1f092b ("net: ipa: introduce ipa_interrupt_enable()")
d50ed3558719 ("net: ipa: enable IPA interrupt handlers separate from registration")
https://lore.kernel.org/all/20230119114125.5182c7ab@canb.auug.org.au/
https://lore.kernel.org/all/79e46152-8043-a512-79d9-c3b905462774@tessares.net/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+3436 -2704
+1
.mailmap
··· 371 371 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com> 372 372 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org> 373 373 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com> 374 + Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org> 374 375 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com> 375 376 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com> 376 377 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+1 -3
Documentation/admin-guide/mm/zswap.rst
··· 70 70 The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which 71 71 means the compression ratio will always be 2:1 or worse (because of half-full 72 72 zbud pages). The zsmalloc type zpool has a more complex compressed page 73 - storage method, and it can achieve greater storage densities. However, 74 - zsmalloc does not implement compressed page eviction, so once zswap fills it 75 - cannot evict the oldest page, it can only reject new pages. 73 + storage method, and it can achieve greater storage densities. 76 74 77 75 When a swap page is passed from frontswap to zswap, zswap maintains a mapping 78 76 of the swap entry, a combination of the swap type and swap offset, to the zpool
+2
Documentation/arm64/silicon-errata.rst
··· 120 120 +----------------+-----------------+-----------------+-----------------------------+ 121 121 | ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 | 122 122 +----------------+-----------------+-----------------+-----------------------------+ 123 + | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 | 124 + +----------------+-----------------+-----------------+-----------------------------+ 123 125 | ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 | 124 126 +----------------+-----------------+-----------------+-----------------------------+ 125 127 | ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
+11 -1
Documentation/conf.py
··· 31 31 # Get Sphinx version 32 32 major, minor, patch = sphinx.version_info[:3] 33 33 34 + # 35 + # Warn about older versions that we don't want to support for much 36 + # longer. 37 + # 38 + if (major < 2) or (major == 2 and minor < 4): 39 + print('WARNING: support for Sphinx < 2.4 will be removed soon.') 34 40 35 41 # If extensions (or modules to document with autodoc) are in another directory, 36 42 # add these directories to sys.path here. If the directory is relative to the ··· 345 339 346 340 # Custom sidebar templates, maps document names to template names. 347 341 # Note that the RTD theme ignores this 348 - html_sidebars = { '**': ["about.html", 'searchbox.html', 'localtoc.html', 'sourcelink.html']} 342 + html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']} 343 + 344 + # about.html is available for alabaster theme. Add it at the front. 345 + if html_theme == 'alabaster': 346 + html_sidebars['**'].insert(0, 'about.html') 349 347 350 348 # Output file base name for HTML help builder. 351 349 htmlhelp_basename = 'TheLinuxKerneldoc'
+11
Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
··· 54 54 - const: xo 55 55 - const: alternate 56 56 57 + interrupts: 58 + minItems: 1 59 + maxItems: 3 60 + 61 + interrupt-names: 62 + minItems: 1 63 + items: 64 + - const: dcvsh-irq-0 65 + - const: dcvsh-irq-1 66 + - const: dcvsh-irq-2 67 + 57 68 '#freq-domain-cells': 58 69 const: 1 59 70
+1 -3
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
··· 32 32 - description: Display byte clock 33 33 - description: Display byte interface clock 34 34 - description: Display pixel clock 35 - - description: Display escape clock 35 + - description: Display core clock 36 36 - description: Display AHB clock 37 37 - description: Display AXI clock 38 38 ··· 137 137 - phys 138 138 - assigned-clocks 139 139 - assigned-clock-parents 140 - - power-domains 141 - - operating-points-v2 142 140 - ports 143 141 144 142 additionalProperties: false
-1
Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
··· 69 69 - compatible 70 70 - reg 71 71 - reg-names 72 - - vdds-supply 73 72 74 73 unevaluatedProperties: false 75 74
-1
Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
··· 39 39 - compatible 40 40 - reg 41 41 - reg-names 42 - - vcca-supply 43 42 44 43 unevaluatedProperties: false 45 44
+4
Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
··· 34 34 vddio-supply: 35 35 description: Phandle to vdd-io regulator device node. 36 36 37 + qcom,dsi-phy-regulator-ldo-mode: 38 + type: boolean 39 + description: Indicates if the LDO mode PHY regulator is wanted. 40 + 37 41 required: 38 42 - compatible 39 43 - reg
+1 -1
Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml
··· 72 72 #include <dt-bindings/interconnect/qcom,qcm2290.h> 73 73 #include <dt-bindings/power/qcom-rpmpd.h> 74 74 75 - mdss@5e00000 { 75 + display-subsystem@5e00000 { 76 76 #address-cells = <1>; 77 77 #size-cells = <1>; 78 78 compatible = "qcom,qcm2290-mdss";
+1 -1
Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml
··· 62 62 #include <dt-bindings/interrupt-controller/arm-gic.h> 63 63 #include <dt-bindings/power/qcom-rpmpd.h> 64 64 65 - mdss@5e00000 { 65 + display-subsystem@5e00000 { 66 66 #address-cells = <1>; 67 67 #size-cells = <1>; 68 68 compatible = "qcom,sm6115-mdss";
+1
Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
··· 16 16 compatible: 17 17 enum: 18 18 - mediatek,mt8186-mt6366-rt1019-rt5682s-sound 19 + - mediatek,mt8186-mt6366-rt5682s-max98360-sound 19 20 20 21 mediatek,platform: 21 22 $ref: "/schemas/types.yaml#/definitions/phandle"
+3 -1
Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml
··· 30 30 const: 0 31 31 32 32 clocks: 33 - maxItems: 5 33 + oneOf: 34 + - maxItems: 3 35 + - maxItems: 5 34 36 35 37 clock-names: 36 38 oneOf:
+47 -11
Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml
··· 9 9 maintainers: 10 10 - Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 11 11 12 - allOf: 13 - - $ref: dai-common.yaml# 14 - 15 12 properties: 16 13 compatible: 17 14 enum: ··· 27 30 const: 0 28 31 29 32 clocks: 30 - maxItems: 5 33 + minItems: 5 34 + maxItems: 6 31 35 32 36 clock-names: 33 - items: 34 - - const: mclk 35 - - const: npl 36 - - const: macro 37 - - const: dcodec 38 - - const: fsgen 37 + minItems: 5 38 + maxItems: 6 39 39 40 40 clock-output-names: 41 41 maxItems: 1 ··· 49 55 - reg 50 56 - "#sound-dai-cells" 51 57 58 + allOf: 59 + - $ref: dai-common.yaml# 60 + 61 + - if: 62 + properties: 63 + compatible: 64 + enum: 65 + - qcom,sc7280-lpass-wsa-macro 66 + - qcom,sm8450-lpass-wsa-macro 67 + - qcom,sc8280xp-lpass-wsa-macro 68 + then: 69 + properties: 70 + clocks: 71 + maxItems: 5 72 + clock-names: 73 + items: 74 + - const: mclk 75 + - const: npl 76 + - const: macro 77 + - const: dcodec 78 + - const: fsgen 79 + 80 + - if: 81 + properties: 82 + compatible: 83 + enum: 84 + - qcom,sm8250-lpass-wsa-macro 85 + then: 86 + properties: 87 + clocks: 88 + minItems: 6 89 + clock-names: 90 + items: 91 + - const: mclk 92 + - const: npl 93 + - const: macro 94 + - const: dcodec 95 + - const: va 96 + - const: fsgen 97 + 52 98 unevaluatedProperties: false 53 99 54 100 examples: 55 101 - | 102 + #include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h> 56 103 #include <dt-bindings/sound/qcom,q6afe.h> 57 104 codec@3240000 { 58 105 compatible = "qcom,sm8250-lpass-wsa-macro"; ··· 104 69 <&audiocc 0>, 105 70 <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, 106 71 <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, 72 + <&aoncc LPASS_CDC_VA_MCLK>, 107 73 <&vamacro>; 108 - clock-names = "mclk", "npl", "macro", "dcodec", "fsgen"; 74 + clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen"; 109 75 clock-output-names = "mclk"; 110 76 };
+2
Documentation/filesystems/erofs.rst
··· 120 120 dax A legacy option which is an alias for ``dax=always``. 121 121 device=%s Specify a path to an extra device to be used together. 122 122 fsid=%s Specify a filesystem image ID for Fscache back-end. 123 + domain_id=%s Specify a domain ID in fscache mode so that different images 124 + with the same blobs under a given domain ID can share storage. 123 125 =================== ========================================================= 124 126 125 127 Sysfs Entries
+4 -2
Documentation/sphinx/load_config.py
··· 3 3 4 4 import os 5 5 import sys 6 - from sphinx.util.pycompat import execfile_ 6 + from sphinx.util.osutil import fs_encoding 7 7 8 8 # ------------------------------------------------------------------------------ 9 9 def loadConfig(namespace): ··· 48 48 sys.stdout.write("load additional sphinx-config: %s\n" % config_file) 49 49 config = namespace.copy() 50 50 config['__file__'] = config_file 51 - execfile_(config_file, config) 51 + with open(config_file, 'rb') as f: 52 + code = compile(f.read(), fs_encoding, 'exec') 53 + exec(code, config) 52 54 del config['__file__'] 53 55 namespace.update(config) 54 56 else:
+22
Documentation/virt/kvm/api.rst
··· 1354 1354 mmap() that affects the region will be made visible immediately. Another 1355 1355 example is madvise(MADV_DROP). 1356 1356 1357 + Note: On arm64, a write generated by the page-table walker (to update 1358 + the Access and Dirty flags, for example) never results in a 1359 + KVM_EXIT_MMIO exit when the slot has the KVM_MEM_READONLY flag. This 1360 + is because KVM cannot provide the data that would be written by the 1361 + page-table walker, making it impossible to emulate the access. 1362 + Instead, an abort (data abort if the cause of the page-table update 1363 + was a load or a store, instruction abort if it was an instruction 1364 + fetch) is injected in the guest. 1357 1365 1358 1366 4.36 KVM_SET_TSS_ADDR 1359 1367 --------------------- ··· 8317 8309 CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``. 8318 8310 It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel 8319 8311 has enabled in-kernel emulation of the local APIC. 8312 + 8313 + CPU topology 8314 + ~~~~~~~~~~~~ 8315 + 8316 + Several CPUID values include topology information for the host CPU: 8317 + 0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems. Different 8318 + versions of KVM return different values for this information and userspace 8319 + should not rely on it. Currently they return all zeroes. 8320 + 8321 + If userspace wishes to set up a guest topology, it should be careful that 8322 + the values of these three leaves differ for each CPU. In particular, 8323 + the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX 8324 + for 0x8000001e; the latter also encodes the core id and node id in bits 8325 + 7:0 of EBX and ECX respectively. 8320 8326 8321 8327 Obsolete ioctls and capabilities 8322 8328 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+12 -11
Documentation/virt/kvm/locking.rst
··· 24 24 25 25 For SRCU: 26 26 27 - - ``synchronize_srcu(&kvm->srcu)`` is called _inside_ 28 - the kvm->slots_lock critical section, therefore kvm->slots_lock 29 - cannot be taken inside a kvm->srcu read-side critical section. 30 - Instead, kvm->slots_arch_lock is released before the call 31 - to ``synchronize_srcu()`` and _can_ be taken inside a 32 - kvm->srcu read-side critical section. 27 + - ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections 28 + for kvm->lock, vcpu->mutex and kvm->slots_lock. These locks _cannot_ 29 + be taken inside a kvm->srcu read-side critical section; that is, the 30 + following is broken:: 33 31 34 - - kvm->lock is taken inside kvm->srcu, therefore 35 - ``synchronize_srcu(&kvm->srcu)`` cannot be called inside 36 - a kvm->lock critical section. If you cannot delay the 37 - call until after kvm->lock is released, use ``call_srcu``. 32 + srcu_read_lock(&kvm->srcu); 33 + mutex_lock(&kvm->slots_lock); 34 + 35 + - kvm->slots_arch_lock instead is released before the call to 36 + ``synchronize_srcu()``. It _can_ therefore be taken inside a 37 + kvm->srcu read-side critical section, for example while processing 38 + a vmexit. 38 39 39 40 On x86: 40 41 41 - - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock 42 + - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock 42 43 43 44 - kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and 44 45 kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+26 -6
MAINTAINERS
··· 1104 1104 F: arch/arm64/boot/dts/amd/ 1105 1105 1106 1106 AMD XGBE DRIVER 1107 - M: Tom Lendacky <thomas.lendacky@amd.com> 1108 1107 M: "Shyam Sundar S K" <Shyam-sundar.S-k@amd.com> 1109 1108 L: netdev@vger.kernel.org 1110 1109 S: Supported ··· 6947 6948 DRM DRIVERS FOR BRIDGE CHIPS 6948 6949 M: Andrzej Hajda <andrzej.hajda@intel.com> 6949 6950 M: Neil Armstrong <neil.armstrong@linaro.org> 6950 - M: Robert Foss <robert.foss@linaro.org> 6951 + M: Robert Foss <rfoss@kernel.org> 6951 6952 R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com> 6952 6953 R: Jonas Karlman <jonas@kwiboo.se> 6953 6954 R: Jernej Skrabec <jernej.skrabec@gmail.com> ··· 11355 11356 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) 11356 11357 M: Marc Zyngier <maz@kernel.org> 11357 11358 R: James Morse <james.morse@arm.com> 11358 - R: Alexandru Elisei <alexandru.elisei@arm.com> 11359 11359 R: Suzuki K Poulose <suzuki.poulose@arm.com> 11360 11360 R: Oliver Upton <oliver.upton@linux.dev> 11361 + R: Zenghui Yu <yuzenghui@huawei.com> 11361 11362 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 11362 11363 L: kvmarm@lists.linux.dev 11363 11364 L: kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers) ··· 14921 14922 F: Documentation/nvme/ 14922 14923 F: drivers/nvme/host/ 14923 14924 F: drivers/nvme/common/ 14924 - F: include/linux/nvme* 14925 + F: include/linux/nvme.h 14926 + F: include/linux/nvme-*.h 14925 14927 F: include/uapi/linux/nvme_ioctl.h 14926 14928 14927 14929 NVM EXPRESS FABRICS AUTHENTICATION ··· 15758 15758 S: Maintained 15759 15759 W: https://wireless.wiki.kernel.org/en/users/Drivers/p54 15760 15760 F: drivers/net/wireless/intersil/p54/ 15761 + 15762 + PACKET SOCKETS 15763 + M: Willem de Bruijn <willemdebruijn.kernel@gmail.com> 15764 + S: Maintained 15765 + F: include/uapi/linux/if_packet.h 15766 + F: net/packet/af_packet.c 15761 15767 15762 15768 PACKING 15763 15769 M: Vladimir Oltean <olteanv@gmail.com> ··· 17260 17254 F: 
drivers/net/wwan/qcom_bam_dmux.c 17261 17255 17262 17256 QUALCOMM CAMERA SUBSYSTEM DRIVER 17263 - M: Robert Foss <robert.foss@linaro.org> 17257 + M: Robert Foss <rfoss@kernel.org> 17264 17258 M: Todor Tomov <todor.too@gmail.com> 17265 17259 L: linux-media@vger.kernel.org 17266 17260 S: Maintained ··· 17340 17334 17341 17335 QUALCOMM I2C CCI DRIVER 17342 17336 M: Loic Poulain <loic.poulain@linaro.org> 17343 - M: Robert Foss <robert.foss@linaro.org> 17337 + M: Robert Foss <rfoss@kernel.org> 17344 17338 L: linux-i2c@vger.kernel.org 17345 17339 L: linux-arm-msm@vger.kernel.org 17346 17340 S: Maintained ··· 19347 19341 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 19348 19342 S: Orphan 19349 19343 F: sound/soc/uniphier/ 19344 + 19345 + SOCKET TIMESTAMPING 19346 + M: Willem de Bruijn <willemdebruijn.kernel@gmail.com> 19347 + S: Maintained 19348 + F: Documentation/networking/timestamping.rst 19349 + F: include/uapi/linux/net_tstamp.h 19350 + F: tools/testing/selftests/net/so_txtime.c 19350 19351 19351 19352 SOEKRIS NET48XX LED SUPPORT 19352 19353 M: Chris Boot <bootc@bootc.net> ··· 21774 21761 T: git git://linuxtv.org/media_tree.git 21775 21762 F: Documentation/admin-guide/media/zr364xx* 21776 21763 F: drivers/staging/media/deprecated/zr364xx/ 21764 + 21765 + USER DATAGRAM PROTOCOL (UDP) 21766 + M: Willem de Bruijn <willemdebruijn.kernel@gmail.com> 21767 + S: Maintained 21768 + F: include/linux/udp.h 21769 + F: net/ipv4/udp.c 21770 + F: net/ipv6/udp.c 21777 21771 21778 21772 USER-MODE LINUX (UML) 21779 21773 M: Richard Weinberger <richard@nod.at>
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 2 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION*
+16 -2
arch/arm64/Kconfig
··· 184 184 select HAVE_DEBUG_KMEMLEAK 185 185 select HAVE_DMA_CONTIGUOUS 186 186 select HAVE_DYNAMIC_FTRACE 187 - select HAVE_DYNAMIC_FTRACE_WITH_ARGS \ 188 - if $(cc-option,-fpatchable-function-entry=2) 189 187 select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \ 190 188 if DYNAMIC_FTRACE_WITH_ARGS 191 189 select HAVE_EFFICIENT_UNALIGNED_ACCESS ··· 967 969 Work around this problem by returning 0 when reading the affected counter in 968 970 key locations that results in disabling all users of this counter. This effect 969 971 is the same to firmware disabling affected counters. 972 + 973 + If unsure, say Y. 974 + 975 + config ARM64_ERRATUM_2645198 976 + bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption" 977 + default y 978 + help 979 + This option adds the workaround for ARM Cortex-A715 erratum 2645198. 980 + 981 + If a Cortex-A715 cpu sees a page mapping permissions change from executable 982 + to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the 983 + next instruction abort caused by permission fault. 984 + 985 + Only user-space does executable to non-executable permission transition via 986 + mprotect() system call. Workaround the problem by doing a break-before-make 987 + TLB invalidation, for all changes to executable user space mappings. 970 988 971 989 If unsure, say Y. 972 990
+1 -1
arch/arm64/include/asm/atomic_ll_sc.h
··· 315 315 " cbnz %w0, 1b\n" \ 316 316 " " #mb "\n" \ 317 317 "2:" \ 318 - : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ 318 + : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \ 319 319 : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ 320 320 : cl); \ 321 321 \
+1 -1
arch/arm64/include/asm/atomic_lse.h
··· 311 311 " eor %[old2], %[old2], %[oldval2]\n" \ 312 312 " orr %[old1], %[old1], %[old2]" \ 313 313 : [old1] "+&r" (x0), [old2] "+&r" (x1), \ 314 - [v] "+Q" (*(unsigned long *)ptr) \ 314 + [v] "+Q" (*(__uint128_t *)ptr) \ 315 315 : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ 316 316 [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ 317 317 : cl); \
+4
arch/arm64/include/asm/cputype.h
··· 124 124 #define APPLE_CPU_PART_M1_FIRESTORM_PRO 0x025 125 125 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028 126 126 #define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029 127 + #define APPLE_CPU_PART_M2_BLIZZARD 0x032 128 + #define APPLE_CPU_PART_M2_AVALANCHE 0x033 127 129 128 130 #define AMPERE_CPU_PART_AMPERE1 0xAC3 129 131 ··· 179 177 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO) 180 178 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX) 181 179 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX) 180 + #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD) 181 + #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE) 182 182 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) 183 183 184 184 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+9
arch/arm64/include/asm/esr.h
··· 114 114 #define ESR_ELx_FSC_ACCESS (0x08) 115 115 #define ESR_ELx_FSC_FAULT (0x04) 116 116 #define ESR_ELx_FSC_PERM (0x0C) 117 + #define ESR_ELx_FSC_SEA_TTW0 (0x14) 118 + #define ESR_ELx_FSC_SEA_TTW1 (0x15) 119 + #define ESR_ELx_FSC_SEA_TTW2 (0x16) 120 + #define ESR_ELx_FSC_SEA_TTW3 (0x17) 121 + #define ESR_ELx_FSC_SECC (0x18) 122 + #define ESR_ELx_FSC_SECC_TTW0 (0x1c) 123 + #define ESR_ELx_FSC_SECC_TTW1 (0x1d) 124 + #define ESR_ELx_FSC_SECC_TTW2 (0x1e) 125 + #define ESR_ELx_FSC_SECC_TTW3 (0x1f) 117 126 118 127 /* ISS field definitions for Data Aborts */ 119 128 #define ESR_ELx_ISV_SHIFT (24)
+9
arch/arm64/include/asm/hugetlb.h
··· 49 49 50 50 void __init arm64_hugetlb_cma_reserve(void); 51 51 52 + #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start 53 + extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, 54 + unsigned long addr, pte_t *ptep); 55 + 56 + #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit 57 + extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, 58 + unsigned long addr, pte_t *ptep, 59 + pte_t old_pte, pte_t new_pte); 60 + 52 61 #include <asm-generic/hugetlb.h> 53 62 54 63 #endif /* __ASM_HUGETLB_H */
-15
arch/arm64/include/asm/kvm_arm.h
··· 319 319 BIT(18) | \ 320 320 GENMASK(16, 15)) 321 321 322 - /* For compatibility with fault code shared with 32-bit */ 323 - #define FSC_FAULT ESR_ELx_FSC_FAULT 324 - #define FSC_ACCESS ESR_ELx_FSC_ACCESS 325 - #define FSC_PERM ESR_ELx_FSC_PERM 326 - #define FSC_SEA ESR_ELx_FSC_EXTABT 327 - #define FSC_SEA_TTW0 (0x14) 328 - #define FSC_SEA_TTW1 (0x15) 329 - #define FSC_SEA_TTW2 (0x16) 330 - #define FSC_SEA_TTW3 (0x17) 331 - #define FSC_SECC (0x18) 332 - #define FSC_SECC_TTW0 (0x1c) 333 - #define FSC_SECC_TTW1 (0x1d) 334 - #define FSC_SECC_TTW2 (0x1e) 335 - #define FSC_SECC_TTW3 (0x1f) 336 - 337 322 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ 338 323 #define HPFAR_MASK (~UL(0xf)) 339 324 /*
+30 -12
arch/arm64/include/asm/kvm_emulate.h
··· 349 349 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu) 350 350 { 351 351 switch (kvm_vcpu_trap_get_fault(vcpu)) { 352 - case FSC_SEA: 353 - case FSC_SEA_TTW0: 354 - case FSC_SEA_TTW1: 355 - case FSC_SEA_TTW2: 356 - case FSC_SEA_TTW3: 357 - case FSC_SECC: 358 - case FSC_SECC_TTW0: 359 - case FSC_SECC_TTW1: 360 - case FSC_SECC_TTW2: 361 - case FSC_SECC_TTW3: 352 + case ESR_ELx_FSC_EXTABT: 353 + case ESR_ELx_FSC_SEA_TTW0: 354 + case ESR_ELx_FSC_SEA_TTW1: 355 + case ESR_ELx_FSC_SEA_TTW2: 356 + case ESR_ELx_FSC_SEA_TTW3: 357 + case ESR_ELx_FSC_SECC: 358 + case ESR_ELx_FSC_SECC_TTW0: 359 + case ESR_ELx_FSC_SECC_TTW1: 360 + case ESR_ELx_FSC_SECC_TTW2: 361 + case ESR_ELx_FSC_SECC_TTW3: 362 362 return true; 363 363 default: 364 364 return false; ··· 373 373 374 374 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) 375 375 { 376 - if (kvm_vcpu_abt_iss1tw(vcpu)) 377 - return true; 376 + if (kvm_vcpu_abt_iss1tw(vcpu)) { 377 + /* 378 + * Only a permission fault on a S1PTW should be 379 + * considered as a write. Otherwise, page tables baked 380 + * in a read-only memslot will result in an exception 381 + * being delivered in the guest. 382 + * 383 + * The drawback is that we end-up faulting twice if the 384 + * guest is using any of HW AF/DB: a translation fault 385 + * to map the page containing the PT (read only at 386 + * first), then a permission fault to allow the flags 387 + * to be set. 388 + */ 389 + switch (kvm_vcpu_trap_get_fault_type(vcpu)) { 390 + case ESR_ELx_FSC_PERM: 391 + return true; 392 + default: 393 + return false; 394 + } 395 + } 378 396 379 397 if (kvm_vcpu_trap_is_iabt(vcpu)) 380 398 return false;
+13 -3
arch/arm64/include/asm/pgtable.h
··· 681 681 #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) 682 682 #define pud_valid(pud) pte_valid(pud_pte(pud)) 683 683 #define pud_user(pud) pte_user(pud_pte(pud)) 684 - 684 + #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) 685 685 686 686 static inline void set_pud(pud_t *pudp, pud_t pud) 687 687 { ··· 730 730 #else 731 731 732 732 #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; }) 733 + #define pud_user_exec(pud) pud_user(pud) /* Always 0 with folding */ 733 734 734 735 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */ 735 736 #define pmd_set_fixmap(addr) NULL ··· 863 862 864 863 static inline bool pmd_user_accessible_page(pmd_t pmd) 865 864 { 866 - return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd)); 865 + return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd)); 867 866 } 868 867 869 868 static inline bool pud_user_accessible_page(pud_t pud) 870 869 { 871 - return pud_leaf(pud) && pud_user(pud); 870 + return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud)); 872 871 } 873 872 #endif 874 873 ··· 1094 1093 } 1095 1094 1096 1095 1096 + #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 1097 + #define ptep_modify_prot_start ptep_modify_prot_start 1098 + extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, 1099 + unsigned long addr, pte_t *ptep); 1100 + 1101 + #define ptep_modify_prot_commit ptep_modify_prot_commit 1102 + extern void ptep_modify_prot_commit(struct vm_area_struct *vma, 1103 + unsigned long addr, pte_t *ptep, 1104 + pte_t old_pte, pte_t new_pte); 1097 1105 #endif /* !__ASSEMBLY__ */ 1098 1106 1099 1107 #endif /* __ASM_PGTABLE_H */
+1 -1
arch/arm64/include/asm/uprobes.h
··· 16 16 #define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE 17 17 #define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES 18 18 19 - typedef u32 uprobe_opcode_t; 19 + typedef __le32 uprobe_opcode_t; 20 20 21 21 struct arch_uprobe_task { 22 22 };
+7
arch/arm64/kernel/cpu_errata.c
··· 661 661 CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus), 662 662 }, 663 663 #endif 664 + #ifdef CONFIG_ARM64_ERRATUM_2645198 665 + { 666 + .desc = "ARM erratum 2645198", 667 + .capability = ARM64_WORKAROUND_2645198, 668 + ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715) 669 + }, 670 + #endif 664 671 #ifdef CONFIG_ARM64_ERRATUM_2077057 665 672 { 666 673 .desc = "ARM erratum 2077057",
+1
arch/arm64/kernel/efi-rt-wrapper.S
··· 4 4 */ 5 5 6 6 #include <linux/linkage.h> 7 + #include <asm/assembler.h> 7 8 8 9 SYM_FUNC_START(__efi_rt_asm_wrapper) 9 10 stp x29, x30, [sp, #-112]!
+28 -33
arch/arm64/kernel/elfcore.c
··· 8 8 #include <asm/cpufeature.h> 9 9 #include <asm/mte.h> 10 10 11 - #define for_each_mte_vma(vmi, vma) \ 11 + #define for_each_mte_vma(cprm, i, m) \ 12 12 if (system_supports_mte()) \ 13 - for_each_vma(vmi, vma) \ 14 - if (vma->vm_flags & VM_MTE) 13 + for (i = 0, m = cprm->vma_meta; \ 14 + i < cprm->vma_count; \ 15 + i++, m = cprm->vma_meta + i) \ 16 + if (m->flags & VM_MTE) 15 17 16 - static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma) 18 + static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m) 17 19 { 18 - if (vma->vm_flags & VM_DONTDUMP) 19 - return 0; 20 - 21 - return vma_pages(vma) * MTE_PAGE_TAG_STORAGE; 20 + return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE; 22 21 } 23 22 24 23 /* Derived from dump_user_range(); start/end must be page-aligned */ 25 24 static int mte_dump_tag_range(struct coredump_params *cprm, 26 - unsigned long start, unsigned long end) 25 + unsigned long start, unsigned long len) 27 26 { 28 27 int ret = 1; 29 28 unsigned long addr; 30 29 void *tags = NULL; 31 30 32 - for (addr = start; addr < end; addr += PAGE_SIZE) { 31 + for (addr = start; addr < start + len; addr += PAGE_SIZE) { 33 32 struct page *page = get_dump_page(addr); 34 33 35 34 /* ··· 64 65 mte_save_page_tags(page_address(page), tags); 65 66 put_page(page); 66 67 if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) { 67 - mte_free_tag_storage(tags); 68 68 ret = 0; 69 69 break; 70 70 } ··· 75 77 return ret; 76 78 } 77 79 78 - Elf_Half elf_core_extra_phdrs(void) 80 + Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm) 79 81 { 80 - struct vm_area_struct *vma; 82 + int i; 83 + struct core_vma_metadata *m; 81 84 int vma_count = 0; 82 - VMA_ITERATOR(vmi, current->mm, 0); 83 85 84 - for_each_mte_vma(vmi, vma) 86 + for_each_mte_vma(cprm, i, m) 85 87 vma_count++; 86 88 87 89 return vma_count; ··· 89 91 90 92 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) 91 93 { 92 - struct vm_area_struct *vma; 93 - 
VMA_ITERATOR(vmi, current->mm, 0); 94 + int i; 95 + struct core_vma_metadata *m; 94 96 95 - for_each_mte_vma(vmi, vma) { 97 + for_each_mte_vma(cprm, i, m) { 96 98 struct elf_phdr phdr; 97 99 98 100 phdr.p_type = PT_AARCH64_MEMTAG_MTE; 99 101 phdr.p_offset = offset; 100 - phdr.p_vaddr = vma->vm_start; 102 + phdr.p_vaddr = m->start; 101 103 phdr.p_paddr = 0; 102 - phdr.p_filesz = mte_vma_tag_dump_size(vma); 103 - phdr.p_memsz = vma->vm_end - vma->vm_start; 104 + phdr.p_filesz = mte_vma_tag_dump_size(m); 105 + phdr.p_memsz = m->end - m->start; 104 106 offset += phdr.p_filesz; 105 107 phdr.p_flags = 0; 106 108 phdr.p_align = 0; ··· 112 114 return 1; 113 115 } 114 116 115 - size_t elf_core_extra_data_size(void) 117 + size_t elf_core_extra_data_size(struct coredump_params *cprm) 116 118 { 117 - struct vm_area_struct *vma; 119 + int i; 120 + struct core_vma_metadata *m; 118 121 size_t data_size = 0; 119 - VMA_ITERATOR(vmi, current->mm, 0); 120 122 121 - for_each_mte_vma(vmi, vma) 122 - data_size += mte_vma_tag_dump_size(vma); 123 + for_each_mte_vma(cprm, i, m) 124 + data_size += mte_vma_tag_dump_size(m); 123 125 124 126 return data_size; 125 127 } 126 128 127 129 int elf_core_write_extra_data(struct coredump_params *cprm) 128 130 { 129 - struct vm_area_struct *vma; 130 - VMA_ITERATOR(vmi, current->mm, 0); 131 + int i; 132 + struct core_vma_metadata *m; 131 133 132 - for_each_mte_vma(vmi, vma) { 133 - if (vma->vm_flags & VM_DONTDUMP) 134 - continue; 135 - 136 - if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end)) 134 + for_each_mte_vma(cprm, i, m) { 135 + if (!mte_dump_tag_range(cprm, m->start, m->dump_size)) 137 136 return 0; 138 137 } 139 138
+1 -1
arch/arm64/kernel/fpsimd.c
··· 385 385 WARN_ON(!system_supports_fpsimd()); 386 386 WARN_ON(!have_cpu_fpsimd_context()); 387 387 388 - if (system_supports_sve()) { 388 + if (system_supports_sve() || system_supports_sme()) { 389 389 switch (current->thread.fp_type) { 390 390 case FP_STATE_FPSIMD: 391 391 /* Stop tracking SVE for this task until next use. */
+1 -1
arch/arm64/kernel/ptrace.c
··· 1357 1357 #ifdef CONFIG_ARM64_SVE 1358 1358 REGSET_SVE, 1359 1359 #endif 1360 - #ifdef CONFIG_ARM64_SVE 1360 + #ifdef CONFIG_ARM64_SME 1361 1361 REGSET_SSVE, 1362 1362 REGSET_ZA, 1363 1363 #endif
+7 -2
arch/arm64/kernel/signal.c
··· 281 281 282 282 vl = task_get_sme_vl(current); 283 283 } else { 284 - if (!system_supports_sve()) 284 + /* 285 + * A SME only system use SVE for streaming mode so can 286 + * have a SVE formatted context with a zero VL and no 287 + * payload data. 288 + */ 289 + if (!system_supports_sve() && !system_supports_sme()) 285 290 return -EINVAL; 286 291 287 292 vl = task_get_sve_vl(current); ··· 737 732 return err; 738 733 } 739 734 740 - if (system_supports_sve()) { 735 + if (system_supports_sve() || system_supports_sme()) { 741 736 unsigned int vq = 0; 742 737 743 738 if (add_all || test_thread_flag(TIF_SVE) ||
+1 -1
arch/arm64/kvm/hyp/include/hyp/fault.h
··· 60 60 */ 61 61 if (!(esr & ESR_ELx_S1PTW) && 62 62 (cpus_have_final_cap(ARM64_WORKAROUND_834220) || 63 - (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { 63 + (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) { 64 64 if (!__translate_far_to_hpfar(far, &hpfar)) 65 65 return false; 66 66 } else {
+1 -1
arch/arm64/kvm/hyp/include/hyp/switch.h
··· 367 367 if (static_branch_unlikely(&vgic_v2_cpuif_trap)) { 368 368 bool valid; 369 369 370 - valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && 370 + valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT && 371 371 kvm_vcpu_dabt_isvalid(vcpu) && 372 372 !kvm_vcpu_abt_issea(vcpu) && 373 373 !kvm_vcpu_abt_iss1tw(vcpu);
+12 -9
arch/arm64/kvm/mmu.c
··· 1212 1212 exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); 1213 1213 VM_BUG_ON(write_fault && exec_fault); 1214 1214 1215 - if (fault_status == FSC_PERM && !write_fault && !exec_fault) { 1215 + if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) { 1216 1216 kvm_err("Unexpected L2 read permission error\n"); 1217 1217 return -EFAULT; 1218 1218 } ··· 1277 1277 * only exception to this is when dirty logging is enabled at runtime 1278 1278 * and a write fault needs to collapse a block entry into a table. 1279 1279 */ 1280 - if (fault_status != FSC_PERM || (logging_active && write_fault)) { 1280 + if (fault_status != ESR_ELx_FSC_PERM || 1281 + (logging_active && write_fault)) { 1281 1282 ret = kvm_mmu_topup_memory_cache(memcache, 1282 1283 kvm_mmu_cache_min_pages(kvm)); 1283 1284 if (ret) ··· 1343 1342 * backed by a THP and thus use block mapping if possible. 1344 1343 */ 1345 1344 if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) { 1346 - if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE) 1345 + if (fault_status == ESR_ELx_FSC_PERM && 1346 + fault_granule > PAGE_SIZE) 1347 1347 vma_pagesize = fault_granule; 1348 1348 else 1349 1349 vma_pagesize = transparent_hugepage_adjust(kvm, memslot, ··· 1352 1350 &fault_ipa); 1353 1351 } 1354 1352 1355 - if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) { 1353 + if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) { 1356 1354 /* Check the VMM hasn't introduced a new disallowed VMA */ 1357 1355 if (kvm_vma_mte_allowed(vma)) { 1358 1356 sanitise_mte_tags(kvm, pfn, vma_pagesize); ··· 1378 1376 * permissions only if vma_pagesize equals fault_granule. Otherwise, 1379 1377 * kvm_pgtable_stage2_map() should be called to change block size. 
1380 1378 */ 1381 - if (fault_status == FSC_PERM && vma_pagesize == fault_granule) 1379 + if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule) 1382 1380 ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); 1383 1381 else 1384 1382 ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, ··· 1443 1441 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); 1444 1442 is_iabt = kvm_vcpu_trap_is_iabt(vcpu); 1445 1443 1446 - if (fault_status == FSC_FAULT) { 1444 + if (fault_status == ESR_ELx_FSC_FAULT) { 1447 1445 /* Beyond sanitised PARange (which is the IPA limit) */ 1448 1446 if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) { 1449 1447 kvm_inject_size_fault(vcpu); ··· 1478 1476 kvm_vcpu_get_hfar(vcpu), fault_ipa); 1479 1477 1480 1478 /* Check the stage-2 fault is trans. fault or write fault */ 1481 - if (fault_status != FSC_FAULT && fault_status != FSC_PERM && 1482 - fault_status != FSC_ACCESS) { 1479 + if (fault_status != ESR_ELx_FSC_FAULT && 1480 + fault_status != ESR_ELx_FSC_PERM && 1481 + fault_status != ESR_ELx_FSC_ACCESS) { 1483 1482 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", 1484 1483 kvm_vcpu_trap_get_class(vcpu), 1485 1484 (unsigned long)kvm_vcpu_trap_get_fault(vcpu), ··· 1542 1539 /* Userspace should not be able to register out-of-bounds IPAs */ 1543 1540 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); 1544 1541 1545 - if (fault_status == FSC_ACCESS) { 1542 + if (fault_status == ESR_ELx_FSC_ACCESS) { 1546 1543 handle_access_fault(vcpu, fault_ipa); 1547 1544 ret = 1; 1548 1545 goto out_unlock;
+1 -1
arch/arm64/kvm/sys_regs.c
··· 646 646 return; 647 647 648 648 /* Only preserve PMCR_EL0.N, and reset the rest to 0 */ 649 - pmcr = read_sysreg(pmcr_el0) & ARMV8_PMU_PMCR_N_MASK; 649 + pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); 650 650 if (!kvm_supports_32bit_el0()) 651 651 pmcr |= ARMV8_PMU_PMCR_LC; 652 652
+2
arch/arm64/kvm/vgic/vgic-v3.c
··· 616 616 MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO), 617 617 MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX), 618 618 MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX), 619 + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD), 620 + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE), 619 621 {}, 620 622 }; 621 623
+21
arch/arm64/mm/hugetlbpage.c
··· 559 559 { 560 560 return __hugetlb_valid_size(size); 561 561 } 562 + 563 + pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 564 + { 565 + if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) && 566 + cpus_have_const_cap(ARM64_WORKAROUND_2645198)) { 567 + /* 568 + * Break-before-make (BBM) is required for all user space mappings 569 + * when the permission changes from executable to non-executable 570 + * in cases where cpu is affected with errata #2645198. 571 + */ 572 + if (pte_user_exec(READ_ONCE(*ptep))) 573 + return huge_ptep_clear_flush(vma, addr, ptep); 574 + } 575 + return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); 576 + } 577 + 578 + void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 579 + pte_t old_pte, pte_t pte) 580 + { 581 + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 582 + }
+21
arch/arm64/mm/mmu.c
··· 1630 1630 } 1631 1631 early_initcall(prevent_bootmem_remove_init); 1632 1632 #endif 1633 + 1634 + pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 1635 + { 1636 + if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) && 1637 + cpus_have_const_cap(ARM64_WORKAROUND_2645198)) { 1638 + /* 1639 + * Break-before-make (BBM) is required for all user space mappings 1640 + * when the permission changes from executable to non-executable 1641 + * in cases where cpu is affected with errata #2645198. 1642 + */ 1643 + if (pte_user_exec(READ_ONCE(*ptep))) 1644 + return ptep_clear_flush(vma, addr, ptep); 1645 + } 1646 + return ptep_get_and_clear(vma->vm_mm, addr, ptep); 1647 + } 1648 + 1649 + void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 1650 + pte_t old_pte, pte_t pte) 1651 + { 1652 + set_pte_at(vma->vm_mm, addr, ptep, pte); 1653 + }
+1
arch/arm64/tools/cpucaps
··· 71 71 WORKAROUND_2064142 72 72 WORKAROUND_2077057 73 73 WORKAROUND_2457168 74 + WORKAROUND_2645198 74 75 WORKAROUND_2658417 75 76 WORKAROUND_TRBE_OVERWRITE_FILL_MODE 76 77 WORKAROUND_TSB_FLUSH_FAILURE
+2 -2
arch/ia64/kernel/elfcore.c
··· 7 7 #include <asm/elf.h> 8 8 9 9 10 - Elf64_Half elf_core_extra_phdrs(void) 10 + Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm) 11 11 { 12 12 return GATE_EHDR->e_phnum; 13 13 } ··· 60 60 return 1; 61 61 } 62 62 63 - size_t elf_core_extra_data_size(void) 63 + size_t elf_core_extra_data_size(struct coredump_params *cprm) 64 64 { 65 65 const struct elf_phdr *const gate_phdrs = 66 66 (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
-2
arch/loongarch/include/asm/ftrace.h
··· 10 10 #define FTRACE_REGS_PLT_IDX 1 11 11 #define NR_FTRACE_PLTS 2 12 12 13 - #define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1])) 14 - 15 13 #ifdef CONFIG_FUNCTION_TRACER 16 14 17 15 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
+1 -8
arch/loongarch/include/asm/inst.h
··· 377 377 return val < (1UL << bit); 378 378 } 379 379 380 - static inline unsigned long sign_extend(unsigned long val, unsigned int idx) 381 - { 382 - if (!is_imm_negative(val, idx + 1)) 383 - return ((1UL << idx) - 1) & val; 384 - else 385 - return ~((1UL << idx) - 1) | val; 386 - } 387 - 388 380 #define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \ 389 381 static inline void emit_##NAME(union loongarch_instruction *insn, \ 390 382 int offset) \ ··· 393 401 } 394 402 395 403 DEF_EMIT_REG0I26_FORMAT(b, b_op) 404 + DEF_EMIT_REG0I26_FORMAT(bl, bl_op) 396 405 397 406 #define DEF_EMIT_REG1I20_FORMAT(NAME, OP) \ 398 407 static inline void emit_##NAME(union loongarch_instruction *insn, \
+40 -1
arch/loongarch/include/asm/unwind.h
··· 8 8 #define _ASM_UNWIND_H 9 9 10 10 #include <linux/sched.h> 11 + #include <linux/ftrace.h> 11 12 13 + #include <asm/ptrace.h> 12 14 #include <asm/stacktrace.h> 13 15 14 16 enum unwinder_type { ··· 22 20 char type; /* UNWINDER_XXX */ 23 21 struct stack_info stack_info; 24 22 struct task_struct *task; 25 - bool first, error, is_ftrace; 23 + bool first, error, reset; 26 24 int graph_idx; 27 25 unsigned long sp, pc, ra; 28 26 }; 27 + 28 + bool default_next_frame(struct unwind_state *state); 29 29 30 30 void unwind_start(struct unwind_state *state, 31 31 struct task_struct *task, struct pt_regs *regs); ··· 44 40 return state->error; 45 41 } 46 42 43 + #define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1])) 44 + 45 + static inline unsigned long unwind_graph_addr(struct unwind_state *state, 46 + unsigned long pc, unsigned long cfa) 47 + { 48 + return ftrace_graph_ret_addr(state->task, &state->graph_idx, 49 + pc, (unsigned long *)(cfa - GRAPH_FAKE_OFFSET)); 50 + } 51 + 52 + static __always_inline void __unwind_start(struct unwind_state *state, 53 + struct task_struct *task, struct pt_regs *regs) 54 + { 55 + memset(state, 0, sizeof(*state)); 56 + if (regs) { 57 + state->sp = regs->regs[3]; 58 + state->pc = regs->csr_era; 59 + state->ra = regs->regs[1]; 60 + } else if (task && task != current) { 61 + state->sp = thread_saved_fp(task); 62 + state->pc = thread_saved_ra(task); 63 + state->ra = 0; 64 + } else { 65 + state->sp = (unsigned long)__builtin_frame_address(0); 66 + state->pc = (unsigned long)__builtin_return_address(0); 67 + state->ra = 0; 68 + } 69 + state->task = task; 70 + get_stack_info(state->sp, state->task, &state->stack_info); 71 + state->pc = unwind_graph_addr(state, state->pc, state->sp); 72 + } 73 + 74 + static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state) 75 + { 76 + return unwind_done(state) ? 0 : state->pc; 77 + } 47 78 #endif /* _ASM_UNWIND_H */
+1 -1
arch/loongarch/kernel/Makefile
··· 8 8 obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ 9 9 traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ 10 10 elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \ 11 - alternative.o unaligned.o 11 + alternative.o unaligned.o unwind.o 12 12 13 13 obj-$(CONFIG_ACPI) += acpi.o 14 14 obj-$(CONFIG_EFI) += efi.o
+3 -3
arch/loongarch/kernel/alternative.c
··· 74 74 switch (src->reg0i26_format.opcode) { 75 75 case b_op: 76 76 case bl_op: 77 - jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 27); 77 + jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 27); 78 78 if (in_alt_jump(jump_addr, start, end)) 79 79 return; 80 80 offset = jump_addr - pc; ··· 93 93 fallthrough; 94 94 case beqz_op: 95 95 case bnez_op: 96 - jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 22); 96 + jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 22); 97 97 if (in_alt_jump(jump_addr, start, end)) 98 98 return; 99 99 offset = jump_addr - pc; ··· 112 112 case bge_op: 113 113 case bltu_op: 114 114 case bgeu_op: 115 - jump_addr = cur_pc + sign_extend(si << 2, 17); 115 + jump_addr = cur_pc + sign_extend64(si << 2, 17); 116 116 if (in_alt_jump(jump_addr, start, end)) 117 117 return; 118 118 offset = jump_addr - pc;
+1 -1
arch/loongarch/kernel/cpu-probe.c
··· 94 94 c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | 95 95 LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH; 96 96 97 - elf_hwcap |= HWCAP_LOONGARCH_CRC32; 97 + elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32; 98 98 99 99 config = read_cpucfg(LOONGARCH_CPUCFG1); 100 100 if (config & CPUCFG1_UAL) {
+3
arch/loongarch/kernel/genex.S
··· 67 67 .macro BUILD_HANDLER exception handler prep 68 68 .align 5 69 69 SYM_FUNC_START(handle_\exception) 70 + 666: 70 71 BACKUP_T0T1 71 72 SAVE_ALL 72 73 build_prep_\prep 73 74 move a0, sp 74 75 la.abs t0, do_\handler 75 76 jirl ra, t0, 0 77 + 668: 76 78 RESTORE_ALL_AND_RET 77 79 SYM_FUNC_END(handle_\exception) 80 + SYM_DATA(unwind_hint_\exception, .word 668b - 666b) 78 81 .endm 79 82 80 83 BUILD_HANDLER ade ade badv
+7 -38
arch/loongarch/kernel/inst.c
··· 58 58 u32 larch_insn_gen_b(unsigned long pc, unsigned long dest) 59 59 { 60 60 long offset = dest - pc; 61 - unsigned int immediate_l, immediate_h; 62 61 union loongarch_instruction insn; 63 62 64 63 if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) { ··· 65 66 return INSN_BREAK; 66 67 } 67 68 68 - offset >>= 2; 69 - 70 - immediate_l = offset & 0xffff; 71 - offset >>= 16; 72 - immediate_h = offset & 0x3ff; 73 - 74 - insn.reg0i26_format.opcode = b_op; 75 - insn.reg0i26_format.immediate_l = immediate_l; 76 - insn.reg0i26_format.immediate_h = immediate_h; 69 + emit_b(&insn, offset >> 2); 77 70 78 71 return insn.word; 79 72 } ··· 73 82 u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest) 74 83 { 75 84 long offset = dest - pc; 76 - unsigned int immediate_l, immediate_h; 77 85 union loongarch_instruction insn; 78 86 79 87 if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) { ··· 80 90 return INSN_BREAK; 81 91 } 82 92 83 - offset >>= 2; 84 - 85 - immediate_l = offset & 0xffff; 86 - offset >>= 16; 87 - immediate_h = offset & 0x3ff; 88 - 89 - insn.reg0i26_format.opcode = bl_op; 90 - insn.reg0i26_format.immediate_l = immediate_l; 91 - insn.reg0i26_format.immediate_h = immediate_h; 93 + emit_bl(&insn, offset >> 2); 92 94 93 95 return insn.word; 94 96 } ··· 89 107 { 90 108 union loongarch_instruction insn; 91 109 92 - insn.reg3_format.opcode = or_op; 93 - insn.reg3_format.rd = rd; 94 - insn.reg3_format.rj = rj; 95 - insn.reg3_format.rk = rk; 110 + emit_or(&insn, rd, rj, rk); 96 111 97 112 return insn.word; 98 113 } ··· 103 124 { 104 125 union loongarch_instruction insn; 105 126 106 - insn.reg1i20_format.opcode = lu12iw_op; 107 - insn.reg1i20_format.rd = rd; 108 - insn.reg1i20_format.immediate = imm; 127 + emit_lu12iw(&insn, rd, imm); 109 128 110 129 return insn.word; 111 130 } ··· 112 135 { 113 136 union loongarch_instruction insn; 114 137 115 - insn.reg1i20_format.opcode = lu32id_op; 116 - insn.reg1i20_format.rd = rd; 117 - 
insn.reg1i20_format.immediate = imm; 138 + emit_lu32id(&insn, rd, imm); 118 139 119 140 return insn.word; 120 141 } ··· 121 146 { 122 147 union loongarch_instruction insn; 123 148 124 - insn.reg2i12_format.opcode = lu52id_op; 125 - insn.reg2i12_format.rd = rd; 126 - insn.reg2i12_format.rj = rj; 127 - insn.reg2i12_format.immediate = imm; 149 + emit_lu52id(&insn, rd, rj, imm); 128 150 129 151 return insn.word; 130 152 } ··· 130 158 { 131 159 union loongarch_instruction insn; 132 160 133 - insn.reg2i16_format.opcode = jirl_op; 134 - insn.reg2i16_format.rd = rd; 135 - insn.reg2i16_format.rj = rj; 136 - insn.reg2i16_format.immediate = (dest - pc) >> 2; 161 + emit_jirl(&insn, rj, rd, (dest - pc) >> 2); 137 162 138 163 return insn.word; 139 164 }
+3 -9
arch/loongarch/kernel/process.c
··· 191 191 192 192 unsigned long __get_wchan(struct task_struct *task) 193 193 { 194 - unsigned long pc; 194 + unsigned long pc = 0; 195 195 struct unwind_state state; 196 196 197 197 if (!try_get_task_stack(task)) 198 198 return 0; 199 199 200 - unwind_start(&state, task, NULL); 201 - state.sp = thread_saved_fp(task); 202 - get_stack_info(state.sp, state.task, &state.stack_info); 203 - state.pc = thread_saved_ra(task); 204 - #ifdef CONFIG_UNWINDER_PROLOGUE 205 - state.type = UNWINDER_PROLOGUE; 206 - #endif 207 - for (; !unwind_done(&state); unwind_next_frame(&state)) { 200 + for (unwind_start(&state, task, NULL); 201 + !unwind_done(&state); unwind_next_frame(&state)) { 208 202 pc = unwind_get_return_address(&state); 209 203 if (!pc) 210 204 break;
-3
arch/loongarch/kernel/traps.c
··· 72 72 if (!task) 73 73 task = current; 74 74 75 - if (user_mode(regs)) 76 - state.type = UNWINDER_GUESS; 77 - 78 75 printk("%sCall Trace:", loglvl); 79 76 for (unwind_start(&state, task, pregs); 80 77 !unwind_done(&state); unwind_next_frame(&state)) {
+32
arch/loongarch/kernel/unwind.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2022-2023 Loongson Technology Corporation Limited 4 + */ 5 + #include <linux/kernel.h> 6 + #include <linux/ftrace.h> 7 + 8 + #include <asm/unwind.h> 9 + 10 + bool default_next_frame(struct unwind_state *state) 11 + { 12 + struct stack_info *info = &state->stack_info; 13 + unsigned long addr; 14 + 15 + if (unwind_done(state)) 16 + return false; 17 + 18 + do { 19 + for (state->sp += sizeof(unsigned long); 20 + state->sp < info->end; state->sp += sizeof(unsigned long)) { 21 + addr = *(unsigned long *)(state->sp); 22 + state->pc = unwind_graph_addr(state, addr, state->sp + 8); 23 + if (__kernel_text_address(state->pc)) 24 + return true; 25 + } 26 + 27 + state->sp = info->next_sp; 28 + 29 + } while (!get_stack_info(state->sp, state->task, info)); 30 + 31 + return false; 32 + }
+3 -46
arch/loongarch/kernel/unwind_guess.c
··· 2 2 /* 3 3 * Copyright (C) 2022 Loongson Technology Corporation Limited 4 4 */ 5 - #include <linux/kernel.h> 6 - #include <linux/ftrace.h> 7 - 8 5 #include <asm/unwind.h> 9 6 10 7 unsigned long unwind_get_return_address(struct unwind_state *state) 11 8 { 12 - if (unwind_done(state)) 13 - return 0; 14 - else if (state->first) 15 - return state->pc; 16 - 17 - return *(unsigned long *)(state->sp); 9 + return __unwind_get_return_address(state); 18 10 } 19 11 EXPORT_SYMBOL_GPL(unwind_get_return_address); 20 12 21 13 void unwind_start(struct unwind_state *state, struct task_struct *task, 22 14 struct pt_regs *regs) 23 15 { 24 - memset(state, 0, sizeof(*state)); 25 - 26 - if (regs) { 27 - state->sp = regs->regs[3]; 28 - state->pc = regs->csr_era; 29 - } 30 - 31 - state->task = task; 32 - state->first = true; 33 - 34 - get_stack_info(state->sp, state->task, &state->stack_info); 35 - 16 + __unwind_start(state, task, regs); 36 17 if (!unwind_done(state) && !__kernel_text_address(state->pc)) 37 18 unwind_next_frame(state); 38 19 } ··· 21 40 22 41 bool unwind_next_frame(struct unwind_state *state) 23 42 { 24 - struct stack_info *info = &state->stack_info; 25 - unsigned long addr; 26 - 27 - if (unwind_done(state)) 28 - return false; 29 - 30 - if (state->first) 31 - state->first = false; 32 - 33 - do { 34 - for (state->sp += sizeof(unsigned long); 35 - state->sp < info->end; 36 - state->sp += sizeof(unsigned long)) { 37 - addr = *(unsigned long *)(state->sp); 38 - state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, 39 - addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); 40 - if (__kernel_text_address(addr)) 41 - return true; 42 - } 43 - 44 - state->sp = info->next_sp; 45 - 46 - } while (!get_stack_info(state->sp, state->task, info)); 47 - 48 - return false; 43 + return default_next_frame(state); 49 44 } 50 45 EXPORT_SYMBOL_GPL(unwind_next_frame);
+154 -102
arch/loongarch/kernel/unwind_prologue.c
··· 2 2 /* 3 3 * Copyright (C) 2022 Loongson Technology Corporation Limited 4 4 */ 5 + #include <linux/cpumask.h> 5 6 #include <linux/ftrace.h> 6 7 #include <linux/kallsyms.h> 7 8 8 9 #include <asm/inst.h> 10 + #include <asm/loongson.h> 9 11 #include <asm/ptrace.h> 12 + #include <asm/setup.h> 10 13 #include <asm/unwind.h> 11 14 12 - static inline void unwind_state_fixup(struct unwind_state *state) 15 + extern const int unwind_hint_ade; 16 + extern const int unwind_hint_ale; 17 + extern const int unwind_hint_bp; 18 + extern const int unwind_hint_fpe; 19 + extern const int unwind_hint_fpu; 20 + extern const int unwind_hint_lsx; 21 + extern const int unwind_hint_lasx; 22 + extern const int unwind_hint_lbt; 23 + extern const int unwind_hint_ri; 24 + extern const int unwind_hint_watch; 25 + extern unsigned long eentry; 26 + #ifdef CONFIG_NUMA 27 + extern unsigned long pcpu_handlers[NR_CPUS]; 28 + #endif 29 + 30 + static inline bool scan_handlers(unsigned long entry_offset) 31 + { 32 + int idx, offset; 33 + 34 + if (entry_offset >= EXCCODE_INT_START * VECSIZE) 35 + return false; 36 + 37 + idx = entry_offset / VECSIZE; 38 + offset = entry_offset % VECSIZE; 39 + switch (idx) { 40 + case EXCCODE_ADE: 41 + return offset == unwind_hint_ade; 42 + case EXCCODE_ALE: 43 + return offset == unwind_hint_ale; 44 + case EXCCODE_BP: 45 + return offset == unwind_hint_bp; 46 + case EXCCODE_FPE: 47 + return offset == unwind_hint_fpe; 48 + case EXCCODE_FPDIS: 49 + return offset == unwind_hint_fpu; 50 + case EXCCODE_LSXDIS: 51 + return offset == unwind_hint_lsx; 52 + case EXCCODE_LASXDIS: 53 + return offset == unwind_hint_lasx; 54 + case EXCCODE_BTDIS: 55 + return offset == unwind_hint_lbt; 56 + case EXCCODE_INE: 57 + return offset == unwind_hint_ri; 58 + case EXCCODE_WATCH: 59 + return offset == unwind_hint_watch; 60 + default: 61 + return false; 62 + } 63 + } 64 + 65 + static inline bool fix_exception(unsigned long pc) 66 + { 67 + #ifdef CONFIG_NUMA 68 + int cpu; 69 + 70 + 
for_each_possible_cpu(cpu) { 71 + if (!pcpu_handlers[cpu]) 72 + continue; 73 + if (scan_handlers(pc - pcpu_handlers[cpu])) 74 + return true; 75 + } 76 + #endif 77 + return scan_handlers(pc - eentry); 78 + } 79 + 80 + /* 81 + * As we meet ftrace_regs_entry, reset first flag like first doing 82 + * tracing. Prologue analysis will stop soon because PC is at entry. 83 + */ 84 + static inline bool fix_ftrace(unsigned long pc) 13 85 { 14 86 #ifdef CONFIG_DYNAMIC_FTRACE 15 - static unsigned long ftrace = (unsigned long)ftrace_call + 4; 16 - 17 - if (state->pc == ftrace) 18 - state->is_ftrace = true; 87 + return pc == (unsigned long)ftrace_call + LOONGARCH_INSN_SIZE; 88 + #else 89 + return false; 19 90 #endif 20 91 } 21 92 22 - unsigned long unwind_get_return_address(struct unwind_state *state) 93 + static inline bool unwind_state_fixup(struct unwind_state *state) 23 94 { 95 + if (!fix_exception(state->pc) && !fix_ftrace(state->pc)) 96 + return false; 24 97 25 - if (unwind_done(state)) 26 - return 0; 27 - else if (state->type) 28 - return state->pc; 29 - else if (state->first) 30 - return state->pc; 31 - 32 - return *(unsigned long *)(state->sp); 33 - 34 - } 35 - EXPORT_SYMBOL_GPL(unwind_get_return_address); 36 - 37 - static bool unwind_by_guess(struct unwind_state *state) 38 - { 39 - struct stack_info *info = &state->stack_info; 40 - unsigned long addr; 41 - 42 - for (state->sp += sizeof(unsigned long); 43 - state->sp < info->end; 44 - state->sp += sizeof(unsigned long)) { 45 - addr = *(unsigned long *)(state->sp); 46 - state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, 47 - addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); 48 - if (__kernel_text_address(addr)) 49 - return true; 50 - } 51 - 52 - return false; 98 + state->reset = true; 99 + return true; 53 100 } 54 101 102 + /* 103 + * LoongArch function prologue is like follows, 104 + * [instructions not use stack var] 105 + * addi.d sp, sp, -imm 106 + * st.d xx, sp, offset <- save callee saved regs 
and 107 + * st.d yy, sp, offset save ra if function is nest. 108 + * [others instructions] 109 + */ 55 110 static bool unwind_by_prologue(struct unwind_state *state) 56 111 { 57 112 long frame_ra = -1; 58 113 unsigned long frame_size = 0; 59 - unsigned long size, offset, pc = state->pc; 114 + unsigned long size, offset, pc; 60 115 struct pt_regs *regs; 61 116 struct stack_info *info = &state->stack_info; 62 117 union loongarch_instruction *ip, *ip_end; ··· 119 64 if (state->sp >= info->end || state->sp < info->begin) 120 65 return false; 121 66 122 - if (state->is_ftrace) { 123 - /* 124 - * As we meet ftrace_regs_entry, reset first flag like first doing 125 - * tracing. Prologue analysis will stop soon because PC is at entry. 126 - */ 67 + if (state->reset) { 127 68 regs = (struct pt_regs *)state->sp; 128 69 state->first = true; 129 - state->is_ftrace = false; 70 + state->reset = false; 130 71 state->pc = regs->csr_era; 131 72 state->ra = regs->regs[1]; 132 73 state->sp = regs->regs[3]; 133 74 return true; 134 75 } 135 76 77 + /* 78 + * When first is not set, the PC is a return address in the previous frame. 79 + * We need to adjust its value in case overflow to the next symbol. 80 + */ 81 + pc = state->pc - (state->first ? 0 : LOONGARCH_INSN_SIZE); 136 82 if (!kallsyms_lookup_size_offset(pc, &size, &offset)) 137 83 return false; 138 84 ··· 149 93 ip++; 150 94 } 151 95 96 + /* 97 + * Can't find stack alloc action, PC may be in a leaf function. Only the 98 + * first being true is reasonable, otherwise indicate analysis is broken. 99 + */ 152 100 if (!frame_size) { 153 101 if (state->first) 154 102 goto first; ··· 170 110 ip++; 171 111 } 172 112 113 + /* Can't find save $ra action, PC may be in a leaf function, too. 
*/ 173 114 if (frame_ra < 0) { 174 115 if (state->first) { 175 116 state->sp = state->sp + frame_size; ··· 179 118 return false; 180 119 } 181 120 182 - if (state->first) 183 - state->first = false; 184 - 185 121 state->pc = *(unsigned long *)(state->sp + frame_ra); 186 122 state->sp = state->sp + frame_size; 187 123 goto out; 188 124 189 125 first: 190 - state->first = false; 191 - if (state->pc == state->ra) 192 - return false; 193 - 194 126 state->pc = state->ra; 195 127 196 128 out: 197 - unwind_state_fixup(state); 198 - return !!__kernel_text_address(state->pc); 129 + state->first = false; 130 + return unwind_state_fixup(state) || __kernel_text_address(state->pc); 199 131 } 200 132 201 - void unwind_start(struct unwind_state *state, struct task_struct *task, 202 - struct pt_regs *regs) 133 + static bool next_frame(struct unwind_state *state) 203 134 { 204 - memset(state, 0, sizeof(*state)); 205 - 206 - if (regs && __kernel_text_address(regs->csr_era)) { 207 - state->pc = regs->csr_era; 208 - state->sp = regs->regs[3]; 209 - state->ra = regs->regs[1]; 210 - state->type = UNWINDER_PROLOGUE; 211 - } 212 - 213 - state->task = task; 214 - state->first = true; 215 - 216 - get_stack_info(state->sp, state->task, &state->stack_info); 217 - 218 - if (!unwind_done(state) && !__kernel_text_address(state->pc)) 219 - unwind_next_frame(state); 220 - } 221 - EXPORT_SYMBOL_GPL(unwind_start); 222 - 223 - bool unwind_next_frame(struct unwind_state *state) 224 - { 225 - struct stack_info *info = &state->stack_info; 226 - struct pt_regs *regs; 227 135 unsigned long pc; 136 + struct pt_regs *regs; 137 + struct stack_info *info = &state->stack_info; 228 138 229 139 if (unwind_done(state)) 230 140 return false; 231 141 232 142 do { 233 - switch (state->type) { 234 - case UNWINDER_GUESS: 235 - state->first = false; 236 - if (unwind_by_guess(state)) 237 - return true; 238 - break; 143 + if (unwind_by_prologue(state)) { 144 + state->pc = unwind_graph_addr(state, state->pc, state->sp); 
145 + return true; 146 + } 239 147 240 - case UNWINDER_PROLOGUE: 241 - if (unwind_by_prologue(state)) { 242 - state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, 243 - state->pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); 244 - return true; 245 - } 148 + if (info->type == STACK_TYPE_IRQ && info->end == state->sp) { 149 + regs = (struct pt_regs *)info->next_sp; 150 + pc = regs->csr_era; 246 151 247 - if (info->type == STACK_TYPE_IRQ && 248 - info->end == state->sp) { 249 - regs = (struct pt_regs *)info->next_sp; 250 - pc = regs->csr_era; 152 + if (user_mode(regs) || !__kernel_text_address(pc)) 153 + return false; 251 154 252 - if (user_mode(regs) || !__kernel_text_address(pc)) 253 - return false; 155 + state->first = true; 156 + state->pc = pc; 157 + state->ra = regs->regs[1]; 158 + state->sp = regs->regs[3]; 159 + get_stack_info(state->sp, state->task, info); 254 160 255 - state->first = true; 256 - state->ra = regs->regs[1]; 257 - state->sp = regs->regs[3]; 258 - state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, 259 - pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); 260 - get_stack_info(state->sp, state->task, info); 261 - 262 - return true; 263 - } 161 + return true; 264 162 } 265 163 266 164 state->sp = info->next_sp; ··· 227 207 } while (!get_stack_info(state->sp, state->task, info)); 228 208 229 209 return false; 210 + } 211 + 212 + unsigned long unwind_get_return_address(struct unwind_state *state) 213 + { 214 + return __unwind_get_return_address(state); 215 + } 216 + EXPORT_SYMBOL_GPL(unwind_get_return_address); 217 + 218 + void unwind_start(struct unwind_state *state, struct task_struct *task, 219 + struct pt_regs *regs) 220 + { 221 + __unwind_start(state, task, regs); 222 + state->type = UNWINDER_PROLOGUE; 223 + state->first = true; 224 + 225 + /* 226 + * The current PC is not kernel text address, we cannot find its 227 + * relative symbol. Thus, prologue analysis will be broken. 
Luckily, 228 + * we can use the default_next_frame(). 229 + */ 230 + if (!__kernel_text_address(state->pc)) { 231 + state->type = UNWINDER_GUESS; 232 + if (!unwind_done(state)) 233 + unwind_next_frame(state); 234 + } 235 + } 236 + EXPORT_SYMBOL_GPL(unwind_start); 237 + 238 + bool unwind_next_frame(struct unwind_state *state) 239 + { 240 + return state->type == UNWINDER_PROLOGUE ? 241 + next_frame(state) : default_next_frame(state); 230 242 } 231 243 EXPORT_SYMBOL_GPL(unwind_next_frame);
+1 -1
arch/loongarch/mm/tlb.c
··· 251 251 } 252 252 253 253 #ifdef CONFIG_NUMA 254 - static unsigned long pcpu_handlers[NR_CPUS]; 254 + unsigned long pcpu_handlers[NR_CPUS]; 255 255 #endif 256 256 extern long exception_handlers[VECSIZE * 128 / sizeof(long)]; 257 257
+4
arch/powerpc/boot/wrapper
··· 210 210 gsub(".*version ", ""); 211 211 gsub("-.*", ""); 212 212 split($1,a, "."); 213 + if( length(a[3]) == "8" ) 214 + # a[3] is probably a date of format yyyymmdd used for release snapshots. We 215 + # can assume it to be zero as it does not signify a new version as such. 216 + a[3] = 0; 213 217 print a[1]*100000000 + a[2]*1000000 + a[3]*10000; 214 218 exit 215 219 }'
+1 -1
arch/powerpc/include/asm/imc-pmu.h
··· 137 137 * are inited. 138 138 */ 139 139 struct imc_pmu_ref { 140 - struct mutex lock; 140 + spinlock_t lock; 141 141 unsigned int id; 142 142 int refc; 143 143 };
+1 -1
arch/powerpc/mm/book3s64/hash_utils.c
··· 1012 1012 1013 1013 void hpt_clear_stress(void); 1014 1014 static struct timer_list stress_hpt_timer; 1015 - void stress_hpt_timer_fn(struct timer_list *timer) 1015 + static void stress_hpt_timer_fn(struct timer_list *timer) 1016 1016 { 1017 1017 int next_cpu; 1018 1018
+66 -70
arch/powerpc/perf/imc-pmu.c
··· 14 14 #include <asm/cputhreads.h> 15 15 #include <asm/smp.h> 16 16 #include <linux/string.h> 17 + #include <linux/spinlock.h> 17 18 18 19 /* Nest IMC data structures and variables */ 19 20 ··· 22 21 * Used to avoid races in counting the nest-pmu units during hotplug 23 22 * register and unregister 24 23 */ 25 - static DEFINE_MUTEX(nest_init_lock); 24 + static DEFINE_SPINLOCK(nest_init_lock); 26 25 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc); 27 26 static struct imc_pmu **per_nest_pmu_arr; 28 27 static cpumask_t nest_imc_cpumask; ··· 51 50 * core and trace-imc 52 51 */ 53 52 static struct imc_pmu_ref imc_global_refc = { 54 - .lock = __MUTEX_INITIALIZER(imc_global_refc.lock), 53 + .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock), 55 54 .id = 0, 56 55 .refc = 0, 57 56 }; ··· 401 400 get_hard_smp_processor_id(cpu)); 402 401 /* 403 402 * If this is the last cpu in this chip then, skip the reference 404 - * count mutex lock and make the reference count on this chip zero. 403 + * count lock and make the reference count on this chip zero. 405 404 */ 406 405 ref = get_nest_pmu_ref(cpu); 407 406 if (!ref) ··· 463 462 /* 464 463 * See if we need to disable the nest PMU. 465 464 * If no events are currently in use, then we have to take a 466 - * mutex to ensure that we don't race with another task doing 465 + * lock to ensure that we don't race with another task doing 467 466 * enable or disable the nest counters. 468 467 */ 469 468 ref = get_nest_pmu_ref(event->cpu); 470 469 if (!ref) 471 470 return; 472 471 473 - /* Take the mutex lock for this node and then decrement the reference count */ 474 - mutex_lock(&ref->lock); 472 + /* Take the lock for this node and then decrement the reference count */ 473 + spin_lock(&ref->lock); 475 474 if (ref->refc == 0) { 476 475 /* 477 476 * The scenario where this is true is, when perf session is ··· 483 482 * an OPAL call to disable the engine in that node. 
484 483 * 485 484 */ 486 - mutex_unlock(&ref->lock); 485 + spin_unlock(&ref->lock); 487 486 return; 488 487 } 489 488 ref->refc--; ··· 491 490 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, 492 491 get_hard_smp_processor_id(event->cpu)); 493 492 if (rc) { 494 - mutex_unlock(&ref->lock); 493 + spin_unlock(&ref->lock); 495 494 pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id); 496 495 return; 497 496 } ··· 499 498 WARN(1, "nest-imc: Invalid event reference count\n"); 500 499 ref->refc = 0; 501 500 } 502 - mutex_unlock(&ref->lock); 501 + spin_unlock(&ref->lock); 503 502 } 504 503 505 504 static int nest_imc_event_init(struct perf_event *event) ··· 558 557 559 558 /* 560 559 * Get the imc_pmu_ref struct for this node. 561 - * Take the mutex lock and then increment the count of nest pmu events 562 - * inited. 560 + * Take the lock and then increment the count of nest pmu events inited. 563 561 */ 564 562 ref = get_nest_pmu_ref(event->cpu); 565 563 if (!ref) 566 564 return -EINVAL; 567 565 568 - mutex_lock(&ref->lock); 566 + spin_lock(&ref->lock); 569 567 if (ref->refc == 0) { 570 568 rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST, 571 569 get_hard_smp_processor_id(event->cpu)); 572 570 if (rc) { 573 - mutex_unlock(&ref->lock); 571 + spin_unlock(&ref->lock); 574 572 pr_err("nest-imc: Unable to start the counters for node %d\n", 575 573 node_id); 576 574 return rc; 577 575 } 578 576 } 579 577 ++ref->refc; 580 - mutex_unlock(&ref->lock); 578 + spin_unlock(&ref->lock); 581 579 582 580 event->destroy = nest_imc_counters_release; 583 581 return 0; ··· 612 612 return -ENOMEM; 613 613 mem_info->vbase = page_address(page); 614 614 615 - /* Init the mutex */ 616 615 core_imc_refc[core_id].id = core_id; 617 - mutex_init(&core_imc_refc[core_id].lock); 616 + spin_lock_init(&core_imc_refc[core_id].lock); 618 617 619 618 rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE, 620 619 __pa((void *)mem_info->vbase), ··· 702 703 
perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu); 703 704 } else { 704 705 /* 705 - * If this is the last cpu in this core then, skip taking refernce 706 - * count mutex lock for this core and directly zero "refc" for 707 - * this core. 706 + * If this is the last cpu in this core then skip taking reference 707 + * count lock for this core and directly zero "refc" for this core. 708 708 */ 709 709 opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, 710 710 get_hard_smp_processor_id(cpu)); ··· 718 720 * last cpu in this core and core-imc event running 719 721 * in this cpu. 720 722 */ 721 - mutex_lock(&imc_global_refc.lock); 723 + spin_lock(&imc_global_refc.lock); 722 724 if (imc_global_refc.id == IMC_DOMAIN_CORE) 723 725 imc_global_refc.refc--; 724 726 725 - mutex_unlock(&imc_global_refc.lock); 727 + spin_unlock(&imc_global_refc.lock); 726 728 } 727 729 return 0; 728 730 } ··· 737 739 738 740 static void reset_global_refc(struct perf_event *event) 739 741 { 740 - mutex_lock(&imc_global_refc.lock); 742 + spin_lock(&imc_global_refc.lock); 741 743 imc_global_refc.refc--; 742 744 743 745 /* ··· 749 751 imc_global_refc.refc = 0; 750 752 imc_global_refc.id = 0; 751 753 } 752 - mutex_unlock(&imc_global_refc.lock); 754 + spin_unlock(&imc_global_refc.lock); 753 755 } 754 756 755 757 static void core_imc_counters_release(struct perf_event *event) ··· 762 764 /* 763 765 * See if we need to disable the IMC PMU. 764 766 * If no events are currently in use, then we have to take a 765 - * mutex to ensure that we don't race with another task doing 767 + * lock to ensure that we don't race with another task doing 766 768 * enable or disable the core counters. 
767 769 */ 768 770 core_id = event->cpu / threads_per_core; 769 771 770 - /* Take the mutex lock and decrement the refernce count for this core */ 772 + /* Take the lock and decrement the refernce count for this core */ 771 773 ref = &core_imc_refc[core_id]; 772 774 if (!ref) 773 775 return; 774 776 775 - mutex_lock(&ref->lock); 777 + spin_lock(&ref->lock); 776 778 if (ref->refc == 0) { 777 779 /* 778 780 * The scenario where this is true is, when perf session is ··· 784 786 * an OPAL call to disable the engine in that core. 785 787 * 786 788 */ 787 - mutex_unlock(&ref->lock); 789 + spin_unlock(&ref->lock); 788 790 return; 789 791 } 790 792 ref->refc--; ··· 792 794 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, 793 795 get_hard_smp_processor_id(event->cpu)); 794 796 if (rc) { 795 - mutex_unlock(&ref->lock); 797 + spin_unlock(&ref->lock); 796 798 pr_err("IMC: Unable to stop the counters for core %d\n", core_id); 797 799 return; 798 800 } ··· 800 802 WARN(1, "core-imc: Invalid event reference count\n"); 801 803 ref->refc = 0; 802 804 } 803 - mutex_unlock(&ref->lock); 805 + spin_unlock(&ref->lock); 804 806 805 807 reset_global_refc(event); 806 808 } ··· 838 840 if ((!pcmi->vbase)) 839 841 return -ENODEV; 840 842 841 - /* Get the core_imc mutex for this core */ 842 843 ref = &core_imc_refc[core_id]; 843 844 if (!ref) 844 845 return -EINVAL; ··· 845 848 /* 846 849 * Core pmu units are enabled only when it is used. 847 850 * See if this is triggered for the first time. 848 - * If yes, take the mutex lock and enable the core counters. 851 + * If yes, take the lock and enable the core counters. 849 852 * If not, just increment the count in core_imc_refc struct. 
850 853 */ 851 - mutex_lock(&ref->lock); 854 + spin_lock(&ref->lock); 852 855 if (ref->refc == 0) { 853 856 rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, 854 857 get_hard_smp_processor_id(event->cpu)); 855 858 if (rc) { 856 - mutex_unlock(&ref->lock); 859 + spin_unlock(&ref->lock); 857 860 pr_err("core-imc: Unable to start the counters for core %d\n", 858 861 core_id); 859 862 return rc; 860 863 } 861 864 } 862 865 ++ref->refc; 863 - mutex_unlock(&ref->lock); 866 + spin_unlock(&ref->lock); 864 867 865 868 /* 866 869 * Since the system can run either in accumulation or trace-mode ··· 871 874 * to know whether any other trace/thread imc 872 875 * events are running. 873 876 */ 874 - mutex_lock(&imc_global_refc.lock); 877 + spin_lock(&imc_global_refc.lock); 875 878 if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) { 876 879 /* 877 880 * No other trace/thread imc events are running in ··· 880 883 imc_global_refc.id = IMC_DOMAIN_CORE; 881 884 imc_global_refc.refc++; 882 885 } else { 883 - mutex_unlock(&imc_global_refc.lock); 886 + spin_unlock(&imc_global_refc.lock); 884 887 return -EBUSY; 885 888 } 886 - mutex_unlock(&imc_global_refc.lock); 889 + spin_unlock(&imc_global_refc.lock); 887 890 888 891 event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); 889 892 event->destroy = core_imc_counters_release; ··· 955 958 mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); 956 959 957 960 /* Reduce the refc if thread-imc event running on this cpu */ 958 - mutex_lock(&imc_global_refc.lock); 961 + spin_lock(&imc_global_refc.lock); 959 962 if (imc_global_refc.id == IMC_DOMAIN_THREAD) 960 963 imc_global_refc.refc--; 961 - mutex_unlock(&imc_global_refc.lock); 964 + spin_unlock(&imc_global_refc.lock); 962 965 963 966 return 0; 964 967 } ··· 998 1001 if (!target) 999 1002 return -EINVAL; 1000 1003 1001 - mutex_lock(&imc_global_refc.lock); 1004 + spin_lock(&imc_global_refc.lock); 1002 1005 /* 1003 1006 * Check if any other trace/core 
imc events are running in the 1004 1007 * system, if not set the global id to thread-imc. ··· 1007 1010 imc_global_refc.id = IMC_DOMAIN_THREAD; 1008 1011 imc_global_refc.refc++; 1009 1012 } else { 1010 - mutex_unlock(&imc_global_refc.lock); 1013 + spin_unlock(&imc_global_refc.lock); 1011 1014 return -EBUSY; 1012 1015 } 1013 - mutex_unlock(&imc_global_refc.lock); 1016 + spin_unlock(&imc_global_refc.lock); 1014 1017 1015 1018 event->pmu->task_ctx_nr = perf_sw_context; 1016 1019 event->destroy = reset_global_refc; ··· 1132 1135 /* 1133 1136 * imc pmus are enabled only when it is used. 1134 1137 * See if this is triggered for the first time. 1135 - * If yes, take the mutex lock and enable the counters. 1138 + * If yes, take the lock and enable the counters. 1136 1139 * If not, just increment the count in ref count struct. 1137 1140 */ 1138 1141 ref = &core_imc_refc[core_id]; 1139 1142 if (!ref) 1140 1143 return -EINVAL; 1141 1144 1142 - mutex_lock(&ref->lock); 1145 + spin_lock(&ref->lock); 1143 1146 if (ref->refc == 0) { 1144 1147 if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, 1145 1148 get_hard_smp_processor_id(smp_processor_id()))) { 1146 - mutex_unlock(&ref->lock); 1149 + spin_unlock(&ref->lock); 1147 1150 pr_err("thread-imc: Unable to start the counter\ 1148 1151 for core %d\n", core_id); 1149 1152 return -EINVAL; 1150 1153 } 1151 1154 } 1152 1155 ++ref->refc; 1153 - mutex_unlock(&ref->lock); 1156 + spin_unlock(&ref->lock); 1154 1157 return 0; 1155 1158 } 1156 1159 ··· 1167 1170 return; 1168 1171 } 1169 1172 1170 - mutex_lock(&ref->lock); 1173 + spin_lock(&ref->lock); 1171 1174 ref->refc--; 1172 1175 if (ref->refc == 0) { 1173 1176 if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, 1174 1177 get_hard_smp_processor_id(smp_processor_id()))) { 1175 - mutex_unlock(&ref->lock); 1178 + spin_unlock(&ref->lock); 1176 1179 pr_err("thread-imc: Unable to stop the counters\ 1177 1180 for core %d\n", core_id); 1178 1181 return; ··· 1180 1183 } else if (ref->refc < 0) { 
1181 1184 ref->refc = 0; 1182 1185 } 1183 - mutex_unlock(&ref->lock); 1186 + spin_unlock(&ref->lock); 1184 1187 1185 1188 /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */ 1186 1189 mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); ··· 1221 1224 } 1222 1225 } 1223 1226 1224 - /* Init the mutex, if not already */ 1225 1227 trace_imc_refc[core_id].id = core_id; 1226 - mutex_init(&trace_imc_refc[core_id].lock); 1228 + spin_lock_init(&trace_imc_refc[core_id].lock); 1227 1229 1228 1230 mtspr(SPRN_LDBAR, 0); 1229 1231 return 0; ··· 1242 1246 * Reduce the refc if any trace-imc event running 1243 1247 * on this cpu. 1244 1248 */ 1245 - mutex_lock(&imc_global_refc.lock); 1249 + spin_lock(&imc_global_refc.lock); 1246 1250 if (imc_global_refc.id == IMC_DOMAIN_TRACE) 1247 1251 imc_global_refc.refc--; 1248 - mutex_unlock(&imc_global_refc.lock); 1252 + spin_unlock(&imc_global_refc.lock); 1249 1253 1250 1254 return 0; 1251 1255 } ··· 1367 1371 } 1368 1372 1369 1373 mtspr(SPRN_LDBAR, ldbar_value); 1370 - mutex_lock(&ref->lock); 1374 + spin_lock(&ref->lock); 1371 1375 if (ref->refc == 0) { 1372 1376 if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE, 1373 1377 get_hard_smp_processor_id(smp_processor_id()))) { 1374 - mutex_unlock(&ref->lock); 1378 + spin_unlock(&ref->lock); 1375 1379 pr_err("trace-imc: Unable to start the counters for core %d\n", core_id); 1376 1380 return -EINVAL; 1377 1381 } 1378 1382 } 1379 1383 ++ref->refc; 1380 - mutex_unlock(&ref->lock); 1384 + spin_unlock(&ref->lock); 1381 1385 return 0; 1382 1386 } 1383 1387 ··· 1410 1414 return; 1411 1415 } 1412 1416 1413 - mutex_lock(&ref->lock); 1417 + spin_lock(&ref->lock); 1414 1418 ref->refc--; 1415 1419 if (ref->refc == 0) { 1416 1420 if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE, 1417 1421 get_hard_smp_processor_id(smp_processor_id()))) { 1418 - mutex_unlock(&ref->lock); 1422 + spin_unlock(&ref->lock); 1419 1423 pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id); 
1420 1424 return; 1421 1425 } 1422 1426 } else if (ref->refc < 0) { 1423 1427 ref->refc = 0; 1424 1428 } 1425 - mutex_unlock(&ref->lock); 1429 + spin_unlock(&ref->lock); 1426 1430 1427 1431 trace_imc_event_stop(event, flags); 1428 1432 } ··· 1444 1448 * no other thread is running any core/thread imc 1445 1449 * events 1446 1450 */ 1447 - mutex_lock(&imc_global_refc.lock); 1451 + spin_lock(&imc_global_refc.lock); 1448 1452 if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) { 1449 1453 /* 1450 1454 * No core/thread imc events are running in the ··· 1453 1457 imc_global_refc.id = IMC_DOMAIN_TRACE; 1454 1458 imc_global_refc.refc++; 1455 1459 } else { 1456 - mutex_unlock(&imc_global_refc.lock); 1460 + spin_unlock(&imc_global_refc.lock); 1457 1461 return -EBUSY; 1458 1462 } 1459 - mutex_unlock(&imc_global_refc.lock); 1463 + spin_unlock(&imc_global_refc.lock); 1460 1464 1461 1465 event->hw.idx = -1; 1462 1466 ··· 1529 1533 i = 0; 1530 1534 for_each_node(nid) { 1531 1535 /* 1532 - * Mutex lock to avoid races while tracking the number of 1536 + * Take the lock to avoid races while tracking the number of 1533 1537 * sessions using the chip's nest pmu units. 1534 1538 */ 1535 - mutex_init(&nest_imc_refc[i].lock); 1539 + spin_lock_init(&nest_imc_refc[i].lock); 1536 1540 1537 1541 /* 1538 1542 * Loop to init the "id" with the node_id. Variable "i" initialized to ··· 1629 1633 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) 1630 1634 { 1631 1635 if (pmu_ptr->domain == IMC_DOMAIN_NEST) { 1632 - mutex_lock(&nest_init_lock); 1636 + spin_lock(&nest_init_lock); 1633 1637 if (nest_pmus == 1) { 1634 1638 cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE); 1635 1639 kfree(nest_imc_refc); ··· 1639 1643 1640 1644 if (nest_pmus > 0) 1641 1645 nest_pmus--; 1642 - mutex_unlock(&nest_init_lock); 1646 + spin_unlock(&nest_init_lock); 1643 1647 } 1644 1648 1645 1649 /* Free core_imc memory */ ··· 1796 1800 * rest. 
To handle the cpuhotplug callback unregister, we track 1797 1801 * the number of nest pmus in "nest_pmus". 1798 1802 */ 1799 - mutex_lock(&nest_init_lock); 1803 + spin_lock(&nest_init_lock); 1800 1804 if (nest_pmus == 0) { 1801 1805 ret = init_nest_pmu_ref(); 1802 1806 if (ret) { 1803 - mutex_unlock(&nest_init_lock); 1807 + spin_unlock(&nest_init_lock); 1804 1808 kfree(per_nest_pmu_arr); 1805 1809 per_nest_pmu_arr = NULL; 1806 1810 goto err_free_mem; ··· 1808 1812 /* Register for cpu hotplug notification. */ 1809 1813 ret = nest_pmu_cpumask_init(); 1810 1814 if (ret) { 1811 - mutex_unlock(&nest_init_lock); 1815 + spin_unlock(&nest_init_lock); 1812 1816 kfree(nest_imc_refc); 1813 1817 kfree(per_nest_pmu_arr); 1814 1818 per_nest_pmu_arr = NULL; ··· 1816 1820 } 1817 1821 } 1818 1822 nest_pmus++; 1819 - mutex_unlock(&nest_init_lock); 1823 + spin_unlock(&nest_init_lock); 1820 1824 break; 1821 1825 case IMC_DOMAIN_CORE: 1822 1826 ret = core_imc_pmu_cpumask_init();
+3 -2
arch/s390/kernel/setup.c
··· 508 508 { 509 509 struct lowcore *abs_lc; 510 510 unsigned long flags; 511 + int i; 511 512 512 513 __ctl_clear_bit(0, 28); 513 514 S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; ··· 524 523 abs_lc = get_abs_lowcore(&flags); 525 524 abs_lc->restart_flags = RESTART_FLAG_CTLREGS; 526 525 abs_lc->program_new_psw = S390_lowcore.program_new_psw; 527 - memcpy(abs_lc->cregs_save_area, S390_lowcore.cregs_save_area, 528 - sizeof(abs_lc->cregs_save_area)); 526 + for (i = 0; i < 16; i++) 527 + abs_lc->cregs_save_area[i] = S390_lowcore.cregs_save_area[i]; 529 528 put_abs_lowcore(abs_lc, flags); 530 529 } 531 530
+2 -2
arch/x86/boot/bioscall.S
··· 32 32 movw %dx, %si 33 33 movw %sp, %di 34 34 movw $11, %cx 35 - rep; movsd 35 + rep; movsl 36 36 37 37 /* Pop full state from the stack */ 38 38 popal ··· 67 67 jz 4f 68 68 movw %sp, %si 69 69 movw $11, %cx 70 - rep; movsd 70 + rep; movsl 71 71 4: addw $44, %sp 72 72 73 73 /* Restore state and return */
+1
arch/x86/include/asm/kvm_host.h
··· 1111 1111 1112 1112 /* Xen emulation context */ 1113 1113 struct kvm_xen { 1114 + struct mutex xen_lock; 1114 1115 u32 xen_version; 1115 1116 bool long_mode; 1116 1117 bool runstate_update_flag;
+33 -16
arch/x86/kernel/cpu/resctrl/monitor.c
··· 146 146 return entry; 147 147 } 148 148 149 + static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val) 150 + { 151 + u64 msr_val; 152 + 153 + /* 154 + * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured 155 + * with a valid event code for supported resource type and the bits 156 + * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID, 157 + * IA32_QM_CTR.data (bits 61:0) reports the monitored data. 158 + * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62) 159 + * are error bits. 160 + */ 161 + wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid); 162 + rdmsrl(MSR_IA32_QM_CTR, msr_val); 163 + 164 + if (msr_val & RMID_VAL_ERROR) 165 + return -EIO; 166 + if (msr_val & RMID_VAL_UNAVAIL) 167 + return -EINVAL; 168 + 169 + *val = msr_val; 170 + return 0; 171 + } 172 + 149 173 static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, 150 174 u32 rmid, 151 175 enum resctrl_event_id eventid) ··· 196 172 struct arch_mbm_state *am; 197 173 198 174 am = get_arch_mbm_state(hw_dom, rmid, eventid); 199 - if (am) 175 + if (am) { 200 176 memset(am, 0, sizeof(*am)); 177 + 178 + /* Record any initial, non-zero count value. */ 179 + __rmid_read(rmid, eventid, &am->prev_msr); 180 + } 201 181 } 202 182 203 183 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) ··· 219 191 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); 220 192 struct arch_mbm_state *am; 221 193 u64 msr_val, chunks; 194 + int ret; 222 195 223 196 if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) 224 197 return -EINVAL; 225 198 226 - /* 227 - * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured 228 - * with a valid event code for supported resource type and the bits 229 - * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID, 230 - * IA32_QM_CTR.data (bits 61:0) reports the monitored data. 231 - * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62) 232 - * are error bits. 
233 - */ 234 - wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid); 235 - rdmsrl(MSR_IA32_QM_CTR, msr_val); 236 - 237 - if (msr_val & RMID_VAL_ERROR) 238 - return -EIO; 239 - if (msr_val & RMID_VAL_UNAVAIL) 240 - return -EINVAL; 199 + ret = __rmid_read(rmid, eventid, &msr_val); 200 + if (ret) 201 + return ret; 241 202 242 203 am = get_arch_mbm_state(hw_dom, rmid, eventid); 243 204 if (am) {
+11 -1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 580 580 /* 581 581 * Ensure the task's closid and rmid are written before determining if 582 582 * the task is current that will decide if it will be interrupted. 583 + * This pairs with the full barrier between the rq->curr update and 584 + * resctrl_sched_in() during context switch. 583 585 */ 584 - barrier(); 586 + smp_mb(); 585 587 586 588 /* 587 589 * By now, the task's closid and rmid are set. If the task is current ··· 2402 2400 is_rmid_match(t, from)) { 2403 2401 WRITE_ONCE(t->closid, to->closid); 2404 2402 WRITE_ONCE(t->rmid, to->mon.rmid); 2403 + 2404 + /* 2405 + * Order the closid/rmid stores above before the loads 2406 + * in task_curr(). This pairs with the full barrier 2407 + * between the rq->curr update and resctrl_sched_in() 2408 + * during context switch. 2409 + */ 2410 + smp_mb(); 2405 2411 2406 2412 /* 2407 2413 * If the task is on a CPU, set the CPU in the mask.
+17 -17
arch/x86/kvm/cpuid.c
··· 770 770 int nent; 771 771 }; 772 772 773 - static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, 774 - u32 function, u32 index) 773 + static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array) 775 774 { 776 - struct kvm_cpuid_entry2 *entry; 777 - 778 775 if (array->nent >= array->maxnent) 779 776 return NULL; 780 777 781 - entry = &array->entries[array->nent++]; 778 + return &array->entries[array->nent++]; 779 + } 780 + 781 + static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, 782 + u32 function, u32 index) 783 + { 784 + struct kvm_cpuid_entry2 *entry = get_next_cpuid(array); 785 + 786 + if (!entry) 787 + return NULL; 782 788 783 789 memset(entry, 0, sizeof(*entry)); 784 790 entry->function = function; ··· 962 956 entry->edx = edx.full; 963 957 break; 964 958 } 965 - /* 966 - * Per Intel's SDM, the 0x1f is a superset of 0xb, 967 - * thus they can be handled by common code. 968 - */ 969 959 case 0x1f: 970 960 case 0xb: 971 961 /* 972 - * Populate entries until the level type (ECX[15:8]) of the 973 - * previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is 974 - * the starting entry, filled by the primary do_host_cpuid(). 962 + * No topology; a valid topology is indicated by the presence 963 + * of subleaf 1. 975 964 */ 976 - for (i = 1; entry->ecx & 0xff00; ++i) { 977 - entry = do_host_cpuid(array, function, i); 978 - if (!entry) 979 - goto out; 980 - } 965 + entry->eax = entry->ebx = entry->ecx = 0; 981 966 break; 982 967 case 0xd: { 983 968 u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm(); ··· 1199 1202 entry->ebx = entry->ecx = entry->edx = 0; 1200 1203 break; 1201 1204 case 0x8000001e: 1205 + /* Do not return host topology information. */ 1206 + entry->eax = entry->ebx = entry->ecx = 0; 1207 + entry->edx = 0; /* reserved */ 1202 1208 break; 1203 1209 case 0x8000001F: 1204 1210 if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
+5 -7
arch/x86/kvm/svm/nested.c
··· 138 138 c->intercepts[i] = h->intercepts[i]; 139 139 140 140 if (g->int_ctl & V_INTR_MASKING_MASK) { 141 - /* We only want the cr8 intercept bits of L1 */ 142 - vmcb_clr_intercept(c, INTERCEPT_CR8_READ); 143 - vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE); 144 - 145 141 /* 146 - * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not 147 - * affect any interrupt we may want to inject; therefore, 148 - * interrupt window vmexits are irrelevant to L0. 142 + * Once running L2 with HF_VINTR_MASK, EFLAGS.IF and CR8 143 + * does not affect any interrupt we may want to inject; 144 + * therefore, writes to CR8 are irrelevant to L0, as are 145 + * interrupt window vmexits. 149 146 */ 147 + vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE); 150 148 vmcb_clr_intercept(c, INTERCEPT_VINTR); 151 149 } 152 150
+50 -40
arch/x86/kvm/xen.c
··· 271 271 * Attempt to obtain the GPC lock on *both* (if there are two) 272 272 * gfn_to_pfn caches that cover the region. 273 273 */ 274 - read_lock_irqsave(&gpc1->lock, flags); 274 + if (atomic) { 275 + local_irq_save(flags); 276 + if (!read_trylock(&gpc1->lock)) { 277 + local_irq_restore(flags); 278 + return; 279 + } 280 + } else { 281 + read_lock_irqsave(&gpc1->lock, flags); 282 + } 275 283 while (!kvm_gpc_check(gpc1, user_len1)) { 276 284 read_unlock_irqrestore(&gpc1->lock, flags); 277 285 ··· 312 304 * The guest's runstate_info is split across two pages and we 313 305 * need to hold and validate both GPCs simultaneously. We can 314 306 * declare a lock ordering GPC1 > GPC2 because nothing else 315 - * takes them more than one at a time. 307 + * takes them more than one at a time. Set a subclass on the 308 + * gpc1 lock to make lockdep shut up about it. 316 309 */ 317 - read_lock(&gpc2->lock); 310 + lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_); 311 + if (atomic) { 312 + if (!read_trylock(&gpc2->lock)) { 313 + read_unlock_irqrestore(&gpc1->lock, flags); 314 + return; 315 + } 316 + } else { 317 + read_lock(&gpc2->lock); 318 + } 318 319 319 320 if (!kvm_gpc_check(gpc2, user_len2)) { 320 321 read_unlock(&gpc2->lock); ··· 607 590 if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { 608 591 r = -EINVAL; 609 592 } else { 610 - mutex_lock(&kvm->lock); 593 + mutex_lock(&kvm->arch.xen.xen_lock); 611 594 kvm->arch.xen.long_mode = !!data->u.long_mode; 612 - mutex_unlock(&kvm->lock); 595 + mutex_unlock(&kvm->arch.xen.xen_lock); 613 596 r = 0; 614 597 } 615 598 break; 616 599 617 600 case KVM_XEN_ATTR_TYPE_SHARED_INFO: 618 - mutex_lock(&kvm->lock); 601 + mutex_lock(&kvm->arch.xen.xen_lock); 619 602 r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); 620 - mutex_unlock(&kvm->lock); 603 + mutex_unlock(&kvm->arch.xen.xen_lock); 621 604 break; 622 605 623 606 case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: 624 607 if (data->u.vector && data->u.vector < 0x10) 625 608 r 
= -EINVAL; 626 609 else { 627 - mutex_lock(&kvm->lock); 610 + mutex_lock(&kvm->arch.xen.xen_lock); 628 611 kvm->arch.xen.upcall_vector = data->u.vector; 629 - mutex_unlock(&kvm->lock); 612 + mutex_unlock(&kvm->arch.xen.xen_lock); 630 613 r = 0; 631 614 } 632 615 break; ··· 636 619 break; 637 620 638 621 case KVM_XEN_ATTR_TYPE_XEN_VERSION: 639 - mutex_lock(&kvm->lock); 622 + mutex_lock(&kvm->arch.xen.xen_lock); 640 623 kvm->arch.xen.xen_version = data->u.xen_version; 641 - mutex_unlock(&kvm->lock); 624 + mutex_unlock(&kvm->arch.xen.xen_lock); 642 625 r = 0; 643 626 break; 644 627 ··· 647 630 r = -EOPNOTSUPP; 648 631 break; 649 632 } 650 - mutex_lock(&kvm->lock); 633 + mutex_lock(&kvm->arch.xen.xen_lock); 651 634 kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag; 652 - mutex_unlock(&kvm->lock); 635 + mutex_unlock(&kvm->arch.xen.xen_lock); 653 636 r = 0; 654 637 break; 655 638 ··· 664 647 { 665 648 int r = -ENOENT; 666 649 667 - mutex_lock(&kvm->lock); 650 + mutex_lock(&kvm->arch.xen.xen_lock); 668 651 669 652 switch (data->type) { 670 653 case KVM_XEN_ATTR_TYPE_LONG_MODE: ··· 703 686 break; 704 687 } 705 688 706 - mutex_unlock(&kvm->lock); 689 + mutex_unlock(&kvm->arch.xen.xen_lock); 707 690 return r; 708 691 } 709 692 ··· 711 694 { 712 695 int idx, r = -ENOENT; 713 696 714 - mutex_lock(&vcpu->kvm->lock); 697 + mutex_lock(&vcpu->kvm->arch.xen.xen_lock); 715 698 idx = srcu_read_lock(&vcpu->kvm->srcu); 716 699 717 700 switch (data->type) { ··· 939 922 } 940 923 941 924 srcu_read_unlock(&vcpu->kvm->srcu, idx); 942 - mutex_unlock(&vcpu->kvm->lock); 925 + mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); 943 926 return r; 944 927 } 945 928 ··· 947 930 { 948 931 int r = -ENOENT; 949 932 950 - mutex_lock(&vcpu->kvm->lock); 933 + mutex_lock(&vcpu->kvm->arch.xen.xen_lock); 951 934 952 935 switch (data->type) { 953 936 case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: ··· 1030 1013 break; 1031 1014 } 1032 1015 1033 - mutex_unlock(&vcpu->kvm->lock); 1016 + 
mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); 1034 1017 return r; 1035 1018 } 1036 1019 ··· 1123 1106 xhc->blob_size_32 || xhc->blob_size_64)) 1124 1107 return -EINVAL; 1125 1108 1126 - mutex_lock(&kvm->lock); 1109 + mutex_lock(&kvm->arch.xen.xen_lock); 1127 1110 1128 1111 if (xhc->msr && !kvm->arch.xen_hvm_config.msr) 1129 1112 static_branch_inc(&kvm_xen_enabled.key); ··· 1132 1115 1133 1116 memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc)); 1134 1117 1135 - mutex_unlock(&kvm->lock); 1118 + mutex_unlock(&kvm->arch.xen.xen_lock); 1136 1119 return 0; 1137 1120 } 1138 1121 ··· 1675 1658 mm_borrowed = true; 1676 1659 } 1677 1660 1678 - /* 1679 - * For the irqfd workqueue, using the main kvm->lock mutex is 1680 - * fine since this function is invoked from kvm_set_irq() with 1681 - * no other lock held, no srcu. In future if it will be called 1682 - * directly from a vCPU thread (e.g. on hypercall for an IPI) 1683 - * then it may need to switch to using a leaf-node mutex for 1684 - * serializing the shared_info mapping. 1685 - */ 1686 - mutex_lock(&kvm->lock); 1661 + mutex_lock(&kvm->arch.xen.xen_lock); 1687 1662 1688 1663 /* 1689 1664 * It is theoretically possible for the page to be unmapped ··· 1704 1695 srcu_read_unlock(&kvm->srcu, idx); 1705 1696 } while(!rc); 1706 1697 1707 - mutex_unlock(&kvm->lock); 1698 + mutex_unlock(&kvm->arch.xen.xen_lock); 1708 1699 1709 1700 if (mm_borrowed) 1710 1701 kthread_unuse_mm(kvm->mm); ··· 1820 1811 int ret; 1821 1812 1822 1813 /* Protect writes to evtchnfd as well as the idr lookup. 
*/ 1823 - mutex_lock(&kvm->lock); 1814 + mutex_lock(&kvm->arch.xen.xen_lock); 1824 1815 evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port); 1825 1816 1826 1817 ret = -ENOENT; ··· 1851 1842 } 1852 1843 ret = 0; 1853 1844 out_unlock: 1854 - mutex_unlock(&kvm->lock); 1845 + mutex_unlock(&kvm->arch.xen.xen_lock); 1855 1846 return ret; 1856 1847 } 1857 1848 ··· 1914 1905 evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; 1915 1906 } 1916 1907 1917 - mutex_lock(&kvm->lock); 1908 + mutex_lock(&kvm->arch.xen.xen_lock); 1918 1909 ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1, 1919 1910 GFP_KERNEL); 1920 - mutex_unlock(&kvm->lock); 1911 + mutex_unlock(&kvm->arch.xen.xen_lock); 1921 1912 if (ret >= 0) 1922 1913 return 0; 1923 1914 ··· 1935 1926 { 1936 1927 struct evtchnfd *evtchnfd; 1937 1928 1938 - mutex_lock(&kvm->lock); 1929 + mutex_lock(&kvm->arch.xen.xen_lock); 1939 1930 evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port); 1940 - mutex_unlock(&kvm->lock); 1931 + mutex_unlock(&kvm->arch.xen.xen_lock); 1941 1932 1942 1933 if (!evtchnfd) 1943 1934 return -ENOENT; ··· 1955 1946 int i; 1956 1947 int n = 0; 1957 1948 1958 - mutex_lock(&kvm->lock); 1949 + mutex_lock(&kvm->arch.xen.xen_lock); 1959 1950 1960 1951 /* 1961 1952 * Because synchronize_srcu() cannot be called inside the ··· 1967 1958 1968 1959 all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL); 1969 1960 if (!all_evtchnfds) { 1970 - mutex_unlock(&kvm->lock); 1961 + mutex_unlock(&kvm->arch.xen.xen_lock); 1971 1962 return -ENOMEM; 1972 1963 } 1973 1964 ··· 1976 1967 all_evtchnfds[n++] = evtchnfd; 1977 1968 idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port); 1978 1969 } 1979 - mutex_unlock(&kvm->lock); 1970 + mutex_unlock(&kvm->arch.xen.xen_lock); 1980 1971 1981 1972 synchronize_srcu(&kvm->srcu); 1982 1973 ··· 2078 2069 2079 2070 void kvm_xen_init_vm(struct kvm *kvm) 2080 2071 { 2072 + mutex_init(&kvm->arch.xen.xen_lock); 2081 2073 
idr_init(&kvm->arch.xen.evtchn_ports); 2082 2074 kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN); 2083 2075 }
+4
arch/x86/mm/init.c
··· 26 26 #include <asm/pti.h> 27 27 #include <asm/text-patching.h> 28 28 #include <asm/memtype.h> 29 + #include <asm/paravirt.h> 29 30 30 31 /* 31 32 * We need to define the tracepoints somewhere, and tlb.c ··· 804 803 805 804 poking_mm = mm_alloc(); 806 805 BUG_ON(!poking_mm); 806 + 807 + /* Xen PV guests need the PGD to be pinned. */ 808 + paravirt_arch_dup_mmap(NULL, poking_mm); 807 809 808 810 /* 809 811 * Randomize the poking address, but make sure that the following page
+2 -1
arch/x86/mm/pat/memtype.c
··· 387 387 u8 mtrr_type, uniform; 388 388 389 389 mtrr_type = mtrr_type_lookup(start, end, &uniform); 390 - if (mtrr_type != MTRR_TYPE_WRBACK) 390 + if (mtrr_type != MTRR_TYPE_WRBACK && 391 + mtrr_type != MTRR_TYPE_INVALID) 391 392 return _PAGE_CACHE_MODE_UC_MINUS; 392 393 393 394 return _PAGE_CACHE_MODE_WB;
+38 -6
arch/x86/pci/mmconfig-shared.c
··· 12 12 */ 13 13 14 14 #include <linux/acpi.h> 15 + #include <linux/efi.h> 15 16 #include <linux/pci.h> 16 17 #include <linux/init.h> 17 18 #include <linux/bitmap.h> ··· 443 442 return mcfg_res.flags; 444 443 } 445 444 445 + static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used) 446 + { 447 + #ifdef CONFIG_EFI 448 + efi_memory_desc_t *md; 449 + u64 size, mmio_start, mmio_end; 450 + 451 + for_each_efi_memory_desc(md) { 452 + if (md->type == EFI_MEMORY_MAPPED_IO) { 453 + size = md->num_pages << EFI_PAGE_SHIFT; 454 + mmio_start = md->phys_addr; 455 + mmio_end = mmio_start + size; 456 + 457 + /* 458 + * N.B. Caller supplies (start, start + size), 459 + * so to match, mmio_end is the first address 460 + * *past* the EFI_MEMORY_MAPPED_IO area. 461 + */ 462 + if (mmio_start <= start && end <= mmio_end) 463 + return true; 464 + } 465 + } 466 + #endif 467 + 468 + return false; 469 + } 470 + 446 471 typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type); 447 472 448 473 static bool __ref is_mmconf_reserved(check_reserved_t is_reserved, 449 474 struct pci_mmcfg_region *cfg, 450 - struct device *dev, int with_e820) 475 + struct device *dev, const char *method) 451 476 { 452 477 u64 addr = cfg->res.start; 453 478 u64 size = resource_size(&cfg->res); 454 479 u64 old_size = size; 455 480 int num_buses; 456 - char *method = with_e820 ? 
"E820" : "ACPI motherboard resources"; 457 481 458 482 while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) { 459 483 size >>= 1; ··· 490 464 return false; 491 465 492 466 if (dev) 493 - dev_info(dev, "MMCONFIG at %pR reserved in %s\n", 467 + dev_info(dev, "MMCONFIG at %pR reserved as %s\n", 494 468 &cfg->res, method); 495 469 else 496 - pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n", 470 + pr_info(PREFIX "MMCONFIG at %pR reserved as %s\n", 497 471 &cfg->res, method); 498 472 499 473 if (old_size != size) { ··· 526 500 pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early) 527 501 { 528 502 if (!early && !acpi_disabled) { 529 - if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0)) 503 + if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 504 + "ACPI motherboard resource")) 530 505 return true; 531 506 532 507 if (dev) ··· 540 513 "MMCONFIG at %pR not reserved in " 541 514 "ACPI motherboard resources\n", 542 515 &cfg->res); 516 + 517 + if (is_mmconf_reserved(is_efi_mmio, cfg, dev, 518 + "EfiMemoryMappedIO")) 519 + return true; 543 520 } 544 521 545 522 /* ··· 558 527 /* Don't try to do this check unless configuration 559 528 type 1 is available. how about type 2 ?*/ 560 529 if (raw_pci_ops) 561 - return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1); 530 + return is_mmconf_reserved(e820__mapped_all, cfg, dev, 531 + "E820 entry"); 562 532 563 533 return false; 564 534 }
+2 -2
arch/x86/um/elfcore.c
··· 7 7 #include <asm/elf.h> 8 8 9 9 10 - Elf32_Half elf_core_extra_phdrs(void) 10 + Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm) 11 11 { 12 12 return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; 13 13 } ··· 60 60 return 1; 61 61 } 62 62 63 - size_t elf_core_extra_data_size(void) 63 + size_t elf_core_extra_data_size(struct coredump_params *cprm) 64 64 { 65 65 if ( vsyscall_ehdr ) { 66 66 const struct elfhdr *const ehdrp =
-3
block/blk-core.c
··· 283 283 * 284 284 * Decrements the refcount of the request_queue and free it when the refcount 285 285 * reaches 0. 286 - * 287 - * Context: Can sleep. 288 286 */ 289 287 void blk_put_queue(struct request_queue *q) 290 288 { 291 - might_sleep(); 292 289 if (refcount_dec_and_test(&q->refs)) 293 290 blk_free_queue(q); 294 291 }
+12 -2
drivers/acpi/glue.c
··· 75 75 } 76 76 77 77 #define FIND_CHILD_MIN_SCORE 1 78 - #define FIND_CHILD_MAX_SCORE 2 78 + #define FIND_CHILD_MID_SCORE 2 79 + #define FIND_CHILD_MAX_SCORE 3 79 80 80 81 static int match_any(struct acpi_device *adev, void *not_used) 81 82 { ··· 97 96 return -ENODEV; 98 97 99 98 status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); 100 - if (status == AE_NOT_FOUND) 99 + if (status == AE_NOT_FOUND) { 100 + /* 101 + * Special case: backlight device objects without _STA are 102 + * preferred to other objects with the same _ADR value, because 103 + * it is more likely that they are actually useful. 104 + */ 105 + if (adev->pnp.type.backlight) 106 + return FIND_CHILD_MID_SCORE; 107 + 101 108 return FIND_CHILD_MIN_SCORE; 109 + } 102 110 103 111 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) 104 112 return -ENODEV;
+7
drivers/acpi/resource.c
··· 433 433 }, 434 434 }, 435 435 { 436 + .ident = "Asus ExpertBook B2402CBA", 437 + .matches = { 438 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 439 + DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"), 440 + }, 441 + }, 442 + { 436 443 .ident = "Asus ExpertBook B2502", 437 444 .matches = { 438 445 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+5 -2
drivers/acpi/scan.c
··· 1370 1370 * Some devices don't reliably have _HIDs & _CIDs, so add 1371 1371 * synthetic HIDs to make sure drivers can find them. 1372 1372 */ 1373 - if (acpi_is_video_device(handle)) 1373 + if (acpi_is_video_device(handle)) { 1374 1374 acpi_add_id(pnp, ACPI_VIDEO_HID); 1375 - else if (acpi_bay_match(handle)) 1375 + pnp->type.backlight = 1; 1376 + break; 1377 + } 1378 + if (acpi_bay_match(handle)) 1376 1379 acpi_add_id(pnp, ACPI_BAY_HID); 1377 1380 else if (acpi_dock_match(handle)) 1378 1381 acpi_add_id(pnp, ACPI_DOCK_HID);
+4
drivers/acpi/video_detect.c
··· 50 50 acpi_backlight_cmdline = acpi_backlight_video; 51 51 if (!strcmp("native", acpi_video_backlight_string)) 52 52 acpi_backlight_cmdline = acpi_backlight_native; 53 + if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string)) 54 + acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec; 55 + if (!strcmp("apple_gmux", acpi_video_backlight_string)) 56 + acpi_backlight_cmdline = acpi_backlight_apple_gmux; 53 57 if (!strcmp("none", acpi_video_backlight_string)) 54 58 acpi_backlight_cmdline = acpi_backlight_none; 55 59 }
+1
drivers/ata/Kconfig
··· 640 640 config PATA_CS5535 641 641 tristate "CS5535 PATA support (Experimental)" 642 642 depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST)) 643 + depends on !UML 643 644 help 644 645 This option enables support for the NatSemi/AMD CS5535 645 646 companion chip used with the Geode processor family.
+7
drivers/bluetooth/hci_qca.c
··· 2164 2164 int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); 2165 2165 struct serdev_device *serdev = to_serdev_device(dev); 2166 2166 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); 2167 + struct hci_uart *hu = &qcadev->serdev_hu; 2168 + struct hci_dev *hdev = hu->hdev; 2169 + struct qca_data *qca = hu->priv; 2167 2170 const u8 ibs_wake_cmd[] = { 0xFD }; 2168 2171 const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 }; 2169 2172 2170 2173 if (qcadev->btsoc_type == QCA_QCA6390) { 2174 + if (test_bit(QCA_BT_OFF, &qca->flags) || 2175 + !test_bit(HCI_RUNNING, &hdev->flags)) 2176 + return; 2177 + 2171 2178 serdev_device_write_flush(serdev); 2172 2179 ret = serdev_device_write_buf(serdev, ibs_wake_cmd, 2173 2180 sizeof(ibs_wake_cmd));
+1
drivers/cpufreq/amd-pstate.c
··· 307 307 max_perf = min_perf; 308 308 309 309 amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true); 310 + cpufreq_cpu_put(policy); 310 311 } 311 312 312 313 static int amd_get_min_freq(struct amd_cpudata *cpudata)
+2 -1
drivers/cpufreq/apple-soc-cpufreq.c
··· 280 280 policy->cpuinfo.transition_latency = transition_latency; 281 281 policy->dvfs_possible_from_any_cpu = true; 282 282 policy->fast_switch_possible = true; 283 + policy->suspend_freq = freq_table[0].frequency; 283 284 284 285 if (policy_has_boost_freq(policy)) { 285 286 ret = cpufreq_enable_boost_support(); ··· 322 321 .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | 323 322 CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, 324 323 .verify = cpufreq_generic_frequency_table_verify, 325 - .attr = cpufreq_generic_attr, 326 324 .get = apple_soc_cpufreq_get_rate, 327 325 .init = apple_soc_cpufreq_init, 328 326 .exit = apple_soc_cpufreq_exit, ··· 329 329 .fast_switch = apple_soc_cpufreq_fast_switch, 330 330 .register_em = cpufreq_register_em_with_opp, 331 331 .attr = apple_soc_cpufreq_hw_attr, 332 + .suspend = cpufreq_generic_suspend, 332 333 }; 333 334 334 335 static int __init apple_soc_cpufreq_module_init(void)
+1 -1
drivers/cpufreq/armada-37xx-cpufreq.c
··· 445 445 return -ENODEV; 446 446 } 447 447 448 - clk = clk_get(cpu_dev, 0); 448 + clk = clk_get(cpu_dev, NULL); 449 449 if (IS_ERR(clk)) { 450 450 dev_err(cpu_dev, "Cannot get clock for CPU0\n"); 451 451 return PTR_ERR(clk);
+6 -5
drivers/cpufreq/cppc_cpufreq.c
··· 487 487 cpu_data = policy->driver_data; 488 488 perf_caps = &cpu_data->perf_caps; 489 489 max_cap = arch_scale_cpu_capacity(cpu); 490 - min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf); 490 + min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf, 491 + perf_caps->highest_perf); 491 492 if ((min_cap == 0) || (max_cap < min_cap)) 492 493 return 0; 493 494 return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP; ··· 520 519 cpu_data = policy->driver_data; 521 520 perf_caps = &cpu_data->perf_caps; 522 521 max_cap = arch_scale_cpu_capacity(cpu_dev->id); 523 - min_cap = div_u64(max_cap * perf_caps->lowest_perf, 524 - perf_caps->highest_perf); 525 - 526 - perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap; 522 + min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf, 523 + perf_caps->highest_perf); 524 + perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf, 525 + max_cap); 527 526 min_step = min_cap / CPPC_EM_CAP_STEP; 528 527 max_step = max_cap / CPPC_EM_CAP_STEP; 529 528
+2
drivers/cpufreq/cpufreq-dt-platdev.c
··· 137 137 { .compatible = "nvidia,tegra30", }, 138 138 { .compatible = "nvidia,tegra124", }, 139 139 { .compatible = "nvidia,tegra210", }, 140 + { .compatible = "nvidia,tegra234", }, 140 141 141 142 { .compatible = "qcom,apq8096", }, 142 143 { .compatible = "qcom,msm8996", }, ··· 151 150 { .compatible = "qcom,sdm845", }, 152 151 { .compatible = "qcom,sm6115", }, 153 152 { .compatible = "qcom,sm6350", }, 153 + { .compatible = "qcom,sm6375", }, 154 154 { .compatible = "qcom,sm8150", }, 155 155 { .compatible = "qcom,sm8250", }, 156 156 { .compatible = "qcom,sm8350", },
+20 -2
drivers/cpufreq/qcom-cpufreq-hw.c
··· 649 649 { 650 650 struct clk_hw_onecell_data *clk_data; 651 651 struct device *dev = &pdev->dev; 652 + struct device_node *soc_node; 652 653 struct device *cpu_dev; 653 654 struct clk *clk; 654 - int ret, i, num_domains; 655 + int ret, i, num_domains, reg_sz; 655 656 656 657 clk = clk_get(dev, "xo"); 657 658 if (IS_ERR(clk)) ··· 680 679 return ret; 681 680 682 681 /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */ 683 - num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * 4); 682 + soc_node = of_get_parent(dev->of_node); 683 + if (!soc_node) 684 + return -EINVAL; 685 + 686 + ret = of_property_read_u32(soc_node, "#address-cells", &reg_sz); 687 + if (ret) 688 + goto of_exit; 689 + 690 + ret = of_property_read_u32(soc_node, "#size-cells", &i); 691 + if (ret) 692 + goto of_exit; 693 + 694 + reg_sz += i; 695 + 696 + num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * reg_sz); 684 697 if (num_domains <= 0) 685 698 return num_domains; 686 699 ··· 757 742 dev_err(dev, "CPUFreq HW driver failed to register\n"); 758 743 else 759 744 dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n"); 745 + 746 + of_exit: 747 + of_node_put(soc_node); 760 748 761 749 return ret; 762 750 }
+8 -9
drivers/edac/edac_device.c
··· 394 394 * Then restart the workq on the new delay 395 395 */ 396 396 void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, 397 - unsigned long value) 397 + unsigned long msec) 398 398 { 399 - unsigned long jiffs = msecs_to_jiffies(value); 399 + edac_dev->poll_msec = msec; 400 + edac_dev->delay = msecs_to_jiffies(msec); 400 401 401 - if (value == 1000) 402 - jiffs = round_jiffies_relative(value); 403 - 404 - edac_dev->poll_msec = value; 405 - edac_dev->delay = jiffs; 406 - 407 - edac_mod_work(&edac_dev->work, jiffs); 402 + /* See comment in edac_device_workq_setup() above */ 403 + if (edac_dev->poll_msec == 1000) 404 + edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); 405 + else 406 + edac_mod_work(&edac_dev->work, edac_dev->delay); 408 407 } 409 408 410 409 int edac_device_alloc_index(void)
+1 -1
drivers/edac/edac_module.h
··· 53 53 bool edac_mod_work(struct delayed_work *work, unsigned long delay); 54 54 55 55 extern void edac_device_reset_delay_period(struct edac_device_ctl_info 56 - *edac_dev, unsigned long value); 56 + *edac_dev, unsigned long msec); 57 57 extern void edac_mc_reset_delay_period(unsigned long value); 58 58 59 59 /*
+5 -2
drivers/edac/highbank_mc_edac.c
··· 174 174 drvdata = mci->pvt_info; 175 175 platform_set_drvdata(pdev, mci); 176 176 177 - if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) 178 - return -ENOMEM; 177 + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { 178 + res = -ENOMEM; 179 + goto free; 180 + } 179 181 180 182 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 181 183 if (!r) { ··· 245 243 edac_mc_del_mc(&pdev->dev); 246 244 err: 247 245 devres_release_group(&pdev->dev, NULL); 246 + free: 248 247 edac_mc_free(mci); 249 248 return res; 250 249 }
+6 -3
drivers/firmware/efi/efi.c
··· 394 394 efi_kobj = kobject_create_and_add("efi", firmware_kobj); 395 395 if (!efi_kobj) { 396 396 pr_err("efi: Firmware registration failed.\n"); 397 - destroy_workqueue(efi_rts_wq); 398 - return -ENOMEM; 397 + error = -ENOMEM; 398 + goto err_destroy_wq; 399 399 } 400 400 401 401 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | ··· 443 443 err_put: 444 444 kobject_put(efi_kobj); 445 445 efi_kobj = NULL; 446 - destroy_workqueue(efi_rts_wq); 446 + err_destroy_wq: 447 + if (efi_rts_wq) 448 + destroy_workqueue(efi_rts_wq); 449 + 447 450 return error; 448 451 } 449 452
+1
drivers/firmware/efi/runtime-wrappers.c
··· 62 62 \ 63 63 if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \ 64 64 pr_warn_once("EFI Runtime Services are disabled!\n"); \ 65 + efi_rts_work.status = EFI_DEVICE_ERROR; \ 65 66 goto exit; \ 66 67 } \ 67 68 \
+7 -2
drivers/firmware/google/coreboot_table.c
··· 93 93 for (i = 0; i < header->table_entries; i++) { 94 94 entry = ptr_entry; 95 95 96 - device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL); 96 + if (entry->size < sizeof(*entry)) { 97 + dev_warn(dev, "coreboot table entry too small!\n"); 98 + return -EINVAL; 99 + } 100 + 101 + device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL); 97 102 if (!device) 98 103 return -ENOMEM; 99 104 100 105 device->dev.parent = dev; 101 106 device->dev.bus = &coreboot_bus_type; 102 107 device->dev.release = coreboot_device_release; 103 - memcpy(&device->entry, ptr_entry, entry->size); 108 + memcpy(device->raw, ptr_entry, entry->size); 104 109 105 110 switch (device->entry.tag) { 106 111 case LB_TAG_CBMEM_ENTRY:
+1
drivers/firmware/google/coreboot_table.h
··· 79 79 struct lb_cbmem_ref cbmem_ref; 80 80 struct lb_cbmem_entry cbmem_entry; 81 81 struct lb_framebuffer framebuffer; 82 + DECLARE_FLEX_ARRAY(u8, raw); 82 83 }; 83 84 }; 84 85
+3
drivers/firmware/psci/psci.c
··· 440 440 441 441 static int __init psci_debugfs_init(void) 442 442 { 443 + if (!invoke_psci_fn || !psci_ops.get_version) 444 + return 0; 445 + 443 446 return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL, 444 447 &psci_debugfs_ops)); 445 448 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 2099 2099 } 2100 2100 2101 2101 amdgpu_amdkfd_remove_eviction_fence( 2102 - bo, bo->kfd_bo->process_info->eviction_fence); 2102 + bo, bo->vm_bo->vm->process_info->eviction_fence); 2103 2103 2104 2104 amdgpu_bo_unreserve(bo); 2105 2105
+34 -17
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 61 61 amdgpu_ctx_put(p->ctx); 62 62 return -ECANCELED; 63 63 } 64 + 65 + amdgpu_sync_create(&p->sync); 64 66 return 0; 65 67 } 66 68 ··· 454 452 } 455 453 456 454 r = amdgpu_sync_fence(&p->sync, fence); 457 - if (r) 458 - goto error; 459 - 460 - /* 461 - * When we have an explicit dependency it might be necessary to insert a 462 - * pipeline sync to make sure that all caches etc are flushed and the 463 - * next job actually sees the results from the previous one. 464 - */ 465 - if (fence->context == p->gang_leader->base.entity->fence_context) 466 - r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence); 467 - 468 - error: 469 455 dma_fence_put(fence); 470 456 return r; 471 457 } ··· 1178 1188 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) 1179 1189 { 1180 1190 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1191 + struct drm_gpu_scheduler *sched; 1181 1192 struct amdgpu_bo_list_entry *e; 1193 + struct dma_fence *fence; 1182 1194 unsigned int i; 1183 1195 int r; 1196 + 1197 + r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]); 1198 + if (r) { 1199 + if (r != -ERESTARTSYS) 1200 + DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n"); 1201 + return r; 1202 + } 1184 1203 1185 1204 list_for_each_entry(e, &p->validated, tv.head) { 1186 1205 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); ··· 1210 1211 return r; 1211 1212 } 1212 1213 1213 - r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]); 1214 - if (r && r != -ERESTARTSYS) 1215 - DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n"); 1216 - return r; 1214 + sched = p->gang_leader->base.entity->rq->sched; 1215 + while ((fence = amdgpu_sync_get_fence(&p->sync))) { 1216 + struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); 1217 + 1218 + /* 1219 + * When we have an dependency it might be necessary to insert a 1220 + * pipeline sync to make sure that all caches etc are flushed and the 1221 + * next job actually sees the results from the previous one 
1222 + * before we start executing on the same scheduler ring. 1223 + */ 1224 + if (!s_fence || s_fence->sched != sched) 1225 + continue; 1226 + 1227 + r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence); 1228 + if (r) 1229 + return r; 1230 + } 1231 + return 0; 1217 1232 } 1218 1233 1219 1234 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) ··· 1267 1254 continue; 1268 1255 1269 1256 fence = &p->jobs[i]->base.s_fence->scheduled; 1257 + dma_fence_get(fence); 1270 1258 r = drm_sched_job_add_dependency(&leader->base, fence); 1271 - if (r) 1259 + if (r) { 1260 + dma_fence_put(fence); 1272 1261 goto error_cleanup; 1262 + } 1273 1263 } 1274 1264 1275 1265 if (p->gang_size > 1) { ··· 1360 1344 { 1361 1345 unsigned i; 1362 1346 1347 + amdgpu_sync_free(&parser->sync); 1363 1348 for (i = 0; i < parser->num_post_deps; i++) { 1364 1349 drm_syncobj_put(parser->post_deps[i].syncobj); 1365 1350 kfree(parser->post_deps[i].chain);
+8
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 36 36 #include <generated/utsrelease.h> 37 37 #include <linux/pci-p2pdma.h> 38 38 39 + #include <drm/drm_aperture.h> 39 40 #include <drm/drm_atomic_helper.h> 40 41 #include <drm/drm_fb_helper.h> 41 42 #include <drm/drm_probe_helper.h> ··· 90 89 #define AMDGPU_RESUME_MS 2000 91 90 #define AMDGPU_MAX_RETRY_LIMIT 2 92 91 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL) 92 + 93 + static const struct drm_driver amdgpu_kms_driver; 93 94 94 95 const char *amdgpu_asic_name[] = { 95 96 "TAHITI", ··· 3687 3684 3688 3685 /* early init functions */ 3689 3686 r = amdgpu_device_ip_early_init(adev); 3687 + if (r) 3688 + return r; 3689 + 3690 + /* Get rid of things like offb */ 3691 + r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); 3690 3692 if (r) 3691 3693 return r; 3692 3694
-6
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 23 23 */ 24 24 25 25 #include <drm/amdgpu_drm.h> 26 - #include <drm/drm_aperture.h> 27 26 #include <drm/drm_drv.h> 28 27 #include <drm/drm_fbdev_generic.h> 29 28 #include <drm/drm_gem.h> ··· 2120 2121 } 2121 2122 } 2122 2123 #endif 2123 - 2124 - /* Get rid of things like offb */ 2125 - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); 2126 - if (ret) 2127 - return ret; 2128 2124 2129 2125 adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); 2130 2126 if (IS_ERR(adev))
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 470 470 return true; 471 471 472 472 fail: 473 - DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, 474 - man->size); 473 + if (man) 474 + DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, 475 + man->size); 475 476 return false; 476 477 } 477 478
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
··· 391 391 392 392 dma_fence_get(f); 393 393 r = drm_sched_job_add_dependency(&job->base, f); 394 - if (r) 394 + if (r) { 395 + dma_fence_put(f); 395 396 return r; 397 + } 396 398 } 397 399 return 0; 398 400 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 882 882 kfree(rsv); 883 883 884 884 list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { 885 - drm_buddy_free_list(&mgr->mm, &rsv->blocks); 885 + drm_buddy_free_list(&mgr->mm, &rsv->allocated); 886 886 kfree(rsv); 887 887 } 888 888 drm_buddy_fini(&mgr->mm);
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 200 200 queue_input.wptr_addr = (uint64_t)q->properties.write_ptr; 201 201 202 202 if (q->wptr_bo) { 203 - wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va; 203 + wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1); 204 204 queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off; 205 205 } 206 206
+9
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 570 570 goto reserve_bo_failed; 571 571 } 572 572 573 + if (clear) { 574 + r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 575 + if (r) { 576 + pr_debug("failed %d to sync bo\n", r); 577 + amdgpu_bo_unreserve(bo); 578 + goto reserve_bo_failed; 579 + } 580 + } 581 + 573 582 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1); 574 583 if (r) { 575 584 pr_debug("failed %d to reserve bo\n", r);
+6 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 1261 1261 uint32_t speed) 1262 1262 { 1263 1263 struct amdgpu_device *adev = smu->adev; 1264 - uint32_t tach_period, crystal_clock_freq; 1264 + uint32_t crystal_clock_freq = 2500; 1265 + uint32_t tach_period; 1265 1266 int ret; 1266 1267 1267 1268 if (!speed) ··· 1272 1271 if (ret) 1273 1272 return ret; 1274 1273 1275 - crystal_clock_freq = amdgpu_asic_get_xclk(adev); 1276 1274 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1277 1275 WREG32_SOC15(THM, 0, regCG_TACH_CTRL, 1278 1276 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL), ··· 2297 2297 if (amdgpu_sriov_vf(smu->adev) || 2298 2298 !smu_baco->platform_support) 2299 2299 return false; 2300 + 2301 + /* return true if ASIC is in BACO state already */ 2302 + if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) 2303 + return true; 2300 2304 2301 2305 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && 2302 2306 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 213 213 FEA_MAP(SOC_PCC), 214 214 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 215 215 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 216 + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, 216 217 }; 217 218 218 219 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
+1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 192 192 FEA_MAP(SOC_PCC), 193 193 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 194 194 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 195 + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, 195 196 }; 196 197 197 198 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
+54 -27
drivers/gpu/drm/drm_buddy.c
··· 38 38 kmem_cache_free(slab_blocks, block); 39 39 } 40 40 41 + static void list_insert_sorted(struct drm_buddy *mm, 42 + struct drm_buddy_block *block) 43 + { 44 + struct drm_buddy_block *node; 45 + struct list_head *head; 46 + 47 + head = &mm->free_list[drm_buddy_block_order(block)]; 48 + if (list_empty(head)) { 49 + list_add(&block->link, head); 50 + return; 51 + } 52 + 53 + list_for_each_entry(node, head, link) 54 + if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node)) 55 + break; 56 + 57 + __list_add(&block->link, node->link.prev, &node->link); 58 + } 59 + 41 60 static void mark_allocated(struct drm_buddy_block *block) 42 61 { 43 62 block->header &= ~DRM_BUDDY_HEADER_STATE; ··· 71 52 block->header &= ~DRM_BUDDY_HEADER_STATE; 72 53 block->header |= DRM_BUDDY_FREE; 73 54 74 - list_add(&block->link, 75 - &mm->free_list[drm_buddy_block_order(block)]); 55 + list_insert_sorted(mm, block); 76 56 } 77 57 78 58 static void mark_split(struct drm_buddy_block *block) ··· 405 387 } 406 388 407 389 static struct drm_buddy_block * 408 - get_maxblock(struct list_head *head) 390 + get_maxblock(struct drm_buddy *mm, unsigned int order) 409 391 { 410 392 struct drm_buddy_block *max_block = NULL, *node; 393 + unsigned int i; 411 394 412 - max_block = list_first_entry_or_null(head, 413 - struct drm_buddy_block, 414 - link); 415 - if (!max_block) 416 - return NULL; 395 + for (i = order; i <= mm->max_order; ++i) { 396 + if (!list_empty(&mm->free_list[i])) { 397 + node = list_last_entry(&mm->free_list[i], 398 + struct drm_buddy_block, 399 + link); 400 + if (!max_block) { 401 + max_block = node; 402 + continue; 403 + } 417 404 418 - list_for_each_entry(node, head, link) { 419 - if (drm_buddy_block_offset(node) > 420 - drm_buddy_block_offset(max_block)) 421 - max_block = node; 405 + if (drm_buddy_block_offset(node) > 406 + drm_buddy_block_offset(max_block)) { 407 + max_block = node; 408 + } 409 + } 422 410 } 423 411 424 412 return max_block; ··· 436 412 unsigned long 
flags) 437 413 { 438 414 struct drm_buddy_block *block = NULL; 439 - unsigned int i; 415 + unsigned int tmp; 440 416 int err; 441 417 442 - for (i = order; i <= mm->max_order; ++i) { 443 - if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { 444 - block = get_maxblock(&mm->free_list[i]); 445 - if (block) 446 - break; 447 - } else { 448 - block = list_first_entry_or_null(&mm->free_list[i], 449 - struct drm_buddy_block, 450 - link); 451 - if (block) 452 - break; 418 + if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { 419 + block = get_maxblock(mm, order); 420 + if (block) 421 + /* Store the obtained block order */ 422 + tmp = drm_buddy_block_order(block); 423 + } else { 424 + for (tmp = order; tmp <= mm->max_order; ++tmp) { 425 + if (!list_empty(&mm->free_list[tmp])) { 426 + block = list_last_entry(&mm->free_list[tmp], 427 + struct drm_buddy_block, 428 + link); 429 + if (block) 430 + break; 431 + } 453 432 } 454 433 } 455 434 ··· 461 434 462 435 BUG_ON(!drm_buddy_block_is_free(block)); 463 436 464 - while (i != order) { 437 + while (tmp != order) { 465 438 err = split_block(mm, block); 466 439 if (unlikely(err)) 467 440 goto err_undo; 468 441 469 442 block = block->right; 470 - i--; 443 + tmp--; 471 444 } 472 445 return block; 473 446 474 447 err_undo: 475 - if (i != order) 448 + if (tmp != order) 476 449 __drm_buddy_free(mm, block); 477 450 return ERR_PTR(err); 478 451 }
+6
drivers/gpu/drm/drm_panel_orientation_quirks.c
··· 304 304 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), 305 305 }, 306 306 .driver_data = (void *)&lcd1200x1920_rightside_up, 307 + }, { /* Lenovo Ideapad D330-10IGL (HD) */ 308 + .matches = { 309 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), 310 + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"), 311 + }, 312 + .driver_data = (void *)&lcd800x1280_rightside_up, 307 313 }, { /* Lenovo Yoga Book X90F / X91F / X91L */ 308 314 .matches = { 309 315 /* Non exact match to match all versions */
+18 -6
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 1688 1688 init_contexts(&i915->gem.contexts); 1689 1689 } 1690 1690 1691 + /* 1692 + * Note that this implicitly consumes the ctx reference, by placing 1693 + * the ctx in the context_xa. 1694 + */ 1691 1695 static void gem_context_register(struct i915_gem_context *ctx, 1692 1696 struct drm_i915_file_private *fpriv, 1693 1697 u32 id) ··· 1707 1703 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 1708 1704 current->comm, pid_nr(ctx->pid)); 1709 1705 1710 - /* And finally expose ourselves to userspace via the idr */ 1711 - old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); 1712 - WARN_ON(old); 1713 - 1714 1706 spin_lock(&ctx->client->ctx_lock); 1715 1707 list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list); 1716 1708 spin_unlock(&ctx->client->ctx_lock); ··· 1714 1714 spin_lock(&i915->gem.contexts.lock); 1715 1715 list_add_tail(&ctx->link, &i915->gem.contexts.list); 1716 1716 spin_unlock(&i915->gem.contexts.lock); 1717 + 1718 + /* And finally expose ourselves to userspace via the idr */ 1719 + old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); 1720 + WARN_ON(old); 1717 1721 } 1718 1722 1719 1723 int i915_gem_context_open(struct drm_i915_private *i915, ··· 2203 2199 if (IS_ERR(ctx)) 2204 2200 return ctx; 2205 2201 2202 + /* 2203 + * One for the xarray and one for the caller. We need to grab 2204 + * the reference *prior* to making the ctx visble to userspace 2205 + * in gem_context_register(), as at any point after that 2206 + * userspace can try to race us with another thread destroying 2207 + * the context under our feet. 
2208 + */ 2209 + i915_gem_context_get(ctx); 2210 + 2206 2211 gem_context_register(ctx, file_priv, id); 2207 2212 2208 2213 old = xa_erase(&file_priv->proto_context_xa, id); 2209 2214 GEM_BUG_ON(old != pc); 2210 2215 proto_context_close(file_priv->dev_priv, pc); 2211 2216 2212 - /* One for the xarray and one for the caller */ 2213 - return i915_gem_context_get(ctx); 2217 + return ctx; 2214 2218 } 2215 2219 2216 2220 struct i915_gem_context *
+2 -2
drivers/gpu/drm/i915/gt/intel_gt_regs.h
··· 406 406 #define GEN9_WM_CHICKEN3 _MMIO(0x5588) 407 407 #define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9) 408 408 409 - #define CHICKEN_RASTER_1 _MMIO(0x6204) 409 + #define CHICKEN_RASTER_1 MCR_REG(0x6204) 410 410 #define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8) 411 411 412 - #define CHICKEN_RASTER_2 _MMIO(0x6208) 412 + #define CHICKEN_RASTER_2 MCR_REG(0x6208) 413 413 #define TBIMR_FAST_CLIP REG_BIT(5) 414 414 415 415 #define VFLSKPD MCR_REG(0x62a8)
+28 -6
drivers/gpu/drm/i915/gt/intel_reset.c
··· 278 278 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) 279 279 { 280 280 struct intel_uncore *uncore = gt->uncore; 281 + int loops = 2; 281 282 int err; 282 283 283 284 /* ··· 286 285 * for fifo space for the write or forcewake the chip for 287 286 * the read 288 287 */ 289 - intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); 288 + do { 289 + intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); 290 290 291 - /* Wait for the device to ack the reset requests */ 292 - err = __intel_wait_for_register_fw(uncore, 293 - GEN6_GDRST, hw_domain_mask, 0, 294 - 500, 0, 295 - NULL); 291 + /* 292 + * Wait for the device to ack the reset requests. 293 + * 294 + * On some platforms, e.g. Jasperlake, we see that the 295 + * engine register state is not cleared until shortly after 296 + * GDRST reports completion, causing a failure as we try 297 + * to immediately resume while the internal state is still 298 + * in flux. If we immediately repeat the reset, the second 299 + * reset appears to serialise with the first, and since 300 + * it is a no-op, the registers should retain their reset 301 + * value. However, there is still a concern that upon 302 + * leaving the second reset, the internal engine state 303 + * is still in flux and not ready for resuming. 304 + */ 305 + err = __intel_wait_for_register_fw(uncore, GEN6_GDRST, 306 + hw_domain_mask, 0, 307 + 2000, 0, 308 + NULL); 309 + } while (err == 0 && --loops); 296 310 if (err) 297 311 GT_TRACE(gt, 298 312 "Wait for 0x%08x engines reset failed\n", 299 313 hw_domain_mask); 314 + 315 + /* 316 + * As we have observed that the engine state is still volatile 317 + * after GDRST is acked, impose a small delay to let everything settle. 318 + */ 319 + udelay(50); 300 320 301 321 return err; 302 322 }
+2 -2
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 645 645 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine, 646 646 struct i915_wa_list *wal) 647 647 { 648 - wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP); 648 + wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP); 649 649 wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, 650 650 REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)); 651 651 wa_mcr_add(wal, ··· 775 775 wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000); 776 776 777 777 /* Wa_15010599737:dg2 */ 778 - wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN); 778 + wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN); 779 779 } 780 780 781 781 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
+1 -1
drivers/gpu/drm/i915/i915_vma.c
··· 2116 2116 if (!obj->mm.rsgt) 2117 2117 return -EBUSY; 2118 2118 2119 - err = dma_resv_reserve_fences(obj->base.resv, 1); 2119 + err = dma_resv_reserve_fences(obj->base.resv, 2); 2120 2120 if (err) 2121 2121 return -EBUSY; 2122 2122
+9 -6
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 876 876 #define GBIF_CLIENT_HALT_MASK BIT(0) 877 877 #define GBIF_ARB_HALT_MASK BIT(1) 878 878 879 - static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) 879 + static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, 880 + bool gx_off) 880 881 { 881 882 struct msm_gpu *gpu = &adreno_gpu->base; 882 883 ··· 890 889 return; 891 890 } 892 891 893 - /* Halt the gx side of GBIF */ 894 - gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); 895 - spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); 892 + if (gx_off) { 893 + /* Halt the gx side of GBIF */ 894 + gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); 895 + spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); 896 + } 896 897 897 898 /* Halt new client requests on GBIF */ 898 899 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); ··· 932 929 /* Halt the gmu cm3 core */ 933 930 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); 934 931 935 - a6xx_bus_clear_pending_transactions(adreno_gpu); 932 + a6xx_bus_clear_pending_transactions(adreno_gpu, true); 936 933 937 934 /* Reset GPU core blocks */ 938 935 gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1); ··· 1086 1083 return; 1087 1084 } 1088 1085 1089 - a6xx_bus_clear_pending_transactions(adreno_gpu); 1086 + a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); 1090 1087 1091 1088 /* tell the GMU we want to slumber */ 1092 1089 ret = a6xx_gmu_notify_slumber(gmu);
+7
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 1270 1270 if (hang_debug) 1271 1271 a6xx_dump(gpu); 1272 1272 1273 + /* 1274 + * To handle recovery specific sequences during the rpm suspend we are 1275 + * about to trigger 1276 + */ 1277 + a6xx_gpu->hung = true; 1278 + 1273 1279 /* Halt SQE first */ 1274 1280 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); 1275 1281 ··· 1318 1312 mutex_unlock(&gpu->active_lock); 1319 1313 1320 1314 msm_gpu_hw_init(gpu); 1315 + a6xx_gpu->hung = false; 1321 1316 } 1322 1317 1323 1318 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+1
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
··· 32 32 void *llc_slice; 33 33 void *htw_llc_slice; 34 34 bool have_mmu500; 35 + bool hung; 35 36 }; 36 37 37 38 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+4 -6
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 29 29 ADRENO_FW_MAX, 30 30 }; 31 31 32 - enum adreno_quirks { 33 - ADRENO_QUIRK_TWO_PASS_USE_WFI = 1, 34 - ADRENO_QUIRK_FAULT_DETECT_MASK = 2, 35 - ADRENO_QUIRK_LMLOADKILL_DISABLE = 3, 36 - }; 32 + #define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0) 33 + #define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1) 34 + #define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2) 37 35 38 36 struct adreno_rev { 39 37 uint8_t core; ··· 63 65 const char *name; 64 66 const char *fw[ADRENO_FW_MAX]; 65 67 uint32_t gmem; 66 - enum adreno_quirks quirks; 68 + u64 quirks; 67 69 struct msm_gpu *(*init)(struct drm_device *dev); 68 70 const char *zapfw; 69 71 u32 inactive_period;
+1 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
··· 132 132 * dpu_encoder_phys_wb_setup_fb - setup output framebuffer 133 133 * @phys_enc: Pointer to physical encoder 134 134 * @fb: Pointer to output framebuffer 135 - * @wb_roi: Pointer to output region of interest 136 135 */ 137 136 static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, 138 137 struct drm_framebuffer *fb) ··· 691 692 692 693 /** 693 694 * dpu_encoder_phys_wb_init - initialize writeback encoder 694 - * @init: Pointer to init info structure with initialization params 695 + * @p: Pointer to init info structure with initialization params 695 696 */ 696 697 struct dpu_encoder_phys *dpu_encoder_phys_wb_init( 697 698 struct dpu_enc_phys_init_params *p)
+4
drivers/gpu/drm/msm/dp/dp_aux.c
··· 423 423 424 424 isr = dp_catalog_aux_get_irq(aux->catalog); 425 425 426 + /* no interrupts pending, return immediately */ 427 + if (!isr) 428 + return; 429 + 426 430 if (!aux->cmd_busy) 427 431 return; 428 432
+10 -2
drivers/gpu/drm/msm/hdmi/hdmi.c
··· 532 532 533 533 ret = devm_pm_runtime_enable(&pdev->dev); 534 534 if (ret) 535 - return ret; 535 + goto err_put_phy; 536 536 537 537 platform_set_drvdata(pdev, hdmi); 538 538 539 - return component_add(&pdev->dev, &msm_hdmi_ops); 539 + ret = component_add(&pdev->dev, &msm_hdmi_ops); 540 + if (ret) 541 + goto err_put_phy; 542 + 543 + return 0; 544 + 545 + err_put_phy: 546 + msm_hdmi_put_phy(hdmi); 547 + return ret; 540 548 } 541 549 542 550 static int msm_hdmi_dev_remove(struct platform_device *pdev)
+1 -1
drivers/gpu/drm/msm/msm_drv.c
··· 1278 1278 * msm_drm_init, drm_dev->registered is used as an indicator that the 1279 1279 * shutdown will be successful. 1280 1280 */ 1281 - if (drm && drm->registered) 1281 + if (drm && drm->registered && priv->kms) 1282 1282 drm_atomic_helper_shutdown(drm); 1283 1283 } 1284 1284
+4 -2
drivers/gpu/drm/msm/msm_mdss.c
··· 47 47 static int msm_mdss_parse_data_bus_icc_path(struct device *dev, 48 48 struct msm_mdss *msm_mdss) 49 49 { 50 - struct icc_path *path0 = of_icc_get(dev, "mdp0-mem"); 51 - struct icc_path *path1 = of_icc_get(dev, "mdp1-mem"); 50 + struct icc_path *path0; 51 + struct icc_path *path1; 52 52 53 + path0 = of_icc_get(dev, "mdp0-mem"); 53 54 if (IS_ERR_OR_NULL(path0)) 54 55 return PTR_ERR_OR_ZERO(path0); 55 56 56 57 msm_mdss->path[0] = path0; 57 58 msm_mdss->num_paths = 1; 58 59 60 + path1 = of_icc_get(dev, "mdp1-mem"); 59 61 if (!IS_ERR_OR_NULL(path1)) { 60 62 msm_mdss->path[1] = path1; 61 63 msm_mdss->num_paths++;
-613
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 1 - /* 2 - * Copyright © 2007 David Airlie 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 - * DEALINGS IN THE SOFTWARE. 
22 - * 23 - * Authors: 24 - * David Airlie 25 - */ 26 - 27 - #include <linux/module.h> 28 - #include <linux/kernel.h> 29 - #include <linux/errno.h> 30 - #include <linux/string.h> 31 - #include <linux/mm.h> 32 - #include <linux/tty.h> 33 - #include <linux/sysrq.h> 34 - #include <linux/delay.h> 35 - #include <linux/init.h> 36 - #include <linux/screen_info.h> 37 - #include <linux/vga_switcheroo.h> 38 - #include <linux/console.h> 39 - 40 - #include <drm/drm_crtc.h> 41 - #include <drm/drm_crtc_helper.h> 42 - #include <drm/drm_probe_helper.h> 43 - #include <drm/drm_fb_helper.h> 44 - #include <drm/drm_fourcc.h> 45 - #include <drm/drm_atomic.h> 46 - 47 - #include "nouveau_drv.h" 48 - #include "nouveau_gem.h" 49 - #include "nouveau_bo.h" 50 - #include "nouveau_fbcon.h" 51 - #include "nouveau_chan.h" 52 - #include "nouveau_vmm.h" 53 - 54 - #include "nouveau_crtc.h" 55 - 56 - MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); 57 - int nouveau_nofbaccel = 0; 58 - module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 59 - 60 - MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)"); 61 - static int nouveau_fbcon_bpp; 62 - module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400); 63 - 64 - static void 65 - nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 66 - { 67 - struct nouveau_fbdev *fbcon = info->par; 68 - struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 69 - struct nvif_device *device = &drm->client.device; 70 - int ret; 71 - 72 - if (info->state != FBINFO_STATE_RUNNING) 73 - return; 74 - 75 - ret = -ENODEV; 76 - if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 77 - mutex_trylock(&drm->client.mutex)) { 78 - if (device->info.family < NV_DEVICE_INFO_V0_TESLA) 79 - ret = nv04_fbcon_fillrect(info, rect); 80 - else 81 - if (device->info.family < NV_DEVICE_INFO_V0_FERMI) 82 - ret = nv50_fbcon_fillrect(info, rect); 83 - else 84 - ret = nvc0_fbcon_fillrect(info, rect); 85 - 
mutex_unlock(&drm->client.mutex); 86 - } 87 - 88 - if (ret == 0) 89 - return; 90 - 91 - if (ret != -ENODEV) 92 - nouveau_fbcon_gpu_lockup(info); 93 - drm_fb_helper_cfb_fillrect(info, rect); 94 - } 95 - 96 - static void 97 - nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) 98 - { 99 - struct nouveau_fbdev *fbcon = info->par; 100 - struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 101 - struct nvif_device *device = &drm->client.device; 102 - int ret; 103 - 104 - if (info->state != FBINFO_STATE_RUNNING) 105 - return; 106 - 107 - ret = -ENODEV; 108 - if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 109 - mutex_trylock(&drm->client.mutex)) { 110 - if (device->info.family < NV_DEVICE_INFO_V0_TESLA) 111 - ret = nv04_fbcon_copyarea(info, image); 112 - else 113 - if (device->info.family < NV_DEVICE_INFO_V0_FERMI) 114 - ret = nv50_fbcon_copyarea(info, image); 115 - else 116 - ret = nvc0_fbcon_copyarea(info, image); 117 - mutex_unlock(&drm->client.mutex); 118 - } 119 - 120 - if (ret == 0) 121 - return; 122 - 123 - if (ret != -ENODEV) 124 - nouveau_fbcon_gpu_lockup(info); 125 - drm_fb_helper_cfb_copyarea(info, image); 126 - } 127 - 128 - static void 129 - nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 130 - { 131 - struct nouveau_fbdev *fbcon = info->par; 132 - struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 133 - struct nvif_device *device = &drm->client.device; 134 - int ret; 135 - 136 - if (info->state != FBINFO_STATE_RUNNING) 137 - return; 138 - 139 - ret = -ENODEV; 140 - if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 141 - mutex_trylock(&drm->client.mutex)) { 142 - if (device->info.family < NV_DEVICE_INFO_V0_TESLA) 143 - ret = nv04_fbcon_imageblit(info, image); 144 - else 145 - if (device->info.family < NV_DEVICE_INFO_V0_FERMI) 146 - ret = nv50_fbcon_imageblit(info, image); 147 - else 148 - ret = nvc0_fbcon_imageblit(info, image); 149 - 
mutex_unlock(&drm->client.mutex); 150 - } 151 - 152 - if (ret == 0) 153 - return; 154 - 155 - if (ret != -ENODEV) 156 - nouveau_fbcon_gpu_lockup(info); 157 - drm_fb_helper_cfb_imageblit(info, image); 158 - } 159 - 160 - static int 161 - nouveau_fbcon_sync(struct fb_info *info) 162 - { 163 - struct nouveau_fbdev *fbcon = info->par; 164 - struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 165 - struct nouveau_channel *chan = drm->channel; 166 - int ret; 167 - 168 - if (!chan || !chan->accel_done || in_interrupt() || 169 - info->state != FBINFO_STATE_RUNNING || 170 - info->flags & FBINFO_HWACCEL_DISABLED) 171 - return 0; 172 - 173 - if (!mutex_trylock(&drm->client.mutex)) 174 - return 0; 175 - 176 - ret = nouveau_channel_idle(chan); 177 - mutex_unlock(&drm->client.mutex); 178 - if (ret) { 179 - nouveau_fbcon_gpu_lockup(info); 180 - return 0; 181 - } 182 - 183 - chan->accel_done = false; 184 - return 0; 185 - } 186 - 187 - static int 188 - nouveau_fbcon_open(struct fb_info *info, int user) 189 - { 190 - struct nouveau_fbdev *fbcon = info->par; 191 - struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 192 - int ret = pm_runtime_get_sync(drm->dev->dev); 193 - if (ret < 0 && ret != -EACCES) { 194 - pm_runtime_put(drm->dev->dev); 195 - return ret; 196 - } 197 - return 0; 198 - } 199 - 200 - static int 201 - nouveau_fbcon_release(struct fb_info *info, int user) 202 - { 203 - struct nouveau_fbdev *fbcon = info->par; 204 - struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 205 - pm_runtime_put(drm->dev->dev); 206 - return 0; 207 - } 208 - 209 - static const struct fb_ops nouveau_fbcon_ops = { 210 - .owner = THIS_MODULE, 211 - DRM_FB_HELPER_DEFAULT_OPS, 212 - .fb_open = nouveau_fbcon_open, 213 - .fb_release = nouveau_fbcon_release, 214 - .fb_fillrect = nouveau_fbcon_fillrect, 215 - .fb_copyarea = nouveau_fbcon_copyarea, 216 - .fb_imageblit = nouveau_fbcon_imageblit, 217 - .fb_sync = nouveau_fbcon_sync, 218 - }; 219 - 220 - static const struct fb_ops 
nouveau_fbcon_sw_ops = { 221 - .owner = THIS_MODULE, 222 - DRM_FB_HELPER_DEFAULT_OPS, 223 - .fb_open = nouveau_fbcon_open, 224 - .fb_release = nouveau_fbcon_release, 225 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 226 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 227 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 228 - }; 229 - 230 - void 231 - nouveau_fbcon_accel_save_disable(struct drm_device *dev) 232 - { 233 - struct nouveau_drm *drm = nouveau_drm(dev); 234 - if (drm->fbcon && drm->fbcon->helper.info) { 235 - drm->fbcon->saved_flags = drm->fbcon->helper.info->flags; 236 - drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED; 237 - } 238 - } 239 - 240 - void 241 - nouveau_fbcon_accel_restore(struct drm_device *dev) 242 - { 243 - struct nouveau_drm *drm = nouveau_drm(dev); 244 - if (drm->fbcon && drm->fbcon->helper.info) 245 - drm->fbcon->helper.info->flags = drm->fbcon->saved_flags; 246 - } 247 - 248 - static void 249 - nouveau_fbcon_accel_fini(struct drm_device *dev) 250 - { 251 - struct nouveau_drm *drm = nouveau_drm(dev); 252 - struct nouveau_fbdev *fbcon = drm->fbcon; 253 - if (fbcon && drm->channel) { 254 - console_lock(); 255 - if (fbcon->helper.info) 256 - fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED; 257 - console_unlock(); 258 - nouveau_channel_idle(drm->channel); 259 - nvif_object_dtor(&fbcon->twod); 260 - nvif_object_dtor(&fbcon->blit); 261 - nvif_object_dtor(&fbcon->gdi); 262 - nvif_object_dtor(&fbcon->patt); 263 - nvif_object_dtor(&fbcon->rop); 264 - nvif_object_dtor(&fbcon->clip); 265 - nvif_object_dtor(&fbcon->surf2d); 266 - } 267 - } 268 - 269 - static void 270 - nouveau_fbcon_accel_init(struct drm_device *dev) 271 - { 272 - struct nouveau_drm *drm = nouveau_drm(dev); 273 - struct nouveau_fbdev *fbcon = drm->fbcon; 274 - struct fb_info *info = fbcon->helper.info; 275 - int ret; 276 - 277 - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) 278 - ret = nv04_fbcon_accel_init(info); 279 - else 280 - if 
(drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) 281 - ret = nv50_fbcon_accel_init(info); 282 - else 283 - ret = nvc0_fbcon_accel_init(info); 284 - 285 - if (ret == 0) 286 - info->fbops = &nouveau_fbcon_ops; 287 - } 288 - 289 - static void 290 - nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon) 291 - { 292 - struct fb_info *info = fbcon->helper.info; 293 - struct fb_fillrect rect; 294 - 295 - /* Clear the entire fbcon. The drm will program every connector 296 - * with it's preferred mode. If the sizes differ, one display will 297 - * quite likely have garbage around the console. 298 - */ 299 - rect.dx = rect.dy = 0; 300 - rect.width = info->var.xres_virtual; 301 - rect.height = info->var.yres_virtual; 302 - rect.color = 0; 303 - rect.rop = ROP_COPY; 304 - info->fbops->fb_fillrect(info, &rect); 305 - } 306 - 307 - static int 308 - nouveau_fbcon_create(struct drm_fb_helper *helper, 309 - struct drm_fb_helper_surface_size *sizes) 310 - { 311 - struct nouveau_fbdev *fbcon = 312 - container_of(helper, struct nouveau_fbdev, helper); 313 - struct drm_device *dev = fbcon->helper.dev; 314 - struct nouveau_drm *drm = nouveau_drm(dev); 315 - struct nvif_device *device = &drm->client.device; 316 - struct fb_info *info; 317 - struct drm_framebuffer *fb; 318 - struct nouveau_channel *chan; 319 - struct nouveau_bo *nvbo; 320 - struct drm_mode_fb_cmd2 mode_cmd = {}; 321 - int ret; 322 - 323 - mode_cmd.width = sizes->surface_width; 324 - mode_cmd.height = sizes->surface_height; 325 - 326 - mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3); 327 - mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256); 328 - 329 - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 330 - sizes->surface_depth); 331 - 332 - ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] * 333 - mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM, 334 - 0, 0x0000, &nvbo); 335 - if (ret) { 336 - NV_ERROR(drm, "failed to allocate framebuffer\n"); 337 - 
goto out; 338 - } 339 - 340 - ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb); 341 - if (ret) 342 - goto out_unref; 343 - 344 - ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); 345 - if (ret) { 346 - NV_ERROR(drm, "failed to pin fb: %d\n", ret); 347 - goto out_unref; 348 - } 349 - 350 - ret = nouveau_bo_map(nvbo); 351 - if (ret) { 352 - NV_ERROR(drm, "failed to map fb: %d\n", ret); 353 - goto out_unpin; 354 - } 355 - 356 - chan = nouveau_nofbaccel ? NULL : drm->channel; 357 - if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) { 358 - ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma); 359 - if (ret) { 360 - NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); 361 - chan = NULL; 362 - } 363 - } 364 - 365 - info = drm_fb_helper_alloc_info(helper); 366 - if (IS_ERR(info)) { 367 - ret = PTR_ERR(info); 368 - goto out_unlock; 369 - } 370 - 371 - /* setup helper */ 372 - fbcon->helper.fb = fb; 373 - 374 - if (!chan) 375 - info->flags = FBINFO_HWACCEL_DISABLED; 376 - else 377 - info->flags = FBINFO_HWACCEL_COPYAREA | 378 - FBINFO_HWACCEL_FILLRECT | 379 - FBINFO_HWACCEL_IMAGEBLIT; 380 - info->fbops = &nouveau_fbcon_sw_ops; 381 - info->fix.smem_start = nvbo->bo.resource->bus.offset; 382 - info->fix.smem_len = nvbo->bo.base.size; 383 - 384 - info->screen_base = nvbo_kmap_obj_iovirtual(nvbo); 385 - info->screen_size = nvbo->bo.base.size; 386 - 387 - drm_fb_helper_fill_info(info, &fbcon->helper, sizes); 388 - 389 - /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 390 - 391 - if (chan) 392 - nouveau_fbcon_accel_init(dev); 393 - nouveau_fbcon_zfill(dev, fbcon); 394 - 395 - /* To allow resizeing without swapping buffers */ 396 - NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", 397 - fb->width, fb->height, nvbo->offset, nvbo); 398 - 399 - if (dev_is_pci(dev->dev)) 400 - vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info); 401 - 402 - return 0; 403 - 404 - out_unlock: 405 - if (chan) 406 - 
nouveau_vma_del(&fbcon->vma); 407 - nouveau_bo_unmap(nvbo); 408 - out_unpin: 409 - nouveau_bo_unpin(nvbo); 410 - out_unref: 411 - nouveau_bo_ref(NULL, &nvbo); 412 - out: 413 - return ret; 414 - } 415 - 416 - static int 417 - nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) 418 - { 419 - struct drm_framebuffer *fb = fbcon->helper.fb; 420 - struct nouveau_bo *nvbo; 421 - 422 - drm_fb_helper_unregister_info(&fbcon->helper); 423 - drm_fb_helper_fini(&fbcon->helper); 424 - 425 - if (fb && fb->obj[0]) { 426 - nvbo = nouveau_gem_object(fb->obj[0]); 427 - nouveau_vma_del(&fbcon->vma); 428 - nouveau_bo_unmap(nvbo); 429 - nouveau_bo_unpin(nvbo); 430 - drm_framebuffer_put(fb); 431 - } 432 - 433 - return 0; 434 - } 435 - 436 - void nouveau_fbcon_gpu_lockup(struct fb_info *info) 437 - { 438 - struct nouveau_fbdev *fbcon = info->par; 439 - struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 440 - 441 - NV_ERROR(drm, "GPU lockup - switching to software fbcon\n"); 442 - info->flags |= FBINFO_HWACCEL_DISABLED; 443 - } 444 - 445 - static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { 446 - .fb_probe = nouveau_fbcon_create, 447 - }; 448 - 449 - static void 450 - nouveau_fbcon_set_suspend_work(struct work_struct *work) 451 - { 452 - struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); 453 - int state = READ_ONCE(drm->fbcon_new_state); 454 - 455 - if (state == FBINFO_STATE_RUNNING) 456 - pm_runtime_get_sync(drm->dev->dev); 457 - 458 - console_lock(); 459 - if (state == FBINFO_STATE_RUNNING) 460 - nouveau_fbcon_accel_restore(drm->dev); 461 - drm_fb_helper_set_suspend(&drm->fbcon->helper, state); 462 - if (state != FBINFO_STATE_RUNNING) 463 - nouveau_fbcon_accel_save_disable(drm->dev); 464 - console_unlock(); 465 - 466 - if (state == FBINFO_STATE_RUNNING) { 467 - nouveau_fbcon_hotplug_resume(drm->fbcon); 468 - pm_runtime_mark_last_busy(drm->dev->dev); 469 - pm_runtime_put_autosuspend(drm->dev->dev); 470 - } 471 - } 472 - 
473 - void 474 - nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 475 - { 476 - struct nouveau_drm *drm = nouveau_drm(dev); 477 - 478 - if (!drm->fbcon) 479 - return; 480 - 481 - drm->fbcon_new_state = state; 482 - /* Since runtime resume can happen as a result of a sysfs operation, 483 - * it's possible we already have the console locked. So handle fbcon 484 - * init/deinit from a seperate work thread 485 - */ 486 - schedule_work(&drm->fbcon_work); 487 - } 488 - 489 - void 490 - nouveau_fbcon_output_poll_changed(struct drm_device *dev) 491 - { 492 - struct nouveau_drm *drm = nouveau_drm(dev); 493 - struct nouveau_fbdev *fbcon = drm->fbcon; 494 - int ret; 495 - 496 - if (!fbcon) 497 - return; 498 - 499 - mutex_lock(&fbcon->hotplug_lock); 500 - 501 - ret = pm_runtime_get(dev->dev); 502 - if (ret == 1 || ret == -EACCES) { 503 - drm_fb_helper_hotplug_event(&fbcon->helper); 504 - 505 - pm_runtime_mark_last_busy(dev->dev); 506 - pm_runtime_put_autosuspend(dev->dev); 507 - } else if (ret == 0) { 508 - /* If the GPU was already in the process of suspending before 509 - * this event happened, then we can't block here as we'll 510 - * deadlock the runtime pmops since they wait for us to 511 - * finish. So, just defer this event for when we runtime 512 - * resume again. It will be handled by fbcon_work. 
513 - */ 514 - NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n"); 515 - fbcon->hotplug_waiting = true; 516 - pm_runtime_put_noidle(drm->dev->dev); 517 - } else { 518 - DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n", 519 - ret); 520 - } 521 - 522 - mutex_unlock(&fbcon->hotplug_lock); 523 - } 524 - 525 - void 526 - nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon) 527 - { 528 - struct nouveau_drm *drm; 529 - 530 - if (!fbcon) 531 - return; 532 - drm = nouveau_drm(fbcon->helper.dev); 533 - 534 - mutex_lock(&fbcon->hotplug_lock); 535 - if (fbcon->hotplug_waiting) { 536 - fbcon->hotplug_waiting = false; 537 - 538 - NV_DEBUG(drm, "Handling deferred fbcon HPD events\n"); 539 - drm_fb_helper_hotplug_event(&fbcon->helper); 540 - } 541 - mutex_unlock(&fbcon->hotplug_lock); 542 - } 543 - 544 - int 545 - nouveau_fbcon_init(struct drm_device *dev) 546 - { 547 - struct nouveau_drm *drm = nouveau_drm(dev); 548 - struct nouveau_fbdev *fbcon; 549 - int preferred_bpp = nouveau_fbcon_bpp; 550 - int ret; 551 - 552 - if (!dev->mode_config.num_crtc || 553 - (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA) 554 - return 0; 555 - 556 - fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 557 - if (!fbcon) 558 - return -ENOMEM; 559 - 560 - drm->fbcon = fbcon; 561 - INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); 562 - mutex_init(&fbcon->hotplug_lock); 563 - 564 - drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); 565 - 566 - ret = drm_fb_helper_init(dev, &fbcon->helper); 567 - if (ret) 568 - goto free; 569 - 570 - if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) { 571 - if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) 572 - preferred_bpp = 8; 573 - else 574 - if (drm->client.device.info.ram_size <= 64 * 1024 * 1024) 575 - preferred_bpp = 16; 576 - else 577 - preferred_bpp = 32; 578 - } 579 - 580 - /* disable all the possible outputs/crtcs before entering KMS mode */ 581 - if 
(!drm_drv_uses_atomic_modeset(dev)) 582 - drm_helper_disable_unused_functions(dev); 583 - 584 - ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); 585 - if (ret) 586 - goto fini; 587 - 588 - if (fbcon->helper.info) 589 - fbcon->helper.info->pixmap.buf_align = 4; 590 - return 0; 591 - 592 - fini: 593 - drm_fb_helper_fini(&fbcon->helper); 594 - free: 595 - kfree(fbcon); 596 - drm->fbcon = NULL; 597 - return ret; 598 - } 599 - 600 - void 601 - nouveau_fbcon_fini(struct drm_device *dev) 602 - { 603 - struct nouveau_drm *drm = nouveau_drm(dev); 604 - 605 - if (!drm->fbcon) 606 - return; 607 - 608 - drm_kms_helper_poll_fini(dev); 609 - nouveau_fbcon_accel_fini(dev); 610 - nouveau_fbcon_destroy(dev, drm->fbcon); 611 - kfree(drm->fbcon); 612 - drm->fbcon = NULL; 613 - }
+1 -1
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 173 173 174 174 clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm)); 175 175 if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) 176 - ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter); 176 + ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter); 177 177 178 178 if (!src_iter->ops->maps_tt) 179 179 ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
+17 -2
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 358 358 drm_gem_object_release(obj); 359 359 return ret; 360 360 } 361 - drm_gem_object_put(obj); 362 361 363 362 rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */ 364 363 rc->bo_handle = handle; 364 + 365 + /* 366 + * The handle owns the reference now. But we must drop our 367 + * remaining reference *after* we no longer need to dereference 368 + * the obj. Otherwise userspace could guess the handle and 369 + * race closing it from another thread. 370 + */ 371 + drm_gem_object_put(obj); 372 + 365 373 return 0; 366 374 } 367 375 ··· 731 723 drm_gem_object_release(obj); 732 724 return ret; 733 725 } 734 - drm_gem_object_put(obj); 735 726 736 727 rc_blob->res_handle = bo->hw_res_handle; 737 728 rc_blob->bo_handle = handle; 729 + 730 + /* 731 + * The handle owns the reference now. But we must drop our 732 + * remaining reference *after* we no longer need to dereference 733 + * the obj. Otherwise userspace could guess the handle and 734 + * race closing it from another thread. 735 + */ 736 + drm_gem_object_put(obj); 738 737 739 738 return 0; 740 739 }
+4 -37
drivers/gpu/drm/vmwgfx/ttm_object.c
··· 254 254 kref_put(&base->refcount, ttm_release_base); 255 255 } 256 256 257 - /** 258 - * ttm_base_object_noref_lookup - look up a base object without reference 259 - * @tfile: The struct ttm_object_file the object is registered with. 260 - * @key: The object handle. 261 - * 262 - * This function looks up a ttm base object and returns a pointer to it 263 - * without refcounting the pointer. The returned pointer is only valid 264 - * until ttm_base_object_noref_release() is called, and the object 265 - * pointed to by the returned pointer may be doomed. Any persistent usage 266 - * of the object requires a refcount to be taken using kref_get_unless_zero(). 267 - * Iff this function returns successfully it needs to be paired with 268 - * ttm_base_object_noref_release() and no sleeping- or scheduling functions 269 - * may be called inbetween these function callse. 270 - * 271 - * Return: A pointer to the object if successful or NULL otherwise. 272 - */ 273 - struct ttm_base_object * 274 - ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key) 275 - { 276 - struct vmwgfx_hash_item *hash; 277 - int ret; 278 - 279 - rcu_read_lock(); 280 - ret = ttm_tfile_find_ref_rcu(tfile, key, &hash); 281 - if (ret) { 282 - rcu_read_unlock(); 283 - return NULL; 284 - } 285 - 286 - __release(RCU); 287 - return hlist_entry(hash, struct ttm_ref_object, hash)->obj; 288 - } 289 - EXPORT_SYMBOL(ttm_base_object_noref_lookup); 290 - 291 257 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, 292 258 uint64_t key) 293 259 { ··· 261 295 struct vmwgfx_hash_item *hash; 262 296 int ret; 263 297 264 - rcu_read_lock(); 265 - ret = ttm_tfile_find_ref_rcu(tfile, key, &hash); 298 + spin_lock(&tfile->lock); 299 + ret = ttm_tfile_find_ref(tfile, key, &hash); 266 300 267 301 if (likely(ret == 0)) { 268 302 base = hlist_entry(hash, struct ttm_ref_object, hash)->obj; 269 303 if (!kref_get_unless_zero(&base->refcount)) 270 304 base = NULL; 271 305 } 272 - 
rcu_read_unlock(); 306 + spin_unlock(&tfile->lock); 307 + 273 308 274 309 return base; 275 310 }
-14
drivers/gpu/drm/vmwgfx/ttm_object.h
··· 307 307 #define ttm_prime_object_kfree(__obj, __prime) \ 308 308 kfree_rcu(__obj, __prime.base.rhead) 309 309 310 - struct ttm_base_object * 311 - ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key); 312 - 313 - /** 314 - * ttm_base_object_noref_release - release a base object pointer looked up 315 - * without reference 316 - * 317 - * Releases a base object pointer looked up with ttm_base_object_noref_lookup(). 318 - */ 319 - static inline void ttm_base_object_noref_release(void) 320 - { 321 - __acquire(RCU); 322 - rcu_read_unlock(); 323 - } 324 310 #endif
-38
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 716 716 } 717 717 718 718 /** 719 - * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference 720 - * @filp: The TTM object file the handle is registered with. 721 - * @handle: The user buffer object handle. 722 - * 723 - * This function looks up a struct vmw_bo and returns a pointer to the 724 - * struct vmw_buffer_object it derives from without refcounting the pointer. 725 - * The returned pointer is only valid until vmw_user_bo_noref_release() is 726 - * called, and the object pointed to by the returned pointer may be doomed. 727 - * Any persistent usage of the object requires a refcount to be taken using 728 - * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it 729 - * needs to be paired with vmw_user_bo_noref_release() and no sleeping- 730 - * or scheduling functions may be called in between these function calls. 731 - * 732 - * Return: A struct vmw_buffer_object pointer if successful or negative 733 - * error pointer on failure. 734 - */ 735 - struct vmw_buffer_object * 736 - vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle) 737 - { 738 - struct vmw_buffer_object *vmw_bo; 739 - struct ttm_buffer_object *bo; 740 - struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle); 741 - 742 - if (!gobj) { 743 - DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", 744 - (unsigned long)handle); 745 - return ERR_PTR(-ESRCH); 746 - } 747 - vmw_bo = gem_to_vmw_bo(gobj); 748 - bo = ttm_bo_get_unless_zero(&vmw_bo->base); 749 - vmw_bo = vmw_buffer_object(bo); 750 - drm_gem_object_put(gobj); 751 - 752 - return vmw_bo; 753 - } 754 - 755 - 756 - /** 757 719 * vmw_bo_fence_single - Utility function to fence a single TTM buffer 758 720 * object without unreserving it. 759 721 *
+1 -17
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 830 830 uint32_t handle, 831 831 const struct vmw_user_resource_conv *converter, 832 832 struct vmw_resource **p_res); 833 - extern struct vmw_resource * 834 - vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, 835 - struct ttm_object_file *tfile, 836 - uint32_t handle, 837 - const struct vmw_user_resource_conv * 838 - converter); 833 + 839 834 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 840 835 struct drm_file *file_priv); 841 836 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, ··· 867 872 static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) 868 873 { 869 874 return !RB_EMPTY_NODE(&res->mob_node); 870 - } 871 - 872 - /** 873 - * vmw_user_resource_noref_release - release a user resource pointer looked up 874 - * without reference 875 - */ 876 - static inline void vmw_user_resource_noref_release(void) 877 - { 878 - ttm_base_object_noref_release(); 879 875 } 880 876 881 877 /** ··· 920 934 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, 921 935 struct ttm_resource *mem); 922 936 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); 923 - extern struct vmw_buffer_object * 924 - vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle); 925 937 926 938 /** 927 939 * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
+85 -97
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 290 290 rcache->valid_handle = 0; 291 291 } 292 292 293 + enum vmw_val_add_flags { 294 + vmw_val_add_flag_none = 0, 295 + vmw_val_add_flag_noctx = 1 << 0, 296 + }; 297 + 293 298 /** 294 - * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced 295 - * rcu-protected pointer to the validation list. 299 + * vmw_execbuf_res_val_add - Add a resource to the validation list. 296 300 * 297 301 * @sw_context: Pointer to the software context. 298 302 * @res: Unreferenced rcu-protected pointer to the resource. 299 303 * @dirty: Whether to change dirty status. 304 + * @flags: specifies whether to use the context or not 300 305 * 301 306 * Returns: 0 on success. Negative error code on failure. Typical error codes 302 307 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed. 303 308 */ 304 - static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context, 305 - struct vmw_resource *res, 306 - u32 dirty) 309 + static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context, 310 + struct vmw_resource *res, 311 + u32 dirty, 312 + u32 flags) 307 313 { 308 314 struct vmw_private *dev_priv = res->dev_priv; 309 315 int ret; ··· 324 318 if (dirty) 325 319 vmw_validation_res_set_dirty(sw_context->ctx, 326 320 rcache->private, dirty); 327 - vmw_user_resource_noref_release(); 328 321 return 0; 329 322 } 330 323 331 - priv_size = vmw_execbuf_res_size(dev_priv, res_type); 332 - ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size, 333 - dirty, (void **)&ctx_info, 334 - &first_usage); 335 - vmw_user_resource_noref_release(); 336 - if (ret) 337 - return ret; 338 - 339 - if (priv_size && first_usage) { 340 - ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res, 341 - ctx_info); 342 - if (ret) { 343 - VMW_DEBUG_USER("Failed first usage context setup.\n"); 324 + if ((flags & vmw_val_add_flag_noctx) != 0) { 325 + ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty, 326 + (void **)&ctx_info, NULL); 327 + if 
(ret) 344 328 return ret; 329 + 330 + } else { 331 + priv_size = vmw_execbuf_res_size(dev_priv, res_type); 332 + ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size, 333 + dirty, (void **)&ctx_info, 334 + &first_usage); 335 + if (ret) 336 + return ret; 337 + 338 + if (priv_size && first_usage) { 339 + ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res, 340 + ctx_info); 341 + if (ret) { 342 + VMW_DEBUG_USER("Failed first usage context setup.\n"); 343 + return ret; 344 + } 345 345 } 346 346 } 347 347 348 348 vmw_execbuf_rcache_update(rcache, res, ctx_info); 349 - return 0; 350 - } 351 - 352 - /** 353 - * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource 354 - * validation list if it's not already on it 355 - * 356 - * @sw_context: Pointer to the software context. 357 - * @res: Pointer to the resource. 358 - * @dirty: Whether to change dirty status. 359 - * 360 - * Returns: Zero on success. Negative error code on failure. 361 - */ 362 - static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context, 363 - struct vmw_resource *res, 364 - u32 dirty) 365 - { 366 - struct vmw_res_cache_entry *rcache; 367 - enum vmw_res_type res_type = vmw_res_type(res); 368 - void *ptr; 369 - int ret; 370 - 371 - rcache = &sw_context->res_cache[res_type]; 372 - if (likely(rcache->valid && rcache->res == res)) { 373 - if (dirty) 374 - vmw_validation_res_set_dirty(sw_context->ctx, 375 - rcache->private, dirty); 376 - return 0; 377 - } 378 - 379 - ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty, 380 - &ptr, NULL); 381 - if (ret) 382 - return ret; 383 - 384 - vmw_execbuf_rcache_update(rcache, res, ptr); 385 - 386 349 return 0; 387 350 } 388 351 ··· 373 398 * First add the resource the view is pointing to, otherwise it may be 374 399 * swapped out when the view is validated. 
375 400 */ 376 - ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view), 377 - vmw_view_dirtying(view)); 401 + ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view), 402 + vmw_view_dirtying(view), vmw_val_add_flag_noctx); 378 403 if (ret) 379 404 return ret; 380 405 381 - return vmw_execbuf_res_noctx_val_add(sw_context, view, 382 - VMW_RES_DIRTY_NONE); 406 + return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE, 407 + vmw_val_add_flag_noctx); 383 408 } 384 409 385 410 /** ··· 450 475 if (IS_ERR(res)) 451 476 continue; 452 477 453 - ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 454 - VMW_RES_DIRTY_SET); 478 + ret = vmw_execbuf_res_val_add(sw_context, res, 479 + VMW_RES_DIRTY_SET, 480 + vmw_val_add_flag_noctx); 455 481 if (unlikely(ret != 0)) 456 482 return ret; 457 483 } ··· 466 490 if (vmw_res_type(entry->res) == vmw_res_view) 467 491 ret = vmw_view_res_val_add(sw_context, entry->res); 468 492 else 469 - ret = vmw_execbuf_res_noctx_val_add 470 - (sw_context, entry->res, 471 - vmw_binding_dirtying(entry->bt)); 493 + ret = vmw_execbuf_res_val_add(sw_context, entry->res, 494 + vmw_binding_dirtying(entry->bt), 495 + vmw_val_add_flag_noctx); 472 496 if (unlikely(ret != 0)) 473 497 break; 474 498 } ··· 634 658 { 635 659 struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; 636 660 struct vmw_resource *res; 637 - int ret; 661 + int ret = 0; 662 + bool needs_unref = false; 638 663 639 664 if (p_res) 640 665 *p_res = NULL; ··· 660 683 if (ret) 661 684 return ret; 662 685 663 - res = vmw_user_resource_noref_lookup_handle 664 - (dev_priv, sw_context->fp->tfile, *id_loc, converter); 665 - if (IS_ERR(res)) { 686 + ret = vmw_user_resource_lookup_handle 687 + (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res); 688 + if (ret != 0) { 666 689 VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n", 667 690 (unsigned int) *id_loc); 668 - return PTR_ERR(res); 669 - } 670 - 671 - ret = 
vmw_execbuf_res_noref_val_add(sw_context, res, dirty); 672 - if (unlikely(ret != 0)) 673 691 return ret; 692 + } 693 + needs_unref = true; 694 + 695 + ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none); 696 + if (unlikely(ret != 0)) 697 + goto res_check_done; 674 698 675 699 if (rcache->valid && rcache->res == res) { 676 700 rcache->valid_handle = true; ··· 686 708 if (p_res) 687 709 *p_res = res; 688 710 689 - return 0; 711 + res_check_done: 712 + if (needs_unref) 713 + vmw_resource_unreference(&res); 714 + 715 + return ret; 690 716 } 691 717 692 718 /** ··· 1153 1171 int ret; 1154 1172 1155 1173 vmw_validation_preload_bo(sw_context->ctx); 1156 - vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); 1157 - if (IS_ERR(vmw_bo)) { 1158 - VMW_DEBUG_USER("Could not find or use MOB buffer.\n"); 1174 + ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); 1175 + if (ret != 0) { 1176 + drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n"); 1159 1177 return PTR_ERR(vmw_bo); 1160 1178 } 1161 1179 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); ··· 1207 1225 int ret; 1208 1226 1209 1227 vmw_validation_preload_bo(sw_context->ctx); 1210 - vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); 1211 - if (IS_ERR(vmw_bo)) { 1212 - VMW_DEBUG_USER("Could not find or use GMR region.\n"); 1228 + ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); 1229 + if (ret != 0) { 1230 + drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n"); 1213 1231 return PTR_ERR(vmw_bo); 1214 1232 } 1215 1233 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); ··· 2007 2025 res = vmw_shader_lookup(vmw_context_res_man(ctx), 2008 2026 cmd->body.shid, cmd->body.type); 2009 2027 if (!IS_ERR(res)) { 2010 - ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 2011 - VMW_RES_DIRTY_NONE); 2028 + ret = vmw_execbuf_res_val_add(sw_context, res, 2029 + VMW_RES_DIRTY_NONE, 2030 + vmw_val_add_flag_noctx); 2012 2031 
if (unlikely(ret != 0)) 2013 2032 return ret; 2014 2033 ··· 2256 2273 return PTR_ERR(res); 2257 2274 } 2258 2275 2259 - ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 2260 - VMW_RES_DIRTY_NONE); 2276 + ret = vmw_execbuf_res_val_add(sw_context, res, 2277 + VMW_RES_DIRTY_NONE, 2278 + vmw_val_add_flag_noctx); 2261 2279 if (ret) 2262 2280 return ret; 2263 2281 } ··· 2761 2777 return PTR_ERR(res); 2762 2778 } 2763 2779 2764 - ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 2765 - VMW_RES_DIRTY_NONE); 2780 + ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, 2781 + vmw_val_add_flag_noctx); 2766 2782 if (ret) { 2767 2783 VMW_DEBUG_USER("Error creating resource validation node.\n"); 2768 2784 return ret; ··· 3082 3098 3083 3099 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes); 3084 3100 3085 - ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 3086 - VMW_RES_DIRTY_NONE); 3101 + ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, 3102 + vmw_val_add_flag_noctx); 3087 3103 if (ret) { 3088 3104 DRM_ERROR("Error creating resource validation node.\n"); 3089 3105 return ret; ··· 3132 3148 return 0; 3133 3149 } 3134 3150 3135 - ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 3136 - VMW_RES_DIRTY_NONE); 3151 + ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, 3152 + vmw_val_add_flag_noctx); 3137 3153 if (ret) { 3138 3154 DRM_ERROR("Error creating resource validation node.\n"); 3139 3155 return ret; ··· 4050 4066 if (ret) 4051 4067 return ret; 4052 4068 4053 - res = vmw_user_resource_noref_lookup_handle 4069 + ret = vmw_user_resource_lookup_handle 4054 4070 (dev_priv, sw_context->fp->tfile, handle, 4055 - user_context_converter); 4056 - if (IS_ERR(res)) { 4071 + user_context_converter, &res); 4072 + if (ret != 0) { 4057 4073 VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n", 4058 4074 (unsigned int) handle); 4059 - return PTR_ERR(res); 4075 + return ret; 4060 4076 } 4061 4077 4062 - ret = 
vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET); 4063 - if (unlikely(ret != 0)) 4078 + ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET, 4079 + vmw_val_add_flag_none); 4080 + if (unlikely(ret != 0)) { 4081 + vmw_resource_unreference(&res); 4064 4082 return ret; 4083 + } 4065 4084 4066 4085 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res); 4067 4086 sw_context->man = vmw_context_res_man(res); 4068 4087 4088 + vmw_resource_unreference(&res); 4069 4089 return 0; 4070 4090 } 4071 4091
-33
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 281 281 return ret; 282 282 } 283 283 284 - /** 285 - * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a 286 - * TTM user-space handle and perform basic type checks 287 - * 288 - * @dev_priv: Pointer to a device private struct 289 - * @tfile: Pointer to a struct ttm_object_file identifying the caller 290 - * @handle: The TTM user-space handle 291 - * @converter: Pointer to an object describing the resource type 292 - * 293 - * If the handle can't be found or is associated with an incorrect resource 294 - * type, -EINVAL will be returned. 295 - */ 296 - struct vmw_resource * 297 - vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, 298 - struct ttm_object_file *tfile, 299 - uint32_t handle, 300 - const struct vmw_user_resource_conv 301 - *converter) 302 - { 303 - struct ttm_base_object *base; 304 - 305 - base = ttm_base_object_noref_lookup(tfile, handle); 306 - if (!base) 307 - return ERR_PTR(-ESRCH); 308 - 309 - if (unlikely(ttm_base_object_type(base) != converter->object_type)) { 310 - ttm_base_object_noref_release(); 311 - return ERR_PTR(-EINVAL); 312 - } 313 - 314 - return converter->base_obj_to_res(base); 315 - } 316 - 317 284 /* 318 285 * Helper function that looks either a surface or bo. 319 286 *
+1 -1
drivers/hid/amd-sfh-hid/amd_sfh_client.c
··· 282 282 } 283 283 rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]); 284 284 if (rc) 285 - return rc; 285 + goto cleanup; 286 286 mp2_ops->start(privdata, info); 287 287 status = amd_sfh_wait_for_response 288 288 (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+1 -1
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
··· 160 160 } 161 161 rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]); 162 162 if (rc) 163 - return rc; 163 + goto cleanup; 164 164 165 165 writel(0, privdata->mmio + AMD_P2C_MSG(0)); 166 166 mp2_ops->start(privdata, info);
+9 -8
drivers/hid/hid-betopff.c
··· 60 60 struct list_head *report_list = 61 61 &hid->report_enum[HID_OUTPUT_REPORT].report_list; 62 62 struct input_dev *dev; 63 - int field_count = 0; 64 63 int error; 65 64 int i, j; 66 65 ··· 85 86 * ----------------------------------------- 86 87 * Do init them with default value. 87 88 */ 89 + if (report->maxfield < 4) { 90 + hid_err(hid, "not enough fields in the report: %d\n", 91 + report->maxfield); 92 + return -ENODEV; 93 + } 88 94 for (i = 0; i < report->maxfield; i++) { 95 + if (report->field[i]->report_count < 1) { 96 + hid_err(hid, "no values in the field\n"); 97 + return -ENODEV; 98 + } 89 99 for (j = 0; j < report->field[i]->report_count; j++) { 90 100 report->field[i]->value[j] = 0x00; 91 - field_count++; 92 101 } 93 - } 94 - 95 - if (field_count < 4) { 96 - hid_err(hid, "not enough fields in the report: %d\n", 97 - field_count); 98 - return -ENODEV; 99 102 } 100 103 101 104 betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
+5
drivers/hid/hid-bigbenff.c
··· 344 344 } 345 345 346 346 report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; 347 + if (list_empty(report_list)) { 348 + hid_err(hid, "no output report found\n"); 349 + error = -ENODEV; 350 + goto error_hw_stop; 351 + } 347 352 bigben->report = list_entry(report_list->next, 348 353 struct hid_report, list); 349 354
+2 -2
drivers/hid/hid-core.c
··· 993 993 * Validating on id 0 means we should examine the first 994 994 * report in the list. 995 995 */ 996 - report = list_entry( 997 - hid->report_enum[type].report_list.next, 996 + report = list_first_entry_or_null( 997 + &hid->report_enum[type].report_list, 998 998 struct hid_report, list); 999 999 } else { 1000 1000 report = hid->report_enum[type].report_id_hash[id];
+1 -1
drivers/hid/hid-ids.h
··· 274 274 #define USB_DEVICE_ID_CH_AXIS_295 0x001c 275 275 276 276 #define USB_VENDOR_ID_CHERRY 0x046a 277 - #define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c 278 277 #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 279 278 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027 280 279 ··· 1294 1295 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540 0x0075 1295 1296 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640 0x0094 1296 1297 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01 0x0042 1298 + #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2 0x0905 1297 1299 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L 0x0935 1298 1300 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S 0x0909 1299 1301 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
+63
drivers/hid/hid-playstation.c
··· 944 944 945 945 static int dualsense_get_calibration_data(struct dualsense *ds) 946 946 { 947 + struct hid_device *hdev = ds->base.hdev; 947 948 short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus; 948 949 short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus; 949 950 short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus; ··· 955 954 int speed_2x; 956 955 int range_2g; 957 956 int ret = 0; 957 + int i; 958 958 uint8_t *buf; 959 959 960 960 buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL); ··· 1008 1006 ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus; 1009 1007 1010 1008 /* 1009 + * Sanity check gyro calibration data. This is needed to prevent crashes 1010 + * during report handling of virtual, clone or broken devices not implementing 1011 + * calibration data properly. 1012 + */ 1013 + for (i = 0; i < ARRAY_SIZE(ds->gyro_calib_data); i++) { 1014 + if (ds->gyro_calib_data[i].sens_denom == 0) { 1015 + hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.", 1016 + ds->gyro_calib_data[i].abs_code); 1017 + ds->gyro_calib_data[i].bias = 0; 1018 + ds->gyro_calib_data[i].sens_numer = DS_GYRO_RANGE; 1019 + ds->gyro_calib_data[i].sens_denom = S16_MAX; 1020 + } 1021 + } 1022 + 1023 + /* 1011 1024 * Set accelerometer calibration and normalization parameters. 1012 1025 * Data values will be normalized to 1/DS_ACC_RES_PER_G g. 1013 1026 */ ··· 1043 1026 ds->accel_calib_data[2].bias = acc_z_plus - range_2g / 2; 1044 1027 ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G; 1045 1028 ds->accel_calib_data[2].sens_denom = range_2g; 1029 + 1030 + /* 1031 + * Sanity check accelerometer calibration data. This is needed to prevent crashes 1032 + * during report handling of virtual, clone or broken devices not implementing calibration 1033 + * data properly. 
1034 + */ 1035 + for (i = 0; i < ARRAY_SIZE(ds->accel_calib_data); i++) { 1036 + if (ds->accel_calib_data[i].sens_denom == 0) { 1037 + hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.", 1038 + ds->accel_calib_data[i].abs_code); 1039 + ds->accel_calib_data[i].bias = 0; 1040 + ds->accel_calib_data[i].sens_numer = DS_ACC_RANGE; 1041 + ds->accel_calib_data[i].sens_denom = S16_MAX; 1042 + } 1043 + } 1046 1044 1047 1045 err_free: 1048 1046 kfree(buf); ··· 1769 1737 int speed_2x; 1770 1738 int range_2g; 1771 1739 int ret = 0; 1740 + int i; 1772 1741 uint8_t *buf; 1773 1742 1774 1743 if (ds4->base.hdev->bus == BUS_USB) { ··· 1864 1831 ds4->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus; 1865 1832 1866 1833 /* 1834 + * Sanity check gyro calibration data. This is needed to prevent crashes 1835 + * during report handling of virtual, clone or broken devices not implementing 1836 + * calibration data properly. 1837 + */ 1838 + for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) { 1839 + if (ds4->gyro_calib_data[i].sens_denom == 0) { 1840 + hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.", 1841 + ds4->gyro_calib_data[i].abs_code); 1842 + ds4->gyro_calib_data[i].bias = 0; 1843 + ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE; 1844 + ds4->gyro_calib_data[i].sens_denom = S16_MAX; 1845 + } 1846 + } 1847 + 1848 + /* 1867 1849 * Set accelerometer calibration and normalization parameters. 1868 1850 * Data values will be normalized to 1/DS4_ACC_RES_PER_G g. 1869 1851 */ ··· 1899 1851 ds4->accel_calib_data[2].bias = acc_z_plus - range_2g / 2; 1900 1852 ds4->accel_calib_data[2].sens_numer = 2*DS4_ACC_RES_PER_G; 1901 1853 ds4->accel_calib_data[2].sens_denom = range_2g; 1854 + 1855 + /* 1856 + * Sanity check accelerometer calibration data. 
This is needed to prevent crashes 1857 + * during report handling of virtual, clone or broken devices not implementing calibration 1858 + * data properly. 1859 + */ 1860 + for (i = 0; i < ARRAY_SIZE(ds4->accel_calib_data); i++) { 1861 + if (ds4->accel_calib_data[i].sens_denom == 0) { 1862 + hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.", 1863 + ds4->accel_calib_data[i].abs_code); 1864 + ds4->accel_calib_data[i].bias = 0; 1865 + ds4->accel_calib_data[i].sens_numer = DS4_ACC_RANGE; 1866 + ds4->accel_calib_data[i].sens_denom = S16_MAX; 1867 + } 1868 + } 1902 1869 1903 1870 err_free: 1904 1871 kfree(buf);
-1
drivers/hid/hid-quirks.c
··· 54 54 { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET }, 55 55 { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET }, 56 56 { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET }, 57 - { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL }, 58 57 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS }, 59 58 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 60 59 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+2
drivers/hid/hid-uclogic-core.c
··· 526 526 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, 527 527 USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01) }, 528 528 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, 529 + USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2) }, 530 + { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, 529 531 USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) }, 530 532 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, 531 533 USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S) },
+2
drivers/hid/hid-uclogic-params.c
··· 1656 1656 case VID_PID(USB_VENDOR_ID_UGEE, 1657 1657 USB_DEVICE_ID_UGEE_PARBLO_A610_PRO): 1658 1658 case VID_PID(USB_VENDOR_ID_UGEE, 1659 + USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2): 1660 + case VID_PID(USB_VENDOR_ID_UGEE, 1659 1661 USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L): 1660 1662 case VID_PID(USB_VENDOR_ID_UGEE, 1661 1663 USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S):
+10
drivers/hid/intel-ish-hid/ishtp/dma-if.c
··· 104 104 int required_slots = (size / DMA_SLOT_SIZE) 105 105 + 1 * (size % DMA_SLOT_SIZE != 0); 106 106 107 + if (!dev->ishtp_dma_tx_map) { 108 + dev_err(dev->devc, "Fail to allocate Tx map\n"); 109 + return NULL; 110 + } 111 + 107 112 spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags); 108 113 for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) { 109 114 free = 1; ··· 152 147 153 148 if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) { 154 149 dev_err(dev->devc, "Bad DMA Tx ack address\n"); 150 + return; 151 + } 152 + 153 + if (!dev->ishtp_dma_tx_map) { 154 + dev_err(dev->devc, "Fail to allocate Tx map\n"); 155 155 return; 156 156 } 157 157
+3 -1
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
··· 3858 3858 3859 3859 static void arm_smmu_device_shutdown(struct platform_device *pdev) 3860 3860 { 3861 - arm_smmu_device_remove(pdev); 3861 + struct arm_smmu_device *smmu = platform_get_drvdata(pdev); 3862 + 3863 + arm_smmu_device_disable(smmu); 3862 3864 } 3863 3865 3864 3866 static const struct of_device_id arm_smmu_of_match[] = {
+22 -10
drivers/iommu/arm/arm-smmu/arm-smmu.c
··· 1316 1316 1317 1317 switch (cap) { 1318 1318 case IOMMU_CAP_CACHE_COHERENCY: 1319 - /* Assume that a coherent TCU implies coherent TBUs */ 1320 - return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; 1319 + /* 1320 + * It's overwhelmingly the case in practice that when the pagetable 1321 + * walk interface is connected to a coherent interconnect, all the 1322 + * translation interfaces are too. Furthermore if the device is 1323 + * natively coherent, then its translation interface must also be. 1324 + */ 1325 + return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK || 1326 + device_get_dma_attr(dev) == DEV_DMA_COHERENT; 1321 1327 case IOMMU_CAP_NOEXEC: 1322 1328 return true; 1323 1329 default: ··· 2191 2185 return 0; 2192 2186 } 2193 2187 2194 - static int arm_smmu_device_remove(struct platform_device *pdev) 2188 + static void arm_smmu_device_shutdown(struct platform_device *pdev) 2195 2189 { 2196 2190 struct arm_smmu_device *smmu = platform_get_drvdata(pdev); 2197 2191 2198 2192 if (!smmu) 2199 - return -ENODEV; 2193 + return; 2200 2194 2201 2195 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) 2202 2196 dev_notice(&pdev->dev, "disabling translation\n"); 2203 - 2204 - iommu_device_unregister(&smmu->iommu); 2205 - iommu_device_sysfs_remove(&smmu->iommu); 2206 2197 2207 2198 arm_smmu_rpm_get(smmu); 2208 2199 /* Turn the thing off */ ··· 2212 2209 clk_bulk_disable(smmu->num_clks, smmu->clks); 2213 2210 2214 2211 clk_bulk_unprepare(smmu->num_clks, smmu->clks); 2215 - return 0; 2216 2212 } 2217 2213 2218 - static void arm_smmu_device_shutdown(struct platform_device *pdev) 2214 + static int arm_smmu_device_remove(struct platform_device *pdev) 2219 2215 { 2220 - arm_smmu_device_remove(pdev); 2216 + struct arm_smmu_device *smmu = platform_get_drvdata(pdev); 2217 + 2218 + if (!smmu) 2219 + return -ENODEV; 2220 + 2221 + iommu_device_unregister(&smmu->iommu); 2222 + iommu_device_sysfs_remove(&smmu->iommu); 2223 + 2224 + arm_smmu_device_shutdown(pdev); 2225 + 
2226 + return 0; 2221 2227 } 2222 2228 2223 2229 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
+5 -3
drivers/iommu/iommu.c
··· 3185 3185 */ 3186 3186 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3187 3187 { 3188 - struct iommu_group *group = iommu_group_get(dev); 3188 + struct iommu_group *group; 3189 3189 int ret = 0; 3190 3190 3191 - if (!group) 3192 - return -ENODEV; 3193 3191 if (WARN_ON(!owner)) 3194 3192 return -EINVAL; 3193 + 3194 + group = iommu_group_get(dev); 3195 + if (!group) 3196 + return -ENODEV; 3195 3197 3196 3198 mutex_lock(&group->mutex); 3197 3199 if (group->owner_cnt) {
+2 -2
drivers/iommu/iova.c
··· 197 197 198 198 curr = __get_cached_rbnode(iovad, limit_pfn); 199 199 curr_iova = to_iova(curr); 200 - retry_pfn = curr_iova->pfn_hi + 1; 200 + retry_pfn = curr_iova->pfn_hi; 201 201 202 202 retry: 203 203 do { ··· 211 211 if (high_pfn < size || new_pfn < low_pfn) { 212 212 if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) { 213 213 high_pfn = limit_pfn; 214 - low_pfn = retry_pfn; 214 + low_pfn = retry_pfn + 1; 215 215 curr = iova_find_limit(iovad, limit_pfn); 216 216 curr_iova = to_iova(curr); 217 217 goto retry;
+3 -1
drivers/iommu/mtk_iommu_v1.c
··· 683 683 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, 684 684 dev_name(&pdev->dev)); 685 685 if (ret) 686 - return ret; 686 + goto out_clk_unprepare; 687 687 688 688 ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev); 689 689 if (ret) ··· 698 698 iommu_device_unregister(&data->iommu); 699 699 out_sysfs_remove: 700 700 iommu_device_sysfs_remove(&data->iommu); 701 + out_clk_unprepare: 702 + clk_disable_unprepare(data->bclk); 701 703 return ret; 702 704 } 703 705
+2 -2
drivers/net/dsa/microchip/ksz9477.c
··· 540 540 ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]); 541 541 542 542 /* clear forwarding port */ 543 - alu_table[2] &= ~BIT(port); 543 + alu_table[1] &= ~BIT(port); 544 544 545 545 /* if there is no port to forward, clear table */ 546 - if ((alu_table[2] & ALU_V_PORT_MAP) == 0) { 546 + if ((alu_table[1] & ALU_V_PORT_MAP) == 0) { 547 547 alu_table[0] = 0; 548 548 alu_table[1] = 0; 549 549 alu_table[2] = 0;
+15 -8
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 524 524 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); 525 525 } 526 526 527 + static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata) 528 + { 529 + unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 530 + 531 + /* From MAC ver 30H the TFCR is per priority, instead of per queue */ 532 + if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) 533 + return max_q_count; 534 + else 535 + return min_t(unsigned int, pdata->tx_q_count, max_q_count); 536 + } 537 + 527 538 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) 528 539 { 529 - unsigned int max_q_count, q_count; 530 540 unsigned int reg, reg_val; 531 - unsigned int i; 541 + unsigned int i, q_count; 532 542 533 543 /* Clear MTL flow control */ 534 544 for (i = 0; i < pdata->rx_q_count; i++) 535 545 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); 536 546 537 547 /* Clear MAC flow control */ 538 - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 539 - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 548 + q_count = xgbe_get_fc_queue_count(pdata); 540 549 reg = MAC_Q0TFCR; 541 550 for (i = 0; i < q_count; i++) { 542 551 reg_val = XGMAC_IOREAD(pdata, reg); ··· 562 553 { 563 554 struct ieee_pfc *pfc = pdata->pfc; 564 555 struct ieee_ets *ets = pdata->ets; 565 - unsigned int max_q_count, q_count; 566 556 unsigned int reg, reg_val; 567 - unsigned int i; 557 + unsigned int i, q_count; 568 558 569 559 /* Set MTL flow control */ 570 560 for (i = 0; i < pdata->rx_q_count; i++) { ··· 587 579 } 588 580 589 581 /* Set MAC flow control */ 590 - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 591 - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 582 + q_count = xgbe_get_fc_queue_count(pdata); 592 583 reg = MAC_Q0TFCR; 593 584 for (i = 0; i < q_count; i++) { 594 585 reg_val = XGMAC_IOREAD(pdata, reg);
+24
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
··· 508 508 reg |= XGBE_KR_TRAINING_ENABLE; 509 509 reg |= XGBE_KR_TRAINING_START; 510 510 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); 511 + pdata->kr_start_time = jiffies; 511 512 512 513 netif_dbg(pdata, link, pdata->netdev, 513 514 "KR training initiated\n"); ··· 644 643 xgbe_an_disable(pdata); 645 644 646 645 xgbe_switch_mode(pdata); 646 + 647 + pdata->an_result = XGBE_AN_READY; 647 648 648 649 xgbe_an_restart(pdata); 649 650 ··· 1295 1292 static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata) 1296 1293 { 1297 1294 unsigned long link_timeout; 1295 + unsigned long kr_time; 1296 + int wait; 1298 1297 1299 1298 link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ); 1300 1299 if (time_after(jiffies, link_timeout)) { 1300 + if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) && 1301 + pdata->phy.autoneg == AUTONEG_ENABLE) { 1302 + /* AN restart should not happen while KR training is in progress. 1303 + * The while loop ensures no AN restart during KR training, 1304 + * waits up to 500ms and AN restart is triggered only if KR 1305 + * training is failed. 1306 + */ 1307 + wait = XGBE_KR_TRAINING_WAIT_ITER; 1308 + while (wait--) { 1309 + kr_time = pdata->kr_start_time + 1310 + msecs_to_jiffies(XGBE_AN_MS_TIMEOUT); 1311 + if (time_after(jiffies, kr_time)) 1312 + break; 1313 + /* AN restart is not required, if AN result is COMPLETE */ 1314 + if (pdata->an_result == XGBE_AN_COMPLETE) 1315 + return; 1316 + usleep_range(10000, 11000); 1317 + } 1318 + } 1301 1319 netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n"); 1302 1320 xgbe_phy_config_aneg(pdata); 1303 1321 }
+2
drivers/net/ethernet/amd/xgbe/xgbe.h
··· 290 290 /* Auto-negotiation */ 291 291 #define XGBE_AN_MS_TIMEOUT 500 292 292 #define XGBE_LINK_TIMEOUT 5 293 + #define XGBE_KR_TRAINING_WAIT_ITER 50 293 294 294 295 #define XGBE_SGMII_AN_LINK_STATUS BIT(1) 295 296 #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) ··· 1286 1285 unsigned int parallel_detect; 1287 1286 unsigned int fec_ability; 1288 1287 unsigned long an_start; 1288 + unsigned long kr_start_time; 1289 1289 enum xgbe_an_mode an_mode; 1290 1290 1291 1291 /* I2C support */
+4 -9
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 3969 3969 test_info->timeout = HWRM_CMD_TIMEOUT; 3970 3970 for (i = 0; i < bp->num_tests; i++) { 3971 3971 char *str = test_info->string[i]; 3972 - char *fw_str = resp->test0_name + i * 32; 3972 + char *fw_str = resp->test_name[i]; 3973 3973 3974 3974 if (i == BNXT_MACLPBK_TEST_IDX) { 3975 3975 strcpy(str, "Mac loopback test (offline)"); ··· 3980 3980 } else if (i == BNXT_IRQ_TEST_IDX) { 3981 3981 strcpy(str, "Interrupt_test (offline)"); 3982 3982 } else { 3983 - strscpy(str, fw_str, ETH_GSTRING_LEN); 3984 - strncat(str, " test", ETH_GSTRING_LEN - strlen(str)); 3985 - if (test_info->offline_mask & (1 << i)) 3986 - strncat(str, " (offline)", 3987 - ETH_GSTRING_LEN - strlen(str)); 3988 - else 3989 - strncat(str, " (online)", 3990 - ETH_GSTRING_LEN - strlen(str)); 3983 + snprintf(str, ETH_GSTRING_LEN, "%s test (%s)", 3984 + fw_str, test_info->offline_mask & (1 << i) ? 3985 + "offline" : "online"); 3991 3986 } 3992 3987 } 3993 3988
+1 -8
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
··· 10249 10249 u8 unused_0; 10250 10250 __le16 test_timeout; 10251 10251 u8 unused_1[2]; 10252 - char test0_name[32]; 10253 - char test1_name[32]; 10254 - char test2_name[32]; 10255 - char test3_name[32]; 10256 - char test4_name[32]; 10257 - char test5_name[32]; 10258 - char test6_name[32]; 10259 - char test7_name[32]; 10252 + char test_name[8][32]; 10260 10253 u8 eyescope_target_BER_support; 10261 10254 #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL 10262 10255 #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+1 -8
drivers/net/ethernet/cadence/macb_main.c
··· 2238 2238 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || 2239 2239 skb_is_nonlinear(*skb); 2240 2240 int padlen = ETH_ZLEN - (*skb)->len; 2241 - int headroom = skb_headroom(*skb); 2242 2241 int tailroom = skb_tailroom(*skb); 2243 2242 struct sk_buff *nskb; 2244 2243 u32 fcs; ··· 2251 2252 /* FCS could be appeded to tailroom. */ 2252 2253 if (tailroom >= ETH_FCS_LEN) 2253 2254 goto add_fcs; 2254 - /* FCS could be appeded by moving data to headroom. */ 2255 - else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) 2256 - padlen = 0; 2257 2255 /* No room for FCS, need to reallocate skb. */ 2258 2256 else 2259 2257 padlen = ETH_FCS_LEN; ··· 2259 2263 padlen += ETH_FCS_LEN; 2260 2264 } 2261 2265 2262 - if (!cloned && headroom + tailroom >= padlen) { 2263 - (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 2264 - skb_set_tail_pointer(*skb, (*skb)->len); 2265 - } else { 2266 + if (cloned || tailroom < padlen) { 2266 2267 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 2267 2268 if (!nskb) 2268 2269 return -ENOMEM;
+2 -2
drivers/net/ethernet/freescale/enetc/enetc.c
··· 2391 2391 2392 2392 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); 2393 2393 2394 - netif_tx_lock(priv->ndev); 2394 + netif_tx_lock_bh(priv->ndev); 2395 2395 2396 2396 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); 2397 2397 skb = skb_dequeue(&priv->tx_skbs); 2398 2398 if (skb) 2399 2399 enetc_start_xmit(skb, priv->ndev); 2400 2400 2401 - netif_tx_unlock(priv->ndev); 2401 + netif_tx_unlock_bh(priv->ndev); 2402 2402 } 2403 2403 2404 2404 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+2 -9
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
··· 1012 1012 rbpool = cq->rbpool; 1013 1013 free_ptrs = cq->pool_ptrs; 1014 1014 1015 - get_cpu(); 1016 1015 while (cq->pool_ptrs) { 1017 1016 if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) { 1018 1017 /* Schedule a WQ if we fails to free atleast half of the ··· 1031 1032 pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); 1032 1033 cq->pool_ptrs--; 1033 1034 } 1034 - put_cpu(); 1035 1035 cq->refill_task_sched = false; 1036 1036 } 1037 1037 ··· 1368 1370 if (err) 1369 1371 goto fail; 1370 1372 1371 - get_cpu(); 1372 1373 /* Allocate pointers and free them to aura/pool */ 1373 1374 for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { 1374 1375 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); ··· 1391 1394 } 1392 1395 1393 1396 err_mem: 1394 - put_cpu(); 1395 1397 return err ? -ENOMEM : 0; 1396 1398 1397 1399 fail: ··· 1431 1435 if (err) 1432 1436 goto fail; 1433 1437 1434 - get_cpu(); 1435 1438 /* Allocate pointers and free them to aura/pool */ 1436 1439 for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { 1437 1440 pool = &pfvf->qset.pool[pool_id]; 1438 1441 for (ptr = 0; ptr < num_ptrs; ptr++) { 1439 1442 err = otx2_alloc_rbuf(pfvf, pool, &bufptr); 1440 1443 if (err) 1441 - goto err_mem; 1444 + return -ENOMEM; 1442 1445 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, 1443 1446 bufptr + OTX2_HEAD_ROOM); 1444 1447 } 1445 1448 } 1446 - err_mem: 1447 - put_cpu(); 1448 - return err ? -ENOMEM : 0; 1449 + return 0; 1449 1450 fail: 1450 1451 otx2_mbox_reset(&pfvf->mbox.mbox, 0); 1451 1452 otx2_aura_pool_free(pfvf);
+2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 736 736 u64 ptrs[2]; 737 737 738 738 ptrs[1] = buf; 739 + get_cpu(); 739 740 /* Free only one buffer at time during init and teardown */ 740 741 __cn10k_aura_freeptr(pfvf, aura, ptrs, 2); 742 + put_cpu(); 741 743 } 742 744 743 745 /* Alloc pointer from pool/aura */
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
··· 637 637 if (child->bw_share == old_bw_share) 638 638 continue; 639 639 640 - err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share, 640 + err_one = mlx5_qos_update_node(htb->mdev, child->bw_share, 641 641 child->max_average_bw, child->hw_id); 642 642 if (!err && err_one) { 643 643 err = err_one; ··· 671 671 mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share); 672 672 mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw); 673 673 674 - err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share, 674 + err = mlx5_qos_update_node(htb->mdev, bw_share, 675 675 max_average_bw, node->hw_id); 676 676 if (err) { 677 677 NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 583 583 { 584 584 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); 585 585 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 586 - bool unaligned = xsk ? xsk->unaligned : false; 587 586 u16 max_mtu_pkts; 588 587 589 588 if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) { ··· 600 601 * needed number of WQEs exceeds the maximum. 601 602 */ 602 603 max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE, 603 - mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned)); 604 + mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned)); 604 605 if (params->log_rq_mtu_frames > max_mtu_pkts) { 605 606 mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n", 606 607 1 << params->log_rq_mtu_frames, xsk->chunk_size);
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
··· 477 477 struct mlx5e_sample_flow *sample_flow; 478 478 struct mlx5e_sample_attr *sample_attr; 479 479 struct mlx5_flow_attr *pre_attr; 480 - u32 tunnel_id = attr->tunnel_id; 481 480 struct mlx5_eswitch *esw; 482 481 u32 default_tbl_id; 483 482 u32 obj_id; ··· 521 522 restore_obj.sample.group_id = sample_attr->group_num; 522 523 restore_obj.sample.rate = sample_attr->rate; 523 524 restore_obj.sample.trunc_size = sample_attr->trunc_size; 524 - restore_obj.sample.tunnel_id = tunnel_id; 525 + restore_obj.sample.tunnel_id = attr->tunnel_id; 525 526 err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id); 526 527 if (err) 527 528 goto err_obj_id; ··· 547 548 /* For decap action, do decap in the original flow table instead of the 548 549 * default flow table. 549 550 */ 550 - if (tunnel_id) 551 + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 551 552 pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 552 553 pre_attr->modify_hdr = sample_flow->restore->modify_hdr; 553 554 pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
+2 -5
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 122 122 u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)]; 123 123 dma_addr_t dma_addr; 124 124 struct mlx5_aso *aso; 125 - /* IPsec ASO caches data on every query call, 126 - * so in nested calls, we can use this boolean to save 127 - * recursive calls to mlx5e_ipsec_aso_query() 128 - */ 129 - u8 use_cache : 1; 125 + /* Protect ASO WQ access, as it is global to whole IPsec */ 126 + spinlock_t lock; 130 127 }; 131 128 132 129 struct mlx5e_ipsec {
+6 -6
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
··· 320 320 if (ret) 321 321 goto unlock; 322 322 323 - aso->use_cache = true; 324 323 if (attrs->esn_trigger && 325 324 !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) { 326 325 u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter); ··· 332 333 !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) || 333 334 !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable)) 334 335 xfrm_state_check_expire(sa_entry->x); 335 - aso->use_cache = false; 336 336 337 337 unlock: 338 338 spin_unlock(&sa_entry->x->lock); ··· 396 398 goto err_aso_create; 397 399 } 398 400 401 + spin_lock_init(&aso->lock); 399 402 ipsec->nb.notifier_call = mlx5e_ipsec_event; 400 403 mlx5_notifier_register(mdev, &ipsec->nb); 401 404 ··· 455 456 struct mlx5e_hw_objs *res; 456 457 struct mlx5_aso_wqe *wqe; 457 458 u8 ds_cnt; 459 + int ret; 458 460 459 461 lockdep_assert_held(&sa_entry->x->lock); 460 - if (aso->use_cache) 461 - return 0; 462 - 463 462 res = &mdev->mlx5e_res.hw_objs; 464 463 464 + spin_lock_bh(&aso->lock); 465 465 memset(aso->ctx, 0, sizeof(aso->ctx)); 466 466 wqe = mlx5_aso_get_wqe(aso->aso); 467 467 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); ··· 475 477 mlx5e_ipsec_aso_copy(ctrl, data); 476 478 477 479 mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl); 478 - return mlx5_aso_poll_cq(aso->aso, false); 480 + ret = mlx5_aso_poll_cq(aso->aso, false); 481 + spin_unlock_bh(&aso->lock); 482 + return ret; 479 483 } 480 484 481 485 void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+3
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 174 174 * it's different than the ht->mutex here. 175 175 */ 176 176 static struct lock_class_key tc_ht_lock_key; 177 + static struct lock_class_key tc_ht_wq_key; 177 178 178 179 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); 179 180 static void free_flow_post_acts(struct mlx5e_tc_flow *flow); ··· 5283 5282 return err; 5284 5283 5285 5284 lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); 5285 + lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); 5286 5286 5287 5287 mapping_id = mlx5_query_nic_system_image_guid(dev); 5288 5288 ··· 5396 5394 return err; 5397 5395 5398 5396 lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); 5397 + lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); 5399 5398 5400 5399 return 0; 5401 5400 }
+3 -15
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 22 22 }; 23 23 24 24 static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx, 25 - u32 parent_ix, u32 tsar_ix, 26 - u32 max_rate, u32 bw_share) 25 + u32 tsar_ix, u32 max_rate, u32 bw_share) 27 26 { 28 27 u32 bitmask = 0; 29 28 30 29 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) 31 30 return -EOPNOTSUPP; 32 31 33 - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix); 34 32 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); 35 33 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); 36 34 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; ··· 49 51 int err; 50 52 51 53 err = esw_qos_tsar_config(dev, sched_ctx, 52 - esw->qos.root_tsar_ix, group->tsar_ix, 54 + group->tsar_ix, 53 55 max_rate, bw_share); 54 56 if (err) 55 57 NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed"); ··· 65 67 struct netlink_ext_ack *extack) 66 68 { 67 69 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; 68 - struct mlx5_esw_rate_group *group = vport->qos.group; 69 70 struct mlx5_core_dev *dev = esw->dev; 70 - u32 parent_tsar_ix; 71 - void *vport_elem; 72 71 int err; 73 72 74 73 if (!vport->qos.enabled) 75 74 return -EIO; 76 75 77 - parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix; 78 - MLX5_SET(scheduling_context, sched_ctx, element_type, 79 - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); 80 - vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, 81 - element_attributes); 82 - MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); 83 - 84 - err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix, 76 + err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix, 85 77 max_rate, bw_share); 86 78 if (err) { 87 79 esw_warn(esw->dev,
+1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1462 1462 mlx5_lag_disable_change(esw->dev); 1463 1463 down_write(&esw->mode_lock); 1464 1464 mlx5_eswitch_disable_locked(esw); 1465 + esw->mode = MLX5_ESWITCH_LEGACY; 1465 1466 up_write(&esw->mode_lock); 1466 1467 mlx5_lag_enable_change(esw->dev); 1467 1468 }
+1
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 677 677 mutex_lock(&dev->intf_state_mutex); 678 678 if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) { 679 679 mlx5_core_err(dev, "health works are not permitted at this stage\n"); 680 + mutex_unlock(&dev->intf_state_mutex); 680 681 return; 681 682 } 682 683 mutex_unlock(&dev->intf_state_mutex);
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 2098 2098 } 2099 2099 } 2100 2100 2101 - static int __init init(void) 2101 + static int __init mlx5_init(void) 2102 2102 { 2103 2103 int err; 2104 2104 ··· 2133 2133 return err; 2134 2134 } 2135 2135 2136 - static void __exit cleanup(void) 2136 + static void __exit mlx5_cleanup(void) 2137 2137 { 2138 2138 mlx5e_cleanup(); 2139 2139 mlx5_sf_driver_unregister(); ··· 2141 2141 mlx5_unregister_debugfs(); 2142 2142 } 2143 2143 2144 - module_init(init); 2145 - module_exit(cleanup); 2144 + module_init(mlx5_init); 2145 + module_exit(mlx5_cleanup);
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/qos.c
··· 62 62 return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id); 63 63 } 64 64 65 - int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, 65 + int mlx5_qos_update_node(struct mlx5_core_dev *mdev, 66 66 u32 bw_share, u32 max_avg_bw, u32 id) 67 67 { 68 68 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; 69 69 u32 bitmask = 0; 70 70 71 - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); 72 71 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); 73 72 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); 74 73
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/qos.h
··· 23 23 int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, 24 24 u32 bw_share, u32 max_avg_bw, u32 *id); 25 25 int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id); 26 - int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share, 26 + int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share, 27 27 u32 max_avg_bw, u32 id); 28 28 int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id); 29 29
+8 -5
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
··· 1043 1043 lan966x->base_mac[5] &= 0xf0; 1044 1044 } 1045 1045 1046 - ports = device_get_named_child_node(&pdev->dev, "ethernet-ports"); 1047 - if (!ports) 1048 - return dev_err_probe(&pdev->dev, -ENODEV, 1049 - "no ethernet-ports child found\n"); 1050 - 1051 1046 err = lan966x_create_targets(pdev, lan966x); 1052 1047 if (err) 1053 1048 return dev_err_probe(&pdev->dev, err, ··· 1120 1125 } 1121 1126 } 1122 1127 1128 + ports = device_get_named_child_node(&pdev->dev, "ethernet-ports"); 1129 + if (!ports) 1130 + return dev_err_probe(&pdev->dev, -ENODEV, 1131 + "no ethernet-ports child found\n"); 1132 + 1123 1133 /* init switch */ 1124 1134 lan966x_init(lan966x); 1125 1135 lan966x_stats_init(lan966x); ··· 1162 1162 goto cleanup_ports; 1163 1163 } 1164 1164 1165 + fwnode_handle_put(ports); 1166 + 1165 1167 lan966x_mdb_init(lan966x); 1166 1168 err = lan966x_fdb_init(lan966x); 1167 1169 if (err) ··· 1193 1191 lan966x_fdb_deinit(lan966x); 1194 1192 1195 1193 cleanup_ports: 1194 + fwnode_handle_put(ports); 1196 1195 fwnode_handle_put(portnp); 1197 1196 1198 1197 lan966x_cleanup_ports(lan966x);
+14
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
··· 186 186 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp, 187 187 struct stmmac_safety_feature_cfg *safety_feat_cfg) 188 188 { 189 + struct stmmac_safety_feature_cfg all_safety_feats = { 190 + .tsoee = 1, 191 + .mrxpee = 1, 192 + .mestee = 1, 193 + .mrxee = 1, 194 + .mtxee = 1, 195 + .epsi = 1, 196 + .edpp = 1, 197 + .prtyen = 1, 198 + .tmouten = 1, 199 + }; 189 200 u32 value; 190 201 191 202 if (!asp) 192 203 return -EINVAL; 204 + 205 + if (!safety_feat_cfg) 206 + safety_feat_cfg = &all_safety_feats; 193 207 194 208 /* 1. Enable Safety Features */ 195 209 value = readl(ioaddr + MTL_ECC_CONTROL);
+4 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 551 551 p = (char *)priv + offsetof(struct stmmac_priv, 552 552 xstats.txq_stats[q].tx_pkt_n); 553 553 for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { 554 - *data++ = (*(u64 *)p); 555 - p += sizeof(u64 *); 554 + *data++ = (*(unsigned long *)p); 555 + p += sizeof(unsigned long); 556 556 } 557 557 } 558 558 for (q = 0; q < rx_cnt; q++) { 559 559 p = (char *)priv + offsetof(struct stmmac_priv, 560 560 xstats.rxq_stats[q].rx_pkt_n); 561 561 for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { 562 - *data++ = (*(u64 *)p); 563 - p += sizeof(u64 *); 562 + *data++ = (*(unsigned long *)p); 563 + p += sizeof(unsigned long); 564 564 } 565 565 } 566 566 }
+5
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1150 1150 int addr = priv->plat->phy_addr; 1151 1151 struct phy_device *phydev; 1152 1152 1153 + if (addr < 0) { 1154 + netdev_err(priv->dev, "no phy found\n"); 1155 + return -ENODEV; 1156 + } 1157 + 1153 1158 phydev = mdiobus_get_phy(priv->mii, addr); 1154 1159 if (!phydev) { 1155 1160 netdev_err(priv->dev, "no phy at addr %d\n", addr);
+10
drivers/net/ipa/ipa_interrupt.c
··· 152 152 ipa_interrupt_enabled_update(ipa); 153 153 } 154 154 155 + void ipa_interrupt_irq_disable(struct ipa *ipa) 156 + { 157 + disable_irq(ipa->interrupt->irq); 158 + } 159 + 160 + void ipa_interrupt_irq_enable(struct ipa *ipa) 161 + { 162 + enable_irq(ipa->interrupt->irq); 163 + } 164 + 155 165 /* Common function used to enable/disable TX_SUSPEND for an endpoint */ 156 166 static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, 157 167 u32 endpoint_id, bool enable)
+16
drivers/net/ipa/ipa_interrupt.h
··· 68 68 void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq); 69 69 70 70 /** 71 + * ipa_interrupt_irq_enable() - Enable IPA interrupts 72 + * @ipa: IPA pointer 73 + * 74 + * This enables the IPA interrupt line 75 + */ 76 + void ipa_interrupt_irq_enable(struct ipa *ipa); 77 + 78 + /** 79 + * ipa_interrupt_irq_disable() - Disable IPA interrupts 80 + * @ipa: IPA pointer 81 + * 82 + * This disables the IPA interrupt line 83 + */ 84 + void ipa_interrupt_irq_disable(struct ipa *ipa); 85 + 86 + /** 71 87 * ipa_interrupt_config() - Configure the IPA interrupt framework 72 88 * @ipa: IPA pointer 73 89 *
+17
drivers/net/ipa/ipa_power.c
··· 181 181 182 182 __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); 183 183 184 + /* Increment the disable depth to ensure that the IRQ won't 185 + * be re-enabled until the matching _enable call in 186 + * ipa_resume(). We do this to ensure that the interrupt 187 + * handler won't run whilst PM runtime is disabled. 188 + * 189 + * Note that disabling the IRQ is NOT the same as disabling 190 + * irq wake. If wakeup is enabled for the IPA then the IRQ 191 + * will still cause the system to wake up, see irq_set_irq_wake(). 192 + */ 193 + ipa_interrupt_irq_disable(ipa); 194 + 184 195 return pm_runtime_force_suspend(dev); 185 196 } 186 197 ··· 203 192 ret = pm_runtime_force_resume(dev); 204 193 205 194 __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); 195 + 196 + /* Now that PM runtime is enabled again it's safe 197 + * to turn the IRQ back on and process any data 198 + * that was received during suspend. 199 + */ 200 + ipa_interrupt_irq_enable(ipa); 206 201 207 202 return ret; 208 203 }
+6 -1
drivers/net/phy/mdio_bus.c
··· 109 109 110 110 struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) 111 111 { 112 - struct mdio_device *mdiodev = bus->mdio_map[addr]; 112 + struct mdio_device *mdiodev; 113 + 114 + if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map)) 115 + return NULL; 116 + 117 + mdiodev = bus->mdio_map[addr]; 113 118 114 119 if (!mdiodev) 115 120 return NULL;
-2
drivers/net/team/team.c
··· 1044 1044 goto err_port_enter; 1045 1045 } 1046 1046 } 1047 - port->dev->priv_flags |= IFF_NO_ADDRCONF; 1048 1047 1049 1048 return 0; 1050 1049 ··· 1057 1058 { 1058 1059 if (team->ops.port_leave) 1059 1060 team->ops.port_leave(team, port); 1060 - port->dev->priv_flags &= ~IFF_NO_ADDRCONF; 1061 1061 dev_put(team->dev); 1062 1062 } 1063 1063
+1 -1
drivers/net/usb/sr9700.c
··· 413 413 /* ignore the CRC length */ 414 414 len = (skb->data[1] | (skb->data[2] << 8)) - 4; 415 415 416 - if (len > ETH_FRAME_LEN || len > skb->len) 416 + if (len > ETH_FRAME_LEN || len > skb->len || len < 0) 417 417 return 0; 418 418 419 419 /* the last packet of current skb */
+4 -2
drivers/net/virtio_net.c
··· 1998 1998 */ 1999 1999 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 2000 2000 netif_stop_subqueue(dev, qnum); 2001 - if (!use_napi && 2002 - unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 2001 + if (use_napi) { 2002 + if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) 2003 + virtqueue_napi_schedule(&sq->napi, sq->vq); 2004 + } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 2003 2005 /* More just got used, free them then recheck. */ 2004 2006 free_old_xmit_skbs(sq, false); 2005 2007 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+4 -2
drivers/net/wan/fsl_ucc_hdlc.c
··· 1243 1243 free_dev: 1244 1244 free_netdev(dev); 1245 1245 undo_uhdlc_init: 1246 - iounmap(utdm->siram); 1246 + if (utdm) 1247 + iounmap(utdm->siram); 1247 1248 unmap_si_regs: 1248 - iounmap(utdm->si_regs); 1249 + if (utdm) 1250 + iounmap(utdm->si_regs); 1249 1251 free_utdm: 1250 1252 if (uhdlc_priv->tsa) 1251 1253 kfree(utdm);
+19 -18
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 7937 7937 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 7938 7938 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 7939 7939 7940 + if (chan->flags & IEEE80211_CHAN_DISABLED) 7941 + return -EINVAL; 7942 + 7940 7943 /* set_channel */ 7941 7944 chspec = channel_to_chanspec(&cfg->d11inf, chan); 7942 7945 if (chspec != INVCHANSPEC) { ··· 7964 7961 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 7965 7962 struct brcmf_dump_survey survey = {}; 7966 7963 struct ieee80211_supported_band *band; 7967 - struct ieee80211_channel *chan; 7964 + enum nl80211_band band_id; 7968 7965 struct cca_msrmnt_query req; 7969 7966 u32 noise; 7970 7967 int err; ··· 7977 7974 return -EBUSY; 7978 7975 } 7979 7976 7980 - band = wiphy->bands[NL80211_BAND_2GHZ]; 7981 - if (band && idx >= band->n_channels) { 7982 - idx -= band->n_channels; 7983 - band = NULL; 7984 - } 7977 + for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) { 7978 + band = wiphy->bands[band_id]; 7979 + if (!band) 7980 + continue; 7981 + if (idx >= band->n_channels) { 7982 + idx -= band->n_channels; 7983 + continue; 7984 + } 7985 7985 7986 - if (!band || idx >= band->n_channels) { 7987 - band = wiphy->bands[NL80211_BAND_5GHZ]; 7988 - if (idx >= band->n_channels) 7989 - return -ENOENT; 7986 + info->channel = &band->channels[idx]; 7987 + break; 7990 7988 } 7989 + if (band_id == NUM_NL80211_BANDS) 7990 + return -ENOENT; 7991 7991 7992 7992 /* Setting current channel to the requested channel */ 7993 - chan = &band->channels[idx]; 7994 - err = cfg80211_set_channel(wiphy, ndev, chan, NL80211_CHAN_HT20); 7995 - if (err) { 7996 - info->channel = chan; 7997 - info->filled = 0; 7993 + info->filled = 0; 7994 + if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20)) 7998 7995 return 0; 7999 - } 8000 7996 8001 7997 /* Disable mpc */ 8002 7998 brcmf_set_mpc(ifp, 0); ··· 8030 8028 if (err) 8031 8029 goto exit; 8032 8030 8033 - info->channel = chan; 8034 8031 info->noise = noise; 8035 8032 
info->time = ACS_MSRMNT_DELAY; 8036 8033 info->time_busy = ACS_MSRMNT_DELAY - survey.idle; ··· 8041 8040 SURVEY_INFO_TIME_TX; 8042 8041 8043 8042 brcmf_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n", 8044 - ieee80211_frequency_to_channel(chan->center_freq), 8043 + ieee80211_frequency_to_channel(info->channel->center_freq), 8045 8044 ACS_MSRMNT_DELAY); 8046 8045 brcmf_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n", 8047 8046 info->noise, info->time_busy, info->time_rx, info->time_tx);
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1228 1228 BRCMF_NROF_H2D_COMMON_MSGRINGS; 1229 1229 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS; 1230 1230 } 1231 - if (max_flowrings > 256) { 1231 + if (max_flowrings > 512) { 1232 1232 brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings); 1233 1233 return -EIO; 1234 1234 }
+78 -49
drivers/net/wireless/mediatek/mt76/dma.c
··· 206 206 } 207 207 208 208 static int 209 + mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, 210 + struct mt76_queue_buf *buf, void *data) 211 + { 212 + struct mt76_desc *desc = &q->desc[q->head]; 213 + struct mt76_queue_entry *entry = &q->entry[q->head]; 214 + struct mt76_txwi_cache *txwi = NULL; 215 + u32 buf1 = 0, ctrl; 216 + int idx = q->head; 217 + int rx_token; 218 + 219 + ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); 220 + 221 + if ((q->flags & MT_QFLAG_WED) && 222 + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { 223 + txwi = mt76_get_rxwi(dev); 224 + if (!txwi) 225 + return -ENOMEM; 226 + 227 + rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr); 228 + if (rx_token < 0) { 229 + mt76_put_rxwi(dev, txwi); 230 + return -ENOMEM; 231 + } 232 + 233 + buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token); 234 + ctrl |= MT_DMA_CTL_TO_HOST; 235 + } 236 + 237 + WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr)); 238 + WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); 239 + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); 240 + WRITE_ONCE(desc->info, 0); 241 + 242 + entry->dma_addr[0] = buf->addr; 243 + entry->dma_len[0] = buf->len; 244 + entry->txwi = txwi; 245 + entry->buf = data; 246 + entry->wcid = 0xffff; 247 + entry->skip_buf1 = true; 248 + q->head = (q->head + 1) % q->ndesc; 249 + q->queued++; 250 + 251 + return idx; 252 + } 253 + 254 + static int 209 255 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, 210 256 struct mt76_queue_buf *buf, int nbufs, u32 info, 211 257 struct sk_buff *skb, void *txwi) 212 258 { 213 259 struct mt76_queue_entry *entry; 214 260 struct mt76_desc *desc; 215 - u32 ctrl; 216 261 int i, idx = -1; 262 + u32 ctrl, next; 263 + 264 + if (txwi) { 265 + q->entry[q->head].txwi = DMA_DUMMY_DATA; 266 + q->entry[q->head].skip_buf0 = true; 267 + } 217 268 218 269 for (i = 0; i < nbufs; i += 2, buf += 2) { 219 270 u32 buf0 = buf[0].addr, buf1 = 0; 220 271 221 272 idx = q->head; 222 - q->head = (q->head + 1) % 
q->ndesc; 273 + next = (q->head + 1) % q->ndesc; 223 274 224 275 desc = &q->desc[idx]; 225 276 entry = &q->entry[idx]; 226 277 227 - if ((q->flags & MT_QFLAG_WED) && 228 - FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { 229 - struct mt76_txwi_cache *t = txwi; 230 - int rx_token; 278 + if (buf[0].skip_unmap) 279 + entry->skip_buf0 = true; 280 + entry->skip_buf1 = i == nbufs - 1; 231 281 232 - if (!t) 233 - return -ENOMEM; 282 + entry->dma_addr[0] = buf[0].addr; 283 + entry->dma_len[0] = buf[0].len; 234 284 235 - rx_token = mt76_rx_token_consume(dev, (void *)skb, t, 236 - buf[0].addr); 237 - buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token); 238 - ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) | 239 - MT_DMA_CTL_TO_HOST; 240 - } else { 241 - if (txwi) { 242 - q->entry[q->head].txwi = DMA_DUMMY_DATA; 243 - q->entry[q->head].skip_buf0 = true; 244 - } 245 - 246 - if (buf[0].skip_unmap) 247 - entry->skip_buf0 = true; 248 - entry->skip_buf1 = i == nbufs - 1; 249 - 250 - entry->dma_addr[0] = buf[0].addr; 251 - entry->dma_len[0] = buf[0].len; 252 - 253 - ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); 254 - if (i < nbufs - 1) { 255 - entry->dma_addr[1] = buf[1].addr; 256 - entry->dma_len[1] = buf[1].len; 257 - buf1 = buf[1].addr; 258 - ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); 259 - if (buf[1].skip_unmap) 260 - entry->skip_buf1 = true; 261 - } 262 - 263 - if (i == nbufs - 1) 264 - ctrl |= MT_DMA_CTL_LAST_SEC0; 265 - else if (i == nbufs - 2) 266 - ctrl |= MT_DMA_CTL_LAST_SEC1; 285 + ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); 286 + if (i < nbufs - 1) { 287 + entry->dma_addr[1] = buf[1].addr; 288 + entry->dma_len[1] = buf[1].len; 289 + buf1 = buf[1].addr; 290 + ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); 291 + if (buf[1].skip_unmap) 292 + entry->skip_buf1 = true; 267 293 } 294 + 295 + if (i == nbufs - 1) 296 + ctrl |= MT_DMA_CTL_LAST_SEC0; 297 + else if (i == nbufs - 2) 298 + ctrl |= MT_DMA_CTL_LAST_SEC1; 268 299 269 300 
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); 270 301 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); 271 302 WRITE_ONCE(desc->info, cpu_to_le32(info)); 272 303 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); 273 304 305 + q->head = next; 274 306 q->queued++; 275 307 } 276 308 ··· 609 577 spin_lock_bh(&q->lock); 610 578 611 579 while (q->queued < q->ndesc - 1) { 612 - struct mt76_txwi_cache *t = NULL; 613 580 struct mt76_queue_buf qbuf; 614 581 void *buf = NULL; 615 - 616 - if ((q->flags & MT_QFLAG_WED) && 617 - FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { 618 - t = mt76_get_rxwi(dev); 619 - if (!t) 620 - break; 621 - } 622 582 623 583 buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC); 624 584 if (!buf) ··· 625 601 qbuf.addr = addr + offset; 626 602 qbuf.len = len - offset; 627 603 qbuf.skip_unmap = false; 628 - mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t); 604 + if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { 605 + dma_unmap_single(dev->dma_dev, addr, len, 606 + DMA_FROM_DEVICE); 607 + skb_free_frag(buf); 608 + break; 609 + } 629 610 frames++; 630 611 } 631 612
+7
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
··· 653 653 654 654 desc->buf0 = cpu_to_le32(phy_addr); 655 655 token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr); 656 + if (token < 0) { 657 + dma_unmap_single(dev->mt76.dma_dev, phy_addr, 658 + wed->wlan.rx_size, DMA_TO_DEVICE); 659 + skb_free_frag(ptr); 660 + goto unmap; 661 + } 662 + 656 663 desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN, 657 664 token)); 658 665 desc++;
+4 -3
drivers/net/wireless/mediatek/mt76/tx.c
··· 764 764 spin_lock_bh(&dev->rx_token_lock); 765 765 token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size, 766 766 GFP_ATOMIC); 767 + if (token >= 0) { 768 + t->ptr = ptr; 769 + t->dma_addr = phys; 770 + } 767 771 spin_unlock_bh(&dev->rx_token_lock); 768 - 769 - t->ptr = ptr; 770 - t->dma_addr = phys; 771 772 772 773 return token; 773 774 }
+6 -13
drivers/net/wireless/rndis_wlan.c
··· 696 696 struct rndis_query *get; 697 697 struct rndis_query_c *get_c; 698 698 } u; 699 - int ret, buflen; 700 - int resplen, respoffs, copylen; 699 + int ret; 700 + size_t buflen, resplen, respoffs, copylen; 701 701 702 702 buflen = *len + sizeof(*u.get); 703 703 if (buflen < CONTROL_BUFFER_SIZE) ··· 732 732 733 733 if (respoffs > buflen) { 734 734 /* Device returned data offset outside buffer, error. */ 735 - netdev_dbg(dev->net, "%s(%s): received invalid " 736 - "data offset: %d > %d\n", __func__, 737 - oid_to_string(oid), respoffs, buflen); 735 + netdev_dbg(dev->net, 736 + "%s(%s): received invalid data offset: %zu > %zu\n", 737 + __func__, oid_to_string(oid), respoffs, buflen); 738 738 739 739 ret = -EINVAL; 740 740 goto exit_unlock; 741 741 } 742 742 743 - if ((resplen + respoffs) > buflen) { 744 - /* Device would have returned more data if buffer would 745 - * have been big enough. Copy just the bits that we got. 746 - */ 747 - copylen = buflen - respoffs; 748 - } else { 749 - copylen = resplen; 750 - } 743 + copylen = min(resplen, buflen - respoffs); 751 744 752 745 if (copylen > *len) 753 746 copylen = *len;
+1 -1
drivers/nvme/host/apple.c
··· 1493 1493 } 1494 1494 1495 1495 ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops, 1496 - NVME_QUIRK_SKIP_CID_GEN); 1496 + NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS); 1497 1497 if (ret) { 1498 1498 dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl"); 1499 1499 goto put_dev;
+63 -47
drivers/nvme/host/ioctl.c
··· 8 8 #include <linux/io_uring.h> 9 9 #include "nvme.h" 10 10 11 + enum { 12 + NVME_IOCTL_VEC = (1 << 0), 13 + NVME_IOCTL_PARTITION = (1 << 1), 14 + }; 15 + 11 16 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c, 12 - fmode_t mode) 17 + unsigned int flags, fmode_t mode) 13 18 { 14 19 u32 effects; 15 20 16 21 if (capable(CAP_SYS_ADMIN)) 17 22 return true; 23 + 24 + /* 25 + * Do not allow unprivileged passthrough on partitions, as that allows an 26 + * escape from the containment of the partition. 27 + */ 28 + if (flags & NVME_IOCTL_PARTITION) 29 + return false; 18 30 19 31 /* 20 32 * Do not allow unprivileged processes to send vendor specific or fabrics ··· 162 150 static int nvme_map_user_request(struct request *req, u64 ubuffer, 163 151 unsigned bufflen, void __user *meta_buffer, unsigned meta_len, 164 152 u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd, 165 - bool vec) 153 + unsigned int flags) 166 154 { 167 155 struct request_queue *q = req->q; 168 156 struct nvme_ns *ns = q->queuedata; ··· 175 163 struct iov_iter iter; 176 164 177 165 /* fixedbufs is only for non-vectored io */ 178 - if (WARN_ON_ONCE(vec)) 166 + if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) 179 167 return -EINVAL; 180 168 ret = io_uring_cmd_import_fixed(ubuffer, bufflen, 181 169 rq_data_dir(req), &iter, ioucmd); ··· 184 172 ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL); 185 173 } else { 186 174 ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer), 187 - bufflen, GFP_KERNEL, vec, 0, 0, 188 - rq_data_dir(req)); 175 + bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0, 176 + 0, rq_data_dir(req)); 189 177 } 190 178 191 179 if (ret) ··· 215 203 } 216 204 217 205 static int nvme_submit_user_cmd(struct request_queue *q, 218 - struct nvme_command *cmd, u64 ubuffer, 219 - unsigned bufflen, void __user *meta_buffer, unsigned meta_len, 220 - u32 meta_seed, u64 *result, unsigned timeout, bool vec) 206 + struct nvme_command *cmd, u64 ubuffer, unsigned bufflen, 
207 + void __user *meta_buffer, unsigned meta_len, u32 meta_seed, 208 + u64 *result, unsigned timeout, unsigned int flags) 221 209 { 222 210 struct nvme_ctrl *ctrl; 223 211 struct request *req; ··· 233 221 req->timeout = timeout; 234 222 if (ubuffer && bufflen) { 235 223 ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer, 236 - meta_len, meta_seed, &meta, NULL, vec); 224 + meta_len, meta_seed, &meta, NULL, flags); 237 225 if (ret) 238 226 return ret; 239 227 } ··· 316 304 c.rw.apptag = cpu_to_le16(io.apptag); 317 305 c.rw.appmask = cpu_to_le16(io.appmask); 318 306 319 - return nvme_submit_user_cmd(ns->queue, &c, 320 - io.addr, length, 321 - metadata, meta_len, lower_32_bits(io.slba), NULL, 0, 322 - false); 307 + return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, 308 + meta_len, lower_32_bits(io.slba), NULL, 0, 0); 323 309 } 324 310 325 311 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, ··· 335 325 } 336 326 337 327 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 338 - struct nvme_passthru_cmd __user *ucmd, fmode_t mode) 328 + struct nvme_passthru_cmd __user *ucmd, unsigned int flags, 329 + fmode_t mode) 339 330 { 340 331 struct nvme_passthru_cmd cmd; 341 332 struct nvme_command c; ··· 364 353 c.common.cdw14 = cpu_to_le32(cmd.cdw14); 365 354 c.common.cdw15 = cpu_to_le32(cmd.cdw15); 366 355 367 - if (!nvme_cmd_allowed(ns, &c, mode)) 356 + if (!nvme_cmd_allowed(ns, &c, 0, mode)) 368 357 return -EACCES; 369 358 370 359 if (cmd.timeout_ms) 371 360 timeout = msecs_to_jiffies(cmd.timeout_ms); 372 361 373 362 status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, 374 - cmd.addr, cmd.data_len, 375 - nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, 376 - 0, &result, timeout, false); 363 + cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), 364 + cmd.metadata_len, 0, &result, timeout, 0); 377 365 378 366 if (status >= 0) { 379 367 if (put_user(result, &ucmd->result)) ··· 383 373 } 384 374 385 375 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 386 - struct nvme_passthru_cmd64 __user *ucmd, bool vec, 387 - fmode_t mode) 376 + struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags, 377 + fmode_t mode) 388 378 { 389 379 struct nvme_passthru_cmd64 cmd; 390 380 struct nvme_command c; ··· 411 401 c.common.cdw14 = cpu_to_le32(cmd.cdw14); 412 402 c.common.cdw15 = cpu_to_le32(cmd.cdw15); 413 403 414 - if (!nvme_cmd_allowed(ns, &c, mode)) 404 + if (!nvme_cmd_allowed(ns, &c, flags, mode)) 415 405 return -EACCES; 416 406 417 407 if (cmd.timeout_ms) 418 408 timeout = msecs_to_jiffies(cmd.timeout_ms); 419 409 420 410 status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, 421 - cmd.addr, cmd.data_len, 422 - nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, 423 - 0, &cmd.result, timeout, vec); 411 + cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), 412 + cmd.metadata_len, 0, &cmd.result, timeout, flags); 424 413 425 414 if (status >= 0) { 426 415 if (put_user(cmd.result, &ucmd->result)) ··· 580 571 c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14)); 581 572 c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15)); 582 573 583 - if (!nvme_cmd_allowed(ns, &c, ioucmd->file->f_mode)) 574 + if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode)) 584 575 return -EACCES; 585 576 586 577 d.metadata = READ_ONCE(cmd->metadata); ··· 650 641 { 651 642 switch (cmd) { 652 643 case NVME_IOCTL_ADMIN_CMD: 653 - return nvme_user_cmd(ctrl, NULL, argp, mode); 644 + return nvme_user_cmd(ctrl, NULL, argp, 0, mode); 654 645 case NVME_IOCTL_ADMIN64_CMD: 655 - return nvme_user_cmd64(ctrl, NULL, argp, false, mode); 646 + return nvme_user_cmd64(ctrl, NULL, argp, 0, mode); 656 647 default: 657 648 return sed_ioctl(ctrl->opal_dev, cmd, argp); 658 649 } ··· 677 668 #endif /* COMPAT_FOR_U64_ALIGNMENT */ 678 669 679 670 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, 680 - void __user *argp, fmode_t mode) 671 + void __user *argp, unsigned int flags, fmode_t mode) 681 672 { 682 673 switch (cmd) { 683 674 case NVME_IOCTL_ID: 684 675 force_successful_syscall_return(); 685 676 return ns->head->ns_id; 686 677 case NVME_IOCTL_IO_CMD: 687 - return nvme_user_cmd(ns->ctrl, ns, argp, mode); 678 + return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode); 688 679 /* 689 680 * struct nvme_user_io can have different padding on some 32-bit ABIs. 
690 681 * Just accept the compat version as all fields that are used are the ··· 695 686 #endif 696 687 case NVME_IOCTL_SUBMIT_IO: 697 688 return nvme_submit_io(ns, argp); 698 - case NVME_IOCTL_IO64_CMD: 699 - return nvme_user_cmd64(ns->ctrl, ns, argp, false, mode); 700 689 case NVME_IOCTL_IO64_CMD_VEC: 701 - return nvme_user_cmd64(ns->ctrl, ns, argp, true, mode); 690 + flags |= NVME_IOCTL_VEC; 691 + fallthrough; 692 + case NVME_IOCTL_IO64_CMD: 693 + return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode); 702 694 default: 703 695 return -ENOTTY; 704 696 } 705 - } 706 - 707 - static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg, 708 - fmode_t mode) 709 - { 710 - if (is_ctrl_ioctl(cmd)) 711 - return nvme_ctrl_ioctl(ns->ctrl, cmd, arg, mode); 712 - return nvme_ns_ioctl(ns, cmd, arg, mode); 713 697 } 714 698 715 699 int nvme_ioctl(struct block_device *bdev, fmode_t mode, 716 700 unsigned int cmd, unsigned long arg) 717 701 { 718 702 struct nvme_ns *ns = bdev->bd_disk->private_data; 703 + void __user *argp = (void __user *)arg; 704 + unsigned int flags = 0; 719 705 720 - return __nvme_ioctl(ns, cmd, (void __user *)arg, mode); 706 + if (bdev_is_partition(bdev)) 707 + flags |= NVME_IOCTL_PARTITION; 708 + 709 + if (is_ctrl_ioctl(cmd)) 710 + return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode); 711 + return nvme_ns_ioctl(ns, cmd, argp, flags, mode); 721 712 } 722 713 723 714 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 724 715 { 725 716 struct nvme_ns *ns = 726 717 container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev); 718 + void __user *argp = (void __user *)arg; 727 719 728 - return __nvme_ioctl(ns, cmd, (void __user *)arg, file->f_mode); 720 + if (is_ctrl_ioctl(cmd)) 721 + return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode); 722 + return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode); 729 723 } 730 724 731 725 static int nvme_uring_cmd_checks(unsigned int issue_flags) ··· 818 806 void __user *argp = 
(void __user *)arg; 819 807 struct nvme_ns *ns; 820 808 int srcu_idx, ret = -EWOULDBLOCK; 809 + unsigned int flags = 0; 810 + 811 + if (bdev_is_partition(bdev)) 812 + flags |= NVME_IOCTL_PARTITION; 821 813 822 814 srcu_idx = srcu_read_lock(&head->srcu); 823 815 ns = nvme_find_path(head); ··· 837 821 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, 838 822 mode); 839 823 840 - ret = nvme_ns_ioctl(ns, cmd, argp, mode); 824 + ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode); 841 825 out_unlock: 842 826 srcu_read_unlock(&head->srcu, srcu_idx); 843 827 return ret; ··· 862 846 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, 863 847 file->f_mode); 864 848 865 - ret = nvme_ns_ioctl(ns, cmd, argp, file->f_mode); 849 + ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode); 866 850 out_unlock: 867 851 srcu_read_unlock(&head->srcu, srcu_idx); 868 852 return ret; ··· 961 945 kref_get(&ns->kref); 962 946 up_read(&ctrl->namespaces_rwsem); 963 947 964 - ret = nvme_user_cmd(ctrl, ns, argp, mode); 948 + ret = nvme_user_cmd(ctrl, ns, argp, 0, mode); 965 949 nvme_put_ns(ns); 966 950 return ret; 967 951 ··· 978 962 979 963 switch (cmd) { 980 964 case NVME_IOCTL_ADMIN_CMD: 981 - return nvme_user_cmd(ctrl, NULL, argp, file->f_mode); 965 + return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode); 982 966 case NVME_IOCTL_ADMIN64_CMD: 983 - return nvme_user_cmd64(ctrl, NULL, argp, false, file->f_mode); 967 + return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode); 984 968 case NVME_IOCTL_IO_CMD: 985 969 return nvme_dev_user_cmd(ctrl, argp, file->f_mode); 986 970 case NVME_IOCTL_RESET:
+9 -3
drivers/nvme/host/pci.c
··· 2533 2533 */ 2534 2534 result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2535 2535 if (result < 0) 2536 - return result; 2536 + goto disable; 2537 2537 2538 2538 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 2539 2539 ··· 2586 2586 pci_enable_pcie_error_reporting(pdev); 2587 2587 pci_save_state(pdev); 2588 2588 2589 - return nvme_pci_configure_admin_queue(dev); 2589 + result = nvme_pci_configure_admin_queue(dev); 2590 + if (result) 2591 + goto free_irq; 2592 + return result; 2590 2593 2594 + free_irq: 2595 + pci_free_irq_vectors(pdev); 2591 2596 disable: 2592 2597 pci_disable_device(pdev); 2593 2598 return result; ··· 3500 3495 .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3501 3496 NVME_QUIRK_128_BYTES_SQES | 3502 3497 NVME_QUIRK_SHARED_TAGS | 3503 - NVME_QUIRK_SKIP_CID_GEN }, 3498 + NVME_QUIRK_SKIP_CID_GEN | 3499 + NVME_QUIRK_IDENTIFY_CNS }, 3504 3500 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 3505 3501 { 0, } 3506 3502 };
+1 -1
drivers/pci/controller/dwc/Kconfig
··· 225 225 config PCIE_BT1 226 226 tristate "Baikal-T1 PCIe controller" 227 227 depends on MIPS_BAIKAL_T1 || COMPILE_TEST 228 - depends on PCI_MSI_IRQ_DOMAIN 228 + depends on PCI_MSI 229 229 select PCIE_DW_HOST 230 230 help 231 231 Enables support for the PCIe controller in the Baikal-T1 SoC to work
+3 -1
drivers/platform/surface/aggregator/controller.c
··· 1700 1700 return status; 1701 1701 1702 1702 status = ssam_request_sync_init(rqst, spec->flags); 1703 - if (status) 1703 + if (status) { 1704 + ssam_request_sync_free(rqst); 1704 1705 return status; 1706 + } 1705 1707 1706 1708 ssam_request_sync_set_resp(rqst, rsp); 1707 1709
+14
drivers/platform/surface/aggregator/ssh_request_layer.c
··· 916 916 if (sshp_parse_command(dev, data, &command, &command_data)) 917 917 return; 918 918 919 + /* 920 + * Check if the message was intended for us. If not, drop it. 921 + * 922 + * Note: We will need to change this to handle debug messages. On newer 923 + * generation devices, these seem to be sent to tid_out=0x03. We as 924 + * host can still receive them as they can be forwarded via an override 925 + * option on SAM, but doing so does not change tid_out=0x00. 926 + */ 927 + if (command->tid_out != 0x00) { 928 + rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n", 929 + command->tid_out); 930 + return; 931 + } 932 + 919 933 if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid))) 920 934 ssh_rtl_rx_event(rtl, command, &command_data); 921 935 else
+1 -1
drivers/platform/x86/amd/pmc.c
··· 932 932 if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) { 933 933 err = amd_pmc_s2d_init(dev); 934 934 if (err) 935 - return err; 935 + goto err_pci_dev_put; 936 936 } 937 937 938 938 platform_set_drvdata(pdev, dev);
+15
drivers/platform/x86/asus-nb-wmi.c
··· 121 121 .tablet_switch_mode = asus_wmi_lid_flip_rog_devid, 122 122 }; 123 123 124 + static struct quirk_entry quirk_asus_ignore_fan = { 125 + .wmi_ignore_fan = true, 126 + }; 127 + 124 128 static int dmi_matched(const struct dmi_system_id *dmi) 125 129 { 126 130 pr_info("Identified laptop model '%s'\n", dmi->ident); ··· 477 473 }, 478 474 .driver_data = &quirk_asus_tablet_mode, 479 475 }, 476 + { 477 + .callback = dmi_matched, 478 + .ident = "ASUS VivoBook E410MA", 479 + .matches = { 480 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 481 + DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"), 482 + }, 483 + .driver_data = &quirk_asus_ignore_fan, 484 + }, 480 485 {}, 481 486 }; 482 487 ··· 524 511 { KE_KEY, 0x30, { KEY_VOLUMEUP } }, 525 512 { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, 526 513 { KE_KEY, 0x32, { KEY_MUTE } }, 514 + { KE_KEY, 0x33, { KEY_SCREENLOCK } }, 527 515 { KE_KEY, 0x35, { KEY_SCREENLOCK } }, 528 516 { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */ 529 517 { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, ··· 558 544 { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */ 559 545 { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */ 560 546 { KE_KEY, 0x82, { KEY_CAMERA } }, 547 + { KE_KEY, 0x85, { KEY_CAMERA } }, 561 548 { KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */ 562 549 { KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */ 563 550 { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+6 -1
drivers/platform/x86/asus-wmi.c
··· 2243 2243 asus->fan_type = FAN_TYPE_NONE; 2244 2244 asus->agfn_pwm = -1; 2245 2245 2246 - if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL)) 2246 + if (asus->driver->quirks->wmi_ignore_fan) 2247 + asus->fan_type = FAN_TYPE_NONE; 2248 + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL)) 2247 2249 asus->fan_type = FAN_TYPE_SPEC83; 2248 2250 else if (asus_wmi_has_agfn_fan(asus)) 2249 2251 asus->fan_type = FAN_TYPE_AGFN; ··· 2437 2435 int err; 2438 2436 2439 2437 *available = false; 2438 + 2439 + if (asus->fan_type == FAN_TYPE_NONE) 2440 + return 0; 2440 2441 2441 2442 err = fan_curve_get_factory_default(asus, fan_dev); 2442 2443 if (err) {
+1
drivers/platform/x86/asus-wmi.h
··· 38 38 bool store_backlight_power; 39 39 bool wmi_backlight_set_devstate; 40 40 bool wmi_force_als_set; 41 + bool wmi_ignore_fan; 41 42 enum asus_wmi_tablet_switch_mode tablet_switch_mode; 42 43 int wapf; 43 44 /*
+31 -10
drivers/platform/x86/dell/dell-wmi-privacy.c
··· 61 61 /* privacy mic mute */ 62 62 { KE_KEY, 0x0001, { KEY_MICMUTE } }, 63 63 /* privacy camera mute */ 64 - { KE_SW, 0x0002, { SW_CAMERA_LENS_COVER } }, 64 + { KE_VSW, 0x0002, { SW_CAMERA_LENS_COVER } }, 65 65 { KE_END, 0}, 66 66 }; 67 67 ··· 115 115 116 116 switch (code) { 117 117 case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */ 118 - case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */ 119 118 priv->last_status = status; 120 119 sparse_keymap_report_entry(priv->input_dev, key, 1, true); 120 + ret = true; 121 + break; 122 + case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */ 123 + priv->last_status = status; 124 + sparse_keymap_report_entry(priv->input_dev, key, !(status & CAMERA_STATUS), false); 121 125 ret = true; 122 126 break; 123 127 default: ··· 296 292 { 297 293 struct privacy_wmi_data *priv; 298 294 struct key_entry *keymap; 299 - int ret, i; 295 + int ret, i, j; 300 296 301 297 ret = wmi_has_guid(DELL_PRIVACY_GUID); 302 298 if (!ret) ··· 308 304 309 305 dev_set_drvdata(&wdev->dev, priv); 310 306 priv->wdev = wdev; 307 + 308 + ret = get_current_status(priv->wdev); 309 + if (ret) 310 + return ret; 311 + 311 312 /* create evdev passing interface */ 312 313 priv->input_dev = devm_input_allocate_device(&wdev->dev); 313 314 if (!priv->input_dev) ··· 327 318 /* remap the keymap code with Dell privacy key type 0x12 as prefix 328 319 * KEY_MICMUTE scancode will be reported as 0x120001 329 320 */ 330 - for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) { 331 - keymap[i] = dell_wmi_keymap_type_0012[i]; 332 - keymap[i].code |= (0x0012 << 16); 321 + for (i = 0, j = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) { 322 + /* 323 + * Unlike keys where only presses matter, userspace may act 324 + * on switches in both of their positions. Only register 325 + * SW_CAMERA_LENS_COVER if it is actually there. 
326 + */ 327 + if (dell_wmi_keymap_type_0012[i].type == KE_VSW && 328 + dell_wmi_keymap_type_0012[i].sw.code == SW_CAMERA_LENS_COVER && 329 + !(priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA))) 330 + continue; 331 + 332 + keymap[j] = dell_wmi_keymap_type_0012[i]; 333 + keymap[j].code |= (0x0012 << 16); 334 + j++; 333 335 } 334 336 ret = sparse_keymap_setup(priv->input_dev, keymap, NULL); 335 337 kfree(keymap); ··· 351 331 priv->input_dev->name = "Dell Privacy Driver"; 352 332 priv->input_dev->id.bustype = BUS_HOST; 353 333 354 - ret = input_register_device(priv->input_dev); 355 - if (ret) 356 - return ret; 334 + /* Report initial camera-cover status */ 335 + if (priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA)) 336 + input_report_switch(priv->input_dev, SW_CAMERA_LENS_COVER, 337 + !(priv->last_status & CAMERA_STATUS)); 357 338 358 - ret = get_current_status(priv->wdev); 339 + ret = input_register_device(priv->input_dev); 359 340 if (ret) 360 341 return ret; 361 342
+6
drivers/platform/x86/ideapad-laptop.c
··· 1621 1621 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"), 1622 1622 } 1623 1623 }, 1624 + { 1625 + .matches = { 1626 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1627 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion 5 15ARH05"), 1628 + } 1629 + }, 1624 1630 {} 1625 1631 }; 1626 1632
+3
drivers/platform/x86/intel/int3472/clk_and_regulator.c
··· 181 181 return PTR_ERR(int3472->regulator.gpio); 182 182 } 183 183 184 + /* Ensure the pin is in output mode and non-active state */ 185 + gpiod_direction_output(int3472->regulator.gpio, 0); 186 + 184 187 cfg.dev = &int3472->adev->dev; 185 188 cfg.init_data = &init_data; 186 189 cfg.ena_gpiod = int3472->regulator.gpio;
+4
drivers/platform/x86/intel/int3472/discrete.c
··· 168 168 return (PTR_ERR(gpio)); 169 169 170 170 int3472->clock.ena_gpio = gpio; 171 + /* Ensure the pin is in output mode and non-active state */ 172 + gpiod_direction_output(int3472->clock.ena_gpio, 0); 171 173 break; 172 174 case INT3472_GPIO_TYPE_PRIVACY_LED: 173 175 gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led"); ··· 177 175 return (PTR_ERR(gpio)); 178 176 179 177 int3472->clock.led_gpio = gpio; 178 + /* Ensure the pin is in output mode and non-active state */ 179 + gpiod_direction_output(int3472->clock.led_gpio, 0); 180 180 break; 181 181 default: 182 182 dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
+1
drivers/platform/x86/intel/pmc/core.c
··· 1029 1029 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init), 1030 1030 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init), 1031 1031 X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, mtl_core_init), 1032 + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init), 1032 1033 {} 1033 1034 }; 1034 1035
+2 -1
drivers/platform/x86/simatic-ipc.c
··· 46 46 {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE}, 47 47 {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E}, 48 48 {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E}, 49 - {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G}, 49 + {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G}, 50 + {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G}, 50 51 }; 51 52 52 53 static int register_platform_devices(u32 station_id)
+14 -7
drivers/platform/x86/sony-laptop.c
··· 1887 1887 break; 1888 1888 } 1889 1889 1890 - ret = sony_call_snc_handle(handle, probe_base, &result); 1891 - if (ret) 1892 - return ret; 1890 + /* 1891 + * Only probe if there is a separate probe_base, otherwise the probe call 1892 + * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in 1893 + * the keyboard backlight being turned off. 1894 + */ 1895 + if (probe_base) { 1896 + ret = sony_call_snc_handle(handle, probe_base, &result); 1897 + if (ret) 1898 + return ret; 1893 1899 1894 - if ((handle == 0x0137 && !(result & 0x02)) || 1895 - !(result & 0x01)) { 1896 - dprintk("no backlight keyboard found\n"); 1897 - return 0; 1900 + if ((handle == 0x0137 && !(result & 0x02)) || 1901 + !(result & 0x01)) { 1902 + dprintk("no backlight keyboard found\n"); 1903 + return 0; 1904 + } 1898 1905 } 1899 1906 1900 1907 kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+17 -6
drivers/platform/x86/thinkpad_acpi.c
··· 10311 10311 static int dytc_capabilities; 10312 10312 static bool dytc_mmc_get_available; 10313 10313 10314 - static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile) 10314 + static int convert_dytc_to_profile(int funcmode, int dytcmode, 10315 + enum platform_profile_option *profile) 10315 10316 { 10316 - if (dytc_capabilities & BIT(DYTC_FC_MMC)) { 10317 + switch (funcmode) { 10318 + case DYTC_FUNCTION_MMC: 10317 10319 switch (dytcmode) { 10318 10320 case DYTC_MODE_MMC_LOWPOWER: 10319 10321 *profile = PLATFORM_PROFILE_LOW_POWER; ··· 10331 10329 return -EINVAL; 10332 10330 } 10333 10331 return 0; 10334 - } 10335 - if (dytc_capabilities & BIT(DYTC_FC_PSC)) { 10332 + case DYTC_FUNCTION_PSC: 10336 10333 switch (dytcmode) { 10337 10334 case DYTC_MODE_PSC_LOWPOWER: 10338 10335 *profile = PLATFORM_PROFILE_LOW_POWER; ··· 10345 10344 default: /* Unknown mode */ 10346 10345 return -EINVAL; 10347 10346 } 10347 + return 0; 10348 + case DYTC_FUNCTION_AMT: 10349 + /* For now return balanced. 
It's the closest we have to 'auto' */ 10350 + *profile = PLATFORM_PROFILE_BALANCED; 10351 + return 0; 10352 + default: 10353 + /* Unknown function */ 10354 + return -EOPNOTSUPP; 10348 10355 } 10349 10356 return 0; 10350 10357 } ··· 10501 10492 err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output); 10502 10493 if (err) 10503 10494 goto unlock; 10495 + 10504 10496 /* system supports AMT, activate it when on balanced */ 10505 10497 if (dytc_capabilities & BIT(DYTC_FC_AMT)) 10506 10498 dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED); ··· 10517 10507 { 10518 10508 enum platform_profile_option profile; 10519 10509 int output, err = 0; 10520 - int perfmode; 10510 + int perfmode, funcmode; 10521 10511 10522 10512 mutex_lock(&dytc_mutex); 10523 10513 if (dytc_capabilities & BIT(DYTC_FC_MMC)) { ··· 10532 10522 if (err) 10533 10523 return; 10534 10524 10525 + funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF; 10535 10526 perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF; 10536 - convert_dytc_to_profile(perfmode, &profile); 10527 + convert_dytc_to_profile(funcmode, perfmode, &profile); 10537 10528 if (profile != dytc_current_profile) { 10538 10529 dytc_current_profile = profile; 10539 10530 platform_profile_notify();
+25
drivers/platform/x86/touchscreen_dmi.c
··· 264 264 .properties = connect_tablet9_props, 265 265 }; 266 266 267 + static const struct property_entry csl_panther_tab_hd_props[] = { 268 + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), 269 + PROPERTY_ENTRY_U32("touchscreen-min-y", 20), 270 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), 271 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1526), 272 + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), 273 + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), 274 + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"), 275 + PROPERTY_ENTRY_U32("silead,max-fingers", 10), 276 + { } 277 + }; 278 + 279 + static const struct ts_dmi_data csl_panther_tab_hd_data = { 280 + .acpi_name = "MSSL1680:00", 281 + .properties = csl_panther_tab_hd_props, 282 + }; 283 + 267 284 static const struct property_entry cube_iwork8_air_props[] = { 268 285 PROPERTY_ENTRY_U32("touchscreen-min-x", 1), 269 286 PROPERTY_ENTRY_U32("touchscreen-min-y", 3), ··· 1139 1122 .matches = { 1140 1123 DMI_MATCH(DMI_SYS_VENDOR, "Connect"), 1141 1124 DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"), 1125 + }, 1126 + }, 1127 + { 1128 + /* CSL Panther Tab HD */ 1129 + .driver_data = (void *)&csl_panther_tab_hd_data, 1130 + .matches = { 1131 + DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"), 1132 + DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"), 1142 1133 }, 1143 1134 }, 1144 1135 {
+2 -2
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 704 704 int_to_scsilun(0, &lun); 705 705 706 706 while (retry-- > 0) { 707 - rc = sas_clear_task_set(device, lun.scsi_lun); 707 + rc = sas_abort_task_set(device, lun.scsi_lun); 708 708 if (rc == TMF_RESP_FUNC_COMPLETE) { 709 709 hisi_sas_release_task(hisi_hba, device); 710 710 break; ··· 1316 1316 device->linkrate = phy->sas_phy.linkrate; 1317 1317 1318 1318 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1319 - } else 1319 + } else if (!port->port_attached) 1320 1320 port->id = 0xff; 1321 1321 } 1322 1322 }
+5 -15
drivers/tty/serial/kgdboc.c
··· 171 171 int err = -ENODEV; 172 172 char *cptr = config; 173 173 struct console *cons; 174 + int cookie; 174 175 175 176 if (!strlen(config) || isspace(config[0])) { 176 177 err = 0; ··· 190 189 if (kgdboc_register_kbd(&cptr)) 191 190 goto do_register; 192 191 193 - /* 194 - * tty_find_polling_driver() can call uart_set_options() 195 - * (via poll_init) to configure the uart. Take the console_list_lock 196 - * in order to synchronize against register_console(), which can also 197 - * configure the uart via uart_set_options(). This also allows safe 198 - * traversal of the console list. 199 - */ 200 - console_list_lock(); 201 - 202 192 p = tty_find_polling_driver(cptr, &tty_line); 203 - if (!p) { 204 - console_list_unlock(); 193 + if (!p) 205 194 goto noconfig; 206 - } 207 195 208 196 /* 209 197 * Take console_lock to serialize device() callback with ··· 201 211 */ 202 212 console_lock(); 203 213 204 - for_each_console(cons) { 214 + cookie = console_srcu_read_lock(); 215 + for_each_console_srcu(cons) { 205 216 int idx; 206 217 if (cons->device && cons->device(cons, &idx) == p && 207 218 idx == tty_line) { ··· 210 219 break; 211 220 } 212 221 } 222 + console_srcu_read_unlock(cookie); 213 223 214 224 console_unlock(); 215 - 216 - console_list_unlock(); 217 225 218 226 kgdb_tty_driver = p; 219 227 kgdb_tty_line = tty_line;
+5
drivers/tty/serial/serial_core.c
··· 2212 2212 * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even) 2213 2213 * @bits: number of data bits 2214 2214 * @flow: flow control character - 'r' (rts) 2215 + * 2216 + * Locking: Caller must hold console_list_lock in order to serialize 2217 + * early initialization of the serial-console lock. 2215 2218 */ 2216 2219 int 2217 2220 uart_set_options(struct uart_port *port, struct console *co, ··· 2622 2619 2623 2620 if (!ret && options) { 2624 2621 uart_parse_options(options, &baud, &parity, &bits, &flow); 2622 + console_list_lock(); 2625 2623 ret = uart_set_options(port, NULL, baud, parity, bits, flow); 2624 + console_list_unlock(); 2626 2625 } 2627 2626 out: 2628 2627 mutex_unlock(&tport->mutex);
+1 -1
fs/affs/file.c
··· 880 880 if (inode->i_size > AFFS_I(inode)->mmu_private) { 881 881 struct address_space *mapping = inode->i_mapping; 882 882 struct page *page; 883 - void *fsdata; 883 + void *fsdata = NULL; 884 884 loff_t isize = inode->i_size; 885 885 int res; 886 886
+2 -2
fs/binfmt_elf.c
··· 2034 2034 * The number of segs are recored into ELF header as 16bit value. 2035 2035 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here. 2036 2036 */ 2037 - segs = cprm->vma_count + elf_core_extra_phdrs(); 2037 + segs = cprm->vma_count + elf_core_extra_phdrs(cprm); 2038 2038 2039 2039 /* for notes section */ 2040 2040 segs++; ··· 2074 2074 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); 2075 2075 2076 2076 offset += cprm->vma_data_size; 2077 - offset += elf_core_extra_data_size(); 2077 + offset += elf_core_extra_data_size(cprm); 2078 2078 e_shoff = offset; 2079 2079 2080 2080 if (e_phnum == PN_XNUM) {
+2 -2
fs/binfmt_elf_fdpic.c
··· 1509 1509 tmp->next = thread_list; 1510 1510 thread_list = tmp; 1511 1511 1512 - segs = cprm->vma_count + elf_core_extra_phdrs(); 1512 + segs = cprm->vma_count + elf_core_extra_phdrs(cprm); 1513 1513 1514 1514 /* for notes section */ 1515 1515 segs++; ··· 1555 1555 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); 1556 1556 1557 1557 offset += cprm->vma_data_size; 1558 - offset += elf_core_extra_data_size(); 1558 + offset += elf_core_extra_data_size(cprm); 1559 1559 e_shoff = offset; 1560 1560 1561 1561 if (e_phnum == PN_XNUM) {
+8 -1
fs/btrfs/disk-io.c
··· 367 367 btrfs_print_tree(eb, 0); 368 368 btrfs_err(fs_info, "block=%llu write time tree block corruption detected", 369 369 eb->start); 370 - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); 370 + /* 371 + * Be noisy if this is an extent buffer from a log tree. We don't abort 372 + * a transaction in case there's a bad log tree extent buffer, we just 373 + * fallback to a transaction commit. Still we want to know when there is 374 + * a bad log tree extent buffer, as that may signal a bug somewhere. 375 + */ 376 + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) || 377 + btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID); 371 378 return ret; 372 379 } 373 380
+6
fs/btrfs/fs.h
··· 119 119 /* Indicate that we want to commit the transaction. */ 120 120 BTRFS_FS_NEED_TRANS_COMMIT, 121 121 122 + /* 123 + * Indicate metadata over-commit is disabled. This is set when active 124 + * zone tracking is needed. 125 + */ 126 + BTRFS_FS_NO_OVERCOMMIT, 127 + 122 128 #if BITS_PER_LONG == 32 123 129 /* Indicate if we have error/warn message printed on 32bit systems */ 124 130 BTRFS_FS_32BIT_ERROR,
+12 -2
fs/btrfs/qgroup.c
··· 2765 2765 2766 2766 /* 2767 2767 * Old roots should be searched when inserting qgroup 2768 - * extent record 2768 + * extent record. 2769 + * 2770 + * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case, 2771 + * we may have some record inserted during 2772 + * NO_ACCOUNTING (thus no old_roots populated), but 2773 + * later we start rescan, which clears NO_ACCOUNTING, 2774 + * leaving some inserted records without old_roots 2775 + * populated. 2776 + * 2777 + * Those cases are rare and should not cause too much 2778 + * time spent during commit_transaction(). 2769 2779 */ 2770 - if (WARN_ON(!record->old_roots)) { 2780 + if (!record->old_roots) { 2771 2781 /* Search commit root to find old_roots */ 2772 2782 ret = btrfs_find_all_roots(&ctx, false); 2773 2783 if (ret < 0)
+2 -1
fs/btrfs/space-info.c
··· 407 407 return 0; 408 408 409 409 used = btrfs_space_info_used(space_info, true); 410 - if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA)) 410 + if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) && 411 + (space_info->flags & BTRFS_BLOCK_GROUP_METADATA)) 411 412 avail = 0; 412 413 else 413 414 avail = calc_available_free_space(fs_info, space_info, flush);
+31 -16
fs/btrfs/tree-log.c
··· 2980 2980 ret = 0; 2981 2981 if (ret) { 2982 2982 blk_finish_plug(&plug); 2983 - btrfs_abort_transaction(trans, ret); 2984 2983 btrfs_set_log_full_commit(trans); 2985 2984 mutex_unlock(&root->log_mutex); 2986 2985 goto out; ··· 3044 3045 3045 3046 blk_finish_plug(&plug); 3046 3047 btrfs_set_log_full_commit(trans); 3047 - 3048 - if (ret != -ENOSPC) { 3049 - btrfs_abort_transaction(trans, ret); 3050 - mutex_unlock(&log_root_tree->log_mutex); 3051 - goto out; 3052 - } 3048 + if (ret != -ENOSPC) 3049 + btrfs_err(fs_info, 3050 + "failed to update log for root %llu ret %d", 3051 + root->root_key.objectid, ret); 3053 3052 btrfs_wait_tree_log_extents(log, mark); 3054 3053 mutex_unlock(&log_root_tree->log_mutex); 3055 - ret = BTRFS_LOG_FORCE_COMMIT; 3056 3054 goto out; 3057 3055 } 3058 3056 ··· 3108 3112 goto out_wake_log_root; 3109 3113 } else if (ret) { 3110 3114 btrfs_set_log_full_commit(trans); 3111 - btrfs_abort_transaction(trans, ret); 3112 3115 mutex_unlock(&log_root_tree->log_mutex); 3113 3116 goto out_wake_log_root; 3114 3117 } ··· 3821 3826 path->slots[0]); 3822 3827 if (tmp.type == BTRFS_DIR_INDEX_KEY) 3823 3828 last_old_dentry_offset = tmp.offset; 3829 + } else if (ret < 0) { 3830 + err = ret; 3824 3831 } 3832 + 3825 3833 goto done; 3826 3834 } 3827 3835 ··· 3844 3846 */ 3845 3847 if (tmp.type == BTRFS_DIR_INDEX_KEY) 3846 3848 last_old_dentry_offset = tmp.offset; 3849 + } else if (ret < 0) { 3850 + err = ret; 3851 + goto done; 3847 3852 } 3853 + 3848 3854 btrfs_release_path(path); 3849 3855 3850 3856 /* 3851 - * Find the first key from this transaction again. See the note for 3852 - * log_new_dir_dentries, if we're logging a directory recursively we 3853 - * won't be holding its i_mutex, which means we can modify the directory 3854 - * while we're logging it. If we remove an entry between our first 3855 - * search and this search we'll not find the key again and can just 3856 - * bail. 
3857 + * Find the first key from this transaction again or the one we were at 3858 + * in the loop below in case we had to reschedule. We may be logging the 3859 + * directory without holding its VFS lock, which happen when logging new 3860 + * dentries (through log_new_dir_dentries()) or in some cases when we 3861 + * need to log the parent directory of an inode. This means a dir index 3862 + * key might be deleted from the inode's root, and therefore we may not 3863 + * find it anymore. If we can't find it, just move to the next key. We 3864 + * can not bail out and ignore, because if we do that we will simply 3865 + * not log dir index keys that come after the one that was just deleted 3866 + * and we can end up logging a dir index range that ends at (u64)-1 3867 + * (@last_offset is initialized to that), resulting in removing dir 3868 + * entries we should not remove at log replay time. 3857 3869 */ 3858 3870 search: 3859 3871 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3872 + if (ret > 0) 3873 + ret = btrfs_next_item(root, path); 3874 + if (ret < 0) 3875 + err = ret; 3876 + /* If ret is 1, there are no more keys in the inode's root. */ 3860 3877 if (ret != 0) 3861 3878 goto done; 3862 3879 ··· 5593 5580 * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction 5594 5581 * commits. 5595 5582 */ 5596 - if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) 5583 + if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) { 5584 + btrfs_set_log_full_commit(trans); 5597 5585 return BTRFS_LOG_FORCE_COMMIT; 5586 + } 5598 5587 5599 5588 inode = btrfs_iget(root->fs_info->sb, ino, root); 5600 5589 /*
+10 -1
fs/btrfs/volumes.c
··· 768 768 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 769 769 770 770 error = lookup_bdev(path, &path_devt); 771 - if (error) 771 + if (error) { 772 + btrfs_err(NULL, "failed to lookup block device for path %s: %d", 773 + path, error); 772 774 return ERR_PTR(error); 775 + } 773 776 774 777 if (fsid_change_in_progress) { 775 778 if (!has_metadata_uuid) ··· 839 836 unsigned int nofs_flag; 840 837 841 838 if (fs_devices->opened) { 839 + btrfs_err(NULL, 840 + "device %s belongs to fsid %pU, and the fs is already mounted", 841 + path, fs_devices->fsid); 842 842 mutex_unlock(&fs_devices->device_list_mutex); 843 843 return ERR_PTR(-EBUSY); 844 844 } ··· 911 905 * generation are equal. 912 906 */ 913 907 mutex_unlock(&fs_devices->device_list_mutex); 908 + btrfs_err(NULL, 909 + "device %s already registered with a higher generation, found %llu expect %llu", 910 + path, found_transid, device->generation); 914 911 return ERR_PTR(-EEXIST); 915 912 } 916 913
+2
fs/btrfs/zoned.c
··· 539 539 } 540 540 atomic_set(&zone_info->active_zones_left, 541 541 max_active_zones - nactive); 542 + /* Overcommit does not work well with active zone tracking. */ 543 + set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags); 542 544 } 543 545 544 546 /* Validate superblock log */
+1
fs/cifs/cifsencrypt.c
··· 278 278 * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) + 279 279 * unicode length of a netbios domain name 280 280 */ 281 + kfree_sensitive(ses->auth_key.response); 281 282 ses->auth_key.len = size + 2 * dlen; 282 283 ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL); 283 284 if (!ses->auth_key.response) {
+6 -3
fs/cifs/connect.c
··· 2606 2606 INIT_LIST_HEAD(&tcon->pending_opens); 2607 2607 tcon->status = TID_GOOD; 2608 2608 2609 - /* schedule query interfaces poll */ 2610 2609 INIT_DELAYED_WORK(&tcon->query_interfaces, 2611 2610 smb2_query_server_interfaces); 2612 - queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, 2613 - (SMB_INTERFACE_POLL_INTERVAL * HZ)); 2611 + if (ses->server->dialect >= SMB30_PROT_ID && 2612 + (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { 2613 + /* schedule query interfaces poll */ 2614 + queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, 2615 + (SMB_INTERFACE_POLL_INTERVAL * HZ)); 2616 + } 2614 2617 2615 2618 spin_lock(&cifs_tcp_ses_lock); 2616 2619 list_add(&tcon->tcon_list, &ses->tcon_list);
-1
fs/cifs/dfs_cache.c
··· 1299 1299 * Resolve share's hostname and check if server address matches. Otherwise just ignore it 1300 1300 * as we could not have upcall to resolve hostname or failed to convert ip address. 1301 1301 */ 1302 - match = true; 1303 1302 extract_unc_hostname(s1, &host, &hostlen); 1304 1303 scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host); 1305 1304
+1
fs/cifs/link.c
··· 428 428 oparms.disposition = FILE_CREATE; 429 429 oparms.fid = &fid; 430 430 oparms.reconnect = false; 431 + oparms.mode = 0644; 431 432 432 433 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, 433 434 NULL, NULL);
+2
fs/cifs/sess.c
··· 815 815 return -EINVAL; 816 816 } 817 817 if (tilen) { 818 + kfree_sensitive(ses->auth_key.response); 818 819 ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen, 819 820 GFP_KERNEL); 820 821 if (!ses->auth_key.response) { ··· 1429 1428 goto out_put_spnego_key; 1430 1429 } 1431 1430 1431 + kfree_sensitive(ses->auth_key.response); 1432 1432 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, 1433 1433 GFP_KERNEL); 1434 1434 if (!ses->auth_key.response) {
+34 -29
fs/cifs/smb1ops.c
··· 562 562 if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) { 563 563 rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls, 564 564 cifs_remap(cifs_sb)); 565 - if (!rc) 566 - move_cifs_info_to_smb2(&data->fi, &fi); 567 565 *adjustTZ = true; 568 566 } 569 567 570 - if (!rc && (le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) { 568 + if (!rc) { 571 569 int tmprc; 572 570 int oplock = 0; 573 571 struct cifs_fid fid; 574 572 struct cifs_open_parms oparms; 573 + 574 + move_cifs_info_to_smb2(&data->fi, &fi); 575 + 576 + if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) 577 + return 0; 575 578 576 579 oparms.tcon = tcon; 577 580 oparms.cifs_sb = cifs_sb; ··· 719 716 static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, 720 717 void *buf) 721 718 { 722 - FILE_ALL_INFO *fi = buf; 719 + struct cifs_open_info_data *data = buf; 720 + FILE_ALL_INFO fi = {}; 721 + int rc; 723 722 724 723 if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS)) 725 - return SMBLegacyOpen(xid, oparms->tcon, oparms->path, 726 - oparms->disposition, 727 - oparms->desired_access, 728 - oparms->create_options, 729 - &oparms->fid->netfid, oplock, fi, 730 - oparms->cifs_sb->local_nls, 731 - cifs_remap(oparms->cifs_sb)); 732 - return CIFS_open(xid, oparms, oplock, fi); 724 + rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path, 725 + oparms->disposition, 726 + oparms->desired_access, 727 + oparms->create_options, 728 + &oparms->fid->netfid, oplock, &fi, 729 + oparms->cifs_sb->local_nls, 730 + cifs_remap(oparms->cifs_sb)); 731 + else 732 + rc = CIFS_open(xid, oparms, oplock, &fi); 733 + 734 + if (!rc && data) 735 + move_cifs_info_to_smb2(&data->fi, &fi); 736 + 737 + return rc; 733 738 } 734 739 735 740 static void ··· 1061 1050 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1062 1051 struct inode *newinode = NULL; 1063 1052 int rc = -EPERM; 1064 - FILE_ALL_INFO *buf = NULL; 1053 + struct cifs_open_info_data buf = {}; 1065 1054 struct cifs_io_parms 
io_parms; 1066 1055 __u32 oplock = 0; 1067 1056 struct cifs_fid fid; ··· 1093 1082 cifs_sb->local_nls, 1094 1083 cifs_remap(cifs_sb)); 1095 1084 if (rc) 1096 - goto out; 1085 + return rc; 1097 1086 1098 1087 rc = cifs_get_inode_info_unix(&newinode, full_path, 1099 1088 inode->i_sb, xid); 1100 1089 1101 1090 if (rc == 0) 1102 1091 d_instantiate(dentry, newinode); 1103 - goto out; 1092 + return rc; 1104 1093 } 1105 1094 1106 1095 /* ··· 1108 1097 * support block and char device (no socket & fifo) 1109 1098 */ 1110 1099 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) 1111 - goto out; 1100 + return rc; 1112 1101 1113 1102 if (!S_ISCHR(mode) && !S_ISBLK(mode)) 1114 - goto out; 1103 + return rc; 1115 1104 1116 1105 cifs_dbg(FYI, "sfu compat create special file\n"); 1117 - 1118 - buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 1119 - if (buf == NULL) { 1120 - rc = -ENOMEM; 1121 - goto out; 1122 - } 1123 1106 1124 1107 oparms.tcon = tcon; 1125 1108 oparms.cifs_sb = cifs_sb; ··· 1129 1124 oplock = REQ_OPLOCK; 1130 1125 else 1131 1126 oplock = 0; 1132 - rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf); 1127 + rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf); 1133 1128 if (rc) 1134 - goto out; 1129 + return rc; 1135 1130 1136 1131 /* 1137 1132 * BB Do not bother to decode buf since no local inode yet to put 1138 1133 * timestamps in, but we can reuse it safely. 
1139 1134 */ 1140 1135 1141 - pdev = (struct win_dev *)buf; 1136 + pdev = (struct win_dev *)&buf.fi; 1142 1137 io_parms.pid = current->tgid; 1143 1138 io_parms.tcon = tcon; 1144 1139 io_parms.offset = 0; 1145 1140 io_parms.length = sizeof(struct win_dev); 1146 - iov[1].iov_base = buf; 1141 + iov[1].iov_base = &buf.fi; 1147 1142 iov[1].iov_len = sizeof(struct win_dev); 1148 1143 if (S_ISCHR(mode)) { 1149 1144 memcpy(pdev->type, "IntxCHR", 8); ··· 1162 1157 d_drop(dentry); 1163 1158 1164 1159 /* FIXME: add code here to set EAs */ 1165 - out: 1166 - kfree(buf); 1160 + 1161 + cifs_free_open_info(&buf); 1167 1162 return rc; 1168 1163 } 1169 1164
+5 -1
fs/cifs/smb2pdu.c
··· 1453 1453 1454 1454 /* keep session key if binding */ 1455 1455 if (!is_binding) { 1456 + kfree_sensitive(ses->auth_key.response); 1456 1457 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, 1457 1458 GFP_KERNEL); 1458 1459 if (!ses->auth_key.response) { ··· 1483 1482 out_put_spnego_key: 1484 1483 key_invalidate(spnego_key); 1485 1484 key_put(spnego_key); 1486 - if (rc) 1485 + if (rc) { 1487 1486 kfree_sensitive(ses->auth_key.response); 1487 + ses->auth_key.response = NULL; 1488 + ses->auth_key.len = 0; 1489 + } 1488 1490 out: 1489 1491 sess_data->result = rc; 1490 1492 sess_data->func = NULL;
+7 -8
fs/erofs/super.c
··· 577 577 } 578 578 ++ctx->devs->extra_devices; 579 579 break; 580 - case Opt_fsid: 581 580 #ifdef CONFIG_EROFS_FS_ONDEMAND 581 + case Opt_fsid: 582 582 kfree(ctx->fsid); 583 583 ctx->fsid = kstrdup(param->string, GFP_KERNEL); 584 584 if (!ctx->fsid) 585 585 return -ENOMEM; 586 - #else 587 - errorfc(fc, "fsid option not supported"); 588 - #endif 589 586 break; 590 587 case Opt_domain_id: 591 - #ifdef CONFIG_EROFS_FS_ONDEMAND 592 588 kfree(ctx->domain_id); 593 589 ctx->domain_id = kstrdup(param->string, GFP_KERNEL); 594 590 if (!ctx->domain_id) 595 591 return -ENOMEM; 596 - #else 597 - errorfc(fc, "domain_id option not supported"); 598 - #endif 599 592 break; 593 + #else 594 + case Opt_fsid: 595 + case Opt_domain_id: 596 + errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); 597 + break; 598 + #endif 600 599 default: 601 600 return -ENOPARAM; 602 601 }
+6 -6
fs/erofs/zdata.c
··· 1032 1032 1033 1033 if (!be->decompressed_pages) 1034 1034 be->decompressed_pages = 1035 - kvcalloc(be->nr_pages, sizeof(struct page *), 1036 - GFP_KERNEL | __GFP_NOFAIL); 1035 + kcalloc(be->nr_pages, sizeof(struct page *), 1036 + GFP_KERNEL | __GFP_NOFAIL); 1037 1037 if (!be->compressed_pages) 1038 1038 be->compressed_pages = 1039 - kvcalloc(pclusterpages, sizeof(struct page *), 1040 - GFP_KERNEL | __GFP_NOFAIL); 1039 + kcalloc(pclusterpages, sizeof(struct page *), 1040 + GFP_KERNEL | __GFP_NOFAIL); 1041 1041 1042 1042 z_erofs_parse_out_bvecs(be); 1043 1043 err2 = z_erofs_parse_in_bvecs(be, &overlapped); ··· 1085 1085 } 1086 1086 if (be->compressed_pages < be->onstack_pages || 1087 1087 be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) 1088 - kvfree(be->compressed_pages); 1088 + kfree(be->compressed_pages); 1089 1089 z_erofs_fill_other_copies(be, err); 1090 1090 1091 1091 for (i = 0; i < be->nr_pages; ++i) { ··· 1104 1104 } 1105 1105 1106 1106 if (be->decompressed_pages != be->onstack_pages) 1107 - kvfree(be->decompressed_pages); 1107 + kfree(be->decompressed_pages); 1108 1108 1109 1109 pcl->length = 0; 1110 1110 pcl->partial = true;
+7 -3
fs/erofs/zmap.c
··· 793 793 iomap->type = IOMAP_HOLE; 794 794 iomap->addr = IOMAP_NULL_ADDR; 795 795 /* 796 - * No strict rule how to describe extents for post EOF, yet 797 - * we need do like below. Otherwise, iomap itself will get 796 + * No strict rule on how to describe extents for post EOF, yet 797 + * we need to do like below. Otherwise, iomap itself will get 798 798 * into an endless loop on post EOF. 799 + * 800 + * Calculate the effective offset by subtracting extent start 801 + * (map.m_la) from the requested offset, and add it to length. 802 + * (NB: offset >= map.m_la always) 799 803 */ 800 804 if (iomap->offset >= inode->i_size) 801 - iomap->length = length + map.m_la - offset; 805 + iomap->length = length + offset - map.m_la; 802 806 } 803 807 iomap->flags = 0; 804 808 return 0;
+1 -1
fs/nfsd/netns.h
··· 195 195 196 196 atomic_t nfsd_courtesy_clients; 197 197 struct shrinker nfsd_client_shrinker; 198 - struct delayed_work nfsd_shrinker_work; 198 + struct work_struct nfsd_shrinker_work; 199 199 }; 200 200 201 201 /* Simple check to find out if a given net was properly initialized */
+1
fs/nfsd/nfs4proc.c
··· 1318 1318 /* allow 20secs for mount/unmount for now - revisit */ 1319 1319 if (signal_pending(current) || 1320 1320 (schedule_timeout(20*HZ) == 0)) { 1321 + finish_wait(&nn->nfsd_ssc_waitq, &wait); 1321 1322 kfree(work); 1322 1323 return nfserr_eagain; 1323 1324 }
+15 -15
fs/nfsd/nfs4state.c
··· 4411 4411 if (!count) 4412 4412 count = atomic_long_read(&num_delegations); 4413 4413 if (count) 4414 - mod_delayed_work(laundry_wq, &nn->nfsd_shrinker_work, 0); 4414 + queue_work(laundry_wq, &nn->nfsd_shrinker_work); 4415 4415 return (unsigned long)count; 4416 4416 } 4417 4417 ··· 4421 4421 return SHRINK_STOP; 4422 4422 } 4423 4423 4424 - int 4424 + void 4425 4425 nfsd4_init_leases_net(struct nfsd_net *nn) 4426 4426 { 4427 4427 struct sysinfo si; ··· 4443 4443 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); 4444 4444 4445 4445 atomic_set(&nn->nfsd_courtesy_clients, 0); 4446 - nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan; 4447 - nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count; 4448 - nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS; 4449 - return register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"); 4450 - } 4451 - 4452 - void 4453 - nfsd4_leases_net_shutdown(struct nfsd_net *nn) 4454 - { 4455 - unregister_shrinker(&nn->nfsd_client_shrinker); 4456 4446 } 4457 4447 4458 4448 static void init_nfs4_replay(struct nfs4_replay *rp) ··· 6225 6235 static void 6226 6236 nfsd4_state_shrinker_worker(struct work_struct *work) 6227 6237 { 6228 - struct delayed_work *dwork = to_delayed_work(work); 6229 - struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 6238 + struct nfsd_net *nn = container_of(work, struct nfsd_net, 6230 6239 nfsd_shrinker_work); 6231 6240 6232 6241 courtesy_client_reaper(nn); ··· 8055 8066 INIT_LIST_HEAD(&nn->blocked_locks_lru); 8056 8067 8057 8068 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 8058 - INIT_DELAYED_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); 8069 + INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); 8059 8070 get_net(net); 8060 8071 8072 + nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan; 8073 + nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count; 8074 + nn->nfsd_client_shrinker.seeks = 
DEFAULT_SEEKS; 8075 + 8076 + if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client")) 8077 + goto err_shrinker; 8061 8078 return 0; 8062 8079 8080 + err_shrinker: 8081 + put_net(net); 8082 + kfree(nn->sessionid_hashtbl); 8063 8083 err_sessionid: 8064 8084 kfree(nn->unconf_id_hashtbl); 8065 8085 err_unconf_id: ··· 8161 8163 struct list_head *pos, *next, reaplist; 8162 8164 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8163 8165 8166 + unregister_shrinker(&nn->nfsd_client_shrinker); 8167 + cancel_work(&nn->nfsd_shrinker_work); 8164 8168 cancel_delayed_work_sync(&nn->laundromat_work); 8165 8169 locks_end_grace(&nn->nfsd4_manager); 8166 8170
+1 -6
fs/nfsd/nfsctl.c
··· 1457 1457 goto out_idmap_error; 1458 1458 nn->nfsd_versions = NULL; 1459 1459 nn->nfsd4_minorversions = NULL; 1460 - retval = nfsd4_init_leases_net(nn); 1461 - if (retval) 1462 - goto out_drc_error; 1460 + nfsd4_init_leases_net(nn); 1463 1461 retval = nfsd_reply_cache_init(nn); 1464 1462 if (retval) 1465 1463 goto out_cache_error; ··· 1467 1469 return 0; 1468 1470 1469 1471 out_cache_error: 1470 - nfsd4_leases_net_shutdown(nn); 1471 - out_drc_error: 1472 1472 nfsd_idmap_shutdown(net); 1473 1473 out_idmap_error: 1474 1474 nfsd_export_shutdown(net); ··· 1482 1486 nfsd_idmap_shutdown(net); 1483 1487 nfsd_export_shutdown(net); 1484 1488 nfsd_netns_free_versions(net_generic(net, nfsd_net_id)); 1485 - nfsd4_leases_net_shutdown(nn); 1486 1489 } 1487 1490 1488 1491 static struct pernet_operations nfsd_net_ops = {
+2 -4
fs/nfsd/nfsd.h
··· 504 504 extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn); 505 505 #endif 506 506 507 - extern int nfsd4_init_leases_net(struct nfsd_net *nn); 508 - extern void nfsd4_leases_net_shutdown(struct nfsd_net *nn); 507 + extern void nfsd4_init_leases_net(struct nfsd_net *nn); 509 508 510 509 #else /* CONFIG_NFSD_V4 */ 511 510 static inline int nfsd4_is_junction(struct dentry *dentry) ··· 512 513 return 0; 513 514 } 514 515 515 - static inline int nfsd4_init_leases_net(struct nfsd_net *nn) { return 0; }; 516 - static inline void nfsd4_leases_net_shutdown(struct nfsd_net *nn) {}; 516 + static inline void nfsd4_init_leases_net(struct nfsd_net *nn) { }; 517 517 518 518 #define register_cld_notifier() 0 519 519 #define unregister_cld_notifier() do { } while(0)
+12 -3
fs/nilfs2/btree.c
··· 480 480 ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, &bh, 481 481 &submit_ptr); 482 482 if (ret) { 483 - if (ret != -EEXIST) 484 - return ret; 485 - goto out_check; 483 + if (likely(ret == -EEXIST)) 484 + goto out_check; 485 + if (ret == -ENOENT) { 486 + /* 487 + * Block address translation failed due to invalid 488 + * value of 'ptr'. In this case, return internal code 489 + * -EINVAL (broken bmap) to notify bmap layer of fatal 490 + * metadata corruption. 491 + */ 492 + ret = -EINVAL; 493 + } 494 + return ret; 486 495 } 487 496 488 497 if (ra) {
+22 -6
fs/userfaultfd.c
··· 108 108 return ctx->features & UFFD_FEATURE_INITIALIZED; 109 109 } 110 110 111 + static void userfaultfd_set_vm_flags(struct vm_area_struct *vma, 112 + vm_flags_t flags) 113 + { 114 + const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP; 115 + 116 + vma->vm_flags = flags; 117 + /* 118 + * For shared mappings, we want to enable writenotify while 119 + * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply 120 + * recalculate vma->vm_page_prot whenever userfaultfd-wp changes. 121 + */ 122 + if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed) 123 + vma_set_page_prot(vma); 124 + } 125 + 111 126 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, 112 127 int wake_flags, void *key) 113 128 { ··· 633 618 for_each_vma(vmi, vma) { 634 619 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { 635 620 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 636 - vma->vm_flags &= ~__VM_UFFD_FLAGS; 621 + userfaultfd_set_vm_flags(vma, 622 + vma->vm_flags & ~__VM_UFFD_FLAGS); 637 623 } 638 624 } 639 625 mmap_write_unlock(mm); ··· 668 652 octx = vma->vm_userfaultfd_ctx.ctx; 669 653 if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { 670 654 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 671 - vma->vm_flags &= ~__VM_UFFD_FLAGS; 655 + userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS); 672 656 return 0; 673 657 } 674 658 ··· 749 733 } else { 750 734 /* Drop uffd context if remap feature not enabled */ 751 735 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 752 - vma->vm_flags &= ~__VM_UFFD_FLAGS; 736 + userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS); 753 737 } 754 738 } 755 739 ··· 911 895 prev = vma; 912 896 } 913 897 914 - vma->vm_flags = new_flags; 898 + userfaultfd_set_vm_flags(vma, new_flags); 915 899 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 916 900 } 917 901 mmap_write_unlock(mm); ··· 1479 1463 * the next vma was merged into the current one and 1480 1464 * the current one has not been updated yet. 
1481 1465 */ 1482 - vma->vm_flags = new_flags; 1466 + userfaultfd_set_vm_flags(vma, new_flags); 1483 1467 vma->vm_userfaultfd_ctx.ctx = ctx; 1484 1468 1485 1469 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) ··· 1667 1651 * the next vma was merged into the current one and 1668 1652 * the current one has not been updated yet. 1669 1653 */ 1670 - vma->vm_flags = new_flags; 1654 + userfaultfd_set_vm_flags(vma, new_flags); 1671 1655 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 1672 1656 1673 1657 skip:
+22
fs/zonefs/super.c
··· 442 442 data_size = zonefs_check_zone_condition(inode, zone, 443 443 false, false); 444 444 } 445 + } else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO && 446 + data_size > isize) { 447 + /* Do not expose garbage data */ 448 + data_size = isize; 445 449 } 446 450 447 451 /* ··· 808 804 bio_set_polled(bio, iocb); 809 805 810 806 ret = submit_bio_wait(bio); 807 + 808 + /* 809 + * If the file zone was written underneath the file system, the zone 810 + * write pointer may not be where we expect it to be, but the zone 811 + * append write can still succeed. So check manually that we wrote where 812 + * we intended to, that is, at zi->i_wpoffset. 813 + */ 814 + if (!ret) { 815 + sector_t wpsector = 816 + zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT); 817 + 818 + if (bio->bi_iter.bi_sector != wpsector) { 819 + zonefs_warn(inode->i_sb, 820 + "Corrupted write pointer %llu for zone at %llu\n", 821 + wpsector, zi->i_zsector); 822 + ret = -EIO; 823 + } 824 + } 811 825 812 826 zonefs_file_write_dio_end_io(iocb, size, ret, 0); 813 827 trace_zonefs_file_dio_append(inode, size, ret);
+2 -1
include/acpi/acpi_bus.h
··· 230 230 u32 hardware_id:1; 231 231 u32 bus_address:1; 232 232 u32 platform_id:1; 233 - u32 reserved:29; 233 + u32 backlight:1; 234 + u32 reserved:28; 234 235 }; 235 236 236 237 struct acpi_device_pnp {
+1 -1
include/linux/bpf.h
··· 1832 1832 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1833 1833 void bpf_prog_put(struct bpf_prog *prog); 1834 1834 1835 - void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); 1835 + void bpf_prog_free_id(struct bpf_prog *prog); 1836 1836 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); 1837 1837 1838 1838 struct btf_field *btf_record_find(const struct btf_record *rec,
+4 -4
include/linux/elfcore.h
··· 105 105 * Dumping its extra ELF program headers includes all the other information 106 106 * a debugger needs to easily find how the gate DSO was being used. 107 107 */ 108 - extern Elf_Half elf_core_extra_phdrs(void); 108 + extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm); 109 109 extern int 110 110 elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); 111 111 extern int 112 112 elf_core_write_extra_data(struct coredump_params *cprm); 113 - extern size_t elf_core_extra_data_size(void); 113 + extern size_t elf_core_extra_data_size(struct coredump_params *cprm); 114 114 #else 115 - static inline Elf_Half elf_core_extra_phdrs(void) 115 + static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm) 116 116 { 117 117 return 0; 118 118 } ··· 127 127 return 1; 128 128 } 129 129 130 - static inline size_t elf_core_extra_data_size(void) 130 + static inline size_t elf_core_extra_data_size(struct coredump_params *cprm) 131 131 { 132 132 return 0; 133 133 }
+3 -3
include/linux/mm.h
··· 1270 1270 __folio_put(folio); 1271 1271 } 1272 1272 1273 - /** 1274 - * release_pages - release an array of pages or folios 1273 + /* 1274 + * union release_pages_arg - an array of pages or folios 1275 1275 * 1276 - * This just releases a simple array of multiple pages, and 1276 + * release_pages() releases a simple array of multiple pages, and 1277 1277 * accepts various different forms of said page array: either 1278 1278 * a regular old boring array of pages, an array of folios, or 1279 1279 * an array of encoded page pointers.
+1 -2
include/linux/mm_inline.h
··· 413 413 * Not using anon_vma_name because it generates a warning if mmap_lock 414 414 * is not held, which might be the case here. 415 415 */ 416 - if (!vma->vm_file) 417 - anon_vma_name_put(vma->anon_name); 416 + anon_vma_name_put(vma->anon_name); 418 417 } 419 418 420 419 static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+1 -1
include/linux/mm_types.h
··· 581 581 /* 582 582 * For private and shared anonymous mappings, a pointer to a null 583 583 * terminated string containing the name given to the vma, or NULL if 584 - * unnamed. Serialized by mmap_sem. Use anon_vma_name to access. 584 + * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. 585 585 */ 586 586 struct anon_vma_name *anon_name; 587 587 #endif
+1 -1
include/linux/page_ref.h
··· 301 301 * 302 302 * You can also use this function if you're holding a lock that prevents 303 303 * pages being frozen & removed; eg the i_pages lock for the page cache 304 - * or the mmap_sem or page table lock for page tables. In this case, 304 + * or the mmap_lock or page table lock for page tables. In this case, 305 305 * it will always succeed, and you could have used a plain folio_get(), 306 306 * but it's sometimes more convenient to have a common function called 307 307 * from both locked and RCU-protected contexts.
+2 -1
include/linux/platform_data/x86/simatic-ipc.h
··· 32 32 SIMATIC_IPC_IPC477E = 0x00000A02, 33 33 SIMATIC_IPC_IPC127E = 0x00000D01, 34 34 SIMATIC_IPC_IPC227G = 0x00000F01, 35 - SIMATIC_IPC_IPC427G = 0x00001001, 35 + SIMATIC_IPC_IPCBX_39A = 0x00001001, 36 + SIMATIC_IPC_IPCPX_39A = 0x00001002, 36 37 }; 37 38 38 39 static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
+2 -2
include/linux/tpm_eventlog.h
··· 198 198 * The loop below will unmap these fields if the log is larger than 199 199 * one page, so save them here for reference: 200 200 */ 201 - count = READ_ONCE(event->count); 202 - event_type = READ_ONCE(event->event_type); 201 + count = event->count; 202 + event_type = event->event_type; 203 203 204 204 /* Verify that it's the log header */ 205 205 if (event_header->pcr_idx != 0 ||
-4
include/net/mac80211.h
··· 1832 1832 * @drv_priv: data area for driver use, will always be aligned to 1833 1833 * sizeof(void \*). 1834 1834 * @txq: the multicast data TX queue 1835 - * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped, 1836 - * protected by fq->lock. 1837 1835 * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see 1838 1836 * &enum ieee80211_offload_flags. 1839 1837 * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled. ··· 1860 1862 1861 1863 bool probe_req_reg; 1862 1864 bool rx_mcast_action_reg; 1863 - 1864 - bool txqs_stopped[IEEE80211_NUM_ACS]; 1865 1865 1866 1866 struct ieee80211_vif *mbssid_tx_vif; 1867 1867
+7
include/net/sch_generic.h
··· 1288 1288 1289 1289 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)); 1290 1290 1291 + /* Make sure qdisc is no longer in SCHED state. */ 1292 + static inline void qdisc_synchronize(const struct Qdisc *q) 1293 + { 1294 + while (test_bit(__QDISC_STATE_SCHED, &q->state)) 1295 + msleep(1); 1296 + } 1297 + 1291 1298 #endif
+2 -2
include/uapi/linux/psci.h
··· 58 58 59 59 #define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18) 60 60 #define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19) 61 - #define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(19) 61 + #define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20) 62 62 63 63 #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12) 64 64 #define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13) ··· 67 67 #define PSCI_1_0_FN64_STAT_COUNT PSCI_0_2_FN64(17) 68 68 69 69 #define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18) 70 - #define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(19) 70 + #define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(20) 71 71 72 72 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */ 73 73 #define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
+1 -1
init/Kconfig
··· 776 776 depends on PRINTK 777 777 help 778 778 Select the size of an alternate printk per-CPU buffer where messages 779 - printed from usafe contexts are temporary stored. One example would 779 + printed from unsafe contexts are temporary stored. One example would 780 780 be NMI messages, another one - printk recursion. The messages are 781 781 copied to the main log buffer in a safe context to avoid a deadlock. 782 782 The value defines the size as a power of 2.
+1
init/Makefile
··· 59 59 60 60 $(obj)/version-timestamp.o: include/generated/utsversion.h 61 61 CFLAGS_version-timestamp.o := -include include/generated/utsversion.h 62 + KASAN_SANITIZE_version-timestamp.o := n
+10 -2
io_uring/fdinfo.c
··· 170 170 xa_for_each(&ctx->personalities, index, cred) 171 171 io_uring_show_cred(m, index, cred); 172 172 } 173 - if (has_lock) 174 - mutex_unlock(&ctx->uring_lock); 175 173 176 174 seq_puts(m, "PollList:\n"); 177 175 for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) { 178 176 struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i]; 177 + struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i]; 179 178 struct io_kiocb *req; 180 179 181 180 spin_lock(&hb->lock); ··· 182 183 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, 183 184 task_work_pending(req->task)); 184 185 spin_unlock(&hb->lock); 186 + 187 + if (!has_lock) 188 + continue; 189 + hlist_for_each_entry(req, &hbl->list, hash_node) 190 + seq_printf(m, " op=%d, task_works=%d\n", req->opcode, 191 + task_work_pending(req->task)); 185 192 } 193 + 194 + if (has_lock) 195 + mutex_unlock(&ctx->uring_lock); 186 196 187 197 seq_puts(m, "CqOverflowList:\n"); 188 198 spin_lock(&ctx->completion_lock);
+6 -1
io_uring/io-wq.c
··· 1230 1230 1231 1231 worker = container_of(cb, struct io_worker, create_work); 1232 1232 io_worker_cancel_cb(worker); 1233 - kfree(worker); 1233 + /* 1234 + * Only the worker continuation helper has worker allocated and 1235 + * hence needs freeing. 1236 + */ 1237 + if (cb->func == create_worker_cont) 1238 + kfree(worker); 1234 1239 } 1235 1240 } 1236 1241
+32 -16
io_uring/poll.c
··· 223 223 IOU_POLL_DONE = 0, 224 224 IOU_POLL_NO_ACTION = 1, 225 225 IOU_POLL_REMOVE_POLL_USE_RES = 2, 226 + IOU_POLL_REISSUE = 3, 226 227 }; 227 228 228 229 /* 229 230 * All poll tw should go through this. Checks for poll events, manages 230 231 * references, does rewait, etc. 231 232 * 232 - * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action require, 233 - * which is either spurious wakeup or multishot CQE is served. 234 - * IOU_POLL_DONE when it's done with the request, then the mask is stored in req->cqe.res. 235 - * IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot poll and that the result 236 - * is stored in req->cqe. 233 + * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action 234 + * require, which is either spurious wakeup or multishot CQE is served. 235 + * IOU_POLL_DONE when it's done with the request, then the mask is stored in 236 + * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot 237 + * poll and that the result is stored in req->cqe. 237 238 */ 238 239 static int io_poll_check_events(struct io_kiocb *req, bool *locked) 239 240 { 240 - int v, ret; 241 + int v; 241 242 242 243 /* req->task == current here, checking PF_EXITING is safe */ 243 244 if (unlikely(req->task->flags & PF_EXITING)) ··· 277 276 if (!req->cqe.res) { 278 277 struct poll_table_struct pt = { ._key = req->apoll_events }; 279 278 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events; 279 + /* 280 + * We got woken with a mask, but someone else got to 281 + * it first. The above vfs_poll() doesn't add us back 282 + * to the waitqueue, so if we get nothing back, we 283 + * should be safe and attempt a reissue. 
284 + */ 285 + if (unlikely(!req->cqe.res)) 286 + return IOU_POLL_REISSUE; 280 287 } 281 - 282 - if ((unlikely(!req->cqe.res))) 283 - continue; 284 288 if (req->apoll_events & EPOLLONESHOT) 285 289 return IOU_POLL_DONE; 286 290 ··· 300 294 return IOU_POLL_REMOVE_POLL_USE_RES; 301 295 } 302 296 } else { 303 - ret = io_poll_issue(req, locked); 297 + int ret = io_poll_issue(req, locked); 304 298 if (ret == IOU_STOP_MULTISHOT) 305 299 return IOU_POLL_REMOVE_POLL_USE_RES; 306 300 if (ret < 0) ··· 336 330 337 331 poll = io_kiocb_to_cmd(req, struct io_poll); 338 332 req->cqe.res = mangle_poll(req->cqe.res & poll->events); 333 + } else if (ret == IOU_POLL_REISSUE) { 334 + io_req_task_submit(req, locked); 335 + return; 339 336 } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) { 340 337 req->cqe.res = ret; 341 338 req_set_fail(req); ··· 351 342 352 343 if (ret == IOU_POLL_REMOVE_POLL_USE_RES) 353 344 io_req_task_complete(req, locked); 354 - else if (ret == IOU_POLL_DONE) 345 + else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE) 355 346 io_req_task_submit(req, locked); 356 347 else 357 348 io_req_defer_failed(req, ret); ··· 542 533 return pt->owning || io_poll_get_ownership(req); 543 534 } 544 535 536 + static void io_poll_add_hash(struct io_kiocb *req) 537 + { 538 + if (req->flags & REQ_F_HASH_LOCKED) 539 + io_poll_req_insert_locked(req); 540 + else 541 + io_poll_req_insert(req); 542 + } 543 + 545 544 /* 546 545 * Returns 0 when it's handed over for polling. The caller owns the requests if 547 546 * it returns non-zero, but otherwise should not touch it. 
Negative values ··· 608 591 609 592 if (mask && 610 593 ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) { 611 - if (!io_poll_can_finish_inline(req, ipt)) 594 + if (!io_poll_can_finish_inline(req, ipt)) { 595 + io_poll_add_hash(req); 612 596 return 0; 597 + } 613 598 io_poll_remove_entries(req); 614 599 ipt->result_mask = mask; 615 600 /* no one else has access to the req, forget about the ref */ 616 601 return 1; 617 602 } 618 603 619 - if (req->flags & REQ_F_HASH_LOCKED) 620 - io_poll_req_insert_locked(req); 621 - else 622 - io_poll_req_insert(req); 604 + io_poll_add_hash(req); 623 605 624 606 if (mask && (poll->events & EPOLLET) && 625 607 io_poll_can_finish_inline(req, ipt)) {
+5 -1
io_uring/rw.c
··· 1062 1062 continue; 1063 1063 1064 1064 req->cqe.flags = io_put_kbuf(req, 0); 1065 - io_fill_cqe_req(req->ctx, req); 1065 + if (unlikely(!__io_fill_cqe_req(ctx, req))) { 1066 + spin_lock(&ctx->completion_lock); 1067 + io_req_cqe_overflow(req); 1068 + spin_unlock(&ctx->completion_lock); 1069 + } 1066 1070 } 1067 1071 1068 1072 if (unlikely(!nr_events))
+2 -2
kernel/bpf/hashtab.c
··· 152 152 { 153 153 unsigned long flags; 154 154 155 - hash = hash & HASHTAB_MAP_LOCK_MASK; 155 + hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 156 156 157 157 preempt_disable(); 158 158 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { ··· 171 171 struct bucket *b, u32 hash, 172 172 unsigned long flags) 173 173 { 174 - hash = hash & HASHTAB_MAP_LOCK_MASK; 174 + hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 175 175 raw_spin_unlock_irqrestore(&b->raw_lock, flags); 176 176 __this_cpu_dec(*(htab->map_locked[hash])); 177 177 preempt_enable();
-3
kernel/bpf/offload.c
··· 216 216 if (offload->dev_state) 217 217 offload->offdev->ops->destroy(prog); 218 218 219 - /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ 220 - bpf_prog_free_id(prog, true); 221 - 222 219 list_del_init(&offload->offloads); 223 220 kfree(offload); 224 221 prog->aux->offload = NULL;
+7 -17
kernel/bpf/syscall.c
··· 1972 1972 return; 1973 1973 if (audit_enabled == AUDIT_OFF) 1974 1974 return; 1975 - if (op == BPF_AUDIT_LOAD) 1975 + if (!in_irq() && !irqs_disabled()) 1976 1976 ctx = audit_context(); 1977 1977 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 1978 1978 if (unlikely(!ab)) ··· 2001 2001 return id > 0 ? 0 : id; 2002 2002 } 2003 2003 2004 - void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) 2004 + void bpf_prog_free_id(struct bpf_prog *prog) 2005 2005 { 2006 2006 unsigned long flags; 2007 2007 ··· 2013 2013 if (!prog->aux->id) 2014 2014 return; 2015 2015 2016 - if (do_idr_lock) 2017 - spin_lock_irqsave(&prog_idr_lock, flags); 2018 - else 2019 - __acquire(&prog_idr_lock); 2020 - 2016 + spin_lock_irqsave(&prog_idr_lock, flags); 2021 2017 idr_remove(&prog_idr, prog->aux->id); 2022 2018 prog->aux->id = 0; 2023 - 2024 - if (do_idr_lock) 2025 - spin_unlock_irqrestore(&prog_idr_lock, flags); 2026 - else 2027 - __release(&prog_idr_lock); 2019 + spin_unlock_irqrestore(&prog_idr_lock, flags); 2028 2020 } 2029 2021 2030 2022 static void __bpf_prog_put_rcu(struct rcu_head *rcu) ··· 2059 2067 prog = aux->prog; 2060 2068 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2061 2069 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2070 + bpf_prog_free_id(prog); 2062 2071 __bpf_prog_put_noref(prog, true); 2063 2072 } 2064 2073 2065 - static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) 2074 + static void __bpf_prog_put(struct bpf_prog *prog) 2066 2075 { 2067 2076 struct bpf_prog_aux *aux = prog->aux; 2068 2077 2069 2078 if (atomic64_dec_and_test(&aux->refcnt)) { 2070 - /* bpf_prog_free_id() must be called first */ 2071 - bpf_prog_free_id(prog, do_idr_lock); 2072 - 2073 2079 if (in_irq() || irqs_disabled()) { 2074 2080 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2075 2081 schedule_work(&aux->work); ··· 2079 2089 2080 2090 void bpf_prog_put(struct bpf_prog *prog) 2081 2091 { 2082 - __bpf_prog_put(prog, true); 2092 + __bpf_prog_put(prog); 2083 2093 } 
2084 2094 EXPORT_SYMBOL_GPL(bpf_prog_put); 2085 2095
+9 -1
kernel/bpf/verifier.c
··· 2752 2752 */ 2753 2753 if (insn->src_reg == 0 && is_callback_calling_function(insn->imm)) 2754 2754 return -ENOTSUPP; 2755 + /* kfunc with imm==0 is invalid and fixup_kfunc_call will 2756 + * catch this error later. Make backtracking conservative 2757 + * with ENOTSUPP. 2758 + */ 2759 + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) 2760 + return -ENOTSUPP; 2755 2761 /* regular helper call sets R0 */ 2756 2762 *reg_mask &= ~1; 2757 2763 if (*reg_mask & 0x3f) { ··· 3299 3293 bool sanitize = reg && is_spillable_regtype(reg->type); 3300 3294 3301 3295 for (i = 0; i < size; i++) { 3302 - if (state->stack[spi].slot_type[i] == STACK_INVALID) { 3296 + u8 type = state->stack[spi].slot_type[i]; 3297 + 3298 + if (type != STACK_MISC && type != STACK_ZERO) { 3303 3299 sanitize = true; 3304 3300 break; 3305 3301 }
+6 -15
kernel/kallsyms_selftest.c
··· 157 157 static int lookup_name(void *data, const char *name, struct module *mod, unsigned long addr) 158 158 { 159 159 u64 t0, t1, t; 160 - unsigned long flags; 161 160 struct test_stat *stat = (struct test_stat *)data; 162 161 163 - local_irq_save(flags); 164 - t0 = sched_clock(); 162 + t0 = ktime_get_ns(); 165 163 (void)kallsyms_lookup_name(name); 166 - t1 = sched_clock(); 167 - local_irq_restore(flags); 164 + t1 = ktime_get_ns(); 168 165 169 166 t = t1 - t0; 170 167 if (t < stat->min) ··· 231 234 static void test_perf_kallsyms_on_each_symbol(void) 232 235 { 233 236 u64 t0, t1; 234 - unsigned long flags; 235 237 struct test_stat stat; 236 238 237 239 memset(&stat, 0, sizeof(stat)); 238 240 stat.max = INT_MAX; 239 241 stat.name = stub_name; 240 242 stat.perf = 1; 241 - local_irq_save(flags); 242 - t0 = sched_clock(); 243 + t0 = ktime_get_ns(); 243 244 kallsyms_on_each_symbol(find_symbol, &stat); 244 - t1 = sched_clock(); 245 - local_irq_restore(flags); 245 + t1 = ktime_get_ns(); 246 246 pr_info("kallsyms_on_each_symbol() traverse all: %lld ns\n", t1 - t0); 247 247 } 248 248 ··· 264 270 static void test_perf_kallsyms_on_each_match_symbol(void) 265 271 { 266 272 u64 t0, t1; 267 - unsigned long flags; 268 273 struct test_stat stat; 269 274 270 275 memset(&stat, 0, sizeof(stat)); 271 276 stat.max = INT_MAX; 272 277 stat.name = stub_name; 273 - local_irq_save(flags); 274 - t0 = sched_clock(); 278 + t0 = ktime_get_ns(); 275 279 kallsyms_on_each_match_symbol(match_symbol, stat.name, &stat); 276 - t1 = sched_clock(); 277 - local_irq_restore(flags); 280 + t1 = ktime_get_ns(); 278 281 pr_info("kallsyms_on_each_match_symbol() traverse all: %lld ns\n", t1 - t0); 279 282 } 280 283
+2
kernel/printk/printk.c
··· 123 123 { 124 124 return srcu_read_lock_held(&console_srcu); 125 125 } 126 + EXPORT_SYMBOL(console_srcu_read_lock_is_held); 126 127 #endif 127 128 128 129 enum devkmsg_log_bits { ··· 1892 1891 /** 1893 1892 * console_lock_spinning_disable_and_check - mark end of code where another 1894 1893 * thread was able to busy wait and check if there is a waiter 1894 + * @cookie: cookie returned from console_srcu_read_lock() 1895 1895 * 1896 1896 * This is called at the end of the section where spinning is allowed. 1897 1897 * It has two functions. First, it is a signal that it is no longer
+3
kernel/trace/bpf_trace.c
··· 846 846 return -EPERM; 847 847 if (unlikely(!nmi_uaccess_okay())) 848 848 return -EPERM; 849 + /* Task should not be pid=1 to avoid kernel panic. */ 850 + if (unlikely(is_global_init(current))) 851 + return -EPERM; 849 852 850 853 if (irqs_disabled()) { 851 854 /* Do an early check on signal validity. Otherwise,
-1
lib/lockref.c
··· 23 23 } \ 24 24 if (!--retry) \ 25 25 break; \ 26 - cpu_relax(); \ 27 26 } \ 28 27 } while (0) 29 28
+1 -1
lib/win_minmax.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /** 2 + /* 3 3 * lib/minmax.c: windowed min/max tracker 4 4 * 5 5 * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
+64 -35
mm/hugetlb.c
··· 94 94 static void hugetlb_vma_lock_free(struct vm_area_struct *vma); 95 95 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma); 96 96 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma); 97 + static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 98 + unsigned long start, unsigned long end); 97 99 98 100 static inline bool subpool_is_free(struct hugepage_subpool *spool) 99 101 { ··· 1183 1181 1184 1182 /* 1185 1183 * Reset and decrement one ref on hugepage private reservation. 1186 - * Called with mm->mmap_sem writer semaphore held. 1184 + * Called with mm->mmap_lock writer semaphore held. 1187 1185 * This function should be only used by move_vma() and operate on 1188 1186 * same sized vma. It should never come here with last ref on the 1189 1187 * reservation. ··· 4836 4834 { 4837 4835 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 4838 4836 return -EINVAL; 4837 + 4838 + /* 4839 + * PMD sharing is only possible for PUD_SIZE-aligned address ranges 4840 + * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this 4841 + * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. 4842 + */ 4843 + if (addr & ~PUD_MASK) { 4844 + /* 4845 + * hugetlb_vm_op_split is called right before we attempt to 4846 + * split the VMA. We will need to unshare PMDs in the old and 4847 + * new VMAs, so let's unshare before we split. 4848 + */ 4849 + unsigned long floor = addr & PUD_MASK; 4850 + unsigned long ceil = floor + PUD_SIZE; 4851 + 4852 + if (floor >= vma->vm_start && ceil <= vma->vm_end) 4853 + hugetlb_unshare_pmds(vma, floor, ceil); 4854 + } 4855 + 4839 4856 return 0; 4840 4857 } 4841 4858 ··· 5152 5131 5153 5132 /* 5154 5133 * We don't have to worry about the ordering of src and dst ptlocks 5155 - * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock. 5134 + * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
5156 5135 */ 5157 5136 if (src_ptl != dst_ptl) 5158 5137 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); ··· 6660 6639 spinlock_t *ptl; 6661 6640 ptep = huge_pte_offset(mm, address, psize); 6662 6641 if (!ptep) { 6663 - address |= last_addr_mask; 6664 - continue; 6642 + if (!uffd_wp) { 6643 + address |= last_addr_mask; 6644 + continue; 6645 + } 6646 + /* 6647 + * Userfaultfd wr-protect requires pgtable 6648 + * pre-allocations to install pte markers. 6649 + */ 6650 + ptep = huge_pte_alloc(mm, vma, address, psize); 6651 + if (!ptep) 6652 + break; 6665 6653 } 6666 6654 ptl = huge_pte_lock(h, mm, ptep); 6667 6655 if (huge_pmd_unshare(mm, vma, address, ptep)) { ··· 6688 6658 } 6689 6659 pte = huge_ptep_get(ptep); 6690 6660 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6691 - spin_unlock(ptl); 6692 - continue; 6693 - } 6694 - if (unlikely(is_hugetlb_entry_migration(pte))) { 6661 + /* Nothing to do. */ 6662 + } else if (unlikely(is_hugetlb_entry_migration(pte))) { 6695 6663 swp_entry_t entry = pte_to_swp_entry(pte); 6696 6664 struct page *page = pfn_swap_entry_to_page(entry); 6665 + pte_t newpte = pte; 6697 6666 6698 - if (!is_readable_migration_entry(entry)) { 6699 - pte_t newpte; 6700 - 6667 + if (is_writable_migration_entry(entry)) { 6701 6668 if (PageAnon(page)) 6702 6669 entry = make_readable_exclusive_migration_entry( 6703 6670 swp_offset(entry)); ··· 6702 6675 entry = make_readable_migration_entry( 6703 6676 swp_offset(entry)); 6704 6677 newpte = swp_entry_to_pte(entry); 6705 - if (uffd_wp) 6706 - newpte = pte_swp_mkuffd_wp(newpte); 6707 - else if (uffd_wp_resolve) 6708 - newpte = pte_swp_clear_uffd_wp(newpte); 6709 - set_huge_pte_at(mm, address, ptep, newpte); 6710 6678 pages++; 6711 6679 } 6712 - spin_unlock(ptl); 6713 - continue; 6714 - } 6715 - if (unlikely(pte_marker_uffd_wp(pte))) { 6716 - /* 6717 - * This is changing a non-present pte into a none pte, 6718 - * no need for huge_ptep_modify_prot_start/commit(). 
6719 - */ 6680 + 6681 + if (uffd_wp) 6682 + newpte = pte_swp_mkuffd_wp(newpte); 6683 + else if (uffd_wp_resolve) 6684 + newpte = pte_swp_clear_uffd_wp(newpte); 6685 + if (!pte_same(pte, newpte)) 6686 + set_huge_pte_at(mm, address, ptep, newpte); 6687 + } else if (unlikely(is_pte_marker(pte))) { 6688 + /* No other markers apply for now. */ 6689 + WARN_ON_ONCE(!pte_marker_uffd_wp(pte)); 6720 6690 if (uffd_wp_resolve) 6691 + /* Safe to modify directly (non-present->none). */ 6721 6692 huge_pte_clear(mm, address, ptep, psize); 6722 - } 6723 - if (!huge_pte_none(pte)) { 6693 + } else if (!huge_pte_none(pte)) { 6724 6694 pte_t old_pte; 6725 6695 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6726 6696 ··· 7352 7328 } 7353 7329 } 7354 7330 7355 - /* 7356 - * This function will unconditionally remove all the shared pmd pgtable entries 7357 - * within the specific vma for a hugetlbfs memory range. 7358 - */ 7359 - void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) 7331 + static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 7332 + unsigned long start, 7333 + unsigned long end) 7360 7334 { 7361 7335 struct hstate *h = hstate_vma(vma); 7362 7336 unsigned long sz = huge_page_size(h); 7363 7337 struct mm_struct *mm = vma->vm_mm; 7364 7338 struct mmu_notifier_range range; 7365 - unsigned long address, start, end; 7339 + unsigned long address; 7366 7340 spinlock_t *ptl; 7367 7341 pte_t *ptep; 7368 7342 7369 7343 if (!(vma->vm_flags & VM_MAYSHARE)) 7370 7344 return; 7371 - 7372 - start = ALIGN(vma->vm_start, PUD_SIZE); 7373 - end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 7374 7345 7375 7346 if (start >= end) 7376 7347 return; ··· 7396 7377 * Documentation/mm/mmu_notifier.rst. 7397 7378 */ 7398 7379 mmu_notifier_invalidate_range_end(&range); 7380 + } 7381 + 7382 + /* 7383 + * This function will unconditionally remove all the shared pmd pgtable entries 7384 + * within the specific vma for a hugetlbfs memory range. 
7385 + */ 7386 + void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) 7387 + { 7388 + hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), 7389 + ALIGN_DOWN(vma->vm_end, PUD_SIZE)); 7399 7390 } 7400 7391 7401 7392 #ifdef CONFIG_CMA
+1 -1
mm/kasan/report.c
··· 119 119 * Whether the KASAN KUnit test suite is currently being executed. 120 120 * Updated in kasan_test.c. 121 121 */ 122 - bool kasan_kunit_executing; 122 + static bool kasan_kunit_executing; 123 123 124 124 void kasan_kunit_test_suite_start(void) 125 125 {
+7 -9
mm/khugepaged.c
··· 1460 1460 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) 1461 1461 return SCAN_VMA_CHECK; 1462 1462 1463 - /* 1464 - * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings 1465 - * that got written to. Without this, we'd have to also lock the 1466 - * anon_vma if one exists. 1467 - */ 1468 - if (vma->anon_vma) 1469 - return SCAN_VMA_CHECK; 1470 - 1471 1463 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ 1472 1464 if (userfaultfd_wp(vma)) 1473 1465 return SCAN_PTE_UFFD_WP; ··· 1559 1567 } 1560 1568 1561 1569 /* step 4: remove pte entries */ 1570 + /* we make no change to anon, but protect concurrent anon page lookup */ 1571 + if (vma->anon_vma) 1572 + anon_vma_lock_write(vma->anon_vma); 1573 + 1562 1574 collapse_and_free_pmd(mm, vma, haddr, pmd); 1563 1575 1576 + if (vma->anon_vma) 1577 + anon_vma_unlock_write(vma->anon_vma); 1564 1578 i_mmap_unlock_write(vma->vm_file->f_mapping); 1565 1579 1566 1580 maybe_install_pmd: ··· 2647 2649 goto out_nolock; 2648 2650 } 2649 2651 2650 - hend = vma->vm_end & HPAGE_PMD_MASK; 2652 + hend = min(hend, vma->vm_end & HPAGE_PMD_MASK); 2651 2653 } 2652 2654 mmap_assert_locked(mm); 2653 2655 memset(cc->node_load, 0, sizeof(cc->node_load));
+1 -1
mm/madvise.c
··· 130 130 #endif /* CONFIG_ANON_VMA_NAME */ 131 131 /* 132 132 * Update the vm_flags on region of a vma, splitting it or merging it as 133 - * necessary. Must be called with mmap_sem held for writing; 133 + * necessary. Must be called with mmap_lock held for writing; 134 134 * Caller should ensure anon_name stability by raising its refcount even when 135 135 * anon_name belongs to a valid vma because this function might free that vma. 136 136 */
+7 -1
mm/memblock.c
··· 1640 1640 end = PFN_DOWN(base + size); 1641 1641 1642 1642 for (; cursor < end; cursor++) { 1643 - memblock_free_pages(pfn_to_page(cursor), cursor, 0); 1643 + /* 1644 + * Reserved pages are always initialized by the end of 1645 + * memblock_free_all() (by memmap_init() and, if deferred 1646 + * initialization is enabled, memmap_init_reserved_pages()), so 1647 + * these pages can be released directly to the buddy allocator. 1648 + */ 1649 + __free_pages_core(pfn_to_page(cursor), 0); 1644 1650 totalram_pages_inc(); 1645 1651 } 1646 1652 }
+6 -2
mm/mmap.c
··· 1524 1524 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) 1525 1525 return 1; 1526 1526 1527 + /* Do we need write faults for uffd-wp tracking? */ 1528 + if (userfaultfd_wp(vma)) 1529 + return 1; 1530 + 1527 1531 /* Specialty mapping? */ 1528 1532 if (vm_flags & VM_PFNMAP) 1529 1533 return 0; ··· 2294 2290 * @start: The aligned start address to munmap. 2295 2291 * @end: The aligned end address to munmap. 2296 2292 * @uf: The userfaultfd list_head 2297 - * @downgrade: Set to true to attempt a write downgrade of the mmap_sem 2293 + * @downgrade: Set to true to attempt a write downgrade of the mmap_lock 2298 2294 * 2299 2295 * If @downgrade is true, check return code for potential release of the lock. 2300 2296 */ ··· 2469 2465 * @len: The length of the range to munmap 2470 2466 * @uf: The userfaultfd list_head 2471 2467 * @downgrade: set to true if the user wants to attempt to write_downgrade the 2472 - * mmap_sem 2468 + * mmap_lock 2473 2469 * 2474 2470 * This function takes a @mas that is either pointing to the previous VMA or set 2475 2471 * to MA_START and sets it up to remove the mapping(s). The @len will be
+6 -3
mm/nommu.c
··· 559 559 560 560 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm) 561 561 { 562 - mm->map_count++; 563 562 vma->vm_mm = mm; 564 563 565 564 /* add the VMA to the mapping */ ··· 586 587 BUG_ON(!vma->vm_region); 587 588 588 589 setup_vma_to_mm(vma, mm); 590 + mm->map_count++; 589 591 590 592 /* add the VMA to the tree */ 591 593 vma_mas_store(vma, mas); ··· 1240 1240 error_just_free: 1241 1241 up_write(&nommu_region_sem); 1242 1242 error: 1243 + mas_destroy(&mas); 1243 1244 if (region->vm_file) 1244 1245 fput(region->vm_file); 1245 1246 kmem_cache_free(vm_region_jar, region); ··· 1251 1250 1252 1251 sharing_violation: 1253 1252 up_write(&nommu_region_sem); 1254 - mas_destroy(&mas); 1255 1253 pr_warn("Attempt to share mismatched mappings\n"); 1256 1254 ret = -EINVAL; 1257 1255 goto error; ··· 1347 1347 if (vma->vm_file) 1348 1348 return -ENOMEM; 1349 1349 1350 + mm = vma->vm_mm; 1350 1351 if (mm->map_count >= sysctl_max_map_count) 1351 1352 return -ENOMEM; 1352 1353 ··· 1399 1398 mas_set_range(&mas, vma->vm_start, vma->vm_end - 1); 1400 1399 mas_store(&mas, vma); 1401 1400 vma_mas_store(new, &mas); 1401 + mm->map_count++; 1402 1402 return 0; 1403 1403 1404 1404 err_mas_preallocate: ··· 1511 1509 erase_whole_vma: 1512 1510 if (delete_vma_from_mm(vma)) 1513 1511 ret = -ENOMEM; 1514 - delete_vma(mm, vma); 1512 + else 1513 + delete_vma(mm, vma); 1515 1514 return ret; 1516 1515 } 1517 1516
+2 -4
mm/shmem.c
··· 478 478 if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) || 479 479 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))) 480 480 return false; 481 - if (shmem_huge_force) 482 - return true; 483 - if (shmem_huge == SHMEM_HUGE_FORCE) 484 - return true; 485 481 if (shmem_huge == SHMEM_HUGE_DENY) 486 482 return false; 483 + if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE) 484 + return true; 487 485 488 486 switch (SHMEM_SB(inode->i_sb)->huge) { 489 487 case SHMEM_HUGE_ALWAYS:
+2
mm/slab.c
··· 2211 2211 raw_spin_unlock_irq(&n->list_lock); 2212 2212 slab_destroy(cache, slab); 2213 2213 nr_freed++; 2214 + 2215 + cond_resched(); 2214 2216 } 2215 2217 out: 2216 2218 return nr_freed;
+14 -4
net/bluetooth/hci_conn.c
··· 821 821 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis) 822 822 { 823 823 struct iso_list_data *d; 824 + int ret; 824 825 825 826 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis); 826 827 ··· 832 831 d->big = big; 833 832 d->bis = bis; 834 833 835 - return hci_cmd_sync_queue(hdev, terminate_big_sync, d, 836 - terminate_big_destroy); 834 + ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d, 835 + terminate_big_destroy); 836 + if (ret) 837 + kfree(d); 838 + 839 + return ret; 837 840 } 838 841 839 842 static int big_terminate_sync(struct hci_dev *hdev, void *data) ··· 862 857 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle) 863 858 { 864 859 struct iso_list_data *d; 860 + int ret; 865 861 866 862 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle); 867 863 ··· 873 867 d->big = big; 874 868 d->sync_handle = sync_handle; 875 869 876 - return hci_cmd_sync_queue(hdev, big_terminate_sync, d, 877 - terminate_big_destroy); 870 + ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d, 871 + terminate_big_destroy); 872 + if (ret) 873 + kfree(d); 874 + 875 + return ret; 878 876 } 879 877 880 878 /* Cleanup BIS connection
+4 -1
net/bluetooth/hci_event.c
··· 3848 3848 conn->handle, conn->link); 3849 3849 3850 3850 /* Create CIS if LE is already connected */ 3851 - if (conn->link && conn->link->state == BT_CONNECTED) 3851 + if (conn->link && conn->link->state == BT_CONNECTED) { 3852 + rcu_read_unlock(); 3852 3853 hci_le_create_cis(conn->link); 3854 + rcu_read_lock(); 3855 + } 3853 3856 3854 3857 if (i == rp->num_handles) 3855 3858 break;
+6 -13
net/bluetooth/hci_sync.c
··· 3572 3572 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) 3573 3573 { 3574 3574 /* Use Read LE Buffer Size V2 if supported */ 3575 - if (hdev->commands[41] & 0x20) 3575 + if (iso_capable(hdev) && hdev->commands[41] & 0x20) 3576 3576 return __hci_cmd_sync_status(hdev, 3577 3577 HCI_OP_LE_READ_BUFFER_SIZE_V2, 3578 3578 0, NULL, HCI_CMD_TIMEOUT); ··· 3597 3597 3598 3598 /* LE Controller init stage 2 command sequence */ 3599 3599 static const struct hci_init_stage le_init2[] = { 3600 - /* HCI_OP_LE_READ_BUFFER_SIZE */ 3601 - HCI_INIT(hci_le_read_buffer_size_sync), 3602 3600 /* HCI_OP_LE_READ_LOCAL_FEATURES */ 3603 3601 HCI_INIT(hci_le_read_local_features_sync), 3602 + /* HCI_OP_LE_READ_BUFFER_SIZE */ 3603 + HCI_INIT(hci_le_read_buffer_size_sync), 3604 3604 /* HCI_OP_LE_READ_SUPPORTED_STATES */ 3605 3605 HCI_INIT(hci_le_read_supported_states_sync), 3606 3606 {} ··· 6187 6187 6188 6188 static int _update_adv_data_sync(struct hci_dev *hdev, void *data) 6189 6189 { 6190 - u8 instance = *(u8 *)data; 6191 - 6192 - kfree(data); 6190 + u8 instance = PTR_ERR(data); 6193 6191 6194 6192 return hci_update_adv_data_sync(hdev, instance); 6195 6193 } 6196 6194 6197 6195 int hci_update_adv_data(struct hci_dev *hdev, u8 instance) 6198 6196 { 6199 - u8 *inst_ptr = kmalloc(1, GFP_KERNEL); 6200 - 6201 - if (!inst_ptr) 6202 - return -ENOMEM; 6203 - 6204 - *inst_ptr = instance; 6205 - return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL); 6197 + return hci_cmd_sync_queue(hdev, _update_adv_data_sync, 6198 + ERR_PTR(instance), NULL); 6206 6199 }
+26 -38
net/bluetooth/iso.c
··· 289 289 hci_dev_unlock(hdev); 290 290 hci_dev_put(hdev); 291 291 292 + err = iso_chan_add(conn, sk, NULL); 293 + if (err) 294 + return err; 295 + 292 296 lock_sock(sk); 293 297 294 298 /* Update source addr of the socket */ 295 299 bacpy(&iso_pi(sk)->src, &hcon->src); 296 - 297 - err = iso_chan_add(conn, sk, NULL); 298 - if (err) 299 - goto release; 300 300 301 301 if (hcon->state == BT_CONNECTED) { 302 302 iso_sock_clear_timer(sk); ··· 306 306 iso_sock_set_timer(sk, sk->sk_sndtimeo); 307 307 } 308 308 309 - release: 310 309 release_sock(sk); 311 310 return err; 312 311 ··· 371 372 hci_dev_unlock(hdev); 372 373 hci_dev_put(hdev); 373 374 375 + err = iso_chan_add(conn, sk, NULL); 376 + if (err) 377 + return err; 378 + 374 379 lock_sock(sk); 375 380 376 381 /* Update source addr of the socket */ 377 382 bacpy(&iso_pi(sk)->src, &hcon->src); 378 - 379 - err = iso_chan_add(conn, sk, NULL); 380 - if (err) 381 - goto release; 382 383 383 384 if (hcon->state == BT_CONNECTED) { 384 385 iso_sock_clear_timer(sk); ··· 391 392 iso_sock_set_timer(sk, sk->sk_sndtimeo); 392 393 } 393 394 394 - release: 395 395 release_sock(sk); 396 396 return err; 397 397 ··· 893 895 if (!hdev) 894 896 return -EHOSTUNREACH; 895 897 896 - hci_dev_lock(hdev); 897 - 898 898 err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, 899 899 le_addr_type(iso_pi(sk)->dst_type), 900 900 iso_pi(sk)->bc_sid); 901 901 902 - hci_dev_unlock(hdev); 903 902 hci_dev_put(hdev); 904 903 905 904 return err; ··· 1427 1432 struct sock *parent; 1428 1433 struct sock *sk = conn->sk; 1429 1434 struct hci_ev_le_big_sync_estabilished *ev; 1435 + struct hci_conn *hcon; 1430 1436 1431 1437 BT_DBG("conn %p", conn); 1432 1438 1433 1439 if (sk) { 1434 1440 iso_sock_ready(conn->sk); 1435 1441 } else { 1436 - iso_conn_lock(conn); 1437 - 1438 - if (!conn->hcon) { 1439 - iso_conn_unlock(conn); 1442 + hcon = conn->hcon; 1443 + if (!hcon) 1440 1444 return; 1441 - } 1442 1445 1443 - ev = hci_recv_event_data(conn->hcon->hdev, 1446 + ev = 
hci_recv_event_data(hcon->hdev, 1444 1447 HCI_EVT_LE_BIG_SYNC_ESTABILISHED); 1445 1448 if (ev) 1446 - parent = iso_get_sock_listen(&conn->hcon->src, 1447 - &conn->hcon->dst, 1449 + parent = iso_get_sock_listen(&hcon->src, 1450 + &hcon->dst, 1448 1451 iso_match_big, ev); 1449 1452 else 1450 - parent = iso_get_sock_listen(&conn->hcon->src, 1453 + parent = iso_get_sock_listen(&hcon->src, 1451 1454 BDADDR_ANY, NULL, NULL); 1452 1455 1453 - if (!parent) { 1454 - iso_conn_unlock(conn); 1456 + if (!parent) 1455 1457 return; 1456 - } 1457 1458 1458 1459 lock_sock(parent); 1459 1460 ··· 1457 1466 BTPROTO_ISO, GFP_ATOMIC, 0); 1458 1467 if (!sk) { 1459 1468 release_sock(parent); 1460 - iso_conn_unlock(conn); 1461 1469 return; 1462 1470 } 1463 1471 1464 1472 iso_sock_init(sk, parent); 1465 1473 1466 - bacpy(&iso_pi(sk)->src, &conn->hcon->src); 1467 - iso_pi(sk)->src_type = conn->hcon->src_type; 1474 + bacpy(&iso_pi(sk)->src, &hcon->src); 1475 + iso_pi(sk)->src_type = hcon->src_type; 1468 1476 1469 1477 /* If hcon has no destination address (BDADDR_ANY) it means it 1470 1478 * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to 1471 1479 * initialize using the parent socket destination address. 
1472 1480 */ 1473 - if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) { 1474 - bacpy(&conn->hcon->dst, &iso_pi(parent)->dst); 1475 - conn->hcon->dst_type = iso_pi(parent)->dst_type; 1476 - conn->hcon->sync_handle = iso_pi(parent)->sync_handle; 1481 + if (!bacmp(&hcon->dst, BDADDR_ANY)) { 1482 + bacpy(&hcon->dst, &iso_pi(parent)->dst); 1483 + hcon->dst_type = iso_pi(parent)->dst_type; 1484 + hcon->sync_handle = iso_pi(parent)->sync_handle; 1477 1485 } 1478 1486 1479 - bacpy(&iso_pi(sk)->dst, &conn->hcon->dst); 1480 - iso_pi(sk)->dst_type = conn->hcon->dst_type; 1487 + bacpy(&iso_pi(sk)->dst, &hcon->dst); 1488 + iso_pi(sk)->dst_type = hcon->dst_type; 1481 1489 1482 - hci_conn_hold(conn->hcon); 1483 - __iso_chan_add(conn, sk, parent); 1490 + hci_conn_hold(hcon); 1491 + iso_chan_add(conn, sk, parent); 1484 1492 1485 1493 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) 1486 1494 sk->sk_state = BT_CONNECT2; ··· 1490 1500 parent->sk_data_ready(parent); 1491 1501 1492 1502 release_sock(parent); 1493 - 1494 - iso_conn_unlock(conn); 1495 1503 } 1496 1504 } 1497 1505
+1 -1
net/bluetooth/mgmt_util.h
··· 27 27 struct sock *sk; 28 28 u8 handle; 29 29 u8 instance; 30 - u8 param[sizeof(struct mgmt_cp_mesh_send) + 29]; 30 + u8 param[sizeof(struct mgmt_cp_mesh_send) + 31]; 31 31 }; 32 32 33 33 struct mgmt_pending_cmd {
+6 -1
net/bluetooth/rfcomm/sock.c
··· 391 391 addr->sa_family != AF_BLUETOOTH) 392 392 return -EINVAL; 393 393 394 + sock_hold(sk); 394 395 lock_sock(sk); 395 396 396 397 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { ··· 411 410 d->sec_level = rfcomm_pi(sk)->sec_level; 412 411 d->role_switch = rfcomm_pi(sk)->role_switch; 413 412 413 + /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */ 414 + release_sock(sk); 414 415 err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, 415 416 sa->rc_channel); 416 - if (!err) 417 + lock_sock(sk); 418 + if (!err && !sock_flag(sk, SOCK_ZAPPED)) 417 419 err = bt_sock_wait_state(sk, BT_CONNECTED, 418 420 sock_sndtimeo(sk, flags & O_NONBLOCK)); 419 421 420 422 done: 421 423 release_sock(sk); 424 + sock_put(sk); 422 425 return err; 423 426 } 424 427
+7 -4
net/ethtool/rss.c
··· 122 122 { 123 123 const struct rss_reply_data *data = RSS_REPDATA(reply_base); 124 124 125 - if (nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc) || 126 - nla_put(skb, ETHTOOL_A_RSS_INDIR, 127 - sizeof(u32) * data->indir_size, data->indir_table) || 128 - nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)) 125 + if ((data->hfunc && 126 + nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) || 127 + (data->indir_size && 128 + nla_put(skb, ETHTOOL_A_RSS_INDIR, 129 + sizeof(u32) * data->indir_size, data->indir_table)) || 130 + (data->hkey_size && 131 + nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey))) 129 132 return -EMSGSIZE; 130 133 131 134 return 0;
+15 -2
net/ipv4/inet_hashtables.c
··· 650 650 spin_lock(lock); 651 651 if (osk) { 652 652 WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); 653 - ret = sk_nulls_del_node_init_rcu(osk); 654 - } else if (found_dup_sk) { 653 + ret = sk_hashed(osk); 654 + if (ret) { 655 + /* Before deleting the node, we insert a new one to make 656 + * sure that the look-up-sk process would not miss either 657 + * of them and that at least one node would exist in ehash 658 + * table all the time. Otherwise there's a tiny chance 659 + * that lookup process could find nothing in ehash table. 660 + */ 661 + __sk_nulls_add_node_tail_rcu(sk, list); 662 + sk_nulls_del_node_init_rcu(osk); 663 + } 664 + goto unlock; 665 + } 666 + if (found_dup_sk) { 655 667 *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); 656 668 if (*found_dup_sk) 657 669 ret = false; ··· 672 660 if (ret) 673 661 __sk_nulls_add_node_rcu(sk, list); 674 662 663 + unlock: 675 664 spin_unlock(lock); 676 665 677 666 return ret;
+4 -4
net/ipv4/inet_timewait_sock.c
··· 91 91 } 92 92 EXPORT_SYMBOL_GPL(inet_twsk_put); 93 93 94 - static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw, 95 - struct hlist_nulls_head *list) 94 + static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw, 95 + struct hlist_nulls_head *list) 96 96 { 97 - hlist_nulls_add_head_rcu(&tw->tw_node, list); 97 + hlist_nulls_add_tail_rcu(&tw->tw_node, list); 98 98 } 99 99 100 100 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw, ··· 147 147 148 148 spin_lock(lock); 149 149 150 - inet_twsk_add_node_rcu(tw, &ehead->chain); 150 + inet_twsk_add_node_tail_rcu(tw, &ehead->chain); 151 151 152 152 /* Step 3: Remove SK from hash chain */ 153 153 if (__sk_nulls_del_node_init_rcu(sk))
+2
net/ipv4/tcp.c
··· 435 435 436 436 /* There's a bubble in the pipe until at least the first ACK. */ 437 437 tp->app_limited = ~0U; 438 + tp->rate_app_limited = 1; 438 439 439 440 /* See draft-stevens-tcpca-spec-01 for discussion of the 440 441 * initialization of these values. ··· 3179 3178 tp->plb_rehash = 0; 3180 3179 /* There's a bubble in the pipe until at least the first ACK. */ 3181 3180 tp->app_limited = ~0U; 3181 + tp->rate_app_limited = 1; 3182 3182 tp->rack.mstamp = 0; 3183 3183 tp->rack.advanced = 0; 3184 3184 tp->rack.reo_wnd_steps = 1;
+1 -1
net/ipv4/tcp_ulp.c
··· 139 139 if (sk->sk_socket) 140 140 clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 141 141 142 - err = -EINVAL; 142 + err = -ENOTCONN; 143 143 if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN) 144 144 goto out_err; 145 145
+50 -52
net/l2tp/l2tp_core.c
··· 104 104 /* per-net private data for this module */ 105 105 static unsigned int l2tp_net_id; 106 106 struct l2tp_net { 107 - struct list_head l2tp_tunnel_list; 108 - /* Lock for write access to l2tp_tunnel_list */ 109 - spinlock_t l2tp_tunnel_list_lock; 107 + /* Lock for write access to l2tp_tunnel_idr */ 108 + spinlock_t l2tp_tunnel_idr_lock; 109 + struct idr l2tp_tunnel_idr; 110 110 struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; 111 111 /* Lock for write access to l2tp_session_hlist */ 112 112 spinlock_t l2tp_session_hlist_lock; ··· 208 208 struct l2tp_tunnel *tunnel; 209 209 210 210 rcu_read_lock_bh(); 211 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 212 - if (tunnel->tunnel_id == tunnel_id && 213 - refcount_inc_not_zero(&tunnel->ref_count)) { 214 - rcu_read_unlock_bh(); 215 - 216 - return tunnel; 217 - } 211 + tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id); 212 + if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) { 213 + rcu_read_unlock_bh(); 214 + return tunnel; 218 215 } 219 216 rcu_read_unlock_bh(); 220 217 ··· 221 224 222 225 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) 223 226 { 224 - const struct l2tp_net *pn = l2tp_pernet(net); 227 + struct l2tp_net *pn = l2tp_pernet(net); 228 + unsigned long tunnel_id, tmp; 225 229 struct l2tp_tunnel *tunnel; 226 230 int count = 0; 227 231 228 232 rcu_read_lock_bh(); 229 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 230 - if (++count > nth && 233 + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { 234 + if (tunnel && ++count > nth && 231 235 refcount_inc_not_zero(&tunnel->ref_count)) { 232 236 rcu_read_unlock_bh(); 233 237 return tunnel; ··· 1041 1043 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); 1042 1044 nf_reset_ct(skb); 1043 1045 1044 - bh_lock_sock(sk); 1046 + bh_lock_sock_nested(sk); 1045 1047 if (sock_owned_by_user(sk)) { 1046 1048 kfree_skb(skb); 1047 1049 ret = 
NET_XMIT_DROP; ··· 1225 1227 l2tp_tunnel_delete(tunnel); 1226 1228 } 1227 1229 1230 + static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel) 1231 + { 1232 + struct l2tp_net *pn = l2tp_pernet(net); 1233 + 1234 + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); 1235 + idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id); 1236 + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); 1237 + } 1238 + 1228 1239 /* Workqueue tunnel deletion function */ 1229 1240 static void l2tp_tunnel_del_work(struct work_struct *work) 1230 1241 { ··· 1241 1234 del_work); 1242 1235 struct sock *sk = tunnel->sock; 1243 1236 struct socket *sock = sk->sk_socket; 1244 - struct l2tp_net *pn; 1245 1237 1246 1238 l2tp_tunnel_closeall(tunnel); 1247 1239 ··· 1254 1248 } 1255 1249 } 1256 1250 1257 - /* Remove the tunnel struct from the tunnel list */ 1258 - pn = l2tp_pernet(tunnel->l2tp_net); 1259 - spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1260 - list_del_rcu(&tunnel->list); 1261 - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1262 - 1251 + l2tp_tunnel_remove(tunnel->l2tp_net, tunnel); 1263 1252 /* drop initial ref */ 1264 1253 l2tp_tunnel_dec_refcount(tunnel); 1265 1254 ··· 1385 1384 return err; 1386 1385 } 1387 1386 1388 - static struct lock_class_key l2tp_socket_class; 1389 - 1390 1387 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, 1391 1388 struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) 1392 1389 { ··· 1454 1455 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, 1455 1456 struct l2tp_tunnel_cfg *cfg) 1456 1457 { 1457 - struct l2tp_tunnel *tunnel_walk; 1458 - struct l2tp_net *pn; 1458 + struct l2tp_net *pn = l2tp_pernet(net); 1459 + u32 tunnel_id = tunnel->tunnel_id; 1459 1460 struct socket *sock; 1460 1461 struct sock *sk; 1461 1462 int ret; 1463 + 1464 + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); 1465 + ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id, 1466 + GFP_ATOMIC); 1467 + 
spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); 1468 + if (ret) 1469 + return ret == -ENOSPC ? -EEXIST : ret; 1462 1470 1463 1471 if (tunnel->fd < 0) { 1464 1472 ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id, ··· 1480 1474 } 1481 1475 1482 1476 sk = sock->sk; 1477 + lock_sock(sk); 1483 1478 write_lock_bh(&sk->sk_callback_lock); 1484 1479 ret = l2tp_validate_socket(sk, net, tunnel->encap); 1485 1480 if (ret < 0) 1486 1481 goto err_inval_sock; 1487 1482 rcu_assign_sk_user_data(sk, tunnel); 1488 1483 write_unlock_bh(&sk->sk_callback_lock); 1489 - 1490 - tunnel->l2tp_net = net; 1491 - pn = l2tp_pernet(net); 1492 - 1493 - sock_hold(sk); 1494 - tunnel->sock = sk; 1495 - 1496 - spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1497 - list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) { 1498 - if (tunnel_walk->tunnel_id == tunnel->tunnel_id) { 1499 - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1500 - sock_put(sk); 1501 - ret = -EEXIST; 1502 - goto err_sock; 1503 - } 1504 - } 1505 - list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); 1506 - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1507 1484 1508 1485 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { 1509 1486 struct udp_tunnel_sock_cfg udp_cfg = { ··· 1501 1512 1502 1513 tunnel->old_sk_destruct = sk->sk_destruct; 1503 1514 sk->sk_destruct = &l2tp_tunnel_destruct; 1504 - lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, 1505 - "l2tp_sock"); 1506 1515 sk->sk_allocation = GFP_ATOMIC; 1516 + release_sock(sk); 1517 + 1518 + sock_hold(sk); 1519 + tunnel->sock = sk; 1520 + tunnel->l2tp_net = net; 1521 + 1522 + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); 1523 + idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id); 1524 + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); 1507 1525 1508 1526 trace_register_tunnel(tunnel); 1509 1527 ··· 1519 1523 1520 1524 return 0; 1521 1525 1522 - err_sock: 1523 - write_lock_bh(&sk->sk_callback_lock); 1524 - rcu_assign_sk_user_data(sk, NULL); 1525 1526 err_inval_sock: 1526 1527 
write_unlock_bh(&sk->sk_callback_lock); 1528 + release_sock(sk); 1527 1529 1528 1530 if (tunnel->fd < 0) 1529 1531 sock_release(sock); 1530 1532 else 1531 1533 sockfd_put(sock); 1532 1534 err: 1535 + l2tp_tunnel_remove(net, tunnel); 1533 1536 return ret; 1534 1537 } 1535 1538 EXPORT_SYMBOL_GPL(l2tp_tunnel_register); ··· 1642 1647 struct l2tp_net *pn = net_generic(net, l2tp_net_id); 1643 1648 int hash; 1644 1649 1645 - INIT_LIST_HEAD(&pn->l2tp_tunnel_list); 1646 - spin_lock_init(&pn->l2tp_tunnel_list_lock); 1650 + idr_init(&pn->l2tp_tunnel_idr); 1651 + spin_lock_init(&pn->l2tp_tunnel_idr_lock); 1647 1652 1648 1653 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) 1649 1654 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); ··· 1657 1662 { 1658 1663 struct l2tp_net *pn = l2tp_pernet(net); 1659 1664 struct l2tp_tunnel *tunnel = NULL; 1665 + unsigned long tunnel_id, tmp; 1660 1666 int hash; 1661 1667 1662 1668 rcu_read_lock_bh(); 1663 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 1664 - l2tp_tunnel_delete(tunnel); 1669 + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { 1670 + if (tunnel) 1671 + l2tp_tunnel_delete(tunnel); 1665 1672 } 1666 1673 rcu_read_unlock_bh(); 1667 1674 ··· 1673 1676 1674 1677 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) 1675 1678 WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash])); 1679 + idr_destroy(&pn->l2tp_tunnel_idr); 1676 1680 } 1677 1681 1678 1682 static struct pernet_operations l2tp_net_ops = {
+5 -3
net/mac80211/agg-tx.c
··· 491 491 { 492 492 struct tid_ampdu_tx *tid_tx; 493 493 struct ieee80211_local *local = sta->local; 494 - struct ieee80211_sub_if_data *sdata = sta->sdata; 494 + struct ieee80211_sub_if_data *sdata; 495 495 struct ieee80211_ampdu_params params = { 496 496 .sta = &sta->sta, 497 497 .action = IEEE80211_AMPDU_TX_START, ··· 511 511 */ 512 512 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); 513 513 514 - ieee80211_agg_stop_txq(sta, tid); 515 - 516 514 /* 517 515 * Make sure no packets are being processed. This ensures that 518 516 * we have a valid starting sequence number and that in-flight ··· 519 521 */ 520 522 synchronize_net(); 521 523 524 + sdata = sta->sdata; 522 525 params.ssn = sta->tid_seq[tid] >> 4; 523 526 ret = drv_ampdu_action(local, sdata, &params); 524 527 tid_tx->ssn = params.ssn; ··· 533 534 */ 534 535 set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state); 535 536 } else if (ret) { 537 + if (!sdata) 538 + return; 539 + 536 540 ht_dbg(sdata, 537 541 "BA request denied - HW unavailable for %pM tid %d\n", 538 542 sta->sta.addr, tid);
+7
net/mac80211/cfg.c
··· 147 147 link_conf->bssid_index = 0; 148 148 link_conf->nontransmitted = false; 149 149 link_conf->ema_ap = false; 150 + link_conf->bssid_indicator = 0; 150 151 151 152 if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev) 152 153 return -EINVAL; ··· 1511 1510 1512 1511 kfree(link_conf->ftmr_params); 1513 1512 link_conf->ftmr_params = NULL; 1513 + 1514 + sdata->vif.mbssid_tx_vif = NULL; 1515 + link_conf->bssid_index = 0; 1516 + link_conf->nontransmitted = false; 1517 + link_conf->ema_ap = false; 1518 + link_conf->bssid_indicator = 0; 1514 1519 1515 1520 __sta_info_flush(sdata, true); 1516 1521 ieee80211_free_keys(sdata, true);
+3 -2
net/mac80211/debugfs_sta.c
··· 167 167 continue; 168 168 txqi = to_txq_info(sta->sta.txq[i]); 169 169 p += scnprintf(p, bufsz + buf - p, 170 - "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n", 170 + "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n", 171 171 txqi->txq.tid, 172 172 txqi->txq.ac, 173 173 txqi->tin.backlog_bytes, ··· 182 182 txqi->flags, 183 183 test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN", 184 184 test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "", 185 - test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : ""); 185 + test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "", 186 + test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : ""); 186 187 } 187 188 188 189 rcu_read_unlock();
+3
net/mac80211/driver-ops.c
··· 392 392 393 393 might_sleep(); 394 394 395 + if (!sdata) 396 + return -EIO; 397 + 395 398 sdata = get_bss_sdata(sdata); 396 399 if (!check_sdata_in_driver(sdata)) 397 400 return -EIO;
+1 -1
net/mac80211/driver-ops.h
··· 1199 1199 1200 1200 /* In reconfig don't transmit now, but mark for waking later */ 1201 1201 if (local->in_reconfig) { 1202 - set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags); 1202 + set_bit(IEEE80211_TXQ_DIRTY, &txq->flags); 1203 1203 return; 1204 1204 } 1205 1205
+31
net/mac80211/ht.c
··· 391 391 392 392 tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; 393 393 if (!blocked && tid_tx) { 394 + struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); 395 + struct ieee80211_sub_if_data *sdata = 396 + vif_to_sdata(txqi->txq.vif); 397 + struct fq *fq = &sdata->local->fq; 398 + 399 + spin_lock_bh(&fq->lock); 400 + 401 + /* Allow only frags to be dequeued */ 402 + set_bit(IEEE80211_TXQ_STOP, &txqi->flags); 403 + 404 + if (!skb_queue_empty(&txqi->frags)) { 405 + /* Fragmented Tx is ongoing, wait for it to 406 + * finish. Reschedule worker to retry later. 407 + */ 408 + 409 + spin_unlock_bh(&fq->lock); 410 + spin_unlock_bh(&sta->lock); 411 + 412 + /* Give the task working on the txq a chance 413 + * to send out the queued frags 414 + */ 415 + synchronize_net(); 416 + 417 + mutex_unlock(&sta->ampdu_mlme.mtx); 418 + 419 + ieee80211_queue_work(&sdata->local->hw, work); 420 + return; 421 + } 422 + 423 + spin_unlock_bh(&fq->lock); 424 + 394 425 /* 395 426 * Assign it over to the normal tid_tx array 396 427 * where it "goes live".
+1 -1
net/mac80211/ieee80211_i.h
··· 838 838 IEEE80211_TXQ_STOP, 839 839 IEEE80211_TXQ_AMPDU, 840 840 IEEE80211_TXQ_NO_AMSDU, 841 - IEEE80211_TXQ_STOP_NETIF_TX, 841 + IEEE80211_TXQ_DIRTY, 842 842 }; 843 843 844 844 /**
+3 -2
net/mac80211/iface.c
··· 364 364 365 365 /* No support for VLAN with MLO yet */ 366 366 if (iftype == NL80211_IFTYPE_AP_VLAN && 367 - nsdata->wdev.use_4addr) 367 + sdata->wdev.use_4addr && 368 + nsdata->vif.type == NL80211_IFTYPE_AP && 369 + nsdata->vif.valid_links) 368 370 return -EOPNOTSUPP; 369 371 370 372 /* ··· 2197 2195 2198 2196 ret = cfg80211_register_netdevice(ndev); 2199 2197 if (ret) { 2200 - ieee80211_if_free(ndev); 2201 2198 free_netdev(ndev); 2202 2199 return ret; 2203 2200 }
+102 -123
net/mac80211/rx.c
··· 4049 4049 #undef CALL_RXH 4050 4050 } 4051 4051 4052 + static bool 4053 + ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) 4054 + { 4055 + if (!sta->mlo) 4056 + return false; 4057 + 4058 + return !!(sta->valid_links & BIT(link_id)); 4059 + } 4060 + 4061 + static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx, 4062 + u8 link_id) 4063 + { 4064 + rx->link_id = link_id; 4065 + rx->link = rcu_dereference(rx->sdata->link[link_id]); 4066 + 4067 + if (!rx->sta) 4068 + return rx->link; 4069 + 4070 + if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id)) 4071 + return false; 4072 + 4073 + rx->link_sta = rcu_dereference(rx->sta->link[link_id]); 4074 + 4075 + return rx->link && rx->link_sta; 4076 + } 4077 + 4078 + static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, 4079 + struct ieee80211_sta *pubsta, 4080 + int link_id) 4081 + { 4082 + struct sta_info *sta; 4083 + 4084 + sta = container_of(pubsta, struct sta_info, sta); 4085 + 4086 + rx->link_id = link_id; 4087 + rx->sta = sta; 4088 + 4089 + if (sta) { 4090 + rx->local = sta->sdata->local; 4091 + if (!rx->sdata) 4092 + rx->sdata = sta->sdata; 4093 + rx->link_sta = &sta->deflink; 4094 + } 4095 + 4096 + if (link_id < 0) 4097 + rx->link = &rx->sdata->deflink; 4098 + else if (!ieee80211_rx_data_set_link(rx, link_id)) 4099 + return false; 4100 + 4101 + return true; 4102 + } 4103 + 4052 4104 /* 4053 4105 * This function makes calls into the RX path, therefore 4054 4106 * it has to be invoked under RCU read lock. 
··· 4109 4057 { 4110 4058 struct sk_buff_head frames; 4111 4059 struct ieee80211_rx_data rx = { 4112 - .sta = sta, 4113 - .sdata = sta->sdata, 4114 - .local = sta->local, 4115 4060 /* This is OK -- must be QoS data frame */ 4116 4061 .security_idx = tid, 4117 4062 .seqno_idx = tid, 4118 - .link_id = -1, 4119 4063 }; 4120 4064 struct tid_ampdu_rx *tid_agg_rx; 4121 - u8 link_id; 4065 + int link_id = -1; 4066 + 4067 + /* FIXME: statistics won't be right with this */ 4068 + if (sta->sta.valid_links) 4069 + link_id = ffs(sta->sta.valid_links) - 1; 4070 + 4071 + if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id)) 4072 + return; 4122 4073 4123 4074 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4124 4075 if (!tid_agg_rx) ··· 4141 4086 }; 4142 4087 drv_event_callback(rx.local, rx.sdata, &event); 4143 4088 } 4144 - /* FIXME: statistics won't be right with this */ 4145 - link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0; 4146 - rx.link = rcu_dereference(sta->sdata->link[link_id]); 4147 - rx.link_sta = rcu_dereference(sta->link[link_id]); 4148 4089 4149 4090 ieee80211_rx_handlers(&rx, &frames); 4150 4091 } ··· 4156 4105 /* This is OK -- must be QoS data frame */ 4157 4106 .security_idx = tid, 4158 4107 .seqno_idx = tid, 4159 - .link_id = -1, 4160 4108 }; 4161 4109 int i, diff; 4162 4110 ··· 4166 4116 4167 4117 sta = container_of(pubsta, struct sta_info, sta); 4168 4118 4169 - rx.sta = sta; 4170 - rx.sdata = sta->sdata; 4171 - rx.link = &rx.sdata->deflink; 4172 - rx.local = sta->local; 4119 + if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1)) 4120 + return; 4173 4121 4174 4122 rcu_read_lock(); 4175 4123 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); ··· 4554 4506 mutex_unlock(&local->sta_mtx); 4555 4507 } 4556 4508 4557 - static bool 4558 - ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) 4559 - { 4560 - if (!sta->mlo) 4561 - return false; 4562 - 4563 - return !!(sta->valid_links & BIT(link_id)); 4564 - } 4565 
- 4566 4509 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, 4567 4510 struct ieee80211_fast_rx *fast_rx, 4568 4511 int orig_len) ··· 4664 4625 struct sk_buff *skb = rx->skb; 4665 4626 struct ieee80211_hdr *hdr = (void *)skb->data; 4666 4627 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4667 - struct sta_info *sta = rx->sta; 4668 4628 int orig_len = skb->len; 4669 4629 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4670 4630 int snap_offs = hdrlen; ··· 4675 4637 u8 da[ETH_ALEN]; 4676 4638 u8 sa[ETH_ALEN]; 4677 4639 } addrs __aligned(2); 4678 - struct link_sta_info *link_sta; 4679 4640 struct ieee80211_sta_rx_stats *stats; 4680 4641 4681 4642 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write ··· 4777 4740 drop: 4778 4741 dev_kfree_skb(skb); 4779 4742 4780 - if (rx->link_id >= 0) { 4781 - link_sta = rcu_dereference(sta->link[rx->link_id]); 4782 - if (!link_sta) 4783 - return true; 4784 - } else { 4785 - link_sta = &sta->deflink; 4786 - } 4787 - 4788 4743 if (fast_rx->uses_rss) 4789 - stats = this_cpu_ptr(link_sta->pcpu_rx_stats); 4744 + stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); 4790 4745 else 4791 - stats = &link_sta->rx_stats; 4746 + stats = &rx->link_sta->rx_stats; 4792 4747 4793 4748 stats->dropped++; 4794 4749 return true; ··· 4798 4769 struct ieee80211_local *local = rx->local; 4799 4770 struct ieee80211_sub_if_data *sdata = rx->sdata; 4800 4771 struct ieee80211_hdr *hdr = (void *)skb->data; 4801 - struct link_sta_info *link_sta = NULL; 4802 - struct ieee80211_link_data *link; 4772 + struct link_sta_info *link_sta = rx->link_sta; 4773 + struct ieee80211_link_data *link = rx->link; 4803 4774 4804 4775 rx->skb = skb; 4805 4776 ··· 4821 4792 if (!ieee80211_accept_frame(rx)) 4822 4793 return false; 4823 4794 4824 - if (rx->link_id >= 0) { 4825 - link = rcu_dereference(rx->sdata->link[rx->link_id]); 4826 - 4827 - /* we might race link removal */ 4828 - if (!link) 4829 - return true; 4830 - rx->link = link; 
4831 - 4832 - if (rx->sta) { 4833 - rx->link_sta = 4834 - rcu_dereference(rx->sta->link[rx->link_id]); 4835 - if (!rx->link_sta) 4836 - return true; 4837 - } 4838 - } else { 4839 - if (rx->sta) 4840 - rx->link_sta = &rx->sta->deflink; 4841 - 4842 - rx->link = &sdata->deflink; 4843 - } 4844 - 4845 - if (unlikely(!is_multicast_ether_addr(hdr->addr1) && 4846 - rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) { 4847 - link_sta = rcu_dereference(rx->sta->link[rx->link_id]); 4848 - 4849 - if (WARN_ON_ONCE(!link_sta)) 4850 - return true; 4851 - } 4852 - 4853 4795 if (!consume) { 4854 4796 struct skb_shared_hwtstamps *shwt; 4855 4797 ··· 4838 4838 */ 4839 4839 shwt = skb_hwtstamps(rx->skb); 4840 4840 shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp; 4841 + 4842 + /* Update the hdr pointer to the new skb for translation below */ 4843 + hdr = (struct ieee80211_hdr *)rx->skb->data; 4841 4844 } 4842 4845 4843 - if (unlikely(link_sta)) { 4846 + if (unlikely(rx->sta && rx->sta->sta.mlo)) { 4844 4847 /* translate to MLD addresses */ 4845 4848 if (ether_addr_equal(link->conf->addr, hdr->addr1)) 4846 4849 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); ··· 4873 4870 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4874 4871 struct ieee80211_fast_rx *fast_rx; 4875 4872 struct ieee80211_rx_data rx; 4873 + int link_id = -1; 4876 4874 4877 4875 memset(&rx, 0, sizeof(rx)); 4878 4876 rx.skb = skb; ··· 4890 4886 if (!pubsta) 4891 4887 goto drop; 4892 4888 4893 - rx.sta = container_of(pubsta, struct sta_info, sta); 4894 - rx.sdata = rx.sta->sdata; 4895 - 4896 - if (status->link_valid && 4897 - !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id)) 4898 - goto drop; 4889 + if (status->link_valid) 4890 + link_id = status->link_id; 4899 4891 4900 4892 /* 4901 4893 * TODO: Should the frame be dropped if the right link_id is not ··· 4900 4900 * link_id is used only for stats purpose and updating the stats on 4901 4901 * the deflink is fine? 
4902 4902 */ 4903 - if (status->link_valid) 4904 - rx.link_id = status->link_id; 4905 - 4906 - if (rx.link_id >= 0) { 4907 - struct ieee80211_link_data *link; 4908 - 4909 - link = rcu_dereference(rx.sdata->link[rx.link_id]); 4910 - if (!link) 4911 - goto drop; 4912 - rx.link = link; 4913 - } else { 4914 - rx.link = &rx.sdata->deflink; 4915 - } 4903 + if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id)) 4904 + goto drop; 4916 4905 4917 4906 fast_rx = rcu_dereference(rx.sta->fast_rx); 4918 4907 if (!fast_rx) ··· 4919 4930 { 4920 4931 struct link_sta_info *link_sta; 4921 4932 struct ieee80211_hdr *hdr = (void *)skb->data; 4933 + struct sta_info *sta; 4934 + int link_id = -1; 4922 4935 4923 4936 /* 4924 4937 * Look up link station first, in case there's a ··· 4930 4939 */ 4931 4940 link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2); 4932 4941 if (link_sta) { 4933 - rx->sta = link_sta->sta; 4934 - rx->link_id = link_sta->link_id; 4942 + sta = link_sta->sta; 4943 + link_id = link_sta->link_id; 4935 4944 } else { 4936 4945 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4937 4946 4938 - rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2); 4939 - if (rx->sta) { 4940 - if (status->link_valid && 4941 - !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, 4942 - status->link_id)) 4943 - return false; 4944 - 4945 - rx->link_id = status->link_valid ? 
status->link_id : -1; 4946 - } else { 4947 - rx->link_id = -1; 4948 - } 4947 + sta = sta_info_get_bss(rx->sdata, hdr->addr2); 4948 + if (status->link_valid) 4949 + link_id = status->link_id; 4949 4950 } 4951 + 4952 + if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id)) 4953 + return false; 4950 4954 4951 4955 return ieee80211_prepare_and_rx_handle(rx, skb, consume); 4952 4956 } ··· 5001 5015 5002 5016 if (ieee80211_is_data(fc)) { 5003 5017 struct sta_info *sta, *prev_sta; 5004 - u8 link_id = status->link_id; 5018 + int link_id = -1; 5019 + 5020 + if (status->link_valid) 5021 + link_id = status->link_id; 5005 5022 5006 5023 if (pubsta) { 5007 - rx.sta = container_of(pubsta, struct sta_info, sta); 5008 - rx.sdata = rx.sta->sdata; 5009 - 5010 - if (status->link_valid && 5011 - !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id)) 5024 + if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id)) 5012 5025 goto out; 5013 - 5014 - if (status->link_valid) 5015 - rx.link_id = status->link_id; 5016 5026 5017 5027 /* 5018 5028 * In MLO connection, fetch the link_id using addr2 ··· 5027 5045 if (!link_sta) 5028 5046 goto out; 5029 5047 5030 - rx.link_id = link_sta->link_id; 5048 + ieee80211_rx_data_set_link(&rx, link_sta->link_id); 5031 5049 } 5032 5050 5033 5051 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) ··· 5043 5061 continue; 5044 5062 } 5045 5063 5046 - if ((status->link_valid && 5047 - !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, 5048 - link_id)) || 5049 - (!status->link_valid && prev_sta->sta.mlo)) 5064 + rx.sdata = prev_sta->sdata; 5065 + if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta, 5066 + link_id)) 5067 + goto out; 5068 + 5069 + if (!status->link_valid && prev_sta->sta.mlo) 5050 5070 continue; 5051 5071 5052 - rx.link_id = status->link_valid ? 
link_id : -1; 5053 - rx.sta = prev_sta; 5054 - rx.sdata = prev_sta->sdata; 5055 5072 ieee80211_prepare_and_rx_handle(&rx, skb, false); 5056 5073 5057 5074 prev_sta = sta; 5058 5075 } 5059 5076 5060 5077 if (prev_sta) { 5061 - if ((status->link_valid && 5062 - !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, 5063 - link_id)) || 5064 - (!status->link_valid && prev_sta->sta.mlo)) 5078 + rx.sdata = prev_sta->sdata; 5079 + if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta, 5080 + link_id)) 5065 5081 goto out; 5066 5082 5067 - rx.link_id = status->link_valid ? link_id : -1; 5068 - rx.sta = prev_sta; 5069 - rx.sdata = prev_sta->sdata; 5083 + if (!status->link_valid && prev_sta->sta.mlo) 5084 + goto out; 5070 5085 5071 5086 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5072 5087 return;
+17 -17
net/mac80211/tx.c
··· 1129 1129 struct sk_buff *purge_skb = NULL; 1130 1130 1131 1131 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { 1132 - info->flags |= IEEE80211_TX_CTL_AMPDU; 1133 1132 reset_agg_timer = true; 1134 1133 } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { 1135 1134 /* ··· 1160 1161 if (!tid_tx) { 1161 1162 /* do nothing, let packet pass through */ 1162 1163 } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { 1163 - info->flags |= IEEE80211_TX_CTL_AMPDU; 1164 1164 reset_agg_timer = true; 1165 1165 } else { 1166 1166 queued = true; ··· 3675 3677 info->band = fast_tx->band; 3676 3678 info->control.vif = &sdata->vif; 3677 3679 info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT | 3678 - IEEE80211_TX_CTL_DONTFRAG | 3679 - (ampdu ? IEEE80211_TX_CTL_AMPDU : 0); 3680 + IEEE80211_TX_CTL_DONTFRAG; 3680 3681 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT | 3681 3682 u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, 3682 3683 IEEE80211_TX_CTRL_MLO_LINK); ··· 3780 3783 struct ieee80211_tx_data tx; 3781 3784 ieee80211_tx_result r; 3782 3785 struct ieee80211_vif *vif = txq->vif; 3786 + int q = vif->hw_queue[txq->ac]; 3787 + bool q_stopped; 3783 3788 3784 3789 WARN_ON_ONCE(softirq_count() == 0); 3785 3790 ··· 3789 3790 return NULL; 3790 3791 3791 3792 begin: 3792 - spin_lock_bh(&fq->lock); 3793 + spin_lock(&local->queue_stop_reason_lock); 3794 + q_stopped = local->queue_stop_reasons[q]; 3795 + spin_unlock(&local->queue_stop_reason_lock); 3793 3796 3794 - if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) || 3795 - test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) 3796 - goto out; 3797 - 3798 - if (vif->txqs_stopped[txq->ac]) { 3799 - set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags); 3800 - goto out; 3797 + if (unlikely(q_stopped)) { 3798 + /* mark for waking later */ 3799 + set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags); 3800 + return NULL; 3801 3801 } 3802 + 3803 + spin_lock_bh(&fq->lock); 3802 3804 3803 3805 /* Make sure fragments stay together. 
*/ 3804 3806 skb = __skb_dequeue(&txqi->frags); ··· 3810 3810 IEEE80211_SKB_CB(skb)->control.flags &= 3811 3811 ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 3812 3812 } else { 3813 + if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags))) 3814 + goto out; 3815 + 3813 3816 skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func); 3814 3817 } 3815 3818 ··· 3863 3860 } 3864 3861 3865 3862 if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) 3866 - info->flags |= IEEE80211_TX_CTL_AMPDU; 3867 - else 3868 - info->flags &= ~IEEE80211_TX_CTL_AMPDU; 3863 + info->flags |= (IEEE80211_TX_CTL_AMPDU | 3864 + IEEE80211_TX_CTL_DONTFRAG); 3869 3865 3870 3866 if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { 3871 3867 if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { ··· 4598 4596 4599 4597 info = IEEE80211_SKB_CB(skb); 4600 4598 memset(info, 0, sizeof(*info)); 4601 - if (tid_tx) 4602 - info->flags |= IEEE80211_TX_CTL_AMPDU; 4603 4599 4604 4600 info->hw_queue = sdata->vif.hw_queue[queue]; 4605 4601
+3 -39
net/mac80211/util.c
··· 292 292 struct ieee80211_sub_if_data *sdata, 293 293 struct ieee80211_txq *queue) 294 294 { 295 - int q = sdata->vif.hw_queue[queue->ac]; 296 295 struct ieee80211_tx_control control = { 297 296 .sta = queue->sta, 298 297 }; 299 298 struct sk_buff *skb; 300 - unsigned long flags; 301 - bool q_stopped; 302 299 303 300 while (1) { 304 - spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 305 - q_stopped = local->queue_stop_reasons[q]; 306 - spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 307 - 308 - if (q_stopped) 309 - break; 310 - 311 301 skb = ieee80211_tx_dequeue(&local->hw, queue); 312 302 if (!skb) 313 303 break; ··· 337 347 local_bh_disable(); 338 348 spin_lock(&fq->lock); 339 349 340 - sdata->vif.txqs_stopped[ac] = false; 341 - 342 350 if (!test_bit(SDATA_STATE_RUNNING, &sdata->state)) 343 351 goto out; 344 352 ··· 358 370 if (ac != txq->ac) 359 371 continue; 360 372 361 - if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, 373 + if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, 362 374 &txqi->flags)) 363 375 continue; 364 376 ··· 373 385 374 386 txqi = to_txq_info(vif->txq); 375 387 376 - if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) || 388 + if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) || 377 389 (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac) 378 390 goto out; 379 391 ··· 505 517 bool refcounted) 506 518 { 507 519 struct ieee80211_local *local = hw_to_local(hw); 508 - struct ieee80211_sub_if_data *sdata; 509 - int n_acs = IEEE80211_NUM_ACS; 510 520 511 521 trace_stop_queue(local, queue, reason); 512 522 ··· 516 530 else 517 531 local->q_stop_reasons[queue][reason]++; 518 532 519 - if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue])) 520 - return; 521 - 522 - if (local->hw.queues < IEEE80211_NUM_ACS) 523 - n_acs = 1; 524 - 525 - rcu_read_lock(); 526 - list_for_each_entry_rcu(sdata, &local->interfaces, list) { 527 - int ac; 528 - 529 - if (!sdata->dev) 530 - continue; 531 - 532 
- for (ac = 0; ac < n_acs; ac++) { 533 - if (sdata->vif.hw_queue[ac] == queue || 534 - sdata->vif.cab_queue == queue) { 535 - spin_lock(&local->fq.lock); 536 - sdata->vif.txqs_stopped[ac] = true; 537 - spin_unlock(&local->fq.lock); 538 - } 539 - } 540 - } 541 - rcu_read_unlock(); 533 + set_bit(reason, &local->queue_stop_reasons[queue]); 542 534 } 543 535 544 536 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
+25
net/mptcp/pm.c
··· 420 420 } 421 421 } 422 422 423 + /* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses, 424 + * otherwise allow any matching local/remote pair 425 + */ 426 + bool mptcp_pm_addr_families_match(const struct sock *sk, 427 + const struct mptcp_addr_info *loc, 428 + const struct mptcp_addr_info *rem) 429 + { 430 + bool mptcp_is_v4 = sk->sk_family == AF_INET; 431 + 432 + #if IS_ENABLED(CONFIG_MPTCP_IPV6) 433 + bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6); 434 + bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6); 435 + 436 + if (mptcp_is_v4) 437 + return loc_is_v4 && rem_is_v4; 438 + 439 + if (ipv6_only_sock(sk)) 440 + return !loc_is_v4 && !rem_is_v4; 441 + 442 + return loc_is_v4 == rem_is_v4; 443 + #else 444 + return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET; 445 + #endif 446 + } 447 + 423 448 void mptcp_pm_data_reset(struct mptcp_sock *msk) 424 449 { 425 450 u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
+7
net/mptcp/pm_userspace.c
··· 294 294 } 295 295 296 296 sk = (struct sock *)msk; 297 + 298 + if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) { 299 + GENL_SET_ERR_MSG(info, "families mismatch"); 300 + err = -EINVAL; 301 + goto create_err; 302 + } 303 + 297 304 lock_sock(sk); 298 305 299 306 err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+1 -1
net/mptcp/protocol.c
··· 98 98 struct socket *ssock; 99 99 int err; 100 100 101 - err = mptcp_subflow_create_socket(sk, &ssock); 101 + err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock); 102 102 if (err) 103 103 return err; 104 104
+5 -1
net/mptcp/protocol.h
··· 641 641 /* called with sk socket lock held */ 642 642 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, 643 643 const struct mptcp_addr_info *remote); 644 - int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock); 644 + int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, 645 + struct socket **new_sock); 645 646 void mptcp_info2sockaddr(const struct mptcp_addr_info *info, 646 647 struct sockaddr_storage *addr, 647 648 unsigned short family); ··· 777 776 int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, 778 777 bool require_family, 779 778 struct mptcp_pm_addr_entry *entry); 779 + bool mptcp_pm_addr_families_match(const struct sock *sk, 780 + const struct mptcp_addr_info *loc, 781 + const struct mptcp_addr_info *rem); 780 782 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); 781 783 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); 782 784 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
+5 -4
net/mptcp/subflow.c
··· 1547 1547 if (!mptcp_is_fully_established(sk)) 1548 1548 goto err_out; 1549 1549 1550 - err = mptcp_subflow_create_socket(sk, &sf); 1550 + err = mptcp_subflow_create_socket(sk, loc->family, &sf); 1551 1551 if (err) 1552 1552 goto err_out; 1553 1553 ··· 1660 1660 #endif 1661 1661 ssk->sk_prot = &tcp_prot; 1662 1662 } 1663 - int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) 1663 + 1664 + int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, 1665 + struct socket **new_sock) 1664 1666 { 1665 1667 struct mptcp_subflow_context *subflow; 1666 1668 struct net *net = sock_net(sk); ··· 1675 1673 if (unlikely(!sk->sk_socket)) 1676 1674 return -EINVAL; 1677 1675 1678 - err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP, 1679 - &sf); 1676 + err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf); 1680 1677 if (err) 1681 1678 return err; 1682 1679
+2 -2
net/netfilter/ipset/ip_set_bitmap_ip.c
··· 308 308 return -IPSET_ERR_BITMAP_RANGE; 309 309 310 310 pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask); 311 - hosts = 2 << (32 - netmask - 1); 312 - elements = 2 << (netmask - mask_bits - 1); 311 + hosts = 2U << (32 - netmask - 1); 312 + elements = 2UL << (netmask - mask_bits - 1); 313 313 } 314 314 if (elements > IPSET_BITMAP_MAX_RANGE + 1) 315 315 return -IPSET_ERR_BITMAP_RANGE_SIZE;
+15
net/netfilter/nf_conntrack_proto_tcp.c
··· 1066 1066 ct->proto.tcp.last_flags |= 1067 1067 IP_CT_EXP_CHALLENGE_ACK; 1068 1068 } 1069 + 1070 + /* possible challenge ack reply to syn */ 1071 + if (old_state == TCP_CONNTRACK_SYN_SENT && 1072 + index == TCP_ACK_SET && 1073 + dir == IP_CT_DIR_REPLY) 1074 + ct->proto.tcp.last_ack = ntohl(th->ack_seq); 1075 + 1069 1076 spin_unlock_bh(&ct->lock); 1070 1077 nf_ct_l4proto_log_invalid(skb, ct, state, 1071 1078 "packet (index %d) in dir %d ignored, state %s", ··· 1198 1191 * segments we ignored. */ 1199 1192 goto in_window; 1200 1193 } 1194 + 1195 + /* Reset in response to a challenge-ack we let through earlier */ 1196 + if (old_state == TCP_CONNTRACK_SYN_SENT && 1197 + ct->proto.tcp.last_index == TCP_ACK_SET && 1198 + ct->proto.tcp.last_dir == IP_CT_DIR_REPLY && 1199 + ntohl(th->seq) == ct->proto.tcp.last_ack) 1200 + goto in_window; 1201 + 1201 1202 break; 1202 1203 default: 1203 1204 /* Keep compilers happy. */
+1 -1
net/netfilter/nft_payload.c
··· 63 63 return false; 64 64 65 65 if (offset + len > VLAN_ETH_HLEN + vlan_hlen) 66 - ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen; 66 + ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen; 67 67 68 68 memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen); 69 69
+1
net/nfc/llcp_core.c
··· 157 157 cancel_work_sync(&local->rx_work); 158 158 cancel_work_sync(&local->timeout_work); 159 159 kfree_skb(local->rx_pending); 160 + local->rx_pending = NULL; 160 161 del_timer_sync(&local->sdreq_timer); 161 162 cancel_work_sync(&local->sdreq_timeout_work); 162 163 nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
+1 -1
net/rxrpc/call_object.c
··· 294 294 static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp) 295 295 { 296 296 struct rxrpc_local *local = call->local; 297 - int ret = 0; 297 + int ret = -ENOMEM; 298 298 299 299 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 300 300
+2
net/sched/sch_gred.c
··· 377 377 /* Even if driver returns failure adjust the stats - in case offload 378 378 * ended but driver still wants to adjust the values. 379 379 */ 380 + sch_tree_lock(sch); 380 381 for (i = 0; i < MAX_DPs; i++) { 381 382 if (!table->tab[i]) 382 383 continue; ··· 394 393 sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits; 395 394 } 396 395 _bstats_update(&sch->bstats, bytes, packets); 396 + sch_tree_unlock(sch); 397 397 398 398 kfree(hw_stats); 399 399 return ret;
+16 -11
net/sched/sch_htb.c
··· 1549 1549 struct tc_htb_qopt_offload offload_opt; 1550 1550 struct netdev_queue *dev_queue; 1551 1551 struct Qdisc *q = cl->leaf.q; 1552 - struct Qdisc *old = NULL; 1552 + struct Qdisc *old; 1553 1553 int err; 1554 1554 1555 1555 if (cl->level) ··· 1557 1557 1558 1558 WARN_ON(!q); 1559 1559 dev_queue = htb_offload_get_queue(cl); 1560 - old = htb_graft_helper(dev_queue, NULL); 1561 - if (destroying) 1562 - /* Before HTB is destroyed, the kernel grafts noop_qdisc to 1563 - * all queues. 1560 + /* When destroying, caller qdisc_graft grafts the new qdisc and invokes 1561 + * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload 1562 + * does not need to graft or qdisc_put the qdisc being destroyed. 1563 + */ 1564 + if (!destroying) { 1565 + old = htb_graft_helper(dev_queue, NULL); 1566 + /* Last qdisc grafted should be the same as cl->leaf.q when 1567 + * calling htb_delete. 1564 1568 */ 1565 - WARN_ON(!(old->flags & TCQ_F_BUILTIN)); 1566 - else 1567 1569 WARN_ON(old != q); 1570 + } 1568 1571 1569 1572 if (cl->parent) { 1570 1573 _bstats_update(&cl->parent->bstats_bias, ··· 1584 1581 }; 1585 1582 err = htb_offload(qdisc_dev(sch), &offload_opt); 1586 1583 1587 - if (!err || destroying) 1588 - qdisc_put(old); 1589 - else 1590 - htb_graft_helper(dev_queue, old); 1584 + if (!destroying) { 1585 + if (!err) 1586 + qdisc_put(old); 1587 + else 1588 + htb_graft_helper(dev_queue, old); 1589 + } 1591 1590 1592 1591 if (last_child) 1593 1592 return err;
+3
net/sched/sch_taprio.c
··· 1700 1700 int i; 1701 1701 1702 1702 hrtimer_cancel(&q->advance_timer); 1703 + qdisc_synchronize(sch); 1704 + 1703 1705 if (q->qdiscs) { 1704 1706 for (i = 0; i < dev->num_tx_queues; i++) 1705 1707 if (q->qdiscs[i]) ··· 1722 1720 * happens in qdisc_create(), after taprio_init() has been called. 1723 1721 */ 1724 1722 hrtimer_cancel(&q->advance_timer); 1723 + qdisc_synchronize(sch); 1725 1724 1726 1725 taprio_disable_offload(dev, q, NULL); 1727 1726
+1
scripts/Makefile.vmlinux
··· 18 18 $(call if_changed_dep,cc_o_c) 19 19 20 20 ifdef CONFIG_MODULES 21 + KASAN_SANITIZE_.vmlinux.export.o := n 21 22 targets += .vmlinux.export.o 22 23 vmlinux: .vmlinux.export.o 23 24 endif
+1 -3
security/tomoyo/Kconfig
··· 6 6 select SECURITYFS 7 7 select SECURITY_PATH 8 8 select SECURITY_NETWORK 9 - select SRCU 10 - select BUILD_BIN2C 11 9 default n 12 10 help 13 11 This selects TOMOYO Linux, pathname-based access control. 14 12 Required userspace tools and further information may be 15 - found at <http://tomoyo.sourceforge.jp/>. 13 + found at <https://tomoyo.osdn.jp/>. 16 14 If you are unsure how to answer this question, answer N. 17 15 18 16 config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
+11 -8
security/tomoyo/Makefile
··· 2 2 obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o 3 3 4 4 targets += builtin-policy.h 5 - define do_policy 6 - echo "static char tomoyo_builtin_$(1)[] __initdata ="; \ 7 - $(objtree)/scripts/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \ 8 - echo ";" 9 - endef 10 - quiet_cmd_policy = POLICY $@ 11 - cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@ 12 5 13 - $(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE 6 + quiet_cmd_policy = POLICY $@ 7 + cmd_policy = { \ 8 + $(foreach x, profile exception_policy domain_policy manager stat, \ 9 + printf 'static char tomoyo_builtin_$x[] __initdata =\n'; \ 10 + sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/\t"\1\\n"/' -- $(firstword $(filter %/$x.conf %/$x.conf.default, $^) /dev/null); \ 11 + printf '\t"";\n';) \ 12 + } > $@ 13 + 14 + $(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE 14 15 $(call if_changed,policy) 15 16 17 + ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING 16 18 $(obj)/common.o: $(obj)/builtin-policy.h 19 + endif
+15 -9
sound/core/control.c
··· 1203 1203 const u32 pattern = 0xdeadbeef; 1204 1204 int ret; 1205 1205 1206 + down_read(&card->controls_rwsem); 1206 1207 kctl = snd_ctl_find_id(card, &control->id); 1207 - if (kctl == NULL) 1208 - return -ENOENT; 1208 + if (kctl == NULL) { 1209 + ret = -ENOENT; 1210 + goto unlock; 1211 + } 1209 1212 1210 1213 index_offset = snd_ctl_get_ioff(kctl, &control->id); 1211 1214 vd = &kctl->vd[index_offset]; 1212 - if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) 1213 - return -EPERM; 1215 + if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) { 1216 + ret = -EPERM; 1217 + goto unlock; 1218 + } 1214 1219 1215 1220 snd_ctl_build_ioff(&control->id, kctl, index_offset); 1216 1221 ··· 1225 1220 info.id = control->id; 1226 1221 ret = __snd_ctl_elem_info(card, kctl, &info, NULL); 1227 1222 if (ret < 0) 1228 - return ret; 1223 + goto unlock; 1229 1224 #endif 1230 1225 1231 1226 if (!snd_ctl_skip_validation(&info)) ··· 1235 1230 ret = kctl->get(kctl, control); 1236 1231 snd_power_unref(card); 1237 1232 if (ret < 0) 1238 - return ret; 1233 + goto unlock; 1239 1234 if (!snd_ctl_skip_validation(&info) && 1240 1235 sanity_check_elem_value(card, control, &info, pattern) < 0) { 1241 1236 dev_err(card->dev, ··· 1243 1238 control->id.iface, control->id.device, 1244 1239 control->id.subdevice, control->id.name, 1245 1240 control->id.index); 1246 - return -EINVAL; 1241 + ret = -EINVAL; 1242 + goto unlock; 1247 1243 } 1244 + unlock: 1245 + up_read(&card->controls_rwsem); 1248 1246 return ret; 1249 1247 } 1250 1248 ··· 1261 1253 if (IS_ERR(control)) 1262 1254 return PTR_ERR(control); 1263 1255 1264 - down_read(&card->controls_rwsem); 1265 1256 result = snd_ctl_elem_read(card, control); 1266 - up_read(&card->controls_rwsem); 1267 1257 if (result < 0) 1268 1258 goto error; 1269 1259
+2 -3
sound/core/control_led.c
··· 530 530 bool attach) 531 531 { 532 532 char buf2[256], *s, *os; 533 - size_t len = max(sizeof(s) - 1, count); 534 533 struct snd_ctl_elem_id id; 535 534 int err; 536 535 537 - strncpy(buf2, buf, len); 538 - buf2[len] = '\0'; 536 + if (strscpy(buf2, buf, sizeof(buf2)) < 0) 537 + return -E2BIG; 539 538 memset(&id, 0, sizeof(id)); 540 539 id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; 541 540 s = buf2;
+15 -5
sound/pci/hda/cs35l41_hda.c
··· 598 598 dev_dbg(cs35l41->dev, "System Suspend\n"); 599 599 600 600 if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) { 601 - dev_err(cs35l41->dev, "System Suspend not supported\n"); 602 - return -EINVAL; 601 + dev_err_once(cs35l41->dev, "System Suspend not supported\n"); 602 + return 0; /* don't block the whole system suspend */ 603 603 } 604 604 605 605 ret = pm_runtime_force_suspend(dev); ··· 624 624 dev_dbg(cs35l41->dev, "System Resume\n"); 625 625 626 626 if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) { 627 - dev_err(cs35l41->dev, "System Resume not supported\n"); 628 - return -EINVAL; 627 + dev_err_once(cs35l41->dev, "System Resume not supported\n"); 628 + return 0; /* don't block the whole system resume */ 629 629 } 630 630 631 631 if (cs35l41->reset_gpio) { ··· 645 645 mutex_unlock(&cs35l41->fw_mutex); 646 646 647 647 return ret; 648 + } 649 + 650 + static int cs35l41_runtime_idle(struct device *dev) 651 + { 652 + struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev); 653 + 654 + if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) 655 + return -EBUSY; /* suspend not supported yet on this model */ 656 + return 0; 648 657 } 649 658 650 659 static int cs35l41_runtime_suspend(struct device *dev) ··· 1545 1536 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41); 1546 1537 1547 1538 const struct dev_pm_ops cs35l41_hda_pm_ops = { 1548 - RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, NULL) 1539 + RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, 1540 + cs35l41_runtime_idle) 1549 1541 SYSTEM_SLEEP_PM_OPS(cs35l41_system_suspend, cs35l41_system_resume) 1550 1542 }; 1551 1543 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_pm_ops, SND_HDA_SCODEC_CS35L41);
+1
sound/pci/hda/patch_hdmi.c
··· 1981 1981 SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), 1982 1982 SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), 1983 1983 SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1), 1984 + SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1), 1984 1985 SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1), 1985 1986 SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1), 1986 1987 {}
+41 -14
sound/pci/hda/patch_realtek.c
··· 3564 3564 hda_nid_t hp_pin = alc_get_hp_pin(spec); 3565 3565 bool hp_pin_sense; 3566 3566 3567 + if (spec->ultra_low_power) { 3568 + alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1); 3569 + alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2); 3570 + alc_update_coef_idx(codec, 0x08, 7<<4, 0); 3571 + alc_update_coef_idx(codec, 0x3b, 1<<15, 0); 3572 + alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); 3573 + msleep(30); 3574 + } 3575 + 3567 3576 if (!hp_pin) 3568 3577 hp_pin = 0x21; 3569 3578 ··· 3584 3575 msleep(2); 3585 3576 3586 3577 alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ 3587 - if (spec->ultra_low_power) { 3588 - alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1); 3589 - alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2); 3590 - alc_update_coef_idx(codec, 0x08, 7<<4, 0); 3591 - alc_update_coef_idx(codec, 0x3b, 1<<15, 0); 3592 - alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); 3593 - msleep(30); 3594 - } 3595 3578 3596 3579 snd_hda_codec_write(codec, hp_pin, 0, 3597 3580 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); ··· 3714 3713 hda_nid_t hp_pin = alc_get_hp_pin(spec); 3715 3714 bool hp1_pin_sense, hp2_pin_sense; 3716 3715 3716 + if (spec->ultra_low_power) { 3717 + alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2); 3718 + alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); 3719 + alc_update_coef_idx(codec, 0x33, 1<<11, 0); 3720 + msleep(30); 3721 + } 3722 + 3717 3723 if (spec->codec_variant != ALC269_TYPE_ALC287 && 3718 3724 spec->codec_variant != ALC269_TYPE_ALC245) 3719 3725 /* required only at boot or S3 and S4 resume time */ ··· 3742 3734 msleep(2); 3743 3735 3744 3736 alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ 3745 - if (spec->ultra_low_power) { 3746 - alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2); 3747 - alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); 3748 - alc_update_coef_idx(codec, 0x33, 1<<11, 0); 3749 - msleep(30); 3750 - } 3751 3737 3752 3738 if (hp1_pin_sense || spec->ultra_low_power) 3753 3739 snd_hda_codec_write(codec, 
hp_pin, 0, ··· 4646 4644 } 4647 4645 } 4648 4646 4647 + static void alc285_fixup_hp_gpio_micmute_led(struct hda_codec *codec, 4648 + const struct hda_fixup *fix, int action) 4649 + { 4650 + struct alc_spec *spec = codec->spec; 4651 + 4652 + if (action == HDA_FIXUP_ACT_PRE_PROBE) 4653 + spec->micmute_led_polarity = 1; 4654 + alc_fixup_hp_gpio_led(codec, action, 0, 0x04); 4655 + } 4656 + 4649 4657 static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, 4650 4658 const struct hda_fixup *fix, int action) 4651 4659 { ··· 4675 4663 { 4676 4664 alc285_fixup_hp_mute_led_coefbit(codec, fix, action); 4677 4665 alc285_fixup_hp_coef_micmute_led(codec, fix, action); 4666 + } 4667 + 4668 + static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec, 4669 + const struct hda_fixup *fix, int action) 4670 + { 4671 + alc285_fixup_hp_mute_led_coefbit(codec, fix, action); 4672 + alc285_fixup_hp_gpio_micmute_led(codec, fix, action); 4678 4673 } 4679 4674 4680 4675 static void alc236_fixup_hp_mute_led(struct hda_codec *codec, ··· 7125 7106 ALC285_FIXUP_ASUS_G533Z_PINS, 7126 7107 ALC285_FIXUP_HP_GPIO_LED, 7127 7108 ALC285_FIXUP_HP_MUTE_LED, 7109 + ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED, 7128 7110 ALC236_FIXUP_HP_GPIO_LED, 7129 7111 ALC236_FIXUP_HP_MUTE_LED, 7130 7112 ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, ··· 8506 8486 .type = HDA_FIXUP_FUNC, 8507 8487 .v.func = alc285_fixup_hp_mute_led, 8508 8488 }, 8489 + [ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED] = { 8490 + .type = HDA_FIXUP_FUNC, 8491 + .v.func = alc285_fixup_hp_spectre_x360_mute_led, 8492 + }, 8509 8493 [ALC236_FIXUP_HP_GPIO_LED] = { 8510 8494 .type = HDA_FIXUP_FUNC, 8511 8495 .v.func = alc236_fixup_hp_gpio_led, ··· 9263 9239 SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), 9264 9240 SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), 9265 9241 SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", 
ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), 9242 + SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), 9266 9243 SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), 9267 9244 SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), 9268 9245 SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), ··· 9352 9327 SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), 9353 9328 SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), 9354 9329 SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), 9330 + SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), 9355 9331 SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), 9356 9332 SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), 9357 9333 SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED), ··· 9432 9406 SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 9433 9407 SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 9434 9408 SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 9409 + SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED), 9435 9410 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 9436 9411 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 9437 9412 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+14
sound/soc/amd/yc/acp6x-mach.c
··· 209 209 { 210 210 .driver_data = &acp6x_card, 211 211 .matches = { 212 + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), 213 + DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"), 214 + } 215 + }, 216 + { 217 + .driver_data = &acp6x_card, 218 + .matches = { 212 219 DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"), 213 220 DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"), 214 221 } ··· 225 218 .matches = { 226 219 DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"), 227 220 DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"), 221 + } 222 + }, 223 + { 224 + .driver_data = &acp6x_card, 225 + .matches = { 226 + DMI_MATCH(DMI_BOARD_VENDOR, "Razer"), 227 + DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"), 228 228 } 229 229 }, 230 230 {}
+12
sound/soc/codecs/rt9120.c
··· 177 177 return 0; 178 178 } 179 179 180 + static int rt9120_codec_suspend(struct snd_soc_component *comp) 181 + { 182 + return pm_runtime_force_suspend(comp->dev); 183 + } 184 + 185 + static int rt9120_codec_resume(struct snd_soc_component *comp) 186 + { 187 + return pm_runtime_force_resume(comp->dev); 188 + } 189 + 180 190 static const struct snd_soc_component_driver rt9120_component_driver = { 181 191 .probe = rt9120_codec_probe, 192 + .suspend = rt9120_codec_suspend, 193 + .resume = rt9120_codec_resume, 182 194 .controls = rt9120_snd_controls, 183 195 .num_controls = ARRAY_SIZE(rt9120_snd_controls), 184 196 .dapm_widgets = rt9120_dapm_widgets,
+7
sound/soc/codecs/wm8904.c
··· 697 697 int dcs_mask; 698 698 int dcs_l, dcs_r; 699 699 int dcs_l_reg, dcs_r_reg; 700 + int an_out_reg; 700 701 int timeout; 701 702 int pwr_reg; 702 703 ··· 713 712 dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1; 714 713 dcs_r_reg = WM8904_DC_SERVO_8; 715 714 dcs_l_reg = WM8904_DC_SERVO_9; 715 + an_out_reg = WM8904_ANALOGUE_OUT1_LEFT; 716 716 dcs_l = 0; 717 717 dcs_r = 1; 718 718 break; ··· 722 720 dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3; 723 721 dcs_r_reg = WM8904_DC_SERVO_6; 724 722 dcs_l_reg = WM8904_DC_SERVO_7; 723 + an_out_reg = WM8904_ANALOGUE_OUT2_LEFT; 725 724 dcs_l = 2; 726 725 dcs_r = 3; 727 726 break; ··· 795 792 snd_soc_component_update_bits(component, reg, 796 793 WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP, 797 794 WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP); 795 + 796 + /* Update volume, requires PGA to be powered */ 797 + val = snd_soc_component_read(component, an_out_reg); 798 + snd_soc_component_write(component, an_out_reg, val); 798 799 break; 799 800 800 801 case SND_SOC_DAPM_POST_PMU:
+4 -4
sound/soc/fsl/fsl-asoc-card.c
··· 121 121 122 122 static const struct snd_soc_dapm_route audio_map_ac97[] = { 123 123 /* 1st half -- Normal DAPM routes */ 124 - {"Playback", NULL, "AC97 Playback"}, 125 - {"AC97 Capture", NULL, "Capture"}, 124 + {"AC97 Playback", NULL, "CPU AC97 Playback"}, 125 + {"CPU AC97 Capture", NULL, "AC97 Capture"}, 126 126 /* 2nd half -- ASRC DAPM routes */ 127 - {"AC97 Playback", NULL, "ASRC-Playback"}, 128 - {"ASRC-Capture", NULL, "AC97 Capture"}, 127 + {"CPU AC97 Playback", NULL, "ASRC-Playback"}, 128 + {"ASRC-Capture", NULL, "CPU AC97 Capture"}, 129 129 }; 130 130 131 131 static const struct snd_soc_dapm_route audio_map_tx[] = {
+8 -8
sound/soc/fsl/fsl_micfil.c
··· 315 315 316 316 static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = { 317 317 SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL, 318 - MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv), 318 + MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv), 319 319 SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL, 320 - MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv), 320 + MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv), 321 321 SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL, 322 - MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv), 322 + MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv), 323 323 SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL, 324 - MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv), 324 + MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv), 325 325 SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL, 326 - MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv), 326 + MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv), 327 327 SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL, 328 - MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv), 328 + MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv), 329 329 SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL, 330 - MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv), 330 + MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv), 331 331 SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL, 332 - MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv), 332 + MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv), 333 333 SOC_ENUM_EXT("MICFIL Quality Select", 334 334 fsl_micfil_quality_enum, 335 335 micfil_quality_get, micfil_quality_set),
+2 -2
sound/soc/fsl/fsl_ssi.c
··· 1189 1189 .symmetric_channels = 1, 1190 1190 .probe = fsl_ssi_dai_probe, 1191 1191 .playback = { 1192 - .stream_name = "AC97 Playback", 1192 + .stream_name = "CPU AC97 Playback", 1193 1193 .channels_min = 2, 1194 1194 .channels_max = 2, 1195 1195 .rates = SNDRV_PCM_RATE_8000_48000, 1196 1196 .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20, 1197 1197 }, 1198 1198 .capture = { 1199 - .stream_name = "AC97 Capture", 1199 + .stream_name = "CPU AC97 Capture", 1200 1200 .channels_min = 2, 1201 1201 .channels_max = 2, 1202 1202 .rates = SNDRV_PCM_RATE_48000,
+2
sound/soc/intel/boards/Kconfig
··· 554 554 select SND_SOC_RT1015P 555 555 select SND_SOC_MAX98373_I2C 556 556 select SND_SOC_MAX98357A 557 + select SND_SOC_NAU8315 557 558 select SND_SOC_DMIC 558 559 select SND_SOC_HDAC_HDMI 559 560 select SND_SOC_INTEL_HDA_DSP_COMMON 560 561 select SND_SOC_INTEL_SOF_MAXIM_COMMON 562 + select SND_SOC_INTEL_SOF_REALTEK_COMMON 561 563 help 562 564 This adds support for ASoC machine driver for SOF platforms 563 565 with nau8825 codec.
+27 -4
sound/soc/intel/boards/sof_nau8825.c
··· 48 48 #define SOF_MAX98373_SPEAKER_AMP_PRESENT BIT(15) 49 49 #define SOF_MAX98360A_SPEAKER_AMP_PRESENT BIT(16) 50 50 #define SOF_RT1015P_SPEAKER_AMP_PRESENT BIT(17) 51 + #define SOF_NAU8318_SPEAKER_AMP_PRESENT BIT(18) 51 52 52 53 static unsigned long sof_nau8825_quirk = SOF_NAU8825_SSP_CODEC(0); 53 54 ··· 339 338 } 340 339 }; 341 340 341 + static struct snd_soc_dai_link_component nau8318_components[] = { 342 + { 343 + .name = "NVTN2012:00", 344 + .dai_name = "nau8315-hifi", 345 + } 346 + }; 347 + 342 348 static struct snd_soc_dai_link_component dummy_component[] = { 343 349 { 344 350 .name = "snd-soc-dummy", ··· 494 486 max_98360a_dai_link(&links[id]); 495 487 } else if (sof_nau8825_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT) { 496 488 sof_rt1015p_dai_link(&links[id]); 489 + } else if (sof_nau8825_quirk & 490 + SOF_NAU8318_SPEAKER_AMP_PRESENT) { 491 + links[id].codecs = nau8318_components; 492 + links[id].num_codecs = ARRAY_SIZE(nau8318_components); 493 + links[id].init = speaker_codec_init; 497 494 } else { 498 495 goto devm_err; 499 496 } ··· 631 618 632 619 }, 633 620 { 634 - .name = "adl_rt1019p_nau8825", 621 + .name = "adl_rt1019p_8825", 635 622 .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) | 636 623 SOF_SPEAKER_AMP_PRESENT | 637 624 SOF_RT1019P_SPEAKER_AMP_PRESENT | ··· 639 626 SOF_NAU8825_NUM_HDMIDEV(4)), 640 627 }, 641 628 { 642 - .name = "adl_max98373_nau8825", 629 + .name = "adl_max98373_8825", 643 630 .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) | 644 631 SOF_SPEAKER_AMP_PRESENT | 645 632 SOF_MAX98373_SPEAKER_AMP_PRESENT | ··· 650 637 }, 651 638 { 652 639 /* The limitation of length of char array, shorten the name */ 653 - .name = "adl_mx98360a_nau8825", 640 + .name = "adl_mx98360a_8825", 654 641 .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) | 655 642 SOF_SPEAKER_AMP_PRESENT | 656 643 SOF_MAX98360A_SPEAKER_AMP_PRESENT | ··· 661 648 662 649 }, 663 650 { 664 - .name = "adl_rt1015p_nau8825", 651 + .name = 
"adl_rt1015p_8825", 665 652 .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) | 666 653 SOF_SPEAKER_AMP_PRESENT | 667 654 SOF_RT1015P_SPEAKER_AMP_PRESENT | 655 + SOF_NAU8825_SSP_AMP(1) | 656 + SOF_NAU8825_NUM_HDMIDEV(4) | 657 + SOF_BT_OFFLOAD_SSP(2) | 658 + SOF_SSP_BT_OFFLOAD_PRESENT), 659 + }, 660 + { 661 + .name = "adl_nau8318_8825", 662 + .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) | 663 + SOF_SPEAKER_AMP_PRESENT | 664 + SOF_NAU8318_SPEAKER_AMP_PRESENT | 668 665 SOF_NAU8825_SSP_AMP(1) | 669 666 SOF_NAU8825_NUM_HDMIDEV(4) | 670 667 SOF_BT_OFFLOAD_SSP(2) |
+16 -4
sound/soc/intel/common/soc-acpi-intel-adl-match.c
··· 450 450 .codecs = {"INTC10B0"} 451 451 }; 452 452 453 + static const struct snd_soc_acpi_codecs adl_nau8318_amp = { 454 + .num_codecs = 1, 455 + .codecs = {"NVTN2012"} 456 + }; 457 + 453 458 struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = { 454 459 { 455 460 .comp_ids = &adl_rt5682_rt5682s_hp, ··· 479 474 }, 480 475 { 481 476 .id = "10508825", 482 - .drv_name = "adl_rt1019p_nau8825", 477 + .drv_name = "adl_rt1019p_8825", 483 478 .machine_quirk = snd_soc_acpi_codec_list, 484 479 .quirk_data = &adl_rt1019p_amp, 485 480 .sof_tplg_filename = "sof-adl-rt1019-nau8825.tplg", 486 481 }, 487 482 { 488 483 .id = "10508825", 489 - .drv_name = "adl_max98373_nau8825", 484 + .drv_name = "adl_max98373_8825", 490 485 .machine_quirk = snd_soc_acpi_codec_list, 491 486 .quirk_data = &adl_max98373_amp, 492 487 .sof_tplg_filename = "sof-adl-max98373-nau8825.tplg", 493 488 }, 494 489 { 495 490 .id = "10508825", 496 - .drv_name = "adl_mx98360a_nau8825", 491 + .drv_name = "adl_mx98360a_8825", 497 492 .machine_quirk = snd_soc_acpi_codec_list, 498 493 .quirk_data = &adl_max98360a_amp, 499 494 .sof_tplg_filename = "sof-adl-max98360a-nau8825.tplg", ··· 507 502 }, 508 503 { 509 504 .id = "10508825", 510 - .drv_name = "adl_rt1015p_nau8825", 505 + .drv_name = "adl_rt1015p_8825", 511 506 .machine_quirk = snd_soc_acpi_codec_list, 512 507 .quirk_data = &adl_rt1015p_amp, 513 508 .sof_tplg_filename = "sof-adl-rt1015-nau8825.tplg", 509 + }, 510 + { 511 + .id = "10508825", 512 + .drv_name = "adl_nau8318_8825", 513 + .machine_quirk = snd_soc_acpi_codec_list, 514 + .quirk_data = &adl_nau8318_amp, 515 + .sof_tplg_filename = "sof-adl-nau8318-nau8825.tplg", 514 516 }, 515 517 { 516 518 .id = "10508825",
+50
sound/soc/intel/common/soc-acpi-intel-rpl-match.c
··· 203 203 {} 204 204 }; 205 205 206 + static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link2_rt1316_link01[] = { 207 + { 208 + .mask = BIT(2), 209 + .num_adr = ARRAY_SIZE(rt711_sdca_2_adr), 210 + .adr_d = rt711_sdca_2_adr, 211 + }, 212 + { 213 + .mask = BIT(0), 214 + .num_adr = ARRAY_SIZE(rt1316_0_group2_adr), 215 + .adr_d = rt1316_0_group2_adr, 216 + }, 217 + { 218 + .mask = BIT(1), 219 + .num_adr = ARRAY_SIZE(rt1316_1_group2_adr), 220 + .adr_d = rt1316_1_group2_adr, 221 + }, 222 + {} 223 + }; 224 + 206 225 static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12_rt714_link3[] = { 207 226 { 208 227 .mask = BIT(0), ··· 242 223 .mask = BIT(3), 243 224 .num_adr = ARRAY_SIZE(rt714_3_adr), 244 225 .adr_d = rt714_3_adr, 226 + }, 227 + {} 228 + }; 229 + 230 + static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12[] = { 231 + { 232 + .mask = BIT(0), 233 + .num_adr = ARRAY_SIZE(rt711_sdca_0_adr), 234 + .adr_d = rt711_sdca_0_adr, 235 + }, 236 + { 237 + .mask = BIT(1), 238 + .num_adr = ARRAY_SIZE(rt1318_1_group1_adr), 239 + .adr_d = rt1318_1_group1_adr, 240 + }, 241 + { 242 + .mask = BIT(2), 243 + .num_adr = ARRAY_SIZE(rt1318_2_group1_adr), 244 + .adr_d = rt1318_2_group1_adr, 245 245 }, 246 246 {} 247 247 }; ··· 310 272 .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12-rt714-l3.tplg", 311 273 }, 312 274 { 275 + .link_mask = 0x7, /* rt711 on link0 & two rt1318s on link1 and link2 */ 276 + .links = rpl_sdw_rt711_link0_rt1318_link12, 277 + .drv_name = "sof_sdw", 278 + .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12.tplg", 279 + }, 280 + { 313 281 .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */ 314 282 .links = rpl_sdw_rt1316_link12_rt714_link0, 315 283 .drv_name = "sof_sdw", 316 284 .sof_tplg_filename = "sof-rpl-rt1316-l12-rt714-l0.tplg", 285 + }, 286 + { 287 + .link_mask = 0x7, /* rt711 on link2 & two rt1316s on link0 and link1 */ 288 + .links = rpl_sdw_rt711_link2_rt1316_link01, 289 + .drv_name = 
"sof_sdw", 290 + .sof_tplg_filename = "sof-rpl-rt711-l2-rt1316-l01.tplg", 317 291 }, 318 292 { 319 293 .link_mask = 0x1, /* link0 required */
+3 -1
sound/soc/mediatek/Kconfig
··· 182 182 If unsure select "N". 183 183 184 184 config SND_SOC_MT8186_MT6366_RT1019_RT5682S 185 - tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec" 185 + tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S MAX98357A/MAX98360 codec" 186 186 depends on I2C && GPIOLIB 187 187 depends on SND_SOC_MT8186 && MTK_PMIC_WRAP 188 + select SND_SOC_MAX98357A 188 189 select SND_SOC_MT6358 190 + select SND_SOC_MAX98357A 189 191 select SND_SOC_RT1015P 190 192 select SND_SOC_RT5682S 191 193 select SND_SOC_BT_SCO
+21 -1
sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
··· 1083 1083 .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf), 1084 1084 }; 1085 1085 1086 + static struct snd_soc_card mt8186_mt6366_rt5682s_max98360_soc_card = { 1087 + .name = "mt8186_rt5682s_max98360", 1088 + .owner = THIS_MODULE, 1089 + .dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links, 1090 + .num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links), 1091 + .controls = mt8186_mt6366_rt1019_rt5682s_controls, 1092 + .num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls), 1093 + .dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets, 1094 + .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets), 1095 + .dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes, 1096 + .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes), 1097 + .codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf, 1098 + .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf), 1099 + }; 1100 + 1086 1101 static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev) 1087 1102 { 1088 1103 struct snd_soc_card *card; ··· 1247 1232 1248 1233 #if IS_ENABLED(CONFIG_OF) 1249 1234 static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = { 1250 - { .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound", 1235 + { 1236 + .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound", 1251 1237 .data = &mt8186_mt6366_rt1019_rt5682s_soc_card, 1238 + }, 1239 + { 1240 + .compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound", 1241 + .data = &mt8186_mt6366_rt5682s_max98360_soc_card, 1252 1242 }, 1253 1243 {} 1254 1244 };
+12 -9
sound/soc/qcom/Kconfig
··· 2 2 menuconfig SND_SOC_QCOM 3 3 tristate "ASoC support for QCOM platforms" 4 4 depends on ARCH_QCOM || COMPILE_TEST 5 - imply SND_SOC_QCOM_COMMON 6 5 help 7 6 Say Y or M if you want to add support to use audio devices 8 7 in Qualcomm Technologies SOC-based platforms. ··· 59 60 config SND_SOC_APQ8016_SBC 60 61 tristate "SoC Audio support for APQ8016 SBC platforms" 61 62 select SND_SOC_LPASS_APQ8016 62 - depends on SND_SOC_QCOM_COMMON 63 + select SND_SOC_QCOM_COMMON 63 64 help 64 65 Support for Qualcomm Technologies LPASS audio block in 65 66 APQ8016 SOC-based systems. 66 67 Say Y if you want to use audio devices on MI2S. 67 68 68 69 config SND_SOC_QCOM_COMMON 69 - depends on SOUNDWIRE 70 + tristate 71 + 72 + config SND_SOC_QCOM_SDW 70 73 tristate 71 74 72 75 config SND_SOC_QDSP6_COMMON ··· 145 144 depends on QCOM_APR 146 145 depends on COMMON_CLK 147 146 select SND_SOC_QDSP6 148 - depends on SND_SOC_QCOM_COMMON 147 + select SND_SOC_QCOM_COMMON 149 148 help 150 149 Support for Qualcomm Technologies LPASS audio block in 151 150 APQ8096 SoC-based systems. ··· 156 155 depends on QCOM_APR && I2C && SOUNDWIRE 157 156 depends on COMMON_CLK 158 157 select SND_SOC_QDSP6 159 - depends on SND_SOC_QCOM_COMMON 158 + select SND_SOC_QCOM_COMMON 160 159 select SND_SOC_RT5663 161 160 select SND_SOC_MAX98927 162 161 imply SND_SOC_CROS_EC_CODEC ··· 170 169 depends on QCOM_APR && SOUNDWIRE 171 170 depends on COMMON_CLK 172 171 select SND_SOC_QDSP6 173 - depends on SND_SOC_QCOM_COMMON 172 + select SND_SOC_QCOM_COMMON 173 + select SND_SOC_QCOM_SDW 174 174 help 175 175 To add support for audio on Qualcomm Technologies Inc. 176 176 SM8250 SoC-based systems. ··· 182 180 depends on QCOM_APR && SOUNDWIRE 183 181 depends on COMMON_CLK 184 182 select SND_SOC_QDSP6 185 - depends on SND_SOC_QCOM_COMMON 183 + select SND_SOC_QCOM_COMMON 184 + select SND_SOC_QCOM_SDW 186 185 help 187 186 To add support for audio on Qualcomm Technologies Inc. 188 187 SC8280XP SoC-based systems. 
··· 193 190 tristate "SoC Machine driver for SC7180 boards" 194 191 depends on I2C && GPIOLIB 195 192 depends on SOUNDWIRE || SOUNDWIRE=n 196 - depends on SND_SOC_QCOM_COMMON 193 + select SND_SOC_QCOM_COMMON 197 194 select SND_SOC_LPASS_SC7180 198 195 select SND_SOC_MAX98357A 199 196 select SND_SOC_RT5682_I2C ··· 207 204 config SND_SOC_SC7280 208 205 tristate "SoC Machine driver for SC7280 boards" 209 206 depends on I2C && SOUNDWIRE 210 - depends on SND_SOC_QCOM_COMMON 207 + select SND_SOC_QCOM_COMMON 211 208 select SND_SOC_LPASS_SC7280 212 209 select SND_SOC_MAX98357A 213 210 select SND_SOC_WCD938X_SDW
+2
sound/soc/qcom/Makefile
··· 28 28 snd-soc-sm8250-objs := sm8250.o 29 29 snd-soc-sc8280xp-objs := sc8280xp.o 30 30 snd-soc-qcom-common-objs := common.o 31 + snd-soc-qcom-sdw-objs := sdw.o 31 32 32 33 obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o 33 34 obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o ··· 39 38 obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o 40 39 obj-$(CONFIG_SND_SOC_SM8250) += snd-soc-sm8250.o 41 40 obj-$(CONFIG_SND_SOC_QCOM_COMMON) += snd-soc-qcom-common.o 41 + obj-$(CONFIG_SND_SOC_QCOM_SDW) += snd-soc-qcom-sdw.o 42 42 43 43 #DSP lib 44 44 obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
-114
sound/soc/qcom/common.c
··· 180 180 } 181 181 EXPORT_SYMBOL_GPL(qcom_snd_parse_of); 182 182 183 - int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream, 184 - struct sdw_stream_runtime *sruntime, 185 - bool *stream_prepared) 186 - { 187 - struct snd_soc_pcm_runtime *rtd = substream->private_data; 188 - struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); 189 - int ret; 190 - 191 - if (!sruntime) 192 - return 0; 193 - 194 - switch (cpu_dai->id) { 195 - case WSA_CODEC_DMA_RX_0: 196 - case WSA_CODEC_DMA_RX_1: 197 - case RX_CODEC_DMA_RX_0: 198 - case RX_CODEC_DMA_RX_1: 199 - case TX_CODEC_DMA_TX_0: 200 - case TX_CODEC_DMA_TX_1: 201 - case TX_CODEC_DMA_TX_2: 202 - case TX_CODEC_DMA_TX_3: 203 - break; 204 - default: 205 - return 0; 206 - } 207 - 208 - if (*stream_prepared) { 209 - sdw_disable_stream(sruntime); 210 - sdw_deprepare_stream(sruntime); 211 - *stream_prepared = false; 212 - } 213 - 214 - ret = sdw_prepare_stream(sruntime); 215 - if (ret) 216 - return ret; 217 - 218 - /** 219 - * NOTE: there is a strict hw requirement about the ordering of port 220 - * enables and actual WSA881x PA enable. PA enable should only happen 221 - * after soundwire ports are enabled if not DC on the line is 222 - * accumulated resulting in Click/Pop Noise 223 - * PA enable/mute are handled as part of codec DAPM and digital mute. 
224 - */ 225 - 226 - ret = sdw_enable_stream(sruntime); 227 - if (ret) { 228 - sdw_deprepare_stream(sruntime); 229 - return ret; 230 - } 231 - *stream_prepared = true; 232 - 233 - return ret; 234 - } 235 - EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare); 236 - 237 - int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, 238 - struct snd_pcm_hw_params *params, 239 - struct sdw_stream_runtime **psruntime) 240 - { 241 - struct snd_soc_pcm_runtime *rtd = substream->private_data; 242 - struct snd_soc_dai *codec_dai; 243 - struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); 244 - struct sdw_stream_runtime *sruntime; 245 - int i; 246 - 247 - switch (cpu_dai->id) { 248 - case WSA_CODEC_DMA_RX_0: 249 - case RX_CODEC_DMA_RX_0: 250 - case RX_CODEC_DMA_RX_1: 251 - case TX_CODEC_DMA_TX_0: 252 - case TX_CODEC_DMA_TX_1: 253 - case TX_CODEC_DMA_TX_2: 254 - case TX_CODEC_DMA_TX_3: 255 - for_each_rtd_codec_dais(rtd, i, codec_dai) { 256 - sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream); 257 - if (sruntime != ERR_PTR(-ENOTSUPP)) 258 - *psruntime = sruntime; 259 - } 260 - break; 261 - } 262 - 263 - return 0; 264 - 265 - } 266 - EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params); 267 - 268 - int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream, 269 - struct sdw_stream_runtime *sruntime, bool *stream_prepared) 270 - { 271 - struct snd_soc_pcm_runtime *rtd = substream->private_data; 272 - struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); 273 - 274 - switch (cpu_dai->id) { 275 - case WSA_CODEC_DMA_RX_0: 276 - case WSA_CODEC_DMA_RX_1: 277 - case RX_CODEC_DMA_RX_0: 278 - case RX_CODEC_DMA_RX_1: 279 - case TX_CODEC_DMA_TX_0: 280 - case TX_CODEC_DMA_TX_1: 281 - case TX_CODEC_DMA_TX_2: 282 - case TX_CODEC_DMA_TX_3: 283 - if (sruntime && *stream_prepared) { 284 - sdw_disable_stream(sruntime); 285 - sdw_deprepare_stream(sruntime); 286 - *stream_prepared = false; 287 - } 288 - break; 289 - default: 290 - break; 291 - } 292 - 293 - return 0; 294 - } 295 - 
EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free); 296 - 297 183 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd, 298 184 struct snd_soc_jack *jack, bool *jack_setup) 299 185 {
-10
sound/soc/qcom/common.h
··· 5 5 #define __QCOM_SND_COMMON_H__ 6 6 7 7 #include <sound/soc.h> 8 - #include <linux/soundwire/sdw.h> 9 8 10 9 int qcom_snd_parse_of(struct snd_soc_card *card); 11 10 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd, 12 11 struct snd_soc_jack *jack, bool *jack_setup); 13 12 14 - int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream, 15 - struct sdw_stream_runtime *runtime, 16 - bool *stream_prepared); 17 - int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, 18 - struct snd_pcm_hw_params *params, 19 - struct sdw_stream_runtime **psruntime); 20 - int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream, 21 - struct sdw_stream_runtime *sruntime, 22 - bool *stream_prepared); 23 13 #endif
+3 -2
sound/soc/qcom/lpass-cpu.c
··· 1037 1037 struct lpass_data *data) 1038 1038 { 1039 1039 struct device_node *node; 1040 - int ret, id; 1040 + int ret, i, id; 1041 1041 1042 1042 /* Allow all channels by default for backwards compatibility */ 1043 - for (id = 0; id < data->variant->num_dai; id++) { 1043 + for (i = 0; i < data->variant->num_dai; i++) { 1044 + id = data->variant->dai_driver[i].id; 1044 1045 data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH; 1045 1046 data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH; 1046 1047 }
+1
sound/soc/qcom/sc8280xp.c
··· 12 12 #include <linux/input-event-codes.h> 13 13 #include "qdsp6/q6afe.h" 14 14 #include "common.h" 15 + #include "sdw.h" 15 16 16 17 #define DRIVER_NAME "sc8280xp" 17 18
+123
sound/soc/qcom/sdw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2018, Linaro Limited. 3 + // Copyright (c) 2018, The Linux Foundation. All rights reserved. 4 + 5 + #include <linux/module.h> 6 + #include <sound/soc.h> 7 + #include "qdsp6/q6afe.h" 8 + #include "sdw.h" 9 + 10 + int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream, 11 + struct sdw_stream_runtime *sruntime, 12 + bool *stream_prepared) 13 + { 14 + struct snd_soc_pcm_runtime *rtd = substream->private_data; 15 + struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); 16 + int ret; 17 + 18 + if (!sruntime) 19 + return 0; 20 + 21 + switch (cpu_dai->id) { 22 + case WSA_CODEC_DMA_RX_0: 23 + case WSA_CODEC_DMA_RX_1: 24 + case RX_CODEC_DMA_RX_0: 25 + case RX_CODEC_DMA_RX_1: 26 + case TX_CODEC_DMA_TX_0: 27 + case TX_CODEC_DMA_TX_1: 28 + case TX_CODEC_DMA_TX_2: 29 + case TX_CODEC_DMA_TX_3: 30 + break; 31 + default: 32 + return 0; 33 + } 34 + 35 + if (*stream_prepared) { 36 + sdw_disable_stream(sruntime); 37 + sdw_deprepare_stream(sruntime); 38 + *stream_prepared = false; 39 + } 40 + 41 + ret = sdw_prepare_stream(sruntime); 42 + if (ret) 43 + return ret; 44 + 45 + /** 46 + * NOTE: there is a strict hw requirement about the ordering of port 47 + * enables and actual WSA881x PA enable. PA enable should only happen 48 + * after soundwire ports are enabled if not DC on the line is 49 + * accumulated resulting in Click/Pop Noise 50 + * PA enable/mute are handled as part of codec DAPM and digital mute. 
51 + */ 52 + 53 + ret = sdw_enable_stream(sruntime); 54 + if (ret) { 55 + sdw_deprepare_stream(sruntime); 56 + return ret; 57 + } 58 + *stream_prepared = true; 59 + 60 + return ret; 61 + } 62 + EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare); 63 + 64 + int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, 65 + struct snd_pcm_hw_params *params, 66 + struct sdw_stream_runtime **psruntime) 67 + { 68 + struct snd_soc_pcm_runtime *rtd = substream->private_data; 69 + struct snd_soc_dai *codec_dai; 70 + struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); 71 + struct sdw_stream_runtime *sruntime; 72 + int i; 73 + 74 + switch (cpu_dai->id) { 75 + case WSA_CODEC_DMA_RX_0: 76 + case RX_CODEC_DMA_RX_0: 77 + case RX_CODEC_DMA_RX_1: 78 + case TX_CODEC_DMA_TX_0: 79 + case TX_CODEC_DMA_TX_1: 80 + case TX_CODEC_DMA_TX_2: 81 + case TX_CODEC_DMA_TX_3: 82 + for_each_rtd_codec_dais(rtd, i, codec_dai) { 83 + sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream); 84 + if (sruntime != ERR_PTR(-ENOTSUPP)) 85 + *psruntime = sruntime; 86 + } 87 + break; 88 + } 89 + 90 + return 0; 91 + 92 + } 93 + EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params); 94 + 95 + int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream, 96 + struct sdw_stream_runtime *sruntime, bool *stream_prepared) 97 + { 98 + struct snd_soc_pcm_runtime *rtd = substream->private_data; 99 + struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); 100 + 101 + switch (cpu_dai->id) { 102 + case WSA_CODEC_DMA_RX_0: 103 + case WSA_CODEC_DMA_RX_1: 104 + case RX_CODEC_DMA_RX_0: 105 + case RX_CODEC_DMA_RX_1: 106 + case TX_CODEC_DMA_TX_0: 107 + case TX_CODEC_DMA_TX_1: 108 + case TX_CODEC_DMA_TX_2: 109 + case TX_CODEC_DMA_TX_3: 110 + if (sruntime && *stream_prepared) { 111 + sdw_disable_stream(sruntime); 112 + sdw_deprepare_stream(sruntime); 113 + *stream_prepared = false; 114 + } 115 + break; 116 + default: 117 + break; 118 + } 119 + 120 + return 0; 121 + } 122 + EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free); 123 + MODULE_LICENSE("GPL 
v2");
+18
sound/soc/qcom/sdw.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + // Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + 4 + #ifndef __QCOM_SND_SDW_H__ 5 + #define __QCOM_SND_SDW_H__ 6 + 7 + #include <linux/soundwire/sdw.h> 8 + 9 + int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream, 10 + struct sdw_stream_runtime *runtime, 11 + bool *stream_prepared); 12 + int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, 13 + struct snd_pcm_hw_params *params, 14 + struct sdw_stream_runtime **psruntime); 15 + int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream, 16 + struct sdw_stream_runtime *sruntime, 17 + bool *stream_prepared); 18 + #endif
+1
sound/soc/qcom/sm8250.c
··· 12 12 #include <linux/input-event-codes.h> 13 13 #include "qdsp6/q6afe.h" 14 14 #include "common.h" 15 + #include "sdw.h" 15 16 16 17 #define DRIVER_NAME "sm8250" 17 18 #define MI2S_BCLK_RATE 1536000
+3 -1
sound/soc/sof/debug.c
··· 353 353 return err; 354 354 } 355 355 356 - return 0; 356 + return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state, 357 + sizeof(sdev->fw_state), 358 + "fw_state", 0444); 357 359 } 358 360 EXPORT_SYMBOL_GPL(snd_sof_dbg_init); 359 361
+4 -5
sound/soc/sof/pm.c
··· 182 182 const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm; 183 183 const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg; 184 184 pm_message_t pm_state; 185 - u32 target_state = 0; 185 + u32 target_state = snd_sof_dsp_power_target(sdev); 186 186 int ret; 187 187 188 188 /* do nothing if dsp suspend callback is not set */ ··· 191 191 192 192 if (runtime_suspend && !sof_ops(sdev)->runtime_suspend) 193 193 return 0; 194 + 195 + if (tplg_ops && tplg_ops->tear_down_all_pipelines) 196 + tplg_ops->tear_down_all_pipelines(sdev, false); 194 197 195 198 if (sdev->fw_state != SOF_FW_BOOT_COMPLETE) 196 199 goto suspend; ··· 209 206 } 210 207 } 211 208 212 - target_state = snd_sof_dsp_power_target(sdev); 213 209 pm_state.event = target_state; 214 210 215 211 /* Skip to platform-specific suspend if DSP is entering D0 */ ··· 218 216 sof_suspend_clients(sdev, pm_state); 219 217 goto suspend; 220 218 } 221 - 222 - if (tplg_ops->tear_down_all_pipelines) 223 - tplg_ops->tear_down_all_pipelines(sdev, false); 224 219 225 220 /* suspend DMA trace */ 226 221 sof_fw_trace_suspend(sdev, pm_state);
+2 -1
sound/usb/implicit.c
··· 471 471 subs = find_matching_substream(chip, stream, target->sync_ep, 472 472 target->fmt_type); 473 473 if (!subs) 474 - return sync_fmt; 474 + goto end; 475 475 476 476 high_score = 0; 477 477 list_for_each_entry(fp, &subs->fmt_list, list) { ··· 485 485 } 486 486 } 487 487 488 + end: 488 489 if (fixed_rate) 489 490 *fixed_rate = snd_usb_pcm_has_fixed_rate(subs); 490 491 return sync_fmt;
+138 -84
sound/usb/pcm.c
··· 160 160 bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs) 161 161 { 162 162 const struct audioformat *fp; 163 - struct snd_usb_audio *chip = subs->stream->chip; 163 + struct snd_usb_audio *chip; 164 164 int rate = -1; 165 165 166 + if (!subs) 167 + return false; 168 + chip = subs->stream->chip; 166 169 if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE)) 167 170 return false; 168 171 list_for_each_entry(fp, &subs->fmt_list, list) { ··· 528 525 if (snd_usb_endpoint_compatible(chip, subs->data_endpoint, 529 526 fmt, hw_params)) 530 527 goto unlock; 528 + if (stop_endpoints(subs, false)) 529 + sync_pending_stops(subs); 531 530 close_endpoints(chip, subs); 532 531 } 533 532 ··· 792 787 return changed; 793 788 } 794 789 790 + /* get the specified endpoint object that is being used by other streams 791 + * (i.e. the parameter is locked) 792 + */ 793 + static const struct snd_usb_endpoint * 794 + get_endpoint_in_use(struct snd_usb_audio *chip, int endpoint, 795 + const struct snd_usb_endpoint *ref_ep) 796 + { 797 + const struct snd_usb_endpoint *ep; 798 + 799 + ep = snd_usb_get_endpoint(chip, endpoint); 800 + if (ep && ep->cur_audiofmt && (ep != ref_ep || ep->opened > 1)) 801 + return ep; 802 + return NULL; 803 + } 804 + 795 805 static int hw_rule_rate(struct snd_pcm_hw_params *params, 796 806 struct snd_pcm_hw_rule *rule) 797 807 { 798 808 struct snd_usb_substream *subs = rule->private; 799 809 struct snd_usb_audio *chip = subs->stream->chip; 810 + const struct snd_usb_endpoint *ep; 800 811 const struct audioformat *fp; 801 812 struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); 802 813 unsigned int rmin, rmax, r; ··· 824 803 list_for_each_entry(fp, &subs->fmt_list, list) { 825 804 if (!hw_check_valid_format(subs, params, fp)) 826 805 continue; 806 + 807 + ep = get_endpoint_in_use(chip, fp->endpoint, 808 + subs->data_endpoint); 809 + if (ep) { 810 + hwc_debug("rate limit %d for ep#%x\n", 811 + ep->cur_rate, fp->endpoint); 812 + 
rmin = min(rmin, ep->cur_rate); 813 + rmax = max(rmax, ep->cur_rate); 814 + continue; 815 + } 816 + 817 + if (fp->implicit_fb) { 818 + ep = get_endpoint_in_use(chip, fp->sync_ep, 819 + subs->sync_endpoint); 820 + if (ep) { 821 + hwc_debug("rate limit %d for sync_ep#%x\n", 822 + ep->cur_rate, fp->sync_ep); 823 + rmin = min(rmin, ep->cur_rate); 824 + rmax = max(rmax, ep->cur_rate); 825 + continue; 826 + } 827 + } 828 + 827 829 r = snd_usb_endpoint_get_clock_rate(chip, fp->clock); 828 830 if (r > 0) { 829 831 if (!snd_interval_test(it, r)) ··· 916 872 struct snd_pcm_hw_rule *rule) 917 873 { 918 874 struct snd_usb_substream *subs = rule->private; 875 + struct snd_usb_audio *chip = subs->stream->chip; 876 + const struct snd_usb_endpoint *ep; 919 877 const struct audioformat *fp; 920 878 struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); 921 879 u64 fbits; ··· 927 881 list_for_each_entry(fp, &subs->fmt_list, list) { 928 882 if (!hw_check_valid_format(subs, params, fp)) 929 883 continue; 884 + 885 + ep = get_endpoint_in_use(chip, fp->endpoint, 886 + subs->data_endpoint); 887 + if (ep) { 888 + hwc_debug("format limit %d for ep#%x\n", 889 + ep->cur_format, fp->endpoint); 890 + fbits |= pcm_format_to_bits(ep->cur_format); 891 + continue; 892 + } 893 + 894 + if (fp->implicit_fb) { 895 + ep = get_endpoint_in_use(chip, fp->sync_ep, 896 + subs->sync_endpoint); 897 + if (ep) { 898 + hwc_debug("format limit %d for sync_ep#%x\n", 899 + ep->cur_format, fp->sync_ep); 900 + fbits |= pcm_format_to_bits(ep->cur_format); 901 + continue; 902 + } 903 + } 904 + 930 905 fbits |= fp->formats; 931 906 } 932 907 return apply_hw_params_format_bits(fmt, fbits); ··· 980 913 return apply_hw_params_minmax(it, pmin, UINT_MAX); 981 914 } 982 915 983 - /* get the EP or the sync EP for implicit fb when it's already set up */ 984 - static const struct snd_usb_endpoint * 985 - get_sync_ep_from_substream(struct snd_usb_substream *subs) 986 - { 987 - struct snd_usb_audio *chip = 
subs->stream->chip; 988 - const struct audioformat *fp; 989 - const struct snd_usb_endpoint *ep; 990 - 991 - list_for_each_entry(fp, &subs->fmt_list, list) { 992 - ep = snd_usb_get_endpoint(chip, fp->endpoint); 993 - if (ep && ep->cur_audiofmt) { 994 - /* if EP is already opened solely for this substream, 995 - * we still allow us to change the parameter; otherwise 996 - * this substream has to follow the existing parameter 997 - */ 998 - if (ep->cur_audiofmt != subs->cur_audiofmt || ep->opened > 1) 999 - return ep; 1000 - } 1001 - if (!fp->implicit_fb) 1002 - continue; 1003 - /* for the implicit fb, check the sync ep as well */ 1004 - ep = snd_usb_get_endpoint(chip, fp->sync_ep); 1005 - if (ep && ep->cur_audiofmt) 1006 - return ep; 1007 - } 1008 - return NULL; 1009 - } 1010 - 1011 916 /* additional hw constraints for implicit feedback mode */ 1012 - static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params, 1013 - struct snd_pcm_hw_rule *rule) 1014 - { 1015 - struct snd_usb_substream *subs = rule->private; 1016 - const struct snd_usb_endpoint *ep; 1017 - struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); 1018 - 1019 - ep = get_sync_ep_from_substream(subs); 1020 - if (!ep) 1021 - return 0; 1022 - 1023 - hwc_debug("applying %s\n", __func__); 1024 - return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format)); 1025 - } 1026 - 1027 - static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params, 1028 - struct snd_pcm_hw_rule *rule) 1029 - { 1030 - struct snd_usb_substream *subs = rule->private; 1031 - const struct snd_usb_endpoint *ep; 1032 - struct snd_interval *it; 1033 - 1034 - ep = get_sync_ep_from_substream(subs); 1035 - if (!ep) 1036 - return 0; 1037 - 1038 - hwc_debug("applying %s\n", __func__); 1039 - it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); 1040 - return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate); 1041 - } 1042 - 1043 917 static int hw_rule_period_size_implicit_fb(struct 
snd_pcm_hw_params *params, 1044 918 struct snd_pcm_hw_rule *rule) 1045 919 { 1046 920 struct snd_usb_substream *subs = rule->private; 921 + struct snd_usb_audio *chip = subs->stream->chip; 922 + const struct audioformat *fp; 1047 923 const struct snd_usb_endpoint *ep; 1048 924 struct snd_interval *it; 925 + unsigned int rmin, rmax; 1049 926 1050 - ep = get_sync_ep_from_substream(subs); 1051 - if (!ep) 1052 - return 0; 1053 - 1054 - hwc_debug("applying %s\n", __func__); 1055 927 it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE); 1056 - return apply_hw_params_minmax(it, ep->cur_period_frames, 1057 - ep->cur_period_frames); 928 + hwc_debug("hw_rule_period_size: (%u,%u)\n", it->min, it->max); 929 + rmin = UINT_MAX; 930 + rmax = 0; 931 + list_for_each_entry(fp, &subs->fmt_list, list) { 932 + if (!hw_check_valid_format(subs, params, fp)) 933 + continue; 934 + ep = get_endpoint_in_use(chip, fp->endpoint, 935 + subs->data_endpoint); 936 + if (ep) { 937 + hwc_debug("period size limit %d for ep#%x\n", 938 + ep->cur_period_frames, fp->endpoint); 939 + rmin = min(rmin, ep->cur_period_frames); 940 + rmax = max(rmax, ep->cur_period_frames); 941 + continue; 942 + } 943 + 944 + if (fp->implicit_fb) { 945 + ep = get_endpoint_in_use(chip, fp->sync_ep, 946 + subs->sync_endpoint); 947 + if (ep) { 948 + hwc_debug("period size limit %d for sync_ep#%x\n", 949 + ep->cur_period_frames, fp->sync_ep); 950 + rmin = min(rmin, ep->cur_period_frames); 951 + rmax = max(rmax, ep->cur_period_frames); 952 + continue; 953 + } 954 + } 955 + } 956 + 957 + if (!rmax) 958 + return 0; /* no limit by implicit fb */ 959 + return apply_hw_params_minmax(it, rmin, rmax); 1058 960 } 1059 961 1060 962 static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params, 1061 963 struct snd_pcm_hw_rule *rule) 1062 964 { 1063 965 struct snd_usb_substream *subs = rule->private; 966 + struct snd_usb_audio *chip = subs->stream->chip; 967 + const struct audioformat *fp; 1064 968 const struct 
snd_usb_endpoint *ep; 1065 969 struct snd_interval *it; 970 + unsigned int rmin, rmax; 1066 971 1067 - ep = get_sync_ep_from_substream(subs); 1068 - if (!ep) 1069 - return 0; 1070 - 1071 - hwc_debug("applying %s\n", __func__); 1072 972 it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS); 1073 - return apply_hw_params_minmax(it, ep->cur_buffer_periods, 1074 - ep->cur_buffer_periods); 973 + hwc_debug("hw_rule_periods: (%u,%u)\n", it->min, it->max); 974 + rmin = UINT_MAX; 975 + rmax = 0; 976 + list_for_each_entry(fp, &subs->fmt_list, list) { 977 + if (!hw_check_valid_format(subs, params, fp)) 978 + continue; 979 + ep = get_endpoint_in_use(chip, fp->endpoint, 980 + subs->data_endpoint); 981 + if (ep) { 982 + hwc_debug("periods limit %d for ep#%x\n", 983 + ep->cur_buffer_periods, fp->endpoint); 984 + rmin = min(rmin, ep->cur_buffer_periods); 985 + rmax = max(rmax, ep->cur_buffer_periods); 986 + continue; 987 + } 988 + 989 + if (fp->implicit_fb) { 990 + ep = get_endpoint_in_use(chip, fp->sync_ep, 991 + subs->sync_endpoint); 992 + if (ep) { 993 + hwc_debug("periods limit %d for sync_ep#%x\n", 994 + ep->cur_buffer_periods, fp->sync_ep); 995 + rmin = min(rmin, ep->cur_buffer_periods); 996 + rmax = max(rmax, ep->cur_buffer_periods); 997 + continue; 998 + } 999 + } 1000 + } 1001 + 1002 + if (!rmax) 1003 + return 0; /* no limit by implicit fb */ 1004 + return apply_hw_params_minmax(it, rmin, rmax); 1075 1005 } 1076 1006 1077 1007 /* ··· 1177 1113 return err; 1178 1114 1179 1115 /* additional hw constraints for implicit fb */ 1180 - err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, 1181 - hw_rule_format_implicit_fb, subs, 1182 - SNDRV_PCM_HW_PARAM_FORMAT, -1); 1183 - if (err < 0) 1184 - return err; 1185 - err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 1186 - hw_rule_rate_implicit_fb, subs, 1187 - SNDRV_PCM_HW_PARAM_RATE, -1); 1188 - if (err < 0) 1189 - return err; 1190 1116 err = snd_pcm_hw_rule_add(runtime, 0, 
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 1191 1117 hw_rule_period_size_implicit_fb, subs, 1192 1118 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+6
sound/usb/stream.c
··· 1222 1222 if (err < 0) 1223 1223 return err; 1224 1224 } 1225 + 1226 + /* try to set the interface... */ 1227 + usb_set_interface(chip->dev, iface_no, 0); 1228 + snd_usb_init_pitch(chip, fp); 1229 + snd_usb_init_sample_rate(chip, fp, fp->rate_max); 1230 + usb_set_interface(chip->dev, iface_no, altno); 1225 1231 } 1226 1232 return 0; 1227 1233 }
+7 -1
tools/arch/arm64/include/asm/cputype.h
··· 41 41 (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) 42 42 43 43 #define MIDR_CPU_MODEL(imp, partnum) \ 44 - (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ 44 + ((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \ 45 45 (0xf << MIDR_ARCHITECTURE_SHIFT) | \ 46 46 ((partnum) << MIDR_PARTNUM_SHIFT)) 47 47 ··· 80 80 #define ARM_CPU_PART_CORTEX_X1 0xD44 81 81 #define ARM_CPU_PART_CORTEX_A510 0xD46 82 82 #define ARM_CPU_PART_CORTEX_A710 0xD47 83 + #define ARM_CPU_PART_CORTEX_A715 0xD4D 83 84 #define ARM_CPU_PART_CORTEX_X2 0xD48 84 85 #define ARM_CPU_PART_NEOVERSE_N2 0xD49 85 86 #define ARM_CPU_PART_CORTEX_A78C 0xD4B ··· 124 123 #define APPLE_CPU_PART_M1_FIRESTORM_PRO 0x025 125 124 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028 126 125 #define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029 126 + #define APPLE_CPU_PART_M2_BLIZZARD 0x032 127 + #define APPLE_CPU_PART_M2_AVALANCHE 0x033 127 128 128 129 #define AMPERE_CPU_PART_AMPERE1 0xAC3 129 130 ··· 145 142 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) 146 143 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) 147 144 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) 145 + #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715) 148 146 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) 149 147 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) 150 148 #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) ··· 179 175 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO) 180 176 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX) 181 177 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX) 178 + #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, 
APPLE_CPU_PART_M2_BLIZZARD) 179 + #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE) 182 180 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) 183 181 184 182 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+1
tools/arch/arm64/include/uapi/asm/kvm.h
··· 43 43 #define __KVM_HAVE_VCPU_EVENTS 44 44 45 45 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 46 + #define KVM_DIRTY_LOG_PAGE_OFFSET 64 46 47 47 48 #define KVM_REG_SIZE(id) \ 48 49 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+5
tools/arch/x86/include/uapi/asm/kvm.h
··· 206 206 struct kvm_msr_filter_range { 207 207 #define KVM_MSR_FILTER_READ (1 << 0) 208 208 #define KVM_MSR_FILTER_WRITE (1 << 1) 209 + #define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | \ 210 + KVM_MSR_FILTER_WRITE) 209 211 __u32 flags; 210 212 __u32 nmsrs; /* number of msrs in bitmap */ 211 213 __u32 base; /* MSR index the bitmap starts at */ ··· 216 214 217 215 #define KVM_MSR_FILTER_MAX_RANGES 16 218 216 struct kvm_msr_filter { 217 + #ifndef __KERNEL__ 219 218 #define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0) 219 + #endif 220 220 #define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0) 221 + #define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY) 221 222 __u32 flags; 222 223 struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES]; 223 224 };
+9
tools/include/linux/build_bug.h
··· 79 79 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) 80 80 #endif // static_assert 81 81 82 + 83 + /* 84 + * Compile time check that field has an expected offset 85 + */ 86 + #define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \ 87 + BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \ 88 + "Offset of " #field " in " #type " has changed.") 89 + 90 + 82 91 #endif /* _LINUX_BUILD_BUG_H */
+3
tools/include/uapi/linux/kvm.h
··· 1767 1767 __u8 runstate_update_flag; 1768 1768 struct { 1769 1769 __u64 gfn; 1770 + #define KVM_XEN_INVALID_GFN ((__u64)-1) 1770 1771 } shared_info; 1771 1772 struct { 1772 1773 __u32 send_port; ··· 1799 1798 } u; 1800 1799 }; 1801 1800 1801 + 1802 1802 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ 1803 1803 #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 1804 1804 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 ··· 1825 1823 __u16 pad[3]; 1826 1824 union { 1827 1825 __u64 gpa; 1826 + #define KVM_XEN_INVALID_GPA ((__u64)-1) 1828 1827 __u64 pad[8]; 1829 1828 struct { 1830 1829 __u64 state;
+14 -1
tools/perf/tests/shell/buildid.sh
··· 77 77 file=${build_id_dir}/.build-id/${id:0:2}/`readlink ${link}`/elf 78 78 echo "file: ${file}" 79 79 80 - if [ ! -x $file ]; then 80 + # Check for file permission of original file 81 + # in case of pe-file.exe file 82 + echo $1 | grep ".exe" 83 + if [ $? -eq 0 ]; then 84 + if [ -x $1 -a ! -x $file ]; then 85 + echo "failed: file ${file} executable does not exist" 86 + exit 1 87 + fi 88 + 89 + if [ ! -x $file -a ! -e $file ]; then 90 + echo "failed: file ${file} does not exist" 91 + exit 1 92 + fi 93 + elif [ ! -x $file ]; then 81 94 echo "failed: file ${file} does not exist" 82 95 exit 1 83 96 fi
+4 -1
tools/perf/trace/beauty/include/linux/socket.h
··· 33 33 34 34 struct sockaddr { 35 35 sa_family_t sa_family; /* address family, AF_xxx */ 36 - char sa_data[14]; /* 14 bytes of protocol address */ 36 + union { 37 + char sa_data_min[14]; /* Minimum 14 bytes of protocol address */ 38 + DECLARE_FLEX_ARRAY(char, sa_data); 39 + }; 37 40 }; 38 41 39 42 struct linger {
+7 -3
tools/perf/util/build-id.c
··· 715 715 } else if (nsi && nsinfo__need_setns(nsi)) { 716 716 if (copyfile_ns(name, filename, nsi)) 717 717 goto out_free; 718 - } else if (link(realname, filename) && errno != EEXIST && 719 - copyfile(name, filename)) 720 - goto out_free; 718 + } else if (link(realname, filename) && errno != EEXIST) { 719 + struct stat f_stat; 720 + 721 + if (!(stat(name, &f_stat) < 0) && 722 + copyfile_mode(name, filename, f_stat.st_mode)) 723 + goto out_free; 724 + } 721 725 } 722 726 723 727 /* Some binaries are stripped, but have .debug files with their symbol
+4 -1
tools/perf/util/expr.l
··· 42 42 char *dst = str; 43 43 44 44 while (*str) { 45 - if (*str == '\\') 45 + if (*str == '\\') { 46 46 *dst++ = *++str; 47 + if (!*str) 48 + break; 49 + } 47 50 else if (*str == '?') { 48 51 char *paramval; 49 52 int i = 0;
+4
tools/testing/memblock/internal.h
··· 15 15 16 16 struct page {}; 17 17 18 + void __free_pages_core(struct page *page, unsigned int order) 19 + { 20 + } 21 + 18 22 void memblock_free_pages(struct page *page, unsigned long pfn, 19 23 unsigned int order) 20 24 {
+47
tools/testing/selftests/net/mptcp/userspace_pm.sh
··· 752 752 "$server4_token" > /dev/null 2>&1 753 753 } 754 754 755 + test_subflows_v4_v6_mix() 756 + { 757 + # Attempt to add a listener at 10.0.2.1:<subflow-port> 758 + ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\ 759 + $app6_port > /dev/null 2>&1 & 760 + local listener_pid=$! 761 + 762 + # ADD_ADDR4 from server to client machine reusing the subflow port on 763 + # the established v6 connection 764 + :>"$client_evts" 765 + ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\ 766 + $server_addr_id dev ns1eth2 > /dev/null 2>&1 767 + stdbuf -o0 -e0 printf "ADD_ADDR4 id:%d 10.0.2.1 (ns1) => ns2, reuse port\t\t" $server_addr_id 768 + sleep 0.5 769 + verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\ 770 + "$server_addr_id" "$app6_port" 771 + 772 + # CREATE_SUBFLOW from client to server machine 773 + :>"$client_evts" 774 + ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\ 775 + $app6_port token "$client6_token" > /dev/null 2>&1 776 + sleep 0.5 777 + verify_subflow_events "$client_evts" "$SUB_ESTABLISHED" "$client6_token"\ 778 + "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\ 779 + "$server_addr_id" "ns2" "ns1" 780 + 781 + # Delete the listener from the server ns, if one was created 782 + kill_wait $listener_pid 783 + 784 + sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts") 785 + 786 + # DESTROY_SUBFLOW from client to server machine 787 + :>"$client_evts" 788 + ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\ 789 + $app6_port token "$client6_token" > /dev/null 2>&1 790 + sleep 0.5 791 + verify_subflow_events "$client_evts" "$SUB_CLOSED" "$client6_token" \ 792 + "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\ 793 + "$server_addr_id" "ns2" "ns1" 794 + 795 + # RM_ADDR from server to client machine 796 + ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\ 797 + "$server6_token" > /dev/null 2>&1 798 + sleep 
0.5 799 + } 800 + 755 801 test_prio() 756 802 { 757 803 local count ··· 907 861 test_announce 908 862 test_remove 909 863 test_subflows 864 + test_subflows_v4_v6_mix 910 865 test_prio 911 866 test_listener 912 867
+7 -5
tools/testing/selftests/net/toeplitz.c
··· 215 215 } 216 216 217 217 /* A single TPACKET_V3 block can hold multiple frames */ 218 - static void recv_block(struct ring_state *ring) 218 + static bool recv_block(struct ring_state *ring) 219 219 { 220 220 struct tpacket_block_desc *block; 221 221 char *frame; ··· 223 223 224 224 block = (void *)(ring->mmap + ring->idx * ring_block_sz); 225 225 if (!(block->hdr.bh1.block_status & TP_STATUS_USER)) 226 - return; 226 + return false; 227 227 228 228 frame = (char *)block; 229 229 frame += block->hdr.bh1.offset_to_first_pkt; ··· 235 235 236 236 block->hdr.bh1.block_status = TP_STATUS_KERNEL; 237 237 ring->idx = (ring->idx + 1) % ring_block_nr; 238 + 239 + return true; 238 240 } 239 241 240 242 /* simple test: sleep once unconditionally and then process all rings */ ··· 247 245 usleep(1000 * cfg_timeout_msec); 248 246 249 247 for (i = 0; i < num_cpus; i++) 250 - recv_block(&rings[i]); 248 + do {} while (recv_block(&rings[i])); 251 249 252 250 fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n", 253 251 frames_received - frames_nohash - frames_error, ··· 259 257 struct tpacket_req3 req3 = {0}; 260 258 void *ring; 261 259 262 - req3.tp_retire_blk_tov = cfg_timeout_msec; 260 + req3.tp_retire_blk_tov = cfg_timeout_msec / 8; 263 261 req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; 264 262 265 263 req3.tp_frame_size = 2048; 266 264 req3.tp_frame_nr = 1 << 10; 267 - req3.tp_block_nr = 2; 265 + req3.tp_block_nr = 16; 268 266 269 267 req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr; 270 268 req3.tp_block_size /= req3.tp_block_nr;
+9 -7
tools/testing/selftests/netfilter/nft_trans_stress.sh
··· 10 10 ksft_skip=4 11 11 12 12 testns=testns-$(mktemp -u "XXXXXXXX") 13 + tmp="" 13 14 14 15 tables="foo bar baz quux" 15 16 global_ret=0 16 17 eret=0 17 18 lret=0 19 + 20 + cleanup() { 21 + ip netns pids "$testns" | xargs kill 2>/dev/null 22 + ip netns del "$testns" 23 + 24 + rm -f "$tmp" 25 + } 18 26 19 27 check_result() 20 28 { ··· 51 43 exit $ksft_skip 52 44 fi 53 45 46 + trap cleanup EXIT 54 47 tmp=$(mktemp) 55 48 56 49 for table in $tables; do ··· 147 138 done 148 139 149 140 check_result $lret "add/delete with nftrace enabled" 150 - 151 - pkill -9 ping 152 - 153 - wait 154 - 155 - rm -f "$tmp" 156 - ip netns del "$testns" 157 141 158 142 exit $global_ret
+1
tools/testing/selftests/netfilter/settings
··· 1 + timeout=120
+7 -5
tools/testing/selftests/proc/proc-empty-vm.c
··· 25 25 #undef NDEBUG 26 26 #include <assert.h> 27 27 #include <errno.h> 28 + #include <stdint.h> 28 29 #include <stdio.h> 29 30 #include <stdlib.h> 30 31 #include <string.h> ··· 42 41 * 1: vsyscall VMA is --xp vsyscall=xonly 43 42 * 2: vsyscall VMA is r-xp vsyscall=emulate 44 43 */ 45 - static int g_vsyscall; 44 + static volatile int g_vsyscall; 46 45 static const char *g_proc_pid_maps_vsyscall; 47 46 static const char *g_proc_pid_smaps_vsyscall; 48 47 ··· 148 147 149 148 g_vsyscall = 0; 150 149 /* gettimeofday(NULL, NULL); */ 150 + uint64_t rax = 0xffffffffff600000; 151 151 asm volatile ( 152 - "call %P0" 153 - : 154 - : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL) 155 - : "rax", "rcx", "r11" 152 + "call *%[rax]" 153 + : [rax] "+a" (rax) 154 + : "D" (NULL), "S" (NULL) 155 + : "rcx", "r11" 156 156 ); 157 157 158 158 g_vsyscall = 1;
+5 -4
tools/testing/selftests/proc/proc-pid-vm.c
··· 257 257 258 258 g_vsyscall = 0; 259 259 /* gettimeofday(NULL, NULL); */ 260 + uint64_t rax = 0xffffffffff600000; 260 261 asm volatile ( 261 - "call %P0" 262 - : 263 - : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL) 264 - : "rax", "rcx", "r11" 262 + "call *%[rax]" 263 + : [rax] "+a" (rax) 264 + : "D" (NULL), "S" (NULL) 265 + : "rcx", "r11" 265 266 ); 266 267 267 268 g_vsyscall = 1;
+7
virt/kvm/kvm_main.c
··· 3954 3954 } 3955 3955 3956 3956 mutex_lock(&kvm->lock); 3957 + 3958 + #ifdef CONFIG_LOCKDEP 3959 + /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */ 3960 + mutex_lock(&vcpu->mutex); 3961 + mutex_unlock(&vcpu->mutex); 3962 + #endif 3963 + 3957 3964 if (kvm_get_vcpu_by_id(kvm, id)) { 3958 3965 r = -EEXIST; 3959 3966 goto unlock_vcpu_destroy;