Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

ASoC: qcom: q6dsp: fixes and updates

Merge series from Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>:

This patchset has 4 fixes and some enhancements to the Elite DSP driver
support.
Fixes include:
- setting correct flags for the expected behaviour of appl_ptr
- fixing the closing of copp instances
- fixing buffer alignment
- fixing state checks before closing the asm stream
Enhancements include:
- adding q6asm_get_hw_pointer and ack callback support (see the first
  sketch below)
- simplifying code via the __free(kfree) mechanism (see the second
  sketch below)
- using spinlock guards (also shown in the second sketch)
- a few cleanups discovered while doing the above two
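
For readers unfamiliar with the callbacks named above, here is a minimal,
hypothetical sketch of how an ALSA PCM driver wires up the .pointer and
.ack ops; the struct and function names are illustrative and are not taken
from the q6asm code itself. .pointer reports the current hardware position,
and .ack runs whenever user space moves appl_ptr, giving the driver a place
to forward the new application pointer to the DSP.

        #include <sound/pcm.h>

        struct my_prtd {
                size_t hw_pos;  /* last position reported by the DSP, in bytes */
        };

        /* report the current hardware position in frames */
        static snd_pcm_uframes_t my_pcm_pointer(struct snd_pcm_substream *substream)
        {
                struct my_prtd *prtd = substream->runtime->private_data;

                return bytes_to_frames(substream->runtime, prtd->hw_pos);
        }

        /* called whenever user space updates runtime->control->appl_ptr */
        static int my_pcm_ack(struct snd_pcm_substream *substream)
        {
                /* a real driver would push the new appl_ptr to the firmware here */
                return 0;
        }

        static const struct snd_pcm_ops my_pcm_ops = {
                .pointer = my_pcm_pointer,
                .ack     = my_pcm_ack,
        };

And here is a minimal sketch of the two cleanup idioms from
<linux/cleanup.h> that the series adopts; again, the structure and field
names are made up for illustration.

        #include <linux/cleanup.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct q6_example {
                spinlock_t lock;
                int state;
        };

        static int example_update(struct q6_example *ex, size_t len)
        {
                /* freed automatically on every return path, no goto/kfree needed */
                u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;

                /* lock taken here, dropped automatically when the scope ends */
                guard(spinlock_irqsave)(&ex->lock);
                ex->state++;

                return 0;
        }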

There is another set of updates coming soon, which will add support
for early memory mapping and a few more modules in audioreach.

+2135 -1339
+1
.mailmap
··· 644 644 Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com> 645 645 Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com> 646 646 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com> 647 + Rae Moar <raemoar63@gmail.com> <rmoar@google.com> 647 648 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl> 648 649 Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org> 649 650 Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
+4
CREDITS
··· 2036 2036 S: 602 00 Brno 2037 2037 S: Czech Republic 2038 2038 2039 + N: Karsten Keil 2040 + E: isdn@linux-pingi.de 2041 + D: ISDN subsystem maintainer 2042 + 2039 2043 N: Jakob Kemi 2040 2044 E: jakob.kemi@telia.com 2041 2045 D: V4L W9966 Webcam driver
+2 -2
Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
··· 180 180 then: 181 181 properties: 182 182 reg: 183 - minItems: 2 183 + maxItems: 2 184 184 reg-names: 185 - minItems: 2 185 + maxItems: 2 186 186 else: 187 187 properties: 188 188 reg:
+2 -2
Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml
··· 32 32 33 33 $ref: /schemas/types.yaml#/definitions/uint32-array 34 34 minItems: 2 35 - maxItems: 2 35 + maxItems: 4 36 36 items: 37 37 enum: [1, 2, 3, 4] 38 38 ··· 48 48 49 49 $ref: /schemas/types.yaml#/definitions/uint32-array 50 50 minItems: 2 51 - maxItems: 2 51 + maxItems: 5 52 52 items: 53 53 enum: [1, 2, 3, 4, 5] 54 54
+2
Documentation/netlink/specs/dpll.yaml
··· 605 605 reply: &pin-attrs 606 606 attributes: 607 607 - id 608 + - module-name 609 + - clock-id 608 610 - board-label 609 611 - panel-label 610 612 - package-label
-3
Documentation/networking/netconsole.rst
··· 19 19 20 20 Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025 21 21 22 - Please send bug reports to Matt Mackall <mpm@selenic.com> 23 - Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com> 24 - 25 22 Introduction: 26 23 ============= 27 24
+8 -8
MAINTAINERS
··· 13260 13260 F: drivers/infiniband/ulp/isert 13261 13261 13262 13262 ISDN/CMTP OVER BLUETOOTH 13263 - M: Karsten Keil <isdn@linux-pingi.de> 13264 - L: isdn4linux@listserv.isdn4linux.de (subscribers-only) 13265 13263 L: netdev@vger.kernel.org 13266 - S: Odd Fixes 13264 + S: Orphan 13267 13265 W: http://www.isdn4linux.de 13268 13266 F: Documentation/isdn/ 13269 13267 F: drivers/isdn/capi/ ··· 13270 13272 F: net/bluetooth/cmtp/ 13271 13273 13272 13274 ISDN/mISDN SUBSYSTEM 13273 - M: Karsten Keil <isdn@linux-pingi.de> 13274 - L: isdn4linux@listserv.isdn4linux.de (subscribers-only) 13275 13275 L: netdev@vger.kernel.org 13276 - S: Maintained 13276 + S: Orphan 13277 13277 W: http://www.isdn4linux.de 13278 13278 F: drivers/isdn/Kconfig 13279 13279 F: drivers/isdn/Makefile ··· 13425 13429 F: scripts/Makefile.kasan 13426 13430 13427 13431 KCONFIG 13432 + M: Nathan Chancellor <nathan@kernel.org> 13433 + M: Nicolas Schier <nsc@kernel.org> 13428 13434 L: linux-kbuild@vger.kernel.org 13429 - S: Orphan 13435 + S: Odd Fixes 13430 13436 Q: https://patchwork.kernel.org/project/linux-kbuild/list/ 13437 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git 13431 13438 F: Documentation/kbuild/kconfig* 13432 13439 F: scripts/Kconfig.include 13433 13440 F: scripts/kconfig/ ··· 13615 13616 KERNEL UNIT TESTING FRAMEWORK (KUnit) 13616 13617 M: Brendan Higgins <brendan.higgins@linux.dev> 13617 13618 M: David Gow <davidgow@google.com> 13618 - R: Rae Moar <rmoar@google.com> 13619 + R: Rae Moar <raemoar63@gmail.com> 13619 13620 L: linux-kselftest@vger.kernel.org 13620 13621 L: kunit-dev@googlegroups.com 13621 13622 S: Maintained ··· 21331 21332 QUALCOMM WCN36XX WIRELESS DRIVER 21332 21333 M: Loic Poulain <loic.poulain@oss.qualcomm.com> 21333 21334 L: wcn36xx@lists.infradead.org 21335 + L: linux-wireless@vger.kernel.org 21334 21336 S: Supported 21335 21337 W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx 21336 21338 F: drivers/net/wireless/ath/wcn36xx/
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 18 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+3 -2
arch/arm64/net/bpf_jit_comp.c
··· 1213 1213 u8 src = bpf2a64[insn->src_reg]; 1214 1214 const u8 tmp = bpf2a64[TMP_REG_1]; 1215 1215 const u8 tmp2 = bpf2a64[TMP_REG_2]; 1216 + const u8 tmp3 = bpf2a64[TMP_REG_3]; 1216 1217 const u8 fp = bpf2a64[BPF_REG_FP]; 1217 1218 const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; 1218 1219 const u8 priv_sp = bpf2a64[PRIVATE_SP]; ··· 1758 1757 case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1759 1758 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1760 1759 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { 1761 - emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); 1762 - dst = tmp2; 1760 + emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx); 1761 + dst = tmp3; 1763 1762 } 1764 1763 if (dst == fp) { 1765 1764 dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
-1
arch/s390/Kconfig
··· 158 158 select ARCH_WANT_IRQS_OFF_ACTIVATE_MM 159 159 select ARCH_WANT_KERNEL_PMD_MKWRITE 160 160 select ARCH_WANT_LD_ORPHAN_WARN 161 - select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP 162 161 select ARCH_WANTS_THP_SWAP 163 162 select BUILDTIME_TABLE_SORT 164 163 select CLONE_BACKWARDS2
+9 -5
arch/s390/configs/debug_defconfig
··· 101 101 CONFIG_MEMORY_HOTPLUG=y 102 102 CONFIG_MEMORY_HOTREMOVE=y 103 103 CONFIG_KSM=y 104 + CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y 104 105 CONFIG_TRANSPARENT_HUGEPAGE=y 105 106 CONFIG_CMA_DEBUGFS=y 106 107 CONFIG_CMA_SYSFS=y ··· 124 123 CONFIG_TLS_TOE=y 125 124 CONFIG_XFRM_USER=m 126 125 CONFIG_NET_KEY=m 127 - CONFIG_XDP_SOCKETS=y 128 - CONFIG_XDP_SOCKETS_DIAG=m 129 - CONFIG_DIBS=y 130 - CONFIG_DIBS_LO=y 131 126 CONFIG_SMC=m 132 127 CONFIG_SMC_DIAG=m 128 + CONFIG_DIBS=y 129 + CONFIG_DIBS_LO=y 130 + CONFIG_XDP_SOCKETS=y 131 + CONFIG_XDP_SOCKETS_DIAG=m 133 132 CONFIG_INET=y 134 133 CONFIG_IP_MULTICAST=y 135 134 CONFIG_IP_ADVANCED_ROUTER=y ··· 473 472 CONFIG_SCSI_DH_ALUA=m 474 473 CONFIG_MD=y 475 474 CONFIG_BLK_DEV_MD=y 475 + CONFIG_MD_LLBITMAP=y 476 476 # CONFIG_MD_BITMAP_FILE is not set 477 477 CONFIG_MD_LINEAR=m 478 478 CONFIG_MD_CLUSTER=m ··· 656 654 CONFIG_JFS_SECURITY=y 657 655 CONFIG_JFS_STATISTICS=y 658 656 CONFIG_XFS_FS=y 657 + CONFIG_XFS_SUPPORT_V4=y 658 + CONFIG_XFS_SUPPORT_ASCII_CI=y 659 659 CONFIG_XFS_QUOTA=y 660 660 CONFIG_XFS_POSIX_ACL=y 661 661 CONFIG_XFS_RT=y 662 + # CONFIG_XFS_ONLINE_SCRUB is not set 662 663 CONFIG_XFS_DEBUG=y 663 664 CONFIG_GFS2_FS=m 664 665 CONFIG_GFS2_FS_LOCKING_DLM=y ··· 671 666 CONFIG_BTRFS_DEBUG=y 672 667 CONFIG_BTRFS_ASSERT=y 673 668 CONFIG_NILFS2_FS=m 674 - CONFIG_FS_DAX=y 675 669 CONFIG_EXPORTFS_BLOCK_OPS=y 676 670 CONFIG_FS_ENCRYPTION=y 677 671 CONFIG_FS_VERITY=y
+9 -5
arch/s390/configs/defconfig
··· 94 94 CONFIG_MEMORY_HOTPLUG=y 95 95 CONFIG_MEMORY_HOTREMOVE=y 96 96 CONFIG_KSM=y 97 + CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y 97 98 CONFIG_TRANSPARENT_HUGEPAGE=y 98 99 CONFIG_CMA_SYSFS=y 99 100 CONFIG_CMA_AREAS=7 ··· 115 114 CONFIG_TLS_TOE=y 116 115 CONFIG_XFRM_USER=m 117 116 CONFIG_NET_KEY=m 118 - CONFIG_XDP_SOCKETS=y 119 - CONFIG_XDP_SOCKETS_DIAG=m 120 - CONFIG_DIBS=y 121 - CONFIG_DIBS_LO=y 122 117 CONFIG_SMC=m 123 118 CONFIG_SMC_DIAG=m 119 + CONFIG_DIBS=y 120 + CONFIG_DIBS_LO=y 121 + CONFIG_XDP_SOCKETS=y 122 + CONFIG_XDP_SOCKETS_DIAG=m 124 123 CONFIG_INET=y 125 124 CONFIG_IP_MULTICAST=y 126 125 CONFIG_IP_ADVANCED_ROUTER=y ··· 463 462 CONFIG_SCSI_DH_ALUA=m 464 463 CONFIG_MD=y 465 464 CONFIG_BLK_DEV_MD=y 465 + CONFIG_MD_LLBITMAP=y 466 466 # CONFIG_MD_BITMAP_FILE is not set 467 467 CONFIG_MD_LINEAR=m 468 468 CONFIG_MD_CLUSTER=m ··· 646 644 CONFIG_JFS_SECURITY=y 647 645 CONFIG_JFS_STATISTICS=y 648 646 CONFIG_XFS_FS=y 647 + CONFIG_XFS_SUPPORT_V4=y 648 + CONFIG_XFS_SUPPORT_ASCII_CI=y 649 649 CONFIG_XFS_QUOTA=y 650 650 CONFIG_XFS_POSIX_ACL=y 651 651 CONFIG_XFS_RT=y 652 + # CONFIG_XFS_ONLINE_SCRUB is not set 652 653 CONFIG_GFS2_FS=m 653 654 CONFIG_GFS2_FS_LOCKING_DLM=y 654 655 CONFIG_OCFS2_FS=m 655 656 CONFIG_BTRFS_FS=y 656 657 CONFIG_BTRFS_FS_POSIX_ACL=y 657 658 CONFIG_NILFS2_FS=m 658 - CONFIG_FS_DAX=y 659 659 CONFIG_EXPORTFS_BLOCK_OPS=y 660 660 CONFIG_FS_ENCRYPTION=y 661 661 CONFIG_FS_VERITY=y
-1
arch/s390/configs/zfcpdump_defconfig
··· 33 33 CONFIG_DEVTMPFS=y 34 34 CONFIG_DEVTMPFS_SAFE=y 35 35 CONFIG_BLK_DEV_RAM=y 36 - # CONFIG_DCSSBLK is not set 37 36 # CONFIG_DASD is not set 38 37 CONFIG_ENCLOSURE_SERVICES=y 39 38 CONFIG_SCSI=y
+34 -18
arch/s390/crypto/phmac_s390.c
··· 169 169 u64 buflen[2]; 170 170 }; 171 171 172 + enum async_op { 173 + OP_NOP = 0, 174 + OP_UPDATE, 175 + OP_FINAL, 176 + OP_FINUP, 177 + }; 178 + 172 179 /* phmac request context */ 173 180 struct phmac_req_ctx { 174 181 struct hash_walk_helper hwh; 175 182 struct kmac_sha2_ctx kmac_ctx; 176 - bool final; 183 + enum async_op async_op; 177 184 }; 178 185 179 186 /* ··· 617 610 * using engine to serialize requests. 618 611 */ 619 612 if (rc == 0 || rc == -EKEYEXPIRED) { 613 + req_ctx->async_op = OP_UPDATE; 620 614 atomic_inc(&tfm_ctx->via_engine_ctr); 621 615 rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); 622 616 if (rc != -EINPROGRESS) ··· 655 647 * using engine to serialize requests. 656 648 */ 657 649 if (rc == 0 || rc == -EKEYEXPIRED) { 658 - req->nbytes = 0; 659 - req_ctx->final = true; 650 + req_ctx->async_op = OP_FINAL; 660 651 atomic_inc(&tfm_ctx->via_engine_ctr); 661 652 rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); 662 653 if (rc != -EINPROGRESS) ··· 683 676 if (rc) 684 677 goto out; 685 678 679 + req_ctx->async_op = OP_FINUP; 680 + 686 681 /* Try synchronous operations if no active engine usage */ 687 682 if (!atomic_read(&tfm_ctx->via_engine_ctr)) { 688 683 rc = phmac_kmac_update(req, false); 689 684 if (rc == 0) 690 - req->nbytes = 0; 685 + req_ctx->async_op = OP_FINAL; 691 686 } 692 - if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) { 687 + if (!rc && req_ctx->async_op == OP_FINAL && 688 + !atomic_read(&tfm_ctx->via_engine_ctr)) { 693 689 rc = phmac_kmac_final(req, false); 694 690 if (rc == 0) 695 691 goto out; ··· 704 694 * using engine to serialize requests. 705 695 */ 706 696 if (rc == 0 || rc == -EKEYEXPIRED) { 707 - req_ctx->final = true; 697 + /* req->async_op has been set to either OP_FINUP or OP_FINAL */ 708 698 atomic_inc(&tfm_ctx->via_engine_ctr); 709 699 rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); 710 700 if (rc != -EINPROGRESS) ··· 865 855 866 856 /* 867 857 * Three kinds of requests come in here: 868 - * update when req->nbytes > 0 and req_ctx->final is false 869 - * final when req->nbytes = 0 and req_ctx->final is true 870 - * finup when req->nbytes > 0 and req_ctx->final is true 871 - * For update and finup the hwh walk needs to be prepared and 872 - * up to date but the actual nr of bytes in req->nbytes may be 873 - * any non zero number. For final there is no hwh walk needed. 858 + * 1. req->async_op == OP_UPDATE with req->nbytes > 0 859 + * 2. req->async_op == OP_FINUP with req->nbytes > 0 860 + * 3. req->async_op == OP_FINAL 861 + * For update and finup the hwh walk has already been prepared 862 + * by the caller. For final there is no hwh walk needed. 874 863 */ 875 864 876 - if (req->nbytes) { 865 + switch (req_ctx->async_op) { 866 + case OP_UPDATE: 867 + case OP_FINUP: 877 868 rc = phmac_kmac_update(req, true); 878 869 if (rc == -EKEYEXPIRED) { 879 870 /* ··· 891 880 hwh_advance(hwh, rc); 892 881 goto out; 893 882 } 894 - req->nbytes = 0; 895 - } 896 - 897 - if (req_ctx->final) { 883 + if (req_ctx->async_op == OP_UPDATE) 884 + break; 885 + req_ctx->async_op = OP_FINAL; 886 + fallthrough; 887 + case OP_FINAL: 898 888 rc = phmac_kmac_final(req, true); 899 889 if (rc == -EKEYEXPIRED) { 900 890 /* ··· 909 897 cond_resched(); 910 898 return -ENOSPC; 911 899 } 900 + break; 901 + default: 902 + /* unknown/unsupported/unimplemented asynch op */ 903 + return -EOPNOTSUPP; 912 904 } 913 905 914 906 out: 915 - if (rc || req_ctx->final) 907 + if (rc || req_ctx->async_op == OP_FINAL) 916 908 memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); 917 909 pr_debug("request complete with rc=%d\n", rc); 918 910 local_bh_disable();
-1
arch/s390/include/asm/pci.h
··· 145 145 u8 has_resources : 1; 146 146 u8 is_physfn : 1; 147 147 u8 util_str_avail : 1; 148 - u8 irqs_registered : 1; 149 148 u8 tid_avail : 1; 150 149 u8 rtr_avail : 1; /* Relaxed translation allowed */ 151 150 unsigned int devfn; /* DEVFN part of the RID*/
+7 -12
arch/s390/mm/dump_pagetables.c
··· 291 291 292 292 static int add_marker(unsigned long start, unsigned long end, const char *name) 293 293 { 294 - size_t oldsize, newsize; 294 + struct addr_marker *new; 295 + size_t newsize; 295 296 296 - oldsize = markers_cnt * sizeof(*markers); 297 - newsize = oldsize + 2 * sizeof(*markers); 298 - if (!oldsize) 299 - markers = kvmalloc(newsize, GFP_KERNEL); 300 - else 301 - markers = kvrealloc(markers, newsize, GFP_KERNEL); 302 - if (!markers) 303 - goto error; 297 + newsize = (markers_cnt + 2) * sizeof(*markers); 298 + new = kvrealloc(markers, newsize, GFP_KERNEL); 299 + if (!new) 300 + return -ENOMEM; 301 + markers = new; 304 302 markers[markers_cnt].is_start = 1; 305 303 markers[markers_cnt].start_address = start; 306 304 markers[markers_cnt].size = end - start; ··· 310 312 markers[markers_cnt].name = name; 311 313 markers_cnt++; 312 314 return 0; 313 - error: 314 - markers_cnt = 0; 315 - return -ENOMEM; 316 315 } 317 316 318 317 static int pt_dump_init(void)
+2 -2
arch/s390/pci/pci_event.c
··· 188 188 * is unbound or probed and that userspace can't access its 189 189 * configuration space while we perform recovery. 190 190 */ 191 - pci_dev_lock(pdev); 191 + device_lock(&pdev->dev); 192 192 if (pdev->error_state == pci_channel_io_perm_failure) { 193 193 ers_res = PCI_ERS_RESULT_DISCONNECT; 194 194 goto out_unlock; ··· 257 257 driver->err_handler->resume(pdev); 258 258 pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED); 259 259 out_unlock: 260 - pci_dev_unlock(pdev); 260 + device_unlock(&pdev->dev); 261 261 zpci_report_status(zdev, "recovery", status_str); 262 262 263 263 return ers_res;
+1 -8
arch/s390/pci/pci_irq.c
··· 107 107 else 108 108 rc = zpci_set_airq(zdev); 109 109 110 - if (!rc) 111 - zdev->irqs_registered = 1; 112 - 113 110 return rc; 114 111 } 115 112 ··· 119 122 rc = zpci_clear_directed_irq(zdev); 120 123 else 121 124 rc = zpci_clear_airq(zdev); 122 - 123 - if (!rc) 124 - zdev->irqs_registered = 0; 125 125 126 126 return rc; 127 127 } ··· 421 427 { 422 428 struct zpci_dev *zdev = to_zpci(pdev); 423 429 424 - if (!zdev->irqs_registered) 425 - zpci_set_irq(zdev); 430 + zpci_set_irq(zdev); 426 431 return true; 427 432 } 428 433
+1 -1
arch/x86/Makefile
··· 75 75 # 76 76 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383 77 77 # 78 - KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx 78 + KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a 79 79 KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json 80 80 KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 81 81
+1
arch/x86/events/intel/core.c
··· 7596 7596 break; 7597 7597 7598 7598 case INTEL_PANTHERLAKE_L: 7599 + case INTEL_WILDCATLAKE_L: 7599 7600 pr_cont("Pantherlake Hybrid events, "); 7600 7601 name = "pantherlake_hybrid"; 7601 7602 goto lnl_common;
+2 -1
arch/x86/events/intel/ds.c
··· 317 317 { 318 318 u64 val; 319 319 320 - WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); 320 + WARN_ON_ONCE(is_hybrid() && 321 + hybrid_pmu(event->pmu)->pmu_type == hybrid_big); 321 322 322 323 dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK; 323 324 val = hybrid_var(event->pmu, pebs_data_source)[dse];
+1
arch/x86/events/intel/uncore.c
··· 1895 1895 X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init), 1896 1896 X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init), 1897 1897 X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_uncore_init), 1898 + X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &ptl_uncore_init), 1898 1899 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init), 1899 1900 X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init), 1900 1901 X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),
+3 -3
arch/x86/include/asm/intel-family.h
··· 150 150 151 151 #define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */ 152 152 153 - #define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */ 153 + #define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Darkmont */ 154 154 155 155 #define INTEL_WILDCATLAKE_L IFM(6, 0xD5) 156 156 157 - #define INTEL_NOVALAKE IFM(18, 0x01) 158 - #define INTEL_NOVALAKE_L IFM(18, 0x03) 157 + #define INTEL_NOVALAKE IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */ 158 + #define INTEL_NOVALAKE_L IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */ 159 159 160 160 /* "Small Core" Processors (Atom/E-Core) */ 161 161
+3
arch/x86/include/asm/page_64.h
··· 43 43 void clear_page_orig(void *page); 44 44 void clear_page_rep(void *page); 45 45 void clear_page_erms(void *page); 46 + KCFI_REFERENCE(clear_page_orig); 47 + KCFI_REFERENCE(clear_page_rep); 48 + KCFI_REFERENCE(clear_page_erms); 46 49 47 50 static inline void clear_page(void *page) 48 51 {
+11 -1
arch/x86/kernel/cpu/amd.c
··· 516 516 setup_force_cpu_cap(X86_FEATURE_ZEN5); 517 517 break; 518 518 case 0x50 ... 0x5f: 519 - case 0x90 ... 0xaf: 519 + case 0x80 ... 0xaf: 520 520 case 0xc0 ... 0xcf: 521 521 setup_force_cpu_cap(X86_FEATURE_ZEN6); 522 522 break; ··· 1035 1035 } 1036 1036 } 1037 1037 1038 + static const struct x86_cpu_id zen5_rdseed_microcode[] = { 1039 + ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a), 1040 + ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054), 1041 + }; 1042 + 1038 1043 static void init_amd_zen5(struct cpuinfo_x86 *c) 1039 1044 { 1045 + if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) { 1046 + clear_cpu_cap(c, X86_FEATURE_RDSEED); 1047 + msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18); 1048 + pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n"); 1049 + } 1040 1050 } 1041 1051 1042 1052 static void init_amd(struct cpuinfo_x86 *c)
+19 -1
arch/x86/kernel/cpu/microcode/amd.c
··· 233 233 return true; 234 234 } 235 235 236 + static bool cpu_has_entrysign(void) 237 + { 238 + unsigned int fam = x86_family(bsp_cpuid_1_eax); 239 + unsigned int model = x86_model(bsp_cpuid_1_eax); 240 + 241 + if (fam == 0x17 || fam == 0x19) 242 + return true; 243 + 244 + if (fam == 0x1a) { 245 + if (model <= 0x2f || 246 + (0x40 <= model && model <= 0x4f) || 247 + (0x60 <= model && model <= 0x6f)) 248 + return true; 249 + } 250 + 251 + return false; 252 + } 253 + 236 254 static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len) 237 255 { 238 256 struct patch_digest *pd = NULL; 239 257 u8 digest[SHA256_DIGEST_SIZE]; 240 258 int i; 241 259 242 - if (x86_family(bsp_cpuid_1_eax) < 0x17) 260 + if (!cpu_has_entrysign()) 243 261 return true; 244 262 245 263 if (!need_sha_check(cur_rev))
+3
arch/x86/kernel/fpu/core.c
··· 825 825 !fpregs_state_valid(fpu, smp_processor_id())) 826 826 os_xrstor_supervisor(fpu->fpstate); 827 827 828 + /* Ensure XFD state is in sync before reloading XSTATE */ 829 + xfd_update_state(fpu->fpstate); 830 + 828 831 /* Reset user states in registers. */ 829 832 restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); 830 833
+1 -1
arch/x86/net/bpf_jit_comp.c
··· 2701 2701 /* Update cleanup_addr */ 2702 2702 ctx->cleanup_addr = proglen; 2703 2703 if (bpf_prog_was_classic(bpf_prog) && 2704 - !capable(CAP_SYS_ADMIN)) { 2704 + !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) { 2705 2705 u8 *ip = image + addrs[i - 1]; 2706 2706 2707 2707 if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
+1 -1
block/blk-crypto.c
··· 292 292 } 293 293 294 294 if (!bio_crypt_check_alignment(bio)) { 295 - bio->bi_status = BLK_STS_IOERR; 295 + bio->bi_status = BLK_STS_INVAL; 296 296 goto fail; 297 297 } 298 298
+3
drivers/acpi/acpi_mrrm.c
··· 63 63 if (!mrrm) 64 64 return -ENODEV; 65 65 66 + if (mrrm->header.revision != 1) 67 + return -EINVAL; 68 + 66 69 if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS) 67 70 return -EOPNOTSUPP; 68 71
+3 -1
drivers/acpi/acpi_video.c
··· 1959 1959 struct acpi_video_device *dev; 1960 1960 1961 1961 mutex_lock(&video->device_list_lock); 1962 - list_for_each_entry(dev, &video->video_device_list, entry) 1962 + list_for_each_entry(dev, &video->video_device_list, entry) { 1963 1963 acpi_video_dev_remove_notify_handler(dev); 1964 + cancel_delayed_work_sync(&dev->switch_brightness_work); 1965 + } 1964 1966 mutex_unlock(&video->device_list_lock); 1965 1967 1966 1968 acpi_video_bus_stop_devices(video);
+3 -1
drivers/acpi/button.c
··· 619 619 620 620 input_set_drvdata(input, device); 621 621 error = input_register_device(input); 622 - if (error) 622 + if (error) { 623 + input_free_device(input); 623 624 goto err_remove_fs; 625 + } 624 626 625 627 switch (device->device_type) { 626 628 case ACPI_BUS_TYPE_POWER_BUTTON:
+4 -3
drivers/acpi/fan.h
··· 49 49 }; 50 50 51 51 struct acpi_fan { 52 + acpi_handle handle; 52 53 bool acpi4; 53 54 bool has_fst; 54 55 struct acpi_fan_fif fif; ··· 60 59 struct device_attribute fine_grain_control; 61 60 }; 62 61 63 - int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst); 62 + int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst); 64 63 int acpi_fan_create_attributes(struct acpi_device *device); 65 64 void acpi_fan_delete_attributes(struct acpi_device *device); 66 65 67 66 #if IS_REACHABLE(CONFIG_HWMON) 68 - int devm_acpi_fan_create_hwmon(struct acpi_device *device); 67 + int devm_acpi_fan_create_hwmon(struct device *dev); 69 68 #else 70 - static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; }; 69 + static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; }; 71 70 #endif 72 71 73 72 #endif
+1 -1
drivers/acpi/fan_attr.c
··· 55 55 struct acpi_fan_fst fst; 56 56 int status; 57 57 58 - status = acpi_fan_get_fst(acpi_dev, &fst); 58 + status = acpi_fan_get_fst(acpi_dev->handle, &fst); 59 59 if (status) 60 60 return status; 61 61
+23 -13
drivers/acpi/fan_core.c
··· 44 44 return 0; 45 45 } 46 46 47 - int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst) 47 + int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst) 48 48 { 49 49 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 50 50 union acpi_object *obj; 51 51 acpi_status status; 52 52 int ret = 0; 53 53 54 - status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer); 55 - if (ACPI_FAILURE(status)) { 56 - dev_err(&device->dev, "Get fan state failed\n"); 57 - return -ENODEV; 58 - } 54 + status = acpi_evaluate_object(handle, "_FST", NULL, &buffer); 55 + if (ACPI_FAILURE(status)) 56 + return -EIO; 59 57 60 58 obj = buffer.pointer; 61 - if (!obj || obj->type != ACPI_TYPE_PACKAGE || 62 - obj->package.count != 3 || 63 - obj->package.elements[1].type != ACPI_TYPE_INTEGER) { 64 - dev_err(&device->dev, "Invalid _FST data\n"); 65 - ret = -EINVAL; 59 + if (!obj) 60 + return -ENODATA; 61 + 62 + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) { 63 + ret = -EPROTO; 64 + goto err; 65 + } 66 + 67 + if (obj->package.elements[0].type != ACPI_TYPE_INTEGER || 68 + obj->package.elements[1].type != ACPI_TYPE_INTEGER || 69 + obj->package.elements[2].type != ACPI_TYPE_INTEGER) { 70 + ret = -EPROTO; 66 71 goto err; 67 72 } 68 73 ··· 86 81 struct acpi_fan_fst fst; 87 82 int status, i; 88 83 89 - status = acpi_fan_get_fst(device, &fst); 84 + status = acpi_fan_get_fst(device->handle, &fst); 90 85 if (status) 91 86 return status; 92 87 ··· 316 311 struct acpi_device *device = ACPI_COMPANION(&pdev->dev); 317 312 char *name; 318 313 314 + if (!device) 315 + return -ENODEV; 316 + 319 317 fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); 320 318 if (!fan) { 321 319 dev_err(&device->dev, "No memory for fan\n"); 322 320 return -ENOMEM; 323 321 } 322 + 323 + fan->handle = device->handle; 324 324 device->driver_data = fan; 325 325 platform_set_drvdata(pdev, fan); 326 326 ··· 347 337 } 348 338 349 339 if (fan->has_fst) { 350 - result = devm_acpi_fan_create_hwmon(device); 340 + result = devm_acpi_fan_create_hwmon(&pdev->dev); 351 341 if (result) 352 342 return result; 353 343
+5 -6
drivers/acpi/fan_hwmon.c
··· 93 93 static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, 94 94 int channel, long *val) 95 95 { 96 - struct acpi_device *adev = to_acpi_device(dev->parent); 97 96 struct acpi_fan *fan = dev_get_drvdata(dev); 98 97 struct acpi_fan_fps *fps; 99 98 struct acpi_fan_fst fst; 100 99 int ret; 101 100 102 - ret = acpi_fan_get_fst(adev, &fst); 101 + ret = acpi_fan_get_fst(fan->handle, &fst); 103 102 if (ret < 0) 104 103 return ret; 105 104 ··· 166 167 .info = acpi_fan_hwmon_info, 167 168 }; 168 169 169 - int devm_acpi_fan_create_hwmon(struct acpi_device *device) 170 + int devm_acpi_fan_create_hwmon(struct device *dev) 170 171 { 171 - struct acpi_fan *fan = acpi_driver_data(device); 172 + struct acpi_fan *fan = dev_get_drvdata(dev); 172 173 struct device *hdev; 173 174 174 - hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan, 175 - &acpi_fan_hwmon_chip_info, NULL); 175 + hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info, 176 + NULL); 176 177 return PTR_ERR_OR_ZERO(hdev); 177 178 }
+1 -1
drivers/acpi/spcr.c
··· 155 155 * Baud Rate field. If this field is zero or not present, Configured 156 156 * Baud Rate is used. 157 157 */ 158 - if (table->precise_baudrate) 158 + if (table->header.revision >= 4 && table->precise_baudrate) 159 159 baud_rate = table->precise_baudrate; 160 160 else switch (table->baud_rate) { 161 161 case 0:
+2 -4
drivers/base/regmap/regmap-slimbus.c
··· 48 48 if (IS_ERR(bus)) 49 49 return ERR_CAST(bus); 50 50 51 - return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config, 52 - lock_key, lock_name); 51 + return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name); 53 52 } 54 53 EXPORT_SYMBOL_GPL(__regmap_init_slimbus); 55 54 ··· 62 63 if (IS_ERR(bus)) 63 64 return ERR_CAST(bus); 64 65 65 - return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config, 66 - lock_key, lock_name); 66 + return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name); 67 67 } 68 68 EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus); 69 69
+6
drivers/bcma/main.c
··· 294 294 int err; 295 295 296 296 list_for_each_entry(core, &bus->cores, list) { 297 + struct device_node *np; 298 + 297 299 /* We support that core ourselves */ 298 300 switch (core->id.id) { 299 301 case BCMA_CORE_4706_CHIPCOMMON: ··· 311 309 312 310 /* Early cores were already registered */ 313 311 if (bcma_is_core_needed_early(core->id.id)) 312 + continue; 313 + 314 + np = core->dev.of_node; 315 + if (np && !of_device_is_available(np)) 314 316 continue; 315 317 316 318 /* Only first GMAC core on BCM4706 is connected and working */
+1
drivers/block/null_blk/main.c
··· 1949 1949 .logical_block_size = dev->blocksize, 1950 1950 .physical_block_size = dev->blocksize, 1951 1951 .max_hw_sectors = dev->max_sectors, 1952 + .dma_alignment = dev->blocksize - 1, 1952 1953 }; 1953 1954 1954 1955 struct nullb *nullb;
+3 -1
drivers/bluetooth/bpa10x.c
··· 41 41 struct usb_anchor rx_anchor; 42 42 43 43 struct sk_buff *rx_skb[2]; 44 + struct hci_uart hu; 44 45 }; 45 46 46 47 static void bpa10x_tx_complete(struct urb *urb) ··· 97 96 if (urb->status == 0) { 98 97 bool idx = usb_pipebulk(urb->pipe); 99 98 100 - data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx], 99 + data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx], 101 100 urb->transfer_buffer, 102 101 urb->actual_length, 103 102 bpa10x_recv_pkts, ··· 389 388 hci_set_drvdata(hdev, data); 390 389 391 390 data->hdev = hdev; 391 + data->hu.hdev = hdev; 392 392 393 393 SET_HCIDEV_DEV(hdev, &intf->dev); 394 394
+6 -5
drivers/bluetooth/btintel_pcie.c
··· 1467 1467 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1) 1468 1468 btintel_pcie_msix_gp1_handler(data); 1469 1469 1470 - /* This interrupt is triggered by the firmware after updating 1471 - * boot_stage register and image_response register 1472 - */ 1473 - if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0) 1474 - btintel_pcie_msix_gp0_handler(data); 1475 1470 1476 1471 /* For TX */ 1477 1472 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) { ··· 1481 1486 if (!btintel_pcie_is_txackq_empty(data)) 1482 1487 btintel_pcie_msix_tx_handle(data); 1483 1488 } 1489 + 1490 + /* This interrupt is triggered by the firmware after updating 1491 + * boot_stage register and image_response register 1492 + */ 1493 + if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0) 1494 + btintel_pcie_msix_gp0_handler(data); 1484 1495 1485 1496 /* 1486 1497 * Before sending the interrupt the HW disables it to prevent a nested
+12
drivers/bluetooth/btmtksdio.c
··· 1270 1270 1271 1271 sdio_claim_host(bdev->func); 1272 1272 1273 + /* set drv_pmctrl if BT is closed before doing reset */ 1274 + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { 1275 + sdio_enable_func(bdev->func); 1276 + btmtksdio_drv_pmctrl(bdev); 1277 + } 1278 + 1273 1279 sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); 1274 1280 skb_queue_purge(&bdev->txq); 1275 1281 cancel_work_sync(&bdev->txrx_work); ··· 1289 1283 if (err < 0) { 1290 1284 bt_dev_err(hdev, "Failed to reset (%d)", err); 1291 1285 goto err; 1286 + } 1287 + 1288 + /* set fw_pmctrl back if BT is closed after doing reset */ 1289 + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { 1290 + btmtksdio_fw_pmctrl(bdev); 1291 + sdio_disable_func(bdev->func); 1292 1292 } 1293 1293 1294 1294 clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
+3 -1
drivers/bluetooth/btmtkuart.c
··· 79 79 u16 stp_dlen; 80 80 81 81 const struct btmtkuart_data *data; 82 + struct hci_uart hu; 82 83 }; 83 84 84 85 #define btmtkuart_is_standalone(bdev) \ ··· 369 368 sz_left -= adv; 370 369 p_left += adv; 371 370 372 - bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4, 371 + bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4, 373 372 sz_h4, mtk_recv_pkts, 374 373 ARRAY_SIZE(mtk_recv_pkts)); 375 374 if (IS_ERR(bdev->rx_skb)) { ··· 859 858 } 860 859 861 860 bdev->hdev = hdev; 861 + bdev->hu.hdev = hdev; 862 862 863 863 hdev->bus = HCI_UART; 864 864 hci_set_drvdata(hdev, bdev);
+3 -1
drivers/bluetooth/btnxpuart.c
··· 212 212 struct ps_data psdata; 213 213 struct btnxpuart_data *nxp_data; 214 214 struct reset_control *pdn; 215 + struct hci_uart hu; 215 216 }; 216 217 217 218 #define NXP_V1_FW_REQ_PKT 0xa5 ··· 1757 1756 1758 1757 ps_start_timer(nxpdev); 1759 1758 1760 - nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count, 1759 + nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count, 1761 1760 nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts)); 1762 1761 if (IS_ERR(nxpdev->rx_skb)) { 1763 1762 int err = PTR_ERR(nxpdev->rx_skb); ··· 1876 1875 reset_control_deassert(nxpdev->pdn); 1877 1876 1878 1877 nxpdev->hdev = hdev; 1878 + nxpdev->hu.hdev = hdev; 1879 1879 1880 1880 hdev->bus = HCI_UART; 1881 1881 hci_set_drvdata(hdev, nxpdev);
+1 -1
drivers/bluetooth/hci_ag6xx.c
··· 105 105 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) 106 106 return -EUNATCH; 107 107 108 - ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count, 108 + ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count, 109 109 ag6xx_recv_pkts, 110 110 ARRAY_SIZE(ag6xx_recv_pkts)); 111 111 if (IS_ERR(ag6xx->rx_skb)) {
+1 -1
drivers/bluetooth/hci_aml.c
··· 650 650 struct aml_data *aml_data = hu->priv; 651 651 int err; 652 652 653 - aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count, 653 + aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count, 654 654 aml_recv_pkts, 655 655 ARRAY_SIZE(aml_recv_pkts)); 656 656 if (IS_ERR(aml_data->rx_skb)) {
+1 -1
drivers/bluetooth/hci_ath.c
··· 191 191 { 192 192 struct ath_struct *ath = hu->priv; 193 193 194 - ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count, 194 + ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count, 195 195 ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts)); 196 196 if (IS_ERR(ath->rx_skb)) { 197 197 int err = PTR_ERR(ath->rx_skb);
+1 -1
drivers/bluetooth/hci_bcm.c
··· 698 698 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) 699 699 return -EUNATCH; 700 700 701 - bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count, 701 + bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count, 702 702 bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts)); 703 703 if (IS_ERR(bcm->rx_skb)) { 704 704 int err = PTR_ERR(bcm->rx_skb);
+3 -3
drivers/bluetooth/hci_h4.c
··· 112 112 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) 113 113 return -EUNATCH; 114 114 115 - h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count, 115 + h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count, 116 116 h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts)); 117 117 if (IS_ERR(h4->rx_skb)) { 118 118 int err = PTR_ERR(h4->rx_skb); ··· 151 151 return hci_uart_unregister_proto(&h4p); 152 152 } 153 153 154 - struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, 154 + struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb, 155 155 const unsigned char *buffer, int count, 156 156 const struct h4_recv_pkt *pkts, int pkts_count) 157 157 { 158 - struct hci_uart *hu = hci_get_drvdata(hdev); 159 158 u8 alignment = hu->alignment ? hu->alignment : 1; 159 + struct hci_dev *hdev = hu->hdev; 160 160 161 161 /* Check for error from previous call */ 162 162 if (IS_ERR(skb))
+1 -1
drivers/bluetooth/hci_intel.c
··· 972 972 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) 973 973 return -EUNATCH; 974 974 975 - intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count, 975 + intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count, 976 976 intel_recv_pkts, 977 977 ARRAY_SIZE(intel_recv_pkts)); 978 978 if (IS_ERR(intel->rx_skb)) {
+1 -1
drivers/bluetooth/hci_ll.c
··· 429 429 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) 430 430 return -EUNATCH; 431 431 432 - ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count, 432 + ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count, 433 433 ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts)); 434 434 if (IS_ERR(ll->rx_skb)) { 435 435 int err = PTR_ERR(ll->rx_skb);
+3 -3
drivers/bluetooth/hci_mrvl.c
··· 264 264 !test_bit(STATE_FW_LOADED, &mrvl->flags)) 265 265 return count; 266 266 267 - mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count, 268 - mrvl_recv_pkts, 269 - ARRAY_SIZE(mrvl_recv_pkts)); 267 + mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count, 268 + mrvl_recv_pkts, 269 + ARRAY_SIZE(mrvl_recv_pkts)); 270 270 if (IS_ERR(mrvl->rx_skb)) { 271 271 int err = PTR_ERR(mrvl->rx_skb); 272 272 bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+2 -2
drivers/bluetooth/hci_nokia.c
··· 624 624 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) 625 625 return -EUNATCH; 626 626 627 - btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count, 628 - nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); 627 + btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count, 628 + nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); 629 629 if (IS_ERR(btdev->rx_skb)) { 630 630 err = PTR_ERR(btdev->rx_skb); 631 631 dev_err(dev, "Frame reassembly failed (%d)", err);
+1 -1
drivers/bluetooth/hci_qca.c
··· 1277 1277 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) 1278 1278 return -EUNATCH; 1279 1279 1280 - qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, 1280 + qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count, 1281 1281 qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts)); 1282 1282 if (IS_ERR(qca->rx_skb)) { 1283 1283 int err = PTR_ERR(qca->rx_skb);
+1 -1
drivers/bluetooth/hci_uart.h
··· 162 162 int h4_init(void); 163 163 int h4_deinit(void); 164 164 165 - struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, 165 + struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb, 166 166 const unsigned char *buffer, int count, 167 167 const struct h4_recv_pkt *pkts, int pkts_count); 168 168 #endif
+5 -2
drivers/cpuidle/governors/menu.c
··· 318 318 319 319 /* 320 320 * Use a physical idle state, not busy polling, unless a timer 321 - * is going to trigger soon enough. 321 + * is going to trigger soon enough or the exit latency of the 322 + * idle state in question is greater than the predicted idle 323 + * duration. 322 324 */ 323 325 if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && 324 - s->target_residency_ns <= data->next_timer_ns) { 326 + s->target_residency_ns <= data->next_timer_ns && 327 + s->exit_latency_ns <= predicted_ns) { 325 328 predicted_ns = s->target_residency_ns; 326 329 idx = i; 327 330 break;
-2
drivers/crypto/aspeed/aspeed-acry.c
··· 787 787 err_engine_rsa_start: 788 788 crypto_engine_exit(acry_dev->crypt_engine_rsa); 789 789 clk_exit: 790 - clk_disable_unprepare(acry_dev->clk); 791 790 792 791 return rc; 793 792 } ··· 798 799 aspeed_acry_unregister(acry_dev); 799 800 crypto_engine_exit(acry_dev->crypt_engine_rsa); 800 801 tasklet_kill(&acry_dev->done_task); 801 - clk_disable_unprepare(acry_dev->clk); 802 802 } 803 803 804 804 MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
+1 -1
drivers/dma-buf/dma-fence.c
··· 1141 1141 "RCU protection is required for safe access to returned string"); 1142 1142 1143 1143 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1144 - return fence->ops->get_driver_name(fence); 1144 + return fence->ops->get_timeline_name(fence); 1145 1145 else 1146 1146 return "signaled-timeline"; 1147 1147 }
+20 -16
drivers/dpll/dpll_netlink.c
··· 1559 1559 return -EMSGSIZE; 1560 1560 } 1561 1561 pin = dpll_pin_find_from_nlattr(info); 1562 - if (!IS_ERR(pin)) { 1563 - if (!dpll_pin_available(pin)) { 1564 - nlmsg_free(msg); 1565 - return -ENODEV; 1566 - } 1567 - ret = dpll_msg_add_pin_handle(msg, pin); 1568 - if (ret) { 1569 - nlmsg_free(msg); 1570 - return ret; 1571 - } 1562 + if (IS_ERR(pin)) { 1563 + nlmsg_free(msg); 1564 + return PTR_ERR(pin); 1565 + } 1566 + if (!dpll_pin_available(pin)) { 1567 + nlmsg_free(msg); 1568 + return -ENODEV; 1569 + } 1570 + ret = dpll_msg_add_pin_handle(msg, pin); 1571 + if (ret) { 1572 + nlmsg_free(msg); 1573 + return ret; 1572 1574 } 1573 1575 genlmsg_end(msg, hdr); 1574 1576 ··· 1737 1735 } 1738 1736 1739 1737 dpll = dpll_device_find_from_nlattr(info); 1740 - if (!IS_ERR(dpll)) { 1741 - ret = dpll_msg_add_dev_handle(msg, dpll); 1742 - if (ret) { 1743 - nlmsg_free(msg); 1744 - return ret; 1745 - } 1738 + if (IS_ERR(dpll)) { 1739 + nlmsg_free(msg); 1740 + return PTR_ERR(dpll); 1741 + } 1742 + ret = dpll_msg_add_dev_handle(msg, dpll); 1743 + if (ret) { 1744 + nlmsg_free(msg); 1745 + return ret; 1746 1746 } 1747 1747 genlmsg_end(msg, hdr); 1748 1748
+1 -1
drivers/dpll/zl3073x/dpll.c
··· 1904 1904 } 1905 1905 1906 1906 is_diff = zl3073x_out_is_diff(zldev, out); 1907 - is_enabled = zl3073x_out_is_enabled(zldev, out); 1907 + is_enabled = zl3073x_output_pin_is_enabled(zldev, index); 1908 1908 } 1909 1909 1910 1910 /* Skip N-pin if the corresponding input/output is differential */
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + // SPDX-License-Identifier: MIT 2 2 /* 3 3 * Copyright 2025 Advanced Micro Devices, Inc. 4 4 *
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: MIT */ 2 2 /* 3 3 * Copyright 2025 Advanced Micro Devices, Inc. 4 4 *
+30 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
··· 322 322 return 0; 323 323 } 324 324 325 + static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev) 326 + { 327 + switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 328 + case IP_VERSION(6, 1, 1): 329 + return adev->pm.fw_version < 0x0a640500; 330 + default: 331 + return false; 332 + } 333 + } 334 + 335 + static int vpe_get_dpm_level(struct amdgpu_device *adev) 336 + { 337 + struct amdgpu_vpe *vpe = &adev->vpe; 338 + 339 + if (!adev->pm.dpm_enabled) 340 + return 0; 341 + 342 + return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); 343 + } 344 + 325 345 static void vpe_idle_work_handler(struct work_struct *work) 326 346 { 327 347 struct amdgpu_device *adev = ··· 349 329 unsigned int fences = 0; 350 330 351 331 fences += amdgpu_fence_count_emitted(&adev->vpe.ring); 332 + if (fences) 333 + goto reschedule; 352 334 353 - if (fences == 0) 354 - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); 355 - else 356 - schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); 335 + if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0) 336 + goto reschedule; 337 + 338 + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); 339 + return; 340 + 341 + reschedule: 342 + schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); 357 343 } 358 344 359 345 static int vpe_common_init(struct amdgpu_vpe *vpe)
+1 -1
drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + // SPDX-License-Identifier: MIT 2 2 /* 3 3 * Copyright 2018 Advanced Micro Devices, Inc. 4 4 *
+18 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
··· 248 248 struct vblank_control_work *vblank_work = 249 249 container_of(work, struct vblank_control_work, work); 250 250 struct amdgpu_display_manager *dm = vblank_work->dm; 251 + struct amdgpu_device *adev = drm_to_adev(dm->ddev); 252 + int r; 251 253 252 254 mutex_lock(&dm->dc_lock); 253 255 ··· 279 277 280 278 if (dm->active_vblank_irq_count == 0) { 281 279 dc_post_update_surfaces_to_stream(dm->dc); 280 + 281 + r = amdgpu_dpm_pause_power_profile(adev, true); 282 + if (r) 283 + dev_warn(adev->dev, "failed to set default power profile mode\n"); 284 + 282 285 dc_allow_idle_optimizations(dm->dc, true); 286 + 287 + r = amdgpu_dpm_pause_power_profile(adev, false); 288 + if (r) 289 + dev_warn(adev->dev, "failed to restore the power profile mode\n"); 283 290 } 284 291 285 292 mutex_unlock(&dm->dc_lock); ··· 308 297 int irq_type; 309 298 int rc = 0; 310 299 311 - if (acrtc->otg_inst == -1) 312 - goto skip; 300 + if (enable && !acrtc->base.enabled) { 301 + drm_dbg_vbl(crtc->dev, 302 + "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n", 303 + acrtc->crtc_id, acrtc->base.enabled); 304 + return -EINVAL; 305 + } 313 306 314 307 irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 315 308 ··· 398 383 return rc; 399 384 } 400 385 #endif 401 - skip: 386 + 402 387 if (amdgpu_in_reset(adev)) 403 388 return 0; 404 389
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 83 83 edid_caps->panel_patch.remove_sink_ext_caps = true; 84 84 break; 85 85 case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): 86 + case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171): 86 87 drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id); 87 88 edid_caps->panel_patch.disable_colorimetry = true; 88 89 break;
-3
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
··· 578 578 dpp_base->ctx->dc->optimized_required = true; 579 579 dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; 580 580 } 581 - } else { 582 - REG_SET(CM_MEM_PWR_CTRL, 0, 583 - BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); 584 581 } 585 582 } 586 583
+1 -1
drivers/gpu/drm/amd/include/amd_cper.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: MIT */ 2 2 /* 3 3 * Copyright 2025 Advanced Micro Devices, Inc. 4 4 *
+1 -1
drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: MIT */ 2 2 3 3 /* 4 4 * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+1 -1
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
··· 2024 2024 table->VoltageResponseTime = 0; 2025 2025 table->PhaseResponseTime = 0; 2026 2026 table->MemoryThermThrottleEnable = 1; 2027 - table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ 2027 + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); 2028 2028 table->PCIeGenInterval = 1; 2029 2029 table->VRConfig = 0; 2030 2030
+1 -1
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
··· 2028 2028 table->VoltageResponseTime = 0; 2029 2029 table->PhaseResponseTime = 0; 2030 2030 table->MemoryThermThrottleEnable = 1; 2031 - table->PCIeBootLinkLevel = 0; 2031 + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); 2032 2032 table->PCIeGenInterval = 1; 2033 2033 2034 2034 result = iceland_populate_smc_svi2_config(hwmgr, table);
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 969 969 table_index); 970 970 uint32_t table_size; 971 971 int ret = 0; 972 - if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) 972 + if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) 973 973 return -EINVAL; 974 974 975 975 table_size = smu_table->tables[table_index].size;
+4 -4
drivers/gpu/drm/ast/ast_drv.h
··· 282 282 __ast_write8(addr, reg + 1, val); 283 283 } 284 284 285 - static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, 285 + static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask, 286 286 u8 val) 287 287 { 288 - u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); 288 + u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask); 289 289 290 - tmp |= val; 291 - __ast_write8_i(addr, reg, index, tmp); 290 + val &= ~preserve_mask; 291 + __ast_write8_i(addr, reg, index, tmp | val); 292 292 } 293 293 294 294 static inline u32 ast_read32(struct ast_device *ast, u32 reg)
+1 -1
drivers/gpu/drm/ci/gitlab-ci.yml
··· 280 280 GIT_STRATEGY: none 281 281 script: 282 282 # ci-fairy check-commits --junit-xml=check-commits.xml 283 - - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml 283 + # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml 284 284 - | 285 285 set -eu 286 286 image_tags=(
+6 -2
drivers/gpu/drm/drm_gem_atomic_helper.c
··· 310 310 void __drm_gem_reset_shadow_plane(struct drm_plane *plane, 311 311 struct drm_shadow_plane_state *shadow_plane_state) 312 312 { 313 - __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); 314 - drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); 313 + if (shadow_plane_state) { 314 + __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); 315 + drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); 316 + } else { 317 + __drm_atomic_helper_plane_reset(plane, NULL); 318 + } 315 319 } 316 320 EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); 317 321
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
··· 347 347 u32 link_target, link_dwords; 348 348 bool switch_context = gpu->exec_state != exec_state; 349 349 bool switch_mmu_context = gpu->mmu_context != mmu_context; 350 - unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); 350 + unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq); 351 351 bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; 352 352 bool has_blt = !!(gpu->identity.minor_features5 & 353 353 chipMinorFeatures5_BLT_ENGINE);
+54 -1
drivers/gpu/drm/i915/display/intel_dmc.c
··· 546 546 REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id; 547 547 } 548 548 549 + static bool fixup_dmc_evt(struct intel_display *display, 550 + enum intel_dmc_id dmc_id, 551 + i915_reg_t reg_ctl, u32 *data_ctl, 552 + i915_reg_t reg_htp, u32 *data_htp) 553 + { 554 + if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl)) 555 + return false; 556 + 557 + if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp)) 558 + return false; 559 + 560 + /* make sure reg_ctl and reg_htp are for the same event */ 561 + if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) != 562 + i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0))) 563 + return false; 564 + 565 + /* 566 + * On ADL-S the HRR event handler is not restored after DC6. 567 + * Clear it to zero from the beginning to avoid mismatches later. 568 + */ 569 + if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN && 570 + is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) { 571 + *data_ctl = 0; 572 + *data_htp = 0; 573 + return true; 574 + } 575 + 576 + return false; 577 + } 578 + 549 579 static bool disable_dmc_evt(struct intel_display *display, 550 580 enum intel_dmc_id dmc_id, 551 581 i915_reg_t reg, u32 data) ··· 1094 1064 for (i = 0; i < mmio_count; i++) { 1095 1065 dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); 1096 1066 dmc_info->mmiodata[i] = mmiodata[i]; 1067 + } 1097 1068 1069 + for (i = 0; i < mmio_count - 1; i++) { 1070 + u32 orig_mmiodata[2] = { 1071 + dmc_info->mmiodata[i], 1072 + dmc_info->mmiodata[i+1], 1073 + }; 1074 + 1075 + if (!fixup_dmc_evt(display, dmc_id, 1076 + dmc_info->mmioaddr[i], &dmc_info->mmiodata[i], 1077 + dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1])) 1078 + continue; 1079 + 1080 + drm_dbg_kms(display->drm, 1081 + " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n", 1082 + i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), 1083 + orig_mmiodata[0], dmc_info->mmiodata[i]); 1084 + drm_dbg_kms(display->drm, 1085 + " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n", 1086 + i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]), 1087 + orig_mmiodata[1], dmc_info->mmiodata[i+1]); 1088 + } 1089 + 1090 + for (i = 0; i < mmio_count; i++) { 1098 1091 drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", 1099 - i, mmioaddr[i], mmiodata[i], 1092 + i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i], 1100 1093 is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" : 1101 1094 is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "", 1102 1095 disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
+9 -9
drivers/gpu/drm/imx/ipuv3/parallel-display.c
··· 25 25 26 26 struct imx_parallel_display_encoder { 27 27 struct drm_encoder encoder; 28 - struct drm_bridge bridge; 29 - struct imx_parallel_display *pd; 30 28 }; 31 29 32 30 struct imx_parallel_display { 33 31 struct device *dev; 34 32 u32 bus_format; 35 33 struct drm_bridge *next_bridge; 34 + struct drm_bridge bridge; 36 35 }; 37 36 38 37 static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b) 39 38 { 40 - return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; 39 + return container_of(b, struct imx_parallel_display, bridge); 41 40 } 42 41 43 42 static const u32 imx_pd_bus_fmts[] = { ··· 194 195 if (IS_ERR(imxpd_encoder)) 195 196 return PTR_ERR(imxpd_encoder); 196 197 197 - imxpd_encoder->pd = imxpd; 198 198 encoder = &imxpd_encoder->encoder; 199 - bridge = &imxpd_encoder->bridge; 199 + bridge = &imxpd->bridge; 200 200 201 201 ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node); 202 202 if (ret) 203 203 return ret; 204 204 205 - bridge->funcs = &imx_pd_bridge_funcs; 206 205 drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); 207 206 208 207 connector = drm_bridge_connector_init(drm, encoder); ··· 225 228 u32 bus_format = 0; 226 229 const char *fmt; 227 230 228 - imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); 229 - if (!imxpd) 230 - return -ENOMEM; 231 + imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge, 232 + &imx_pd_bridge_funcs); 233 + if (IS_ERR(imxpd)) 234 + return PTR_ERR(imxpd); 231 235 232 236 /* port@1 is the output port */ 233 237 imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); ··· 255 257 imxpd->dev = dev; 256 258 257 259 platform_set_drvdata(pdev, imxpd); 260 + 261 + devm_drm_bridge_add(dev, &imxpd->bridge); 258 262 259 263 return component_add(dev, &imx_pd_ops); 260 264 }
-10
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 686 686 for (i = 0; i < private->data->mmsys_dev_num; i++) 687 687 private->all_drm_private[i]->drm = NULL; 688 688 err_put_dev: 689 - for (i = 0; i < private->data->mmsys_dev_num; i++) { 690 - /* For device_find_child in mtk_drm_get_all_priv() */ 691 - put_device(private->all_drm_private[i]->dev); 692 - } 693 689 put_device(private->mutex_dev); 694 690 return ret; 695 691 } ··· 693 697 static void mtk_drm_unbind(struct device *dev) 694 698 { 695 699 struct mtk_drm_private *private = dev_get_drvdata(dev); 696 - int i; 697 700 698 701 /* for multi mmsys dev, unregister drm dev in mmsys master */ 699 702 if (private->drm_master) { 700 703 drm_dev_unregister(private->drm); 701 704 mtk_drm_kms_deinit(private->drm); 702 705 drm_dev_put(private->drm); 703 - 704 - for (i = 0; i < private->data->mmsys_dev_num; i++) { 705 - /* For device_find_child in mtk_drm_get_all_priv() */ 706 - put_device(private->all_drm_private[i]->dev); 707 - } 708 706 put_device(private->mutex_dev); 709 707 } 710 708 private->mtk_drm_bound = false;
+4 -1
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 780 780 return true; 781 781 } 782 782 783 + #define NEXT_BLK(blk) \ 784 + ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size)) 785 + 783 786 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) 784 787 { 785 788 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); ··· 814 811 815 812 for (blk = (const struct block_header *) fw_image->data; 816 813 (const u8*) blk < fw_image->data + fw_image->size; 817 - blk = (const struct block_header *) &blk->data[blk->size >> 2]) { 814 + blk = NEXT_BLK(blk)) { 818 815 if (blk->size == 0) 819 816 continue; 820 817
-7
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 348 348 return 0; 349 349 } 350 350 351 - static bool 352 - adreno_smmu_has_prr(struct msm_gpu *gpu) 353 - { 354 - struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); 355 - return adreno_smmu && adreno_smmu->set_prr_addr; 356 - } 357 - 358 351 int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, 359 352 uint32_t param, uint64_t *value, uint32_t *len) 360 353 {
+3
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 1545 1545 adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock, 1546 1546 dpu_kms->perf.perf_cfg); 1547 1547 1548 + if (dpu_kms->catalog->caps->has_3d_merge) 1549 + adjusted_mode_clk /= 2; 1550 + 1548 1551 /* 1549 1552 * The given mode, adjusted for the perf clock factor, should not exceed 1550 1553 * the max core clock rate
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
··· 267 267 .base = 0x200, .len = 0xa0,}, \ 268 268 .csc_blk = {.name = "csc", \ 269 269 .base = 0x320, .len = 0x100,}, \ 270 - .format_list = plane_formats_yuv, \ 271 - .num_formats = ARRAY_SIZE(plane_formats_yuv), \ 270 + .format_list = plane_formats, \ 271 + .num_formats = ARRAY_SIZE(plane_formats), \ 272 272 .rotation_cfg = NULL, \ 273 273 } 274 274
+8 -6
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 500 500 int i; 501 501 502 502 for (i = 0; i < DPU_MAX_PLANES; i++) { 503 + uint32_t w = src_w, h = src_h; 504 + 503 505 if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) { 504 - src_w /= chroma_subsmpl_h; 505 - src_h /= chroma_subsmpl_v; 506 + w /= chroma_subsmpl_h; 507 + h /= chroma_subsmpl_v; 506 508 } 507 509 508 - pixel_ext->num_ext_pxls_top[i] = src_h; 509 - pixel_ext->num_ext_pxls_left[i] = src_w; 510 + pixel_ext->num_ext_pxls_top[i] = h; 511 + pixel_ext->num_ext_pxls_left[i] = w; 510 512 } 511 513 } 512 514 ··· 742 740 * We already have verified scaling against platform limitations. 743 741 * Now check if the SSPP supports scaling at all. 744 742 */ 745 - if (!sblk->scaler_blk.len && 743 + if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) && 746 744 ((drm_rect_width(&new_plane_state->src) >> 16 != 747 745 drm_rect_width(&new_plane_state->dst)) || 748 746 (drm_rect_height(&new_plane_state->src) >> 16 != ··· 1280 1278 state, plane_state, 1281 1279 prev_adjacent_plane_state); 1282 1280 if (ret) 1283 - break; 1281 + return ret; 1284 1282 1285 1283 prev_adjacent_plane_state = plane_state; 1286 1284 }
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 842 842 843 843 if (!reqs->scale && !reqs->yuv) 844 844 hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA); 845 - if (!hw_sspp && reqs->scale) 845 + if (!hw_sspp && !reqs->yuv) 846 846 hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB); 847 847 if (!hw_sspp) 848 848 hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
+3
drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
··· 72 72 DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n", 73 73 fb->width, dpu_wb_conn->maxlinewidth); 74 74 return -EINVAL; 75 + } else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 76 + DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier); 77 + return -EINVAL; 75 78 } 76 79 77 80 return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
-1
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
··· 109 109 struct msm_dsi_dphy_timing timing; 110 110 const struct msm_dsi_phy_cfg *cfg; 111 111 void *tuning_cfg; 112 - void *pll_data; 113 112 114 113 enum msm_dsi_phy_usecase usecase; 115 114 bool regulator_ldo_mode;
+2 -16
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
··· 426 426 u32 data; 427 427 428 428 spin_lock_irqsave(&pll->pll_enable_lock, flags); 429 - if (pll->pll_enable_cnt++) { 430 - spin_unlock_irqrestore(&pll->pll_enable_lock, flags); 431 - WARN_ON(pll->pll_enable_cnt == INT_MAX); 432 - return; 433 - } 429 + pll->pll_enable_cnt++; 430 + WARN_ON(pll->pll_enable_cnt == INT_MAX); 434 431 435 432 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0); 436 433 data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; ··· 873 876 spin_lock_init(&pll_7nm->pll_enable_lock); 874 877 875 878 pll_7nm->phy = phy; 876 - phy->pll_data = pll_7nm; 877 879 878 880 ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws); 879 881 if (ret) { ··· 961 965 u32 const delay_us = 5; 962 966 u32 const timeout_us = 1000; 963 967 struct msm_dsi_dphy_timing *timing = &phy->timing; 964 - struct dsi_pll_7nm *pll = phy->pll_data; 965 968 void __iomem *base = phy->base; 966 969 bool less_than_1500_mhz; 967 - unsigned long flags; 968 970 u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0; 969 971 u32 glbl_pemph_ctrl_0; 970 972 u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; ··· 1084 1090 glbl_rescode_bot_ctrl = 0x3c; 1085 1091 } 1086 1092 1087 - spin_lock_irqsave(&pll->pll_enable_lock, flags); 1088 - pll->pll_enable_cnt = 1; 1089 1093 /* de-assert digital and pll power down */ 1090 1094 data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B | 1091 1095 DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; 1092 1096 writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0); 1093 - spin_unlock_irqrestore(&pll->pll_enable_lock, flags); 1094 1097 1095 1098 /* Assert PLL core reset */ 1096 1099 writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL); ··· 1200 1209 1201 1210 static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) 1202 1211 { 1203 - struct dsi_pll_7nm *pll = phy->pll_data; 1204 1212 void __iomem *base = phy->base; 1205 - unsigned long flags; 1206 1213 u32 data; 1207 1214 1208 1215 DBG(""); ··· 1227 1238 writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0); 1228 1239 writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0); 1229 1240 1230 - spin_lock_irqsave(&pll->pll_enable_lock, flags); 1231 - pll->pll_enable_cnt = 0; 1232 1241 /* Turn off all PHY blocks */ 1233 1242 writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0); 1234 - spin_unlock_irqrestore(&pll->pll_enable_lock, flags); 1235 1243 1236 1244 /* make sure phy is turned off */ 1237 1245 wmb();
+7 -3
drivers/gpu/drm/msm/msm_gem.c
··· 1120 1120 put_pages(obj); 1121 1121 } 1122 1122 1123 - if (obj->resv != &obj->_resv) { 1123 + /* 1124 + * In error paths, we could end up here before msm_gem_new_handle() 1125 + * has changed obj->resv to point to the shared resv. In this case, 1126 + * we don't want to drop a ref to the shared r_obj that we haven't 1127 + * taken yet. 1128 + */ 1129 + if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) { 1124 1130 struct drm_gem_object *r_obj = 1125 1131 container_of(obj->resv, struct drm_gem_object, _resv); 1126 - 1127 - WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE)); 1128 1132 1129 1133 /* Drop reference we hold to shared resv obj: */ 1130 1134 drm_gem_object_put(r_obj);
+5 -4
drivers/gpu/drm/msm/msm_gem_submit.c
··· 414 414 submit->user_fence, 415 415 DMA_RESV_USAGE_BOOKKEEP, 416 416 DMA_RESV_USAGE_BOOKKEEP); 417 + 418 + last_fence = vm->last_fence; 419 + vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); 420 + dma_fence_put(last_fence); 421 + 417 422 return; 418 423 } 419 424 ··· 432 427 dma_resv_add_fence(obj->resv, submit->user_fence, 433 428 DMA_RESV_USAGE_READ); 434 429 } 435 - 436 - last_fence = vm->last_fence; 437 - vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); 438 - dma_fence_put(last_fence); 439 430 } 440 431 441 432 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
+7 -1
drivers/gpu/drm/msm/msm_gem_vma.c
··· 971 971 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op) 972 972 { 973 973 struct drm_device *dev = job->vm->drm; 974 + struct msm_drm_private *priv = dev->dev_private; 974 975 int i = job->nr_ops++; 975 976 int ret = 0; 976 977 ··· 1016 1015 default: 1017 1016 ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op); 1018 1017 break; 1018 + } 1019 + 1020 + if ((op->op == MSM_VM_BIND_OP_MAP_NULL) && 1021 + !adreno_smmu_has_prr(priv->gpu)) { 1022 + ret = UERR(EINVAL, dev, "PRR not supported\n"); 1019 1023 } 1020 1024 1021 1025 return ret; ··· 1427 1421 * Maybe we could allow just UNMAP ops? OTOH userspace should just 1428 1422 * immediately close the device file and all will be torn down. 1429 1423 */ 1430 - if (to_msm_vm(ctx->vm)->unusable) 1424 + if (to_msm_vm(msm_context_vm(dev, ctx))->unusable) 1431 1425 return UERR(EPIPE, dev, "context is unusable"); 1432 1426 1433 1427 /*
+11
drivers/gpu/drm/msm/msm_gpu.h
··· 299 299 return container_of(adreno_smmu, struct msm_gpu, adreno_smmu); 300 300 } 301 301 302 + static inline bool 303 + adreno_smmu_has_prr(struct msm_gpu *gpu) 304 + { 305 + struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); 306 + 307 + if (!adreno_smmu) 308 + return false; 309 + 310 + return adreno_smmu && adreno_smmu->set_prr_addr; 311 + } 312 + 302 313 /* It turns out that all targets use the same ringbuffer size */ 303 314 #define MSM_GPU_RINGBUFFER_SZ SZ_32K 304 315 #define MSM_GPU_RINGBUFFER_BLKSIZE 32
+5
drivers/gpu/drm/msm/msm_iommu.c
··· 338 338 339 339 ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages); 340 340 if (ret != p->count) { 341 + kfree(p->pages); 342 + p->pages = NULL; 341 343 p->count = ret; 342 344 return -ENOMEM; 343 345 } ··· 352 350 { 353 351 struct kmem_cache *pt_cache = get_pt_cache(mmu); 354 352 uint32_t remaining_pt_count = p->count - p->ptr; 353 + 354 + if (!p->pages) 355 + return; 355 356 356 357 if (p->count > 0) 357 358 trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
+12 -2
drivers/gpu/drm/nouveau/nouveau_sched.c
··· 482 482 return 0; 483 483 } 484 484 485 + static bool 486 + nouveau_sched_job_list_empty(struct nouveau_sched *sched) 487 + { 488 + bool empty; 489 + 490 + spin_lock(&sched->job.list.lock); 491 + empty = list_empty(&sched->job.list.head); 492 + spin_unlock(&sched->job.list.lock); 493 + 494 + return empty; 495 + } 485 496 486 497 static void 487 498 nouveau_sched_fini(struct nouveau_sched *sched) ··· 500 489 struct drm_gpu_scheduler *drm_sched = &sched->base; 501 490 struct drm_sched_entity *entity = &sched->entity; 502 491 503 - rmb(); /* for list_empty to work without lock */ 504 - wait_event(sched->job.wq, list_empty(&sched->job.list.head)); 492 + wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched)); 505 493 506 494 drm_sched_entity_fini(entity); 507 495 drm_sched_fini(drm_sched);
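The new helper replaces a bare rmb() + list_empty() with a read taken under the list's spinlock, so the wait_event() predicate always sees a consistent list state. A userspace sketch of the same locked-predicate shape, with pthreads standing in for the kernel primitives and job_list as an invented stand-in type:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct job_list {
	pthread_mutex_t lock;
	int count;              /* stand-in for the list head */
};

/* Sample the state under the lock, so the waiter's predicate never
 * races a concurrent add/remove half-way through. */
static bool job_list_empty(struct job_list *l)
{
	bool empty;

	pthread_mutex_lock(&l->lock);
	empty = (l->count == 0);
	pthread_mutex_unlock(&l->lock);
	return empty;
}

int main(void)
{
	struct job_list l = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("empty: %d\n", job_list_empty(&l));
	return 0;
}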
+1 -1
drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
··· 359 359 dsi->lanes = 4; 360 360 dsi->format = MIPI_DSI_FMT_RGB888; 361 361 dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | 362 - MIPI_DSI_MODE_LPM; 362 + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; 363 363 364 364 kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base, 365 365 &kingdisplay_panel_funcs,
+6 -1
drivers/gpu/drm/panel/panel-sitronix-st7789v.c
··· 249 249 .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 250 250 }; 251 251 252 + /* 253 + * The mode data for this panel has been reverse engineered without access 254 + * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all 255 + * other panels results in garbage data on the display. 256 + */ 252 257 static const struct drm_display_mode t28cp45tn89_mode = { 253 258 .clock = 6008, 254 259 .hdisplay = 240, ··· 266 261 .vtotal = 320 + 8 + 4 + 4, 267 262 .width_mm = 43, 268 263 .height_mm = 57, 269 - .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC, 264 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 270 265 }; 271 266 272 267 static const struct drm_display_mode et028013dma_mode = {
+4 -21
drivers/gpu/drm/radeon/radeon_drv.c
··· 314 314 315 315 ret = pci_enable_device(pdev); 316 316 if (ret) 317 - goto err_free; 317 + return ret; 318 318 319 319 pci_set_drvdata(pdev, ddev); 320 320 321 321 ret = radeon_driver_load_kms(ddev, flags); 322 322 if (ret) 323 - goto err_agp; 323 + goto err; 324 324 325 325 ret = drm_dev_register(ddev, flags); 326 326 if (ret) 327 - goto err_agp; 327 + goto err; 328 328 329 329 if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) 330 330 format = drm_format_info(DRM_FORMAT_C8); ··· 337 337 338 338 return 0; 339 339 340 - err_agp: 340 + err: 341 341 pci_disable_device(pdev); 342 - err_free: 343 - drm_dev_put(ddev); 344 342 return ret; 345 - } 346 - 347 - static void 348 - radeon_pci_remove(struct pci_dev *pdev) 349 - { 350 - struct drm_device *dev = pci_get_drvdata(pdev); 351 - 352 - drm_put_dev(dev); 353 343 } 354 344 355 345 static void 356 346 radeon_pci_shutdown(struct pci_dev *pdev) 357 347 { 358 - /* if we are running in a VM, make sure the device 359 - * torn down properly on reboot/shutdown 360 - */ 361 - if (radeon_device_is_virtual()) 362 - radeon_pci_remove(pdev); 363 - 364 348 #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64) 365 349 /* 366 350 * Some adapters need to be suspended before a ··· 597 613 .name = DRIVER_NAME, 598 614 .id_table = pciidlist, 599 615 .probe = radeon_pci_probe, 600 - .remove = radeon_pci_remove, 601 616 .shutdown = radeon_pci_shutdown, 602 617 .driver.pm = &radeon_pm_ops, 603 618 };
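The relabelled error path restores the usual unwind rule: each failure jumps to a label that releases only what was successfully acquired before it. A stand-alone sketch of that ladder; acquire_a/acquire_b are hypothetical stand-ins, not radeon APIs:

#include <stdio.h>

static int acquire_a(void) { return 0; }   /* succeeds */
static int acquire_b(void) { return -1; }  /* forced failure */
static void release_a(void) { puts("release a"); }

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;         /* nothing acquired yet */

	ret = acquire_b();
	if (ret)
		goto err_release_a; /* undo exactly one step */

	return 0;

err_release_a:
	release_a();
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}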
-1
drivers/gpu/drm/radeon/radeon_kms.c
··· 84 84 rdev->agp = NULL; 85 85 86 86 done_free: 87 - kfree(rdev); 88 87 dev->dev_private = NULL; 89 88 } 90 89
+4 -2
drivers/gpu/drm/scheduler/sched_entity.c
··· 70 70 entity->guilty = guilty; 71 71 entity->num_sched_list = num_sched_list; 72 72 entity->priority = priority; 73 + entity->last_user = current->group_leader; 73 74 /* 74 75 * It's perfectly valid to initialize an entity without having a valid 75 76 * scheduler attached. It's just not valid to use the scheduler before it ··· 303 302 304 303 /* For a killed process disallow further enqueueing of jobs. */ 305 304 last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); 306 - if ((!last_user || last_user == current->group_leader) && 305 + if (last_user == current->group_leader && 307 306 (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) 308 307 drm_sched_entity_kill(entity); 309 308 ··· 553 552 drm_sched_rq_remove_entity(entity->rq, entity); 554 553 entity->rq = rq; 555 554 } 556 - spin_unlock(&entity->lock); 557 555 558 556 if (entity->num_sched_list == 1) 559 557 entity->sched_list = NULL; 558 + 559 + spin_unlock(&entity->lock); 560 560 } 561 561 562 562 /**
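Seeding last_user at init time lets the flush path rely on a plain compare-and-exchange: only the thread still recorded as the last submitter wins the swap and performs the kill. A C11 sketch of that handshake; the types are illustrative, not the drm_sched ones:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic(void *) last_user;

static bool should_kill(void *me)
{
	void *expected = me;

	/* Swap in NULL only if we are still the recorded last user. */
	return atomic_compare_exchange_strong(&last_user, &expected, NULL);
}

int main(void)
{
	int a, b;

	atomic_store(&last_user, &a);
	printf("a kills: %d\n", should_kill(&a)); /* 1: a was the last user */
	printf("b kills: %d\n", should_kill(&b)); /* 0: slot already cleared */
	return 0;
}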
+12 -7
drivers/gpu/drm/xe/xe_gt.c
··· 813 813 unsigned int fw_ref; 814 814 int err; 815 815 816 - if (xe_device_wedged(gt_to_xe(gt))) 817 - return -ECANCELED; 816 + if (xe_device_wedged(gt_to_xe(gt))) { 817 + err = -ECANCELED; 818 + goto err_pm_put; 819 + } 818 820 819 821 /* We only support GT resets with GuC submission */ 820 - if (!xe_device_uc_enabled(gt_to_xe(gt))) 821 - return -ENODEV; 822 + if (!xe_device_uc_enabled(gt_to_xe(gt))) { 823 + err = -ENODEV; 824 + goto err_pm_put; 825 + } 822 826 823 827 xe_gt_info(gt, "reset started\n"); 824 828 825 829 err = gt_wait_reset_unblock(gt); 826 830 if (!err) 827 831 xe_gt_warn(gt, "reset block failed to get lifted"); 828 - 829 - xe_pm_runtime_get(gt_to_xe(gt)); 830 832 831 833 if (xe_fault_inject_gt_reset()) { 832 834 err = -ECANCELED; ··· 876 874 xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); 877 875 878 876 xe_device_declare_wedged(gt_to_xe(gt)); 877 + err_pm_put: 879 878 xe_pm_runtime_put(gt_to_xe(gt)); 880 879 881 880 return err; ··· 898 895 return; 899 896 900 897 xe_gt_info(gt, "reset queued\n"); 901 - queue_work(gt->ordered_wq, &gt->reset.worker); 898 + xe_pm_runtime_get_noresume(gt_to_xe(gt)); 899 + if (!queue_work(gt->ordered_wq, &gt->reset.worker)) 900 + xe_pm_runtime_put(gt_to_xe(gt)); 902 901 } 903 902 904 903 void xe_gt_suspend_prepare(struct xe_gt *gt)
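The reset path now pins a runtime-PM reference before queueing the worker and returns it immediately when queue_work() reports the item was already pending, so exactly one reference travels with each queued work. A small sketch of that ownership rule, with plain counters standing in for the PM and workqueue APIs:

#include <stdbool.h>
#include <stdio.h>

static int refcount;
static bool pending;

static void ref_get(void) { refcount++; }
static void ref_put(void) { refcount--; }

static bool queue_work_once(void)
{
	if (pending)
		return false;   /* already queued: nothing new to own */
	pending = true;
	return true;
}

static void schedule_reset(void)
{
	ref_get();              /* this ref now belongs to the worker */
	if (!queue_work_once())
		ref_put();      /* work was already queued: hand it back */
}

int main(void)
{
	schedule_reset();
	schedule_reset();       /* second call must not leak a reference */
	printf("refcount=%d (worker drops the remaining one)\n", refcount);
	return 0;
}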
+4 -4
drivers/gpu/drm/xe/xe_validation.h
··· 166 166 */ 167 167 DEFINE_CLASS(xe_validation, struct xe_validation_ctx *, 168 168 if (_T) xe_validation_ctx_fini(_T);, 169 - ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); 170 - _ret ? NULL : _ctx; }), 169 + ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); 170 + *_ret ? NULL : _ctx; }), 171 171 struct xe_validation_ctx *_ctx, struct xe_validation_device *_val, 172 - struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret); 172 + struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret); 173 173 static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T) 174 174 {return *_T; } 175 175 #define class_xe_validation_is_conditional true ··· 186 186 * exhaustive eviction. 187 187 */ 188 188 #define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \ 189 - scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \ 189 + scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \ 190 190 drm_exec_until_all_locked(_exec) 191 191 192 192 #endif
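Passing _ret by value meant the constructor expression inside the class macro updated a dead copy; switching to int * lets the guard report its status back to the caller's variable. A reduced sketch of why the pointer is needed — it uses a GCC/Clang statement expression, as the kernel macro does, and the names are invented rather than the xe_validation machinery:

#include <stdio.h>

static int ctx_init(void) { return -22; /* pretend -EINVAL */ }

/* By-pointer: the caller's variable sees the real status. */
#define GUARD_INIT(_retp) \
	({ *(_retp) = ctx_init(); *(_retp) ? NULL : (void *)1; })

int main(void)
{
	int ret = 0;
	void *ctx = GUARD_INIT(&ret);

	printf("ctx=%p ret=%d\n", ctx, ret); /* ctx=(nil) ret=-22 */
	return 0;
}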
+6 -1
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
··· 290 290 return -EINVAL; 291 291 } 292 292 293 + if (unlikely(!try_module_get(THIS_MODULE))) { 294 + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference"); 295 + return -ENODEV; 296 + } 297 + 293 298 sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL); 294 299 if (!sa_entry) { 295 300 res = -ENOMEM; 301 + module_put(THIS_MODULE); 296 302 goto out; 297 303 } 298 304 ··· 307 301 sa_entry->esn = 1; 308 302 ch_ipsec_setkey(x, sa_entry); 309 303 x->xso.offload_handle = (unsigned long)sa_entry; 310 - try_module_get(THIS_MODULE); 311 304 out: 312 305 return res; 313 306 }
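Taking the module reference before the first allocation means every later failure path can unwind it symmetrically, and the previously unchecked try_module_get() can no longer fail silently after the state is live. A sketch of that ordering with stub refcounting; the stubs stand in for the module and xfrm state APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static int module_refs;

static bool try_module_get_stub(void) { module_refs++; return true; }
static void module_put_stub(void)     { module_refs--; }

static void *alloc_sa(bool fail) { return fail ? NULL : malloc(1); }

static int add_state(bool alloc_fails)
{
	void *sa;

	if (!try_module_get_stub())
		return -19;             /* pretend -ENODEV */

	sa = alloc_sa(alloc_fails);
	if (!sa) {
		module_put_stub();      /* unwind the ref we hold */
		return -12;             /* pretend -ENOMEM */
	}
	free(sa);
	/* On success the ref is kept; it pairs with the release path. */
	return 0;
}

int main(void)
{
	printf("fail path: ret=%d refs=%d\n", add_state(true), module_refs);
	return 0;
}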
+1
drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
··· 17 17 #define HBG_PCU_CACHE_LINE_SIZE 32 18 18 #define HBG_TX_TIMEOUT_BUF_LEN 1024 19 19 #define HBG_RX_DESCR 0x01 20 + #define HBG_NO_PHY 0xFF 20 21 21 22 #define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \ 22 23 HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
+6 -4
drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
··· 136 136 { 137 137 struct net_device *netdev = pci_get_drvdata(pdev); 138 138 139 - netif_device_detach(netdev); 140 - 141 - if (state == pci_channel_io_perm_failure) 139 + if (state == pci_channel_io_perm_failure) { 140 + netif_device_detach(netdev); 142 141 return PCI_ERS_RESULT_DISCONNECT; 142 + } 143 143 144 - pci_disable_device(pdev); 145 144 return PCI_ERS_RESULT_NEED_RESET; 146 145 } 147 146 ··· 148 149 { 149 150 struct net_device *netdev = pci_get_drvdata(pdev); 150 151 struct hbg_priv *priv = netdev_priv(netdev); 152 + 153 + netif_device_detach(netdev); 154 + pci_disable_device(pdev); 151 155 152 156 if (pci_enable_device(pdev)) { 153 157 dev_err(&pdev->dev,
+3
drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
··· 244 244 245 245 hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); 246 246 247 + if (priv->mac.phy_addr == HBG_NO_PHY) 248 + return; 249 + 247 250 /* wait MAC link up */ 248 251 ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR, 249 252 link_status,
+1
drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
··· 32 32 const struct hbg_irq_info *irq_info) 33 33 { 34 34 priv->stats.rx_fifo_less_empty_thrsld_cnt++; 35 + hbg_hw_irq_enable(priv, irq_info->mask, true); 35 36 } 36 37 37 38 #define HBG_IRQ_I(name, handle) \
-1
drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
··· 20 20 #define HBG_MDIO_OP_INTERVAL_US (5 * 1000) 21 21 22 22 #define HBG_NP_LINK_FAIL_RETRY_TIMES 5 23 - #define HBG_NO_PHY 0xFF 24 23 25 24 static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd) 26 25 {
+1 -2
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 9429 9429 /* this command reads phy id and register at the same time */ 9430 9430 fallthrough; 9431 9431 case SIOCGMIIREG: 9432 - data->val_out = hclge_read_phy_reg(hdev, data->reg_num); 9433 - return 0; 9432 + return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out); 9434 9433 9435 9434 case SIOCSMIIREG: 9436 9435 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
+6 -3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
··· 274 274 phy_stop(phydev); 275 275 } 276 276 277 - u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) 277 + int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val) 278 278 { 279 279 struct hclge_phy_reg_cmd *req; 280 280 struct hclge_desc desc; ··· 286 286 req->reg_addr = cpu_to_le16(reg_addr); 287 287 288 288 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 289 - if (ret) 289 + if (ret) { 290 290 dev_err(&hdev->pdev->dev, 291 291 "failed to read phy reg, ret = %d.\n", ret); 292 + return ret; 293 + } 292 294 293 - return le16_to_cpu(req->reg_val); 295 + *val = le16_to_cpu(req->reg_val); 296 + return 0; 294 297 } 295 298 296 299 int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
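The new signature separates transport status from register data, so a failed MDIO transfer can no longer be mistaken for a register that happens to read back as an error pattern. A sketch of the same status-plus-out-parameter shape; read_hw is a hypothetical stand-in for the firmware command:

#include <stdint.h>
#include <stdio.h>

static int read_hw(uint16_t addr, uint16_t *raw)
{
	if (addr > 0x1f)
		return -5;      /* pretend -EIO */
	*raw = 0x1234;
	return 0;
}

static int read_phy_reg(uint16_t addr, uint16_t *val)
{
	uint16_t raw;
	int ret = read_hw(addr, &raw);

	if (ret)
		return ret;     /* caller sees the error, not junk data */
	*val = raw;
	return 0;
}

int main(void)
{
	uint16_t v = 0;

	printf("ok: %d (v=%#x)\n", read_phy_reg(1, &v), v);
	printf("bad addr: %d\n", read_phy_reg(0x20, &v));
	return 0;
}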
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
··· 13 13 void hclge_mac_disconnect_phy(struct hnae3_handle *handle); 14 14 void hclge_mac_start_phy(struct hclge_dev *hdev); 15 15 void hclge_mac_stop_phy(struct hclge_dev *hdev); 16 - u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); 16 + int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val); 17 17 int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val); 18 18 19 19 #endif
+33 -2
drivers/net/ethernet/intel/ice/ice_common.c
··· 4382 4382 unsigned int lane; 4383 4383 int err; 4384 4384 4385 + /* E82X does not have sequential IDs, lane number is PF ID. 4386 + * For E825 device, the exception is the variant with external 4387 + * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number 4388 + * mapping. 4389 + */ 4390 + if (hw->mac_type == ICE_MAC_GENERIC || 4391 + hw->device_id == ICE_DEV_ID_E825C_SGMII) 4392 + return hw->pf_id; 4393 + 4385 4394 options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL); 4386 4395 if (!options) 4387 4396 return -ENOMEM; ··· 6506 6497 } 6507 6498 6508 6499 /** 6500 + * ice_get_dest_cgu - get destination CGU dev for given HW 6501 + * @hw: pointer to the HW struct 6502 + * 6503 + * Get CGU client id for CGU register read/write operations. 6504 + * 6505 + * Return: CGU device id to use in SBQ transactions. 6506 + */ 6507 + static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw) 6508 + { 6509 + /* On dual complex E825 only complex 0 has functional CGU powering all 6510 + * the PHYs. 6511 + * SBQ destination device cgu points to CGU on a current complex and to 6512 + * access primary CGU from the secondary complex, the driver should use 6513 + * cgu_peer as a destination device. 6514 + */ 6515 + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) && 6516 + !ice_is_primary(hw)) 6517 + return ice_sbq_dev_cgu_peer; 6518 + return ice_sbq_dev_cgu; 6519 + } 6520 + 6521 + /** 6509 6522 * ice_read_cgu_reg - Read a CGU register 6510 6523 * @hw: Pointer to the HW struct 6511 6524 * @addr: Register address to read ··· 6541 6510 int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val) 6542 6511 { 6543 6512 struct ice_sbq_msg_input cgu_msg = { 6513 + .dest_dev = ice_get_dest_cgu(hw), 6544 6514 .opcode = ice_sbq_msg_rd, 6545 - .dest_dev = ice_sbq_dev_cgu, 6546 6515 .msg_addr_low = addr 6547 6516 }; 6548 6517 int err; ··· 6573 6542 int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val) 6574 6543 { 6575 6544 struct ice_sbq_msg_input cgu_msg = { 6545 + .dest_dev = ice_get_dest_cgu(hw), 6576 6546 .opcode = ice_sbq_msg_wr, 6577 - .dest_dev = ice_sbq_dev_cgu, 6578 6547 .msg_addr_low = addr, 6579 6548 .data = val 6580 6549 };
+1 -1
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
··· 1479 1479 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; 1480 1480 1481 1481 hw->blk[blk].masks.count = per_pf; 1482 - hw->blk[blk].masks.first = hw->pf_id * per_pf; 1482 + hw->blk[blk].masks.first = hw->logical_pf_id * per_pf; 1483 1483 1484 1484 memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks)); 1485 1485
+1
drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
··· 50 50 ice_sbq_dev_phy_0 = 0x02, 51 51 ice_sbq_dev_cgu = 0x06, 52 52 ice_sbq_dev_phy_0_peer = 0x0D, 53 + ice_sbq_dev_cgu_peer = 0x0F, 53 54 }; 54 55 55 56 enum ice_sbq_msg_opcode {
+1 -1
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 2281 2281 case ETH_SS_PRIV_FLAGS: 2282 2282 return IGB_PRIV_FLAGS_STR_LEN; 2283 2283 default: 2284 - return -ENOTSUPP; 2284 + return -EOPNOTSUPP; 2285 2285 } 2286 2286 } 2287 2287
+4 -1
drivers/net/ethernet/intel/igc/igc_ethtool.c
··· 810 810 case ETH_SS_PRIV_FLAGS: 811 811 return IGC_PRIV_FLAGS_STR_LEN; 812 812 default: 813 - return -ENOTSUPP; 813 + return -EOPNOTSUPP; 814 814 } 815 815 } 816 816 ··· 2093 2093 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 2094 2094 netdev_info(adapter->netdev, "Offline testing starting"); 2095 2095 set_bit(__IGC_TESTING, &adapter->state); 2096 + 2097 + /* power up PHY for link test */ 2098 + igc_power_up_phy_copper(&adapter->hw); 2096 2099 2097 2100 /* Link test performed before hardware reset so autoneg doesn't 2098 2101 * interfere with test result
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 11507 11507 shutdown_aci: 11508 11508 mutex_destroy(&adapter->hw.aci.lock); 11509 11509 ixgbe_release_hw_control(adapter); 11510 - devlink_free(adapter->devlink); 11511 11510 clean_up_probe: 11512 11511 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); 11513 11512 free_netdev(netdev); 11513 + devlink_free(adapter->devlink); 11514 11514 pci_release_mem_regions(pdev); 11515 11515 if (disable_dev) 11516 11516 pci_disable_device(pdev);
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
··· 641 641 * disabled 642 642 */ 643 643 if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) 644 - return -ENOTSUPP; 644 + return -EOPNOTSUPP; 645 645 646 646 if (on) 647 647 adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
+35 -6
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
··· 320 320 err_free: 321 321 kfree(buf); 322 322 err_out: 323 - priv_rx->rq_stats->tls_resync_req_skip++; 324 323 return err; 325 324 } 326 325 ··· 338 339 339 340 if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { 340 341 mlx5e_ktls_priv_rx_put(priv_rx); 342 + priv_rx->rq_stats->tls_resync_req_skip++; 343 + tls_offload_rx_resync_async_request_cancel(&resync->core); 341 344 return; 342 345 } 343 346 344 347 c = resync->priv->channels.c[priv_rx->rxq]; 345 348 sq = &c->async_icosq; 346 349 347 - if (resync_post_get_progress_params(sq, priv_rx)) 350 + if (resync_post_get_progress_params(sq, priv_rx)) { 351 + priv_rx->rq_stats->tls_resync_req_skip++; 352 + tls_offload_rx_resync_async_request_cancel(&resync->core); 348 353 mlx5e_ktls_priv_rx_put(priv_rx); 354 + } 349 355 } 350 356 351 357 static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, ··· 429 425 { 430 426 struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf; 431 427 struct mlx5e_ktls_offload_context_rx *priv_rx; 428 + struct tls_offload_resync_async *async_resync; 429 + struct tls_offload_context_rx *rx_ctx; 432 430 u8 tracker_state, auth_state, *ctx; 433 431 struct device *dev; 434 432 u32 hw_seq; 435 433 436 434 priv_rx = buf->priv_rx; 437 435 dev = mlx5_core_dma_dev(sq->channel->mdev); 438 - if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) 436 + rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk)); 437 + async_resync = rx_ctx->resync_async; 438 + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { 439 + priv_rx->rq_stats->tls_resync_req_skip++; 440 + tls_offload_rx_resync_async_request_cancel(async_resync); 439 441 goto out; 442 + } 440 443 441 444 dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, 442 445 DMA_FROM_DEVICE); ··· 454 443 if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || 455 444 auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) { 456 445 priv_rx->rq_stats->tls_resync_req_skip++; 446 + tls_offload_rx_resync_async_request_cancel(async_resync); 457 447 goto out; 458 448 } 459 449 460 450 hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); 461 - tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); 451 + tls_offload_rx_resync_async_request_end(async_resync, 452 + cpu_to_be32(hw_seq)); 462 453 priv_rx->rq_stats->tls_resync_req_end++; 463 454 out: 464 455 mlx5e_ktls_priv_rx_put(priv_rx); ··· 485 472 486 473 resync = &priv_rx->resync; 487 474 mlx5e_ktls_priv_rx_get(priv_rx); 488 - if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) 475 + if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) { 489 476 mlx5e_ktls_priv_rx_put(priv_rx); 477 + return false; 478 + } 490 479 491 480 return true; 492 481 } ··· 497 482 static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb) 498 483 { 499 484 struct ethhdr *eth = (struct ethhdr *)(skb->data); 485 + struct tls_offload_resync_async *resync_async; 500 486 struct net_device *netdev = rq->netdev; 501 487 struct net *net = dev_net(netdev); 502 488 struct sock *sk = NULL; ··· 543 527 544 528 seq = th->seq; 545 529 datalen = skb->len - depth; 546 - tls_offload_rx_resync_async_request_start(sk, seq, datalen); 530 + resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async; 531 + tls_offload_rx_resync_async_request_start(resync_async, seq, datalen); 547 532 rq->stats->tls_resync_req_start++; 548 533 549 534 unref: ··· 571 554 c = priv->channels.c[priv_rx->rxq]; 572 555 
··· 320 320 err_free: 321 321 kfree(buf); 322 322 err_out: 323 - priv_rx->rq_stats->tls_resync_req_skip++; 324 323 return err; 325 324 } 326 325 ··· 338 339 339 340 if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { 340 341 mlx5e_ktls_priv_rx_put(priv_rx); 342 + priv_rx->rq_stats->tls_resync_req_skip++; 343 + tls_offload_rx_resync_async_request_cancel(&resync->core); 341 344 return; 342 345 } 343 346 344 347 c = resync->priv->channels.c[priv_rx->rxq]; 345 348 sq = &c->async_icosq; 346 349 347 - if (resync_post_get_progress_params(sq, priv_rx)) 350 + if (resync_post_get_progress_params(sq, priv_rx)) { 351 + priv_rx->rq_stats->tls_resync_req_skip++; 352 + tls_offload_rx_resync_async_request_cancel(&resync->core); 348 353 mlx5e_ktls_priv_rx_put(priv_rx); 354 + } 349 355 } 350 356 351 357 static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, ··· 429 425 { 430 426 struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf; 431 427 struct mlx5e_ktls_offload_context_rx *priv_rx; 428 + struct tls_offload_resync_async *async_resync; 429 + struct tls_offload_context_rx *rx_ctx; 432 430 u8 tracker_state, auth_state, *ctx; 433 431 struct device *dev; 434 432 u32 hw_seq; 435 433 436 434 priv_rx = buf->priv_rx; 437 435 dev = mlx5_core_dma_dev(sq->channel->mdev); 438 - if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) 436 + rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk)); 437 + async_resync = rx_ctx->resync_async; 438 + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { 439 + priv_rx->rq_stats->tls_resync_req_skip++; 440 + tls_offload_rx_resync_async_request_cancel(async_resync); 439 441 goto out; 442 + } 440 443 441 444 dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, 442 445 DMA_FROM_DEVICE); ··· 454 443 if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || 455 444 auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) { 456 445 priv_rx->rq_stats->tls_resync_req_skip++; 446 + tls_offload_rx_resync_async_request_cancel(async_resync); 457 447 goto out; 458 448 } 459 449 460 450 hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); 461 - tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); 451 + tls_offload_rx_resync_async_request_end(async_resync, 452 + cpu_to_be32(hw_seq)); 462 453 priv_rx->rq_stats->tls_resync_req_end++; 463 454 out: 464 455 mlx5e_ktls_priv_rx_put(priv_rx); ··· 485 472 486 473 resync = &priv_rx->resync; 487 474 mlx5e_ktls_priv_rx_get(priv_rx); 488 - if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) 475 + if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) { 489 476 mlx5e_ktls_priv_rx_put(priv_rx); 477 + return false; 478 + } 490 479 491 480 return true; 492 481 } ··· 497 482 static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb) 498 483 { 499 484 struct ethhdr *eth = (struct ethhdr *)(skb->data); 485 + struct tls_offload_resync_async *resync_async; 500 486 struct net_device *netdev = rq->netdev; 501 487 struct net *net = dev_net(netdev); 502 488 struct sock *sk = NULL; ··· 543 527 544 528 seq = th->seq; 545 529 datalen = skb->len - depth; 546 - tls_offload_rx_resync_async_request_start(sk, seq, datalen); 530 + resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async; 531 + tls_offload_rx_resync_async_request_start(resync_async, seq, datalen); 547 532 rq->stats->tls_resync_req_start++; 548 533 549 534 unref: ··· 571 554 c = priv->channels.c[priv_rx->rxq]; 572 555
573 556 resync_handle_seq_match(priv_rx, c); 557 + } 558 + 559 + void 560 + mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi) 561 + { 562 + struct mlx5e_ktls_offload_context_rx *priv_rx; 563 + struct mlx5e_ktls_rx_resync_buf *buf; 564 + 565 + buf = wi->tls_get_params.buf; 566 + priv_rx = buf->priv_rx; 567 + priv_rx->rq_stats->tls_resync_req_skip++; 568 + tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core); 574 569 } 575 570 576 571 /* End of resync section */
+4
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
··· 29 29 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, 30 30 struct mlx5e_tx_wqe_info *wi, 31 31 u32 *dma_fifo_cc); 32 + 33 + void 34 + mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi); 35 + 32 36 static inline bool 33 37 mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, 34 38 struct mlx5e_tx_wqe_info *wi,
+4
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1036 1036 netdev_WARN_ONCE(cq->netdev, 1037 1037 "Bad OP in ICOSQ CQE: 0x%x\n", 1038 1038 get_cqe_opcode(cqe)); 1039 + #ifdef CONFIG_MLX5_EN_TLS 1040 + if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS) 1041 + mlx5e_ktls_rx_resync_async_request_cancel(wi); 1042 + #endif 1039 1043 mlx5e_dump_error_cqe(&sq->cq, sq->sqn, 1040 1044 (struct mlx5_err_cqe *)cqe); 1041 1045 mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-1
drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
··· 66 66 esw->fdb_table.legacy.addr_grp = NULL; 67 67 esw->fdb_table.legacy.allmulti_grp = NULL; 68 68 esw->fdb_table.legacy.promisc_grp = NULL; 69 - atomic64_set(&esw->user_count, 0); 70 69 } 71 70 72 71 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
-1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 1978 1978 /* Holds true only as long as DMFS is the default */ 1979 1979 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, 1980 1980 MLX5_FLOW_STEERING_MODE_DMFS); 1981 - atomic64_set(&esw->user_count, 0); 1982 1981 } 1983 1982 1984 1983 static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
+4 -2
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 2557 2557 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, 2558 2558 &nn->tlv_caps); 2559 2559 if (err) 2560 - goto err_free_nn; 2560 + goto err_free_xsk_pools; 2561 2561 2562 2562 err = nfp_ccm_mbox_alloc(nn); 2563 2563 if (err) 2564 - goto err_free_nn; 2564 + goto err_free_xsk_pools; 2565 2565 2566 2566 return nn; 2567 2567 2568 + err_free_xsk_pools: 2569 + kfree(nn->dp.xsk_pools); 2568 2570 err_free_nn: 2569 2571 if (nn->dp.netdev) 2570 2572 free_netdev(nn->dp.netdev);
+4
drivers/net/ethernet/sfc/mae.c
··· 1090 1090 kfree(mport); 1091 1091 } 1092 1092 1093 + /* 1094 + * Takes ownership of @desc, even if it returns an error 1095 + */ 1093 1096 static int efx_mae_process_mport(struct efx_nic *efx, 1094 1097 struct mae_mport_desc *desc) 1095 1098 { ··· 1103 1100 if (!IS_ERR_OR_NULL(mport)) { 1104 1101 netif_err(efx, drv, efx->net_dev, 1105 1102 "mport with id %u does exist!!!\n", desc->mport_id); 1103 + kfree(desc); 1106 1104 return -EEXIST; 1107 1105 } 1108 1106
+14 -18
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 4089 4089 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, 4090 4090 struct stmmac_tx_queue *tx_q) 4091 4091 { 4092 - u16 tag = 0x0, inner_tag = 0x0; 4093 - u32 inner_type = 0x0; 4094 4092 struct dma_desc *p; 4093 + u16 tag = 0x0; 4095 4094 4096 - if (!priv->dma_cap.vlins) 4095 + if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb)) 4097 4096 return false; 4098 - if (!skb_vlan_tag_present(skb)) 4099 - return false; 4100 - if (skb->vlan_proto == htons(ETH_P_8021AD)) { 4101 - inner_tag = skb_vlan_tag_get(skb); 4102 - inner_type = STMMAC_VLAN_INSERT; 4103 - } 4104 4097 4105 4098 tag = skb_vlan_tag_get(skb); 4106 4099 ··· 4102 4109 else 4103 4110 p = &tx_q->dma_tx[tx_q->cur_tx]; 4104 4111 4105 - if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) 4112 + if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0)) 4106 4113 return false; 4107 4114 4108 4115 stmmac_set_tx_owner(priv, p); ··· 4500 4507 bool has_vlan, set_ic; 4501 4508 int entry, first_tx; 4502 4509 dma_addr_t des; 4510 + u32 sdu_len; 4503 4511 4504 4512 tx_q = &priv->dma_conf.tx_queue[queue]; 4505 4513 txq_stats = &priv->xstats.txq_stats[queue]; ··· 4518 4524 } 4519 4525 4520 4526 if (priv->est && priv->est->enable && 4521 - priv->est->max_sdu[queue] && 4522 - skb->len > priv->est->max_sdu[queue]){ 4523 - priv->xstats.max_sdu_txq_drop[queue]++; 4524 - goto max_sdu_err; 4527 + priv->est->max_sdu[queue]) { 4528 + sdu_len = skb->len; 4529 + /* Add VLAN tag length if VLAN tag insertion offload is requested */ 4530 + if (priv->dma_cap.vlins && skb_vlan_tag_present(skb)) 4531 + sdu_len += VLAN_HLEN; 4532 + if (sdu_len > priv->est->max_sdu[queue]) { 4533 + priv->xstats.max_sdu_txq_drop[queue]++; 4534 + goto max_sdu_err; 4535 + } 4525 4536 } 4526 4537 4527 4538 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { ··· 7572 7573 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 7573 7574 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 7574 7575 } 7575 - if (priv->dma_cap.vlins) { 7576 + if (priv->dma_cap.vlins) 7576 7577 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 7577 - if (priv->dma_cap.dvlan) 7578 - ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 7579 - } 7580 7578 #endif 7581 7579 priv->msg_enable = netif_msg_init(debug, default_msg_level); 7582 7580
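The SDU check now counts the 4 bytes the MAC will insert when VLAN tag offload is requested, comparing the post-insertion frame size against the queue limit. The arithmetic, reduced to a stand-alone check:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN 4

static bool fits_sdu(unsigned int skb_len, bool hw_vlan_insert,
		     unsigned int max_sdu)
{
	unsigned int sdu_len = skb_len;

	if (hw_vlan_insert)
		sdu_len += VLAN_HLEN; /* tag added by hardware later */
	return sdu_len <= max_sdu;
}

int main(void)
{
	/* 1500-byte frame, 1502-byte limit: only fits without the tag. */
	printf("no tag: %d\n", fits_sdu(1500, false, 1502)); /* 1 */
	printf("tagged: %d\n", fits_sdu(1500, true, 1502));  /* 0 */
	return 0;
}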
+2 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
··· 981 981 if (qopt->cmd == TAPRIO_CMD_DESTROY) 982 982 goto disable; 983 983 984 - if (qopt->num_entries >= dep) 984 + if (qopt->num_entries > dep) 985 985 return -EINVAL; 986 986 if (!qopt->cycle_time) 987 987 return -ERANGE; ··· 1012 1012 s64 delta_ns = qopt->entries[i].interval; 1013 1013 u32 gates = qopt->entries[i].gate_mask; 1014 1014 1015 - if (delta_ns > GENMASK(wid, 0)) 1015 + if (delta_ns > GENMASK(wid - 1, 0)) 1016 1016 return -ERANGE; 1017 1017 if (gates > GENMASK(31 - wid, 0)) 1018 1018 return -ERANGE;
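GENMASK(h, l) includes both endpoints, so GENMASK(wid, 0) spans wid + 1 bits; the fix compares the interval against the true wid-bit maximum. A userspace rendition — the GENMASK below is a simplified 64-bit version of the kernel macro:

#include <stdio.h>

#define GENMASK(h, l) \
	(((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

int main(void)
{
	unsigned int wid = 24; /* e.g. a 24-bit interval field */

	printf("GENMASK(wid, 0)     = %#llx (%u bits, one too many)\n",
	       GENMASK(wid, 0), wid + 1);
	printf("GENMASK(wid - 1, 0) = %#llx (%u bits, correct)\n",
	       GENMASK(wid - 1, 0), wid);
	return 0;
}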
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
··· 212 212 213 213 value = readl(ioaddr + VLAN_INCL); 214 214 value |= VLAN_VLTI; 215 - value |= VLAN_CSVL; /* Only use SVLAN */ 215 + value &= ~VLAN_CSVL; /* Only use CVLAN */ 216 216 value &= ~VLAN_VLC; 217 217 value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC; 218 218 writel(value, ioaddr + VLAN_INCL);
+5 -3
drivers/net/mctp/mctp-usb.c
··· 96 96 skb->data, skb->len, 97 97 mctp_usb_out_complete, skb); 98 98 99 + /* Stops TX queue first to prevent race condition with URB complete */ 100 + netif_stop_queue(dev); 99 101 rc = usb_submit_urb(urb, GFP_ATOMIC); 100 - if (rc) 102 + if (rc) { 103 + netif_wake_queue(dev); 101 104 goto err_drop; 102 - else 103 - netif_stop_queue(dev); 105 + } 104 106 105 107 return NETDEV_TX_OK; 106 108
+13 -8
drivers/net/netconsole.c
··· 886 886 887 887 static void update_userdata(struct netconsole_target *nt) 888 888 { 889 - int complete_idx = 0, child_count = 0; 890 889 struct list_head *entry; 890 + int child_count = 0; 891 + unsigned long flags; 892 + 893 + spin_lock_irqsave(&target_list_lock, flags); 891 894 892 895 /* Clear the current string in case the last userdatum was deleted */ 893 896 nt->userdata_length = 0; ··· 900 897 struct userdatum *udm_item; 901 898 struct config_item *item; 902 899 903 - if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS)) 904 - break; 900 + if (child_count >= MAX_EXTRADATA_ITEMS) { 901 + spin_unlock_irqrestore(&target_list_lock, flags); 902 + WARN_ON_ONCE(1); 903 + return; 904 + } 905 905 child_count++; 906 906 907 907 item = container_of(entry, struct config_item, ci_entry); ··· 918 912 * one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is 919 913 * checked to not exceed MAX items with child_count above 920 914 */ 921 - complete_idx += scnprintf(&nt->extradata_complete[complete_idx], 922 - MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", 923 - item->ci_name, udm_item->value); 915 + nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length], 916 + MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", 917 + item->ci_name, udm_item->value); 924 918 } 925 - nt->userdata_length = strnlen(nt->extradata_complete, 926 - sizeof(nt->extradata_complete)); 919 + spin_unlock_irqrestore(&target_list_lock, flags); 927 920 } 928 921 929 922 static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
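The rework keeps a running offset in nt->userdata_length and appends each entry at that offset under target_list_lock, instead of rebuilding a separate index and re-measuring with strnlen() afterwards. The accumulation pattern in userspace form; snprintf stands in for the kernel's scnprintf, which returns bytes actually written rather than bytes wanted, so a truncating write would need the scnprintf semantics:

#include <stdio.h>

int main(void)
{
	char buf[128];
	size_t len = 0;
	const char *keys[] = { "rack", "role" };
	const char *vals[] = { "r12", "db" };

	for (int i = 0; i < 2; i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				" %s=%s\n", keys[i], vals[i]);
	printf("%zu bytes:%s", len, buf);
	return 0;
}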
+6
drivers/net/phy/dp83867.c
··· 738 738 return ret; 739 739 } 740 740 741 + /* Although the DP83867 reports EEE capability through the 742 + * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature 743 + * is not actually implemented in hardware. 744 + */ 745 + phy_disable_eee(phydev); 746 + 741 747 if (phy_interface_is_rgmii(phydev) || 742 748 phydev->interface == PHY_INTERFACE_MODE_SGMII) { 743 749 val = phy_read(phydev, MII_DP83867_PHYCTRL);
+2 -2
drivers/net/phy/dp83869.c
··· 84 84 #define DP83869_CLK_DELAY_DEF 7 85 85 86 86 /* STRAP_STS1 bits */ 87 - #define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0) 87 + #define DP83869_STRAP_OP_MODE_MASK GENMASK(11, 9) 88 88 #define DP83869_STRAP_STS1_RESERVED BIT(11) 89 89 #define DP83869_STRAP_MIRROR_ENABLED BIT(12) 90 90 ··· 528 528 if (val < 0) 529 529 return val; 530 530 531 - dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK; 531 + dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val); 532 532 533 533 return 0; 534 534 }
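With the mask corrected to bits 11:9, a plain AND would leave the strap value shifted up by nine bits; FIELD_GET normalizes it to the 0..7 mode index the driver compares against. The same extraction in miniature — field_get here is a runtime mask-and-shift sketch, not the kernel's constant-folding FIELD_GET:

#include <stdio.h>

#define STRAP_OP_MODE_MASK 0x0e00u /* bits 11:9 */

static unsigned int field_get(unsigned int mask, unsigned int reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	unsigned int strap_sts1 = 0x0600; /* mode 3 strapped */

	printf("raw AND  : %#x\n", strap_sts1 & STRAP_OP_MODE_MASK); /* 0x600 */
	printf("FIELD_GET: %u\n", field_get(STRAP_OP_MODE_MASK, strap_sts1)); /* 3 */
	return 0;
}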
+9 -3
drivers/net/usb/asix_devices.c
··· 230 230 int i; 231 231 unsigned long gpio_bits = dev->driver_info->data; 232 232 233 - usbnet_get_endpoints(dev,intf); 233 + ret = usbnet_get_endpoints(dev, intf); 234 + if (ret) 235 + goto out; 234 236 235 237 /* Toggle the GPIOs in a manufacturer/model specific way */ 236 238 for (i = 2; i >= 0; i--) { ··· 850 848 851 849 dev->driver_priv = priv; 852 850 853 - usbnet_get_endpoints(dev, intf); 851 + ret = usbnet_get_endpoints(dev, intf); 852 + if (ret) 853 + return ret; 854 854 855 855 /* Maybe the boot loader passed the MAC address via device tree */ 856 856 if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { ··· 1285 1281 int ret; 1286 1282 u8 buf[ETH_ALEN] = {0}; 1287 1283 1288 - usbnet_get_endpoints(dev,intf); 1284 + ret = usbnet_get_endpoints(dev, intf); 1285 + if (ret) 1286 + return ret; 1289 1287 1290 1288 /* Get the MAC address */ 1291 1289 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
+2
drivers/net/usb/usbnet.c
··· 1659 1659 net = dev->net; 1660 1660 unregister_netdev (net); 1661 1661 1662 + cancel_work_sync(&dev->kevent); 1663 + 1662 1664 while ((urb = usb_get_from_anchor(&dev->deferred))) { 1663 1665 dev_kfree_skb(urb->context); 1664 1666 kfree(urb->sg);
+8 -3
drivers/net/virtio_net.c
··· 1379 1379 ret = XDP_PASS; 1380 1380 rcu_read_lock(); 1381 1381 prog = rcu_dereference(rq->xdp_prog); 1382 - /* TODO: support multi buffer. */ 1383 - if (prog && num_buf == 1) 1384 - ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats); 1382 + if (prog) { 1383 + /* TODO: support multi buffer. */ 1384 + if (num_buf == 1) 1385 + ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, 1386 + stats); 1387 + else 1388 + ret = XDP_ABORTED; 1389 + } 1385 1390 rcu_read_unlock(); 1386 1391 1387 1392 switch (ret) {
+1
drivers/net/wireless/ath/ath10k/wmi.c
··· 1937 1937 if (cmd_id == WMI_CMD_UNSUPPORTED) { 1938 1938 ath10k_warn(ar, "wmi command %d is not supported by firmware\n", 1939 1939 cmd_id); 1940 + dev_kfree_skb_any(skb); 1940 1941 return ret; 1941 1942 } 1942 1943
+48 -6
drivers/net/wireless/ath/ath11k/core.c
··· 912 912 static const struct dmi_system_id ath11k_pm_quirk_table[] = { 913 913 { 914 914 .driver_data = (void *)ATH11K_PM_WOW, 915 - .matches = { 915 + .matches = { /* X13 G4 AMD #1 */ 916 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 917 + DMI_MATCH(DMI_PRODUCT_NAME, "21J3"), 918 + }, 919 + }, 920 + { 921 + .driver_data = (void *)ATH11K_PM_WOW, 922 + .matches = { /* X13 G4 AMD #2 */ 916 923 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 917 924 DMI_MATCH(DMI_PRODUCT_NAME, "21J4"), 918 925 }, 919 926 }, 920 927 { 921 928 .driver_data = (void *)ATH11K_PM_WOW, 922 - .matches = { 929 + .matches = { /* T14 G4 AMD #1 */ 930 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 931 + DMI_MATCH(DMI_PRODUCT_NAME, "21K3"), 932 + }, 933 + }, 934 + { 935 + .driver_data = (void *)ATH11K_PM_WOW, 936 + .matches = { /* T14 G4 AMD #2 */ 923 937 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 924 938 DMI_MATCH(DMI_PRODUCT_NAME, "21K4"), 925 939 }, 926 940 }, 927 941 { 928 942 .driver_data = (void *)ATH11K_PM_WOW, 929 - .matches = { 943 + .matches = { /* P14s G4 AMD #1 */ 944 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 945 + DMI_MATCH(DMI_PRODUCT_NAME, "21K5"), 946 + }, 947 + }, 948 + { 949 + .driver_data = (void *)ATH11K_PM_WOW, 950 + .matches = { /* P14s G4 AMD #2 */ 930 951 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 931 952 DMI_MATCH(DMI_PRODUCT_NAME, "21K6"), 932 953 }, 933 954 }, 934 955 { 935 956 .driver_data = (void *)ATH11K_PM_WOW, 936 - .matches = { 957 + .matches = { /* T16 G2 AMD #1 */ 958 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 959 + DMI_MATCH(DMI_PRODUCT_NAME, "21K7"), 960 + }, 961 + }, 962 + { 963 + .driver_data = (void *)ATH11K_PM_WOW, 964 + .matches = { /* T16 G2 AMD #2 */ 937 965 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 938 966 DMI_MATCH(DMI_PRODUCT_NAME, "21K8"), 939 967 }, 940 968 }, 941 969 { 942 970 .driver_data = (void *)ATH11K_PM_WOW, 943 - .matches = { 971 + .matches = { /* P16s G2 AMD #1 */ 972 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 973 + DMI_MATCH(DMI_PRODUCT_NAME, "21K9"), 974 + }, 975 + }, 976 + { 977 + .driver_data = (void *)ATH11K_PM_WOW, 978 + .matches = { /* P16s G2 AMD #2 */ 944 979 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 945 980 DMI_MATCH(DMI_PRODUCT_NAME, "21KA"), 946 981 }, 947 982 }, 948 983 { 949 984 .driver_data = (void *)ATH11K_PM_WOW, 950 - .matches = { 985 + .matches = { /* T14s G4 AMD #1 */ 986 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 987 + DMI_MATCH(DMI_PRODUCT_NAME, "21F8"), 988 + }, 989 + }, 990 + { 991 + .driver_data = (void *)ATH11K_PM_WOW, 992 + .matches = { /* T14s G4 AMD #2 */ 951 993 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 952 994 DMI_MATCH(DMI_PRODUCT_NAME, "21F9"), 953 995 },
+5 -5
drivers/net/wireless/ath/ath11k/mac.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 5 5 */ 6 6 7 7 #include <net/mac80211.h> ··· 4417 4417 } 4418 4418 4419 4419 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 4420 - flags |= WMI_KEY_PAIRWISE; 4420 + flags = WMI_KEY_PAIRWISE; 4421 4421 else 4422 - flags |= WMI_KEY_GROUP; 4422 + flags = WMI_KEY_GROUP; 4423 4423 4424 4424 ath11k_dbg(ar->ab, ATH11K_DBG_MAC, 4425 4425 "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n", ··· 4456 4456 4457 4457 is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP && 4458 4458 !arvif->num_stations); 4459 - if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) { 4459 + if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) { 4460 4460 ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags); 4461 4461 if (ret) { 4462 4462 ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret); ··· 4470 4470 goto exit; 4471 4471 } 4472 4472 4473 - if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta) 4473 + if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta) 4474 4474 arvif->reinstall_group_keys = true; 4475 4475 } 4476 4476
+18 -16
drivers/net/wireless/ath/ath12k/mac.c
··· 8290 8290 wake_up(&ar->txmgmt_empty_waitq); 8291 8291 } 8292 8292 8293 - int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) 8293 + static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id) 8294 8294 { 8295 - struct sk_buff *msdu = skb; 8295 + struct sk_buff *msdu; 8296 8296 struct ieee80211_tx_info *info; 8297 - struct ath12k *ar = ctx; 8298 - struct ath12k_base *ab = ar->ab; 8299 8297 8300 8298 spin_lock_bh(&ar->txmgmt_idr_lock); 8301 - idr_remove(&ar->txmgmt_idr, buf_id); 8299 + msdu = idr_remove(&ar->txmgmt_idr, buf_id); 8302 8300 spin_unlock_bh(&ar->txmgmt_idr_lock); 8303 - dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, 8301 + 8302 + if (!msdu) 8303 + return; 8304 + 8305 + dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, 8304 8306 DMA_TO_DEVICE); 8305 8307 8306 8308 info = IEEE80211_SKB_CB(msdu); 8307 8309 memset(&info->status, 0, sizeof(info->status)); 8308 8310 8309 - ath12k_mgmt_over_wmi_tx_drop(ar, skb); 8311 + ath12k_mgmt_over_wmi_tx_drop(ar, msdu); 8312 + } 8313 + 8314 + int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) 8315 + { 8316 + struct ath12k *ar = ctx; 8317 + 8318 + ath12k_mac_tx_mgmt_free(ar, buf_id); 8310 8319 8311 8320 return 0; 8312 8321 } ··· 8324 8315 { 8325 8316 struct ieee80211_vif *vif = ctx; 8326 8317 struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); 8327 - struct sk_buff *msdu = skb; 8328 8318 struct ath12k *ar = skb_cb->ar; 8329 - struct ath12k_base *ab = ar->ab; 8330 8319 8331 - if (skb_cb->vif == vif) { 8332 - spin_lock_bh(&ar->txmgmt_idr_lock); 8333 - idr_remove(&ar->txmgmt_idr, buf_id); 8334 - spin_unlock_bh(&ar->txmgmt_idr_lock); 8335 - dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, 8336 - DMA_TO_DEVICE); 8337 - } 8320 + if (skb_cb->vif == vif) 8321 + ath12k_mac_tx_mgmt_free(ar, buf_id); 8338 8322 8339 8323 return 0; 8340 8324 }
+1 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 5627 5627 *cookie, le16_to_cpu(action_frame->len), 5628 5628 le32_to_cpu(af_params->channel)); 5629 5629 5630 - ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), 5631 - af_params); 5630 + ack = brcmf_p2p_send_action_frame(vif->ifp, af_params); 5632 5631 5633 5632 cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, 5634 5633 GFP_KERNEL);
+10 -18
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
··· 1529 1529 /** 1530 1530 * brcmf_p2p_tx_action_frame() - send action frame over fil. 1531 1531 * 1532 + * @ifp: interface to transmit on. 1532 1533 * @p2p: p2p info struct for vif. 1533 1534 * @af_params: action frame data/info. 1534 1535 * ··· 1539 1538 * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action 1540 1539 * frame is transmitted. 1541 1540 */ 1542 - static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, 1541 + static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp, 1542 + struct brcmf_p2p_info *p2p, 1543 1543 struct brcmf_fil_af_params_le *af_params) 1544 1544 { 1545 1545 struct brcmf_pub *drvr = p2p->cfg->pub; 1546 - struct brcmf_cfg80211_vif *vif; 1547 - struct brcmf_p2p_action_frame *p2p_af; 1548 1546 s32 err = 0; 1549 1547 1550 1548 brcmf_dbg(TRACE, "Enter\n"); ··· 1552 1552 clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); 1553 1553 clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); 1554 1554 1555 - /* check if it is a p2p_presence response */ 1556 - p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data; 1557 - if (p2p_af->subtype == P2P_AF_PRESENCE_RSP) 1558 - vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; 1559 - else 1560 - vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; 1561 - 1562 - err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params, 1555 + err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params, 1563 1556 sizeof(*af_params)); 1564 1557 if (err) { 1565 1558 bphy_err(drvr, " sending action frame has failed\n"); ··· 1704 1711 /** 1705 1712 * brcmf_p2p_send_action_frame() - send action frame . 1706 1713 * 1707 - * @cfg: driver private data for cfg80211 interface. 1708 - * @ndev: net device to transmit on. 1714 + * @ifp: interface to transmit on. 1709 1715 * @af_params: configuration data for action frame. 1710 1716 */ 1711 - bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, 1712 - struct net_device *ndev, 1717 + bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp, 1713 1718 struct brcmf_fil_af_params_le *af_params) 1714 1719 { 1720 + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; 1715 1721 struct brcmf_p2p_info *p2p = &cfg->p2p; 1716 - struct brcmf_if *ifp = netdev_priv(ndev); 1717 1722 struct brcmf_fil_action_frame_le *action_frame; 1718 1723 struct brcmf_config_af_params config_af_params; 1719 1724 struct afx_hdl *afx_hdl = &p2p->afx_hdl; ··· 1848 1857 if (af_params->channel) 1849 1858 msleep(P2P_AF_RETRY_DELAY_TIME); 1850 1859 1851 - ack = !brcmf_p2p_tx_action_frame(p2p, af_params); 1860 + ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params); 1852 1861 tx_retry++; 1853 1862 dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell, 1854 1863 dwell_jiffies); ··· 2208 2217 2209 2218 WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx); 2210 2219 2211 - init_completion(&p2p->send_af_done); 2212 2220 INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler); 2213 2221 init_completion(&p2p->afx_hdl.act_frm_scan); 2214 2222 init_completion(&p2p->wait_next_af); ··· 2502 2512 2503 2513 pri_ifp = brcmf_get_ifp(cfg->pub, 0); 2504 2514 p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; 2515 + 2516 + init_completion(&p2p->send_af_done); 2505 2517 2506 2518 if (p2pdev_forced) { 2507 2519 err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
+1 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
··· 168 168 int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, 169 169 const struct brcmf_event_msg *e, 170 170 void *data); 171 - bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, 172 - struct net_device *ndev, 171 + bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp, 173 172 struct brcmf_fil_af_params_le *af_params); 174 173 bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, 175 174 struct brcmf_bss_info_le *bi);
+3 -2
drivers/net/wireless/intel/iwlwifi/mld/link.c
··· 501 501 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif); 502 502 struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf); 503 503 bool is_deflink = link == &mld_vif->deflink; 504 + u8 fw_id = link->fw_id; 504 505 505 506 if (WARN_ON(!link || link->active)) 506 507 return; ··· 514 513 515 514 RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL); 516 515 517 - if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links)) 516 + if (WARN_ON(fw_id >= mld->fw->ucode_capa.num_links)) 518 517 return; 519 518 520 - RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL); 519 + RCU_INIT_POINTER(mld->fw_id_to_bss_conf[fw_id], NULL); 521 520 } 522 521 523 522 void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
+10 -3
drivers/nvme/host/pci.c
··· 1042 1042 return nvme_pci_setup_data_prp(req, &iter); 1043 1043 } 1044 1044 1045 - static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) 1045 + static blk_status_t nvme_pci_setup_meta_iter(struct request *req) 1046 1046 { 1047 1047 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1048 1048 unsigned int entries = req->nr_integrity_segments; ··· 1072 1072 * descriptor provides an explicit length, so we're relying on that 1073 1073 * mechanism to catch any misunderstandings between the application and 1074 1074 * device. 1075 + * 1076 + * P2P DMA also needs to use the blk_dma_iter method, so mptr setup 1077 + * leverages this routine when that happens. 1075 1078 */ 1076 - if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) { 1079 + if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) || 1080 + (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) { 1077 1081 iod->cmd.common.metadata = cpu_to_le64(iter.addr); 1078 1082 iod->meta_total_len = iter.len; 1079 1083 iod->meta_dma = iter.addr; ··· 1118 1114 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1119 1115 struct bio_vec bv = rq_integrity_vec(req); 1120 1116 1117 + if (is_pci_p2pdma_page(bv.bv_page)) 1118 + return nvme_pci_setup_meta_iter(req); 1119 + 1121 1120 iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); 1122 1121 if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma)) 1123 1122 return BLK_STS_IOERR; ··· 1135 1128 1136 1129 if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && 1137 1130 nvme_pci_metadata_use_sgls(req)) 1138 - return nvme_pci_setup_meta_sgls(req); 1131 + return nvme_pci_setup_meta_iter(req); 1139 1132 return nvme_pci_setup_meta_mptr(req); 1140 1133 } 1141 1134
+3 -2
drivers/nvme/target/auth.c
··· 298 298 const char *hash_name; 299 299 u8 *challenge = req->sq->dhchap_c1; 300 300 struct nvme_dhchap_key *transformed_key; 301 - u8 buf[4]; 301 + u8 buf[4], sc_c = ctrl->concat ? 1 : 0; 302 302 int ret; 303 303 304 304 hash_name = nvme_auth_hmac_name(ctrl->shash_id); ··· 367 367 ret = crypto_shash_update(shash, buf, 2); 368 368 if (ret) 369 369 goto out; 370 - memset(buf, 0, 4); 370 + *buf = sc_c; 371 371 ret = crypto_shash_update(shash, buf, 1); 372 372 if (ret) 373 373 goto out; 374 374 ret = crypto_shash_update(shash, "HostHost", 8); 375 375 if (ret) 376 376 goto out; 377 + memset(buf, 0, 4); 377 378 ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); 378 379 if (ret) 379 380 goto out;
+32
drivers/pci/controller/dwc/pcie-qcom.c
··· 247 247 int (*get_resources)(struct qcom_pcie *pcie); 248 248 int (*init)(struct qcom_pcie *pcie); 249 249 int (*post_init)(struct qcom_pcie *pcie); 250 + void (*host_post_init)(struct qcom_pcie *pcie); 250 251 void (*deinit)(struct qcom_pcie *pcie); 251 252 void (*ltssm_enable)(struct qcom_pcie *pcie); 252 253 int (*config_sid)(struct qcom_pcie *pcie); ··· 1039 1038 return 0; 1040 1039 } 1041 1040 1041 + static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) 1042 + { 1043 + /* 1044 + * Downstream devices need to be in D0 state before enabling PCI PM 1045 + * substates. 1046 + */ 1047 + pci_set_power_state_locked(pdev, PCI_D0); 1048 + pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); 1049 + 1050 + return 0; 1051 + } 1052 + 1053 + static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie) 1054 + { 1055 + struct dw_pcie_rp *pp = &pcie->pci->pp; 1056 + 1057 + pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); 1058 + } 1059 + 1042 1060 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) 1043 1061 { 1044 1062 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; ··· 1332 1312 pcie->cfg->ops->deinit(pcie); 1333 1313 } 1334 1314 1315 + static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) 1316 + { 1317 + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1318 + struct qcom_pcie *pcie = to_qcom_pcie(pci); 1319 + 1320 + if (pcie->cfg->ops->host_post_init) 1321 + pcie->cfg->ops->host_post_init(pcie); 1322 + } 1323 + 1335 1324 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { 1336 1325 .init = qcom_pcie_host_init, 1337 1326 .deinit = qcom_pcie_host_deinit, 1327 + .post_init = qcom_pcie_host_post_init, 1338 1328 }; 1339 1329 1340 1330 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ ··· 1406 1376 .get_resources = qcom_pcie_get_resources_2_7_0, 1407 1377 .init = qcom_pcie_init_2_7_0, 1408 1378 .post_init = qcom_pcie_post_init_2_7_0, 1379 + .host_post_init = qcom_pcie_host_post_init_2_7_0, 1409 1380 .deinit = qcom_pcie_deinit_2_7_0, 1410 1381 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1411 1382 .config_sid = qcom_pcie_config_sid_1_9_0, ··· 1417 1386 .get_resources = qcom_pcie_get_resources_2_7_0, 1418 1387 .init = qcom_pcie_init_2_7_0, 1419 1388 .post_init = qcom_pcie_post_init_2_7_0, 1389 + .host_post_init = qcom_pcie_host_post_init_2_7_0, 1420 1390 .deinit = qcom_pcie_deinit_2_7_0, 1421 1391 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1422 1392 };
+1 -1
drivers/pci/setup-bus.c
··· 1604 1604 pbus_size_io(bus, realloc_head ? 0 : additional_io_size, 1605 1605 additional_io_size, realloc_head); 1606 1606 1607 - if (pref) { 1607 + if (pref && (pref->flags & IORESOURCE_PREFETCH)) { 1608 1608 pbus_size_mem(bus, 1609 1609 IORESOURCE_MEM | IORESOURCE_PREFETCH | 1610 1610 (pref->flags & IORESOURCE_MEM_64),
+2
drivers/regulator/bd718x7-regulator.c
··· 1613 1613 step /= r1; 1614 1614 1615 1615 new[j].min = min; 1616 + new[j].min_sel = desc->linear_ranges[j].min_sel; 1617 + new[j].max_sel = desc->linear_ranges[j].max_sel; 1616 1618 new[j].step = step; 1617 1619 1618 1620 dev_dbg(dev, "%s: old range min %d, step %d\n",
+3 -2
drivers/scsi/hosts.c
··· 611 611 { 612 612 int cnt = 0; 613 613 614 - blk_mq_tagset_busy_iter(&shost->tag_set, 615 - scsi_host_check_in_flight, &cnt); 614 + if (shost->tag_set.ops) 615 + blk_mq_tagset_busy_iter(&shost->tag_set, 616 + scsi_host_check_in_flight, &cnt); 616 617 return cnt; 617 618 } 618 619 EXPORT_SYMBOL(scsi_host_busy);
+2 -2
drivers/scsi/scsi_error.c
··· 554 554 * happened, even if someone else gets the sense data. 555 555 */ 556 556 if (sshdr.asc == 0x28) 557 - scmd->device->ua_new_media_ctr++; 557 + atomic_inc(&sdev->ua_new_media_ctr); 558 558 else if (sshdr.asc == 0x29) 559 - scmd->device->ua_por_ctr++; 559 + atomic_inc(&sdev->ua_por_ctr); 560 560 } 561 561 562 562 if (scsi_sense_is_deferred(&sshdr))
+1
drivers/spi/spi-intel-pci.c
··· 80 80 { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info }, 81 81 { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, 82 82 { PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info }, 83 + { PCI_VDEVICE(INTEL, 0x5825), (unsigned long)&cnl_info }, 83 84 { PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info }, 84 85 { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info }, 85 86 { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
+17 -11
drivers/ufs/core/ufshcd.c
··· 4282 4282 get, UIC_GET_ATTR_ID(attr_sel), 4283 4283 UFS_UIC_COMMAND_RETRIES - retries); 4284 4284 4285 - if (mib_val && !ret) 4286 - *mib_val = uic_cmd.argument3; 4285 + if (mib_val) 4286 + *mib_val = ret == 0 ? uic_cmd.argument3 : 0; 4287 4287 4288 4288 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) 4289 4289 && pwr_mode_change) ··· 4999 4999 5000 5000 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) 5001 5001 { 5002 - int tx_lanes = 0, i, err = 0; 5002 + int tx_lanes, i, err = 0; 5003 5003 5004 5004 if (!peer) 5005 5005 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), ··· 6673 6673 hba->saved_uic_err, hba->force_reset, 6674 6674 ufshcd_is_link_broken(hba) ? "; link is broken" : ""); 6675 6675 6676 + /* 6677 + * Use ufshcd_rpm_get_noresume() here to safely perform link recovery 6678 + * even if an error occurs during runtime suspend or runtime resume. 6679 + * This avoids potential deadlocks that could happen if we tried to 6680 + * resume the device while a PM operation is already in progress. 6681 + */ 6682 + ufshcd_rpm_get_noresume(hba); 6683 + if (hba->pm_op_in_progress) { 6684 + ufshcd_link_recovery(hba); 6685 + ufshcd_rpm_put(hba); 6686 + return; 6687 + } 6688 + ufshcd_rpm_put(hba); 6689 + 6676 6690 down(&hba->host_sem); 6677 6691 spin_lock_irqsave(hba->host->host_lock, flags); 6678 6692 if (ufshcd_err_handling_should_stop(hba)) { ··· 6697 6683 return; 6698 6684 } 6699 6685 spin_unlock_irqrestore(hba->host->host_lock, flags); 6700 - 6701 - ufshcd_rpm_get_noresume(hba); 6702 - if (hba->pm_op_in_progress) { 6703 - ufshcd_link_recovery(hba); 6704 - ufshcd_rpm_put(hba); 6705 - return; 6706 - } 6707 - ufshcd_rpm_put(hba); 6708 6686 6709 6687 ufshcd_err_handling_prepare(hba); 6710 6688
+110 -63
drivers/vfio/vfio_iommu_type1.c
··· 38 38 #include <linux/workqueue.h> 39 39 #include <linux/notifier.h> 40 40 #include <linux/mm_inline.h> 41 + #include <linux/overflow.h> 41 42 #include "vfio.h" 42 43 43 44 #define DRIVER_VERSION "0.2" ··· 168 167 { 169 168 struct rb_node *node = iommu->dma_list.rb_node; 170 169 170 + WARN_ON(!size); 171 + 171 172 while (node) { 172 173 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); 173 174 174 - if (start + size <= dma->iova) 175 + if (start + size - 1 < dma->iova) 175 176 node = node->rb_left; 176 - else if (start >= dma->iova + dma->size) 177 + else if (start > dma->iova + dma->size - 1) 177 178 node = node->rb_right; 178 179 else 179 180 return dma; ··· 185 182 } 186 183 187 184 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, 188 - dma_addr_t start, u64 size) 185 + dma_addr_t start, 186 + dma_addr_t end) 189 187 { 190 188 struct rb_node *res = NULL; 191 189 struct rb_node *node = iommu->dma_list.rb_node; 192 190 struct vfio_dma *dma_res = NULL; 193 191 192 + WARN_ON(end < start); 193 + 194 194 while (node) { 195 195 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); 196 196 197 - if (start < dma->iova + dma->size) { 197 + if (start <= dma->iova + dma->size - 1) { 198 198 res = node; 199 199 dma_res = dma; 200 200 if (start >= dma->iova) ··· 207 201 node = node->rb_right; 208 202 } 209 203 } 210 - if (res && size && dma_res->iova >= start + size) 204 + if (res && dma_res->iova > end) 211 205 res = NULL; 212 206 return res; 213 207 } ··· 217 211 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; 218 212 struct vfio_dma *dma; 219 213 214 + WARN_ON(new->size != 0); 215 + 220 216 while (*link) { 221 217 parent = *link; 222 218 dma = rb_entry(parent, struct vfio_dma, node); 223 219 224 - if (new->iova + new->size <= dma->iova) 220 + if (new->iova <= dma->iova) 225 221 link = &(*link)->rb_left; 226 222 else 227 223 link = &(*link)->rb_right; ··· 903 895 unsigned long remote_vaddr; 904 896 struct vfio_dma *dma; 905 897 bool do_accounting; 898 + dma_addr_t iova_end; 899 + size_t iova_size; 906 900 907 - if (!iommu || !pages) 901 + if (!iommu || !pages || npage <= 0) 908 902 return -EINVAL; 909 903 910 904 /* Supported for v2 version only */ 911 905 if (!iommu->v2) 912 906 return -EACCES; 907 + 908 + if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) || 909 + check_add_overflow(user_iova, iova_size - 1, &iova_end)) 910 + return -EOVERFLOW; 913 911 914 912 mutex_lock(&iommu->lock); 915 913 ··· 1022 1008 { 1023 1009 struct vfio_iommu *iommu = iommu_data; 1024 1010 bool do_accounting; 1011 + dma_addr_t iova_end; 1012 + size_t iova_size; 1025 1013 int i; 1026 1014 1027 1015 /* Supported for v2 version only */ 1028 1016 if (WARN_ON(!iommu->v2)) 1017 + return; 1018 + 1019 + if (WARN_ON(npage <= 0)) 1020 + return; 1021 + 1022 + if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) || 1023 + check_add_overflow(user_iova, iova_size - 1, &iova_end))) 1029 1024 return; 1030 1025 1031 1026 mutex_lock(&iommu->lock); ··· 1090 1067 #define VFIO_IOMMU_TLB_SYNC_MAX 512 1091 1068 1092 1069 static size_t unmap_unpin_fast(struct vfio_domain *domain, 1093 - struct vfio_dma *dma, dma_addr_t *iova, 1070 + struct vfio_dma *dma, dma_addr_t iova, 1094 1071 size_t len, phys_addr_t phys, long *unlocked, 1095 1072 struct list_head *unmapped_list, 1096 1073 int *unmapped_cnt, ··· 1100 1077 struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1101 1078 1102 1079 if (entry) { 1103 - unmapped = iommu_unmap_fast(domain->domain, *iova, 
len, 1080 + unmapped = iommu_unmap_fast(domain->domain, iova, len, 1104 1081 iotlb_gather); 1105 1082 1106 1083 if (!unmapped) { 1107 1084 kfree(entry); 1108 1085 } else { 1109 - entry->iova = *iova; 1086 + entry->iova = iova; 1110 1087 entry->phys = phys; 1111 1088 entry->len = unmapped; 1112 1089 list_add_tail(&entry->list, unmapped_list); 1113 1090 1114 - *iova += unmapped; 1115 1091 (*unmapped_cnt)++; 1116 1092 } 1117 1093 } ··· 1129 1107 } 1130 1108 1131 1109 static size_t unmap_unpin_slow(struct vfio_domain *domain, 1132 - struct vfio_dma *dma, dma_addr_t *iova, 1110 + struct vfio_dma *dma, dma_addr_t iova, 1133 1111 size_t len, phys_addr_t phys, 1134 1112 long *unlocked) 1135 1113 { 1136 - size_t unmapped = iommu_unmap(domain->domain, *iova, len); 1114 + size_t unmapped = iommu_unmap(domain->domain, iova, len); 1137 1115 1138 1116 if (unmapped) { 1139 - *unlocked += vfio_unpin_pages_remote(dma, *iova, 1117 + *unlocked += vfio_unpin_pages_remote(dma, iova, 1140 1118 phys >> PAGE_SHIFT, 1141 1119 unmapped >> PAGE_SHIFT, 1142 1120 false); 1143 - *iova += unmapped; 1144 1121 cond_resched(); 1145 1122 } 1146 1123 return unmapped; ··· 1148 1127 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, 1149 1128 bool do_accounting) 1150 1129 { 1151 - dma_addr_t iova = dma->iova, end = dma->iova + dma->size; 1152 1130 struct vfio_domain *domain, *d; 1153 1131 LIST_HEAD(unmapped_region_list); 1154 1132 struct iommu_iotlb_gather iotlb_gather; 1155 1133 int unmapped_region_cnt = 0; 1156 1134 long unlocked = 0; 1135 + size_t pos = 0; 1157 1136 1158 1137 if (!dma->size) 1159 1138 return 0; ··· 1177 1156 } 1178 1157 1179 1158 iommu_iotlb_gather_init(&iotlb_gather); 1180 - while (iova < end) { 1159 + while (pos < dma->size) { 1181 1160 size_t unmapped, len; 1182 1161 phys_addr_t phys, next; 1162 + dma_addr_t iova = dma->iova + pos; 1183 1163 1184 1164 phys = iommu_iova_to_phys(domain->domain, iova); 1185 1165 if (WARN_ON(!phys)) { 1186 - iova += PAGE_SIZE; 1166 + pos += PAGE_SIZE; 1187 1167 continue; 1188 1168 } 1189 1169 ··· 1193 1171 * may require hardware cache flushing, try to find the 1194 1172 * largest contiguous physical memory chunk to unmap. 1195 1173 */ 1196 - for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) { 1174 + for (len = PAGE_SIZE; pos + len < dma->size; len += PAGE_SIZE) { 1197 1175 next = iommu_iova_to_phys(domain->domain, iova + len); 1198 1176 if (next != phys + len) 1199 1177 break; ··· 1203 1181 * First, try to use fast unmap/unpin. In case of failure, 1204 1182 * switch to slow unmap/unpin path. 
1205 1183 */ 1206 - unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, 1184 + unmapped = unmap_unpin_fast(domain, dma, iova, len, phys, 1207 1185 &unlocked, &unmapped_region_list, 1208 1186 &unmapped_region_cnt, 1209 1187 &iotlb_gather); 1210 1188 if (!unmapped) { 1211 - unmapped = unmap_unpin_slow(domain, dma, &iova, len, 1189 + unmapped = unmap_unpin_slow(domain, dma, iova, len, 1212 1190 phys, &unlocked); 1213 1191 if (WARN_ON(!unmapped)) 1214 1192 break; 1215 1193 } 1194 + 1195 + pos += unmapped; 1216 1196 } 1217 1197 1218 1198 dma->iommu_mapped = false; ··· 1306 1282 } 1307 1283 1308 1284 static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, 1309 - dma_addr_t iova, size_t size, size_t pgsize) 1285 + dma_addr_t iova, dma_addr_t iova_end, size_t pgsize) 1310 1286 { 1311 1287 struct vfio_dma *dma; 1312 1288 struct rb_node *n; ··· 1323 1299 if (dma && dma->iova != iova) 1324 1300 return -EINVAL; 1325 1301 1326 - dma = vfio_find_dma(iommu, iova + size - 1, 0); 1327 - if (dma && dma->iova + dma->size != iova + size) 1302 + dma = vfio_find_dma(iommu, iova_end, 1); 1303 + if (dma && dma->iova + dma->size - 1 != iova_end) 1328 1304 return -EINVAL; 1329 1305 1330 1306 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { ··· 1333 1309 if (dma->iova < iova) 1334 1310 continue; 1335 1311 1336 - if (dma->iova > iova + size - 1) 1312 + if (dma->iova > iova_end) 1337 1313 break; 1338 1314 1339 1315 ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize); ··· 1398 1374 int ret = -EINVAL, retries = 0; 1399 1375 unsigned long pgshift; 1400 1376 dma_addr_t iova = unmap->iova; 1401 - u64 size = unmap->size; 1377 + dma_addr_t iova_end; 1378 + size_t size = unmap->size; 1402 1379 bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; 1403 1380 bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; 1404 1381 struct rb_node *n, *first_n; ··· 1412 1387 goto unlock; 1413 1388 } 1414 1389 1390 + if (iova != unmap->iova || size != unmap->size) { 1391 + ret = -EOVERFLOW; 1392 + goto unlock; 1393 + } 1394 + 1415 1395 pgshift = __ffs(iommu->pgsize_bitmap); 1416 1396 pgsize = (size_t)1 << pgshift; 1417 1397 ··· 1426 1396 if (unmap_all) { 1427 1397 if (iova || size) 1428 1398 goto unlock; 1429 - size = U64_MAX; 1430 - } else if (!size || size & (pgsize - 1) || 1431 - iova + size - 1 < iova || size > SIZE_MAX) { 1432 - goto unlock; 1399 + iova_end = ~(dma_addr_t)0; 1400 + } else { 1401 + if (!size || size & (pgsize - 1)) 1402 + goto unlock; 1403 + 1404 + if (check_add_overflow(iova, size - 1, &iova_end)) { 1405 + ret = -EOVERFLOW; 1406 + goto unlock; 1407 + } 1433 1408 } 1434 1409 1435 1410 /* When dirty tracking is enabled, allow only min supported pgsize */ ··· 1481 1446 if (dma && dma->iova != iova) 1482 1447 goto unlock; 1483 1448 1484 - dma = vfio_find_dma(iommu, iova + size - 1, 0); 1485 - if (dma && dma->iova + dma->size != iova + size) 1449 + dma = vfio_find_dma(iommu, iova_end, 1); 1450 + if (dma && dma->iova + dma->size - 1 != iova_end) 1486 1451 goto unlock; 1487 1452 } 1488 1453 1489 1454 ret = 0; 1490 - n = first_n = vfio_find_dma_first_node(iommu, iova, size); 1455 + n = first_n = vfio_find_dma_first_node(iommu, iova, iova_end); 1491 1456 1492 1457 while (n) { 1493 1458 dma = rb_entry(n, struct vfio_dma, node); 1494 - if (dma->iova >= iova + size) 1459 + if (dma->iova > iova_end) 1495 1460 break; 1496 1461 1497 1462 if (!iommu->v2 && iova > dma->iova) ··· 1683 1648 { 1684 1649 bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR; 1685 1650 dma_addr_t 
iova = map->iova; 1651 + dma_addr_t iova_end; 1686 1652 unsigned long vaddr = map->vaddr; 1653 + unsigned long vaddr_end; 1687 1654 size_t size = map->size; 1688 1655 int ret = 0, prot = 0; 1689 1656 size_t pgsize; ··· 1693 1656 1694 1657 /* Verify that none of our __u64 fields overflow */ 1695 1658 if (map->size != size || map->vaddr != vaddr || map->iova != iova) 1659 + return -EOVERFLOW; 1660 + 1661 + if (!size) 1696 1662 return -EINVAL; 1663 + 1664 + if (check_add_overflow(iova, size - 1, &iova_end) || 1665 + check_add_overflow(vaddr, size - 1, &vaddr_end)) 1666 + return -EOVERFLOW; 1697 1667 1698 1668 /* READ/WRITE from device perspective */ 1699 1669 if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) ··· 1717 1673 1718 1674 WARN_ON((pgsize - 1) & PAGE_MASK); 1719 1675 1720 - if (!size || (size | iova | vaddr) & (pgsize - 1)) { 1721 - ret = -EINVAL; 1722 - goto out_unlock; 1723 - } 1724 - 1725 - /* Don't allow IOVA or virtual address wrap */ 1726 - if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { 1676 + if ((size | iova | vaddr) & (pgsize - 1)) { 1727 1677 ret = -EINVAL; 1728 1678 goto out_unlock; 1729 1679 } ··· 1748 1710 goto out_unlock; 1749 1711 } 1750 1712 1751 - if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { 1713 + if (!vfio_iommu_iova_dma_valid(iommu, iova, iova_end)) { 1752 1714 ret = -EINVAL; 1753 1715 goto out_unlock; 1754 1716 } ··· 1821 1783 1822 1784 for (; n; n = rb_next(n)) { 1823 1785 struct vfio_dma *dma; 1824 - dma_addr_t iova; 1786 + size_t pos = 0; 1825 1787 1826 1788 dma = rb_entry(n, struct vfio_dma, node); 1827 - iova = dma->iova; 1828 1789 1829 - while (iova < dma->iova + dma->size) { 1790 + while (pos < dma->size) { 1791 + dma_addr_t iova = dma->iova + pos; 1830 1792 phys_addr_t phys; 1831 1793 size_t size; 1832 1794 ··· 1842 1804 phys = iommu_iova_to_phys(d->domain, iova); 1843 1805 1844 1806 if (WARN_ON(!phys)) { 1845 - iova += PAGE_SIZE; 1807 + pos += PAGE_SIZE; 1846 1808 continue; 1847 1809 } 1848 1810 1849 1811 size = PAGE_SIZE; 1850 1812 p = phys + size; 1851 1813 i = iova + size; 1852 - while (i < dma->iova + dma->size && 1814 + while (pos + size < dma->size && 1853 1815 p == iommu_iova_to_phys(d->domain, i)) { 1854 1816 size += PAGE_SIZE; 1855 1817 p += PAGE_SIZE; ··· 1857 1819 } 1858 1820 } else { 1859 1821 unsigned long pfn; 1860 - unsigned long vaddr = dma->vaddr + 1861 - (iova - dma->iova); 1862 - size_t n = dma->iova + dma->size - iova; 1822 + unsigned long vaddr = dma->vaddr + pos; 1823 + size_t n = dma->size - pos; 1863 1824 long npage; 1864 1825 1865 1826 npage = vfio_pin_pages_remote(dma, vaddr, ··· 1889 1852 goto unwind; 1890 1853 } 1891 1854 1892 - iova += size; 1855 + pos += size; 1893 1856 } 1894 1857 } 1895 1858 ··· 1906 1869 unwind: 1907 1870 for (; n; n = rb_prev(n)) { 1908 1871 struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); 1909 - dma_addr_t iova; 1872 + size_t pos = 0; 1910 1873 1911 1874 if (dma->iommu_mapped) { 1912 1875 iommu_unmap(domain->domain, dma->iova, dma->size); 1913 1876 continue; 1914 1877 } 1915 1878 1916 - iova = dma->iova; 1917 - while (iova < dma->iova + dma->size) { 1879 + while (pos < dma->size) { 1880 + dma_addr_t iova = dma->iova + pos; 1918 1881 phys_addr_t phys, p; 1919 1882 size_t size; 1920 1883 dma_addr_t i; 1921 1884 1922 1885 phys = iommu_iova_to_phys(domain->domain, iova); 1923 1886 if (!phys) { 1924 - iova += PAGE_SIZE; 1887 + pos += PAGE_SIZE; 1925 1888 continue; 1926 1889 } 1927 1890 1928 1891 size = PAGE_SIZE; 1929 1892 p = phys + size; 1930 1893 i = iova + size; 1931 - 
while (i < dma->iova + dma->size && 1894 + while (pos + size < dma->size && 1932 1895 p == iommu_iova_to_phys(domain->domain, i)) { 1933 1896 size += PAGE_SIZE; 1934 1897 p += PAGE_SIZE; ··· 3014 2977 struct vfio_iommu_type1_dirty_bitmap_get range; 3015 2978 unsigned long pgshift; 3016 2979 size_t data_size = dirty.argsz - minsz; 3017 - size_t iommu_pgsize; 2980 + size_t size, iommu_pgsize; 2981 + dma_addr_t iova, iova_end; 3018 2982 3019 2983 if (!data_size || data_size < sizeof(range)) 3020 2984 return -EINVAL; ··· 3024 2986 sizeof(range))) 3025 2987 return -EFAULT; 3026 2988 3027 - if (range.iova + range.size < range.iova) 2989 + iova = range.iova; 2990 + size = range.size; 2991 + 2992 + if (iova != range.iova || size != range.size) 2993 + return -EOVERFLOW; 2994 + 2995 + if (!size) 3028 2996 return -EINVAL; 2997 + 2998 + if (check_add_overflow(iova, size - 1, &iova_end)) 2999 + return -EOVERFLOW; 3000 + 3029 3001 if (!access_ok((void __user *)range.bitmap.data, 3030 3002 range.bitmap.size)) 3031 3003 return -EINVAL; 3032 3004 3033 3005 pgshift = __ffs(range.bitmap.pgsize); 3034 - ret = verify_bitmap_size(range.size >> pgshift, 3006 + ret = verify_bitmap_size(size >> pgshift, 3035 3007 range.bitmap.size); 3036 3008 if (ret) 3037 3009 return ret; ··· 3055 3007 ret = -EINVAL; 3056 3008 goto out_unlock; 3057 3009 } 3058 - if (range.iova & (iommu_pgsize - 1)) { 3010 + if (iova & (iommu_pgsize - 1)) { 3059 3011 ret = -EINVAL; 3060 3012 goto out_unlock; 3061 3013 } 3062 - if (!range.size || range.size & (iommu_pgsize - 1)) { 3014 + if (size & (iommu_pgsize - 1)) { 3063 3015 ret = -EINVAL; 3064 3016 goto out_unlock; 3065 3017 } 3066 3018 3067 3019 if (iommu->dirty_page_tracking) 3068 3020 ret = vfio_iova_dirty_bitmap(range.bitmap.data, 3069 - iommu, range.iova, 3070 - range.size, 3021 + iommu, iova, iova_end, 3071 3022 range.bitmap.pgsize); 3072 3023 else 3073 3024 ret = -EINVAL;
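Every hunk above makes the same substitution: exclusive range ends of the form start + size become inclusive ends start + size - 1, with check_add_overflow() catching the wrap explicitly. The reason is that a mapping ending flush against the top of the IOVA space makes start + size wrap to a small value, silently inverting every comparison against it, while start + size - 1 stays representable for any non-zero size. A userspace sketch of the arithmetic, using the compiler builtin that the kernel's check_add_overflow() wraps:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = UINT64_MAX - 0xfff;	/* 4 KiB below the top */
	uint64_t size = 0x2000;			/* 8 KiB: runs off the end */
	uint64_t end;

	if (__builtin_add_overflow(start, size - 1, &end))
		printf("rejected with -EOVERFLOW\n");	/* branch taken here */
	else
		printf("inclusive end = %#llx\n", (unsigned long long)end);

	/* the old exclusive form wraps instead of failing: */
	printf("start + size wraps to %#llx\n",
	       (unsigned long long)(start + size));
	return 0;
}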
+6 -2
drivers/video/fbdev/aty/atyfb_base.c
··· 2614 2614 pr_cont("\n"); 2615 2615 } 2616 2616 #endif 2617 - if (par->pll_ops->init_pll) 2618 - par->pll_ops->init_pll(info, &par->pll); 2617 + if (par->pll_ops->init_pll) { 2618 + ret = par->pll_ops->init_pll(info, &par->pll); 2619 + if (ret) 2620 + return ret; 2621 + } 2622 + 2619 2623 if (par->pll_ops->resume_pll) 2620 2624 par->pll_ops->resume_pll(info, &par->pll); 2621 2625
+12 -4
drivers/video/fbdev/core/bitblit.c
··· 79 79 struct fb_image *image, u8 *buf, u8 *dst) 80 80 { 81 81 u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; 82 + unsigned int charcnt = vc->vc_font.charcount; 82 83 u32 idx = vc->vc_font.width >> 3; 83 84 u8 *src; 84 85 85 86 while (cnt--) { 86 - src = vc->vc_font.data + (scr_readw(s++)& 87 - charmask)*cellsize; 87 + u16 ch = scr_readw(s++) & charmask; 88 + 89 + if (ch >= charcnt) 90 + ch = 0; 91 + src = vc->vc_font.data + (unsigned int)ch * cellsize; 88 92 89 93 if (attr) { 90 94 update_attr(buf, src, attr, vc); ··· 116 112 u8 *dst) 117 113 { 118 114 u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; 115 + unsigned int charcnt = vc->vc_font.charcount; 119 116 u32 shift_low = 0, mod = vc->vc_font.width % 8; 120 117 u32 shift_high = 8; 121 118 u32 idx = vc->vc_font.width >> 3; 122 119 u8 *src; 123 120 124 121 while (cnt--) { 125 - src = vc->vc_font.data + (scr_readw(s++)& 126 - charmask)*cellsize; 122 + u16 ch = scr_readw(s++) & charmask; 123 + 124 + if (ch >= charcnt) 125 + ch = 0; 126 + src = vc->vc_font.data + (unsigned int)ch * cellsize; 127 127 128 128 if (attr) { 129 129 update_attr(buf, src, attr, vc);
+19
drivers/video/fbdev/core/fbcon.c
··· 2810 2810 return found; 2811 2811 } 2812 2812 2813 + static void fbcon_delete_mode(struct fb_videomode *m) 2814 + { 2815 + struct fbcon_display *p; 2816 + 2817 + for (int i = first_fb_vc; i <= last_fb_vc; i++) { 2818 + p = &fb_display[i]; 2819 + if (p->mode == m) 2820 + p->mode = NULL; 2821 + } 2822 + } 2823 + 2824 + void fbcon_delete_modelist(struct list_head *head) 2825 + { 2826 + struct fb_modelist *modelist; 2827 + 2828 + list_for_each_entry(modelist, head, list) 2829 + fbcon_delete_mode(&modelist->mode); 2830 + } 2831 + 2813 2832 #ifdef CONFIG_VT_HW_CONSOLE_BINDING 2814 2833 static void fbcon_unbind(void) 2815 2834 {
+1
drivers/video/fbdev/core/fbmem.c
··· 544 544 fb_info->pixmap.addr = NULL; 545 545 } 546 546 547 + fbcon_delete_modelist(&fb_info->modelist); 547 548 fb_destroy_modelist(&fb_info->modelist); 548 549 registered_fb[fb_info->node] = NULL; 549 550 num_registered_fb--;
+1 -1
drivers/video/fbdev/pvr2fb.c
··· 192 192 193 193 #ifdef CONFIG_PVR2_DMA 194 194 static unsigned int shdma = PVR2_CASCADE_CHAN; 195 - static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; 195 + static unsigned int pvr2dma = CONFIG_NR_ONCHIP_DMA_CHANNELS; 196 196 #endif 197 197 198 198 static struct fb_videomode pvr2_modedb[] = {
+2
drivers/video/fbdev/valkyriefb.c
··· 329 329 330 330 if (of_address_to_resource(dp, 0, &r)) { 331 331 printk(KERN_ERR "can't find address for valkyrie\n"); 332 + of_node_put(dp); 332 333 return 0; 333 334 } 334 335 335 336 frame_buffer_phys = r.start; 336 337 cmap_regs_phys = r.start + 0x304000; 338 + of_node_put(dp); 337 339 } 338 340 #endif /* ppc (!CONFIG_MAC) */ 339 341
+16 -5
fs/nfsd/nfs4proc.c
··· 988 988 static void 989 989 nfsd4_read_release(union nfsd4_op_u *u) 990 990 { 991 - if (u->read.rd_nf) 991 + if (u->read.rd_nf) { 992 + trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, 993 + u->read.rd_offset, u->read.rd_length); 992 994 nfsd_file_put(u->read.rd_nf); 993 - trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, 994 - u->read.rd_offset, u->read.rd_length); 995 + } 995 996 } 996 997 997 998 static __be32 ··· 2893 2892 2894 2893 rqstp->rq_lease_breaker = (void **)&cstate->clp; 2895 2894 2896 - trace_nfsd_compound(rqstp, args->tag, args->taglen, args->opcnt); 2895 + trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt); 2897 2896 while (!status && resp->opcnt < args->opcnt) { 2898 2897 op = &args->ops[resp->opcnt++]; 2898 + 2899 + if (unlikely(resp->opcnt == NFSD_MAX_OPS_PER_COMPOUND)) { 2900 + /* If there are still more operations to process, 2901 + * stop here and report NFS4ERR_RESOURCE. */ 2902 + if (cstate->minorversion == 0 && 2903 + args->client_opcnt > resp->opcnt) { 2904 + op->status = nfserr_resource; 2905 + goto encode_op; 2906 + } 2907 + } 2899 2908 2900 2909 /* 2901 2910 * The XDR decode routines may have pre-set op->status; ··· 2983 2972 status = op->status; 2984 2973 } 2985 2974 2986 - trace_nfsd_compound_status(args->opcnt, resp->opcnt, 2975 + trace_nfsd_compound_status(args->client_opcnt, resp->opcnt, 2987 2976 status, nfsd4_op_name(op->opnum)); 2988 2977 2989 2978 nfsd4_cstate_clear_replay(cstate);
+1
fs/nfsd/nfs4state.c
··· 3902 3902 ca->headerpadsz = 0; 3903 3903 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 3904 3904 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 3905 + ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 3905 3906 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 3906 3907 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 3907 3908 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
+14 -7
fs/nfsd/nfs4xdr.c
··· 2488 2488 2489 2489 if (xdr_stream_decode_u32(argp->xdr, &argp->minorversion) < 0) 2490 2490 return false; 2491 - if (xdr_stream_decode_u32(argp->xdr, &argp->opcnt) < 0) 2491 + if (xdr_stream_decode_u32(argp->xdr, &argp->client_opcnt) < 0) 2492 2492 return false; 2493 + argp->opcnt = min_t(u32, argp->client_opcnt, 2494 + NFSD_MAX_OPS_PER_COMPOUND); 2493 2495 2494 2496 if (argp->opcnt > ARRAY_SIZE(argp->iops)) { 2495 2497 argp->ops = vcalloc(argp->opcnt, sizeof(*argp->ops)); ··· 2630 2628 __be32 *p; 2631 2629 __be32 pathlen; 2632 2630 int pathlen_offset; 2633 - int strlen, count=0; 2634 2631 char *str, *end, *next; 2635 - 2636 - dprintk("nfsd4_encode_components(%s)\n", components); 2632 + int count = 0; 2637 2633 2638 2634 pathlen_offset = xdr->buf->len; 2639 2635 p = xdr_reserve_space(xdr, 4); ··· 2658 2658 for (; *end && (*end != sep); end++) 2659 2659 /* find sep or end of string */; 2660 2660 2661 - strlen = end - str; 2662 - if (strlen) { 2663 - if (xdr_stream_encode_opaque(xdr, str, strlen) < 0) 2661 + if (end > str) { 2662 + if (xdr_stream_encode_opaque(xdr, str, end - str) < 0) 2664 2663 return nfserr_resource; 2665 2664 count++; 2666 2665 } else ··· 2937 2938 2938 2939 typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr, 2939 2940 const struct nfsd4_fattr_args *args); 2941 + 2942 + static __be32 nfsd4_encode_fattr4__inval(struct xdr_stream *xdr, 2943 + const struct nfsd4_fattr_args *args) 2944 + { 2945 + return nfserr_inval; 2946 + } 2940 2947 2941 2948 static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr, 2942 2949 const struct nfsd4_fattr_args *args) ··· 3565 3560 3566 3561 [FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop, 3567 3562 [FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support, 3563 + [FATTR4_TIME_DELEG_ACCESS] = nfsd4_encode_fattr4__inval, 3564 + [FATTR4_TIME_DELEG_MODIFY] = nfsd4_encode_fattr4__inval, 3568 3565 [FATTR4_OPEN_ARGUMENTS] = nfsd4_encode_fattr4_open_arguments, 3569 3566 }; 3570 3567
+3
fs/nfsd/nfsd.h
··· 57 57 __be32 err; /* 0, nfserr, or nfserr_eof */ 58 58 }; 59 59 60 + /* Maximum number of operations per session compound */ 61 + #define NFSD_MAX_OPS_PER_COMPOUND 200 62 + 60 63 struct nfsd_genl_rqstp { 61 64 struct sockaddr rq_daddr; 62 65 struct sockaddr rq_saddr;
+1
fs/nfsd/xdr4.h
··· 903 903 char * tag; 904 904 u32 taglen; 905 905 u32 minorversion; 906 + u32 client_opcnt; 906 907 u32 opcnt; 907 908 bool splice_ok; 908 909 struct nfsd4_op *ops;
+1 -1
fs/smb/client/cifsfs.c
··· 173 173 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1"); 174 174 175 175 module_param(enable_gcm_256, bool, 0644); 176 - MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0"); 176 + MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1"); 177 177 178 178 module_param(require_gcm_256, bool, 0644); 179 179 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
+2
fs/smb/client/cifsproto.h
··· 616 616 extern struct TCP_Server_Info * 617 617 cifs_find_tcp_session(struct smb3_fs_context *ctx); 618 618 619 + struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal); 620 + 619 621 void __cifs_put_smb_ses(struct cifs_ses *ses); 620 622 621 623 extern struct cifs_ses *
+19 -27
fs/smb/client/connect.c
··· 310 310 server->ssocket->flags); 311 311 sock_release(server->ssocket); 312 312 server->ssocket = NULL; 313 + } else if (cifs_rdma_enabled(server)) { 314 + smbd_destroy(server); 313 315 } 314 316 server->sequence_number = 0; 315 317 server->session_estab = false; ··· 339 337 list_del_init(&mid->qhead); 340 338 mid_execute_callback(mid); 341 339 release_mid(mid); 342 - } 343 - 344 - if (cifs_rdma_enabled(server)) { 345 - cifs_server_lock(server); 346 - smbd_destroy(server); 347 - cifs_server_unlock(server); 348 340 } 349 341 } 350 342 ··· 2011 2015 /** 2012 2016 * cifs_setup_ipc - helper to setup the IPC tcon for the session 2013 2017 * @ses: smb session to issue the request on 2014 - * @ctx: the superblock configuration context to use for building the 2015 - * new tree connection for the IPC (interprocess communication RPC) 2018 + * @seal: if encryption is requested 2016 2019 * 2017 2020 * A new IPC connection is made and stored in the session 2018 2021 * tcon_ipc. The IPC tcon has the same lifetime as the session. 2019 2022 */ 2020 - static int 2021 - cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) 2023 + struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal) 2022 2024 { 2023 2025 int rc = 0, xid; 2024 2026 struct cifs_tcon *tcon; 2025 2027 char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0}; 2026 - bool seal = false; 2027 2028 struct TCP_Server_Info *server = ses->server; 2028 2029 2029 2030 /* 2030 2031 * If the mount request that resulted in the creation of the 2031 2032 * session requires encryption, force IPC to be encrypted too. 2032 2033 */ 2033 - if (ctx->seal) { 2034 - if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) 2035 - seal = true; 2036 - else { 2037 - cifs_server_dbg(VFS, 2038 - "IPC: server doesn't support encryption\n"); 2039 - return -EOPNOTSUPP; 2040 - } 2034 + if (seal && !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) { 2035 + cifs_server_dbg(VFS, "IPC: server doesn't support encryption\n"); 2036 + return ERR_PTR(-EOPNOTSUPP); 2041 2037 } 2042 2038 2043 2039 /* no need to setup directory caching on IPC share, so pass in false */ 2044 2040 tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc); 2045 2041 if (tcon == NULL) 2046 - return -ENOMEM; 2042 + return ERR_PTR(-ENOMEM); 2047 2043 2048 2044 spin_lock(&server->srv_lock); 2049 2045 scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname); ··· 2045 2057 tcon->ses = ses; 2046 2058 tcon->ipc = true; 2047 2059 tcon->seal = seal; 2048 - rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls); 2060 + rc = server->ops->tree_connect(xid, ses, unc, tcon, ses->local_nls); 2049 2061 free_xid(xid); 2050 2062 2051 2063 if (rc) { 2052 - cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc); 2064 + cifs_server_dbg(VFS | ONCE, "failed to connect to IPC (rc=%d)\n", rc); 2053 2065 tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail); 2054 - goto out; 2066 + return ERR_PTR(rc); 2055 2067 } 2056 2068 2057 2069 cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid); ··· 2059 2071 spin_lock(&tcon->tc_lock); 2060 2072 tcon->status = TID_GOOD; 2061 2073 spin_unlock(&tcon->tc_lock); 2062 - ses->tcon_ipc = tcon; 2063 - out: 2064 - return rc; 2074 + return tcon; 2065 2075 } 2066 2076 2067 2077 static struct cifs_ses * ··· 2333 2347 { 2334 2348 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; 2335 2349 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; 2350 + struct cifs_tcon *ipc; 2336 2351 struct cifs_ses *ses; 2337 
2352 unsigned int xid; 2338 2353 int retries = 0; ··· 2512 2525 list_add(&ses->smb_ses_list, &server->smb_ses_list); 2513 2526 spin_unlock(&cifs_tcp_ses_lock); 2514 2527 2515 - cifs_setup_ipc(ses, ctx); 2528 + ipc = cifs_setup_ipc(ses, ctx->seal); 2529 + spin_lock(&cifs_tcp_ses_lock); 2530 + spin_lock(&ses->ses_lock); 2531 + ses->tcon_ipc = !IS_ERR(ipc) ? ipc : NULL; 2532 + spin_unlock(&ses->ses_lock); 2533 + spin_unlock(&cifs_tcp_ses_lock); 2516 2534 2517 2535 free_xid(xid); 2518 2536
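cifs_setup_ipc() now hands the tcon back to the caller, using the kernel's ERR_PTR convention to encode a precise errno in the returned pointer rather than returning an int and stashing the result in ses->tcon_ipc as a side effect; the caller above then publishes it under the session locks. A minimal sketch of the idiom from <linux/err.h> (the widget names are hypothetical):

static struct widget *widget_create(bool seal)
{
	struct widget *w;

	if (seal && !widget_supports_seal())	/* hypothetical capability check */
		return ERR_PTR(-EOPNOTSUPP);	/* errno rides in the pointer */

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);
	return w;
}

	/* caller side, mirroring the hunk above: */
	struct widget *w = widget_create(true);

	if (IS_ERR(w))
		w = NULL;	/* or return PTR_ERR(w) to propagate the errno */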
+47 -8
fs/smb/client/dfs_cache.c
··· 1120 1120 return match; 1121 1121 } 1122 1122 1123 - static bool is_ses_good(struct cifs_ses *ses) 1123 + static bool is_ses_good(struct cifs_tcon *tcon, struct cifs_ses *ses) 1124 1124 { 1125 1125 struct TCP_Server_Info *server = ses->server; 1126 - struct cifs_tcon *tcon = ses->tcon_ipc; 1126 + struct cifs_tcon *ipc = NULL; 1127 1127 bool ret; 1128 1128 1129 + spin_lock(&cifs_tcp_ses_lock); 1129 1130 spin_lock(&ses->ses_lock); 1130 1131 spin_lock(&ses->chan_lock); 1132 + 1131 1133 ret = !cifs_chan_needs_reconnect(ses, server) && 1132 - ses->ses_status == SES_GOOD && 1133 - !tcon->need_reconnect; 1134 + ses->ses_status == SES_GOOD; 1135 + 1134 1136 spin_unlock(&ses->chan_lock); 1137 + 1138 + if (!ret) 1139 + goto out; 1140 + 1141 + if (likely(ses->tcon_ipc)) { 1142 + if (ses->tcon_ipc->need_reconnect) { 1143 + ret = false; 1144 + goto out; 1145 + } 1146 + } else { 1147 + spin_unlock(&ses->ses_lock); 1148 + spin_unlock(&cifs_tcp_ses_lock); 1149 + 1150 + ipc = cifs_setup_ipc(ses, tcon->seal); 1151 + 1152 + spin_lock(&cifs_tcp_ses_lock); 1153 + spin_lock(&ses->ses_lock); 1154 + if (!IS_ERR(ipc)) { 1155 + if (!ses->tcon_ipc) { 1156 + ses->tcon_ipc = ipc; 1157 + ipc = NULL; 1158 + } 1159 + } else { 1160 + ret = false; 1161 + ipc = NULL; 1162 + } 1163 + } 1164 + 1165 + out: 1135 1166 spin_unlock(&ses->ses_lock); 1167 + spin_unlock(&cifs_tcp_ses_lock); 1168 + if (ipc && server->ops->tree_disconnect) { 1169 + unsigned int xid = get_xid(); 1170 + 1171 + (void)server->ops->tree_disconnect(xid, ipc); 1172 + _free_xid(xid); 1173 + } 1174 + tconInfoFree(ipc, netfs_trace_tcon_ref_free_ipc); 1136 1175 return ret; 1137 1176 } 1138 1177 1139 1178 /* Refresh dfs referral of @ses */ 1140 - static void refresh_ses_referral(struct cifs_ses *ses) 1179 + static void refresh_ses_referral(struct cifs_tcon *tcon, struct cifs_ses *ses) 1141 1180 { 1142 1181 struct cache_entry *ce; 1143 1182 unsigned int xid; ··· 1192 1153 } 1193 1154 1194 1155 ses = CIFS_DFS_ROOT_SES(ses); 1195 - if (!is_ses_good(ses)) { 1156 + if (!is_ses_good(tcon, ses)) { 1196 1157 cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", 1197 1158 __func__); 1198 1159 goto out; ··· 1280 1241 up_read(&htable_rw_lock); 1281 1242 1282 1243 ses = CIFS_DFS_ROOT_SES(ses); 1283 - if (!is_ses_good(ses)) { 1244 + if (!is_ses_good(tcon, ses)) { 1284 1245 cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", 1285 1246 __func__); 1286 1247 goto out; ··· 1348 1309 tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work); 1349 1310 1350 1311 list_for_each_entry(ses, &tcon->dfs_ses_list, dlist) 1351 - refresh_ses_referral(ses); 1312 + refresh_ses_referral(tcon, ses); 1352 1313 refresh_tcon_referral(tcon, false); 1353 1314 1354 1315 queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+2 -1
fs/smb/client/smb2ops.c
··· 2799 2799 struct cifs_fid fid; 2800 2800 int rc; 2801 2801 __le16 *utf16_path; 2802 - struct cached_fid *cfid = NULL; 2802 + struct cached_fid *cfid; 2803 2803 int retries = 0, cur_sleep = 1; 2804 2804 2805 2805 replay_again: 2806 2806 /* reinitialize for possible replay */ 2807 + cfid = NULL; 2807 2808 flags = CIFS_CP_CREATE_CLOSE_OP; 2808 2809 oplock = SMB2_OPLOCK_LEVEL_NONE; 2809 2810 server = cifs_pick_channel(ses);
+7 -1
fs/smb/server/transport_ipc.c
··· 263 263 264 264 static int handle_response(int type, void *payload, size_t sz) 265 265 { 266 - unsigned int handle = *(unsigned int *)payload; 266 + unsigned int handle; 267 267 struct ipc_msg_table_entry *entry; 268 268 int ret = 0; 269 + 270 + /* Prevent 4-byte read beyond declared payload size */ 271 + if (sz < sizeof(unsigned int)) 272 + return -EINVAL; 273 + 274 + handle = *(unsigned int *)payload; 269 275 270 276 ipc_update_last_active(); 271 277 down_read(&ipc_msg_table_lock);
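handle_response() previously dereferenced the payload before checking that at least four bytes had actually arrived, so a truncated IPC message from userspace triggered an out-of-bounds read. The fix follows the standard validate-then-parse shape for any length-delimited input; a self-contained sketch (the helper name is hypothetical):

#include <stddef.h>
#include <string.h>

static int parse_handle(const void *payload, size_t sz, unsigned int *handle)
{
	if (sz < sizeof(*handle))	/* validate before touching payload */
		return -1;		/* the kernel code returns -EINVAL */
	memcpy(handle, payload, sizeof(*handle));	/* alignment-safe read */
	return 0;
}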
+36 -11
fs/smb/server/transport_rdma.c
··· 418 418 419 419 sc->ib.dev = sc->rdma.cm_id->device; 420 420 421 - INIT_WORK(&sc->recv_io.posted.refill_work, 422 - smb_direct_post_recv_credits); 423 - INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work); 424 421 INIT_DELAYED_WORK(&sc->idle.timer_work, smb_direct_idle_connection_timer); 425 422 426 423 conn = ksmbd_conn_alloc(); ··· 466 469 disable_delayed_work_sync(&sc->idle.timer_work); 467 470 disable_work_sync(&sc->idle.immediate_work); 468 471 472 + if (sc->rdma.cm_id) 473 + rdma_lock_handler(sc->rdma.cm_id); 474 + 469 475 if (sc->ib.qp) { 470 476 ib_drain_qp(sc->ib.qp); 471 477 sc->ib.qp = NULL; ··· 497 497 ib_free_cq(sc->ib.recv_cq); 498 498 if (sc->ib.pd) 499 499 ib_dealloc_pd(sc->ib.pd); 500 - if (sc->rdma.cm_id) 500 + if (sc->rdma.cm_id) { 501 + rdma_unlock_handler(sc->rdma.cm_id); 501 502 rdma_destroy_id(sc->rdma.cm_id); 503 + } 502 504 503 505 smb_direct_destroy_pools(sc); 504 506 ksmbd_conn_free(KSMBD_TRANS(t)->conn); ··· 1729 1727 } 1730 1728 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1731 1729 case RDMA_CM_EVENT_DISCONNECTED: { 1732 - ib_drain_qp(sc->ib.qp); 1733 - 1734 1730 sc->status = SMBDIRECT_SOCKET_DISCONNECTED; 1735 1731 smb_direct_disconnect_rdma_work(&sc->disconnect_work); 1732 + if (sc->ib.qp) 1733 + ib_drain_qp(sc->ib.qp); 1736 1734 break; 1737 1735 } 1738 1736 case RDMA_CM_EVENT_CONNECT_ERROR: { ··· 1906 1904 goto out_err; 1907 1905 } 1908 1906 1909 - smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work); 1910 1907 return 0; 1911 1908 out_err: 1912 1909 put_recvmsg(sc, recvmsg); ··· 2250 2249 return -ECONNABORTED; 2251 2250 2252 2251 ret = smb_direct_check_recvmsg(recvmsg); 2253 - if (ret == -ECONNABORTED) 2254 - goto out; 2252 + if (ret) 2253 + goto put; 2255 2254 2256 2255 req = (struct smbdirect_negotiate_req *)recvmsg->packet; 2257 2256 sp->max_recv_size = min_t(int, sp->max_recv_size, ··· 2266 2265 sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max); 2267 2266 sc->recv_io.credits.target = max_t(u16, sc->recv_io.credits.target, 1); 2268 2267 2269 - ret = smb_direct_send_negotiate_response(sc, ret); 2270 - out: 2268 + put: 2271 2269 spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags); 2272 2270 sc->recv_io.reassembly.queue_length--; 2273 2271 list_del(&recvmsg->list); 2274 2272 spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags); 2275 2273 put_recvmsg(sc, recvmsg); 2274 + 2275 + if (ret == -ECONNABORTED) 2276 + return ret; 2277 + 2278 + if (ret) 2279 + goto respond; 2280 + 2281 + /* 2282 + * We negotiated with success, so we need to refill the recv queue. 2283 + * We do that with sc->idle.immediate_work still being disabled 2284 + * via smbdirect_socket_init(), so that queue_work(sc->workqueue, 2285 + * &sc->idle.immediate_work) in smb_direct_post_recv_credits() 2286 + * is a no-op. 2287 + * 2288 + * The message that grants the credits to the client is 2289 + * the negotiate response. 2290 + */ 2291 + INIT_WORK(&sc->recv_io.posted.refill_work, smb_direct_post_recv_credits); 2292 + smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work); 2293 + if (unlikely(sc->first_error)) 2294 + return sc->first_error; 2295 + INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work); 2296 + 2297 + respond: 2298 + ret = smb_direct_send_negotiate_response(sc, ret); 2276 2299 2277 2300 return ret; 2278 2301 }
+6
fs/xfs/libxfs/xfs_rtgroup.h
··· 50 50 uint8_t *rtg_rsum_cache;
51 51 struct xfs_open_zone *rtg_open_zone;
52 52 };
53 +
54 + /*
55 + * Count of outstanding GC operations for zoned XFS. Any RTG with a
56 + * non-zero rtg_gccount will not be picked as a new GC victim.
57 + */
58 + atomic_t rtg_gccount;
53 59 };
54 60
55 61 /*
+8
fs/xfs/xfs_zone_alloc.c
··· 246 246 * If a data write raced with this GC write, keep the existing data in 247 247 * the data fork, mark our newly written GC extent as reclaimable, then 248 248 * move on to the next extent. 249 + * 250 + * Note that this can also happen when racing with operations that do 251 + * not actually invalidate the data, but just move it to a different 252 + * inode (XFS_IOC_EXCHANGE_RANGE), or to a different offset inside the 253 + * inode (FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE). If the 254 + * data was just moved around, GC fails to free the zone, but the zone 255 + * becomes a GC candidate again as soon as all previous GC I/O has 256 + * finished and these blocks will be moved out eventually. 249 257 */ 250 258 if (old_startblock != NULLFSBLOCK && 251 259 old_startblock != data.br_startblock)
+27
fs/xfs/xfs_zone_gc.c
··· 114 114 /* Open Zone being written to */ 115 115 struct xfs_open_zone *oz; 116 116 117 + struct xfs_rtgroup *victim_rtg; 118 + 117 119 /* Bio used for reads and writes, including the bvec used by it */ 118 120 struct bio_vec bv; 119 121 struct bio bio; /* must be last */ ··· 266 264 iter->rec_count = 0; 267 265 iter->rec_idx = 0; 268 266 iter->victim_rtg = victim_rtg; 267 + atomic_inc(&victim_rtg->rtg_gccount); 269 268 } 270 269 271 270 /* ··· 365 362 366 363 return 0; 367 364 done: 365 + atomic_dec(&iter->victim_rtg->rtg_gccount); 368 366 xfs_rtgroup_rele(iter->victim_rtg); 369 367 iter->victim_rtg = NULL; 370 368 return 0; ··· 454 450 455 451 if (!rtg) 456 452 continue; 453 + 454 + /* 455 + * If the zone is already undergoing GC, don't pick it again. 456 + * 457 + * This prevents us from picking one of the zones for which we 458 + * already submitted GC I/O, but for which the remapping hasn't 459 + * concluded yet. This won't cause data corruption, but 460 + * increases write amplification and slows down GC, so this is 461 + * a bad thing. 462 + */ 463 + if (atomic_read(&rtg->rtg_gccount)) { 464 + xfs_rtgroup_rele(rtg); 465 + continue; 466 + } 457 467 458 468 /* skip zones that are just waiting for a reset */ 459 469 if (rtg_rmap(rtg)->i_used_blocks == 0 || ··· 706 688 chunk->scratch = &data->scratch[data->scratch_idx]; 707 689 chunk->data = data; 708 690 chunk->oz = oz; 691 + chunk->victim_rtg = iter->victim_rtg; 692 + atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); 693 + atomic_inc(&chunk->victim_rtg->rtg_gccount); 709 694 710 695 bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock); 711 696 bio->bi_end_io = xfs_zone_gc_end_io; ··· 731 710 xfs_zone_gc_free_chunk( 732 711 struct xfs_gc_bio *chunk) 733 712 { 713 + atomic_dec(&chunk->victim_rtg->rtg_gccount); 714 + xfs_rtgroup_rele(chunk->victim_rtg); 734 715 list_del(&chunk->entry); 735 716 xfs_open_zone_put(chunk->oz); 736 717 xfs_irele(chunk->ip); ··· 792 769 split_chunk->new_daddr = chunk->new_daddr; 793 770 split_chunk->oz = chunk->oz; 794 771 atomic_inc(&chunk->oz->oz_ref); 772 + 773 + split_chunk->victim_rtg = chunk->victim_rtg; 774 + atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); 775 + atomic_inc(&chunk->victim_rtg->rtg_gccount); 795 776 796 777 chunk->offset += split_len; 797 778 chunk->len -= split_len;
+1 -1
include/asm-generic/vmlinux.lds.h
··· 832 832 833 833 /* Required sections not related to debugging. */ 834 834 #define ELF_DETAILS \ 835 - .modinfo : { *(.modinfo) } \ 835 + .modinfo : { *(.modinfo) . = ALIGN(8); } \ 836 836 .comment 0 : { *(.comment) } \ 837 837 .symtab 0 : { *(.symtab) } \ 838 838 .strtab 0 : { *(.strtab) } \
+6 -5
include/linux/blk_types.h
··· 341 341 /* write the zero filled sector many times */ 342 342 REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, 343 343 /* Open a zone */ 344 - REQ_OP_ZONE_OPEN = (__force blk_opf_t)10, 344 + REQ_OP_ZONE_OPEN = (__force blk_opf_t)11, 345 345 /* Close a zone */ 346 - REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, 346 + REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13, 347 347 /* Transition a zone to full */ 348 - REQ_OP_ZONE_FINISH = (__force blk_opf_t)13, 348 + REQ_OP_ZONE_FINISH = (__force blk_opf_t)15, 349 349 /* reset a zone write pointer */ 350 - REQ_OP_ZONE_RESET = (__force blk_opf_t)15, 350 + REQ_OP_ZONE_RESET = (__force blk_opf_t)17, 351 351 /* reset all the zone present on the device */ 352 - REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17, 352 + REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19, 353 353 354 354 /* Driver private requests */ 355 355 REQ_OP_DRV_IN = (__force blk_opf_t)34, ··· 478 478 { 479 479 switch (op & REQ_OP_MASK) { 480 480 case REQ_OP_ZONE_RESET: 481 + case REQ_OP_ZONE_RESET_ALL: 481 482 case REQ_OP_ZONE_OPEN: 482 483 case REQ_OP_ZONE_CLOSE: 483 484 case REQ_OP_ZONE_FINISH:
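The renumbering is not arbitrary: the block layer derives an operation's data direction from the lowest bit of its opcode, so anything that mutates device state must carry an odd number. Paraphrased from the same header (the helper is real; the blk_opf_t cast plumbing is elided):

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & 1);	/* odd opcodes write to the medium */
}

With the old even values, zone open/close/finish/reset were classified as reads; the new values (11, 13, 15, 17, 19) keep every zone-management opcode odd, matching the REQ_OP_ZONE_RESET_ALL case added to the zone-management predicate below.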
+2
include/linux/fbcon.h
··· 18 18 void fbcon_resumed(struct fb_info *info); 19 19 int fbcon_mode_deleted(struct fb_info *info, 20 20 struct fb_videomode *mode); 21 + void fbcon_delete_modelist(struct list_head *head); 21 22 void fbcon_new_modelist(struct fb_info *info); 22 23 void fbcon_get_requirement(struct fb_info *info, 23 24 struct fb_blit_caps *caps); ··· 39 38 static inline void fbcon_resumed(struct fb_info *info) {} 40 39 static inline int fbcon_mode_deleted(struct fb_info *info, 41 40 struct fb_videomode *mode) { return 0; } 41 + static inline void fbcon_delete_modelist(struct list_head *head) {} 42 42 static inline void fbcon_new_modelist(struct fb_info *info) {} 43 43 static inline void fbcon_get_requirement(struct fb_info *info, 44 44 struct fb_blit_caps *caps) {}
+1 -1
include/linux/regmap.h
··· 1644 1644 * @status_invert: Inverted status register: cleared bits are active interrupts.
1645 1645 * @status_is_level: Status register is actually the signal level: XOR status
1646 1646 * register with previous value to get active interrupts.
1647 - * @wake_invert: Inverted wake register: cleared bits are wake enabled.
1647 + * @wake_invert: Inverted wake register: cleared bits are wake disabled.
1648 1648 * @type_in_mask: Use the mask registers for controlling irq type. Use this if
1649 1649 * the hardware provides separate bits for rising/falling edge
1650 1650 * or low/high level interrupts and they should be combined into
+2 -2
include/linux/sched.h
··· 2407 2407 * be defined in kernel/sched/core.c. 2408 2408 */ 2409 2409 #ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE 2410 - static inline void migrate_disable(void) 2410 + static __always_inline void migrate_disable(void) 2411 2411 { 2412 2412 __migrate_disable(); 2413 2413 } 2414 2414 2415 - static inline void migrate_enable(void) 2415 + static __always_inline void migrate_enable(void) 2416 2416 { 2417 2417 __migrate_enable(); 2418 2418 }
+1
include/net/bluetooth/hci.h
··· 434 434 HCI_USER_CHANNEL, 435 435 HCI_EXT_CONFIGURED, 436 436 HCI_LE_ADV, 437 + HCI_LE_ADV_0, 437 438 HCI_LE_PER_ADV, 438 439 HCI_LE_SCAN, 439 440 HCI_SSP_ENABLED,
+1
include/net/bluetooth/hci_core.h
··· 244 244 bool enabled; 245 245 bool pending; 246 246 bool periodic; 247 + bool periodic_enabled; 247 248 __u8 mesh; 248 249 __u8 instance; 249 250 __u8 handle;
+2 -2
include/net/bluetooth/l2cap.h
··· 38 38 #define L2CAP_DEFAULT_TX_WINDOW 63 39 39 #define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF 40 40 #define L2CAP_DEFAULT_MAX_TX 3 41 - #define L2CAP_DEFAULT_RETRANS_TO 2 /* seconds */ 42 - #define L2CAP_DEFAULT_MONITOR_TO 12 /* seconds */ 41 + #define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ 42 + #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ 43 43 #define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */ 44 44 #define L2CAP_DEFAULT_ACK_TO 200 45 45 #define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
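Both constants are consumed as milliseconds by the ERTM timer helpers, so the old second-denominated values armed 2 ms and 12 ms retransmission/monitor timers, three orders of magnitude too short. A sketch of the consuming pattern (the macro lives in this same header; body paraphrased from memory):

#define __set_retrans_timer(c) \
	l2cap_set_timer(c, &(c)->retrans_timer, \
			msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO))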
+1 -1
include/net/bluetooth/mgmt.h
··· 853 853 __le16 window; 854 854 __le16 period; 855 855 __u8 num_ad_types; 856 - __u8 ad_types[]; 856 + __u8 ad_types[] __counted_by(num_ad_types); 857 857 } __packed; 858 858 #define MGMT_SET_MESH_RECEIVER_SIZE 6 859 859
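__counted_by(num_ad_types) tells the compiler that the flexible array's element count lives in the named sibling member, so CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS can bounds-check indexing into ad_types[] at run time instead of treating it as unbounded. The general pattern (illustrative struct, not from this header):

struct tlv {
	__u8 len;
	__u8 data[] __counted_by(len);	/* len must be set before data[] is used */
};

The annotation imposes one ordering rule: the count member has to be assigned before the array is accessed, including inside allocation helpers.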
+1 -1
include/net/tcp.h
··· 370 370 int tcp_ioctl(struct sock *sk, int cmd, int *karg); 371 371 enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb); 372 372 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb); 373 - void tcp_rcvbuf_grow(struct sock *sk); 373 + void tcp_rcvbuf_grow(struct sock *sk, u32 newval); 374 374 void tcp_rcv_space_adjust(struct sock *sk); 375 375 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); 376 376 void tcp_twsk_destructor(struct sock *sk);
+13 -12
include/net/tls.h
··· 451 451 452 452 /* Log all TLS record header TCP sequences in [seq, seq+len] */ 453 453 static inline void 454 - tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) 454 + tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async, 455 + __be32 seq, u16 len) 455 456 { 456 - struct tls_context *tls_ctx = tls_get_ctx(sk); 457 - struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); 458 - 459 - atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | 457 + atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | 460 458 ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); 461 - rx_ctx->resync_async->loglen = 0; 462 - rx_ctx->resync_async->rcd_delta = 0; 459 + resync_async->loglen = 0; 460 + resync_async->rcd_delta = 0; 463 461 } 464 462 465 463 static inline void 466 - tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq) 464 + tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async, 465 + __be32 seq) 467 466 { 468 - struct tls_context *tls_ctx = tls_get_ctx(sk); 469 - struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); 467 + atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); 468 + } 470 469 471 - atomic64_set(&rx_ctx->resync_async->req, 472 - ((u64)ntohl(seq) << 32) | RESYNC_REQ); 470 + static inline void 471 + tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async) 472 + { 473 + atomic64_set(&resync_async->req, 0); 473 474 } 474 475 475 476 static inline void
+4 -6
include/scsi/scsi_device.h
··· 252 252 unsigned int queue_stopped; /* request queue is quiesced */ 253 253 bool offline_already; /* Device offline message logged */ 254 254 255 - unsigned int ua_new_media_ctr; /* Counter for New Media UNIT ATTENTIONs */ 256 - unsigned int ua_por_ctr; /* Counter for Power On / Reset UAs */ 255 + atomic_t ua_new_media_ctr; /* Counter for New Media UNIT ATTENTIONs */ 256 + atomic_t ua_por_ctr; /* Counter for Power On / Reset UAs */ 257 257 258 258 atomic_t disk_events_disable_depth; /* disable depth for disk events */ 259 259 ··· 693 693 } 694 694 695 695 /* Macros to access the UNIT ATTENTION counters */ 696 - #define scsi_get_ua_new_media_ctr(sdev) \ 697 - ((const unsigned int)(sdev->ua_new_media_ctr)) 698 - #define scsi_get_ua_por_ctr(sdev) \ 699 - ((const unsigned int)(sdev->ua_por_ctr)) 696 + #define scsi_get_ua_new_media_ctr(sdev) atomic_read(&sdev->ua_new_media_ctr) 697 + #define scsi_get_ua_por_ctr(sdev) atomic_read(&sdev->ua_por_ctr) 700 698 701 699 #define MODULE_ALIAS_SCSI_DEVICE(type) \ 702 700 MODULE_ALIAS("scsi:t-" __stringify(type) "*")
+9
include/trace/events/tcp.h
··· 218 218 __field(__u32, space) 219 219 __field(__u32, ooo_space) 220 220 __field(__u32, rcvbuf) 221 + __field(__u32, rcv_ssthresh) 222 + __field(__u32, window_clamp) 223 + __field(__u32, rcv_wnd) 221 224 __field(__u8, scaling_ratio) 222 225 __field(__u16, sport) 223 226 __field(__u16, dport) ··· 248 245 tp->rcv_nxt; 249 246 250 247 __entry->rcvbuf = sk->sk_rcvbuf; 248 + __entry->rcv_ssthresh = tp->rcv_ssthresh; 249 + __entry->window_clamp = tp->window_clamp; 250 + __entry->rcv_wnd = tp->rcv_wnd; 251 251 __entry->scaling_ratio = tp->scaling_ratio; 252 252 __entry->sport = ntohs(inet->inet_sport); 253 253 __entry->dport = ntohs(inet->inet_dport); ··· 270 264 ), 271 265 272 266 TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u " 267 + "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u " 273 268 "family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 " 274 269 "saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx", 275 270 __entry->time, __entry->rtt_us, __entry->copied, 276 271 __entry->inq, __entry->space, __entry->ooo_space, 277 272 __entry->scaling_ratio, __entry->rcvbuf, 273 + __entry->rcv_ssthresh, __entry->window_clamp, 274 + __entry->rcv_wnd, 278 275 show_family_name(__entry->family), 279 276 __entry->sport, __entry->dport, 280 277 __entry->saddr, __entry->daddr,
+1 -1
include/uapi/linux/fb.h
··· 319 319 #define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ 320 320 #define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ 321 321 #define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ 322 - #define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */ 322 + #define FB_VBLANK_HAVE_VSYNC 0x100 /* vertical syncs can be detected */ 323 323 324 324 struct fb_vblank { 325 325 __u32 flags; /* FB_VBLANK flags */
+2
kernel/bpf/helpers.c
··· 4345 4345 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 4346 4346 BTF_ID_FLAGS(func, bpf_local_irq_save) 4347 4347 BTF_ID_FLAGS(func, bpf_local_irq_restore) 4348 + #ifdef CONFIG_BPF_EVENTS 4348 4349 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr) 4349 4350 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr) 4350 4351 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) ··· 4354 4353 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE) 4355 4354 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 4356 4355 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 4356 + #endif 4357 4357 #ifdef CONFIG_DMA_SHARED_BUFFER 4358 4358 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE) 4359 4359 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
+2
kernel/bpf/ringbuf.c
··· 216 216 217 217 static void bpf_ringbuf_free(struct bpf_ringbuf *rb) 218 218 { 219 + irq_work_sync(&rb->work); 220 + 219 221 /* copy pages pointer and nr_pages to local variable, as we are going 220 222 * to unmap rb itself with vunmap() below 221 223 */
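bpf_ringbuf embeds a struct irq_work for wakeups, and a wakeup raised just before teardown could still be pending or running while the pages were vunmap'd, leaving the callback to touch freed memory. irq_work_sync() makes the free path wait it out. The general shape for any object embedding an irq_work (kernel API sketch; the struct name is hypothetical):

static void obj_free(struct obj *o)
{
	irq_work_sync(&o->work);  /* waits until a pending callback finishes */
	vunmap(o);                /* only now is the memory safe to release */
}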
-4
kernel/power/hibernate.c
··· 706 706 707 707 #ifdef CONFIG_SUSPEND 708 708 if (hibernation_mode == HIBERNATION_SUSPEND) { 709 - pm_restore_gfp_mask(); 710 709 error = suspend_devices_and_enter(mem_sleep_current); 711 710 if (!error) 712 711 goto exit; ··· 745 746 cpu_relax(); 746 747 747 748 exit: 748 - /* Match the pm_restore_gfp_mask() call in hibernate(). */ 749 - pm_restrict_gfp_mask(); 750 - 751 749 /* Restore swap signature. */ 752 750 error = swsusp_unmark(); 753 751 if (error)
+17 -5
kernel/power/main.c
··· 31 31 * held, unless the suspend/hibernate code is guaranteed not to run in parallel 32 32 * with that modification). 33 33 */ 34 + static unsigned int saved_gfp_count; 34 35 static gfp_t saved_gfp_mask; 35 36 36 37 void pm_restore_gfp_mask(void) 37 38 { 38 39 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 39 - if (saved_gfp_mask) { 40 - gfp_allowed_mask = saved_gfp_mask; 41 - saved_gfp_mask = 0; 42 - } 40 + 41 + if (WARN_ON(!saved_gfp_count) || --saved_gfp_count) 42 + return; 43 + 44 + gfp_allowed_mask = saved_gfp_mask; 45 + saved_gfp_mask = 0; 46 + 47 + pm_pr_dbg("GFP mask restored\n"); 43 48 } 44 49 45 50 void pm_restrict_gfp_mask(void) 46 51 { 47 52 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 48 - WARN_ON(saved_gfp_mask); 53 + 54 + if (saved_gfp_count++) { 55 + WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask); 56 + return; 57 + } 58 + 49 59 saved_gfp_mask = gfp_allowed_mask; 50 60 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); 61 + 62 + pm_pr_dbg("GFP mask restricted\n"); 51 63 } 52 64 53 65 unsigned int lock_system_sleep(void)
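pm_restrict_gfp_mask() and pm_restore_gfp_mask() can now nest, which is what lets the hibernate.c hunk above drop its manual restore/restrict pair around entering suspend: only the outermost restrict saves and masks __GFP_IO | __GFP_FS, and only the matching outermost restore puts the mask back. A userspace analogue of the counting rule (0x30 stands in for __GFP_IO | __GFP_FS):

#include <stdio.h>

static unsigned int mask = 0xff, saved, depth;

static void restrict_mask(void)
{
	if (!depth++) {		/* only the outermost call saves and masks */
		saved = mask;
		mask &= ~0x30u;
	}
}

static void restore_mask(void)
{
	if (depth && !--depth)	/* only the matching outermost call restores */
		mask = saved;
}

int main(void)
{
	restrict_mask();
	restrict_mask();	/* nested: no further effect */
	restore_mask();
	printf("after inner restore: %#x\n", mask);	/* still 0xcf */
	restore_mask();
	printf("after outer restore: %#x\n", mask);	/* back to 0xff */
	return 0;
}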
+1
kernel/power/process.c
··· 132 132 if (!pm_freezing) 133 133 static_branch_inc(&freezer_active); 134 134 135 + pm_wakeup_clear(0); 135 136 pm_freezing = true; 136 137 error = try_to_freeze_tasks(true); 137 138 if (!error)
-1
kernel/power/suspend.c
··· 595 595 } 596 596 597 597 pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); 598 - pm_wakeup_clear(0); 599 598 pm_suspend_clear_flags(); 600 599 error = suspend_prepare(state); 601 600 if (error)
+111 -15
kernel/sched/ext.c
··· 67 67 68 68 static struct delayed_work scx_watchdog_work; 69 69 70 - /* for %SCX_KICK_WAIT */ 71 - static unsigned long __percpu *scx_kick_cpus_pnt_seqs; 70 + /* 71 + * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of pick_task sequence 72 + * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu 73 + * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated 74 + * lazily when enabling and freed when disabling to avoid waste when sched_ext 75 + * isn't active. 76 + */ 77 + struct scx_kick_pseqs { 78 + struct rcu_head rcu; 79 + unsigned long seqs[]; 80 + }; 81 + 82 + static DEFINE_PER_CPU(struct scx_kick_pseqs __rcu *, scx_kick_pseqs); 72 83 73 84 /* 74 85 * Direct dispatch marker. ··· 791 780 if (rq->scx.flags & SCX_RQ_IN_WAKEUP) 792 781 return; 793 782 783 + /* Don't do anything if there already is a deferred operation. */ 784 + if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING) 785 + return; 786 + 794 787 /* 795 788 * If in balance, the balance callbacks will be called before rq lock is 796 789 * released. Schedule one. 790 + * 791 + * 792 + * We can't directly insert the callback into the 793 + * rq's list: The call can drop its lock and make the pending balance 794 + * callback visible to unrelated code paths that call rq_pin_lock(). 795 + * 796 + * Just let balance_one() know that it must do it itself. 797 797 */ 798 798 if (rq->scx.flags & SCX_RQ_IN_BALANCE) { 799 - queue_balance_callback(rq, &rq->scx.deferred_bal_cb, 800 - deferred_bal_cb_workfn); 799 + rq->scx.flags |= SCX_RQ_BAL_CB_PENDING; 801 800 return; 802 801 } 803 802 ··· 2024 2003 dspc->cursor = 0; 2025 2004 } 2026 2005 2006 + static inline void maybe_queue_balance_callback(struct rq *rq) 2007 + { 2008 + lockdep_assert_rq_held(rq); 2009 + 2010 + if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING)) 2011 + return; 2012 + 2013 + queue_balance_callback(rq, &rq->scx.deferred_bal_cb, 2014 + deferred_bal_cb_workfn); 2015 + 2016 + rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING; 2017 + } 2018 + 2027 2019 static int balance_one(struct rq *rq, struct task_struct *prev) 2028 2020 { 2029 2021 struct scx_sched *sch = scx_root; ··· 2183 2149 } 2184 2150 #endif 2185 2151 rq_repin_lock(rq, rf); 2152 + 2153 + maybe_queue_balance_callback(rq); 2186 2154 2187 2155 return ret; 2188 2156 } ··· 3507 3471 struct scx_dispatch_q *dsq; 3508 3472 int node; 3509 3473 3474 + irq_work_sync(&sch->error_irq_work); 3510 3475 kthread_stop(sch->helper->task); 3476 + 3511 3477 free_percpu(sch->pcpu); 3512 3478 3513 3479 for_each_node_state(node, N_POSSIBLE) ··· 3888 3850 } 3889 3851 } 3890 3852 3853 + static void free_kick_pseqs_rcu(struct rcu_head *rcu) 3854 + { 3855 + struct scx_kick_pseqs *pseqs = container_of(rcu, struct scx_kick_pseqs, rcu); 3856 + 3857 + kvfree(pseqs); 3858 + } 3859 + 3860 + static void free_kick_pseqs(void) 3861 + { 3862 + int cpu; 3863 + 3864 + for_each_possible_cpu(cpu) { 3865 + struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); 3866 + struct scx_kick_pseqs *to_free; 3867 + 3868 + to_free = rcu_replace_pointer(*pseqs, NULL, true); 3869 + if (to_free) 3870 + call_rcu(&to_free->rcu, free_kick_pseqs_rcu); 3871 + } 3872 + } 3873 + 3891 3874 static void scx_disable_workfn(struct kthread_work *work) 3892 3875 { 3893 3876 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); ··· 4045 3986 free_percpu(scx_dsp_ctx); 4046 3987 scx_dsp_ctx = NULL; 4047 3988 scx_dsp_max_batch = 0; 3989 + free_kick_pseqs(); 4048 3990 4049 3991 mutex_unlock(&scx_enable_mutex); 4050 3992 ··· 4408 
4348 irq_work_queue(&sch->error_irq_work); 4409 4349 } 4410 4350 4351 + static int alloc_kick_pseqs(void) 4352 + { 4353 + int cpu; 4354 + 4355 + /* 4356 + * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size 4357 + * can exceed percpu allocator limits on large machines. 4358 + */ 4359 + for_each_possible_cpu(cpu) { 4360 + struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); 4361 + struct scx_kick_pseqs *new_pseqs; 4362 + 4363 + WARN_ON_ONCE(rcu_access_pointer(*pseqs)); 4364 + 4365 + new_pseqs = kvzalloc_node(struct_size(new_pseqs, seqs, nr_cpu_ids), 4366 + GFP_KERNEL, cpu_to_node(cpu)); 4367 + if (!new_pseqs) { 4368 + free_kick_pseqs(); 4369 + return -ENOMEM; 4370 + } 4371 + 4372 + rcu_assign_pointer(*pseqs, new_pseqs); 4373 + } 4374 + 4375 + return 0; 4376 + } 4377 + 4411 4378 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) 4412 4379 { 4413 4380 struct scx_sched *sch; ··· 4582 4495 goto err_unlock; 4583 4496 } 4584 4497 4498 + ret = alloc_kick_pseqs(); 4499 + if (ret) 4500 + goto err_unlock; 4501 + 4585 4502 sch = scx_alloc_and_add_sched(ops); 4586 4503 if (IS_ERR(sch)) { 4587 4504 ret = PTR_ERR(sch); 4588 - goto err_unlock; 4505 + goto err_free_pseqs; 4589 4506 } 4590 4507 4591 4508 /* ··· 4792 4701 4793 4702 return 0; 4794 4703 4704 + err_free_pseqs: 4705 + free_kick_pseqs(); 4795 4706 err_unlock: 4796 4707 mutex_unlock(&scx_enable_mutex); 4797 4708 return ret; ··· 5175 5082 { 5176 5083 struct rq *this_rq = this_rq(); 5177 5084 struct scx_rq *this_scx = &this_rq->scx; 5178 - unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); 5085 + struct scx_kick_pseqs __rcu *pseqs_pcpu = __this_cpu_read(scx_kick_pseqs); 5179 5086 bool should_wait = false; 5087 + unsigned long *pseqs; 5180 5088 s32 cpu; 5089 + 5090 + if (unlikely(!pseqs_pcpu)) { 5091 + pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_pseqs"); 5092 + return; 5093 + } 5094 + 5095 + pseqs = rcu_dereference_bh(pseqs_pcpu)->seqs; 5181 5096 5182 5097 for_each_cpu(cpu, this_scx->cpus_to_kick) { 5183 5098 should_wait |= kick_one_cpu(cpu, this_rq, pseqs); ··· 5308 5207 SCX_TG_ONLINE); 5309 5208 5310 5209 scx_idle_init_masks(); 5311 - 5312 - scx_kick_cpus_pnt_seqs = 5313 - __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids, 5314 - __alignof__(scx_kick_cpus_pnt_seqs[0])); 5315 - BUG_ON(!scx_kick_cpus_pnt_seqs); 5316 5210 5317 5211 for_each_possible_cpu(cpu) { 5318 5212 struct rq *rq = cpu_rq(cpu); ··· 5784 5688 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) 5785 5689 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) 5786 5690 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local) 5787 - BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) 5788 - BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) 5691 + BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 5692 + BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 5789 5693 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 5790 5694 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 5791 5695 BTF_KFUNCS_END(scx_kfunc_ids_dispatch) ··· 5916 5820 5917 5821 BTF_KFUNCS_START(scx_kfunc_ids_unlocked) 5918 5822 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) 5919 - BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) 5920 - BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) 5823 + BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 5824 + BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 5921 5825 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 5922 5826 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 5923 5827 
BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
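The new scx_kick_pseqs above pairs an rcu_head with a C99 flexible array member so the header and the sequence array come from a single kvzalloc(). A minimal userspace sketch of that layout, assuming nothing kernel-specific — the names are invented and the calloc() sizing stands in for the kernel's struct_size() helper:

#include <stdio.h>
#include <stdlib.h>

struct pseqs {
    size_t nr;            /* stands in for the embedded rcu_head */
    unsigned long seqs[]; /* C99 flexible array member */
};

static struct pseqs *pseqs_alloc(size_t nr)
{
    /* one allocation for header + array; struct_size() computes
     * this same sum in the kernel, with overflow checking */
    struct pseqs *p = calloc(1, sizeof(*p) + nr * sizeof(p->seqs[0]));

    if (p)
        p->nr = nr;
    return p;
}

int main(void)
{
    struct pseqs *p = pseqs_alloc(8);

    if (!p)
        return 1;
    p->seqs[7] = 42;
    printf("%zu slots, last=%lu\n", p->nr, p->seqs[7]);
    free(p);
    return 0;
}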
+1
kernel/sched/sched.h
··· 784 784 SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */ 785 785 SCX_RQ_BYPASSING = 1 << 4, 786 786 SCX_RQ_CLK_VALID = 1 << 5, /* RQ clock is fresh and valid */ 787 + SCX_RQ_BAL_CB_PENDING = 1 << 6, /* must queue a cb after dispatching */ 787 788 788 789 SCX_RQ_IN_WAKEUP = 1 << 16, 789 790 SCX_RQ_IN_BALANCE = 1 << 17,
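The SCX_RQ_BAL_CB_PENDING bit added here is what lets the dispatch path above avoid queueing a balance callback while the rq lock may be dropped: it records the request and balance_one() flushes it at a known-safe point. A hedged userspace sketch of that defer-then-flush shape (all names invented for illustration):

#include <stdio.h>

#define RQ_IN_BALANCE (1u << 0)
#define RQ_CB_PENDING (1u << 1)

struct rq { unsigned int flags; };

static void run_callback(struct rq *rq)
{
    (void)rq;
    printf("balance callback ran\n");
}

/* request the callback; defer if we are inside the balance path */
static void schedule_deferred(struct rq *rq)
{
    if (rq->flags & RQ_CB_PENDING)
        return;                      /* already recorded */
    if (rq->flags & RQ_IN_BALANCE) {
        rq->flags |= RQ_CB_PENDING;  /* flush later, at a safe point */
        return;
    }
    run_callback(rq);
}

/* called at the known-safe point, lock still notionally held */
static void maybe_flush(struct rq *rq)
{
    if (rq->flags & RQ_CB_PENDING) {
        rq->flags &= ~RQ_CB_PENDING;
        run_callback(rq);
    }
}

int main(void)
{
    struct rq rq = { .flags = RQ_IN_BALANCE };

    schedule_deferred(&rq); /* deferred, nothing printed yet */
    rq.flags &= ~RQ_IN_BALANCE;
    maybe_flush(&rq);       /* prints now */
    return 0;
}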
+1 -1
lib/Kconfig.kmsan
··· 3 3 bool 4 4 5 5 config HAVE_KMSAN_COMPILER 6 - def_bool CC_IS_CLANG 6 + def_bool $(cc-option,-fsanitize=kernel-memory) 7 7 8 8 config KMSAN 9 9 bool "KMSAN: detector of uninitialized values use"
+1 -1
lib/kunit/kunit-test.c
··· 739 739 740 740 static void test_dev_action(void *priv) 741 741 { 742 - *(void **)priv = (void *)1; 742 + *(long *)priv = 1; 743 743 } 744 744 745 745 static void kunit_device_test(struct kunit *test)
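The fix stores through the pointee's real type: priv points at a long, so writing *(void **)priv = (void *)1 punned the object. A tiny sketch of the corrected shape, with invented names:

#include <stdio.h>

/* the callback receives an opaque pointer to a long and must
 * store through the object's actual type */
static void action(void *priv)
{
    *(long *)priv = 1;
}

int main(void)
{
    long flag = 0;

    action(&flag);
    printf("flag=%ld\n", flag); /* flag=1 */
    return 0;
}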
+2 -1
lib/kunit/test.c
··· 745 745 .param_index = ++test.param_index, 746 746 .parent = &test, 747 747 }; 748 - kunit_init_test(&param_test, test_case->name, test_case->log); 748 + kunit_init_test(&param_test, test_case->name, NULL); 749 + param_test.log = test_case->log; 749 750 kunit_run_case_catch_errors(suite, test_case, &param_test); 750 751 751 752 if (param_desc[0] == '\0') {
+12 -2
net/batman-adv/originator.c
··· 763 763 bat_priv = netdev_priv(mesh_iface); 764 764 765 765 primary_if = batadv_primary_if_get_selected(bat_priv); 766 - if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { 766 + if (!primary_if) { 767 767 ret = -ENOENT; 768 768 goto out_put_mesh_iface; 769 + } 770 + 771 + if (primary_if->if_status != BATADV_IF_ACTIVE) { 772 + ret = -ENOENT; 773 + goto out_put_primary_if; 769 774 } 770 775 771 776 hard_iface = batadv_netlink_get_hardif(bat_priv, cb); ··· 1332 1327 bat_priv = netdev_priv(mesh_iface); 1333 1328 1334 1329 primary_if = batadv_primary_if_get_selected(bat_priv); 1335 - if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { 1330 + if (!primary_if) { 1336 1331 ret = -ENOENT; 1337 1332 goto out_put_mesh_iface; 1333 + } 1334 + 1335 + if (primary_if->if_status != BATADV_IF_ACTIVE) { 1336 + ret = -ENOENT; 1337 + goto out_put_primary_if; 1338 1338 } 1339 1339 1340 1340 hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
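Splitting the combined test gives each failure its own unwind label, so the reference taken by a successful lookup is always dropped. A compact userspace sketch of the two-step goto unwind, assuming illustrative iface_get()/iface_put() helpers:

#include <stdio.h>

struct iface { int refs; int active; };

static struct iface *iface_get(struct iface *i)
{
    if (i)
        i->refs++;
    return i;
}

static void iface_put(struct iface *i)
{
    i->refs--;
}

static int dump(struct iface *primary)
{
    int ret = 0;

    if (!iface_get(primary))
        return -1;     /* nothing acquired, plain return */

    if (!primary->active) {
        ret = -2;
        goto out_put;  /* must drop the reference we hold */
    }

    printf("dumping\n");
out_put:
    iface_put(primary);
    return ret;
}

int main(void)
{
    struct iface i = { .refs = 0, .active = 0 };

    printf("ret=%d refs=%d\n", dump(&i), i.refs); /* ret=-2 refs=0 */
    return 0;
}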
+7
net/bluetooth/hci_conn.c
··· 843 843 if (bis) 844 844 return; 845 845 846 + bis = hci_conn_hash_lookup_big_state(hdev, 847 + conn->iso_qos.bcast.big, 848 + BT_OPEN, 849 + HCI_ROLE_MASTER); 850 + if (bis) 851 + return; 852 + 846 853 hci_le_terminate_big(hdev, conn); 847 854 } else { 848 855 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
+9 -2
net/bluetooth/hci_event.c
··· 1607 1607 1608 1608 hci_dev_set_flag(hdev, HCI_LE_ADV); 1609 1609 1610 - if (adv && !adv->periodic) 1610 + if (adv) 1611 1611 adv->enabled = true; 1612 + else if (!set->handle) 1613 + hci_dev_set_flag(hdev, HCI_LE_ADV_0); 1612 1614 1613 1615 conn = hci_lookup_le_connect(hdev); 1614 1616 if (conn) ··· 1621 1619 if (cp->num_of_sets) { 1622 1620 if (adv) 1623 1621 adv->enabled = false; 1622 + else if (!set->handle) 1623 + hci_dev_clear_flag(hdev, HCI_LE_ADV_0); 1624 1624 1625 1625 /* If just one instance was disabled check if there are 1626 1626 * any other instance enabled before clearing HCI_LE_ADV ··· 3963 3959 hci_dev_set_flag(hdev, HCI_LE_PER_ADV); 3964 3960 3965 3961 if (adv) 3966 - adv->enabled = true; 3962 + adv->periodic_enabled = true; 3967 3963 } else { 3964 + if (adv) 3965 + adv->periodic_enabled = false; 3966 + 3968 3967 /* If just one instance was disabled check if there are 3969 3968 * any other instance enabled before clearing HCI_LE_PER_ADV. 3970 3969 * The current periodic adv instance will be marked as
+14 -9
net/bluetooth/hci_sync.c
··· 863 863 { 864 864 struct hci_cmd_sync_work_entry *entry; 865 865 866 - entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy); 867 - if (!entry) 868 - return false; 866 + mutex_lock(&hdev->cmd_sync_work_lock); 869 867 870 - hci_cmd_sync_cancel_entry(hdev, entry); 868 + entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); 869 + if (!entry) { 870 + mutex_unlock(&hdev->cmd_sync_work_lock); 871 + return false; 872 + } 873 + 874 + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); 875 + 876 + mutex_unlock(&hdev->cmd_sync_work_lock); 871 877 872 878 return true; 873 879 } ··· 1607 1601 1608 1602 /* If periodic advertising already disabled there is nothing to do. */ 1609 1603 adv = hci_find_adv_instance(hdev, instance); 1610 - if (!adv || !adv->periodic || !adv->enabled) 1604 + if (!adv || !adv->periodic_enabled) 1611 1605 return 0; 1612 1606 1613 1607 memset(&cp, 0, sizeof(cp)); ··· 1672 1666 1673 1667 /* If periodic advertising already enabled there is nothing to do. */ 1674 1668 adv = hci_find_adv_instance(hdev, instance); 1675 - if (adv && adv->periodic && adv->enabled) 1669 + if (adv && adv->periodic_enabled) 1676 1670 return 0; 1677 1671 1678 1672 memset(&cp, 0, sizeof(cp)); ··· 2606 2600 /* If current advertising instance is set to instance 0x00 2607 2601 * then we need to re-enable it. 2608 2602 */ 2609 - if (!hdev->cur_adv_instance) 2610 - err = hci_enable_ext_advertising_sync(hdev, 2611 - hdev->cur_adv_instance); 2603 + if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0)) 2604 + err = hci_enable_ext_advertising_sync(hdev, 0x00); 2612 2605 } else { 2613 2606 /* Schedule for most recent instance to be restarted and begin 2614 2607 * the software rotation loop
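Holding cmd_sync_work_lock across both the lookup and the cancel closes the window in which another thread could free the entry between the two steps. A minimal pthread sketch of the same lookup-and-act-in-one-critical-section shape (names are placeholders):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *entry; /* stands in for the pending-command list */

static int cancel_entry(void)
{
    int found = 0;

    pthread_mutex_lock(&lock);
    if (entry) {         /* lookup ...                       */
        *entry = -1;     /* ... and cancel, without dropping */
        entry = NULL;    /*     the lock in between          */
        found = 1;
    }
    pthread_mutex_unlock(&lock);
    return found;
}

int main(void)
{
    int e = 0;

    entry = &e;
    printf("cancelled=%d e=%d\n", cancel_entry(), e);
    return 0;
}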
+8 -2
net/bluetooth/iso.c
··· 2032 2032 */ 2033 2033 if (!bacmp(&hcon->dst, BDADDR_ANY)) { 2034 2034 bacpy(&hcon->dst, &iso_pi(parent)->dst); 2035 - hcon->dst_type = iso_pi(parent)->dst_type; 2035 + hcon->dst_type = le_addr_type(iso_pi(parent)->dst_type); 2036 2036 } 2037 2037 2038 2038 if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) { ··· 2046 2046 } 2047 2047 2048 2048 bacpy(&iso_pi(sk)->dst, &hcon->dst); 2049 - iso_pi(sk)->dst_type = hcon->dst_type; 2049 + 2050 + /* Convert from HCI to three-value type */ 2051 + if (hcon->dst_type == ADDR_LE_DEV_PUBLIC) 2052 + iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC; 2053 + else 2054 + iso_pi(sk)->dst_type = BDADDR_LE_RANDOM; 2055 + 2050 2056 iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; 2051 2057 memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len); 2052 2058 iso_pi(sk)->base_len = iso_pi(parent)->base_len;
+2 -2
net/bluetooth/l2cap_core.c
··· 282 282 if (!delayed_work_pending(&chan->monitor_timer) && 283 283 chan->retrans_timeout) { 284 284 l2cap_set_timer(chan, &chan->retrans_timer, 285 - secs_to_jiffies(chan->retrans_timeout)); 285 + msecs_to_jiffies(chan->retrans_timeout)); 286 286 } 287 287 } 288 288 ··· 291 291 __clear_retrans_timer(chan); 292 292 if (chan->monitor_timeout) { 293 293 l2cap_set_timer(chan, &chan->monitor_timer, 294 - secs_to_jiffies(chan->monitor_timeout)); 294 + msecs_to_jiffies(chan->monitor_timeout)); 295 295 } 296 296 } 297 297
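retrans_timeout and monitor_timeout are stored in milliseconds, so converting them with a seconds-based helper stretched the ERTM timers a thousandfold. A worked example of the difference, assuming an illustrative tick rate of HZ=100:

#include <stdio.h>

#define HZ 100 /* illustrative tick rate */

static unsigned long msecs_to_ticks(unsigned long ms)
{
    return ms * HZ / 1000;
}

static unsigned long secs_to_ticks(unsigned long s)
{
    return s * HZ;
}

int main(void)
{
    unsigned long retrans_timeout = 2000; /* stored in milliseconds */

    printf("correct: %lu ticks\n", msecs_to_ticks(retrans_timeout)); /* 200 */
    printf("bug:     %lu ticks\n", secs_to_ticks(retrans_timeout));  /* 200000 */
    return 0;
}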
+15 -11
net/bluetooth/mgmt.c
··· 2175 2175 sk = cmd->sk; 2176 2176 2177 2177 if (status) { 2178 + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 2179 + status); 2178 2180 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true, 2179 2181 cmd_status_rsp, &status); 2180 - return; 2182 + goto done; 2181 2183 } 2182 2184 2183 - mgmt_pending_remove(cmd); 2184 2185 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0); 2186 + 2187 + done: 2188 + mgmt_pending_free(cmd); 2185 2189 } 2186 2190 2187 2191 static int set_mesh_sync(struct hci_dev *hdev, void *data) 2188 2192 { 2189 2193 struct mgmt_pending_cmd *cmd = data; 2190 - struct mgmt_cp_set_mesh cp; 2194 + DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types, 2195 + sizeof(hdev->mesh_ad_types)); 2191 2196 size_t len; 2192 2197 2193 2198 mutex_lock(&hdev->mgmt_pending_lock); ··· 2202 2197 return -ECANCELED; 2203 2198 } 2204 2199 2205 - memcpy(&cp, cmd->param, sizeof(cp)); 2200 + len = cmd->param_len; 2201 + memcpy(cp, cmd->param, min(__struct_size(cp), len)); 2206 2202 2207 2203 mutex_unlock(&hdev->mgmt_pending_lock); 2208 2204 2209 - len = cmd->param_len; 2210 - 2211 2205 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types)); 2212 2206 2213 - if (cp.enable) 2207 + if (cp->enable) 2214 2208 hci_dev_set_flag(hdev, HCI_MESH); 2215 2209 else 2216 2210 hci_dev_clear_flag(hdev, HCI_MESH); 2217 2211 2218 - hdev->le_scan_interval = __le16_to_cpu(cp.period); 2219 - hdev->le_scan_window = __le16_to_cpu(cp.window); 2212 + hdev->le_scan_interval = __le16_to_cpu(cp->period); 2213 + hdev->le_scan_window = __le16_to_cpu(cp->window); 2220 2214 2221 - len -= sizeof(cp); 2215 + len -= sizeof(struct mgmt_cp_set_mesh); 2222 2216 2223 2217 /* If filters don't fit, forward all adv pkts */ 2224 2218 if (len <= sizeof(hdev->mesh_ad_types)) 2225 - memcpy(hdev->mesh_ad_types, cp.ad_types, len); 2219 + memcpy(hdev->mesh_ad_types, cp->ad_types, len); 2226 2220 2227 2221 hci_update_passive_scan_sync(hdev); 2228 2222 return 0;
+11 -15
net/bluetooth/rfcomm/tty.c
··· 643 643 tty_port_tty_hangup(&dev->port, true); 644 644 645 645 dev->modem_status = 646 - ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | 647 - ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | 646 + ((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) | 647 + ((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) | 648 648 ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) | 649 649 ((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0); 650 650 } ··· 1055 1055 static int rfcomm_tty_tiocmget(struct tty_struct *tty) 1056 1056 { 1057 1057 struct rfcomm_dev *dev = tty->driver_data; 1058 + struct rfcomm_dlc *dlc = dev->dlc; 1059 + u8 v24_sig; 1058 1060 1059 1061 BT_DBG("tty %p dev %p", tty, dev); 1060 1062 1061 - return dev->modem_status; 1063 + rfcomm_dlc_get_modem_status(dlc, &v24_sig); 1064 + 1065 + return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status; 1062 1066 } 1063 1067 1064 1068 static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) ··· 1075 1071 1076 1072 rfcomm_dlc_get_modem_status(dlc, &v24_sig); 1077 1073 1078 - if (set & TIOCM_DSR || set & TIOCM_DTR) 1074 + if (set & TIOCM_DTR) 1079 1075 v24_sig |= RFCOMM_V24_RTC; 1080 - if (set & TIOCM_RTS || set & TIOCM_CTS) 1076 + if (set & TIOCM_RTS) 1081 1077 v24_sig |= RFCOMM_V24_RTR; 1082 - if (set & TIOCM_RI) 1083 - v24_sig |= RFCOMM_V24_IC; 1084 - if (set & TIOCM_CD) 1085 - v24_sig |= RFCOMM_V24_DV; 1086 1078 1087 - if (clear & TIOCM_DSR || clear & TIOCM_DTR) 1079 + if (clear & TIOCM_DTR) 1088 1080 v24_sig &= ~RFCOMM_V24_RTC; 1089 - if (clear & TIOCM_RTS || clear & TIOCM_CTS) 1081 + if (clear & TIOCM_RTS) 1090 1082 v24_sig &= ~RFCOMM_V24_RTR; 1091 - if (clear & TIOCM_RI) 1092 - v24_sig &= ~RFCOMM_V24_IC; 1093 - if (clear & TIOCM_CD) 1094 - v24_sig &= ~RFCOMM_V24_DV; 1095 1083 1096 1084 rfcomm_dlc_set_modem_status(dlc, v24_sig); 1097 1085
+24 -3
net/core/devmem.c
··· 17 17 #include <net/page_pool/helpers.h> 18 18 #include <net/page_pool/memory_provider.h> 19 19 #include <net/sock.h> 20 + #include <net/tcp.h> 20 21 #include <trace/events/page_pool.h> 21 22 22 23 #include "devmem.h" ··· 358 357 unsigned int dmabuf_id) 359 358 { 360 359 struct net_devmem_dmabuf_binding *binding; 361 - struct dst_entry *dst = __sk_dst_get(sk); 360 + struct net_device *dst_dev; 361 + struct dst_entry *dst; 362 362 int err = 0; 363 363 364 364 binding = net_devmem_lookup_dmabuf(dmabuf_id); ··· 368 366 goto out_err; 369 367 } 370 368 369 + rcu_read_lock(); 370 + dst = __sk_dst_get(sk); 371 + /* If dst is NULL (route expired), attempt to rebuild it. */ 372 + if (unlikely(!dst)) { 373 + if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) { 374 + err = -EHOSTUNREACH; 375 + goto out_unlock; 376 + } 377 + dst = __sk_dst_get(sk); 378 + if (unlikely(!dst)) { 379 + err = -ENODEV; 380 + goto out_unlock; 381 + } 382 + } 383 + 371 384 /* The dma-addrs in this binding are only reachable to the corresponding 372 385 * net_device. 373 386 */ 374 - if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) { 387 + dst_dev = dst_dev_rcu(dst); 388 + if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) { 375 389 err = -ENODEV; 376 - goto out_err; 390 + goto out_unlock; 377 391 } 378 392 393 + rcu_read_unlock(); 379 394 return binding; 380 395 396 + out_unlock: 397 + rcu_read_unlock(); 381 398 out_err: 382 399 if (binding) 383 400 net_devmem_dmabuf_binding_put(binding);
+2 -1
net/core/filter.c
··· 3877 3877 u32 new_len = skb->len + head_room; 3878 3878 int ret; 3879 3879 3880 - if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || 3880 + if (unlikely(flags || (int)head_room < 0 || 3881 + (!skb_is_gso(skb) && new_len > max_len) || 3881 3882 new_len < skb->len)) 3882 3883 return -EINVAL; 3883 3884
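The added (int)head_room < 0 test rejects head_room values with the top bit set outright; since the max_len comparison is skipped for GSO skbs, such a value could previously pass the remaining checks without wrapping new_len below skb->len. A sketch of the arithmetic on a two's-complement target (which the kernel assumes; the concrete numbers are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned int len = 1500;
    unsigned int head_room = 0x80000000u;   /* top bit set */
    unsigned int new_len = len + head_room; /* no wrap: 0x800005dc */

    /* old guards: new_len < len is false, and max_len is not
     * checked for GSO skbs, so this value slipped through */
    printf("wrapped=%d rejected=%d\n",
           new_len < len, (int)head_room < 0);
    return 0;
}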
+14 -7
net/ipv4/tcp_input.c
··· 891 891 } 892 892 } 893 893 894 - void tcp_rcvbuf_grow(struct sock *sk) 894 + void tcp_rcvbuf_grow(struct sock *sk, u32 newval) 895 895 { 896 896 const struct net *net = sock_net(sk); 897 897 struct tcp_sock *tp = tcp_sk(sk); 898 - int rcvwin, rcvbuf, cap; 898 + u32 rcvwin, rcvbuf, cap, oldval; 899 + u64 grow; 900 + 901 + oldval = tp->rcvq_space.space; 902 + tp->rcvq_space.space = newval; 899 903 900 904 if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) || 901 905 (sk->sk_userlocks & SOCK_RCVBUF_LOCK)) 902 906 return; 903 907 908 + /* DRS is always one RTT late. */ 909 + rcvwin = newval << 1; 910 + 904 911 /* slow start: allow the sender to double its rate. */ 905 - rcvwin = tp->rcvq_space.space << 1; 912 + grow = (u64)rcvwin * (newval - oldval); 913 + do_div(grow, oldval); 914 + rcvwin += grow << 1; 906 915 907 916 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) 908 917 rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt; ··· 952 943 953 944 trace_tcp_rcvbuf_grow(sk, time); 954 945 955 - tp->rcvq_space.space = copied; 956 - 957 - tcp_rcvbuf_grow(sk); 946 + tcp_rcvbuf_grow(sk, copied); 958 947 959 948 new_measure: 960 949 tp->rcvq_space.seq = tp->copied_seq; ··· 5277 5270 } 5278 5271 /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */ 5279 5272 if (sk->sk_socket) 5280 - tcp_rcvbuf_grow(sk); 5273 + tcp_rcvbuf_grow(sk, tp->rcvq_space.space); 5281 5274 } 5282 5275 5283 5276 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
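Worked numbers for the new growth formula, which starts from twice the fresh measurement (DRS lags one RTT) and adds twice the proportional growth, done in 64-bit so the product cannot overflow:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t oldval = 100000;       /* previous rcvq_space.space */
    uint32_t newval = 150000;       /* freshly measured value    */
    uint32_t rcvwin = newval << 1;  /* 300000: DRS is one RTT late */
    uint64_t grow = (uint64_t)rcvwin * (newval - oldval) / oldval;

    rcvwin += grow << 1;            /* + 2 * 150000 -> 600000 */
    printf("rcvwin=%" PRIu32 "\n", rcvwin);
    return 0;
}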
+3
net/mac80211/cfg.c
··· 1876 1876 link_conf->nontransmitted = false; 1877 1877 link_conf->ema_ap = false; 1878 1878 link_conf->bssid_indicator = 0; 1879 + link_conf->fils_discovery.min_interval = 0; 1880 + link_conf->fils_discovery.max_interval = 0; 1881 + link_conf->unsol_bcast_probe_resp_interval = 0; 1879 1882 1880 1883 __sta_info_flush(sdata, true, link_id, NULL); 1881 1884
+8 -3
net/mac80211/key.c
··· 508 508 ret = ieee80211_key_enable_hw_accel(new); 509 509 } 510 510 } else { 511 - if (!new->local->wowlan) 511 + if (!new->local->wowlan) { 512 512 ret = ieee80211_key_enable_hw_accel(new); 513 - else if (link_id < 0 || !sdata->vif.active_links || 514 - BIT(link_id) & sdata->vif.active_links) 513 + } else if (link_id < 0 || !sdata->vif.active_links || 514 + BIT(link_id) & sdata->vif.active_links) { 515 515 new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 516 + if (!(new->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | 517 + IEEE80211_KEY_FLAG_PUT_MIC_SPACE | 518 + IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) 519 + decrease_tailroom_need_count(sdata, 1); 520 + } 516 521 } 517 522 518 523 if (ret)
+1
net/mptcp/mib.c
··· 85 85 SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK), 86 86 SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK), 87 87 SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED), 88 + SNMP_MIB_ITEM("WinProbe", MPTCP_MIB_WINPROBE), 88 89 }; 89 90 90 91 /* mptcp_mib_alloc - allocate percpu mib counters
+1
net/mptcp/mib.h
··· 88 88 MPTCP_MIB_DSSFALLBACK, /* Bad or missing DSS */ 89 89 MPTCP_MIB_SIMULTCONNFALLBACK, /* Simultaneous connect */ 90 90 MPTCP_MIB_FALLBACKFAILED, /* Can't fallback due to msk status */ 91 + MPTCP_MIB_WINPROBE, /* MPTCP-level zero window probe */ 91 92 __MPTCP_MIB_MAX 92 93 }; 93 94
+53 -30
net/mptcp/protocol.c
··· 194 194 * - mptcp does not maintain a msk-level window clamp 195 195 * - returns true when the receive buffer is actually updated 196 196 */ 197 - static bool mptcp_rcvbuf_grow(struct sock *sk) 197 + static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval) 198 198 { 199 199 struct mptcp_sock *msk = mptcp_sk(sk); 200 200 const struct net *net = sock_net(sk); 201 - int rcvwin, rcvbuf, cap; 201 + u32 rcvwin, rcvbuf, cap, oldval; 202 + u64 grow; 202 203 204 + oldval = msk->rcvq_space.space; 205 + msk->rcvq_space.space = newval; 203 206 if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) || 204 207 (sk->sk_userlocks & SOCK_RCVBUF_LOCK)) 205 208 return false; 206 209 207 - rcvwin = msk->rcvq_space.space << 1; 210 + /* DRS is always one RTT late. */ 211 + rcvwin = newval << 1; 212 + 213 + /* slow start: allow the sender to double its rate. */ 214 + grow = (u64)rcvwin * (newval - oldval); 215 + do_div(grow, oldval); 216 + rcvwin += grow << 1; 208 217 209 218 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) 210 219 rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq; ··· 343 334 skb_set_owner_r(skb, sk); 344 335 /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */ 345 336 if (sk->sk_socket) 346 - mptcp_rcvbuf_grow(sk); 337 + mptcp_rcvbuf_grow(sk, msk->rcvq_space.space); 347 338 } 348 339 349 340 static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset, ··· 1007 998 if (WARN_ON_ONCE(!msk->recovery)) 1008 999 break; 1009 1000 1010 - WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 1001 + msk->first_pending = mptcp_send_next(sk); 1011 1002 } 1012 1003 1013 1004 dfrag_clear(sk, dfrag); ··· 1299 1290 if (copy == 0) { 1300 1291 u64 snd_una = READ_ONCE(msk->snd_una); 1301 1292 1302 - if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { 1293 + /* No need for zero probe if there are any data pending 1294 + * either at the msk or ssk level; skb is the current write 1295 + * queue tail and can be empty at this point. 
1296 + */ 1297 + if (snd_una != msk->snd_nxt || skb->len || 1298 + skb != tcp_send_head(ssk)) { 1303 1299 tcp_remove_empty_skb(ssk); 1304 1300 return 0; 1305 1301 } ··· 1355 1341 mpext->dsn64); 1356 1342 1357 1343 if (zero_window_probe) { 1344 + MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE); 1358 1345 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; 1359 1346 mpext->frozen = 1; 1360 1347 if (READ_ONCE(msk->csum_enabled)) ··· 1558 1543 1559 1544 mptcp_update_post_push(msk, dfrag, ret); 1560 1545 } 1561 - WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 1546 + msk->first_pending = mptcp_send_next(sk); 1562 1547 1563 1548 if (msk->snd_burst <= 0 || 1564 1549 !sk_stream_memory_free(ssk) || ··· 1918 1903 get_page(dfrag->page); 1919 1904 list_add_tail(&dfrag->list, &msk->rtx_queue); 1920 1905 if (!msk->first_pending) 1921 - WRITE_ONCE(msk->first_pending, dfrag); 1906 + msk->first_pending = dfrag; 1922 1907 } 1923 1908 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, 1924 1909 dfrag->data_seq, dfrag->data_len, dfrag->already_sent, ··· 1951 1936 1952 1937 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); 1953 1938 1954 - static int __mptcp_recvmsg_mskq(struct sock *sk, 1955 - struct msghdr *msg, 1956 - size_t len, int flags, 1939 + static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg, 1940 + size_t len, int flags, int copied_total, 1957 1941 struct scm_timestamping_internal *tss, 1958 1942 int *cmsg_flags) 1959 1943 { 1960 1944 struct mptcp_sock *msk = mptcp_sk(sk); 1961 1945 struct sk_buff *skb, *tmp; 1946 + int total_data_len = 0; 1962 1947 int copied = 0; 1963 1948 1964 1949 skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) { 1965 - u32 offset = MPTCP_SKB_CB(skb)->offset; 1950 + u32 delta, offset = MPTCP_SKB_CB(skb)->offset; 1966 1951 u32 data_len = skb->len - offset; 1967 - u32 count = min_t(size_t, len - copied, data_len); 1952 + u32 count; 1968 1953 int err; 1969 1954 1955 + if (flags & MSG_PEEK) { 1956 + /* skip already peeked skbs */ 1957 + if (total_data_len + data_len <= copied_total) { 1958 + total_data_len += data_len; 1959 + continue; 1960 + } 1961 + 1962 + /* skip the already peeked data in the current skb */ 1963 + delta = copied_total - total_data_len; 1964 + offset += delta; 1965 + data_len -= delta; 1966 + } 1967 + 1968 + count = min_t(size_t, len - copied, data_len); 1970 1969 if (!(flags & MSG_TRUNC)) { 1971 1970 err = skb_copy_datagram_msg(skb, offset, msg, count); 1972 1971 if (unlikely(err < 0)) { ··· 1997 1968 1998 1969 copied += count; 1999 1970 2000 - if (count < data_len) { 2001 - if (!(flags & MSG_PEEK)) { 1971 + if (!(flags & MSG_PEEK)) { 1972 + msk->bytes_consumed += count; 1973 + if (count < data_len) { 2002 1974 MPTCP_SKB_CB(skb)->offset += count; 2003 1975 MPTCP_SKB_CB(skb)->map_seq += count; 2004 - msk->bytes_consumed += count; 1976 + break; 2005 1977 } 2006 - break; 2007 - } 2008 1978 2009 - if (!(flags & MSG_PEEK)) { 2010 1979 /* avoid the indirect call, we know the destructor is sock_rfree */ 2011 1980 skb->destructor = NULL; 2012 1981 skb->sk = NULL; ··· 2012 1985 sk_mem_uncharge(sk, skb->truesize); 2013 1986 __skb_unlink(skb, &sk->sk_receive_queue); 2014 1987 skb_attempt_defer_free(skb); 2015 - msk->bytes_consumed += count; 2016 1988 } 2017 1989 2018 1990 if (copied >= len) ··· 2075 2049 if (msk->rcvq_space.copied <= msk->rcvq_space.space) 2076 2050 goto new_measure; 2077 2051 2078 - msk->rcvq_space.space = msk->rcvq_space.copied; 2079 - if (mptcp_rcvbuf_grow(sk)) { 2080 - 2052 + if 
(mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) { 2081 2053 /* Make subflows follow along. If we do not do this, we 2082 2054 * get drops at subflow level if skbs can't be moved to 2083 2055 * the mptcp rx queue fast enough (announced rcv_win can ··· 2087 2063 2088 2064 ssk = mptcp_subflow_tcp_sock(subflow); 2089 2065 slow = lock_sock_fast(ssk); 2090 - tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied; 2091 - tcp_rcvbuf_grow(ssk); 2066 + /* subflows can be added before tcp_init_transfer() */ 2067 + if (tcp_sk(ssk)->rcvq_space.space) 2068 + tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied); 2092 2069 unlock_sock_fast(ssk, slow); 2093 2070 } 2094 2071 } ··· 2208 2183 while (copied < len) { 2209 2184 int err, bytes_read; 2210 2185 2211 - bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags); 2186 + bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, 2187 + copied, &tss, &cmsg_flags); 2212 2188 if (unlikely(bytes_read < 0)) { 2213 2189 if (!copied) 2214 2190 copied = bytes_read; ··· 2900 2874 struct mptcp_sock *msk = mptcp_sk(sk); 2901 2875 struct mptcp_data_frag *dtmp, *dfrag; 2902 2876 2903 - WRITE_ONCE(msk->first_pending, NULL); 2877 + msk->first_pending = NULL; 2904 2878 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) 2905 2879 dfrag_clear(sk, dfrag); 2906 2880 } ··· 3440 3414 3441 3415 void __mptcp_check_push(struct sock *sk, struct sock *ssk) 3442 3416 { 3443 - if (!mptcp_send_head(sk)) 3444 - return; 3445 - 3446 3417 if (!sock_owned_by_user(sk)) 3447 3418 __mptcp_subflow_push_pending(sk, ssk, false); 3448 3419 else
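The new MSG_PEEK handling above skips data the caller has already seen: whole skbs first, then a partial offset into the current one. A self-contained sketch of that resume-point computation over an array of buffers (illustrative types, not the skb API):

#include <stdio.h>

struct buf { const char *data; unsigned int len; };

/* where should a fresh peek resume, given bytes already peeked? */
static void resume_point(const struct buf *bufs, int n,
                         unsigned int copied_total)
{
    unsigned int total = 0;
    int i;

    for (i = 0; i < n; i++) {
        if (total + bufs[i].len <= copied_total) {
            total += bufs[i].len; /* fully peeked: skip */
            continue;
        }
        printf("resume in buf %d at offset %u\n",
               i, copied_total - total);
        return;
    }
    printf("everything already peeked\n");
}

int main(void)
{
    struct buf q[] = { { "hello", 5 }, { "world", 5 } };

    resume_point(q, 2, 7); /* resume in buf 1 at offset 2 */
    return 0;
}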
+1 -1
net/mptcp/protocol.h
··· 414 414 { 415 415 const struct mptcp_sock *msk = mptcp_sk(sk); 416 416 417 - return READ_ONCE(msk->first_pending); 417 + return msk->first_pending; 418 418 } 419 419 420 420 static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
+1 -1
net/netfilter/nft_connlimit.c
··· 48 48 return; 49 49 } 50 50 51 - count = priv->list->count; 51 + count = READ_ONCE(priv->list->count); 52 52 53 53 if ((count > priv->limit) ^ priv->invert) { 54 54 regs->verdict.code = NFT_BREAK;
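list->count is updated concurrently by the garbage-collection worker, so the datapath now snapshots it with READ_ONCE() instead of risking a torn or compiler-duplicated plain load. The portable C11 equivalent of that single coherent read, with invented names:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int count; /* written by another thread */

static int over_limit(unsigned int limit)
{
    /* one coherent snapshot; the compiler may not re-read it */
    unsigned int c = atomic_load_explicit(&count, memory_order_relaxed);

    return c > limit;
}

int main(void)
{
    atomic_store_explicit(&count, 5, memory_order_relaxed);
    printf("over=%d\n", over_limit(3)); /* over=1 */
    return 0;
}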
+27 -3
net/netfilter/nft_ct.c
··· 22 22 #include <net/netfilter/nf_conntrack_timeout.h> 23 23 #include <net/netfilter/nf_conntrack_l4proto.h> 24 24 #include <net/netfilter/nf_conntrack_expect.h> 25 + #include <net/netfilter/nf_conntrack_seqadj.h> 25 26 26 27 struct nft_ct_helper_obj { 27 28 struct nf_conntrack_helper *helper4; ··· 380 379 } 381 380 #endif 382 381 382 + static void __nft_ct_get_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) 383 + { 384 + #ifdef CONFIG_NF_CONNTRACK_LABELS 385 + if (priv->key == NFT_CT_LABELS) 386 + nf_connlabels_put(ctx->net); 387 + #endif 388 + } 389 + 383 390 static int nft_ct_get_init(const struct nft_ctx *ctx, 384 391 const struct nft_expr *expr, 385 392 const struct nlattr * const tb[]) ··· 422 413 if (tb[NFTA_CT_DIRECTION] != NULL) 423 414 return -EINVAL; 424 415 len = NF_CT_LABELS_MAX_SIZE; 416 + 417 + err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1); 418 + if (err) 419 + return err; 425 420 break; 426 421 #endif 427 422 case NFT_CT_HELPER: ··· 507 494 case IP_CT_DIR_REPLY: 508 495 break; 509 496 default: 510 - return -EINVAL; 497 + err = -EINVAL; 498 + goto err; 511 499 } 512 500 } 513 501 ··· 516 502 err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL, 517 503 NFT_DATA_VALUE, len); 518 504 if (err < 0) 519 - return err; 505 + goto err; 520 506 521 507 err = nf_ct_netns_get(ctx->net, ctx->family); 522 508 if (err < 0) 523 - return err; 509 + goto err; 524 510 525 511 if (priv->key == NFT_CT_BYTES || 526 512 priv->key == NFT_CT_PKTS || ··· 528 514 nf_ct_set_acct(ctx->net, true); 529 515 530 516 return 0; 517 + err: 518 + __nft_ct_get_destroy(ctx, priv); 519 + return err; 531 520 } 532 521 533 522 static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) ··· 643 626 static void nft_ct_get_destroy(const struct nft_ctx *ctx, 644 627 const struct nft_expr *expr) 645 628 { 629 + struct nft_ct *priv = nft_expr_priv(expr); 630 + 631 + __nft_ct_get_destroy(ctx, priv); 646 632 nf_ct_netns_put(ctx->net, ctx->family); 647 633 } 648 634 ··· 1193 1173 if (help) { 1194 1174 rcu_assign_pointer(help->helper, to_assign); 1195 1175 set_bit(IPS_HELPER_BIT, &ct->status); 1176 + 1177 + if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct)) 1178 + if (!nfct_seqadj_ext_add(ct)) 1179 + regs->verdict.code = NF_DROP; 1196 1180 } 1197 1181 } 1198 1182
+1 -1
net/sctp/input.c
··· 190 190 goto discard_release; 191 191 nf_reset_ct(skb); 192 192 193 - if (sk_filter(sk, skb)) 193 + if (sk_filter(sk, skb) || skb->len < sizeof(struct sctp_chunkhdr)) 194 194 goto discard_release; 195 195 196 196 /* Create an SCTP packet structure. */
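The extra length test rejects packets shorter than one chunk header before any parsing touches them. The generic validate-before-read shape, as a small standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct chunkhdr { uint8_t type, flags; uint16_t length; };

static int parse(const uint8_t *buf, size_t len)
{
    struct chunkhdr ch;

    if (len < sizeof(ch))
        return -1;            /* too short: would read past the end */
    memcpy(&ch, buf, sizeof(ch));
    return ch.type;
}

int main(void)
{
    uint8_t runt[2] = { 1, 0 };

    printf("%d\n", parse(runt, sizeof(runt))); /* -1 */
    return 0;
}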
+3 -1
net/tls/tls_device.c
··· 723 723 /* shouldn't get to wraparound: 724 724 * too long in async stage, something bad happened 725 725 */ 726 - if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) 726 + if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) { 727 + tls_offload_rx_resync_async_request_cancel(resync_async); 727 728 return false; 729 + } 728 730 729 731 /* asynchronous stage: log all headers seq such that 730 732 * req_seq <= seq <= end_seq, and wait for real resync request
+1 -2
net/wireless/nl80211.c
··· 4136 4136 rdev->wiphy.txq_quantum = old_txq_quantum; 4137 4137 } 4138 4138 4139 - if (old_rts_threshold) 4140 - kfree(old_radio_rts_threshold); 4139 + kfree(old_radio_rts_threshold); 4141 4140 return result; 4142 4141 } 4143 4142
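kfree(NULL), like free(NULL), is defined to be a no-op, so the old_rts_threshold guard was redundant and the pointer can be freed unconditionally:

#include <stdlib.h>

int main(void)
{
    char *p = NULL;

    free(p); /* well-defined no-op, no guard needed */
    return 0;
}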
+3
scripts/kconfig/mconf.c
··· 12 12 #include <errno.h> 13 13 #include <fcntl.h> 14 14 #include <limits.h> 15 + #include <locale.h> 15 16 #include <stdarg.h> 16 17 #include <stdlib.h> 17 18 #include <string.h> ··· 931 930 int res; 932 931 933 932 signal(SIGINT, sig_handler); 933 + 934 + setlocale(LC_ALL, ""); 934 935 935 936 if (ac > 1 && strcmp(av[1], "-s") == 0) { 936 937 silent = 1;
+3
scripts/kconfig/nconf.c
··· 7 7 #ifndef _GNU_SOURCE 8 8 #define _GNU_SOURCE 9 9 #endif 10 + #include <locale.h> 10 11 #include <string.h> 11 12 #include <strings.h> 12 13 #include <stdlib.h> ··· 1478 1477 { 1479 1478 int lines, columns; 1480 1479 char *mode; 1480 + 1481 + setlocale(LC_ALL, ""); 1481 1482 1482 1483 if (ac > 1 && strcmp(av[1], "-s") == 0) { 1483 1484 /* Silence conf_read() until the real callback is set up */
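Both front ends now call setlocale() before any curses setup; without it the process stays in the "C" locale and UTF-8 menu text and line drawing render as mojibake. The ordering that matters, as a minimal sketch:

#include <locale.h>
#include <stdio.h>

int main(void)
{
    /* adopt the user's locale before any curses initialization */
    setlocale(LC_ALL, "");
    printf("locale: %s\n", setlocale(LC_ALL, NULL));
    /* initscr() / dialog setup would follow here */
    return 0;
}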
+1 -1
scripts/package/install-extmod-build
··· 63 63 # Clear VPATH and srcroot because the source files reside in the output 64 64 # directory. 65 65 # shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make 66 - "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-base=. "${destdir}")"/scripts 66 + "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-to=. "${destdir}")"/scripts 67 67 68 68 rm -f "${destdir}/scripts/Kbuild" 69 69 fi
+14
sound/hda/codecs/realtek/alc269.c
··· 3736 3736 ALC285_FIXUP_ASUS_GA605K_I2C_SPEAKER2_TO_DAC1, 3737 3737 ALC269_FIXUP_POSITIVO_P15X_HEADSET_MIC, 3738 3738 ALC289_FIXUP_ASUS_ZEPHYRUS_DUAL_SPK, 3739 + ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE, 3739 3740 }; 3740 3741 3741 3742 /* A special fixup for Lenovo C940 and Yoga Duet 7; ··· 6173 6172 { 0x1e, 0x90170150 }, /* Internal Speaker */ 6174 6173 { } 6175 6174 }, 6175 + }, 6176 + [ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE] = { 6177 + .type = HDA_FIXUP_PINS, 6178 + .v.pins = (const struct hda_pintbl[]) { 6179 + { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */ 6180 + { 0x1a, 0x22a190a0 }, /* dock mic */ 6181 + { } 6182 + }, 6183 + .chained = true, 6184 + .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST 6176 6185 } 6177 6186 }; 6178 6187 ··· 6589 6578 SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre x360 2-in-1 Laptop 16-aa0xxx", ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX), 6590 6579 SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2), 6591 6580 SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS), 6581 + SND_PCI_QUIRK(0x103c, 0x8c2d, "HP Victus 15-fa1xxx (MB 8C2D)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), 6592 6582 SND_PCI_QUIRK(0x103c, 0x8c30, "HP Victus 15-fb1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), 6593 6583 SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 6594 6584 SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), ··· 6971 6959 SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 6972 6960 SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 6973 6961 SND_PCI_QUIRK(0x1558, 0xa554, "VAIO VJFH52", ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE), 6962 + SND_PCI_QUIRK(0x1558, 0xa559, "VAIO RPL", ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE), 6974 6963 SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 6975 6964 SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 6976 6965 SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), ··· 7093 7080 SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), 7094 7081 SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), 7095 7082 SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2), 7083 + HDA_CODEC_QUIRK(0x17aa, 0x391c, "Lenovo Yoga 7 2-in-1 14AKP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), 7096 7084 SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2), 7097 7085 SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2), 7098 7086 SND_PCI_QUIRK(0x17aa, 0x38b7, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
+66 -168
sound/soc/qcom/qdsp6/audioreach.c
··· 612 612 struct apm_module_frame_size_factor_cfg *fs_cfg; 613 613 struct apm_module_param_data *param_data; 614 614 struct apm_module_hw_ep_mf_cfg *hw_cfg; 615 - int ic_sz, ep_sz, fs_sz, dl_sz; 616 - int rc, payload_size; 617 - struct gpr_pkt *pkt; 615 + int ic_sz = APM_DP_INTF_CFG_PSIZE; 616 + int ep_sz = APM_HW_EP_CFG_PSIZE; 617 + int fs_sz = APM_FS_CFG_PSIZE; 618 + int size = ic_sz + ep_sz + fs_sz; 618 619 void *p; 619 - 620 - ic_sz = APM_DP_INTF_CFG_PSIZE; 621 - ep_sz = APM_HW_EP_CFG_PSIZE; 622 - fs_sz = APM_FS_CFG_PSIZE; 623 - dl_sz = 0; 624 - 625 - payload_size = ic_sz + ep_sz + fs_sz + dl_sz; 626 - 627 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 620 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(size, APM_CMD_SET_CFG, 0); 628 621 if (IS_ERR(pkt)) 629 622 return PTR_ERR(pkt); 630 623 ··· 656 663 intf_cfg->cfg.mst_idx = 0; 657 664 intf_cfg->cfg.dptx_idx = cfg->dp_idx; 658 665 659 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 660 - 661 - kfree(pkt); 662 - 663 - return rc; 666 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 664 667 } 665 668 666 669 /* LPASS Codec DMA port Module Media Format Setup */ ··· 669 680 struct apm_module_hw_ep_power_mode_cfg *pm_cfg; 670 681 struct apm_module_param_data *param_data; 671 682 struct apm_module_hw_ep_mf_cfg *hw_cfg; 672 - int ic_sz, ep_sz, fs_sz, pm_sz, dl_sz; 673 - int rc, payload_size; 674 - struct gpr_pkt *pkt; 683 + int ic_sz = APM_CDMA_INTF_CFG_PSIZE; 684 + int ep_sz = APM_HW_EP_CFG_PSIZE; 685 + int fs_sz = APM_FS_CFG_PSIZE; 686 + int pm_sz = APM_HW_EP_PMODE_CFG_PSIZE; 687 + int size = ic_sz + ep_sz + fs_sz + pm_sz; 675 688 void *p; 676 - 677 - ic_sz = APM_CDMA_INTF_CFG_PSIZE; 678 - ep_sz = APM_HW_EP_CFG_PSIZE; 679 - fs_sz = APM_FS_CFG_PSIZE; 680 - pm_sz = APM_HW_EP_PMODE_CFG_PSIZE; 681 - dl_sz = 0; 682 - 683 - payload_size = ic_sz + ep_sz + fs_sz + pm_sz + dl_sz; 684 - 685 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 689 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(size, APM_CMD_SET_CFG, 0); 686 690 if (IS_ERR(pkt)) 687 691 return PTR_ERR(pkt); 688 692 ··· 723 741 param_data->param_size = pm_sz - APM_MODULE_PARAM_DATA_SIZE; 724 742 pm_cfg->power_mode.power_mode = 0; 725 743 726 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 727 - 728 - kfree(pkt); 729 - 730 - return rc; 744 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 731 745 } 732 746 733 747 int audioreach_send_u32_param(struct q6apm_graph *graph, struct audioreach_module *module, 734 748 uint32_t param_id, uint32_t param_val) 735 749 { 736 750 struct apm_module_param_data *param_data; 737 - struct gpr_pkt *pkt; 751 + struct gpr_pkt *pkt __free(kfree) = NULL; 738 752 uint32_t *param; 739 - int rc, payload_size; 740 - void *p; 741 - 742 - payload_size = sizeof(uint32_t) + APM_MODULE_PARAM_DATA_SIZE; 743 - p = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 753 + int payload_size = sizeof(uint32_t) + APM_MODULE_PARAM_DATA_SIZE; 754 + void *p = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 744 755 if (IS_ERR(p)) 745 756 return -ENOMEM; 746 757 ··· 750 775 param = p; 751 776 *param = param_val; 752 777 753 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 754 - 755 - kfree(pkt); 756 - 757 - return rc; 778 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 758 779 } 759 780 EXPORT_SYMBOL_GPL(audioreach_send_u32_param); 760 781 ··· 786 815 struct audioreach_module *module, 787 816 struct audioreach_module_config *cfg) 788 817 { 789 - int payload_size = 
le32_to_cpu(module->data->size); 790 - struct gpr_pkt *pkt; 791 - int rc; 818 + int size = le32_to_cpu(module->data->size); 792 819 void *p; 793 - 794 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 820 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(size, APM_CMD_SET_CFG, 0); 795 821 if (IS_ERR(pkt)) 796 822 return PTR_ERR(pkt); 797 823 798 824 p = (void *)pkt + GPR_HDR_SIZE + APM_CMD_HDR_SIZE; 799 825 800 - memcpy(p, module->data->data, payload_size); 826 + memcpy(p, module->data->data, size); 801 827 802 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 803 - 804 - kfree(pkt); 805 - 806 - return rc; 828 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 807 829 } 808 830 809 831 static int audioreach_mfc_set_media_format(struct q6apm_graph *graph, ··· 806 842 struct apm_module_param_data *param_data; 807 843 struct param_id_mfc_media_format *media_format; 808 844 uint32_t num_channels = cfg->num_channels; 809 - int payload_size; 810 - struct gpr_pkt *pkt; 811 - int rc, i; 845 + int payload_size = APM_MFC_CFG_PSIZE(media_format, num_channels) + 846 + APM_MODULE_PARAM_DATA_SIZE; 847 + int i; 812 848 void *p; 813 - 814 - payload_size = APM_MFC_CFG_PSIZE(media_format, num_channels) + 815 - APM_MODULE_PARAM_DATA_SIZE; 816 - 817 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 849 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 818 850 if (IS_ERR(pkt)) 819 851 return PTR_ERR(pkt); 820 852 ··· 830 870 for (i = 0; i < num_channels; i++) 831 871 media_format->channel_mapping[i] = cfg->channel_map[i]; 832 872 833 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 834 - 835 - kfree(pkt); 836 - 837 - return rc; 873 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 838 874 } 839 875 840 876 static int audioreach_set_compr_media_format(struct media_format *media_fmt_hdr, ··· 918 962 int audioreach_compr_set_param(struct q6apm_graph *graph, struct audioreach_module_config *mcfg) 919 963 { 920 964 struct media_format *header; 921 - struct gpr_pkt *pkt; 922 - int iid, payload_size, rc; 965 + int rc; 923 966 void *p; 924 - 925 - payload_size = sizeof(struct apm_sh_module_media_fmt_cmd); 926 - 927 - iid = q6apm_graph_get_rx_shmem_module_iid(graph); 928 - pkt = audioreach_alloc_cmd_pkt(payload_size, DATA_CMD_WR_SH_MEM_EP_MEDIA_FORMAT, 929 - 0, graph->port->id, iid); 930 - 967 + int iid = q6apm_graph_get_rx_shmem_module_iid(graph); 968 + int payload_size = sizeof(struct apm_sh_module_media_fmt_cmd); 969 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_cmd_pkt(payload_size, 970 + DATA_CMD_WR_SH_MEM_EP_MEDIA_FORMAT, 971 + 0, graph->port->id, iid); 931 972 if (IS_ERR(pkt)) 932 973 return -ENOMEM; 974 + 933 975 934 976 p = (void *)pkt + GPR_HDR_SIZE; 935 977 header = p; 936 978 rc = audioreach_set_compr_media_format(header, p, mcfg); 937 - if (rc) { 938 - kfree(pkt); 979 + if (rc) 939 980 return rc; 940 - } 941 981 942 - rc = gpr_send_port_pkt(graph->port, pkt); 943 - kfree(pkt); 944 - 945 - return rc; 982 + return gpr_send_port_pkt(graph->port, pkt); 946 983 } 947 984 EXPORT_SYMBOL_GPL(audioreach_compr_set_param); 948 985 ··· 947 998 struct apm_module_param_data *param_data; 948 999 struct apm_i2s_module_intf_cfg *intf_cfg; 949 1000 struct apm_module_hw_ep_mf_cfg *hw_cfg; 950 - int ic_sz, ep_sz, fs_sz; 951 - int rc, payload_size; 952 - struct gpr_pkt *pkt; 1001 + int ic_sz = APM_I2S_INTF_CFG_PSIZE; 1002 + int ep_sz = APM_HW_EP_CFG_PSIZE; 1003 + int fs_sz = APM_FS_CFG_PSIZE; 1004 + int size = 
ic_sz + ep_sz + fs_sz; 953 1005 void *p; 954 - 955 - ic_sz = APM_I2S_INTF_CFG_PSIZE; 956 - ep_sz = APM_HW_EP_CFG_PSIZE; 957 - fs_sz = APM_FS_CFG_PSIZE; 958 - 959 - payload_size = ic_sz + ep_sz + fs_sz; 960 - 961 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 1006 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(size, APM_CMD_SET_CFG, 0); 962 1007 if (IS_ERR(pkt)) 963 1008 return PTR_ERR(pkt); 964 1009 ··· 1003 1060 param_data->param_size = fs_sz - APM_MODULE_PARAM_DATA_SIZE; 1004 1061 fs_cfg->frame_size_factor = 1; 1005 1062 1006 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 1007 - 1008 - kfree(pkt); 1009 - 1010 - return rc; 1063 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 1011 1064 } 1012 1065 1013 1066 static int audioreach_logging_set_media_format(struct q6apm_graph *graph, ··· 1011 1072 { 1012 1073 struct apm_module_param_data *param_data; 1013 1074 struct data_logging_config *cfg; 1014 - int rc, payload_size; 1015 - struct gpr_pkt *pkt; 1075 + int size = sizeof(*cfg) + APM_MODULE_PARAM_DATA_SIZE; 1016 1076 void *p; 1017 - 1018 - payload_size = sizeof(*cfg) + APM_MODULE_PARAM_DATA_SIZE; 1019 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 1077 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(size, APM_CMD_SET_CFG, 0); 1020 1078 if (IS_ERR(pkt)) 1021 1079 return PTR_ERR(pkt); 1022 1080 ··· 1023 1087 param_data->module_instance_id = module->instance_id; 1024 1088 param_data->error_code = 0; 1025 1089 param_data->param_id = PARAM_ID_DATA_LOGGING_CONFIG; 1026 - param_data->param_size = payload_size - APM_MODULE_PARAM_DATA_SIZE; 1090 + param_data->param_size = size - APM_MODULE_PARAM_DATA_SIZE; 1027 1091 1028 1092 p = p + APM_MODULE_PARAM_DATA_SIZE; 1029 1093 cfg = p; ··· 1031 1095 cfg->log_tap_point_id = module->log_tap_point_id; 1032 1096 cfg->mode = module->log_mode; 1033 1097 1034 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 1035 - 1036 - kfree(pkt); 1037 - 1038 - return rc; 1098 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 1039 1099 } 1040 1100 1041 1101 static int audioreach_pcm_set_media_format(struct q6apm_graph *graph, ··· 1042 1110 uint32_t num_channels = mcfg->num_channels; 1043 1111 struct apm_pcm_module_media_fmt_cmd *cfg; 1044 1112 struct apm_module_param_data *param_data; 1045 - int rc, payload_size; 1046 - struct gpr_pkt *pkt; 1113 + int payload_size; 1114 + struct gpr_pkt *pkt __free(kfree) = NULL; 1047 1115 1048 1116 if (num_channels > 4) { 1049 1117 dev_err(graph->dev, "Error: Invalid channels (%d)!\n", num_channels); ··· 1078 1146 media_cfg->bits_per_sample = mcfg->bit_width; 1079 1147 memcpy(media_cfg->channel_mapping, mcfg->channel_map, mcfg->num_channels); 1080 1148 1081 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 1082 - 1083 - kfree(pkt); 1084 - 1085 - return rc; 1149 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 1086 1150 } 1087 1151 1088 1152 static int audioreach_shmem_set_media_format(struct q6apm_graph *graph, ··· 1090 1162 struct payload_media_fmt_pcm *cfg; 1091 1163 struct media_format *header; 1092 1164 int rc, payload_size; 1093 - struct gpr_pkt *pkt; 1165 + struct gpr_pkt *pkt __free(kfree) = NULL; 1094 1166 void *p; 1095 1167 1096 1168 if (num_channels > 4) { ··· 1132 1204 memcpy(cfg->channel_mapping, mcfg->channel_map, mcfg->num_channels); 1133 1205 } else { 1134 1206 rc = audioreach_set_compr_media_format(header, p, mcfg); 1135 - if (rc) { 1136 - kfree(pkt); 1207 + if (rc) 1137 1208 return rc; 1138 - } 1139 1209 } 1140 1210 1141 - rc = 
audioreach_graph_send_cmd_sync(graph, pkt, 0); 1142 - 1143 - kfree(pkt); 1144 - 1145 - return rc; 1211 + return audioreach_graph_send_cmd_sync(graph, pkt, 0); 1146 1212 } 1147 1213 1148 1214 int audioreach_gain_set_vol_ctrl(struct q6apm *apm, struct audioreach_module *module, int vol) 1149 1215 { 1150 1216 struct param_id_vol_ctrl_master_gain *cfg; 1151 1217 struct apm_module_param_data *param_data; 1152 - int rc, payload_size; 1153 - struct gpr_pkt *pkt; 1218 + int size = sizeof(*cfg) + APM_MODULE_PARAM_DATA_SIZE; 1154 1219 void *p; 1155 - 1156 - payload_size = sizeof(*cfg) + APM_MODULE_PARAM_DATA_SIZE; 1157 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 1220 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(size, APM_CMD_SET_CFG, 0); 1158 1221 if (IS_ERR(pkt)) 1159 1222 return PTR_ERR(pkt); 1160 1223 ··· 1155 1236 param_data->module_instance_id = module->instance_id; 1156 1237 param_data->error_code = 0; 1157 1238 param_data->param_id = PARAM_ID_VOL_CTRL_MASTER_GAIN; 1158 - param_data->param_size = payload_size - APM_MODULE_PARAM_DATA_SIZE; 1239 + param_data->param_size = size - APM_MODULE_PARAM_DATA_SIZE; 1159 1240 1160 1241 p = p + APM_MODULE_PARAM_DATA_SIZE; 1161 1242 cfg = p; 1162 1243 cfg->master_gain = vol; 1163 - rc = q6apm_send_cmd_sync(apm, pkt, 0); 1164 - 1165 - kfree(pkt); 1166 - 1167 - return rc; 1244 + return q6apm_send_cmd_sync(apm, pkt, 0); 1168 1245 } 1169 1246 EXPORT_SYMBOL_GPL(audioreach_gain_set_vol_ctrl); 1170 1247 ··· 1168 1253 { 1169 1254 struct apm_module_param_data *param_data; 1170 1255 struct apm_gain_module_cfg *cfg; 1171 - int rc, payload_size; 1172 - struct gpr_pkt *pkt; 1173 - 1174 - payload_size = APM_GAIN_CFG_PSIZE; 1175 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, APM_CMD_SET_CFG, 0); 1256 + int size = APM_GAIN_CFG_PSIZE; 1257 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(size, APM_CMD_SET_CFG, 0); 1176 1258 if (IS_ERR(pkt)) 1177 1259 return PTR_ERR(pkt); 1178 1260 ··· 1179 1267 param_data->module_instance_id = module->instance_id; 1180 1268 param_data->error_code = 0; 1181 1269 param_data->param_id = APM_PARAM_ID_GAIN; 1182 - param_data->param_size = payload_size - APM_MODULE_PARAM_DATA_SIZE; 1270 + param_data->param_size = size - APM_MODULE_PARAM_DATA_SIZE; 1183 1271 1184 1272 cfg->gain_cfg.gain = module->gain; 1185 1273 1186 - rc = q6apm_send_cmd_sync(graph->apm, pkt, 0); 1187 - 1188 - kfree(pkt); 1189 - 1190 - return rc; 1274 + return q6apm_send_cmd_sync(graph->apm, pkt, 0); 1191 1275 } 1192 1276 1193 1277 int audioreach_set_media_format(struct q6apm_graph *graph, struct audioreach_module *module, ··· 1268 1360 struct apm_cmd_shared_mem_map_regions *cmd; 1269 1361 uint32_t num_regions, buf_sz, payload_size; 1270 1362 struct audioreach_graph_data *data; 1271 - struct gpr_pkt *pkt; 1363 + struct gpr_pkt *pkt __free(kfree) = NULL; 1272 1364 void *p; 1273 - int rc, i; 1365 + int i; 1274 1366 1275 1367 if (dir == SNDRV_PCM_STREAM_PLAYBACK) 1276 1368 data = &graph->rx_data; ··· 1317 1409 } 1318 1410 mutex_unlock(&graph->lock); 1319 1411 1320 - rc = audioreach_graph_send_cmd_sync(graph, pkt, APM_CMD_RSP_SHARED_MEM_MAP_REGIONS); 1321 - 1322 - kfree(pkt); 1323 - 1324 - return rc; 1412 + return audioreach_graph_send_cmd_sync(graph, pkt, APM_CMD_RSP_SHARED_MEM_MAP_REGIONS); 1325 1413 } 1326 1414 EXPORT_SYMBOL_GPL(audioreach_map_memory_regions); 1327 1415 1328 1416 int audioreach_shared_memory_send_eos(struct q6apm_graph *graph) 1329 1417 { 1330 1418 struct data_cmd_wr_sh_mem_ep_eos *eos; 1331 - 
struct gpr_pkt *pkt; 1332 - int rc = 0, iid; 1333 - 1334 - iid = q6apm_graph_get_rx_shmem_module_iid(graph); 1335 - pkt = audioreach_alloc_cmd_pkt(sizeof(*eos), DATA_CMD_WR_SH_MEM_EP_EOS, 0, 1336 - graph->port->id, iid); 1419 + int iid = q6apm_graph_get_rx_shmem_module_iid(graph); 1420 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_cmd_pkt(sizeof(*eos), 1421 + DATA_CMD_WR_SH_MEM_EP_EOS, 0, graph->port->id, iid); 1337 1422 if (IS_ERR(pkt)) 1338 1423 return PTR_ERR(pkt); 1339 1424 ··· 1334 1433 1335 1434 eos->policy = WR_SH_MEM_EP_EOS_POLICY_LAST; 1336 1435 1337 - rc = gpr_send_port_pkt(graph->port, pkt); 1338 - kfree(pkt); 1339 - 1340 - return rc; 1436 + return gpr_send_port_pkt(graph->port, pkt); 1341 1437 } 1342 1438 EXPORT_SYMBOL_GPL(audioreach_shared_memory_send_eos);
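The conversions above lean on the kernel's __free(kfree) scope-based cleanup: the packet is freed automatically on every return path, removing the rc/kfree()/return ladders. A userspace analogue using the same compiler mechanism (GCC/Clang cleanup attribute; helper names invented):

#include <stdio.h>
#include <stdlib.h>

static void freep(void *p)
{
    free(*(void **)p);
}

#define __cleanup_free __attribute__((cleanup(freep)))

static int send_cfg(int fail)
{
    __cleanup_free char *pkt = malloc(64);

    if (!pkt)
        return -1;
    if (fail)
        return -2; /* pkt freed automatically here ... */
    snprintf(pkt, 64, "cfg");
    return 0;      /* ... and here */
}

int main(void)
{
    printf("%d %d\n", send_cfg(0), send_cfg(1));
    return 0;
}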
+80 -99
sound/soc/qcom/qdsp6/q6adm.c
··· 109 109 110 110 } 111 111 112 + static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp, 113 + struct apr_pkt *pkt, uint32_t rsp_opcode) 114 + { 115 + struct device *dev = adm->dev; 116 + uint32_t opcode = pkt->hdr.opcode; 117 + int ret; 118 + 119 + mutex_lock(&adm->lock); 120 + copp->result.opcode = 0; 121 + copp->result.status = 0; 122 + ret = apr_send_pkt(adm->apr, pkt); 123 + if (ret < 0) { 124 + dev_err(dev, "Failed to send APR packet\n"); 125 + ret = -EINVAL; 126 + goto err; 127 + } 128 + 129 + /* Wait for the callback with copp id */ 130 + if (rsp_opcode) 131 + ret = wait_event_timeout(copp->wait, 132 + (copp->result.opcode == opcode) || 133 + (copp->result.opcode == rsp_opcode), 134 + msecs_to_jiffies(TIMEOUT_MS)); 135 + else 136 + ret = wait_event_timeout(copp->wait, 137 + (copp->result.opcode == opcode), 138 + msecs_to_jiffies(TIMEOUT_MS)); 139 + 140 + if (!ret) { 141 + dev_err(dev, "ADM copp cmd timedout\n"); 142 + ret = -ETIMEDOUT; 143 + } else if (copp->result.status > 0) { 144 + dev_err(dev, "DSP returned error[%d]\n", 145 + copp->result.status); 146 + ret = -EINVAL; 147 + } 148 + 149 + err: 150 + mutex_unlock(&adm->lock); 151 + return ret; 152 + } 153 + 154 + static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp, 155 + int port_id, int copp_idx) 156 + { 157 + struct apr_pkt close; 158 + 159 + close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, 160 + APR_HDR_LEN(APR_HDR_SIZE), 161 + APR_PKT_VER); 162 + close.hdr.pkt_size = sizeof(close); 163 + close.hdr.src_port = port_id; 164 + close.hdr.dest_port = copp->id; 165 + close.hdr.token = port_id << 16 | copp_idx; 166 + close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5; 167 + 168 + return q6adm_apr_send_copp_pkt(adm, copp, &close, 0); 169 + } 170 + 112 171 static void q6adm_free_copp(struct kref *ref) 113 172 { 114 173 struct q6copp *c = container_of(ref, struct q6copp, refcount); 115 174 struct q6adm *adm = c->adm; 116 175 unsigned long flags; 176 + int ret; 177 + 178 + ret = q6adm_device_close(adm, c, c->afe_port, c->copp_idx); 179 + if (ret < 0) 180 + dev_err(adm->dev, "Failed to close copp %d\n", ret); 117 181 118 182 spin_lock_irqsave(&adm->copps_list_lock, flags); 119 183 clear_bit(c->copp_idx, &adm->copp_bitmap[c->afe_port]); ··· 219 155 switch (result->opcode) { 220 156 case ADM_CMD_DEVICE_OPEN_V5: 221 157 case ADM_CMD_DEVICE_CLOSE_V5: 222 - copp = q6adm_find_copp(adm, port_idx, copp_idx); 223 - if (!copp) 224 - return 0; 225 - 226 - copp->result = *result; 227 - wake_up(&copp->wait); 228 - kref_put(&copp->refcount, q6adm_free_copp); 158 + list_for_each_entry(copp, &adm->copps_list, node) { 159 + if ((port_idx == copp->afe_port) && (copp_idx == copp->copp_idx)) { 160 + copp->result = *result; 161 + wake_up(&copp->wait); 162 + break; 163 + } 164 + } 229 165 break; 230 166 case ADM_CMD_MATRIX_MAP_ROUTINGS_V5: 231 167 adm->result = *result; ··· 298 234 return c; 299 235 } 300 236 301 - static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp, 302 - struct apr_pkt *pkt, uint32_t rsp_opcode) 303 - { 304 - struct device *dev = adm->dev; 305 - uint32_t opcode = pkt->hdr.opcode; 306 - int ret; 307 - 308 - mutex_lock(&adm->lock); 309 - copp->result.opcode = 0; 310 - copp->result.status = 0; 311 - ret = apr_send_pkt(adm->apr, pkt); 312 - if (ret < 0) { 313 - dev_err(dev, "Failed to send APR packet\n"); 314 - ret = -EINVAL; 315 - goto err; 316 - } 317 - 318 - /* Wait for the callback with copp id */ 319 - if (rsp_opcode) 320 - ret = wait_event_timeout(copp->wait, 321 - 
(copp->result.opcode == opcode) || 322 - (copp->result.opcode == rsp_opcode), 323 - msecs_to_jiffies(TIMEOUT_MS)); 324 - else 325 - ret = wait_event_timeout(copp->wait, 326 - (copp->result.opcode == opcode), 327 - msecs_to_jiffies(TIMEOUT_MS)); 328 - 329 - if (!ret) { 330 - dev_err(dev, "ADM copp cmd timedout\n"); 331 - ret = -ETIMEDOUT; 332 - } else if (copp->result.status > 0) { 333 - dev_err(dev, "DSP returned error[%d]\n", 334 - copp->result.status); 335 - ret = -EINVAL; 336 - } 337 - 338 - err: 339 - mutex_unlock(&adm->lock); 340 - return ret; 341 - } 342 - 343 - static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp, 344 - int port_id, int copp_idx) 345 - { 346 - struct apr_pkt close; 347 - 348 - close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, 349 - APR_HDR_LEN(APR_HDR_SIZE), 350 - APR_PKT_VER); 351 - close.hdr.pkt_size = sizeof(close); 352 - close.hdr.src_port = port_id; 353 - close.hdr.dest_port = copp->id; 354 - close.hdr.token = port_id << 16 | copp_idx; 355 - close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5; 356 - 357 - return q6adm_apr_send_copp_pkt(adm, copp, &close, 0); 358 - } 359 - 360 237 static struct q6copp *q6adm_find_matching_copp(struct q6adm *adm, 361 238 int port_id, int topology, 362 239 int mode, int rate, ··· 330 325 struct q6adm_cmd_device_open_v5 *open; 331 326 int afe_port = q6afe_get_port_id(port_id); 332 327 struct apr_pkt *pkt; 333 - void *p; 334 - int ret, pkt_size; 335 - 336 - pkt_size = APR_HDR_SIZE + sizeof(*open); 337 - p = kzalloc(pkt_size, GFP_KERNEL); 328 + int ret, pkt_size = APR_HDR_SIZE + sizeof(*open); 329 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 338 330 if (!p) 339 331 return -ENOMEM; 340 332 ··· 356 354 ret = q6dsp_map_channels(&open->dev_channel_mapping[0], 357 355 channel_mode); 358 356 if (ret) 359 - goto err; 357 + return ret; 360 358 361 - ret = q6adm_apr_send_copp_pkt(adm, copp, pkt, 362 - ADM_CMDRSP_DEVICE_OPEN_V5); 363 - 364 - err: 365 - kfree(pkt); 366 - return ret; 359 + return q6adm_apr_send_copp_pkt(adm, copp, pkt, ADM_CMDRSP_DEVICE_OPEN_V5); 367 360 } 368 361 369 362 /** ··· 461 464 struct q6adm_session_map_node_v5 *node; 462 465 struct apr_pkt *pkt; 463 466 uint16_t *copps_list; 464 - int pkt_size, ret, i, copp_idx; 465 - void *matrix_map; 466 - struct q6copp *copp; 467 - 467 + int ret, i, copp_idx; 468 468 /* Assumes port_ids have already been validated during adm_open */ 469 - pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) + 469 + struct q6copp *copp; 470 + int pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) + 470 471 (sizeof(uint32_t) * payload_map.num_copps)); 471 - 472 - matrix_map = kzalloc(pkt_size, GFP_KERNEL); 472 + void *matrix_map __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 473 473 if (!matrix_map) 474 474 return -ENOMEM; 475 475 ··· 504 510 if (port_idx < 0) { 505 511 dev_err(dev, "Invalid port_id %d\n", 506 512 payload_map.port_id[i]); 507 - kfree(pkt); 508 513 return -EINVAL; 509 514 } 510 515 copp_idx = payload_map.copp_idx[i]; 511 516 512 517 copp = q6adm_find_copp(adm, port_idx, copp_idx); 513 - if (!copp) { 514 - kfree(pkt); 518 + if (!copp) 515 519 return -EINVAL; 516 - } 517 520 518 521 copps_list[i] = copp->id; 519 522 kref_put(&copp->refcount, q6adm_free_copp); ··· 543 552 544 553 fail_cmd: 545 554 mutex_unlock(&adm->lock); 546 - kfree(pkt); 547 555 return ret; 548 556 } 549 557 EXPORT_SYMBOL_GPL(q6adm_matrix_map); ··· 557 567 */ 558 568 int q6adm_close(struct device *dev, struct q6copp *copp) 559 569 { 560 - struct q6adm *adm = 
dev_get_drvdata(dev->parent); 561 - int ret = 0; 562 - 563 - ret = q6adm_device_close(adm, copp, copp->afe_port, copp->copp_idx); 564 - if (ret < 0) { 565 - dev_err(adm->dev, "Failed to close copp %d\n", ret); 566 - return ret; 567 - } 568 - 569 570 kref_put(&copp->refcount, q6adm_free_copp); 570 571 571 572 return 0;
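Moving q6adm_device_close() into the kref release function means the DSP teardown runs exactly once, when the last reference drops, instead of racing with callback-side users in q6adm_close(). A minimal refcount sketch of that shape (illustrative types):

#include <stdio.h>

struct copp {
    int refs;
    void (*release)(struct copp *);
};

static void copp_put(struct copp *c)
{
    if (--c->refs == 0)
        c->release(c); /* teardown runs exactly once, here */
}

static void copp_release(struct copp *c)
{
    (void)c;
    printf("device closed\n");
}

int main(void)
{
    struct copp c = { .refs = 2, .release = copp_release };

    copp_put(&c); /* a callback path still holds a reference */
    copp_put(&c); /* last put: prints "device closed" */
    return 0;
}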
+14 -35
sound/soc/qcom/qdsp6/q6afe.c
··· 946 946 { 947 947 struct q6afe_port *p; 948 948 struct q6afe_port *ret = NULL; 949 - unsigned long flags; 950 949 951 - spin_lock_irqsave(&afe->port_list_lock, flags); 950 + guard(spinlock)(&afe->port_list_lock); 952 951 list_for_each_entry(p, &afe->port_list, node) 953 952 if (p->token == token) { 954 953 ret = p; ··· 955 956 break; 956 957 } 957 958 958 - spin_unlock_irqrestore(&afe->port_list_lock, flags); 959 959 return ret; 960 960 } 961 961 ··· 1075 1077 struct afe_svc_cmd_set_param *param; 1076 1078 struct afe_port_param_data_v2 *pdata; 1077 1079 struct apr_pkt *pkt; 1078 - int ret, pkt_size; 1079 - void *p, *pl; 1080 - 1081 - pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize; 1082 - p = kzalloc(pkt_size, GFP_KERNEL); 1080 + int ret, pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize; 1081 + void *pl; 1082 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1083 1083 if (!p) 1084 1084 return -ENOMEM; 1085 1085 ··· 1108 1112 if (ret) 1109 1113 dev_err(afe->dev, "AFE set params failed %d\n", ret); 1110 1114 1111 - kfree(pkt); 1112 1115 return ret; 1113 1116 } 1114 1117 ··· 1126 1131 struct q6afe *afe = port->afe; 1127 1132 struct apr_pkt *pkt; 1128 1133 u16 port_id = port->id; 1129 - int ret, pkt_size; 1130 - void *p, *pl; 1131 - 1132 - pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize; 1133 - p = kzalloc(pkt_size, GFP_KERNEL); 1134 + int ret, pkt_size = APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata) + psize; 1135 + void *pl; 1136 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1134 1137 if (!p) 1135 1138 return -ENOMEM; 1136 1139 ··· 1161 1168 dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n", 1162 1169 port_id, ret); 1163 1170 1164 - kfree(pkt); 1165 1171 return ret; 1166 1172 } 1167 1173 ··· 1277 1285 int port_id = port->id; 1278 1286 int ret = 0; 1279 1287 int index, pkt_size; 1280 - void *p; 1288 + void *p __free(kfree) = NULL; 1281 1289 1282 1290 index = port->token; 1283 1291 if (index < 0 || index >= AFE_PORT_MAX) { ··· 1308 1316 if (ret) 1309 1317 dev_err(afe->dev, "AFE close failed %d\n", ret); 1310 1318 1311 - kfree(pkt); 1312 1319 return ret; 1313 1320 } 1314 1321 EXPORT_SYMBOL_GPL(q6afe_port_stop); ··· 1667 1676 int ret, param_id = port->cfg_type; 1668 1677 struct apr_pkt *pkt; 1669 1678 int pkt_size; 1670 - void *p; 1679 + void *p __free(kfree) = NULL; 1671 1680 1672 1681 ret = q6afe_port_set_param_v2(port, &port->port_cfg, param_id, 1673 1682 AFE_MODULE_AUDIO_DEV_INTERFACE, ··· 1713 1722 dev_err(afe->dev, "AFE enable for port 0x%x failed %d\n", 1714 1723 port_id, ret); 1715 1724 1716 - kfree(pkt); 1717 1725 return ret; 1718 1726 } 1719 1727 EXPORT_SYMBOL_GPL(q6afe_port_start); ··· 1731 1741 int port_id; 1732 1742 struct q6afe *afe = dev_get_drvdata(dev->parent); 1733 1743 struct q6afe_port *port; 1734 - unsigned long flags; 1735 1744 int cfg_type; 1736 1745 1737 1746 if (id < 0 || id >= AFE_PORT_MAX) { ··· 1807 1818 port->cfg_type = cfg_type; 1808 1819 kref_init(&port->refcount); 1809 1820 1810 - spin_lock_irqsave(&afe->port_list_lock, flags); 1821 + guard(spinlock)(&afe->port_list_lock); 1811 1822 list_add_tail(&port->node, &afe->port_list); 1812 - spin_unlock_irqrestore(&afe->port_list_lock, flags); 1813 1823 1814 1824 return port; 1815 1825 ··· 1833 1845 struct afe_cmd_remote_lpass_core_hw_devote_request *vote_cfg; 1834 1846 struct apr_pkt *pkt; 1835 1847 int ret = 0; 1836 - int pkt_size; 1837 - void *p; 1838 - 1839 - pkt_size = APR_HDR_SIZE + sizeof(*vote_cfg); 1840 - p = kzalloc(pkt_size, 
GFP_KERNEL); 1848 + int pkt_size = APR_HDR_SIZE + sizeof(*vote_cfg); 1849 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1841 1850 if (!p) 1842 1851 return -ENOMEM; 1843 1852 ··· 1856 1871 if (ret < 0) 1857 1872 dev_err(afe->dev, "AFE failed to unvote (%d)\n", hw_block_id); 1858 1873 1859 - kfree(pkt); 1860 1874 return ret; 1861 1875 } 1862 1876 EXPORT_SYMBOL(q6afe_unvote_lpass_core_hw); ··· 1867 1883 struct afe_cmd_remote_lpass_core_hw_vote_request *vote_cfg; 1868 1884 struct apr_pkt *pkt; 1869 1885 int ret = 0; 1870 - int pkt_size; 1871 - void *p; 1872 - 1873 - pkt_size = APR_HDR_SIZE + sizeof(*vote_cfg); 1874 - p = kzalloc(pkt_size, GFP_KERNEL); 1886 + int pkt_size = APR_HDR_SIZE + sizeof(*vote_cfg); 1887 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1875 1888 if (!p) 1876 1889 return -ENOMEM; 1877 1890 ··· 1892 1911 if (ret) 1893 1912 dev_err(afe->dev, "AFE failed to vote (%d)\n", hw_block_id); 1894 1913 1895 - 1896 - kfree(pkt); 1897 1914 return ret; 1898 1915 } 1899 1916 EXPORT_SYMBOL(q6afe_vote_lpass_core_hw);
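Most of the q6afe.c delta converts open-coded kzalloc()/kfree() pairs to the scope-based cleanup helpers from <linux/cleanup.h>, and the port-list walk to guard(spinlock). A minimal sketch of both patterns, with function bodies simplified rather than the driver's exact code:

    /* __free(kfree): the buffer is released when p leaves scope,
     * so every error path (and the success path) can just return. */
    static int send_afe_cmd(struct q6afe *afe, int psize)
    {
            int pkt_size = APR_HDR_SIZE + psize;
            void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL);

            if (!p)
                    return -ENOMEM;

            /* ... build the APR packet in p and send it ... */
            return 0;
    }

    /* guard(spinlock): the lock is dropped automatically on any
     * return, so the lookup can return from inside the loop. */
    static struct q6afe_port *find_port(struct q6afe *afe, u32 token)
    {
            struct q6afe_port *p;

            guard(spinlock)(&afe->port_list_lock);
            list_for_each_entry(p, &afe->port_list, node)
                    if (p->token == token)
                            return p;

            return NULL;
    }

This is what removes the explicit kfree(pkt) and spin_unlock_irqrestore() sites throughout the file.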
+7 -18
sound/soc/qcom/qdsp6/q6apm-dai.c
··· 86 86 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | 87 87 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | 88 88 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | 89 + SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR | 89 90 SNDRV_PCM_INFO_BATCH), 90 91 .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE), 91 92 .rates = SNDRV_PCM_RATE_8000_48000, ··· 106 105 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | 107 106 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | 108 107 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | 108 + SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR | 109 109 SNDRV_PCM_INFO_BATCH), 110 110 .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE), 111 111 .rates = SNDRV_PCM_RATE_8000_192000, ··· 151 149 { 152 150 struct q6apm_dai_rtd *prtd = priv; 153 151 struct snd_compr_stream *substream = prtd->cstream; 154 - unsigned long flags; 155 152 uint32_t wflags = 0; 156 153 uint64_t avail; 157 154 uint32_t bytes_written, bytes_to_write; 158 155 bool is_last_buffer = false; 159 156 157 + guard(spinlock_irqsave)(&prtd->lock); 160 158 switch (opcode) { 161 159 case APM_CLIENT_EVENT_CMD_EOS_DONE: 162 - spin_lock_irqsave(&prtd->lock, flags); 163 160 if (prtd->notify_on_drain) { 164 161 snd_compr_drain_notify(prtd->cstream); 165 162 prtd->notify_on_drain = false; 166 163 } else { 167 164 prtd->state = Q6APM_STREAM_STOPPED; 168 165 } 169 - spin_unlock_irqrestore(&prtd->lock, flags); 170 166 break; 171 167 case APM_CLIENT_EVENT_DATA_WRITE_DONE: 172 - spin_lock_irqsave(&prtd->lock, flags); 173 168 bytes_written = token >> APM_WRITE_TOKEN_LEN_SHIFT; 174 169 prtd->copied_total += bytes_written; 175 170 snd_compr_fragment_elapsed(substream); 176 171 177 - if (prtd->state != Q6APM_STREAM_RUNNING) { 178 - spin_unlock_irqrestore(&prtd->lock, flags); 172 + if (prtd->state != Q6APM_STREAM_RUNNING) 179 173 break; 180 - } 181 174 182 175 avail = prtd->bytes_received - prtd->bytes_sent; 183 176 ··· 197 200 audioreach_shared_memory_send_eos(prtd->graph); 198 201 } 199 202 200 - spin_unlock_irqrestore(&prtd->lock, flags); 201 203 break; 202 204 default: 203 205 break; ··· 577 581 { 578 582 struct snd_compr_runtime *runtime = stream->runtime; 579 583 struct q6apm_dai_rtd *prtd = runtime->private_data; 580 - unsigned long flags; 581 584 uint64_t temp_copied_total; 582 585 583 - spin_lock_irqsave(&prtd->lock, flags); 586 + guard(spinlock_irqsave)(&prtd->lock); 584 587 tstamp->copied_total = prtd->copied_total; 585 588 temp_copied_total = tstamp->copied_total; 586 589 tstamp->byte_offset = do_div(temp_copied_total, prtd->pcm_size); 587 - spin_unlock_irqrestore(&prtd->lock, flags); 588 590 589 591 return 0; 590 592 } ··· 625 631 { 626 632 struct snd_compr_runtime *runtime = stream->runtime; 627 633 struct q6apm_dai_rtd *prtd = runtime->private_data; 628 - unsigned long flags; 629 634 630 - spin_lock_irqsave(&prtd->lock, flags); 635 + guard(spinlock_irqsave)(&prtd->lock); 631 636 prtd->bytes_received += count; 632 - spin_unlock_irqrestore(&prtd->lock, flags); 633 637 634 638 return count; 635 639 } ··· 752 760 struct snd_compr_runtime *runtime = stream->runtime; 753 761 struct q6apm_dai_rtd *prtd = runtime->private_data; 754 762 void *dstn; 755 - unsigned long flags; 756 763 size_t copy; 757 764 u32 wflags = 0; 758 765 u32 app_pointer; ··· 786 795 return -EFAULT; 787 796 } 788 797 789 - spin_lock_irqsave(&prtd->lock, flags); 798 + guard(spinlock_irqsave)(&prtd->lock); 790 799 bytes_in_flight = prtd->bytes_received - 
prtd->copied_total; 791 800 792 801 if (prtd->next_track) { ··· 808 817 q6apm_write_async(prtd->graph, bytes_to_write, 0, 0, wflags); 809 818 prtd->bytes_sent += bytes_to_write; 810 819 } 811 - 812 - spin_unlock_irqrestore(&prtd->lock, flags); 813 820 814 821 return count; 815 822 }
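The q6apm-dai.c callback conversion uses the interrupt-safe variant, guard(spinlock_irqsave), which saves and restores IRQ flags for the rest of the enclosing scope. A condensed sketch of the converted shape (body heavily reduced):

    static void event_handler(uint32_t opcode, void *priv)
    {
            struct q6apm_dai_rtd *prtd = priv;

            guard(spinlock_irqsave)(&prtd->lock);

            switch (opcode) {
            case APM_CLIENT_EVENT_DATA_WRITE_DONE:
                    if (prtd->state != Q6APM_STREAM_RUNNING)
                            break;  /* lock dropped on scope exit */
                    /* ... queue the next buffer ... */
                    break;
            default:
                    break;
            }
    }

One behavioural detail worth noting: the guard now covers the whole switch rather than individual cases, which slightly widens the critical section but removes an unlock before every break and early return.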
+17 -40
sound/soc/qcom/qdsp6/q6apm.c
··· 99 99 struct apm_graph_mgmt_cmd *mgmt_cmd; 100 100 struct audioreach_sub_graph *sg; 101 101 struct q6apm *apm = graph->apm; 102 - int i = 0, rc, payload_size; 103 - struct gpr_pkt *pkt; 104 - 105 - payload_size = APM_GRAPH_MGMT_PSIZE(mgmt_cmd, num_sub_graphs); 106 - 107 - pkt = audioreach_alloc_apm_cmd_pkt(payload_size, opcode, 0); 102 + int i = 0, payload_size = APM_GRAPH_MGMT_PSIZE(mgmt_cmd, num_sub_graphs); 103 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(payload_size, opcode, 0); 108 104 if (IS_ERR(pkt)) 109 105 return PTR_ERR(pkt); 110 106 ··· 116 120 list_for_each_entry(sg, &info->sg_list, node) 117 121 mgmt_cmd->sub_graph_id_list[i++] = sg->sub_graph_id; 118 122 119 - rc = q6apm_send_cmd_sync(apm, pkt, 0); 120 - 121 - kfree(pkt); 122 - 123 - return rc; 123 + return q6apm_send_cmd_sync(apm, pkt, 0); 124 124 } 125 125 126 126 static void q6apm_put_audioreach_graph(struct kref *ref) ··· 140 148 141 149 static int q6apm_get_apm_state(struct q6apm *apm) 142 150 { 143 - struct gpr_pkt *pkt; 144 - 145 - pkt = audioreach_alloc_apm_cmd_pkt(0, APM_CMD_GET_SPF_STATE, 0); 151 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_apm_cmd_pkt(0, 152 + APM_CMD_GET_SPF_STATE, 0); 146 153 if (IS_ERR(pkt)) 147 154 return PTR_ERR(pkt); 148 155 149 156 q6apm_send_cmd_sync(apm, pkt, APM_CMD_RSP_GET_SPF_STATE); 150 - 151 - kfree(pkt); 152 157 153 158 return apm->state; 154 159 } ··· 259 270 { 260 271 struct apm_cmd_shared_mem_unmap_regions *cmd; 261 272 struct audioreach_graph_data *data; 262 - struct gpr_pkt *pkt; 273 + struct gpr_pkt *pkt __free(kfree) = NULL; 263 274 int rc; 264 275 265 276 if (dir == SNDRV_PCM_STREAM_PLAYBACK) ··· 279 290 cmd->mem_map_handle = data->mem_map_handle; 280 291 281 292 rc = audioreach_graph_send_cmd_sync(graph, pkt, APM_CMD_SHARED_MEM_UNMAP_REGIONS); 282 - kfree(pkt); 283 293 284 294 audioreach_graph_free_buf(graph); 285 295 ··· 408 420 { 409 421 struct apm_data_cmd_wr_sh_mem_ep_data_buffer_v2 *write_buffer; 410 422 struct audio_buffer *ab; 411 - struct gpr_pkt *pkt; 412 - int rc, iid; 413 - 414 - iid = q6apm_graph_get_rx_shmem_module_iid(graph); 415 - pkt = audioreach_alloc_pkt(sizeof(*write_buffer), DATA_CMD_WR_SH_MEM_EP_DATA_BUFFER_V2, 416 - graph->rx_data.dsp_buf | (len << APM_WRITE_TOKEN_LEN_SHIFT), 417 - graph->port->id, iid); 423 + int iid = q6apm_graph_get_rx_shmem_module_iid(graph); 424 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_pkt(sizeof(*write_buffer), 425 + DATA_CMD_WR_SH_MEM_EP_DATA_BUFFER_V2, 426 + graph->rx_data.dsp_buf | (len << APM_WRITE_TOKEN_LEN_SHIFT), 427 + graph->port->id, iid); 418 428 if (IS_ERR(pkt)) 419 429 return PTR_ERR(pkt); 420 430 ··· 436 450 437 451 mutex_unlock(&graph->lock); 438 452 439 - rc = gpr_send_port_pkt(graph->port, pkt); 440 - 441 - kfree(pkt); 442 - 443 - return rc; 453 + return gpr_send_port_pkt(graph->port, pkt); 444 454 } 445 455 EXPORT_SYMBOL_GPL(q6apm_write_async); 446 456 ··· 445 463 struct data_cmd_rd_sh_mem_ep_data_buffer_v2 *read_buffer; 446 464 struct audioreach_graph_data *port; 447 465 struct audio_buffer *ab; 448 - struct gpr_pkt *pkt; 449 - int rc, iid; 450 - 451 - iid = q6apm_graph_get_tx_shmem_module_iid(graph); 452 - pkt = audioreach_alloc_pkt(sizeof(*read_buffer), DATA_CMD_RD_SH_MEM_EP_DATA_BUFFER_V2, 453 - graph->tx_data.dsp_buf, graph->port->id, iid); 466 + int iid = q6apm_graph_get_tx_shmem_module_iid(graph); 467 + struct gpr_pkt *pkt __free(kfree) = audioreach_alloc_pkt(sizeof(*read_buffer), 468 + DATA_CMD_RD_SH_MEM_EP_DATA_BUFFER_V2, 469 + graph->tx_data.dsp_buf, 
graph->port->id, iid); 454 470 if (IS_ERR(pkt)) 455 471 return PTR_ERR(pkt); 456 472 ··· 470 490 471 491 mutex_unlock(&graph->lock); 472 492 473 - rc = gpr_send_port_pkt(graph->port, pkt); 474 - kfree(pkt); 475 - 476 - return rc; 493 + return gpr_send_port_pkt(graph->port, pkt); 477 494 } 478 495 EXPORT_SYMBOL_GPL(q6apm_read); 479 496
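Where the packet cannot be allocated at the very top of the function, the q6apm.c (and later q6prm.c) conversions initialise the pointer to NULL first; kfree(NULL) is a no-op, so early returns taken before the allocation stay safe. Sketched with hypothetical helpers (graph_data_for_dir() and alloc_unmap_pkt() are illustrations, not driver functions):

    static int unmap_regions(struct q6apm_graph *graph, int dir)
    {
            struct gpr_pkt *pkt __free(kfree) = NULL;
            struct audioreach_graph_data *data;

            data = graph_data_for_dir(graph, dir);  /* hypothetical */
            if (!data)
                    return -EINVAL;         /* kfree(NULL), harmless */

            pkt = alloc_unmap_pkt(data);    /* hypothetical, NULL on failure */
            if (!pkt)
                    return -ENOMEM;

            return audioreach_graph_send_cmd_sync(graph, pkt,
                            APM_CMD_SHARED_MEM_UNMAP_REGIONS);
    }

One subtlety to keep in mind with this pattern: kfree() tolerates NULL but not ERR_PTR() values, so allocators that report failure via error pointers need extra care before the scoped cleanup can run.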
+48 -39
sound/soc/qcom/qdsp6/q6asm-dai.c
··· 58 58 phys_addr_t phys; 59 59 unsigned int pcm_size; 60 60 unsigned int pcm_count; 61 - unsigned int pcm_irq_pos; /* IRQ position */ 62 61 unsigned int periods; 63 62 uint64_t bytes_sent; 64 63 uint64_t bytes_received; 65 64 uint64_t copied_total; 66 65 uint16_t bits_per_sample; 66 + snd_pcm_uframes_t queue_ptr; 67 67 uint16_t source; /* Encoding source bit mask */ 68 68 struct audio_client *audio_client; 69 69 uint32_t next_track_stream_id; ··· 85 85 static const struct snd_pcm_hardware q6asm_dai_hardware_capture = { 86 86 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BATCH | 87 87 SNDRV_PCM_INFO_BLOCK_TRANSFER | 88 + SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR | 88 89 SNDRV_PCM_INFO_MMAP_VALID | 89 90 SNDRV_PCM_INFO_INTERLEAVED | 90 91 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), ··· 109 108 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BATCH | 110 109 SNDRV_PCM_INFO_BLOCK_TRANSFER | 111 110 SNDRV_PCM_INFO_MMAP_VALID | 111 + SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR | 112 112 SNDRV_PCM_INFO_INTERLEAVED | 113 113 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), 114 114 .formats = (SNDRV_PCM_FMTBIT_S16_LE | ··· 184 182 185 183 switch (opcode) { 186 184 case ASM_CLIENT_EVENT_CMD_RUN_DONE: 187 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 188 - q6asm_write_async(prtd->audio_client, prtd->stream_id, 189 - prtd->pcm_count, 0, 0, 0); 190 185 break; 191 186 case ASM_CLIENT_EVENT_CMD_EOS_DONE: 192 187 prtd->state = Q6ASM_STREAM_STOPPED; 193 188 break; 194 189 case ASM_CLIENT_EVENT_DATA_WRITE_DONE: { 195 - prtd->pcm_irq_pos += prtd->pcm_count; 196 190 snd_pcm_period_elapsed(substream); 197 - if (prtd->state == Q6ASM_STREAM_RUNNING) 198 - q6asm_write_async(prtd->audio_client, prtd->stream_id, 199 - prtd->pcm_count, 0, 0, 0); 200 - 201 191 break; 202 192 } 203 193 case ASM_CLIENT_EVENT_DATA_READ_DONE: 204 - prtd->pcm_irq_pos += prtd->pcm_count; 205 194 snd_pcm_period_elapsed(substream); 206 195 if (prtd->state == Q6ASM_STREAM_RUNNING) 207 196 q6asm_read(prtd->audio_client, prtd->stream_id); ··· 224 231 } 225 232 226 233 prtd->pcm_count = snd_pcm_lib_period_bytes(substream); 227 - prtd->pcm_irq_pos = 0; 228 234 /* rate and channels are sent to audio driver */ 229 - if (prtd->state) { 235 + if (prtd->state == Q6ASM_STREAM_RUNNING) { 230 236 /* clear the previous setup if any */ 231 237 q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE); 232 238 q6asm_unmap_memory_regions(substream->stream, 233 239 prtd->audio_client); 234 240 q6routing_stream_close(soc_prtd->dai_link->id, 235 241 substream->stream); 242 + prtd->state = Q6ASM_STREAM_STOPPED; 236 243 } 237 244 238 245 ret = q6asm_map_memory_regions(substream->stream, prtd->audio_client, ··· 299 306 q6asm_unmap_memory_regions(substream->stream, prtd->audio_client); 300 307 q6asm_audio_client_free(prtd->audio_client); 301 308 prtd->audio_client = NULL; 309 + 310 + return ret; 311 + } 312 + 313 + static int q6asm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream) 314 + { 315 + struct snd_pcm_runtime *runtime = substream->runtime; 316 + struct q6asm_dai_rtd *prtd = runtime->private_data; 317 + int i, ret = 0, avail_periods; 318 + 319 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && prtd->state == Q6ASM_STREAM_RUNNING) { 320 + avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size; 321 + for (i = 0; i < avail_periods; i++) { 322 + ret = q6asm_write_async(prtd->audio_client, prtd->stream_id, 323 + prtd->pcm_count, 0, 0, 0); 324 + 325 + if (ret < 0) 
{ 326 + dev_err(component->dev, "Error queuing playback buffer %d\n", ret); 327 + return ret; 328 + } 329 + prtd->queue_ptr += runtime->period_size; 330 + } 331 + } 302 332 303 333 return ret; 304 334 } ··· 419 403 } 420 404 421 405 ret = snd_pcm_hw_constraint_step(runtime, 0, 422 - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32); 406 + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 480); 423 407 if (ret < 0) { 424 408 dev_err(dev, "constraint for period bytes step ret = %d\n", 425 409 ret); 426 410 } 427 411 ret = snd_pcm_hw_constraint_step(runtime, 0, 428 - SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32); 412 + SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 480); 429 413 if (ret < 0) { 430 414 dev_err(dev, "constraint for buffer bytes step ret = %d\n", 431 415 ret); ··· 433 417 434 418 runtime->private_data = prtd; 435 419 436 - snd_soc_set_runtime_hwparams(substream, &q6asm_dai_hardware_playback); 437 - 438 - runtime->dma_bytes = q6asm_dai_hardware_playback.buffer_bytes_max; 439 - 420 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 421 + snd_soc_set_runtime_hwparams(substream, &q6asm_dai_hardware_playback); 422 + runtime->dma_bytes = q6asm_dai_hardware_playback.buffer_bytes_max; 423 + } else { 424 + snd_soc_set_runtime_hwparams(substream, &q6asm_dai_hardware_capture); 425 + runtime->dma_bytes = q6asm_dai_hardware_capture.buffer_bytes_max; 426 + } 440 427 441 428 if (pdata->sid < 0) 442 429 prtd->phys = substream->dma_buffer.addr; ··· 478 459 479 460 struct snd_pcm_runtime *runtime = substream->runtime; 480 461 struct q6asm_dai_rtd *prtd = runtime->private_data; 462 + snd_pcm_uframes_t ptr; 481 463 482 - if (prtd->pcm_irq_pos >= prtd->pcm_size) 483 - prtd->pcm_irq_pos = 0; 464 + ptr = q6asm_get_hw_pointer(prtd->audio_client, substream->stream) * runtime->period_size; 465 + if (ptr) 466 + return ptr - 1; 484 467 485 - return bytes_to_frames(runtime, (prtd->pcm_irq_pos)); 468 + return 0; 486 469 } 487 470 488 471 static int q6asm_dai_hw_params(struct snd_soc_component *component, ··· 514 493 { 515 494 struct q6asm_dai_rtd *prtd = priv; 516 495 struct snd_compr_stream *substream = prtd->cstream; 517 - unsigned long flags; 518 496 u32 wflags = 0; 519 497 uint64_t avail; 520 498 uint32_t bytes_written, bytes_to_write; 521 499 bool is_last_buffer = false; 522 500 501 + guard(spinlock_irqsave)(&prtd->lock); 502 + 523 503 switch (opcode) { 524 504 case ASM_CLIENT_EVENT_CMD_RUN_DONE: 525 - spin_lock_irqsave(&prtd->lock, flags); 526 505 if (!prtd->bytes_sent) { 527 506 q6asm_stream_remove_initial_silence(prtd->audio_client, 528 507 prtd->stream_id, ··· 533 512 prtd->bytes_sent += prtd->pcm_count; 534 513 } 535 514 536 - spin_unlock_irqrestore(&prtd->lock, flags); 537 515 break; 538 516 539 517 case ASM_CLIENT_EVENT_CMD_EOS_DONE: 540 - spin_lock_irqsave(&prtd->lock, flags); 541 518 if (prtd->notify_on_drain) { 542 519 if (substream->partial_drain) { 543 520 /* ··· 558 539 } else { 559 540 prtd->state = Q6ASM_STREAM_STOPPED; 560 541 } 561 - spin_unlock_irqrestore(&prtd->lock, flags); 562 542 break; 563 543 564 544 case ASM_CLIENT_EVENT_DATA_WRITE_DONE: 565 - spin_lock_irqsave(&prtd->lock, flags); 566 545 567 546 bytes_written = token >> ASM_WRITE_TOKEN_LEN_SHIFT; 568 547 prtd->copied_total += bytes_written; 569 548 snd_compr_fragment_elapsed(substream); 570 549 571 - if (prtd->state != Q6ASM_STREAM_RUNNING) { 572 - spin_unlock_irqrestore(&prtd->lock, flags); 550 + if (prtd->state != Q6ASM_STREAM_RUNNING) 573 551 break; 574 - } 575 552 576 553 avail = prtd->bytes_received - prtd->bytes_sent; 577 554 if (avail > prtd->pcm_count) { ··· 596 581 
q6asm_cmd_nowait(prtd->audio_client, 597 582 prtd->stream_id, CMD_EOS); 598 583 599 - spin_unlock_irqrestore(&prtd->lock, flags); 600 584 break; 601 585 602 586 default: ··· 1045 1031 { 1046 1032 struct snd_compr_runtime *runtime = stream->runtime; 1047 1033 struct q6asm_dai_rtd *prtd = runtime->private_data; 1048 - unsigned long flags; 1049 1034 uint64_t temp_copied_total; 1050 1035 1051 - spin_lock_irqsave(&prtd->lock, flags); 1036 + guard(spinlock_irqsave)(&prtd->lock); 1052 1037 1053 1038 tstamp->copied_total = prtd->copied_total; 1054 1039 temp_copied_total = tstamp->copied_total; 1055 1040 tstamp->byte_offset = do_div(temp_copied_total, prtd->pcm_size); 1056 - 1057 - spin_unlock_irqrestore(&prtd->lock, flags); 1058 1041 1059 1042 return 0; 1060 1043 } ··· 1062 1051 { 1063 1052 struct snd_compr_runtime *runtime = stream->runtime; 1064 1053 struct q6asm_dai_rtd *prtd = runtime->private_data; 1065 - unsigned long flags; 1066 1054 u32 wflags = 0; 1067 1055 uint64_t avail, bytes_in_flight = 0; 1068 1056 void *dstn; ··· 1097 1087 return -EFAULT; 1098 1088 } 1099 1089 1100 - spin_lock_irqsave(&prtd->lock, flags); 1090 + guard(spinlock_irqsave)(&prtd->lock); 1101 1091 1102 1092 bytes_in_flight = prtd->bytes_received - prtd->copied_total; 1103 1093 ··· 1122 1112 bytes_to_write, 0, 0, wflags); 1123 1113 prtd->bytes_sent += bytes_to_write; 1124 1114 } 1125 - 1126 - spin_unlock_irqrestore(&prtd->lock, flags); 1127 1115 1128 1116 return count; 1129 1117 } ··· 1222 1214 .close = q6asm_dai_close, 1223 1215 .prepare = q6asm_dai_prepare, 1224 1216 .trigger = q6asm_dai_trigger, 1217 + .ack = q6asm_dai_ack, 1225 1218 .pointer = q6asm_dai_pointer, 1226 1219 .pcm_construct = q6asm_dai_pcm_new, 1227 1220 .compress_ops = &q6asm_dai_compress_ops,
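The q6asm-dai.c rework replaces the pcm_irq_pos bookkeeping with an appl_ptr-driven model: SNDRV_PCM_INFO_SYNC_APPLPTR makes ALSA core call .ack whenever the application pointer moves (with SNDRV_PCM_INFO_NO_REWINDS ruling out backwards movement), and the new ack handler queues one DSP write per fully written period. The core of that calculation, slightly condensed from the diff:

    static int dai_ack(struct snd_soc_component *component,
                       struct snd_pcm_substream *substream)
    {
            struct snd_pcm_runtime *runtime = substream->runtime;
            struct q6asm_dai_rtd *prtd = runtime->private_data;
            int i, ret, periods;

            /* full periods written by userspace but not yet queued */
            periods = (runtime->control->appl_ptr - prtd->queue_ptr) /
                      runtime->period_size;

            for (i = 0; i < periods; i++) {
                    ret = q6asm_write_async(prtd->audio_client, prtd->stream_id,
                                            prtd->pcm_count, 0, 0, 0);
                    if (ret < 0)
                            return ret;
                    prtd->queue_ptr += runtime->period_size;
            }
            return 0;
    }

This is also why the RUN_DONE and WRITE_DONE branches of the event handler no longer issue writes themselves: buffers are only queued for data the application has actually committed.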
+63 -140
sound/soc/qcom/qdsp6/q6asm.c
··· 6 6 #include <linux/mutex.h> 7 7 #include <linux/wait.h> 8 8 #include <linux/module.h> 9 + #include <linux/atomic.h> 9 10 #include <linux/soc/qcom/apr.h> 10 11 #include <linux/device.h> 11 12 #include <linux/of_platform.h> ··· 249 248 uint32_t num_periods; 250 249 uint32_t dsp_buf; 251 250 uint32_t mem_map_handle; 251 + atomic_t hw_ptr; 252 252 }; 253 253 254 254 struct q6asm { ··· 335 333 struct q6asm *a = dev_get_drvdata(ac->dev->parent); 336 334 struct apr_pkt *pkt; 337 335 int rc, pkt_size; 338 - void *p; 336 + void *p __free(kfree) = NULL; 339 337 340 338 if (ac->port[dir].mem_map_handle == 0) { 341 339 dev_err(ac->dev, "invalid mem handle\n"); ··· 360 358 mem_unmap->mem_map_handle = ac->port[dir].mem_map_handle; 361 359 362 360 rc = q6asm_apr_send_session_pkt(a, ac, pkt, 0); 363 - if (rc < 0) { 364 - kfree(pkt); 361 + if (rc < 0) 365 362 return rc; 366 - } 367 363 368 364 ac->port[dir].mem_map_handle = 0; 369 365 370 - kfree(pkt); 371 366 return 0; 372 367 } 373 368 ··· 428 429 struct audio_port_data *port = NULL; 429 430 struct audio_buffer *ab = NULL; 430 431 struct apr_pkt *pkt; 431 - void *p; 432 + void *p __free(kfree) = NULL; 432 433 unsigned long flags; 433 434 uint32_t num_regions, buf_sz; 434 - int rc, i, pkt_size; 435 + int i, pkt_size; 435 436 436 437 if (is_contiguous) { 437 438 num_regions = 1; ··· 478 479 } 479 480 spin_unlock_irqrestore(&ac->lock, flags); 480 481 481 - rc = q6asm_apr_send_session_pkt(a, ac, pkt, 482 - ASM_CMDRSP_SHARED_MEM_MAP_REGIONS); 483 - 484 - kfree(pkt); 485 - 486 - return rc; 482 + return q6asm_apr_send_session_pkt(a, ac, pkt, ASM_CMDRSP_SHARED_MEM_MAP_REGIONS); 487 483 } 488 484 489 485 /** ··· 571 577 int session_id) 572 578 { 573 579 struct audio_client *ac = NULL; 574 - unsigned long flags; 575 580 576 - spin_lock_irqsave(&a->slock, flags); 581 + guard(spinlock_irqsave)(&a->slock); 577 582 if ((session_id <= 0) || (session_id > MAX_SESSIONS)) { 578 583 dev_err(a->dev, "invalid session: %d\n", session_id); 579 584 goto err; ··· 587 594 ac = a->session[session_id]; 588 595 kref_get(&ac->refcount); 589 596 err: 590 - spin_unlock_irqrestore(&a->slock, flags); 591 597 return ac; 592 598 } 599 + 600 + int q6asm_get_hw_pointer(struct audio_client *ac, unsigned int dir) 601 + { 602 + struct audio_port_data *data = &ac->port[dir]; 603 + 604 + return (int)atomic_read(&data->hw_ptr); 605 + } 606 + EXPORT_SYMBOL_GPL(q6asm_get_hw_pointer); 593 607 594 608 static int32_t q6asm_stream_callback(struct apr_device *adev, 595 609 struct apr_resp_pkt *data, ··· 638 638 client_event = ASM_CLIENT_EVENT_CMD_OUT_FLUSH_DONE; 639 639 break; 640 640 case ASM_STREAM_CMD_OPEN_WRITE_V3: 641 + case ASM_DATA_CMD_WRITE_V2: 641 642 case ASM_STREAM_CMD_OPEN_READ_V3: 642 643 case ASM_STREAM_CMD_OPEN_READWRITE_V2: 643 644 case ASM_STREAM_CMD_SET_ENCDEC_PARAM: ··· 654 653 ret = 0; 655 654 goto done; 656 655 } 656 + break; 657 + case ASM_DATA_CMD_EOS: 658 + case ASM_DATA_CMD_READ_V2: 659 + /* response as result of close stream */ 657 660 break; 658 661 default: 659 662 dev_err(ac->dev, "command[0x%x] not expecting rsp\n", ··· 679 674 client_event = ASM_CLIENT_EVENT_DATA_WRITE_DONE; 680 675 if (ac->io_mode & ASM_SYNC_IO_MODE) { 681 676 phys_addr_t phys; 682 - unsigned long flags; 683 677 int token = hdr->token & ASM_WRITE_TOKEN_MASK; 684 678 685 - spin_lock_irqsave(&ac->lock, flags); 679 + guard(spinlock_irqsave)(&ac->lock); 686 680 687 681 port = &ac->port[SNDRV_PCM_STREAM_PLAYBACK]; 688 682 689 683 if (!port->buf) { 690 - spin_unlock_irqrestore(&ac->lock, flags); 691 684 ret 
= 0; 692 685 goto done; 693 686 } ··· 696 693 upper_32_bits(phys) != result->status) { 697 694 dev_err(ac->dev, "Expected addr %pa\n", 698 695 &port->buf[token].phys); 699 - spin_unlock_irqrestore(&ac->lock, flags); 700 696 ret = -EINVAL; 701 697 goto done; 702 698 } 703 - spin_unlock_irqrestore(&ac->lock, flags); 699 + atomic_set(&port->hw_ptr, token + 1); 704 700 } 705 701 break; 706 702 case ASM_DATA_EVENT_READ_DONE_V2: 707 703 client_event = ASM_CLIENT_EVENT_DATA_READ_DONE; 708 704 if (ac->io_mode & ASM_SYNC_IO_MODE) { 709 705 struct asm_data_cmd_read_v2_done *done = data->payload; 710 - unsigned long flags; 711 706 phys_addr_t phys; 712 707 713 - spin_lock_irqsave(&ac->lock, flags); 708 + guard(spinlock_irqsave)(&ac->lock); 714 709 port = &ac->port[SNDRV_PCM_STREAM_CAPTURE]; 715 710 if (!port->buf) { 716 - spin_unlock_irqrestore(&ac->lock, flags); 717 711 ret = 0; 718 712 goto done; 719 713 } 720 714 721 715 phys = port->buf[hdr->token].phys; 716 + atomic_set(&port->hw_ptr, hdr->token + 1); 722 717 723 718 if (upper_32_bits(phys) != done->buf_addr_msw || 724 719 lower_32_bits(phys) != done->buf_addr_lsw) { ··· 724 723 &port->buf[hdr->token].phys, 725 724 done->buf_addr_lsw, 726 725 done->buf_addr_msw); 727 - spin_unlock_irqrestore(&ac->lock, flags); 728 726 ret = -EINVAL; 729 727 goto done; 730 728 } 731 - spin_unlock_irqrestore(&ac->lock, flags); 732 729 } 733 730 734 731 break; ··· 929 930 { 930 931 struct asm_stream_cmd_open_write_v3 *open; 931 932 struct apr_pkt *pkt; 932 - void *p; 933 - int rc, pkt_size; 934 - 935 - pkt_size = APR_HDR_SIZE + sizeof(*open); 936 - 937 - p = kzalloc(pkt_size, GFP_KERNEL); 933 + int rc, pkt_size = APR_HDR_SIZE + sizeof(*open); 934 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 938 935 if (!p) 939 936 return -ENOMEM; 940 937 ··· 996 1001 ac->io_mode |= ASM_TUN_WRITE_IO_MODE; 997 1002 998 1003 err: 999 - kfree(pkt); 1000 1004 return rc; 1001 1005 } 1002 1006 EXPORT_SYMBOL_GPL(q6asm_open_write); ··· 1006 1012 { 1007 1013 struct asm_session_cmd_run_v2 *run; 1008 1014 struct apr_pkt *pkt; 1009 - int pkt_size, rc; 1010 - void *p; 1011 - 1012 - pkt_size = APR_HDR_SIZE + sizeof(*run); 1013 - p = kzalloc(pkt_size, GFP_ATOMIC); 1015 + int rc, pkt_size = APR_HDR_SIZE + sizeof(*run); 1016 + void *p __free(kfree) = kzalloc(pkt_size, GFP_ATOMIC); 1014 1017 if (!p) 1015 1018 return -ENOMEM; 1016 1019 ··· 1028 1037 rc = 0; 1029 1038 } 1030 1039 1031 - kfree(pkt); 1032 1040 return rc; 1033 1041 } 1034 1042 ··· 1088 1098 struct asm_multi_channel_pcm_fmt_blk_v2 *fmt; 1089 1099 struct apr_pkt *pkt; 1090 1100 u8 *channel_mapping; 1091 - void *p; 1092 - int rc, pkt_size; 1093 - 1094 - pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1095 - p = kzalloc(pkt_size, GFP_KERNEL); 1101 + int pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1102 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1096 1103 if (!p) 1097 1104 return -ENOMEM; 1098 1105 ··· 1112 1125 } else { 1113 1126 if (q6dsp_map_channels(channel_mapping, channels)) { 1114 1127 dev_err(ac->dev, " map channels failed %d\n", channels); 1115 - rc = -EINVAL; 1116 - goto err; 1128 + return -EINVAL; 1117 1129 } 1118 1130 } 1119 1131 1120 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1121 - 1122 - err: 1123 - kfree(pkt); 1124 - return rc; 1132 + return q6asm_ac_send_cmd_sync(ac, pkt); 1125 1133 } 1126 1134 EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm); 1127 1135 ··· 1126 1144 { 1127 1145 struct asm_flac_fmt_blk_v2 *fmt; 1128 1146 struct apr_pkt *pkt; 1129 - void *p; 1130 - int rc, pkt_size; 1131 - 1132 - pkt_size = 
APR_HDR_SIZE + sizeof(*fmt); 1133 - p = kzalloc(pkt_size, GFP_KERNEL); 1147 + int pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1148 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1134 1149 if (!p) 1135 1150 return -ENOMEM; 1136 1151 ··· 1147 1168 fmt->max_frame_size = cfg->max_frame_size; 1148 1169 fmt->sample_size = cfg->sample_size; 1149 1170 1150 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1151 - kfree(pkt); 1152 - 1153 - return rc; 1171 + return q6asm_ac_send_cmd_sync(ac, pkt); 1154 1172 } 1155 1173 EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_flac); 1156 1174 ··· 1157 1181 { 1158 1182 struct asm_wmastdv9_fmt_blk_v2 *fmt; 1159 1183 struct apr_pkt *pkt; 1160 - void *p; 1161 - int rc, pkt_size; 1162 - 1163 - pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1164 - p = kzalloc(pkt_size, GFP_KERNEL); 1184 + int pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1185 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1165 1186 if (!p) 1166 1187 return -ENOMEM; 1167 1188 ··· 1179 1206 fmt->enc_options = cfg->enc_options; 1180 1207 fmt->reserved = 0; 1181 1208 1182 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1183 - kfree(pkt); 1184 - 1185 - return rc; 1209 + return q6asm_ac_send_cmd_sync(ac, pkt); 1186 1210 } 1187 1211 EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_wma_v9); 1188 1212 ··· 1189 1219 { 1190 1220 struct asm_wmaprov10_fmt_blk_v2 *fmt; 1191 1221 struct apr_pkt *pkt; 1192 - void *p; 1193 - int rc, pkt_size; 1194 - 1195 - pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1196 - p = kzalloc(pkt_size, GFP_KERNEL); 1222 + int pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1223 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1197 1224 if (!p) 1198 1225 return -ENOMEM; 1199 1226 ··· 1212 1245 fmt->advanced_enc_options1 = cfg->adv_enc_options; 1213 1246 fmt->advanced_enc_options2 = cfg->adv_enc_options2; 1214 1247 1215 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1216 - kfree(pkt); 1217 - 1218 - return rc; 1248 + return q6asm_ac_send_cmd_sync(ac, pkt); 1219 1249 } 1220 1250 EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_wma_v10); 1221 1251 ··· 1222 1258 { 1223 1259 struct asm_alac_fmt_blk_v2 *fmt; 1224 1260 struct apr_pkt *pkt; 1225 - void *p; 1226 - int rc, pkt_size; 1227 - 1228 - pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1229 - p = kzalloc(pkt_size, GFP_KERNEL); 1261 + int pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1262 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1230 1263 if (!p) 1231 1264 return -ENOMEM; 1232 1265 ··· 1248 1287 fmt->mb = cfg->mb; 1249 1288 fmt->kb = cfg->kb; 1250 1289 1251 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1252 - kfree(pkt); 1253 - 1254 - return rc; 1290 + return q6asm_ac_send_cmd_sync(ac, pkt); 1255 1291 } 1256 1292 EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_alac); 1257 1293 ··· 1258 1300 { 1259 1301 struct asm_ape_fmt_blk_v2 *fmt; 1260 1302 struct apr_pkt *pkt; 1261 - void *p; 1262 - int rc, pkt_size; 1263 - 1264 - pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1265 - p = kzalloc(pkt_size, GFP_KERNEL); 1303 + int pkt_size = APR_HDR_SIZE + sizeof(*fmt); 1304 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1266 1305 if (!p) 1267 1306 return -ENOMEM; 1268 1307 ··· 1282 1327 fmt->sample_rate = cfg->sample_rate; 1283 1328 fmt->seek_table_present = cfg->seek_table_present; 1284 1329 1285 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1286 - kfree(pkt); 1287 - 1288 - return rc; 1330 + return q6asm_ac_send_cmd_sync(ac, pkt); 1289 1331 } 1290 1332 EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_ape); 1291 1333 ··· 1292 1340 { 1293 1341 uint32_t *samples; 1294 1342 struct 
apr_pkt *pkt; 1295 - void *p; 1296 - int rc, pkt_size; 1297 - 1298 - pkt_size = APR_HDR_SIZE + sizeof(uint32_t); 1299 - p = kzalloc(pkt_size, GFP_ATOMIC); 1343 + int rc, pkt_size = APR_HDR_SIZE + sizeof(uint32_t); 1344 + void *p __free(kfree) = kzalloc(pkt_size, GFP_ATOMIC); 1300 1345 if (!p) 1301 1346 return -ENOMEM; 1302 1347 ··· 1307 1358 rc = apr_send_pkt(ac->adev, pkt); 1308 1359 if (rc == pkt_size) 1309 1360 rc = 0; 1310 - 1311 - kfree(pkt); 1312 1361 1313 1362 return rc; 1314 1363 } ··· 1350 1403 struct apr_pkt *pkt; 1351 1404 u8 *channel_mapping; 1352 1405 u32 frames_per_buf = 0; 1353 - int pkt_size, rc; 1354 - void *p; 1355 - 1356 - pkt_size = APR_HDR_SIZE + sizeof(*enc_cfg); 1357 - p = kzalloc(pkt_size, GFP_KERNEL); 1406 + int pkt_size = APR_HDR_SIZE + sizeof(*enc_cfg); 1407 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1358 1408 if (!p) 1359 1409 return -ENOMEM; 1360 1410 ··· 1372 1428 enc_cfg->is_signed = 1; 1373 1429 channel_mapping = enc_cfg->channel_mapping; 1374 1430 1375 - if (q6dsp_map_channels(channel_mapping, channels)) { 1376 - rc = -EINVAL; 1377 - goto err; 1378 - } 1431 + if (q6dsp_map_channels(channel_mapping, channels)) 1432 + return -EINVAL; 1379 1433 1380 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1381 - err: 1382 - kfree(pkt); 1383 - return rc; 1434 + return q6asm_ac_send_cmd_sync(ac, pkt); 1384 1435 } 1385 1436 EXPORT_SYMBOL_GPL(q6asm_enc_cfg_blk_pcm_format_support); 1386 1437 ··· 1395 1456 struct audio_buffer *ab; 1396 1457 struct apr_pkt *pkt; 1397 1458 unsigned long flags; 1398 - int pkt_size; 1459 + int pkt_size = APR_HDR_SIZE + sizeof(*read); 1399 1460 int rc = 0; 1400 - void *p; 1401 - 1402 - pkt_size = APR_HDR_SIZE + sizeof(*read); 1403 - p = kzalloc(pkt_size, GFP_ATOMIC); 1461 + void *p __free(kfree) = kzalloc(pkt_size, GFP_ATOMIC); 1404 1462 if (!p) 1405 1463 return -ENOMEM; 1406 1464 ··· 1429 1493 else 1430 1494 pr_err("read op[0x%x]rc[%d]\n", pkt->hdr.opcode, rc); 1431 1495 1432 - kfree(pkt); 1433 1496 return rc; 1434 1497 } 1435 1498 EXPORT_SYMBOL_GPL(q6asm_read); ··· 1438 1503 { 1439 1504 struct asm_stream_cmd_open_read_v3 *open; 1440 1505 struct apr_pkt *pkt; 1441 - int pkt_size, rc; 1442 - void *p; 1443 - 1444 - pkt_size = APR_HDR_SIZE + sizeof(*open); 1445 - p = kzalloc(pkt_size, GFP_KERNEL); 1506 + int pkt_size = APR_HDR_SIZE + sizeof(*open); 1507 + void *p __free(kfree) = kzalloc(pkt_size, GFP_KERNEL); 1446 1508 if (!p) 1447 1509 return -ENOMEM; 1448 1510 ··· 1467 1535 pr_err("Invalid format[%d]\n", format); 1468 1536 } 1469 1537 1470 - rc = q6asm_ac_send_cmd_sync(ac, pkt); 1471 - 1472 - kfree(pkt); 1473 - return rc; 1538 + return q6asm_ac_send_cmd_sync(ac, pkt); 1474 1539 } 1475 1540 1476 1541 /** ··· 1507 1578 struct audio_buffer *ab; 1508 1579 unsigned long flags; 1509 1580 struct apr_pkt *pkt; 1510 - int pkt_size; 1581 + int pkt_size = APR_HDR_SIZE + sizeof(*write); 1511 1582 int rc = 0; 1512 - void *p; 1513 - 1514 - pkt_size = APR_HDR_SIZE + sizeof(*write); 1515 - p = kzalloc(pkt_size, GFP_ATOMIC); 1583 + void *p __free(kfree) = kzalloc(pkt_size, GFP_ATOMIC); 1516 1584 if (!p) 1517 1585 return -ENOMEM; 1518 1586 ··· 1544 1618 if (rc == pkt_size) 1545 1619 rc = 0; 1546 1620 1547 - kfree(pkt); 1548 1621 return rc; 1549 1622 } 1550 1623 EXPORT_SYMBOL_GPL(q6asm_write_async); ··· 1551 1626 static void q6asm_reset_buf_state(struct audio_client *ac) 1552 1627 { 1553 1628 struct audio_port_data *port; 1554 - unsigned long flags; 1555 1629 1556 - spin_lock_irqsave(&ac->lock, flags); 1630 + guard(spinlock_irqsave)(&ac->lock); 1557 1631 port 
= &ac->port[SNDRV_PCM_STREAM_PLAYBACK]; 1558 1632 port->dsp_buf = 0; 1559 1633 port = &ac->port[SNDRV_PCM_STREAM_CAPTURE]; 1560 1634 port->dsp_buf = 0; 1561 - spin_unlock_irqrestore(&ac->lock, flags); 1562 1635 } 1563 1636 1564 1637 static int __q6asm_cmd(struct audio_client *ac, uint32_t stream_id, int cmd,
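On the completion side in q6asm.c, each WRITE_DONE/READ_DONE message carries the index of the buffer the DSP just finished in its token, and the driver now latches token + 1 into an atomic per-port counter. q6asm_get_hw_pointer() exposes that counter so the PCM .pointer callback can derive the position without taking the port lock; roughly:

    /* interrupt path, under ac->lock */
    atomic_set(&port->hw_ptr, token + 1);

    /* .pointer path, lock-free, as in q6asm_dai_pointer() */
    ptr = q6asm_get_hw_pointer(prtd->audio_client, substream->stream) *
          runtime->period_size;
    return ptr ? ptr - 1 : 0;

The pointer advances in whole periods, since the DSP completes one period-sized buffer at a time; the "- 1" keeps the reported value inside the ring when the completed buffer was the last one before the wrap.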
+1
sound/soc/qcom/qdsp6/q6asm.h
··· 148 148 phys_addr_t phys, 149 149 size_t period_sz, unsigned int periods); 150 150 int q6asm_unmap_memory_regions(unsigned int dir, struct audio_client *ac); 151 + int q6asm_get_hw_pointer(struct audio_client *ac, unsigned int dir); 151 152 #endif /* __Q6_ASM_H__ */
+6 -21
sound/soc/qcom/qdsp6/q6prm.c
··· 62 62 struct prm_cmd_request_hw_core *req; 63 63 gpr_device_t *gdev = prm->gdev; 64 64 uint32_t opcode, rsp_opcode; 65 - struct gpr_pkt *pkt; 66 - int rc; 65 + struct gpr_pkt *pkt __free(kfree) = NULL; 67 66 68 67 if (enable) { 69 68 opcode = PRM_CMD_REQUEST_HW_RSC; ··· 87 88 88 89 req->hw_clk_id = hw_block_id; 89 90 90 - rc = q6prm_send_cmd_sync(prm, pkt, rsp_opcode); 91 - 92 - kfree(pkt); 93 - 94 - return rc; 91 + return q6prm_send_cmd_sync(prm, pkt, rsp_opcode); 95 92 } 96 93 97 94 int q6prm_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id, ··· 111 116 struct apm_module_param_data *param_data; 112 117 struct prm_cmd_request_rsc *req; 113 118 gpr_device_t *gdev = prm->gdev; 114 - struct gpr_pkt *pkt; 115 - int rc; 119 + struct gpr_pkt *pkt __free(kfree) = NULL; 116 120 117 121 pkt = audioreach_alloc_cmd_pkt(sizeof(*req), PRM_CMD_REQUEST_HW_RSC, 0, gdev->svc.id, 118 122 GPR_PRM_MODULE_IID); ··· 133 139 req->clock_id.clock_attri = clk_attr; 134 140 req->clock_id.clock_root = clk_root; 135 141 136 - rc = q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_REQUEST_HW_RSC); 137 - 138 - kfree(pkt); 139 - 140 - return rc; 142 + return q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_REQUEST_HW_RSC); 141 143 } 142 144 143 145 static int q6prm_release_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root, ··· 143 153 struct apm_module_param_data *param_data; 144 154 struct prm_cmd_release_rsc *rel; 145 155 gpr_device_t *gdev = prm->gdev; 146 - struct gpr_pkt *pkt; 147 - int rc; 156 + struct gpr_pkt *pkt __free(kfree) = NULL; 148 157 149 158 pkt = audioreach_alloc_cmd_pkt(sizeof(*rel), PRM_CMD_RELEASE_HW_RSC, 0, gdev->svc.id, 150 159 GPR_PRM_MODULE_IID); ··· 162 173 rel->num_clk_id = 1; 163 174 rel->clock_id.clock_id = clk_id; 164 175 165 - rc = q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_RELEASE_HW_RSC); 166 - 167 - kfree(pkt); 168 - 169 - return rc; 176 + return q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_RELEASE_HW_RSC); 170 177 } 171 178 172 179 int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+12 -13
sound/soc/renesas/rz-ssi.c
··· 85 85 struct snd_pcm_substream *substream; 86 86 int fifo_sample_size; /* sample capacity of SSI FIFO */ 87 87 int dma_buffer_pos; /* The address for the next DMA descriptor */ 88 + int completed_dma_buf_pos; /* The address of the last completed DMA descriptor. */ 88 89 int period_counter; /* for keeping track of periods transferred */ 89 90 int sample_width; 90 91 int buffer_pos; /* current frame position in the buffer */ ··· 216 215 rz_ssi_set_substream(strm, substream); 217 216 strm->sample_width = samples_to_bytes(runtime, 1); 218 217 strm->dma_buffer_pos = 0; 218 + strm->completed_dma_buf_pos = 0; 219 219 strm->period_counter = 0; 220 220 strm->buffer_pos = 0; 221 221 ··· 439 437 snd_pcm_period_elapsed(strm->substream); 440 438 strm->period_counter = current_period; 441 439 } 440 + 441 + strm->completed_dma_buf_pos += runtime->period_size; 442 + if (strm->completed_dma_buf_pos >= runtime->buffer_size) 443 + strm->completed_dma_buf_pos = 0; 442 444 } 443 445 444 446 static int rz_ssi_pio_recv(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) ··· 784 778 return -ENODEV; 785 779 } 786 780 787 - static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi) 781 + static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) 788 782 { 783 + struct snd_pcm_substream *substream = strm->substream; 784 + struct snd_pcm_runtime *runtime = substream->runtime; 789 785 int ret; 786 + 787 + strm->dma_buffer_pos = strm->completed_dma_buf_pos + runtime->period_size; 790 788 791 789 if (rz_ssi_is_stream_running(&ssi->playback) || 792 790 rz_ssi_is_stream_running(&ssi->capture)) ··· 804 794 ssi->hw_params_cache.channels); 805 795 } 806 796 807 - static void rz_ssi_streams_suspend(struct rz_ssi_priv *ssi) 808 - { 809 - if (rz_ssi_is_stream_running(&ssi->playback) || 810 - rz_ssi_is_stream_running(&ssi->capture)) 811 - return; 812 - 813 - ssi->playback.dma_buffer_pos = 0; 814 - ssi->capture.dma_buffer_pos = 0; 815 - } 816 - 817 797 static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd, 818 798 struct snd_soc_dai *dai) 819 799 { ··· 813 813 814 814 switch (cmd) { 815 815 case SNDRV_PCM_TRIGGER_RESUME: 816 - ret = rz_ssi_trigger_resume(ssi); 816 + ret = rz_ssi_trigger_resume(ssi, strm); 817 817 if (ret) 818 818 return ret; 819 819 ··· 852 852 853 853 case SNDRV_PCM_TRIGGER_SUSPEND: 854 854 rz_ssi_stop(ssi, strm); 855 - rz_ssi_streams_suspend(ssi); 856 855 break; 857 856 858 857 case SNDRV_PCM_TRIGGER_STOP:
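The rz-ssi suspend/resume fix records where DMA actually stopped: completed_dma_buf_pos advances (with ring wrap-around) in the completion handler, and a resume restarts the descriptor chain one period after it instead of rewinding both streams to offset zero. The two halves, as in the diff:

    /* completion path: remember the last finished descriptor */
    strm->completed_dma_buf_pos += runtime->period_size;
    if (strm->completed_dma_buf_pos >= runtime->buffer_size)
            strm->completed_dma_buf_pos = 0;

    /* SNDRV_PCM_TRIGGER_RESUME: continue from the next period */
    strm->dma_buffer_pos = strm->completed_dma_buf_pos + runtime->period_size;

Since the resume position is now computed per stream, the old rz_ssi_streams_suspend() helper that zeroed both streams' positions becomes unnecessary and is removed.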
+20 -17
sound/usb/mixer_s1810c.c
··· 178 178 179 179 pkt_out.fields[SC1810C_STATE_F1_IDX] = SC1810C_SET_STATE_F1; 180 180 pkt_out.fields[SC1810C_STATE_F2_IDX] = SC1810C_SET_STATE_F2; 181 - ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), 181 + ret = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 182 182 SC1810C_SET_STATE_REQ, 183 183 SC1810C_SET_STATE_REQTYPE, 184 184 (*seqnum), 0, &pkt_out, sizeof(pkt_out)); ··· 597 597 if (!list_empty(&chip->mixer_list)) 598 598 return 0; 599 599 600 - dev_info(&dev->dev, 601 - "Presonus Studio 1810c, device_setup: %u\n", chip->setup); 602 - if (chip->setup == 1) 603 - dev_info(&dev->dev, "(8out/18in @ 48kHz)\n"); 604 - else if (chip->setup == 2) 605 - dev_info(&dev->dev, "(6out/8in @ 192kHz)\n"); 606 - else 607 - dev_info(&dev->dev, "(8out/14in @ 96kHz)\n"); 608 - 609 600 ret = snd_s1810c_init_mixer_maps(chip); 610 601 if (ret < 0) 611 602 return ret; ··· 625 634 if (ret < 0) 626 635 return ret; 627 636 628 - // The 1824c has a Mono Main switch instead of a 629 - // A/B select switch. 630 - if (mixer->chip->usb_id == USB_ID(0x194f, 0x010d)) { 631 - ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw); 632 - if (ret < 0) 633 - return ret; 634 - } else if (mixer->chip->usb_id == USB_ID(0x194f, 0x010c)) { 637 + switch (chip->usb_id) { 638 + case USB_ID(0x194f, 0x010c): /* Presonus Studio 1810c */ 639 + dev_info(&dev->dev, 640 + "Presonus Studio 1810c, device_setup: %u\n", chip->setup); 641 + if (chip->setup == 1) 642 + dev_info(&dev->dev, "(8out/18in @ 48kHz)\n"); 643 + else if (chip->setup == 2) 644 + dev_info(&dev->dev, "(6out/8in @ 192kHz)\n"); 645 + else 646 + dev_info(&dev->dev, "(8out/14in @ 96kHz)\n"); 647 + 635 648 ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw); 636 649 if (ret < 0) 637 650 return ret; 651 + 652 + break; 653 + case USB_ID(0x194f, 0x010d): /* Presonus Studio 1824c */ 654 + ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw); 655 + if (ret < 0) 656 + return ret; 657 + 658 + break; 638 659 } 639 660 640 661 return ret;
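The mixer_s1810c.c control-message fix matches the pipe direction to the request: SC1810C_SET_STATE_REQ is a host-to-device transfer, and usbcore flags control URBs whose pipe direction disagrees with bmRequestType (the "BOGUS control dir" warning), since the real direction is defined by the setup packet. The corrected call:

    /* SET_STATE is host-to-device, so use the send control pipe;
     * usb_rcvctrlpipe() mismatched SC1810C_SET_STATE_REQTYPE */
    ret = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
                          SC1810C_SET_STATE_REQ,
                          SC1810C_SET_STATE_REQTYPE,
                          (*seqnum), 0, &pkt_out, sizeof(pkt_out));

The rest of the hunk folds the 1810c/1824c differences (device-setup banner, A/B select vs. Mono Main switch) into one switch on chip->usb_id, keyed by the two Presonus product IDs.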
+1 -1
tools/lib/bpf/bpf_tracing.h
··· 311 311 #define __PT_RET_REG regs[31] 312 312 #define __PT_FP_REG __unsupported__ 313 313 #define __PT_RC_REG gpr[3] 314 - #define __PT_SP_REG sp 314 + #define __PT_SP_REG gpr[1] 315 315 #define __PT_IP_REG nip 316 316 317 317 #elif defined(bpf_target_sparc)
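The bpf_tracing.h fix reflects the powerpc ABI: struct pt_regs there has no dedicated sp member, and r1 is the stack pointer by convention, so the accessor must read gpr[1]. After the change the generic macro effectively expands on powerpc to:

    #define __PT_SP_REG gpr[1]
    /* so PT_REGS_SP(ctx) reads ((struct pt_regs *)(ctx))->gpr[1] */

The previous definition referenced a nonexistent sp field and would break the build of any BPF program using PT_REGS_SP() when targeting powerpc.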
+2 -2
tools/net/ynl/lib/ynl-priv.h
··· 313 313 struct nlattr *attr; 314 314 size_t len; 315 315 316 - len = strlen(str); 316 + len = strlen(str) + 1; 317 317 if (__ynl_attr_put_overflow(nlh, len)) 318 318 return; 319 319 ··· 321 321 attr->nla_type = attr_type; 322 322 323 323 strcpy((char *)ynl_attr_data(attr), str); 324 - attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len); 324 + attr->nla_len = NLA_HDRLEN + len; 325 325 326 326 nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len); 327 327 }
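Netlink string attributes are NUL-terminated, and nla_len is supposed to record the exact payload length while only the enclosing message length is aligned. The fixed ynl helper therefore counts the terminator and drops the alignment from nla_len:

    len = strlen(str) + 1;                    /* include the '\0' */
    if (__ynl_attr_put_overflow(nlh, len))    /* checked length now
                                                 covers the copy */
            return;
    strcpy((char *)ynl_attr_data(attr), str);
    attr->nla_len = NLA_HDRLEN + len;         /* exact, unaligned */
    nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);

Before the fix, strcpy() wrote the terminator one byte past the length that had been overflow-checked, and kernel-side strict validation can reject NUL-string attributes whose nla_len does not cover the terminator.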
+3
tools/net/ynl/pyynl/ethtool.py
··· 44 44 Pretty-print a set of fields from the reply. desc specifies the 45 45 fields and the optional type (bool/yn). 46 46 """ 47 + if not reply: 48 + return 49 + 47 50 if len(desc) == 0: 48 51 return print_field(reply, *zip(reply.keys(), reply.keys())) 49 52
+4 -1
tools/objtool/check.c
··· 3516 3516 { 3517 3517 struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL; 3518 3518 3519 + if (!insn->alt_group) 3520 + return false; 3521 + 3519 3522 /* ANNOTATE_IGNORE_ALTERNATIVE */ 3520 - if (insn->alt_group && insn->alt_group->ignore) 3523 + if (insn->alt_group->ignore) 3521 3524 return true; 3522 3525 3523 3526 /*
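The objtool change hoists the NULL test so the remainder of the function can use insn->alt_group unconditionally:

    if (!insn->alt_group)
            return false;

    /* ANNOTATE_IGNORE_ALTERNATIVE */
    if (insn->alt_group->ignore)
            return true;

An instruction outside any alternative group can never be an ignored alternative, so the early return is behaviour-preserving for the hunk shown while guarding any later dereferences in the function.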
+1
tools/testing/selftests/cachestat/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 test_cachestat 3 + tmpshmcstat
+2 -2
tools/testing/selftests/cachestat/test_cachestat.c
··· 226 226 int syscall_ret; 227 227 size_t compute_len = PS * 512; 228 228 struct cachestat_range cs_range = { PS, compute_len }; 229 - char *filename = "tmpshmcstat"; 229 + char *filename = "tmpshmcstat", *map; 230 230 struct cachestat cs; 231 231 bool ret = true; 232 232 int fd; ··· 257 257 } 258 258 break; 259 259 case FILE_MMAP: 260 - char *map = mmap(NULL, filesize, PROT_READ | PROT_WRITE, 260 + map = mmap(NULL, filesize, PROT_READ | PROT_WRITE, 261 261 MAP_SHARED, fd, 0); 262 262 263 263 if (map == MAP_FAILED) {
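The test_cachestat.c change is a pure C portability fix: before C23, a case label must be followed by a statement, and a declaration is not a statement, so `char *map = mmap(...);` directly under `case FILE_MMAP:` is rejected at older -std levels. Hoisting the declaration keeps the logic in place:

    char *filename = "tmpshmcstat", *map;   /* declaration hoisted */

    switch (/* test mode */) {
    case FILE_MMAP:
            map = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
            if (map == MAP_FAILED) {
                    /* ... error handling ... */
            }
            break;
    }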
+1 -1
tools/testing/selftests/net/bareudp.sh
··· 1 - #!/bin/sh 1 + #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 4 # Test various bareudp tunnel configurations.
+23 -4
tools/testing/selftests/vfio/lib/include/vfio_util.h
··· 206 206 void vfio_pci_device_cleanup(struct vfio_pci_device *device); 207 207 void vfio_pci_device_reset(struct vfio_pci_device *device); 208 208 209 - void vfio_pci_dma_map(struct vfio_pci_device *device, 210 - struct vfio_dma_region *region); 211 - void vfio_pci_dma_unmap(struct vfio_pci_device *device, 212 - struct vfio_dma_region *region); 209 + int __vfio_pci_dma_map(struct vfio_pci_device *device, 210 + struct vfio_dma_region *region); 211 + int __vfio_pci_dma_unmap(struct vfio_pci_device *device, 212 + struct vfio_dma_region *region, 213 + u64 *unmapped); 214 + int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped); 215 + 216 + static inline void vfio_pci_dma_map(struct vfio_pci_device *device, 217 + struct vfio_dma_region *region) 218 + { 219 + VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0); 220 + } 221 + 222 + static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device, 223 + struct vfio_dma_region *region) 224 + { 225 + VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0); 226 + } 227 + 228 + static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device) 229 + { 230 + VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0); 231 + } 213 232 214 233 void vfio_pci_config_access(struct vfio_pci_device *device, bool write, 215 234 size_t config, size_t size, void *data);
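The vfio selftest library adopts the usual double-underscore convention: __vfio_pci_dma_map() and friends return 0 or a negative errno for tests that want to probe failure, while the unprefixed inline wrappers assert success so existing callers stay one-liners. Usage from a test then looks like:

    /* expected-failure path: inspect the errno */
    rc = __vfio_pci_dma_map(device, &region);
    VFIO_ASSERT_EQ(rc, -EOVERFLOW);

    /* happy path: the wrapper asserts internally */
    vfio_pci_dma_map(device, &region);
    vfio_pci_dma_unmap_all(device);

The optional u64 *unmapped out-parameter on the unmap variants lets callers check how many bytes the kernel reports as actually unmapped.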
+83 -25
tools/testing/selftests/vfio/lib/vfio_pci_device.c
··· 2 2 #include <dirent.h> 3 3 #include <fcntl.h> 4 4 #include <libgen.h> 5 + #include <stdint.h> 5 6 #include <stdlib.h> 6 7 #include <string.h> 7 8 #include <unistd.h> ··· 142 141 ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info); 143 142 } 144 143 145 - static void vfio_iommu_dma_map(struct vfio_pci_device *device, 144 + static int vfio_iommu_dma_map(struct vfio_pci_device *device, 146 145 struct vfio_dma_region *region) 147 146 { 148 147 struct vfio_iommu_type1_dma_map args = { ··· 153 152 .size = region->size, 154 153 }; 155 154 156 - ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &args); 155 + if (ioctl(device->container_fd, VFIO_IOMMU_MAP_DMA, &args)) 156 + return -errno; 157 + 158 + return 0; 157 159 } 158 160 159 - static void iommufd_dma_map(struct vfio_pci_device *device, 161 + static int iommufd_dma_map(struct vfio_pci_device *device, 160 162 struct vfio_dma_region *region) 161 163 { 162 164 struct iommu_ioas_map args = { ··· 173 169 .ioas_id = device->ioas_id, 174 170 }; 175 171 176 - ioctl_assert(device->iommufd, IOMMU_IOAS_MAP, &args); 172 + if (ioctl(device->iommufd, IOMMU_IOAS_MAP, &args)) 173 + return -errno; 174 + 175 + return 0; 177 176 } 178 177 179 - void vfio_pci_dma_map(struct vfio_pci_device *device, 178 + int __vfio_pci_dma_map(struct vfio_pci_device *device, 180 179 struct vfio_dma_region *region) 181 180 { 181 + int ret; 182 + 182 183 if (device->iommufd) 183 - iommufd_dma_map(device, region); 184 + ret = iommufd_dma_map(device, region); 184 185 else 185 - vfio_iommu_dma_map(device, region); 186 + ret = vfio_iommu_dma_map(device, region); 187 + 188 + if (ret) 189 + return ret; 186 190 187 191 list_add(&region->link, &device->dma_regions); 192 + 193 + return 0; 188 194 } 189 195 190 - static void vfio_iommu_dma_unmap(struct vfio_pci_device *device, 191 - struct vfio_dma_region *region) 196 + static int vfio_iommu_dma_unmap(int fd, u64 iova, u64 size, u32 flags, 197 + u64 *unmapped) 192 198 { 193 199 struct vfio_iommu_type1_dma_unmap args = { 194 200 .argsz = sizeof(args), 195 - .iova = region->iova, 196 - .size = region->size, 201 + .iova = iova, 202 + .size = size, 203 + .flags = flags, 197 204 }; 198 205 199 - ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &args); 206 + if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args)) 207 + return -errno; 208 + 209 + if (unmapped) 210 + *unmapped = args.size; 211 + 212 + return 0; 200 213 } 201 214 202 - static void iommufd_dma_unmap(struct vfio_pci_device *device, 203 - struct vfio_dma_region *region) 215 + static int iommufd_dma_unmap(int fd, u64 iova, u64 length, u32 ioas_id, 216 + u64 *unmapped) 204 217 { 205 218 struct iommu_ioas_unmap args = { 206 219 .size = sizeof(args), 207 - .iova = region->iova, 208 - .length = region->size, 209 - .ioas_id = device->ioas_id, 220 + .iova = iova, 221 + .length = length, 222 + .ioas_id = ioas_id, 210 223 }; 211 224 212 - ioctl_assert(device->iommufd, IOMMU_IOAS_UNMAP, &args); 225 + if (ioctl(fd, IOMMU_IOAS_UNMAP, &args)) 226 + return -errno; 227 + 228 + if (unmapped) 229 + *unmapped = args.length; 230 + 231 + return 0; 213 232 } 214 233 215 - void vfio_pci_dma_unmap(struct vfio_pci_device *device, 216 - struct vfio_dma_region *region) 234 + int __vfio_pci_dma_unmap(struct vfio_pci_device *device, 235 + struct vfio_dma_region *region, u64 *unmapped) 217 236 { 218 - if (device->iommufd) 219 - iommufd_dma_unmap(device, region); 220 - else 221 - vfio_iommu_dma_unmap(device, region); 237 + int ret; 222 238 223 - list_del(&region->link); 239 + if (device->iommufd) 240 
+ ret = iommufd_dma_unmap(device->iommufd, region->iova, 241 + region->size, device->ioas_id, 242 + unmapped); 243 + else 244 + ret = vfio_iommu_dma_unmap(device->container_fd, region->iova, 245 + region->size, 0, unmapped); 246 + 247 + if (ret) 248 + return ret; 249 + 250 + list_del_init(&region->link); 251 + 252 + return 0; 253 + } 254 + 255 + int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped) 256 + { 257 + int ret; 258 + struct vfio_dma_region *curr, *next; 259 + 260 + if (device->iommufd) 261 + ret = iommufd_dma_unmap(device->iommufd, 0, UINT64_MAX, 262 + device->ioas_id, unmapped); 263 + else 264 + ret = vfio_iommu_dma_unmap(device->container_fd, 0, 0, 265 + VFIO_DMA_UNMAP_FLAG_ALL, unmapped); 266 + 267 + if (ret) 268 + return ret; 269 + 270 + list_for_each_entry_safe(curr, next, &device->dma_regions, link) 271 + list_del_init(&curr->link); 272 + 273 + return 0; 224 274 } 225 275 226 276 static void vfio_pci_region_get(struct vfio_pci_device *device, int index,
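Internally that means the ioctl wrappers propagate the kernel's error instead of asserting, and the region bookkeeping switches to list_del_init():

    if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args))
            return -errno;          /* was: ioctl_assert(...) */

    list_del_init(&region->link);   /* node stays safely reusable */

list_del_init() leaves the removed node pointing at itself rather than poisoned, which matters now that a region object can go through repeated map/unmap cycles, or be swept wholesale by __vfio_pci_dma_unmap_all(), within a single test.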
+94 -1
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
··· 112 112 FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB); 113 113 FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB); 114 114 115 + #undef FIXTURE_VARIANT_ADD_IOMMU_MODE 116 + 115 117 FIXTURE_SETUP(vfio_dma_mapping_test) 116 118 { 117 119 self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); ··· 131 129 struct vfio_dma_region region; 132 130 struct iommu_mapping mapping; 133 131 u64 mapping_size = size; 132 + u64 unmapped; 134 133 int rc; 135 134 136 135 region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0); ··· 187 184 } 188 185 189 186 unmap: 190 - vfio_pci_dma_unmap(self->device, &region); 187 + rc = __vfio_pci_dma_unmap(self->device, &region, &unmapped); 188 + ASSERT_EQ(rc, 0); 189 + ASSERT_EQ(unmapped, region.size); 191 190 printf("Unmapped IOVA 0x%lx\n", region.iova); 192 191 ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr)); 193 192 ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping)); 194 193 195 194 ASSERT_TRUE(!munmap(region.vaddr, size)); 195 + } 196 + 197 + FIXTURE(vfio_dma_map_limit_test) { 198 + struct vfio_pci_device *device; 199 + struct vfio_dma_region region; 200 + size_t mmap_size; 201 + }; 202 + 203 + FIXTURE_VARIANT(vfio_dma_map_limit_test) { 204 + const char *iommu_mode; 205 + }; 206 + 207 + #define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \ 208 + FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) { \ 209 + .iommu_mode = #_iommu_mode, \ 210 + } 211 + 212 + FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(); 213 + 214 + #undef FIXTURE_VARIANT_ADD_IOMMU_MODE 215 + 216 + FIXTURE_SETUP(vfio_dma_map_limit_test) 217 + { 218 + struct vfio_dma_region *region = &self->region; 219 + u64 region_size = getpagesize(); 220 + 221 + /* 222 + * Over-allocate mmap by double the size to provide enough backing vaddr 223 + * for overflow tests 224 + */ 225 + self->mmap_size = 2 * region_size; 226 + 227 + self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); 228 + region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE, 229 + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 230 + ASSERT_NE(region->vaddr, MAP_FAILED); 231 + 232 + /* One page prior to the end of address space */ 233 + region->iova = ~(iova_t)0 & ~(region_size - 1); 234 + region->size = region_size; 235 + } 236 + 237 + FIXTURE_TEARDOWN(vfio_dma_map_limit_test) 238 + { 239 + vfio_pci_device_cleanup(self->device); 240 + ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0); 241 + } 242 + 243 + TEST_F(vfio_dma_map_limit_test, unmap_range) 244 + { 245 + struct vfio_dma_region *region = &self->region; 246 + u64 unmapped; 247 + int rc; 248 + 249 + vfio_pci_dma_map(self->device, region); 250 + ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr)); 251 + 252 + rc = __vfio_pci_dma_unmap(self->device, region, &unmapped); 253 + ASSERT_EQ(rc, 0); 254 + ASSERT_EQ(unmapped, region->size); 255 + } 256 + 257 + TEST_F(vfio_dma_map_limit_test, unmap_all) 258 + { 259 + struct vfio_dma_region *region = &self->region; 260 + u64 unmapped; 261 + int rc; 262 + 263 + vfio_pci_dma_map(self->device, region); 264 + ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr)); 265 + 266 + rc = __vfio_pci_dma_unmap_all(self->device, &unmapped); 267 + ASSERT_EQ(rc, 0); 268 + ASSERT_EQ(unmapped, region->size); 269 + } 270 + 271 + TEST_F(vfio_dma_map_limit_test, overflow) 272 + { 273 + struct vfio_dma_region *region = &self->region; 274 + int rc; 275 + 276 + region->size = 
self->mmap_size; 277 + 278 + rc = __vfio_pci_dma_map(self->device, region); 279 + ASSERT_EQ(rc, -EOVERFLOW); 280 + 281 + rc = __vfio_pci_dma_unmap(self->device, region, NULL); 282 + ASSERT_EQ(rc, -EOVERFLOW); 196 283 } 197 284 198 285 int main(int argc, char *argv[])
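The new vfio_dma_map_limit_test places a one-page region on the very last page of the IOVA space and then doubles its size to push iova + size past 2^64:

    region->iova = ~(iova_t)0 & ~(region_size - 1);  /* 0xffff...f000 */
    region->size = region_size;                      /* fits exactly */

    /* overflow case: two pages starting on the last page */
    region->size = self->mmap_size;
    rc = __vfio_pci_dma_map(self->device, region);
    ASSERT_EQ(rc, -EOVERFLOW);

The mmap backing is deliberately allocated at twice the region size so that the overflowing request is still backed by valid user memory; only the IOVA arithmetic, not the vaddr range, can fail.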