Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-riscv-6.8-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.8 part #1

- KVM_GET_REG_LIST improvement for vector registers
- Generate ISA extension reg_list using macros in get-reg-list selftest
- Steal time accounting support along with selftest

+3937 -2357
+4
.mailmap
··· 191 191 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com> 192 192 Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com> 193 193 Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com> 194 + Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com> 195 + Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com> 196 + Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com> 197 + Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com> 194 198 Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org> 195 199 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com> 196 200 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
+3 -3
Documentation/admin-guide/kernel-parameters.txt
··· 3985 3985 vulnerability. System may allow data leaks with this 3986 3986 option. 3987 3987 3988 - no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized 3989 - steal time accounting. steal time is computed, but 3990 - won't influence scheduler behaviour 3988 + no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES,RISCV] Disable 3989 + paravirtualized steal time accounting. steal time is 3990 + computed, but won't influence scheduler behaviour 3991 3991 3992 3992 nosync [HW,M68K] Disables sync negotiation for all devices. 3993 3993
+6 -4
Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
··· 15 15 16 16 properties: 17 17 compatible: 18 - enum: 19 - - fsl,imx23-ocotp 20 - - fsl,imx28-ocotp 18 + items: 19 + - enum: 20 + - fsl,imx23-ocotp 21 + - fsl,imx28-ocotp 22 + - const: fsl,ocotp 21 23 22 24 reg: 23 25 maxItems: 1 ··· 37 35 examples: 38 36 - | 39 37 ocotp: efuse@8002c000 { 40 - compatible = "fsl,imx28-ocotp"; 38 + compatible = "fsl,imx28-ocotp", "fsl,ocotp"; 41 39 #address-cells = <1>; 42 40 #size-cells = <1>; 43 41 reg = <0x8002c000 0x2000>;
+1 -2
MAINTAINERS
··· 6050 6050 M: dm-devel@lists.linux.dev 6051 6051 L: dm-devel@lists.linux.dev 6052 6052 S: Maintained 6053 - W: http://sources.redhat.com/dm 6054 6053 Q: http://patchwork.kernel.org/project/dm-devel/list/ 6055 6054 T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git 6056 - T: quilt http://people.redhat.com/agk/patches/linux/editing/ 6057 6055 F: Documentation/admin-guide/device-mapper/ 6058 6056 F: drivers/md/Kconfig 6059 6057 F: drivers/md/Makefile ··· 9524 9526 HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3) 9525 9527 M: Yisen Zhuang <yisen.zhuang@huawei.com> 9526 9528 M: Salil Mehta <salil.mehta@huawei.com> 9529 + M: Jijie Shao <shaojijie@huawei.com> 9527 9530 L: netdev@vger.kernel.org 9528 9531 S: Maintained 9529 9532 W: http://www.hisilicon.com
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 7 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION*
-5
arch/arc/Kconfig
··· 49 49 select OF 50 50 select OF_EARLY_FLATTREE 51 51 select PCI_SYSCALL if PCI 52 - select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING 53 52 select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32 54 53 select TRACE_IRQFLAGS_SUPPORT 55 54 ··· 230 231 TLB entries have a per-page Cache Enable Bit. 231 232 Note that Global I/D ENABLE + Per Page DISABLE works but corollary 232 233 Global DISABLE + Per Page ENABLE won't work 233 - 234 - config ARC_CACHE_VIPT_ALIASING 235 - bool "Support VIPT Aliasing D$" 236 - depends on ARC_HAS_DCACHE && ISA_ARCOMPACT 237 234 238 235 endif #ARC_CACHE 239 236
-43
arch/arc/include/asm/cacheflush.h
··· 44 44 45 45 #define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */ 46 46 47 - #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING 48 - 49 47 #define flush_cache_mm(mm) /* called on munmap/exit */ 50 48 #define flush_cache_range(mm, u_vstart, u_vend) 51 49 #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ 52 - 53 - #else /* VIPT aliasing dcache */ 54 - 55 - /* To clear out stale userspace mappings */ 56 - void flush_cache_mm(struct mm_struct *mm); 57 - void flush_cache_range(struct vm_area_struct *vma, 58 - unsigned long start,unsigned long end); 59 - void flush_cache_page(struct vm_area_struct *vma, 60 - unsigned long user_addr, unsigned long page); 61 - 62 - /* 63 - * To make sure that userspace mapping is flushed to memory before 64 - * get_user_pages() uses a kernel mapping to access the page 65 - */ 66 - #define ARCH_HAS_FLUSH_ANON_PAGE 67 - void flush_anon_page(struct vm_area_struct *vma, 68 - struct page *page, unsigned long u_vaddr); 69 - 70 - #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ 71 50 72 51 /* 73 52 * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default ··· 54 75 * to record that they dirtied the dcache 55 76 */ 56 77 #define PG_dc_clean PG_arch_1 57 - 58 - #define CACHE_COLORS_NUM 4 59 - #define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1) 60 - #define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK) 61 - 62 - /* 63 - * Simple wrapper over config option 64 - * Bootup code ensures that hardware matches kernel configuration 65 - */ 66 - static inline int cache_is_vipt_aliasing(void) 67 - { 68 - return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING); 69 - } 70 - 71 - /* 72 - * checks if two addresses (after page aligning) index into same cache set 73 - */ 74 - #define addr_not_cache_congruent(addr1, addr2) \ 75 - ({ \ 76 - cache_is_vipt_aliasing() ? 
\ 77 - (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \ 78 - }) 79 78 80 79 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 81 80 do { \
+32
arch/arc/include/asm/entry-arcv2.h
··· 291 291 /* M = 8-1 N = 8 */ 292 292 .endm 293 293 294 + .macro SAVE_ABI_CALLEE_REGS 295 + push r13 296 + push r14 297 + push r15 298 + push r16 299 + push r17 300 + push r18 301 + push r19 302 + push r20 303 + push r21 304 + push r22 305 + push r23 306 + push r24 307 + push r25 308 + .endm 309 + 310 + .macro RESTORE_ABI_CALLEE_REGS 311 + pop r25 312 + pop r24 313 + pop r23 314 + pop r22 315 + pop r21 316 + pop r20 317 + pop r19 318 + pop r18 319 + pop r17 320 + pop r16 321 + pop r15 322 + pop r14 323 + pop r13 324 + .endm 325 + 294 326 #endif
+86 -1
arch/arc/include/asm/entry-compact.h
··· 33 33 #include <asm/irqflags-compact.h> 34 34 #include <asm/thread_info.h> /* For THREAD_SIZE */ 35 35 36 + /* Note on the LD/ST addr modes with addr reg wback 37 + * 38 + * LD.a same as LD.aw 39 + * 40 + * LD.a reg1, [reg2, x] => Pre Incr 41 + * Eff Addr for load = [reg2 + x] 42 + * 43 + * LD.ab reg1, [reg2, x] => Post Incr 44 + * Eff Addr for load = [reg2] 45 + */ 46 + 47 + .macro PUSHAX aux 48 + lr r9, [\aux] 49 + push r9 50 + .endm 51 + 52 + .macro POPAX aux 53 + pop r9 54 + sr r9, [\aux] 55 + .endm 56 + 57 + .macro SAVE_R0_TO_R12 58 + push r0 59 + push r1 60 + push r2 61 + push r3 62 + push r4 63 + push r5 64 + push r6 65 + push r7 66 + push r8 67 + push r9 68 + push r10 69 + push r11 70 + push r12 71 + .endm 72 + 73 + .macro RESTORE_R12_TO_R0 74 + pop r12 75 + pop r11 76 + pop r10 77 + pop r9 78 + pop r8 79 + pop r7 80 + pop r6 81 + pop r5 82 + pop r4 83 + pop r3 84 + pop r2 85 + pop r1 86 + pop r0 87 + .endm 88 + 89 + .macro SAVE_ABI_CALLEE_REGS 90 + push r13 91 + push r14 92 + push r15 93 + push r16 94 + push r17 95 + push r18 96 + push r19 97 + push r20 98 + push r21 99 + push r22 100 + push r23 101 + push r24 102 + push r25 103 + .endm 104 + 105 + .macro RESTORE_ABI_CALLEE_REGS 106 + pop r25 107 + pop r24 108 + pop r23 109 + pop r22 110 + pop r21 111 + pop r20 112 + pop r19 113 + pop r18 114 + pop r17 115 + pop r16 116 + pop r15 117 + pop r14 118 + pop r13 119 + .endm 120 + 36 121 /*-------------------------------------------------------------- 37 122 * Switch to Kernel Mode stack if SP points to User Mode stack 38 123 * ··· 320 235 SWITCH_TO_KERNEL_STK 321 236 322 237 323 - PUSH 0x003\LVL\()abcd /* Dummy ECR */ 238 + st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */ 324 239 sub sp, sp, 8 /* skip orig_r0 (not needed) 325 240 skip pt_regs->sp, already saved above */ 326 241
+4 -106
arch/arc/include/asm/entry.h
··· 21 21 #include <asm/entry-arcv2.h> 22 22 #endif 23 23 24 - /* Note on the LD/ST addr modes with addr reg wback 25 - * 26 - * LD.a same as LD.aw 27 - * 28 - * LD.a reg1, [reg2, x] => Pre Incr 29 - * Eff Addr for load = [reg2 + x] 30 - * 31 - * LD.ab reg1, [reg2, x] => Post Incr 32 - * Eff Addr for load = [reg2] 33 - */ 34 - 35 - .macro PUSH reg 36 - st.a \reg, [sp, -4] 37 - .endm 38 - 39 - .macro PUSHAX aux 40 - lr r9, [\aux] 41 - PUSH r9 42 - .endm 43 - 44 - .macro POP reg 45 - ld.ab \reg, [sp, 4] 46 - .endm 47 - 48 - .macro POPAX aux 49 - POP r9 50 - sr r9, [\aux] 51 - .endm 52 - 53 - /*-------------------------------------------------------------- 54 - * Helpers to save/restore Scratch Regs: 55 - * used by Interrupt/Exception Prologue/Epilogue 56 - *-------------------------------------------------------------*/ 57 - .macro SAVE_R0_TO_R12 58 - PUSH r0 59 - PUSH r1 60 - PUSH r2 61 - PUSH r3 62 - PUSH r4 63 - PUSH r5 64 - PUSH r6 65 - PUSH r7 66 - PUSH r8 67 - PUSH r9 68 - PUSH r10 69 - PUSH r11 70 - PUSH r12 71 - .endm 72 - 73 - .macro RESTORE_R12_TO_R0 74 - POP r12 75 - POP r11 76 - POP r10 77 - POP r9 78 - POP r8 79 - POP r7 80 - POP r6 81 - POP r5 82 - POP r4 83 - POP r3 84 - POP r2 85 - POP r1 86 - POP r0 87 - 88 - .endm 89 - 90 - /*-------------------------------------------------------------- 91 - * Helpers to save/restore callee-saved regs: 92 - * used by several macros below 93 - *-------------------------------------------------------------*/ 94 - .macro SAVE_R13_TO_R25 95 - PUSH r13 96 - PUSH r14 97 - PUSH r15 98 - PUSH r16 99 - PUSH r17 100 - PUSH r18 101 - PUSH r19 102 - PUSH r20 103 - PUSH r21 104 - PUSH r22 105 - PUSH r23 106 - PUSH r24 107 - PUSH r25 108 - .endm 109 - 110 - .macro RESTORE_R25_TO_R13 111 - POP r25 112 - POP r24 113 - POP r23 114 - POP r22 115 - POP r21 116 - POP r20 117 - POP r19 118 - POP r18 119 - POP r17 120 - POP r16 121 - POP r15 122 - POP r14 123 - POP r13 124 - .endm 125 - 126 24 /* 127 25 * save user mode callee regs as 
struct callee_regs 128 26 * - needed by fork/do_signal/unaligned-access-emulation. 129 27 */ 130 28 .macro SAVE_CALLEE_SAVED_USER 131 - SAVE_R13_TO_R25 29 + SAVE_ABI_CALLEE_REGS 132 30 .endm 133 31 134 32 /* ··· 34 136 * - could have been changed by ptrace tracer or unaligned-access fixup 35 137 */ 36 138 .macro RESTORE_CALLEE_SAVED_USER 37 - RESTORE_R25_TO_R13 139 + RESTORE_ABI_CALLEE_REGS 38 140 .endm 39 141 40 142 /* 41 143 * save/restore kernel mode callee regs at the time of context switch 42 144 */ 43 145 .macro SAVE_CALLEE_SAVED_KERNEL 44 - SAVE_R13_TO_R25 146 + SAVE_ABI_CALLEE_REGS 45 147 .endm 46 148 47 149 .macro RESTORE_CALLEE_SAVED_KERNEL 48 - RESTORE_R25_TO_R13 150 + RESTORE_ABI_CALLEE_REGS 49 151 .endm 50 152 51 153 /*--------------------------------------------------------------
+7
arch/arc/include/asm/hugepage.h
··· 10 10 #include <linux/types.h> 11 11 #include <asm-generic/pgtable-nopmd.h> 12 12 13 + /* 14 + * Hugetlb definitions. 15 + */ 16 + #define HPAGE_SHIFT PMD_SHIFT 17 + #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) 18 + #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 19 + 13 20 static inline pte_t pmd_pte(pmd_t pmd) 14 21 { 15 22 return __pte(pmd_val(pmd));
+8 -6
arch/arc/include/asm/ptrace.h
··· 54 54 ecr_reg ecr; 55 55 }; 56 56 57 + struct callee_regs { 58 + unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; 59 + }; 60 + 57 61 #define MAX_REG_OFFSET offsetof(struct pt_regs, ecr) 58 62 59 63 #else ··· 96 92 unsigned long status32; 97 93 }; 98 94 99 - #define MAX_REG_OFFSET offsetof(struct pt_regs, status32) 100 - 101 - #endif 102 - 103 - /* Callee saved registers - need to be saved only when you are scheduled out */ 104 - 105 95 struct callee_regs { 106 96 unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; 107 97 }; 98 + 99 + #define MAX_REG_OFFSET offsetof(struct pt_regs, status32) 100 + 101 + #endif 108 102 109 103 #define instruction_pointer(regs) ((regs)->ret) 110 104 #define profile_pc(regs) instruction_pointer(regs)
+1 -3
arch/arc/kernel/setup.c
··· 153 153 { 154 154 int n = 0; 155 155 #ifdef CONFIG_ISA_ARCV2 156 - const char *release, *cpu_nm, *isa_nm = "ARCv2"; 156 + const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2"; 157 157 int dual_issue = 0, dual_enb = 0, mpy_opt, present; 158 158 int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk; 159 159 char mpy_nm[16], lpb_nm[32]; ··· 171 171 * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent 172 172 * releases only update it. 173 173 */ 174 - 175 - cpu_nm = "HS38"; 176 174 177 175 if (info->arcver > 0x50 && info->arcver <= 0x53) { 178 176 release = arc_hs_rel[info->arcver - 0x51].str;
+3 -3
arch/arc/kernel/signal.c
··· 62 62 unsigned int sigret_magic; 63 63 }; 64 64 65 - static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs) 65 + static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs) 66 66 { 67 67 int err = 0; 68 68 #ifndef CONFIG_ISA_ARCOMPACT ··· 75 75 #else 76 76 v2abi.r58 = v2abi.r59 = 0; 77 77 #endif 78 - err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi)); 78 + err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi)); 79 79 #endif 80 80 return err; 81 81 } 82 82 83 - static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs) 83 + static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs) 84 84 { 85 85 int err = 0; 86 86 #ifndef CONFIG_ISA_ARCOMPACT
+6 -130
arch/arc/mm/cache.c
··· 145 145 p_dc->sz_k = 1 << (dbcr.sz - 1); 146 146 147 147 n += scnprintf(buf + n, len - n, 148 - "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", 148 + "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n", 149 149 p_dc->sz_k, assoc, p_dc->line_len, 150 150 vipt ? "VIPT" : "PIPT", 151 - p_dc->colors > 1 ? " aliasing" : "", 152 151 IS_USED_CFG(CONFIG_ARC_HAS_DCACHE)); 153 152 154 153 slc_chk: ··· 702 703 * Exported APIs 703 704 */ 704 705 705 - /* 706 - * Handle cache congruency of kernel and userspace mappings of page when kernel 707 - * writes-to/reads-from 708 - * 709 - * The idea is to defer flushing of kernel mapping after a WRITE, possible if: 710 - * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent 711 - * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache) 712 - * -In SMP, if hardware caches are coherent 713 - * 714 - * There's a corollary case, where kernel READs from a userspace mapped page. 715 - * If the U-mapping is not congruent to K-mapping, former needs flushing. 716 - */ 717 706 void flush_dcache_folio(struct folio *folio) 718 707 { 719 - struct address_space *mapping; 720 - 721 - if (!cache_is_vipt_aliasing()) { 722 - clear_bit(PG_dc_clean, &folio->flags); 723 - return; 724 - } 725 - 726 - /* don't handle anon pages here */ 727 - mapping = folio_flush_mapping(folio); 728 - if (!mapping) 729 - return; 730 - 731 - /* 732 - * pagecache page, file not yet mapped to userspace 733 - * Make a note that K-mapping is dirty 734 - */ 735 - if (!mapping_mapped(mapping)) { 736 - clear_bit(PG_dc_clean, &folio->flags); 737 - } else if (folio_mapped(folio)) { 738 - /* kernel reading from page with U-mapping */ 739 - phys_addr_t paddr = (unsigned long)folio_address(folio); 740 - unsigned long vaddr = folio_pos(folio); 741 - 742 - /* 743 - * vaddr is not actually the virtual address, but is 744 - * congruent to every user mapping. 
745 - */ 746 - if (addr_not_cache_congruent(paddr, vaddr)) 747 - __flush_dcache_pages(paddr, vaddr, 748 - folio_nr_pages(folio)); 749 - } 708 + clear_bit(PG_dc_clean, &folio->flags); 709 + return; 750 710 } 751 711 EXPORT_SYMBOL(flush_dcache_folio); 752 712 ··· 879 921 880 922 } 881 923 882 - #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING 883 - 884 - void flush_cache_mm(struct mm_struct *mm) 885 - { 886 - flush_cache_all(); 887 - } 888 - 889 - void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, 890 - unsigned long pfn) 891 - { 892 - phys_addr_t paddr = pfn << PAGE_SHIFT; 893 - 894 - u_vaddr &= PAGE_MASK; 895 - 896 - __flush_dcache_pages(paddr, u_vaddr, 1); 897 - 898 - if (vma->vm_flags & VM_EXEC) 899 - __inv_icache_pages(paddr, u_vaddr, 1); 900 - } 901 - 902 - void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 903 - unsigned long end) 904 - { 905 - flush_cache_all(); 906 - } 907 - 908 - void flush_anon_page(struct vm_area_struct *vma, struct page *page, 909 - unsigned long u_vaddr) 910 - { 911 - /* TBD: do we really need to clear the kernel mapping */ 912 - __flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1); 913 - __flush_dcache_pages((phys_addr_t)page_address(page), 914 - (phys_addr_t)page_address(page), 1); 915 - 916 - } 917 - 918 - #endif 919 - 920 924 void copy_user_highpage(struct page *to, struct page *from, 921 925 unsigned long u_vaddr, struct vm_area_struct *vma) 922 926 { ··· 886 966 struct folio *dst = page_folio(to); 887 967 void *kfrom = kmap_atomic(from); 888 968 void *kto = kmap_atomic(to); 889 - int clean_src_k_mappings = 0; 890 - 891 - /* 892 - * If SRC page was already mapped in userspace AND it's U-mapping is 893 - * not congruent with K-mapping, sync former to physical page so that 894 - * K-mapping in memcpy below, sees the right data 895 - * 896 - * Note that while @u_vaddr refers to DST page's userspace vaddr, it is 897 - * equally valid for SRC page as well 898 - * 899 - * For !VIPT cache, all 
of this gets compiled out as 900 - * addr_not_cache_congruent() is 0 901 - */ 902 - if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { 903 - __flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1); 904 - clean_src_k_mappings = 1; 905 - } 906 969 907 970 copy_page(kto, kfrom); 908 971 909 - /* 910 - * Mark DST page K-mapping as dirty for a later finalization by 911 - * update_mmu_cache(). Although the finalization could have been done 912 - * here as well (given that both vaddr/paddr are available). 913 - * But update_mmu_cache() already has code to do that for other 914 - * non copied user pages (e.g. read faults which wire in pagecache page 915 - * directly). 916 - */ 917 972 clear_bit(PG_dc_clean, &dst->flags); 918 - 919 - /* 920 - * if SRC was already usermapped and non-congruent to kernel mapping 921 - * sync the kernel mapping back to physical page 922 - */ 923 - if (clean_src_k_mappings) { 924 - __flush_dcache_pages((unsigned long)kfrom, 925 - (unsigned long)kfrom, 1); 926 - } else { 927 - clear_bit(PG_dc_clean, &src->flags); 928 - } 973 + clear_bit(PG_dc_clean, &src->flags); 929 974 930 975 kunmap_atomic(kto); 931 976 kunmap_atomic(kfrom); ··· 1025 1140 dc->line_len, L1_CACHE_BYTES); 1026 1141 1027 1142 /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */ 1028 - if (is_isa_arcompact()) { 1029 - int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING); 1030 - 1031 - if (dc->colors > 1) { 1032 - if (!handled) 1033 - panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); 1034 - if (CACHE_COLORS_NUM != dc->colors) 1035 - panic("CACHE_COLORS_NUM not optimized for config\n"); 1036 - } else if (handled && dc->colors == 1) { 1037 - panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); 1038 - } 1143 + if (is_isa_arcompact() && dc->colors > 1) { 1144 + panic("Aliasing VIPT cache not supported\n"); 1039 1145 } 1040 1146 } 1041 1147
+3 -18
arch/arc/mm/mmap.c
··· 14 14 15 15 #include <asm/cacheflush.h> 16 16 17 - #define COLOUR_ALIGN(addr, pgoff) \ 18 - ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \ 19 - (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) 20 - 21 17 /* 22 18 * Ensure that shared mappings are correctly aligned to 23 19 * avoid aliasing issues with VIPT caches. ··· 27 31 { 28 32 struct mm_struct *mm = current->mm; 29 33 struct vm_area_struct *vma; 30 - int do_align = 0; 31 - int aliasing = cache_is_vipt_aliasing(); 32 34 struct vm_unmapped_area_info info; 33 - 34 - /* 35 - * We only need to do colour alignment if D cache aliases. 36 - */ 37 - if (aliasing) 38 - do_align = filp || (flags & MAP_SHARED); 39 35 40 36 /* 41 37 * We enforce the MAP_FIXED case. 42 38 */ 43 39 if (flags & MAP_FIXED) { 44 - if (aliasing && flags & MAP_SHARED && 40 + if (flags & MAP_SHARED && 45 41 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) 46 42 return -EINVAL; 47 43 return addr; ··· 43 55 return -ENOMEM; 44 56 45 57 if (addr) { 46 - if (do_align) 47 - addr = COLOUR_ALIGN(addr, pgoff); 48 - else 49 - addr = PAGE_ALIGN(addr); 58 + addr = PAGE_ALIGN(addr); 50 59 51 60 vma = find_vma(mm, addr); 52 61 if (TASK_SIZE - len >= addr && ··· 55 70 info.length = len; 56 71 info.low_limit = mm->mmap_base; 57 72 info.high_limit = TASK_SIZE; 58 - info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 73 + info.align_mask = 0; 59 74 info.align_offset = pgoff << PAGE_SHIFT; 60 75 return vm_unmapped_area(&info); 61 76 }
+5 -11
arch/arc/mm/tlb.c
··· 478 478 479 479 create_tlb(vma, vaddr, ptep); 480 480 481 - if (page == ZERO_PAGE(0)) { 481 + if (page == ZERO_PAGE(0)) 482 482 return; 483 - } 484 483 485 484 /* 486 - * Exec page : Independent of aliasing/page-color considerations, 487 - * since icache doesn't snoop dcache on ARC, any dirty 488 - * K-mapping of a code page needs to be wback+inv so that 489 - * icache fetch by userspace sees code correctly. 490 - * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it 491 - * so userspace sees the right data. 492 - * (Avoids the flush for Non-exec + congruent mapping case) 485 + * For executable pages, since icache doesn't snoop dcache, any 486 + * dirty K-mapping of a code page needs to be wback+inv so that 487 + * icache fetch by userspace sees code correctly. 493 488 */ 494 - if ((vma->vm_flags & VM_EXEC) || 495 - addr_not_cache_congruent(paddr, vaddr)) { 489 + if (vma->vm_flags & VM_EXEC) { 496 490 struct folio *folio = page_folio(page); 497 491 int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags); 498 492 if (dirty) {
+1
arch/arm/boot/dts/ti/omap/am33xx.dtsi
··· 359 359 <SYSC_IDLE_NO>, 360 360 <SYSC_IDLE_SMART>, 361 361 <SYSC_IDLE_SMART_WKUP>; 362 + ti,sysc-delay-us = <2>; 362 363 clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>; 363 364 clock-names = "fck"; 364 365 #address-cells = <1>;
+1 -1
arch/arm/boot/dts/ti/omap/dra7.dtsi
··· 147 147 148 148 l3-noc@44000000 { 149 149 compatible = "ti,dra7-l3-noc"; 150 - reg = <0x44000000 0x1000>, 150 + reg = <0x44000000 0x1000000>, 151 151 <0x45000000 0x1000>; 152 152 interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, 153 153 <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+5
arch/arm/mach-omap2/id.c
··· 793 793 794 794 soc_dev_attr->machine = soc_name; 795 795 soc_dev_attr->family = omap_get_family(); 796 + if (!soc_dev_attr->family) { 797 + kfree(soc_dev_attr); 798 + return; 799 + } 796 800 soc_dev_attr->revision = soc_rev; 797 801 soc_dev_attr->custom_attr_group = omap_soc_groups[0]; 798 802 799 803 soc_dev = soc_device_register(soc_dev_attr); 800 804 if (IS_ERR(soc_dev)) { 805 + kfree(soc_dev_attr->family); 801 806 kfree(soc_dev_attr); 802 807 return; 803 808 }
-3
arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
··· 68 68 &emac0 { 69 69 pinctrl-names = "default"; 70 70 pinctrl-0 = <&ext_rgmii_pins>; 71 - phy-mode = "rgmii"; 72 71 phy-handle = <&ext_rgmii_phy>; 73 - allwinner,rx-delay-ps = <3100>; 74 - allwinner,tx-delay-ps = <700>; 75 72 status = "okay"; 76 73 }; 77 74
+3
arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
··· 13 13 }; 14 14 15 15 &emac0 { 16 + allwinner,rx-delay-ps = <3100>; 17 + allwinner,tx-delay-ps = <700>; 18 + phy-mode = "rgmii"; 16 19 phy-supply = <&reg_dcdce>; 17 20 }; 18 21
+2
arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
··· 13 13 }; 14 14 15 15 &emac0 { 16 + allwinner,tx-delay-ps = <700>; 17 + phy-mode = "rgmii-rxid"; 16 18 phy-supply = <&reg_dldo1>; 17 19 }; 18 20
+1
arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
··· 238 238 mt6360: pmic@34 { 239 239 compatible = "mediatek,mt6360"; 240 240 reg = <0x34>; 241 + interrupt-parent = <&pio>; 241 242 interrupts = <128 IRQ_TYPE_EDGE_FALLING>; 242 243 interrupt-names = "IRQB"; 243 244 interrupt-controller;
-4
arch/arm64/include/asm/syscall_wrapper.h
··· 44 44 return sys_ni_syscall(); \ 45 45 } 46 46 47 - #define COMPAT_SYS_NI(name) \ 48 - SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers); 49 - 50 47 #endif /* CONFIG_COMPAT */ 51 48 52 49 #define __SYSCALL_DEFINEx(x, name, ...) \ ··· 78 81 } 79 82 80 83 asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused); 81 - #define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers); 82 84 83 85 #endif /* __ASM_SYSCALL_WRAPPER_H */
+1 -1
arch/arm64/kvm/arm.c
··· 410 410 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); 411 411 kvm_timer_vcpu_terminate(vcpu); 412 412 kvm_pmu_vcpu_destroy(vcpu); 413 - 413 + kvm_vgic_vcpu_destroy(vcpu); 414 414 kvm_arm_vcpu_destroy(vcpu); 415 415 } 416 416
+33 -22
arch/arm64/kvm/vgic/vgic-init.c
··· 368 368 vgic_v4_teardown(kvm); 369 369 } 370 370 371 - void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) 371 + static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) 372 372 { 373 373 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 374 374 ··· 379 379 vgic_flush_pending_lpis(vcpu); 380 380 381 381 INIT_LIST_HEAD(&vgic_cpu->ap_list_head); 382 - vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; 382 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { 383 + vgic_unregister_redist_iodev(vcpu); 384 + vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; 385 + } 383 386 } 384 387 385 - static void __kvm_vgic_destroy(struct kvm *kvm) 388 + void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) 386 389 { 387 - struct kvm_vcpu *vcpu; 388 - unsigned long i; 390 + struct kvm *kvm = vcpu->kvm; 389 391 390 - lockdep_assert_held(&kvm->arch.config_lock); 391 - 392 - vgic_debug_destroy(kvm); 393 - 394 - kvm_for_each_vcpu(i, vcpu, kvm) 395 - kvm_vgic_vcpu_destroy(vcpu); 396 - 397 - kvm_vgic_dist_destroy(kvm); 392 + mutex_lock(&kvm->slots_lock); 393 + __kvm_vgic_vcpu_destroy(vcpu); 394 + mutex_unlock(&kvm->slots_lock); 398 395 } 399 396 400 397 void kvm_vgic_destroy(struct kvm *kvm) 401 398 { 399 + struct kvm_vcpu *vcpu; 400 + unsigned long i; 401 + 402 + mutex_lock(&kvm->slots_lock); 403 + 404 + vgic_debug_destroy(kvm); 405 + 406 + kvm_for_each_vcpu(i, vcpu, kvm) 407 + __kvm_vgic_vcpu_destroy(vcpu); 408 + 402 409 mutex_lock(&kvm->arch.config_lock); 403 - __kvm_vgic_destroy(kvm); 410 + 411 + kvm_vgic_dist_destroy(kvm); 412 + 404 413 mutex_unlock(&kvm->arch.config_lock); 414 + mutex_unlock(&kvm->slots_lock); 405 415 } 406 416 407 417 /** ··· 479 469 type = VGIC_V3; 480 470 } 481 471 482 - if (ret) { 483 - __kvm_vgic_destroy(kvm); 472 + if (ret) 484 473 goto out; 485 - } 474 + 486 475 dist->ready = true; 487 476 dist_base = dist->vgic_dist_base; 488 477 mutex_unlock(&kvm->arch.config_lock); 489 478 490 479 ret = vgic_register_dist_iodev(kvm, dist_base, type); 491 - if 
(ret) { 480 + if (ret) 492 481 kvm_err("Unable to register VGIC dist MMIO regions\n"); 493 - kvm_vgic_destroy(kvm); 494 - } 495 - mutex_unlock(&kvm->slots_lock); 496 - return ret; 497 482 483 + goto out_slots; 498 484 out: 499 485 mutex_unlock(&kvm->arch.config_lock); 486 + out_slots: 500 487 mutex_unlock(&kvm->slots_lock); 488 + 489 + if (ret) 490 + kvm_vgic_destroy(kvm); 491 + 501 492 return ret; 502 493 } 503 494
+3 -1
arch/arm64/kvm/vgic/vgic-mmio-v3.c
··· 820 820 return ret; 821 821 } 822 822 823 - static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) 823 + void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) 824 824 { 825 825 struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev; 826 826 ··· 832 832 struct kvm_vcpu *vcpu; 833 833 unsigned long c; 834 834 int ret = 0; 835 + 836 + lockdep_assert_held(&kvm->slots_lock); 835 837 836 838 kvm_for_each_vcpu(c, vcpu, kvm) { 837 839 ret = vgic_register_redist_iodev(vcpu);
+1
arch/arm64/kvm/vgic/vgic.h
··· 241 241 int vgic_v3_save_pending_tables(struct kvm *kvm); 242 242 int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count); 243 243 int vgic_register_redist_iodev(struct kvm_vcpu *vcpu); 244 + void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu); 244 245 bool vgic_v3_check_base(struct kvm *kvm); 245 246 246 247 void vgic_v3_load(struct kvm_vcpu *vcpu);
+19
arch/riscv/Kconfig
··· 724 724 725 725 If you want to execute 32-bit userspace applications, say Y. 726 726 727 + config PARAVIRT 728 + bool "Enable paravirtualization code" 729 + depends on RISCV_SBI 730 + help 731 + This changes the kernel so it can modify itself when it is run 732 + under a hypervisor, potentially improving performance significantly 733 + over full virtualization. 734 + 735 + config PARAVIRT_TIME_ACCOUNTING 736 + bool "Paravirtual steal time accounting" 737 + depends on PARAVIRT 738 + help 739 + Select this option to enable fine granularity task steal time 740 + accounting. Time spent executing other tasks in parallel with 741 + the current vCPU is discounted from the vCPU power. To account for 742 + that, there can be a small performance impact. 743 + 744 + If in doubt, say N here. 745 + 727 746 config RELOCATABLE 728 747 bool "Build a relocatable kernel" 729 748 depends on MMU && 64BIT && !XIP_KERNEL
+10
arch/riscv/include/asm/kvm_host.h
··· 41 41 KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 42 42 #define KVM_REQ_HFENCE \ 43 43 KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 44 + #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6) 44 45 45 46 enum kvm_riscv_hfence_type { 46 47 KVM_RISCV_HFENCE_UNKNOWN = 0, ··· 263 262 264 263 /* 'static' configurations which are set only once */ 265 264 struct kvm_vcpu_config cfg; 265 + 266 + /* SBI steal-time accounting */ 267 + struct { 268 + gpa_t shmem; 269 + u64 last_steal; 270 + } sta; 266 271 }; 267 272 268 273 static inline void kvm_arch_sync_events(struct kvm *kvm) {} ··· 376 369 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask); 377 370 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); 378 371 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); 372 + 373 + void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu); 374 + void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu); 379 375 380 376 #endif /* __RISCV_KVM_HOST_H__ */
+16 -4
arch/riscv/include/asm/kvm_vcpu_sbi.h
··· 15 15 #define KVM_SBI_VERSION_MINOR 0 16 16 17 17 enum kvm_riscv_sbi_ext_status { 18 - KVM_RISCV_SBI_EXT_UNINITIALIZED, 19 - KVM_RISCV_SBI_EXT_AVAILABLE, 20 - KVM_RISCV_SBI_EXT_UNAVAILABLE, 18 + KVM_RISCV_SBI_EXT_STATUS_UNINITIALIZED, 19 + KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE, 20 + KVM_RISCV_SBI_EXT_STATUS_ENABLED, 21 + KVM_RISCV_SBI_EXT_STATUS_DISABLED, 21 22 }; 22 23 23 24 struct kvm_vcpu_sbi_context { ··· 37 36 unsigned long extid_start; 38 37 unsigned long extid_end; 39 38 40 - bool default_unavail; 39 + bool default_disabled; 41 40 42 41 /** 43 42 * SBI extension handler. It can be defined for a given extension or group of ··· 60 59 const struct kvm_one_reg *reg); 61 60 int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu, 62 61 const struct kvm_one_reg *reg); 62 + int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, 63 + const struct kvm_one_reg *reg); 64 + int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, 65 + const struct kvm_one_reg *reg); 63 66 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext( 64 67 struct kvm_vcpu *vcpu, unsigned long extid); 68 + bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx); 65 69 int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); 66 70 void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu); 71 + 72 + int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, 73 + unsigned long *reg_val); 74 + int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, 75 + unsigned long reg_val); 67 76 68 77 #ifdef CONFIG_RISCV_SBI_V01 69 78 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01; ··· 85 74 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst; 86 75 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm; 87 76 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn; 77 + extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta; 88 78 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental; 89 
79 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor; 90 80
+28
arch/riscv/include/asm/paravirt.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_RISCV_PARAVIRT_H 3 + #define _ASM_RISCV_PARAVIRT_H 4 + 5 + #ifdef CONFIG_PARAVIRT 6 + #include <linux/static_call_types.h> 7 + 8 + struct static_key; 9 + extern struct static_key paravirt_steal_enabled; 10 + extern struct static_key paravirt_steal_rq_enabled; 11 + 12 + u64 dummy_steal_clock(int cpu); 13 + 14 + DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); 15 + 16 + static inline u64 paravirt_steal_clock(int cpu) 17 + { 18 + return static_call(pv_steal_clock)(cpu); 19 + } 20 + 21 + int __init pv_time_init(void); 22 + 23 + #else 24 + 25 + #define pv_time_init() do {} while (0) 26 + 27 + #endif /* CONFIG_PARAVIRT */ 28 + #endif /* _ASM_RISCV_PARAVIRT_H */
+1
arch/riscv/include/asm/paravirt_api_clock.h
··· 1 + #include <asm/paravirt.h>
+17
arch/riscv/include/asm/sbi.h
··· 31 31 SBI_EXT_SRST = 0x53525354, 32 32 SBI_EXT_PMU = 0x504D55, 33 33 SBI_EXT_DBCN = 0x4442434E, 34 + SBI_EXT_STA = 0x535441, 34 35 35 36 /* Experimentals extensions must lie within this range */ 36 37 SBI_EXT_EXPERIMENTAL_START = 0x08000000, ··· 244 243 SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2, 245 244 }; 246 245 246 + /* SBI STA (steal-time accounting) extension */ 247 + enum sbi_ext_sta_fid { 248 + SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0, 249 + }; 250 + 251 + struct sbi_sta_struct { 252 + __le32 sequence; 253 + __le32 flags; 254 + __le64 steal; 255 + u8 preempted; 256 + u8 pad[47]; 257 + } __packed; 258 + 259 + #define SBI_STA_SHMEM_DISABLE -1 260 + 261 + /* SBI spec version fields */ 247 262 #define SBI_SPEC_VERSION_DEFAULT 0x1 248 263 #define SBI_SPEC_VERSION_MAJOR_SHIFT 24 249 264 #define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
-5
arch/riscv/include/asm/syscall_wrapper.h
··· 46 46 return sys_ni_syscall(); \ 47 47 } 48 48 49 - #define COMPAT_SYS_NI(name) \ 50 - SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers); 51 - 52 49 #endif /* CONFIG_COMPAT */ 53 50 54 51 #define __SYSCALL_DEFINEx(x, name, ...) \ ··· 78 81 { \ 79 82 return sys_ni_syscall(); \ 80 83 } 81 - 82 - #define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers); 83 84 84 85 #endif /* __ASM_SYSCALL_WRAPPER_H */
+13
arch/riscv/include/uapi/asm/kvm.h
··· 157 157 KVM_RISCV_SBI_EXT_EXPERIMENTAL, 158 158 KVM_RISCV_SBI_EXT_VENDOR, 159 159 KVM_RISCV_SBI_EXT_DBCN, 160 + KVM_RISCV_SBI_EXT_STA, 160 161 KVM_RISCV_SBI_EXT_MAX, 162 + }; 163 + 164 + /* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ 165 + struct kvm_riscv_sbi_sta { 166 + unsigned long shmem_lo; 167 + unsigned long shmem_hi; 161 168 }; 162 169 163 170 /* Possible states for kvm_riscv_timer */ ··· 247 240 (offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long)) 248 241 #define KVM_REG_RISCV_VECTOR_REG(n) \ 249 242 ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long)) 243 + 244 + /* Registers for specific SBI extensions are mapped as type 10 */ 245 + #define KVM_REG_RISCV_SBI_STATE (0x0a << KVM_REG_RISCV_TYPE_SHIFT) 246 + #define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) 247 + #define KVM_REG_RISCV_SBI_STA_REG(name) \ 248 + (offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long)) 250 249 251 250 /* Device Control API: RISC-V AIA */ 252 251 #define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
+1
arch/riscv/kernel/Makefile
··· 85 85 obj-$(CONFIG_SMP) += cpu_ops_sbi.o 86 86 endif 87 87 obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o 88 + obj-$(CONFIG_PARAVIRT) += paravirt.o 88 89 obj-$(CONFIG_KGDB) += kgdb.o 89 90 obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o 90 91 obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o
+135
arch/riscv/kernel/paravirt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2023 Ventana Micro Systems Inc. 4 + */ 5 + 6 + #define pr_fmt(fmt) "riscv-pv: " fmt 7 + 8 + #include <linux/cpuhotplug.h> 9 + #include <linux/compiler.h> 10 + #include <linux/errno.h> 11 + #include <linux/init.h> 12 + #include <linux/jump_label.h> 13 + #include <linux/kconfig.h> 14 + #include <linux/kernel.h> 15 + #include <linux/percpu-defs.h> 16 + #include <linux/printk.h> 17 + #include <linux/static_call.h> 18 + #include <linux/types.h> 19 + 20 + #include <asm/barrier.h> 21 + #include <asm/page.h> 22 + #include <asm/paravirt.h> 23 + #include <asm/sbi.h> 24 + 25 + struct static_key paravirt_steal_enabled; 26 + struct static_key paravirt_steal_rq_enabled; 27 + 28 + static u64 native_steal_clock(int cpu) 29 + { 30 + return 0; 31 + } 32 + 33 + DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); 34 + 35 + static bool steal_acc = true; 36 + static int __init parse_no_stealacc(char *arg) 37 + { 38 + steal_acc = false; 39 + return 0; 40 + } 41 + 42 + early_param("no-steal-acc", parse_no_stealacc); 43 + 44 + DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64); 45 + 46 + static bool __init has_pv_steal_clock(void) 47 + { 48 + if (sbi_spec_version >= sbi_mk_version(2, 0) && 49 + sbi_probe_extension(SBI_EXT_STA) > 0) { 50 + pr_info("SBI STA extension detected\n"); 51 + return true; 52 + } 53 + 54 + return false; 55 + } 56 + 57 + static int sbi_sta_steal_time_set_shmem(unsigned long lo, unsigned long hi, 58 + unsigned long flags) 59 + { 60 + struct sbiret ret; 61 + 62 + ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM, 63 + lo, hi, flags, 0, 0, 0); 64 + if (ret.error) { 65 + if (lo == SBI_STA_SHMEM_DISABLE && hi == SBI_STA_SHMEM_DISABLE) 66 + pr_warn("Failed to disable steal-time shmem"); 67 + else 68 + pr_warn("Failed to set steal-time shmem"); 69 + return sbi_err_map_linux_errno(ret.error); 70 + } 71 + 72 + return 0; 73 + } 74 + 75 + static int 
pv_time_cpu_online(unsigned int cpu) 76 + { 77 + struct sbi_sta_struct *st = this_cpu_ptr(&steal_time); 78 + phys_addr_t pa = __pa(st); 79 + unsigned long lo = (unsigned long)pa; 80 + unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0; 81 + 82 + return sbi_sta_steal_time_set_shmem(lo, hi, 0); 83 + } 84 + 85 + static int pv_time_cpu_down_prepare(unsigned int cpu) 86 + { 87 + return sbi_sta_steal_time_set_shmem(SBI_STA_SHMEM_DISABLE, 88 + SBI_STA_SHMEM_DISABLE, 0); 89 + } 90 + 91 + static u64 pv_time_steal_clock(int cpu) 92 + { 93 + struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu); 94 + u32 sequence; 95 + u64 steal; 96 + 97 + /* 98 + * Check the sequence field before and after reading the steal 99 + * field. Repeat the read if it is different or odd. 100 + */ 101 + do { 102 + sequence = READ_ONCE(st->sequence); 103 + virt_rmb(); 104 + steal = READ_ONCE(st->steal); 105 + virt_rmb(); 106 + } while ((le32_to_cpu(sequence) & 1) || 107 + sequence != READ_ONCE(st->sequence)); 108 + 109 + return le64_to_cpu(steal); 110 + } 111 + 112 + int __init pv_time_init(void) 113 + { 114 + int ret; 115 + 116 + if (!has_pv_steal_clock()) 117 + return 0; 118 + 119 + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 120 + "riscv/pv_time:online", 121 + pv_time_cpu_online, 122 + pv_time_cpu_down_prepare); 123 + if (ret < 0) 124 + return ret; 125 + 126 + static_call_update(pv_steal_clock, pv_time_steal_clock); 127 + 128 + static_key_slow_inc(&paravirt_steal_enabled); 129 + if (steal_acc) 130 + static_key_slow_inc(&paravirt_steal_rq_enabled); 131 + 132 + pr_info("Computing paravirt steal-time\n"); 133 + 134 + return 0; 135 + }
+3
arch/riscv/kernel/time.c
··· 12 12 #include <asm/sbi.h> 13 13 #include <asm/processor.h> 14 14 #include <asm/timex.h> 15 + #include <asm/paravirt.h> 15 16 16 17 unsigned long riscv_timebase __ro_after_init; 17 18 EXPORT_SYMBOL_GPL(riscv_timebase); ··· 46 45 timer_probe(); 47 46 48 47 tick_setup_hrtimer_broadcast(); 48 + 49 + pv_time_init(); 49 50 }
+1
arch/riscv/kvm/Kconfig
··· 30 30 select KVM_XFER_TO_GUEST_WORK 31 31 select KVM_GENERIC_MMU_NOTIFIER 32 32 select PREEMPT_NOTIFIERS 33 + select SCHED_INFO 33 34 help 34 35 Support hosting virtualized guest machines. 35 36
+1
arch/riscv/kvm/Makefile
··· 26 26 kvm-y += vcpu_sbi_base.o 27 27 kvm-y += vcpu_sbi_replace.o 28 28 kvm-y += vcpu_sbi_hsm.o 29 + kvm-y += vcpu_sbi_sta.o 29 30 kvm-y += vcpu_timer.o 30 31 kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o 31 32 kvm-y += aia.o
+13
arch/riscv/kvm/aia_imsic.c
··· 55 55 /* IMSIC SW-file */ 56 56 struct imsic_mrif *swfile; 57 57 phys_addr_t swfile_pa; 58 + spinlock_t swfile_extirq_lock; 58 59 }; 59 60 60 61 #define imsic_vs_csr_read(__c) \ ··· 614 613 { 615 614 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; 616 615 struct imsic_mrif *mrif = imsic->swfile; 616 + unsigned long flags; 617 + 618 + /* 619 + * The critical section is necessary during external interrupt 620 + * updates to avoid the risk of losing interrupts due to potential 621 + * interruptions between reading topei and updating pending status. 622 + */ 623 + 624 + spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); 617 625 618 626 if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) && 619 627 imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis)) 620 628 kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT); 621 629 else 622 630 kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); 631 + 632 + spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags); 623 633 } 624 634 625 635 static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear, ··· 1051 1039 } 1052 1040 imsic->swfile = page_to_virt(swfile_page); 1053 1041 imsic->swfile_pa = page_to_phys(swfile_page); 1042 + spin_lock_init(&imsic->swfile_extirq_lock); 1054 1043 1055 1044 /* Setup IO device */ 1056 1045 kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
+8 -2
arch/riscv/kvm/vcpu.c
··· 83 83 vcpu->arch.hfence_tail = 0; 84 84 memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); 85 85 86 + kvm_riscv_vcpu_sbi_sta_reset(vcpu); 87 + 86 88 /* Reset the guest CSRs for hotplug usecase */ 87 89 if (loaded) 88 90 kvm_arch_vcpu_load(vcpu, smp_processor_id()); ··· 543 541 544 542 kvm_riscv_vcpu_aia_load(vcpu, cpu); 545 543 544 + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 545 + 546 546 vcpu->cpu = cpu; 547 547 } 548 548 ··· 618 614 619 615 if (kvm_check_request(KVM_REQ_HFENCE, vcpu)) 620 616 kvm_riscv_hfence_process(vcpu); 617 + 618 + if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 619 + kvm_riscv_vcpu_record_steal_time(vcpu); 621 620 } 622 621 } 623 622 ··· 764 757 /* Update HVIP CSR for current CPU */ 765 758 kvm_riscv_update_hvip(vcpu); 766 759 767 - if (ret <= 0 || 768 - kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || 760 + if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || 769 761 kvm_request_pending(vcpu) || 770 762 xfer_to_guest_mode_work_pending()) { 771 763 vcpu->mode = OUTSIDE_GUEST_MODE;
+111 -38
arch/riscv/kvm/vcpu_onereg.c
··· 485 485 if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) 486 486 rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, 487 487 reg_val); 488 - break; 488 + break; 489 489 default: 490 490 rc = -ENOENT; 491 491 break; ··· 931 931 return copy_isa_ext_reg_indices(vcpu, NULL);; 932 932 } 933 933 934 - static inline unsigned long num_sbi_ext_regs(void) 934 + static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 935 935 { 936 - /* 937 - * number of KVM_REG_RISCV_SBI_SINGLE + 938 - * 2 x (number of KVM_REG_RISCV_SBI_MULTI) 939 - */ 940 - return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1); 941 - } 936 + unsigned int n = 0; 942 937 943 - static int copy_sbi_ext_reg_indices(u64 __user *uindices) 944 - { 945 - int n; 946 - 947 - /* copy KVM_REG_RISCV_SBI_SINGLE */ 948 - n = KVM_RISCV_SBI_EXT_MAX; 949 - for (int i = 0; i < n; i++) { 938 + for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) { 950 939 u64 size = IS_ENABLED(CONFIG_32BIT) ? 951 940 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; 952 941 u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | 953 942 KVM_REG_RISCV_SBI_SINGLE | i; 954 943 944 + if (!riscv_vcpu_supports_sbi_ext(vcpu, i)) 945 + continue; 946 + 947 + if (uindices) { 948 + if (put_user(reg, uindices)) 949 + return -EFAULT; 950 + uindices++; 951 + } 952 + 953 + n++; 954 + } 955 + 956 + return n; 957 + } 958 + 959 + static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu) 960 + { 961 + return copy_sbi_ext_reg_indices(vcpu, NULL); 962 + } 963 + 964 + static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 965 + { 966 + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; 967 + int total = 0; 968 + 969 + if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) { 970 + u64 size = IS_ENABLED(CONFIG_32BIT) ? 
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; 971 + int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long); 972 + 973 + for (int i = 0; i < n; i++) { 974 + u64 reg = KVM_REG_RISCV | size | 975 + KVM_REG_RISCV_SBI_STATE | 976 + KVM_REG_RISCV_SBI_STA | i; 977 + 978 + if (uindices) { 979 + if (put_user(reg, uindices)) 980 + return -EFAULT; 981 + uindices++; 982 + } 983 + } 984 + 985 + total += n; 986 + } 987 + 988 + return total; 989 + } 990 + 991 + static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu) 992 + { 993 + return copy_sbi_reg_indices(vcpu, NULL); 994 + } 995 + 996 + static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu) 997 + { 998 + if (!riscv_isa_extension_available(vcpu->arch.isa, v)) 999 + return 0; 1000 + 1001 + /* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */ 1002 + return 37; 1003 + } 1004 + 1005 + static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu, 1006 + u64 __user *uindices) 1007 + { 1008 + const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; 1009 + int n = num_vector_regs(vcpu); 1010 + u64 reg, size; 1011 + int i; 1012 + 1013 + if (n == 0) 1014 + return 0; 1015 + 1016 + /* copy vstart, vl, vtype, vcsr and vlenb */ 1017 + size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; 1018 + for (i = 0; i < 5; i++) { 1019 + reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i; 1020 + 955 1021 if (uindices) { 956 1022 if (put_user(reg, uindices)) 957 1023 return -EFAULT; ··· 1025 959 } 1026 960 } 1027 961 1028 - /* copy KVM_REG_RISCV_SBI_MULTI */ 1029 - n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1; 1030 - for (int i = 0; i < n; i++) { 1031 - u64 size = IS_ENABLED(CONFIG_32BIT) ? 
1032 - KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; 1033 - u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | 1034 - KVM_REG_RISCV_SBI_MULTI_EN | i; 1035 - 1036 - if (uindices) { 1037 - if (put_user(reg, uindices)) 1038 - return -EFAULT; 1039 - uindices++; 1040 - } 1041 - 1042 - reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | 1043 - KVM_REG_RISCV_SBI_MULTI_DIS | i; 962 + /* vector_regs have a variable 'vlenb' size */ 963 + size = __builtin_ctzl(cntx->vector.vlenb); 964 + size <<= KVM_REG_SIZE_SHIFT; 965 + for (i = 0; i < 32; i++) { 966 + reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size | 967 + KVM_REG_RISCV_VECTOR_REG(i); 1044 968 1045 969 if (uindices) { 1046 970 if (put_user(reg, uindices)) ··· 1039 983 } 1040 984 } 1041 985 1042 - return num_sbi_ext_regs(); 986 + return n; 1043 987 } 1044 988 1045 989 /* ··· 1057 1001 res += num_timer_regs(); 1058 1002 res += num_fp_f_regs(vcpu); 1059 1003 res += num_fp_d_regs(vcpu); 1004 + res += num_vector_regs(vcpu); 1060 1005 res += num_isa_ext_regs(vcpu); 1061 - res += num_sbi_ext_regs(); 1006 + res += num_sbi_ext_regs(vcpu); 1007 + res += num_sbi_regs(vcpu); 1062 1008 1063 1009 return res; 1064 1010 } ··· 1103 1045 return ret; 1104 1046 uindices += ret; 1105 1047 1048 + ret = copy_vector_reg_indices(vcpu, uindices); 1049 + if (ret < 0) 1050 + return ret; 1051 + uindices += ret; 1052 + 1106 1053 ret = copy_isa_ext_reg_indices(vcpu, uindices); 1107 1054 if (ret < 0) 1108 1055 return ret; 1109 1056 uindices += ret; 1110 1057 1111 - ret = copy_sbi_ext_reg_indices(uindices); 1058 + ret = copy_sbi_ext_reg_indices(vcpu, uindices); 1112 1059 if (ret < 0) 1113 1060 return ret; 1061 + uindices += ret; 1062 + 1063 + ret = copy_sbi_reg_indices(vcpu, uindices); 1064 + if (ret < 0) 1065 + return ret; 1066 + uindices += ret; 1114 1067 1115 1068 return 0; 1116 1069 } ··· 1144 1075 case KVM_REG_RISCV_FP_D: 1145 1076 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, 1146 1077 KVM_REG_RISCV_FP_D); 1078 + case KVM_REG_RISCV_VECTOR: 1079 + 
return kvm_riscv_vcpu_set_reg_vector(vcpu, reg); 1147 1080 case KVM_REG_RISCV_ISA_EXT: 1148 1081 return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg); 1149 1082 case KVM_REG_RISCV_SBI_EXT: 1150 1083 return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg); 1151 - case KVM_REG_RISCV_VECTOR: 1152 - return kvm_riscv_vcpu_set_reg_vector(vcpu, reg); 1084 + case KVM_REG_RISCV_SBI_STATE: 1085 + return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg); 1153 1086 default: 1154 1087 break; 1155 1088 } ··· 1177 1106 case KVM_REG_RISCV_FP_D: 1178 1107 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, 1179 1108 KVM_REG_RISCV_FP_D); 1109 + case KVM_REG_RISCV_VECTOR: 1110 + return kvm_riscv_vcpu_get_reg_vector(vcpu, reg); 1180 1111 case KVM_REG_RISCV_ISA_EXT: 1181 1112 return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg); 1182 1113 case KVM_REG_RISCV_SBI_EXT: 1183 1114 return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg); 1184 - case KVM_REG_RISCV_VECTOR: 1185 - return kvm_riscv_vcpu_get_reg_vector(vcpu, reg); 1115 + case KVM_REG_RISCV_SBI_STATE: 1116 + return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg); 1186 1117 default: 1187 1118 break; 1188 1119 }
+110 -32
arch/riscv/kvm/vcpu_sbi.c
··· 71 71 .ext_ptr = &vcpu_sbi_ext_dbcn, 72 72 }, 73 73 { 74 + .ext_idx = KVM_RISCV_SBI_EXT_STA, 75 + .ext_ptr = &vcpu_sbi_ext_sta, 76 + }, 77 + { 74 78 .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL, 75 79 .ext_ptr = &vcpu_sbi_ext_experimental, 76 80 }, ··· 83 79 .ext_ptr = &vcpu_sbi_ext_vendor, 84 80 }, 85 81 }; 82 + 83 + static const struct kvm_riscv_sbi_extension_entry * 84 + riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx) 85 + { 86 + const struct kvm_riscv_sbi_extension_entry *sext = NULL; 87 + 88 + if (idx >= KVM_RISCV_SBI_EXT_MAX) 89 + return NULL; 90 + 91 + for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) { 92 + if (sbi_ext[i].ext_idx == idx) { 93 + sext = &sbi_ext[i]; 94 + break; 95 + } 96 + } 97 + 98 + return sext; 99 + } 100 + 101 + bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx) 102 + { 103 + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; 104 + const struct kvm_riscv_sbi_extension_entry *sext; 105 + 106 + sext = riscv_vcpu_get_sbi_ext(vcpu, idx); 107 + 108 + return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE; 109 + } 86 110 87 111 void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) 88 112 { ··· 172 140 unsigned long reg_num, 173 141 unsigned long reg_val) 174 142 { 175 - unsigned long i; 176 - const struct kvm_riscv_sbi_extension_entry *sext = NULL; 177 143 struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; 178 - 179 - if (reg_num >= KVM_RISCV_SBI_EXT_MAX) 180 - return -ENOENT; 144 + const struct kvm_riscv_sbi_extension_entry *sext; 181 145 182 146 if (reg_val != 1 && reg_val != 0) 183 147 return -EINVAL; 184 148 185 - for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { 186 - if (sbi_ext[i].ext_idx == reg_num) { 187 - sext = &sbi_ext[i]; 188 - break; 189 - } 190 - } 191 - if (!sext) 149 + sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num); 150 + if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE) 192 151 
return -ENOENT; 193 152 194 153 scontext->ext_status[sext->ext_idx] = (reg_val) ? 195 - KVM_RISCV_SBI_EXT_AVAILABLE : 196 - KVM_RISCV_SBI_EXT_UNAVAILABLE; 154 + KVM_RISCV_SBI_EXT_STATUS_ENABLED : 155 + KVM_RISCV_SBI_EXT_STATUS_DISABLED; 197 156 198 157 return 0; 199 158 } ··· 193 170 unsigned long reg_num, 194 171 unsigned long *reg_val) 195 172 { 196 - unsigned long i; 197 - const struct kvm_riscv_sbi_extension_entry *sext = NULL; 198 173 struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; 174 + const struct kvm_riscv_sbi_extension_entry *sext; 199 175 200 - if (reg_num >= KVM_RISCV_SBI_EXT_MAX) 201 - return -ENOENT; 202 - 203 - for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { 204 - if (sbi_ext[i].ext_idx == reg_num) { 205 - sext = &sbi_ext[i]; 206 - break; 207 - } 208 - } 209 - if (!sext) 176 + sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num); 177 + if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE) 210 178 return -ENOENT; 211 179 212 180 *reg_val = scontext->ext_status[sext->ext_idx] == 213 - KVM_RISCV_SBI_EXT_AVAILABLE; 181 + KVM_RISCV_SBI_EXT_STATUS_ENABLED; 182 + 214 183 return 0; 215 184 } 216 185 ··· 325 310 return 0; 326 311 } 327 312 313 + int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, 314 + const struct kvm_one_reg *reg) 315 + { 316 + unsigned long __user *uaddr = 317 + (unsigned long __user *)(unsigned long)reg->addr; 318 + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | 319 + KVM_REG_SIZE_MASK | 320 + KVM_REG_RISCV_SBI_STATE); 321 + unsigned long reg_subtype, reg_val; 322 + 323 + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) 324 + return -EINVAL; 325 + 326 + if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id))) 327 + return -EFAULT; 328 + 329 + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; 330 + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; 331 + 332 + switch (reg_subtype) { 333 + case KVM_REG_RISCV_SBI_STA: 334 + return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val); 335 + 
default: 336 + return -EINVAL; 337 + } 338 + 339 + return 0; 340 + } 341 + 342 + int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, 343 + const struct kvm_one_reg *reg) 344 + { 345 + unsigned long __user *uaddr = 346 + (unsigned long __user *)(unsigned long)reg->addr; 347 + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | 348 + KVM_REG_SIZE_MASK | 349 + KVM_REG_RISCV_SBI_STATE); 350 + unsigned long reg_subtype, reg_val; 351 + int ret; 352 + 353 + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) 354 + return -EINVAL; 355 + 356 + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; 357 + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; 358 + 359 + switch (reg_subtype) { 360 + case KVM_REG_RISCV_SBI_STA: 361 + ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val); 362 + break; 363 + default: 364 + return -EINVAL; 365 + } 366 + 367 + if (ret) 368 + return ret; 369 + 370 + if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id))) 371 + return -EFAULT; 372 + 373 + return 0; 374 + } 375 + 328 376 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext( 329 377 struct kvm_vcpu *vcpu, unsigned long extid) 330 378 { ··· 403 325 if (ext->extid_start <= extid && ext->extid_end >= extid) { 404 326 if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX || 405 327 scontext->ext_status[entry->ext_idx] == 406 - KVM_RISCV_SBI_EXT_AVAILABLE) 328 + KVM_RISCV_SBI_EXT_STATUS_ENABLED) 407 329 return ext; 408 330 409 331 return NULL; ··· 491 413 492 414 if (ext->probe && !ext->probe(vcpu)) { 493 415 scontext->ext_status[entry->ext_idx] = 494 - KVM_RISCV_SBI_EXT_UNAVAILABLE; 416 + KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE; 495 417 continue; 496 418 } 497 419 498 - scontext->ext_status[entry->ext_idx] = ext->default_unavail ? 499 - KVM_RISCV_SBI_EXT_UNAVAILABLE : 500 - KVM_RISCV_SBI_EXT_AVAILABLE; 420 + scontext->ext_status[entry->ext_idx] = ext->default_disabled ? 421 + KVM_RISCV_SBI_EXT_STATUS_DISABLED : 422 + KVM_RISCV_SBI_EXT_STATUS_ENABLED; 501 423 } 502 424 }
+1 -1
arch/riscv/kvm/vcpu_sbi_replace.c
··· 204 204 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = { 205 205 .extid_start = SBI_EXT_DBCN, 206 206 .extid_end = SBI_EXT_DBCN, 207 - .default_unavail = true, 207 + .default_disabled = true, 208 208 .handler = kvm_sbi_ext_dbcn_handler, 209 209 };
+208
arch/riscv/kvm/vcpu_sbi_sta.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2023 Ventana Micro Systems Inc. 4 + */ 5 + 6 + #include <linux/kconfig.h> 7 + #include <linux/kernel.h> 8 + #include <linux/kvm_host.h> 9 + #include <linux/mm.h> 10 + #include <linux/sizes.h> 11 + 12 + #include <asm/bug.h> 13 + #include <asm/current.h> 14 + #include <asm/kvm_vcpu_sbi.h> 15 + #include <asm/page.h> 16 + #include <asm/sbi.h> 17 + #include <asm/uaccess.h> 18 + 19 + void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu) 20 + { 21 + vcpu->arch.sta.shmem = INVALID_GPA; 22 + vcpu->arch.sta.last_steal = 0; 23 + } 24 + 25 + void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu) 26 + { 27 + gpa_t shmem = vcpu->arch.sta.shmem; 28 + u64 last_steal = vcpu->arch.sta.last_steal; 29 + u32 *sequence_ptr, sequence; 30 + u64 *steal_ptr, steal; 31 + unsigned long hva; 32 + gfn_t gfn; 33 + 34 + if (shmem == INVALID_GPA) 35 + return; 36 + 37 + /* 38 + * shmem is 64-byte aligned (see the enforcement in 39 + * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct 40 + * is 64 bytes, so we know all its offsets are in the same page. 
41 + */ 42 + gfn = shmem >> PAGE_SHIFT; 43 + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); 44 + 45 + if (WARN_ON(kvm_is_error_hva(hva))) { 46 + vcpu->arch.sta.shmem = INVALID_GPA; 47 + return; 48 + } 49 + 50 + sequence_ptr = (u32 *)(hva + offset_in_page(shmem) + 51 + offsetof(struct sbi_sta_struct, sequence)); 52 + steal_ptr = (u64 *)(hva + offset_in_page(shmem) + 53 + offsetof(struct sbi_sta_struct, steal)); 54 + 55 + if (WARN_ON(get_user(sequence, sequence_ptr))) 56 + return; 57 + 58 + sequence = le32_to_cpu(sequence); 59 + sequence += 1; 60 + 61 + if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr))) 62 + return; 63 + 64 + if (!WARN_ON(get_user(steal, steal_ptr))) { 65 + steal = le64_to_cpu(steal); 66 + vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay); 67 + steal += vcpu->arch.sta.last_steal - last_steal; 68 + WARN_ON(put_user(cpu_to_le64(steal), steal_ptr)); 69 + } 70 + 71 + sequence += 1; 72 + WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)); 73 + 74 + kvm_vcpu_mark_page_dirty(vcpu, gfn); 75 + } 76 + 77 + static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu) 78 + { 79 + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; 80 + unsigned long shmem_phys_lo = cp->a0; 81 + unsigned long shmem_phys_hi = cp->a1; 82 + u32 flags = cp->a2; 83 + struct sbi_sta_struct zero_sta = {0}; 84 + unsigned long hva; 85 + bool writable; 86 + gpa_t shmem; 87 + int ret; 88 + 89 + if (flags != 0) 90 + return SBI_ERR_INVALID_PARAM; 91 + 92 + if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE && 93 + shmem_phys_hi == SBI_STA_SHMEM_DISABLE) { 94 + vcpu->arch.sta.shmem = INVALID_GPA; 95 + return 0; 96 + } 97 + 98 + if (shmem_phys_lo & (SZ_64 - 1)) 99 + return SBI_ERR_INVALID_PARAM; 100 + 101 + shmem = shmem_phys_lo; 102 + 103 + if (shmem_phys_hi != 0) { 104 + if (IS_ENABLED(CONFIG_32BIT)) 105 + shmem |= ((gpa_t)shmem_phys_hi << 32); 106 + else 107 + return SBI_ERR_INVALID_ADDRESS; 108 + } 109 + 110 + hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> 
PAGE_SHIFT, &writable); 111 + if (kvm_is_error_hva(hva) || !writable) 112 + return SBI_ERR_INVALID_ADDRESS; 113 + 114 + ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta)); 115 + if (ret) 116 + return SBI_ERR_FAILURE; 117 + 118 + vcpu->arch.sta.shmem = shmem; 119 + vcpu->arch.sta.last_steal = current->sched_info.run_delay; 120 + 121 + return 0; 122 + } 123 + 124 + static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, 125 + struct kvm_vcpu_sbi_return *retdata) 126 + { 127 + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; 128 + unsigned long funcid = cp->a6; 129 + int ret; 130 + 131 + switch (funcid) { 132 + case SBI_EXT_STA_STEAL_TIME_SET_SHMEM: 133 + ret = kvm_sbi_sta_steal_time_set_shmem(vcpu); 134 + break; 135 + default: 136 + ret = SBI_ERR_NOT_SUPPORTED; 137 + break; 138 + } 139 + 140 + retdata->err_val = ret; 141 + 142 + return 0; 143 + } 144 + 145 + static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu) 146 + { 147 + return !!sched_info_on(); 148 + } 149 + 150 + const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = { 151 + .extid_start = SBI_EXT_STA, 152 + .extid_end = SBI_EXT_STA, 153 + .handler = kvm_sbi_ext_sta_handler, 154 + .probe = kvm_sbi_ext_sta_probe, 155 + }; 156 + 157 + int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, 158 + unsigned long reg_num, 159 + unsigned long *reg_val) 160 + { 161 + switch (reg_num) { 162 + case KVM_REG_RISCV_SBI_STA_REG(shmem_lo): 163 + *reg_val = (unsigned long)vcpu->arch.sta.shmem; 164 + break; 165 + case KVM_REG_RISCV_SBI_STA_REG(shmem_hi): 166 + if (IS_ENABLED(CONFIG_32BIT)) 167 + *reg_val = upper_32_bits(vcpu->arch.sta.shmem); 168 + else 169 + *reg_val = 0; 170 + break; 171 + default: 172 + return -EINVAL; 173 + } 174 + 175 + return 0; 176 + } 177 + 178 + int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, 179 + unsigned long reg_num, 180 + unsigned long reg_val) 181 + { 182 + switch (reg_num) { 183 + case KVM_REG_RISCV_SBI_STA_REG(shmem_lo): 
184 + if (IS_ENABLED(CONFIG_32BIT)) { 185 + gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem); 186 + 187 + vcpu->arch.sta.shmem = reg_val; 188 + vcpu->arch.sta.shmem |= hi << 32; 189 + } else { 190 + vcpu->arch.sta.shmem = reg_val; 191 + } 192 + break; 193 + case KVM_REG_RISCV_SBI_STA_REG(shmem_hi): 194 + if (IS_ENABLED(CONFIG_32BIT)) { 195 + gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem); 196 + 197 + vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32); 198 + vcpu->arch.sta.shmem |= lo; 199 + } else if (reg_val != 0) { 200 + return -EINVAL; 201 + } 202 + break; 203 + default: 204 + return -EINVAL; 205 + } 206 + 207 + return 0; 208 + }
+14 -18
arch/riscv/kvm/vcpu_switch.S
··· 15 15 .altmacro 16 16 .option norelax 17 17 18 - ENTRY(__kvm_riscv_switch_to) 18 + SYM_FUNC_START(__kvm_riscv_switch_to) 19 19 /* Save Host GPRs (except A0 and T0-T6) */ 20 20 REG_S ra, (KVM_ARCH_HOST_RA)(a0) 21 21 REG_S sp, (KVM_ARCH_HOST_SP)(a0) ··· 45 45 REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0) 46 46 REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0) 47 47 REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0) 48 - la t4, __kvm_switch_return 48 + la t4, .Lkvm_switch_return 49 49 REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0) 50 50 51 51 /* Save Host and Restore Guest SSTATUS */ ··· 113 113 114 114 /* Back to Host */ 115 115 .align 2 116 - __kvm_switch_return: 116 + .Lkvm_switch_return: 117 117 /* Swap Guest A0 with SSCRATCH */ 118 118 csrrw a0, CSR_SSCRATCH, a0 119 119 ··· 208 208 209 209 /* Return to C code */ 210 210 ret 211 - ENDPROC(__kvm_riscv_switch_to) 211 + SYM_FUNC_END(__kvm_riscv_switch_to) 212 212 213 - ENTRY(__kvm_riscv_unpriv_trap) 213 + SYM_CODE_START(__kvm_riscv_unpriv_trap) 214 214 /* 215 215 * We assume that faulting unpriv load/store instruction is 216 216 * 4-byte long and blindly increment SEPC by 4. 
··· 231 231 csrr a1, CSR_HTINST 232 232 REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0) 233 233 sret 234 - ENDPROC(__kvm_riscv_unpriv_trap) 234 + SYM_CODE_END(__kvm_riscv_unpriv_trap) 235 235 236 236 #ifdef CONFIG_FPU 237 - .align 3 238 - .global __kvm_riscv_fp_f_save 239 - __kvm_riscv_fp_f_save: 237 + SYM_FUNC_START(__kvm_riscv_fp_f_save) 240 238 csrr t2, CSR_SSTATUS 241 239 li t1, SR_FS 242 240 csrs CSR_SSTATUS, t1 ··· 274 276 sw t0, KVM_ARCH_FP_F_FCSR(a0) 275 277 csrw CSR_SSTATUS, t2 276 278 ret 279 + SYM_FUNC_END(__kvm_riscv_fp_f_save) 277 280 278 - .align 3 279 - .global __kvm_riscv_fp_d_save 280 - __kvm_riscv_fp_d_save: 281 + SYM_FUNC_START(__kvm_riscv_fp_d_save) 281 282 csrr t2, CSR_SSTATUS 282 283 li t1, SR_FS 283 284 csrs CSR_SSTATUS, t1 ··· 316 319 sw t0, KVM_ARCH_FP_D_FCSR(a0) 317 320 csrw CSR_SSTATUS, t2 318 321 ret 322 + SYM_FUNC_END(__kvm_riscv_fp_d_save) 319 323 320 - .align 3 321 - .global __kvm_riscv_fp_f_restore 322 - __kvm_riscv_fp_f_restore: 324 + SYM_FUNC_START(__kvm_riscv_fp_f_restore) 323 325 csrr t2, CSR_SSTATUS 324 326 li t1, SR_FS 325 327 lw t0, KVM_ARCH_FP_F_FCSR(a0) ··· 358 362 fscsr t0 359 363 csrw CSR_SSTATUS, t2 360 364 ret 365 + SYM_FUNC_END(__kvm_riscv_fp_f_restore) 361 366 362 - .align 3 363 - .global __kvm_riscv_fp_d_restore 364 - __kvm_riscv_fp_d_restore: 367 + SYM_FUNC_START(__kvm_riscv_fp_d_restore) 365 368 csrr t2, CSR_SSTATUS 366 369 li t1, SR_FS 367 370 lw t0, KVM_ARCH_FP_D_FCSR(a0) ··· 400 405 fscsr t0 401 406 csrw CSR_SSTATUS, t2 402 407 ret 408 + SYM_FUNC_END(__kvm_riscv_fp_d_restore) 403 409 #endif
+16
arch/riscv/kvm/vcpu_vector.c
··· 76 76 cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL); 77 77 if (!cntx->vector.datap) 78 78 return -ENOMEM; 79 + cntx->vector.vlenb = riscv_v_vsize / 32; 79 80 80 81 vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL); 81 82 if (!vcpu->arch.host_context.vector.datap) ··· 115 114 break; 116 115 case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr): 117 116 *reg_addr = &cntx->vector.vcsr; 117 + break; 118 + case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb): 119 + *reg_addr = &cntx->vector.vlenb; 118 120 break; 119 121 case KVM_REG_RISCV_VECTOR_CSR_REG(datap): 120 122 default: ··· 176 172 177 173 if (!riscv_isa_extension_available(isa, v)) 178 174 return -ENOENT; 175 + 176 + if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) { 177 + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; 178 + unsigned long reg_val; 179 + 180 + if (copy_from_user(&reg_val, uaddr, reg_size)) 181 + return -EFAULT; 182 + if (reg_val != cntx->vector.vlenb) 183 + return -EINVAL; 184 + 185 + return 0; 186 + } 179 187 180 188 rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr); 181 189 if (rc)
+5 -4
arch/s390/configs/debug_defconfig
··· 44 44 CONFIG_KEXEC_SIG=y 45 45 CONFIG_CRASH_DUMP=y 46 46 CONFIG_LIVEPATCH=y 47 - CONFIG_MARCH_ZEC12=y 48 - CONFIG_TUNE_ZEC12=y 47 + CONFIG_MARCH_Z13=y 49 48 CONFIG_NR_CPUS=512 50 49 CONFIG_NUMA=y 51 50 CONFIG_HZ_100=y ··· 75 76 CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y 76 77 CONFIG_MODVERSIONS=y 77 78 CONFIG_MODULE_SRCVERSION_ALL=y 78 - CONFIG_MODULE_SIG_SHA256=y 79 79 CONFIG_BLK_DEV_THROTTLING=y 80 80 CONFIG_BLK_WBT=y 81 81 CONFIG_BLK_CGROUP_IOLATENCY=y ··· 91 93 CONFIG_IOSCHED_BFQ=y 92 94 CONFIG_BINFMT_MISC=m 93 95 CONFIG_ZSWAP=y 96 + CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y 94 97 CONFIG_ZSMALLOC_STAT=y 95 98 CONFIG_SLUB_STATS=y 96 99 # CONFIG_COMPAT_BRK is not set ··· 618 619 CONFIG_BTRFS_DEBUG=y 619 620 CONFIG_BTRFS_ASSERT=y 620 621 CONFIG_NILFS2_FS=m 622 + CONFIG_BCACHEFS_FS=y 623 + CONFIG_BCACHEFS_QUOTA=y 624 + CONFIG_BCACHEFS_POSIX_ACL=y 621 625 CONFIG_FS_DAX=y 622 626 CONFIG_EXPORTFS_BLOCK_OPS=y 623 627 CONFIG_FS_ENCRYPTION=y ··· 693 691 CONFIG_ENCRYPTED_KEYS=m 694 692 CONFIG_KEY_NOTIFICATIONS=y 695 693 CONFIG_SECURITY=y 696 - CONFIG_SECURITY_NETWORK=y 697 694 CONFIG_HARDENED_USERCOPY=y 698 695 CONFIG_FORTIFY_SOURCE=y 699 696 CONFIG_SECURITY_SELINUX=y
+5 -4
arch/s390/configs/defconfig
··· 42 42 CONFIG_KEXEC_SIG=y 43 43 CONFIG_CRASH_DUMP=y 44 44 CONFIG_LIVEPATCH=y 45 - CONFIG_MARCH_ZEC12=y 46 - CONFIG_TUNE_ZEC12=y 45 + CONFIG_MARCH_Z13=y 47 46 CONFIG_NR_CPUS=512 48 47 CONFIG_NUMA=y 49 48 CONFIG_HZ_100=y ··· 70 71 CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y 71 72 CONFIG_MODVERSIONS=y 72 73 CONFIG_MODULE_SRCVERSION_ALL=y 73 - CONFIG_MODULE_SIG_SHA256=y 74 74 CONFIG_BLK_DEV_THROTTLING=y 75 75 CONFIG_BLK_WBT=y 76 76 CONFIG_BLK_CGROUP_IOLATENCY=y ··· 86 88 CONFIG_IOSCHED_BFQ=y 87 89 CONFIG_BINFMT_MISC=m 88 90 CONFIG_ZSWAP=y 91 + CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y 89 92 CONFIG_ZSMALLOC_STAT=y 90 93 # CONFIG_COMPAT_BRK is not set 91 94 CONFIG_MEMORY_HOTPLUG=y ··· 604 605 CONFIG_BTRFS_FS=y 605 606 CONFIG_BTRFS_FS_POSIX_ACL=y 606 607 CONFIG_NILFS2_FS=m 608 + CONFIG_BCACHEFS_FS=m 609 + CONFIG_BCACHEFS_QUOTA=y 610 + CONFIG_BCACHEFS_POSIX_ACL=y 607 611 CONFIG_FS_DAX=y 608 612 CONFIG_EXPORTFS_BLOCK_OPS=y 609 613 CONFIG_FS_ENCRYPTION=y ··· 679 677 CONFIG_ENCRYPTED_KEYS=m 680 678 CONFIG_KEY_NOTIFICATIONS=y 681 679 CONFIG_SECURITY=y 682 - CONFIG_SECURITY_NETWORK=y 683 680 CONFIG_SECURITY_SELINUX=y 684 681 CONFIG_SECURITY_SELINUX_BOOTPARAM=y 685 682 CONFIG_SECURITY_LOCKDOWN_LSM=y
+1 -2
arch/s390/configs/zfcpdump_defconfig
··· 9 9 CONFIG_BLK_DEV_INITRD=y 10 10 CONFIG_CC_OPTIMIZE_FOR_SIZE=y 11 11 CONFIG_CRASH_DUMP=y 12 - CONFIG_MARCH_ZEC12=y 13 - CONFIG_TUNE_ZEC12=y 12 + CONFIG_MARCH_Z13=y 14 13 # CONFIG_COMPAT is not set 15 14 CONFIG_NR_CPUS=2 16 15 CONFIG_HZ_100=y
+1 -1
arch/s390/include/asm/fpu/api.h
··· 79 79 #define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31) 80 80 81 81 #define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH) 82 - #define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7) 82 + #define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW) 83 83 84 84 struct kernel_fpu; 85 85
+1 -12
arch/s390/include/asm/syscall_wrapper.h
··· 63 63 cond_syscall(__s390x_sys_##name); \ 64 64 cond_syscall(__s390_sys_##name) 65 65 66 - #define SYS_NI(name) \ 67 - SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \ 68 - SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers) 69 - 70 66 #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ 71 67 long __s390_compat_sys##name(struct pt_regs *regs); \ 72 68 ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \ ··· 81 85 82 86 /* 83 87 * As some compat syscalls may not be implemented, we need to expand 84 - * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in 85 - * kernel/time/posix-stubs.c to cover this case as well. 88 + * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well. 86 89 */ 87 90 #define COND_SYSCALL_COMPAT(name) \ 88 91 cond_syscall(__s390_compat_sys_##name) 89 - 90 - #define COMPAT_SYS_NI(name) \ 91 - SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers) 92 92 93 93 #define __S390_SYS_STUBx(x, name, ...) \ 94 94 long __s390_sys##name(struct pt_regs *regs); \ ··· 115 123 116 124 #define COND_SYSCALL(name) \ 117 125 cond_syscall(__s390x_sys_##name) 118 - 119 - #define SYS_NI(name) \ 120 - SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers) 121 126 122 127 #define __S390_SYS_STUBx(x, fullname, name, ...) 123 128
+4 -30
arch/x86/include/asm/syscall_wrapper.h
··· 86 86 return sys_ni_syscall(); \ 87 87 } 88 88 89 - #define __SYS_NI(abi, name) \ 90 - SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers); 91 - 92 89 #ifdef CONFIG_X86_64 93 90 #define __X64_SYS_STUB0(name) \ 94 91 __SYS_STUB0(x64, sys_##name) ··· 97 100 #define __X64_COND_SYSCALL(name) \ 98 101 __COND_SYSCALL(x64, sys_##name) 99 102 100 - #define __X64_SYS_NI(name) \ 101 - __SYS_NI(x64, sys_##name) 102 103 #else /* CONFIG_X86_64 */ 103 104 #define __X64_SYS_STUB0(name) 104 105 #define __X64_SYS_STUBx(x, name, ...) 105 106 #define __X64_COND_SYSCALL(name) 106 - #define __X64_SYS_NI(name) 107 107 #endif /* CONFIG_X86_64 */ 108 108 109 109 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) ··· 114 120 #define __IA32_COND_SYSCALL(name) \ 115 121 __COND_SYSCALL(ia32, sys_##name) 116 122 117 - #define __IA32_SYS_NI(name) \ 118 - __SYS_NI(ia32, sys_##name) 119 123 #else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */ 120 124 #define __IA32_SYS_STUB0(name) 121 125 #define __IA32_SYS_STUBx(x, name, ...) 122 126 #define __IA32_COND_SYSCALL(name) 123 - #define __IA32_SYS_NI(name) 124 127 #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */ 125 128 126 129 #ifdef CONFIG_IA32_EMULATION ··· 126 135 * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the 127 136 * ia32 regs in the proper order for shared or "common" syscalls. As some 128 137 * syscalls may not be implemented, we need to expand COND_SYSCALL in 129 - * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this 130 - * case as well. 138 + * kernel/sys_ni.c to cover this case as well. 
131 139 */ 132 140 #define __IA32_COMPAT_SYS_STUB0(name) \ 133 141 __SYS_STUB0(ia32, compat_sys_##name) ··· 138 148 #define __IA32_COMPAT_COND_SYSCALL(name) \ 139 149 __COND_SYSCALL(ia32, compat_sys_##name) 140 150 141 - #define __IA32_COMPAT_SYS_NI(name) \ 142 - __SYS_NI(ia32, compat_sys_##name) 143 - 144 151 #else /* CONFIG_IA32_EMULATION */ 145 152 #define __IA32_COMPAT_SYS_STUB0(name) 146 153 #define __IA32_COMPAT_SYS_STUBx(x, name, ...) 147 154 #define __IA32_COMPAT_COND_SYSCALL(name) 148 - #define __IA32_COMPAT_SYS_NI(name) 149 155 #endif /* CONFIG_IA32_EMULATION */ 150 156 151 157 ··· 161 175 #define __X32_COMPAT_COND_SYSCALL(name) \ 162 176 __COND_SYSCALL(x64, compat_sys_##name) 163 177 164 - #define __X32_COMPAT_SYS_NI(name) \ 165 - __SYS_NI(x64, compat_sys_##name) 166 178 #else /* CONFIG_X86_X32_ABI */ 167 179 #define __X32_COMPAT_SYS_STUB0(name) 168 180 #define __X32_COMPAT_SYS_STUBx(x, name, ...) 169 181 #define __X32_COMPAT_COND_SYSCALL(name) 170 - #define __X32_COMPAT_SYS_NI(name) 171 182 #endif /* CONFIG_X86_X32_ABI */ 172 183 173 184 ··· 195 212 196 213 /* 197 214 * As some compat syscalls may not be implemented, we need to expand 198 - * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in 199 - * kernel/time/posix-stubs.c to cover this case as well. 215 + * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well. 
200 216 */ 201 217 #define COND_SYSCALL_COMPAT(name) \ 202 218 __IA32_COMPAT_COND_SYSCALL(name) \ 203 219 __X32_COMPAT_COND_SYSCALL(name) 204 - 205 - #define COMPAT_SYS_NI(name) \ 206 - __IA32_COMPAT_SYS_NI(name) \ 207 - __X32_COMPAT_SYS_NI(name) 208 220 209 221 #endif /* CONFIG_COMPAT */ 210 222 ··· 221 243 * As the generic SYSCALL_DEFINE0() macro does not decode any parameters for 222 244 * obvious reasons, and passing struct pt_regs *regs to it in %rdi does not 223 245 * hurt, we only need to re-define it here to keep the naming congruent to 224 - * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI() 225 - * macros to work correctly. 246 + * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro 247 + * to work correctly. 226 248 */ 227 249 #define SYSCALL_DEFINE0(sname) \ 228 250 SYSCALL_METADATA(_##sname, 0); \ ··· 234 256 #define COND_SYSCALL(name) \ 235 257 __X64_COND_SYSCALL(name) \ 236 258 __IA32_COND_SYSCALL(name) 237 - 238 - #define SYS_NI(name) \ 239 - __X64_SYS_NI(name) \ 240 - __IA32_SYS_NI(name) 241 259 242 260 243 261 /*
+1 -1
arch/x86/kernel/acpi/boot.c
··· 293 293 processor->processor_id, /* ACPI ID */ 294 294 processor->lapic_flags & ACPI_MADT_ENABLED); 295 295 296 + has_lapic_cpus = true; 296 297 return 0; 297 298 } 298 299 ··· 1135 1134 if (!count) { 1136 1135 count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, 1137 1136 acpi_parse_lapic, MAX_LOCAL_APIC); 1138 - has_lapic_cpus = count > 0; 1139 1137 x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, 1140 1138 acpi_parse_x2apic, MAX_LOCAL_APIC); 1141 1139 }
+12 -2
arch/x86/kernel/alternative.c
··· 255 255 } 256 256 } 257 257 258 + static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len) 259 + { 260 + unsigned long flags; 261 + 262 + local_irq_save(flags); 263 + optimize_nops(instr, len); 264 + sync_core(); 265 + local_irq_restore(flags); 266 + } 267 + 258 268 /* 259 269 * In this context, "source" is where the instructions are placed in the 260 270 * section .altinstr_replacement, for example during kernel build by the ··· 448 438 * patch if feature is *NOT* present. 449 439 */ 450 440 if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) { 451 - optimize_nops(instr, a->instrlen); 441 + optimize_nops_inplace(instr, a->instrlen); 452 442 continue; 453 443 } 454 444 ··· 1695 1685 } else { 1696 1686 local_irq_save(flags); 1697 1687 memcpy(addr, opcode, len); 1698 - local_irq_restore(flags); 1699 1688 sync_core(); 1689 + local_irq_restore(flags); 1700 1690 1701 1691 /* 1702 1692 * Could also do a CLFLUSH here to speed up CPU recovery; but
+16
arch/x86/kernel/head_64.S
··· 255 255 testl $X2APIC_ENABLE, %eax 256 256 jnz .Lread_apicid_msr 257 257 258 + #ifdef CONFIG_X86_X2APIC 259 + /* 260 + * If system is in X2APIC mode then MMIO base might not be 261 + * mapped causing the MMIO read below to fault. Faults can't 262 + * be handled at that point. 263 + */ 264 + cmpl $0, x2apic_mode(%rip) 265 + jz .Lread_apicid_mmio 266 + 267 + /* Force the AP into X2APIC mode. */ 268 + orl $X2APIC_ENABLE, %eax 269 + wrmsr 270 + jmp .Lread_apicid_msr 271 + #endif 272 + 273 + .Lread_apicid_mmio: 258 274 /* Read the APIC ID from the fix-mapped MMIO space. */ 259 275 movq apic_mmio_base(%rip), %rcx 260 276 addq $APIC_ID, %rcx
+19
arch/x86/kvm/svm/sev.c
··· 2972 2972 2973 2973 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux); 2974 2974 } 2975 + 2976 + /* 2977 + * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if 2978 + * the host/guest supports its use. 2979 + * 2980 + * guest_can_use() checks a number of requirements on the host/guest to 2981 + * ensure that MSR_IA32_XSS is available, but it might report true even 2982 + * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host 2983 + * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better 2984 + * to further check that the guest CPUID actually supports 2985 + * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved 2986 + * guests will still get intercepted and caught in the normal 2987 + * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths. 2988 + */ 2989 + if (guest_can_use(vcpu, X86_FEATURE_XSAVES) && 2990 + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 2991 + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1); 2992 + else 2993 + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0); 2975 2994 } 2976 2995 2977 2996 void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+1
arch/x86/kvm/svm/svm.c
··· 103 103 { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, 104 104 { .index = MSR_IA32_LASTINTFROMIP, .always = false }, 105 105 { .index = MSR_IA32_LASTINTTOIP, .always = false }, 106 + { .index = MSR_IA32_XSS, .always = false }, 106 107 { .index = MSR_EFER, .always = false }, 107 108 { .index = MSR_IA32_CR_PAT, .always = false }, 108 109 { .index = MSR_AMD64_SEV_ES_GHCB, .always = true },
+1 -1
arch/x86/kvm/svm/svm.h
··· 30 30 #define IOPM_SIZE PAGE_SIZE * 3 31 31 #define MSRPM_SIZE PAGE_SIZE * 2 32 32 33 - #define MAX_DIRECT_ACCESS_MSRS 46 33 + #define MAX_DIRECT_ACCESS_MSRS 47 34 34 #define MSRPM_OFFSETS 32 35 35 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; 36 36 extern bool npt_enabled;
+1
arch/x86/xen/Kconfig
··· 9 9 select PARAVIRT_CLOCK 10 10 select X86_HV_CALLBACK_VECTOR 11 11 depends on X86_64 || (X86_32 && X86_PAE) 12 + depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8) 12 13 depends on X86_LOCAL_APIC && X86_TSC 13 14 help 14 15 This is the Linux Xen port. Enabling this will allow the
+6 -4
drivers/bluetooth/hci_vhci.c
··· 11 11 #include <linux/module.h> 12 12 #include <asm/unaligned.h> 13 13 14 + #include <linux/atomic.h> 14 15 #include <linux/kernel.h> 15 16 #include <linux/init.h> 16 17 #include <linux/slab.h> ··· 45 44 bool wakeup; 46 45 __u16 msft_opcode; 47 46 bool aosp_capable; 47 + atomic_t initialized; 48 48 }; 49 49 50 50 static int vhci_open_dev(struct hci_dev *hdev) ··· 77 75 78 76 memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); 79 77 80 - mutex_lock(&data->open_mutex); 81 78 skb_queue_tail(&data->readq, skb); 82 - mutex_unlock(&data->open_mutex); 83 79 84 - wake_up_interruptible(&data->read_wait); 80 + if (atomic_read(&data->initialized)) 81 + wake_up_interruptible(&data->read_wait); 85 82 return 0; 86 83 } 87 84 ··· 465 464 skb_put_u8(skb, 0xff); 466 465 skb_put_u8(skb, opcode); 467 466 put_unaligned_le16(hdev->id, skb_put(skb, 2)); 468 - skb_queue_tail(&data->readq, skb); 467 + skb_queue_head(&data->readq, skb); 468 + atomic_inc(&data->initialized); 469 469 470 470 wake_up_interruptible(&data->read_wait); 471 471 return 0;
+14 -4
drivers/bus/ti-sysc.c
··· 2158 2158 sysc_val = sysc_read_sysconfig(ddata); 2159 2159 sysc_val |= sysc_mask; 2160 2160 sysc_write(ddata, sysc_offset, sysc_val); 2161 - /* Flush posted write */ 2161 + 2162 + /* 2163 + * Some devices need a delay before reading registers 2164 + * after reset. Presumably a srst_udelay is not needed 2165 + * for devices that use a rstctrl register reset. 2166 + */ 2167 + if (ddata->cfg.srst_udelay) 2168 + fsleep(ddata->cfg.srst_udelay); 2169 + 2170 + /* 2171 + * Flush posted write. For devices needing srst_udelay 2172 + * this should trigger an interconnect error if the 2173 + * srst_udelay value is needed but not configured. 2174 + */ 2162 2175 sysc_val = sysc_read_sysconfig(ddata); 2163 2176 } 2164 - 2165 - if (ddata->cfg.srst_udelay) 2166 - fsleep(ddata->cfg.srst_udelay); 2167 2177 2168 2178 if (ddata->post_reset_quirk) 2169 2179 ddata->post_reset_quirk(ddata);
+8 -4
drivers/gpio/gpio-dwapb.c
··· 282 282 { 283 283 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 284 284 struct dwapb_gpio *gpio = to_dwapb_gpio(gc); 285 + irq_hw_number_t hwirq = irqd_to_hwirq(d); 285 286 unsigned long flags; 286 287 u32 val; 287 288 288 289 raw_spin_lock_irqsave(&gc->bgpio_lock, flags); 289 - val = dwapb_read(gpio, GPIO_INTEN); 290 - val |= BIT(irqd_to_hwirq(d)); 290 + val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq); 291 291 dwapb_write(gpio, GPIO_INTEN, val); 292 + val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq); 293 + dwapb_write(gpio, GPIO_INTMASK, val); 292 294 raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); 293 295 } 294 296 ··· 298 296 { 299 297 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 300 298 struct dwapb_gpio *gpio = to_dwapb_gpio(gc); 299 + irq_hw_number_t hwirq = irqd_to_hwirq(d); 301 300 unsigned long flags; 302 301 u32 val; 303 302 304 303 raw_spin_lock_irqsave(&gc->bgpio_lock, flags); 305 - val = dwapb_read(gpio, GPIO_INTEN); 306 - val &= ~BIT(irqd_to_hwirq(d)); 304 + val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq); 305 + dwapb_write(gpio, GPIO_INTMASK, val); 306 + val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq); 307 307 dwapb_write(gpio, GPIO_INTEN, val); 308 308 raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); 309 309 }
+12 -4
drivers/gpio/gpiolib-cdev.c
··· 2481 2481 return 0; 2482 2482 } 2483 2483 2484 - /* 2485 - * gpio_ioctl() - ioctl handler for the GPIO chardev 2486 - */ 2487 - static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2484 + static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg) 2488 2485 { 2489 2486 struct gpio_chardev_data *cdev = file->private_data; 2490 2487 struct gpio_device *gdev = cdev->gdev; ··· 2516 2519 default: 2517 2520 return -EINVAL; 2518 2521 } 2522 + } 2523 + 2524 + /* 2525 + * gpio_ioctl() - ioctl handler for the GPIO chardev 2526 + */ 2527 + static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2528 + { 2529 + struct gpio_chardev_data *cdev = file->private_data; 2530 + 2531 + return call_ioctl_locked(file, cmd, arg, cdev->gdev, 2532 + gpio_ioctl_unlocked); 2519 2533 } 2520 2534 2521 2535 #ifdef CONFIG_COMPAT
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 285 285 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { 286 286 struct amdgpu_bo *bo = vm_bo->bo; 287 287 288 + vm_bo->moved = true; 288 289 if (!bo || bo->tbo.type != ttm_bo_type_kernel) 289 290 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); 290 291 else if (bo->parent)
+12 -6
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 1653 1653 if (test_bit(gpuidx, prange->bitmap_access)) 1654 1654 bitmap_set(ctx->bitmap, gpuidx, 1); 1655 1655 } 1656 + 1657 + /* 1658 + * If prange is already mapped or with always mapped flag, 1659 + * update mapping on GPUs with ACCESS attribute 1660 + */ 1661 + if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { 1662 + if (prange->mapped_to_gpu || 1663 + prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED) 1664 + bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); 1665 + } 1656 1666 } else { 1657 1667 bitmap_or(ctx->bitmap, prange->bitmap_access, 1658 1668 prange->bitmap_aip, MAX_GPU_INSTANCE); 1659 1669 } 1660 1670 1661 1671 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { 1662 - bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); 1663 - if (!prange->mapped_to_gpu || 1664 - bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { 1665 - r = 0; 1666 - goto free_ctx; 1667 - } 1672 + r = 0; 1673 + goto free_ctx; 1668 1674 } 1669 1675 1670 1676 if (prange->actual_loc && !prange->ttm_res) {
+15 -12
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
··· 1014 1014 DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage); 1015 1015 break; 1016 1016 case AS_SIGNAL_TYPE_DISPLAY_PORT: 1017 - ss_info->spread_spectrum_percentage = 1017 + if (bp->base.integrated_info) { 1018 + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage); 1019 + ss_info->spread_spectrum_percentage = 1020 + bp->base.integrated_info->gpuclk_ss_percentage; 1021 + ss_info->type.CENTER_MODE = 1022 + bp->base.integrated_info->gpuclk_ss_type; 1023 + } else { 1024 + ss_info->spread_spectrum_percentage = 1018 1025 disp_cntl_tbl->dp_ss_percentage; 1019 - ss_info->spread_spectrum_range = 1026 + ss_info->spread_spectrum_range = 1020 1027 disp_cntl_tbl->dp_ss_rate_10hz * 10; 1021 - if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) 1022 - ss_info->type.CENTER_MODE = true; 1023 - 1028 + if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) 1029 + ss_info->type.CENTER_MODE = true; 1030 + } 1024 1031 DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage); 1025 1032 break; 1026 1033 case AS_SIGNAL_TYPE_GPU_PLL: ··· 2393 2386 return BP_RESULT_BADBIOSTABLE; 2394 2387 2395 2388 info->num_chans = info_v30->channel_num; 2396 - /* As suggested by VBIOS we should always use 2397 - * dram_channel_width_bytes = 2 when using VRAM 2398 - * table version 3.0. This is because the channel_width 2399 - * param in the VRAM info table is changed in 7000 series and 2400 - * no longer represents the memory channel width. 
2401 - */ 2402 - info->dram_channel_width_bytes = 2; 2389 + info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8; 2403 2390 2404 2391 return result; 2405 2392 } ··· 2821 2820 info->ma_channel_number = info_v2_2->umachannelnumber; 2822 2821 info->dp_ss_control = 2823 2822 le16_to_cpu(info_v2_2->reserved1); 2823 + info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage; 2824 + info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type; 2824 2825 2825 2826 for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) { 2826 2827 info->ext_disp_conn_info.gu_id[i] =
+18 -8
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 5095 5095 */ 5096 5096 bool dc_is_dmub_outbox_supported(struct dc *dc) 5097 5097 { 5098 - /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 5099 - if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && 5100 - dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 5101 - !dc->debug.dpia_debug.bits.disable_dpia) 5102 - return true; 5098 + switch (dc->ctx->asic_id.chip_family) { 5103 5099 5104 - if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 && 5105 - !dc->debug.dpia_debug.bits.disable_dpia) 5106 - return true; 5100 + case FAMILY_YELLOW_CARP: 5101 + /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 5102 + if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 5103 + !dc->debug.dpia_debug.bits.disable_dpia) 5104 + return true; 5105 + break; 5106 + 5107 + case AMDGPU_FAMILY_GC_11_0_1: 5108 + case AMDGPU_FAMILY_GC_11_5_0: 5109 + if (!dc->debug.dpia_debug.bits.disable_dpia) 5110 + return true; 5111 + break; 5112 + 5113 + default: 5114 + break; 5115 + } 5107 5116 5108 5117 /* dmub aux needs dmub notifications to be enabled */ 5109 5118 return dc->debug.enable_dmub_aux_for_legacy_ddc; 5119 + 5110 5120 } 5111 5121 5112 5122 /**
+1 -1
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
··· 5420 5420 *OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, 5421 5421 OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots); 5422 5422 5423 - if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) { 5423 + if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) { 5424 5424 *RequiresDSC = true; 5425 5425 LinkDSCEnable = true; 5426 5426 *OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
+6
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 960 960 dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; 961 961 dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; 962 962 dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; 963 + 964 + if (dc->ctx->dmub_srv->dmub->fw_version < 965 + DMUB_FW_VERSION(7, 0, 35)) { 966 + dc->debug.force_disable_subvp = true; 967 + dc->debug.disable_fpo_optimizations = true; 968 + } 963 969 } 964 970 } 965 971
+2
drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
··· 417 417 /* V2.1 */ 418 418 struct edp_info edp1_info; 419 419 struct edp_info edp2_info; 420 + uint32_t gpuclk_ss_percentage; 421 + uint32_t gpuclk_ss_type; 420 422 }; 421 423 422 424 /*
+2 -1
drivers/gpu/drm/i915/display/intel_cx0_phy.c
··· 2465 2465 2466 2466 val |= XELPDP_FORWARD_CLOCK_UNGATE; 2467 2467 2468 - if (is_hdmi_frl(crtc_state->port_clock)) 2468 + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && 2469 + is_hdmi_frl(crtc_state->port_clock)) 2469 2470 val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK); 2470 2471 else 2471 2472 val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+12 -1
drivers/gpu/drm/i915/display/intel_display.c
··· 3747 3747 if (!active) 3748 3748 goto out; 3749 3749 3750 - intel_dsc_get_config(pipe_config); 3751 3750 intel_bigjoiner_get_config(pipe_config); 3751 + intel_dsc_get_config(pipe_config); 3752 3752 3753 3753 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 3754 3754 DISPLAY_VER(dev_priv) >= 11) ··· 6029 6029 if (intel_crtc_needs_modeset(new_crtc_state)) { 6030 6030 drm_dbg_kms(&i915->drm, 6031 6031 "[CRTC:%d:%s] modeset required\n", 6032 + crtc->base.base.id, crtc->base.name); 6033 + return -EINVAL; 6034 + } 6035 + 6036 + /* 6037 + * FIXME: Bigjoiner+async flip is busted currently. 6038 + * Remove this check once the issues are fixed. 6039 + */ 6040 + if (new_crtc_state->bigjoiner_pipes) { 6041 + drm_dbg_kms(&i915->drm, 6042 + "[CRTC:%d:%s] async flip disallowed with bigjoiner\n", 6032 6043 crtc->base.base.id, crtc->base.name); 6033 6044 return -EINVAL; 6034 6045 }
+41 -2
drivers/gpu/drm/i915/display/intel_dmc.c
··· 389 389 enum intel_dmc_id dmc_id; 390 390 391 391 /* TODO: check if the following applies to all D13+ platforms. */ 392 - if (!IS_DG2(i915) && !IS_TIGERLAKE(i915)) 392 + if (!IS_TIGERLAKE(i915)) 393 393 return; 394 394 395 395 for_each_dmc_id(dmc_id) { ··· 493 493 intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); 494 494 } 495 495 496 + static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915, 497 + enum intel_dmc_id dmc_id, i915_reg_t reg) 498 + { 499 + u32 offset = i915_mmio_reg_offset(reg); 500 + u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0)); 501 + u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); 502 + 503 + return offset >= start && offset < end; 504 + } 505 + 506 + static bool disable_dmc_evt(struct drm_i915_private *i915, 507 + enum intel_dmc_id dmc_id, 508 + i915_reg_t reg, u32 data) 509 + { 510 + if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg)) 511 + return false; 512 + 513 + /* keep all pipe DMC events disabled by default */ 514 + if (dmc_id != DMC_FW_MAIN) 515 + return true; 516 + 517 + return false; 518 + } 519 + 520 + static u32 dmc_mmiodata(struct drm_i915_private *i915, 521 + struct intel_dmc *dmc, 522 + enum intel_dmc_id dmc_id, int i) 523 + { 524 + if (disable_dmc_evt(i915, dmc_id, 525 + dmc->dmc_info[dmc_id].mmioaddr[i], 526 + dmc->dmc_info[dmc_id].mmiodata[i])) 527 + return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, 528 + DMC_EVT_CTL_TYPE_EDGE_0_1) | 529 + REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, 530 + DMC_EVT_CTL_EVENT_ID_FALSE); 531 + else 532 + return dmc->dmc_info[dmc_id].mmiodata[i]; 533 + } 534 + 496 535 /** 497 536 * intel_dmc_load_program() - write the firmware from memory to register. 498 537 * @i915: i915 drm device. 
··· 571 532 for_each_dmc_id(dmc_id) { 572 533 for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { 573 534 intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], 574 - dmc->dmc_info[dmc_id].mmiodata[i]); 535 + dmc_mmiodata(i915, dmc, dmc_id, i)); 575 536 } 576 537 } 577 538
+2 -2
drivers/gpu/drm/i915/i915_hwmon.c
··· 175 175 * tau4 = (4 | x) << y 176 176 * but add 2 when doing the final right shift to account for units 177 177 */ 178 - tau4 = ((1 << x_w) | x) << y; 178 + tau4 = (u64)((1 << x_w) | x) << y; 179 179 /* val in hwmon interface units (millisec) */ 180 180 out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w); 181 181 ··· 211 211 r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT); 212 212 x = REG_FIELD_GET(PKG_MAX_WIN_X, r); 213 213 y = REG_FIELD_GET(PKG_MAX_WIN_Y, r); 214 - tau4 = ((1 << x_w) | x) << y; 214 + tau4 = (u64)((1 << x_w) | x) << y; 215 215 max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w); 216 216 217 217 if (val > max_win)
+42 -29
drivers/hid/hid-nintendo.c
··· 325 325 * All the controller's button values are stored in a u32. 326 326 * They can be accessed with bitwise ANDs. 327 327 */ 328 - static const u32 JC_BTN_Y = BIT(0); 329 - static const u32 JC_BTN_X = BIT(1); 330 - static const u32 JC_BTN_B = BIT(2); 331 - static const u32 JC_BTN_A = BIT(3); 332 - static const u32 JC_BTN_SR_R = BIT(4); 333 - static const u32 JC_BTN_SL_R = BIT(5); 334 - static const u32 JC_BTN_R = BIT(6); 335 - static const u32 JC_BTN_ZR = BIT(7); 336 - static const u32 JC_BTN_MINUS = BIT(8); 337 - static const u32 JC_BTN_PLUS = BIT(9); 338 - static const u32 JC_BTN_RSTICK = BIT(10); 339 - static const u32 JC_BTN_LSTICK = BIT(11); 340 - static const u32 JC_BTN_HOME = BIT(12); 341 - static const u32 JC_BTN_CAP = BIT(13); /* capture button */ 342 - static const u32 JC_BTN_DOWN = BIT(16); 343 - static const u32 JC_BTN_UP = BIT(17); 344 - static const u32 JC_BTN_RIGHT = BIT(18); 345 - static const u32 JC_BTN_LEFT = BIT(19); 346 - static const u32 JC_BTN_SR_L = BIT(20); 347 - static const u32 JC_BTN_SL_L = BIT(21); 348 - static const u32 JC_BTN_L = BIT(22); 349 - static const u32 JC_BTN_ZL = BIT(23); 328 + #define JC_BTN_Y BIT(0) 329 + #define JC_BTN_X BIT(1) 330 + #define JC_BTN_B BIT(2) 331 + #define JC_BTN_A BIT(3) 332 + #define JC_BTN_SR_R BIT(4) 333 + #define JC_BTN_SL_R BIT(5) 334 + #define JC_BTN_R BIT(6) 335 + #define JC_BTN_ZR BIT(7) 336 + #define JC_BTN_MINUS BIT(8) 337 + #define JC_BTN_PLUS BIT(9) 338 + #define JC_BTN_RSTICK BIT(10) 339 + #define JC_BTN_LSTICK BIT(11) 340 + #define JC_BTN_HOME BIT(12) 341 + #define JC_BTN_CAP BIT(13) /* capture button */ 342 + #define JC_BTN_DOWN BIT(16) 343 + #define JC_BTN_UP BIT(17) 344 + #define JC_BTN_RIGHT BIT(18) 345 + #define JC_BTN_LEFT BIT(19) 346 + #define JC_BTN_SR_L BIT(20) 347 + #define JC_BTN_SL_L BIT(21) 348 + #define JC_BTN_L BIT(22) 349 + #define JC_BTN_ZL BIT(23) 350 350 351 351 enum joycon_msg_type { 352 352 JOYCON_MSG_TYPE_NONE, ··· 927 927 */ 928 928 static void 
joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr) 929 929 { 930 - int i; 930 + int i, divz = 0; 931 931 932 932 for (i = 0; i < 3; i++) { 933 933 ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] - 934 934 ctlr->accel_cal.offset[i]; 935 935 ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] - 936 936 ctlr->gyro_cal.offset[i]; 937 + 938 + if (ctlr->imu_cal_accel_divisor[i] == 0) { 939 + ctlr->imu_cal_accel_divisor[i] = 1; 940 + divz++; 941 + } 942 + 943 + if (ctlr->imu_cal_gyro_divisor[i] == 0) { 944 + ctlr->imu_cal_gyro_divisor[i] = 1; 945 + divz++; 946 + } 937 947 } 948 + 949 + if (divz) 950 + hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz); 938 951 } 939 952 940 953 static const s16 DFLT_ACCEL_OFFSET /*= 0*/; ··· 1176 1163 JC_IMU_SAMPLES_PER_DELTA_AVG) { 1177 1164 ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum / 1178 1165 ctlr->imu_delta_samples_count; 1179 - /* don't ever want divide by zero shenanigans */ 1180 - if (ctlr->imu_avg_delta_ms == 0) { 1181 - ctlr->imu_avg_delta_ms = 1; 1182 - hid_warn(ctlr->hdev, 1183 - "calculated avg imu delta of 0\n"); 1184 - } 1185 1166 ctlr->imu_delta_samples_count = 0; 1186 1167 ctlr->imu_delta_samples_sum = 0; 1168 + } 1169 + 1170 + /* don't ever want divide by zero shenanigans */ 1171 + if (ctlr->imu_avg_delta_ms == 0) { 1172 + ctlr->imu_avg_delta_ms = 1; 1173 + hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n"); 1187 1174 } 1188 1175 1189 1176 /* useful for debugging IMU sample rate */
+32 -16
drivers/i2c/busses/i2c-aspeed.c
··· 249 249 if (!slave) 250 250 return 0; 251 251 252 - command = readl(bus->base + ASPEED_I2C_CMD_REG); 252 + /* 253 + * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive 254 + * transfers with low enough latency between the nak/stop phase of the current 255 + * command and the start/address phase of the following command that the 256 + * interrupts are coalesced by the time we process them. 257 + */ 258 + if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) { 259 + irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; 260 + bus->slave_state = ASPEED_I2C_SLAVE_STOP; 261 + } 253 262 254 - /* Slave was requested, restart state machine. */ 263 + if (irq_status & ASPEED_I2CD_INTR_TX_NAK && 264 + bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) { 265 + irq_handled |= ASPEED_I2CD_INTR_TX_NAK; 266 + bus->slave_state = ASPEED_I2C_SLAVE_STOP; 267 + } 268 + 269 + /* Propagate any stop conditions to the slave implementation. */ 270 + if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) { 271 + i2c_slave_event(slave, I2C_SLAVE_STOP, &value); 272 + bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; 273 + } 274 + 275 + /* 276 + * Now that we've dealt with any potentially coalesced stop conditions, 277 + * address any start conditions. 278 + */ 255 279 if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) { 256 280 irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH; 257 281 bus->slave_state = ASPEED_I2C_SLAVE_START; 258 282 } 259 283 260 - /* Slave is not currently active, irq was for someone else. */ 284 + /* 285 + * If the slave has been stopped and not started then slave interrupt 286 + * handling is complete. 
287 + */ 261 288 if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE) 262 289 return irq_handled; 263 290 291 + command = readl(bus->base + ASPEED_I2C_CMD_REG); 264 292 dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n", 265 293 irq_status, command); 266 294 ··· 305 277 ASPEED_I2C_SLAVE_WRITE_REQUESTED; 306 278 } 307 279 irq_handled |= ASPEED_I2CD_INTR_RX_DONE; 308 - } 309 - 310 - /* Slave was asked to stop. */ 311 - if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) { 312 - irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; 313 - bus->slave_state = ASPEED_I2C_SLAVE_STOP; 314 - } 315 - if (irq_status & ASPEED_I2CD_INTR_TX_NAK && 316 - bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) { 317 - irq_handled |= ASPEED_I2CD_INTR_TX_NAK; 318 - bus->slave_state = ASPEED_I2C_SLAVE_STOP; 319 280 } 320 281 321 282 switch (bus->slave_state) { ··· 341 324 i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value); 342 325 break; 343 326 case ASPEED_I2C_SLAVE_STOP: 344 - i2c_slave_event(slave, I2C_SLAVE_STOP, &value); 345 - bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; 327 + /* Stop event handling is done early. Unreachable. */ 346 328 break; 347 329 case ASPEED_I2C_SLAVE_START: 348 330 /* Slave was just started. Waiting for the next event. */;
+7 -1
drivers/i2c/busses/i2c-qcom-geni.c
··· 858 858 ret = geni_se_resources_on(&gi2c->se); 859 859 if (ret) { 860 860 dev_err(dev, "Error turning on resources %d\n", ret); 861 + clk_disable_unprepare(gi2c->core_clk); 861 862 return ret; 862 863 } 863 864 proto = geni_se_read_proto(&gi2c->se); ··· 878 877 /* FIFO is disabled, so we can only use GPI DMA */ 879 878 gi2c->gpi_mode = true; 880 879 ret = setup_gpi_dma(gi2c); 881 - if (ret) 880 + if (ret) { 881 + geni_se_resources_off(&gi2c->se); 882 + clk_disable_unprepare(gi2c->core_clk); 882 883 return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n"); 884 + } 883 885 884 886 dev_dbg(dev, "Using GPI DMA mode for I2C\n"); 885 887 } else { ··· 895 891 896 892 if (!tx_depth) { 897 893 dev_err(dev, "Invalid TX FIFO depth\n"); 894 + geni_se_resources_off(&gi2c->se); 895 + clk_disable_unprepare(gi2c->core_clk); 898 896 return -EINVAL; 899 897 } 900 898
+11 -2
drivers/i2c/busses/i2c-rk3x.c
··· 178 178 * @clk: function clk for rk3399 or function & Bus clks for others 179 179 * @pclk: Bus clk for rk3399 180 180 * @clk_rate_nb: i2c clk rate change notify 181 + * @irq: irq number 181 182 * @t: I2C known timing information 182 183 * @lock: spinlock for the i2c bus 183 184 * @wait: the waitqueue to wait for i2c transfer ··· 201 200 struct clk *clk; 202 201 struct clk *pclk; 203 202 struct notifier_block clk_rate_nb; 203 + int irq; 204 204 205 205 /* Settings */ 206 206 struct i2c_timings t; ··· 1089 1087 1090 1088 spin_unlock_irqrestore(&i2c->lock, flags); 1091 1089 1092 - rk3x_i2c_start(i2c); 1093 - 1094 1090 if (!polling) { 1091 + rk3x_i2c_start(i2c); 1092 + 1095 1093 timeout = wait_event_timeout(i2c->wait, !i2c->busy, 1096 1094 msecs_to_jiffies(WAIT_TIMEOUT)); 1097 1095 } else { 1096 + disable_irq(i2c->irq); 1097 + rk3x_i2c_start(i2c); 1098 + 1098 1099 timeout = rk3x_i2c_wait_xfer_poll(i2c); 1100 + 1101 + enable_irq(i2c->irq); 1099 1102 } 1100 1103 1101 1104 spin_lock_irqsave(&i2c->lock, flags); ··· 1316 1309 dev_err(&pdev->dev, "cannot request IRQ\n"); 1317 1310 return ret; 1318 1311 } 1312 + 1313 + i2c->irq = irq; 1319 1314 1320 1315 platform_set_drvdata(pdev, i2c); 1321 1316
+26 -11
drivers/iio/accel/kionix-kx022a.c
··· 393 393 * (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2 394 394 * => KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed 395 395 * in low-power mode(?) ) 396 - * => +/-2G => 4 / 2^16 * 9,80665 * 10^6 (to scale to micro) 397 - * => +/-2G - 598.550415 398 - * +/-4G - 1197.10083 399 - * +/-8G - 2394.20166 400 - * +/-16G - 4788.40332 396 + * => +/-2G => 4 / 2^16 * 9,80665 397 + * => +/-2G - 0.000598550415 398 + * +/-4G - 0.00119710083 399 + * +/-8G - 0.00239420166 400 + * +/-16G - 0.00478840332 401 401 */ 402 402 static const int kx022a_scale_table[][2] = { 403 - { 598, 550415 }, 404 - { 1197, 100830 }, 405 - { 2394, 201660 }, 406 - { 4788, 403320 }, 403 + { 0, 598550 }, 404 + { 0, 1197101 }, 405 + { 0, 2394202 }, 406 + { 0, 4788403 }, 407 407 }; 408 408 409 409 static int kx022a_read_avail(struct iio_dev *indio_dev, ··· 422 422 *vals = (const int *)kx022a_scale_table; 423 423 *length = ARRAY_SIZE(kx022a_scale_table) * 424 424 ARRAY_SIZE(kx022a_scale_table[0]); 425 - *type = IIO_VAL_INT_PLUS_MICRO; 425 + *type = IIO_VAL_INT_PLUS_NANO; 426 426 return IIO_AVAIL_LIST; 427 427 default: 428 428 return -EINVAL; ··· 483 483 mutex_unlock(&data->mutex); 484 484 485 485 return ret; 486 + } 487 + 488 + static int kx022a_write_raw_get_fmt(struct iio_dev *idev, 489 + struct iio_chan_spec const *chan, 490 + long mask) 491 + { 492 + switch (mask) { 493 + case IIO_CHAN_INFO_SCALE: 494 + return IIO_VAL_INT_PLUS_NANO; 495 + case IIO_CHAN_INFO_SAMP_FREQ: 496 + return IIO_VAL_INT_PLUS_MICRO; 497 + default: 498 + return -EINVAL; 499 + } 486 500 } 487 501 488 502 static int kx022a_write_raw(struct iio_dev *idev, ··· 643 629 644 630 kx022a_reg2scale(regval, val, val2); 645 631 646 - return IIO_VAL_INT_PLUS_MICRO; 632 + return IIO_VAL_INT_PLUS_NANO; 647 633 } 648 634 649 635 return -EINVAL; ··· 870 856 static const struct iio_info kx022a_info = { 871 857 .read_raw = &kx022a_read_raw, 872 858 .write_raw = &kx022a_write_raw, 859 + .write_raw_get_fmt = 
&kx022a_write_raw_get_fmt, 873 860 .read_avail = &kx022a_read_avail, 874 861 875 862 .validate_trigger = iio_validate_own_trigger,
+4
drivers/iio/adc/imx93_adc.c
··· 93 93 IMX93_ADC_CHAN(1), 94 94 IMX93_ADC_CHAN(2), 95 95 IMX93_ADC_CHAN(3), 96 + IMX93_ADC_CHAN(4), 97 + IMX93_ADC_CHAN(5), 98 + IMX93_ADC_CHAN(6), 99 + IMX93_ADC_CHAN(7), 96 100 }; 97 101 98 102 static void imx93_adc_power_down(struct imx93_adc *adc)
+4 -4
drivers/iio/adc/mcp3564.c
··· 918 918 mutex_unlock(&adc->lock); 919 919 return ret; 920 920 case IIO_CHAN_INFO_CALIBBIAS: 921 - if (val < mcp3564_calib_bias[0] && val > mcp3564_calib_bias[2]) 921 + if (val < mcp3564_calib_bias[0] || val > mcp3564_calib_bias[2]) 922 922 return -EINVAL; 923 923 924 924 mutex_lock(&adc->lock); ··· 928 928 mutex_unlock(&adc->lock); 929 929 return ret; 930 930 case IIO_CHAN_INFO_CALIBSCALE: 931 - if (val < mcp3564_calib_scale[0] && val > mcp3564_calib_scale[2]) 931 + if (val < mcp3564_calib_scale[0] || val > mcp3564_calib_scale[2]) 932 932 return -EINVAL; 933 933 934 934 if (adc->calib_scale == val) ··· 1122 1122 enum mcp3564_ids ids; 1123 1123 int ret = 0; 1124 1124 unsigned int tmp = 0x01; 1125 - bool err = true; 1125 + bool err = false; 1126 1126 1127 1127 /* 1128 1128 * The address is set on a per-device basis by fuses in the factory, ··· 1509 1509 module_spi_driver(mcp3564_driver); 1510 1510 1511 1511 MODULE_AUTHOR("Marius Cristea <marius.cristea@microchip.com>"); 1512 - MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP346xR ADCs"); 1512 + MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP356xR ADCs"); 1513 1513 MODULE_LICENSE("GPL v2");
+15 -1
drivers/iio/adc/meson_saradc.c
··· 1241 1241 .cmv_select = 1, 1242 1242 }; 1243 1243 1244 + static const struct meson_sar_adc_param meson_sar_adc_axg_param = { 1245 + .has_bl30_integration = true, 1246 + .clock_rate = 1200000, 1247 + .bandgap_reg = MESON_SAR_ADC_REG11, 1248 + .regmap_config = &meson_sar_adc_regmap_config_gxbb, 1249 + .resolution = 12, 1250 + .disable_ring_counter = 1, 1251 + .has_reg11 = true, 1252 + .vref_volatge = 1, 1253 + .has_vref_select = true, 1254 + .vref_select = VREF_VDDA, 1255 + .cmv_select = 1, 1256 + }; 1257 + 1244 1258 static const struct meson_sar_adc_param meson_sar_adc_g12a_param = { 1245 1259 .has_bl30_integration = false, 1246 1260 .clock_rate = 1200000, ··· 1299 1285 }; 1300 1286 1301 1287 static const struct meson_sar_adc_data meson_sar_adc_axg_data = { 1302 - .param = &meson_sar_adc_gxl_param, 1288 + .param = &meson_sar_adc_axg_param, 1303 1289 .name = "meson-axg-saradc", 1304 1290 }; 1305 1291
+3 -1
drivers/iio/adc/ti_am335x_adc.c
··· 670 670 platform_set_drvdata(pdev, indio_dev); 671 671 672 672 err = tiadc_request_dma(pdev, adc_dev); 673 - if (err && err == -EPROBE_DEFER) 673 + if (err && err != -ENODEV) { 674 + dev_err_probe(&pdev->dev, err, "DMA request failed\n"); 674 675 goto err_dma; 676 + } 675 677 676 678 return 0; 677 679
+10
drivers/iio/buffer/industrialio-triggered-buffer.c
··· 46 46 struct iio_buffer *buffer; 47 47 int ret; 48 48 49 + /* 50 + * iio_triggered_buffer_cleanup() assumes that the buffer allocated here 51 + * is assigned to indio_dev->buffer but this is only the case if this 52 + * function is the first caller to iio_device_attach_buffer(). If 53 + * indio_dev->buffer is already set then we can't proceed otherwise the 54 + * cleanup function will try to free a buffer that was not allocated here. 55 + */ 56 + if (indio_dev->buffer) 57 + return -EADDRINUSE; 58 + 49 59 buffer = iio_kfifo_allocate(); 50 60 if (!buffer) { 51 61 ret = -ENOMEM;
+2 -2
drivers/iio/common/ms_sensors/ms_sensors_i2c.c
··· 15 15 /* Conversion times in us */ 16 16 static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000, 17 17 13000, 7000 }; 18 - static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000, 19 - 5000, 8000 }; 18 + static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000, 19 + 3000, 8000 }; 20 20 static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100, 21 21 4100, 8220, 16440 }; 22 22
+80 -53
drivers/iio/imu/adis16475.c
··· 70 70 #define ADIS16475_MAX_SCAN_DATA 20 71 71 /* spi max speed in brust mode */ 72 72 #define ADIS16475_BURST_MAX_SPEED 1000000 73 - #define ADIS16475_LSB_DEC_MASK BIT(0) 74 - #define ADIS16475_LSB_FIR_MASK BIT(1) 73 + #define ADIS16475_LSB_DEC_MASK 0 74 + #define ADIS16475_LSB_FIR_MASK 1 75 75 #define ADIS16500_BURST_DATA_SEL_0_CHN_MASK GENMASK(5, 0) 76 76 #define ADIS16500_BURST_DATA_SEL_1_CHN_MASK GENMASK(12, 7) 77 77 ··· 1406 1406 return 0; 1407 1407 } 1408 1408 1409 + 1410 + static int adis16475_probe(struct spi_device *spi) 1411 + { 1412 + struct iio_dev *indio_dev; 1413 + struct adis16475 *st; 1414 + int ret; 1415 + 1416 + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 1417 + if (!indio_dev) 1418 + return -ENOMEM; 1419 + 1420 + st = iio_priv(indio_dev); 1421 + 1422 + st->info = spi_get_device_match_data(spi); 1423 + if (!st->info) 1424 + return -EINVAL; 1425 + 1426 + ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data); 1427 + if (ret) 1428 + return ret; 1429 + 1430 + indio_dev->name = st->info->name; 1431 + indio_dev->channels = st->info->channels; 1432 + indio_dev->num_channels = st->info->num_channels; 1433 + indio_dev->info = &adis16475_info; 1434 + indio_dev->modes = INDIO_DIRECT_MODE; 1435 + 1436 + ret = __adis_initial_startup(&st->adis); 1437 + if (ret) 1438 + return ret; 1439 + 1440 + ret = adis16475_config_irq_pin(st); 1441 + if (ret) 1442 + return ret; 1443 + 1444 + ret = adis16475_config_sync_mode(st); 1445 + if (ret) 1446 + return ret; 1447 + 1448 + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, 1449 + adis16475_trigger_handler); 1450 + if (ret) 1451 + return ret; 1452 + 1453 + ret = devm_iio_device_register(&spi->dev, indio_dev); 1454 + if (ret) 1455 + return ret; 1456 + 1457 + adis16475_debugfs_init(indio_dev); 1458 + 1459 + return 0; 1460 + } 1461 + 1409 1462 static const struct of_device_id adis16475_of_match[] = { 1410 1463 { .compatible = "adi,adis16470", 1411 1464 .data = 
&adis16475_chip_info[ADIS16470] }, ··· 1504 1451 }; 1505 1452 MODULE_DEVICE_TABLE(of, adis16475_of_match); 1506 1453 1507 - static int adis16475_probe(struct spi_device *spi) 1508 - { 1509 - struct iio_dev *indio_dev; 1510 - struct adis16475 *st; 1511 - int ret; 1512 - 1513 - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 1514 - if (!indio_dev) 1515 - return -ENOMEM; 1516 - 1517 - st = iio_priv(indio_dev); 1518 - 1519 - st->info = device_get_match_data(&spi->dev); 1520 - if (!st->info) 1521 - return -EINVAL; 1522 - 1523 - ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data); 1524 - if (ret) 1525 - return ret; 1526 - 1527 - indio_dev->name = st->info->name; 1528 - indio_dev->channels = st->info->channels; 1529 - indio_dev->num_channels = st->info->num_channels; 1530 - indio_dev->info = &adis16475_info; 1531 - indio_dev->modes = INDIO_DIRECT_MODE; 1532 - 1533 - ret = __adis_initial_startup(&st->adis); 1534 - if (ret) 1535 - return ret; 1536 - 1537 - ret = adis16475_config_irq_pin(st); 1538 - if (ret) 1539 - return ret; 1540 - 1541 - ret = adis16475_config_sync_mode(st); 1542 - if (ret) 1543 - return ret; 1544 - 1545 - ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, 1546 - adis16475_trigger_handler); 1547 - if (ret) 1548 - return ret; 1549 - 1550 - ret = devm_iio_device_register(&spi->dev, indio_dev); 1551 - if (ret) 1552 - return ret; 1553 - 1554 - adis16475_debugfs_init(indio_dev); 1555 - 1556 - return 0; 1557 - } 1454 + static const struct spi_device_id adis16475_ids[] = { 1455 + { "adis16470", (kernel_ulong_t)&adis16475_chip_info[ADIS16470] }, 1456 + { "adis16475-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_1] }, 1457 + { "adis16475-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_2] }, 1458 + { "adis16475-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_3] }, 1459 + { "adis16477-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_1] }, 1460 + { "adis16477-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_2] }, 1461 
+ { "adis16477-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_3] }, 1462 + { "adis16465-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_1] }, 1463 + { "adis16465-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_2] }, 1464 + { "adis16465-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_3] }, 1465 + { "adis16467-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_1] }, 1466 + { "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] }, 1467 + { "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] }, 1468 + { "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] }, 1469 + { "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] }, 1470 + { "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] }, 1471 + { "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] }, 1472 + { "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] }, 1473 + { "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] }, 1474 + { "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] }, 1475 + { } 1476 + }; 1477 + MODULE_DEVICE_TABLE(spi, adis16475_ids); 1558 1478 1559 1479 static struct spi_driver adis16475_driver = { 1560 1480 .driver = { ··· 1535 1509 .of_match_table = adis16475_of_match, 1536 1510 }, 1537 1511 .probe = adis16475_probe, 1512 + .id_table = adis16475_ids, 1538 1513 }; 1539 1514 module_spi_driver(adis16475_driver); 1540 1515
+2 -2
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 750 750 ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset, 751 751 chan->channel2, val); 752 752 mutex_unlock(&st->lock); 753 - return IIO_VAL_INT; 753 + return ret; 754 754 case IIO_ACCEL: 755 755 mutex_lock(&st->lock); 756 756 ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset, 757 757 chan->channel2, val); 758 758 mutex_unlock(&st->lock); 759 - return IIO_VAL_INT; 759 + return ret; 760 760 761 761 default: 762 762 return -EINVAL;
+2 -98
drivers/iio/light/hid-sensor-als.c
··· 14 14 #include "../common/hid-sensors/hid-sensor-trigger.h" 15 15 16 16 enum { 17 - CHANNEL_SCAN_INDEX_INTENSITY, 18 - CHANNEL_SCAN_INDEX_ILLUM, 19 - CHANNEL_SCAN_INDEX_COLOR_TEMP, 20 - CHANNEL_SCAN_INDEX_CHROMATICITY_X, 21 - CHANNEL_SCAN_INDEX_CHROMATICITY_Y, 17 + CHANNEL_SCAN_INDEX_INTENSITY = 0, 18 + CHANNEL_SCAN_INDEX_ILLUM = 1, 22 19 CHANNEL_SCAN_INDEX_MAX 23 20 }; 24 21 ··· 65 68 BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE), 66 69 .scan_index = CHANNEL_SCAN_INDEX_ILLUM, 67 70 }, 68 - { 69 - .type = IIO_COLORTEMP, 70 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 71 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | 72 - BIT(IIO_CHAN_INFO_SCALE) | 73 - BIT(IIO_CHAN_INFO_SAMP_FREQ) | 74 - BIT(IIO_CHAN_INFO_HYSTERESIS) | 75 - BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE), 76 - .scan_index = CHANNEL_SCAN_INDEX_COLOR_TEMP, 77 - }, 78 - { 79 - .type = IIO_CHROMATICITY, 80 - .modified = 1, 81 - .channel2 = IIO_MOD_X, 82 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 83 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | 84 - BIT(IIO_CHAN_INFO_SCALE) | 85 - BIT(IIO_CHAN_INFO_SAMP_FREQ) | 86 - BIT(IIO_CHAN_INFO_HYSTERESIS) | 87 - BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE), 88 - .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X, 89 - }, 90 - { 91 - .type = IIO_CHROMATICITY, 92 - .modified = 1, 93 - .channel2 = IIO_MOD_Y, 94 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 95 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | 96 - BIT(IIO_CHAN_INFO_SCALE) | 97 - BIT(IIO_CHAN_INFO_SAMP_FREQ) | 98 - BIT(IIO_CHAN_INFO_HYSTERESIS) | 99 - BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE), 100 - .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_Y, 101 - }, 102 71 IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP) 103 72 }; 104 73 ··· 102 139 report_id = als_state->als[chan->scan_index].report_id; 103 140 min = als_state->als[chan->scan_index].logical_minimum; 104 141 address = HID_USAGE_SENSOR_LIGHT_ILLUM; 105 - break; 106 - case CHANNEL_SCAN_INDEX_COLOR_TEMP: 107 - report_id = 
als_state->als[chan->scan_index].report_id; 108 - min = als_state->als[chan->scan_index].logical_minimum; 109 - address = HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE; 110 - break; 111 - case CHANNEL_SCAN_INDEX_CHROMATICITY_X: 112 - report_id = als_state->als[chan->scan_index].report_id; 113 - min = als_state->als[chan->scan_index].logical_minimum; 114 - address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X; 115 - break; 116 - case CHANNEL_SCAN_INDEX_CHROMATICITY_Y: 117 - report_id = als_state->als[chan->scan_index].report_id; 118 - min = als_state->als[chan->scan_index].logical_minimum; 119 - address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y; 120 142 break; 121 143 default: 122 144 report_id = -1; ··· 223 275 als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data; 224 276 ret = 0; 225 277 break; 226 - case HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE: 227 - als_state->scan.illum[CHANNEL_SCAN_INDEX_COLOR_TEMP] = sample_data; 228 - ret = 0; 229 - break; 230 - case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X: 231 - als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_X] = sample_data; 232 - ret = 0; 233 - break; 234 - case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y: 235 - als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_Y] = sample_data; 236 - ret = 0; 237 - break; 238 278 case HID_USAGE_SENSOR_TIME_TIMESTAMP: 239 279 als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes, 240 280 *(s64 *)raw_data); ··· 256 320 257 321 dev_dbg(&pdev->dev, "als %x:%x\n", st->als[i].index, 258 322 st->als[i].report_id); 259 - } 260 - 261 - ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT, 262 - usage_id, 263 - HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE, 264 - &st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP]); 265 - if (ret < 0) 266 - return ret; 267 - als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_COLOR_TEMP, 268 - st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].size); 269 - 270 - dev_dbg(&pdev->dev, "als %x:%x\n", 271 - st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].index, 
272 - st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].report_id); 273 - 274 - for (i = 0; i < 2; i++) { 275 - int next_scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X + i; 276 - 277 - ret = sensor_hub_input_get_attribute_info(hsdev, 278 - HID_INPUT_REPORT, usage_id, 279 - HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X + i, 280 - &st->als[next_scan_index]); 281 - if (ret < 0) 282 - return ret; 283 - 284 - als_adjust_channel_bit_mask(channels, 285 - CHANNEL_SCAN_INDEX_CHROMATICITY_X + i, 286 - st->als[next_scan_index].size); 287 - 288 - dev_dbg(&pdev->dev, "als %x:%x\n", 289 - st->als[next_scan_index].index, 290 - st->als[next_scan_index].report_id); 291 323 } 292 324 293 325 st->scale_precision = hid_sensor_format_scale(usage_id,
+1 -1
drivers/iio/magnetometer/tmag5273.c
··· 356 356 case IIO_CHAN_INFO_OFFSET: 357 357 switch (chan->type) { 358 358 case IIO_TEMP: 359 - *val = -266314; 359 + *val = -16005; 360 360 return IIO_VAL_INT; 361 361 default: 362 362 return -EINVAL;
+1
drivers/input/joystick/xpad.c
··· 286 286 { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 287 287 { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, 288 288 { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, 289 + { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE }, 289 290 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, 290 291 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, 291 292 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+42 -4
drivers/input/keyboard/atkbd.c
··· 765 765 ps2dev->serio->phys); 766 766 } 767 767 768 + #ifdef CONFIG_X86 769 + static bool atkbd_is_portable_device(void) 770 + { 771 + static const char * const chassis_types[] = { 772 + "8", /* Portable */ 773 + "9", /* Laptop */ 774 + "10", /* Notebook */ 775 + "14", /* Sub-Notebook */ 776 + "31", /* Convertible */ 777 + "32", /* Detachable */ 778 + }; 779 + int i; 780 + 781 + for (i = 0; i < ARRAY_SIZE(chassis_types); i++) 782 + if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i])) 783 + return true; 784 + 785 + return false; 786 + } 787 + 788 + /* 789 + * On many modern laptops ATKBD_CMD_GETID may cause problems, on these laptops 790 + * the controller is always in translated mode. In this mode mice/touchpads will 791 + * not work. So in this case simply assume a keyboard is connected to avoid 792 + * confusing some laptop keyboards. 793 + * 794 + * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is 795 + * ok in translated mode, only atkbd_select_set() checks atkbd->id and in 796 + * translated mode that is a no-op. 797 + */ 798 + static bool atkbd_skip_getid(struct atkbd *atkbd) 799 + { 800 + return atkbd->translated && atkbd_is_portable_device(); 801 + } 802 + #else 803 + static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; } 804 + #endif 805 + 768 806 /* 769 807 * atkbd_probe() probes for an AT keyboard on a serio port. 770 808 */ ··· 832 794 */ 833 795 834 796 param[0] = param[1] = 0xa5; /* initialize with invalid values */ 835 - if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) { 797 + if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) { 836 798 837 799 /* 838 - * If the get ID command failed, we check if we can at least set the LEDs on 839 - * the keyboard. This should work on every keyboard out there. It also turns 840 - * the LEDs off, which we want anyway. 800 + * If the get ID command was skipped or failed, we check if we can at least set 801 + * the LEDs on the keyboard. 
This should work on every keyboard out there. 802 + * It also turns the LEDs off, which we want anyway. 841 803 */ 842 804 param[0] = 0; 843 805 if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+3
drivers/input/keyboard/ipaq-micro-keys.c
··· 105 105 keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes, 106 106 keys->input->keycodesize * keys->input->keycodemax, 107 107 GFP_KERNEL); 108 + if (!keys->codes) 109 + return -ENOMEM; 110 + 108 111 keys->input->keycode = keys->codes; 109 112 110 113 __set_bit(EV_KEY, keys->input->evbit);
+5
drivers/input/misc/soc_button_array.c
··· 299 299 info->name = "power"; 300 300 info->event_code = KEY_POWER; 301 301 info->wakeup = true; 302 + } else if (upage == 0x01 && usage == 0xc6) { 303 + info->name = "airplane mode switch"; 304 + info->event_type = EV_SW; 305 + info->event_code = SW_RFKILL_ALL; 306 + info->active_low = false; 302 307 } else if (upage == 0x01 && usage == 0xca) { 303 308 info->name = "rotation lock switch"; 304 309 info->event_type = EV_SW;
+2 -3
drivers/input/mouse/amimouse.c
··· 125 125 return 0; 126 126 } 127 127 128 - static int __exit amimouse_remove(struct platform_device *pdev) 128 + static void __exit amimouse_remove(struct platform_device *pdev) 129 129 { 130 130 struct input_dev *dev = platform_get_drvdata(pdev); 131 131 132 132 input_unregister_device(dev); 133 - return 0; 134 133 } 135 134 136 135 static struct platform_driver amimouse_driver = { 137 - .remove = __exit_p(amimouse_remove), 136 + .remove_new = __exit_p(amimouse_remove), 138 137 .driver = { 139 138 .name = "amiga-mouse", 140 139 },
+1
drivers/input/mouse/synaptics.c
··· 183 183 "LEN009b", /* T580 */ 184 184 "LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */ 185 185 "LEN040f", /* P1 Gen 3 */ 186 + "LEN0411", /* L14 Gen 1 */ 186 187 "LEN200f", /* T450s */ 187 188 "LEN2044", /* L470 */ 188 189 "LEN2054", /* E480 */
+8
drivers/input/serio/i8042-acpipnpio.h
··· 361 361 .driver_data = (void *)(SERIO_QUIRK_DRITEK) 362 362 }, 363 363 { 364 + /* Acer TravelMate P459-G2-M */ 365 + .matches = { 366 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 367 + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"), 368 + }, 369 + .driver_data = (void *)(SERIO_QUIRK_NOMUX) 370 + }, 371 + { 364 372 /* Amoi M636/A737 */ 365 373 .matches = { 366 374 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
+3
drivers/interconnect/core.c
··· 395 395 } 396 396 mutex_unlock(&icc_lock); 397 397 398 + if (!node) 399 + return ERR_PTR(-EINVAL); 400 + 398 401 if (IS_ERR(node)) 399 402 return ERR_CAST(node); 400 403
+1 -1
drivers/interconnect/qcom/icc-rpm.c
··· 307 307 308 308 if (qn->ib_coeff) { 309 309 agg_peak_rate = qn->max_peak[ctx] * 100; 310 - agg_peak_rate = div_u64(qn->max_peak[ctx], qn->ib_coeff); 310 + agg_peak_rate = div_u64(agg_peak_rate, qn->ib_coeff); 311 311 } else { 312 312 agg_peak_rate = qn->max_peak[ctx]; 313 313 }
+1
drivers/interconnect/qcom/sm8250.c
··· 1995 1995 .driver = { 1996 1996 .name = "qnoc-sm8250", 1997 1997 .of_match_table = qnoc_of_match, 1998 + .sync_state = icc_sync_state, 1998 1999 }, 1999 2000 }; 2000 2001 module_platform_driver(qnoc_driver);
+1
drivers/md/Kconfig
··· 660 660 661 661 config DM_AUDIT 662 662 bool "DM audit events" 663 + depends on BLK_DEV_DM 663 664 depends on AUDIT 664 665 help 665 666 Generate audit events for device-mapper.
+6 -5
drivers/md/dm-integrity.c
··· 1755 1755 sectors_to_process = dio->range.n_sectors; 1756 1756 1757 1757 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { 1758 + struct bio_vec bv_copy = bv; 1758 1759 unsigned int pos; 1759 1760 char *mem, *checksums_ptr; 1760 1761 1761 1762 again: 1762 - mem = bvec_kmap_local(&bv); 1763 + mem = bvec_kmap_local(&bv_copy); 1763 1764 pos = 0; 1764 1765 checksums_ptr = checksums; 1765 1766 do { ··· 1769 1768 sectors_to_process -= ic->sectors_per_block; 1770 1769 pos += ic->sectors_per_block << SECTOR_SHIFT; 1771 1770 sector += ic->sectors_per_block; 1772 - } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); 1771 + } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack); 1773 1772 kunmap_local(mem); 1774 1773 1775 1774 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, ··· 1794 1793 if (!sectors_to_process) 1795 1794 break; 1796 1795 1797 - if (unlikely(pos < bv.bv_len)) { 1798 - bv.bv_offset += pos; 1799 - bv.bv_len -= pos; 1796 + if (unlikely(pos < bv_copy.bv_len)) { 1797 + bv_copy.bv_offset += pos; 1798 + bv_copy.bv_len -= pos; 1800 1799 goto again; 1801 1800 } 1802 1801 }
+3
drivers/md/dm-raid.c
··· 3317 3317 mddev_lock_nointr(&rs->md); 3318 3318 md_stop(&rs->md); 3319 3319 mddev_unlock(&rs->md); 3320 + 3321 + if (work_pending(&rs->md.event_work)) 3322 + flush_work(&rs->md.event_work); 3320 3323 raid_set_free(rs); 3321 3324 } 3322 3325
+8 -3
drivers/md/md.c
··· 82 82 83 83 static DECLARE_WAIT_QUEUE_HEAD(resync_wait); 84 84 static struct workqueue_struct *md_wq; 85 + 86 + /* 87 + * This workqueue is used for sync_work to register new sync_thread, and for 88 + * del_work to remove rdev, and for event_work that is only set by dm-raid. 89 + * 90 + * Noted that sync_work will grab reconfig_mutex, hence never flush this 91 + * workqueue whith reconfig_mutex grabbed. 92 + */ 85 93 static struct workqueue_struct *md_misc_wq; 86 94 struct workqueue_struct *md_bitmap_wq; 87 95 ··· 6338 6330 struct md_personality *pers = mddev->pers; 6339 6331 md_bitmap_destroy(mddev); 6340 6332 mddev_detach(mddev); 6341 - /* Ensure ->event_work is done */ 6342 - if (mddev->event_work.func) 6343 - flush_workqueue(md_misc_wq); 6344 6333 spin_lock(&mddev->lock); 6345 6334 mddev->pers = NULL; 6346 6335 spin_unlock(&mddev->lock);
+4 -1
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
··· 866 866 netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", 867 867 offset, adapter->ring_size); 868 868 err = -1; 869 - goto failed; 869 + goto free_buffer; 870 870 } 871 871 872 872 return 0; 873 + free_buffer: 874 + kfree(tx_ring->tx_buffer); 875 + tx_ring->tx_buffer = NULL; 873 876 failed: 874 877 if (adapter->ring_vir_addr != NULL) { 875 878 dma_free_coherent(&pdev->dev, adapter->ring_size,
+2 -9
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
··· 59 59 for (i = 0; i < num_frags ; i++) { 60 60 skb_frag_t *frag = &sinfo->frags[i]; 61 61 struct bnxt_sw_tx_bd *frag_tx_buf; 62 - struct pci_dev *pdev = bp->pdev; 63 62 dma_addr_t frag_mapping; 64 63 int frag_len; 65 64 ··· 72 73 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 73 74 74 75 frag_len = skb_frag_size(frag); 75 - frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, 76 - frag_len, DMA_TO_DEVICE); 77 - 78 - if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping))) 79 - return NULL; 80 - 81 - dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); 82 - 83 76 flags = frag_len << TX_BD_LEN_SHIFT; 84 77 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 78 + frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) + 79 + skb_frag_off(frag); 85 80 txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); 86 81 87 82 len = frag_len;
+1 -1
drivers/net/ethernet/intel/i40e/i40e_register.h
··· 207 207 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 208 208 #define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT) 209 209 #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 210 - #define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT) 210 + #define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT) 211 211 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 212 212 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) 213 213 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+2 -2
drivers/net/ethernet/intel/i40e/i40e_type.h
··· 37 37 #define I40E_QTX_CTL_VM_QUEUE 0x1 38 38 #define I40E_QTX_CTL_PF_QUEUE 0x2 39 39 40 - #define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK 40 + #define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1) 41 41 #define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) 42 42 #define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2) 43 43 44 - #define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK 44 + #define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0) 45 45 #define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0) 46 46 #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) 47 47 #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3)
+2 -2
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 1850 1850 linkmode_zero(ks->link_modes.supported); 1851 1851 linkmode_zero(ks->link_modes.advertising); 1852 1852 1853 - for (i = 0; i < BITS_PER_TYPE(u64); i++) { 1853 + for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) { 1854 1854 if (phy_types_low & BIT_ULL(i)) 1855 1855 ice_linkmode_set_bit(&phy_type_low_lkup[i], ks, 1856 1856 req_speeds, advert_phy_type_lo, 1857 1857 i); 1858 1858 } 1859 1859 1860 - for (i = 0; i < BITS_PER_TYPE(u64); i++) { 1860 + for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) { 1861 1861 if (phy_types_high & BIT_ULL(i)) 1862 1862 ice_linkmode_set_bit(&phy_type_high_lkup[i], ks, 1863 1863 req_speeds, advert_phy_type_hi,
+2
drivers/net/ethernet/intel/ice/ice_lag.c
··· 1981 1981 int n, err; 1982 1982 1983 1983 ice_lag_init_feature_support_flag(pf); 1984 + if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) 1985 + return 0; 1984 1986 1985 1987 pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL); 1986 1988 if (!pf->lag)
+3 -4
drivers/net/ethernet/intel/ice/ice_lib.c
··· 2371 2371 } else { 2372 2372 max_txqs[i] = vsi->alloc_txq; 2373 2373 } 2374 + 2375 + if (vsi->type == ICE_VSI_PF) 2376 + max_txqs[i] += vsi->num_xdp_txq; 2374 2377 } 2375 2378 2376 2379 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); ··· 2623 2620 if (vsi->type == ICE_VSI_VF && 2624 2621 vsi->agg_node && vsi->agg_node->valid) 2625 2622 vsi->agg_node->num_vsis--; 2626 - if (vsi->agg_node) { 2627 - vsi->agg_node->valid = false; 2628 - vsi->agg_node->agg_id = 0; 2629 - } 2630 2623 } 2631 2624 2632 2625 /**
+14 -3
drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
··· 399 399 static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) 400 400 { 401 401 struct otx2_nic *pfvf = netdev_priv(dev); 402 + u8 old_pfc_en; 402 403 int err; 403 404 404 - /* Save PFC configuration to interface */ 405 + old_pfc_en = pfvf->pfc_en; 405 406 pfvf->pfc_en = pfc->pfc_en; 406 407 407 408 if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX) ··· 412 411 * supported by the tx queue configuration 413 412 */ 414 413 err = otx2_check_pfc_config(pfvf); 415 - if (err) 414 + if (err) { 415 + pfvf->pfc_en = old_pfc_en; 416 416 return err; 417 + } 417 418 418 419 process_pfc: 419 420 err = otx2_config_priority_flow_ctrl(pfvf); 420 - if (err) 421 + if (err) { 422 + pfvf->pfc_en = old_pfc_en; 421 423 return err; 424 + } 422 425 423 426 /* Request Per channel Bpids */ 424 427 if (pfc->pfc_en) ··· 430 425 431 426 err = otx2_pfc_txschq_update(pfvf); 432 427 if (err) { 428 + if (pfc->pfc_en) 429 + otx2_nix_config_bp(pfvf, false); 430 + 431 + otx2_pfc_txschq_stop(pfvf); 432 + pfvf->pfc_en = old_pfc_en; 433 + otx2_config_priority_flow_ctrl(pfvf); 433 434 dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__); 434 435 return err; 435 436 }
+3
drivers/net/ethernet/mediatek/mtk_wed_wo.c
··· 291 291 for (i = 0; i < q->n_desc; i++) { 292 292 struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; 293 293 294 + if (!entry->buf) 295 + continue; 296 + 294 297 dma_unmap_single(wo->hw->dev, entry->addr, entry->len, 295 298 DMA_TO_DEVICE); 296 299 skb_free_frag(entry->buf);
+7 -5
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 156 156 return token; 157 157 } 158 158 159 - static int cmd_alloc_index(struct mlx5_cmd *cmd) 159 + static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent) 160 160 { 161 161 unsigned long flags; 162 162 int ret; 163 163 164 164 spin_lock_irqsave(&cmd->alloc_lock, flags); 165 165 ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); 166 - if (ret < cmd->vars.max_reg_cmds) 166 + if (ret < cmd->vars.max_reg_cmds) { 167 167 clear_bit(ret, &cmd->vars.bitmask); 168 + ent->idx = ret; 169 + cmd->ent_arr[ent->idx] = ent; 170 + } 168 171 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 169 172 170 173 return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM; ··· 982 979 sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem; 983 980 down(sem); 984 981 if (!ent->page_queue) { 985 - alloc_ret = cmd_alloc_index(cmd); 982 + alloc_ret = cmd_alloc_index(cmd, ent); 986 983 if (alloc_ret < 0) { 987 984 mlx5_core_err_rl(dev, "failed to allocate command entry\n"); 988 985 if (ent->callback) { ··· 997 994 up(sem); 998 995 return; 999 996 } 1000 - ent->idx = alloc_ret; 1001 997 } else { 1002 998 ent->idx = cmd->vars.max_reg_cmds; 1003 999 spin_lock_irqsave(&cmd->alloc_lock, flags); 1004 1000 clear_bit(ent->idx, &cmd->vars.bitmask); 1001 + cmd->ent_arr[ent->idx] = ent; 1005 1002 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 1006 1003 } 1007 1004 1008 - cmd->ent_arr[ent->idx] = ent; 1009 1005 lay = get_inst(cmd, ent->idx); 1010 1006 ent->lay = lay; 1011 1007 memset(lay, 0, sizeof(*lay));
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
··· 718 718 719 719 while (block_timestamp > tracer->last_timestamp) { 720 720 /* Check block override if it's not the first block */ 721 - if (!tracer->last_timestamp) { 721 + if (tracer->last_timestamp) { 722 722 u64 *ts_event; 723 723 /* To avoid block override be the HW in case of buffer 724 724 * wraparound, the time stamp of the previous block
+1
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
··· 154 154 in = kvzalloc(inlen, GFP_KERNEL); 155 155 if (!in || !ft->g) { 156 156 kfree(ft->g); 157 + ft->g = NULL; 157 158 kvfree(in); 158 159 return -ENOMEM; 159 160 }
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
··· 197 197 } 198 198 esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; 199 199 esw_attr->out_count++; 200 - /* attr->dests[].rep is resolved when we handle encap */ 200 + /* attr->dests[].vport is resolved when we handle encap */ 201 201 202 202 return 0; 203 203 } ··· 270 270 271 271 out_priv = netdev_priv(out_dev); 272 272 rpriv = out_priv->ppriv; 273 - esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; 273 + esw_attr->dests[esw_attr->out_count].vport_valid = true; 274 + esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport; 274 275 esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; 275 276 276 277 esw_attr->out_count++;
+28 -22
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 300 300 if (err) 301 301 goto destroy_neigh_entry; 302 302 303 + e->encap_size = ipv4_encap_size; 304 + e->encap_header = encap_header; 305 + encap_header = NULL; 306 + 303 307 if (!(nud_state & NUD_VALID)) { 304 308 neigh_event_send(attr.n, NULL); 305 309 /* the encap entry will be made valid on neigh update event ··· 314 310 315 311 memset(&reformat_params, 0, sizeof(reformat_params)); 316 312 reformat_params.type = e->reformat_type; 317 - reformat_params.size = ipv4_encap_size; 318 - reformat_params.data = encap_header; 313 + reformat_params.size = e->encap_size; 314 + reformat_params.data = e->encap_header; 319 315 e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, 320 316 MLX5_FLOW_NAMESPACE_FDB); 321 317 if (IS_ERR(e->pkt_reformat)) { ··· 323 319 goto destroy_neigh_entry; 324 320 } 325 321 326 - e->encap_size = ipv4_encap_size; 327 - e->encap_header = encap_header; 328 322 e->flags |= MLX5_ENCAP_ENTRY_VALID; 329 323 mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); 330 324 mlx5e_route_lookup_ipv4_put(&attr); ··· 405 403 if (err) 406 404 goto free_encap; 407 405 406 + e->encap_size = ipv4_encap_size; 407 + kfree(e->encap_header); 408 + e->encap_header = encap_header; 409 + encap_header = NULL; 410 + 408 411 if (!(nud_state & NUD_VALID)) { 409 412 neigh_event_send(attr.n, NULL); 410 413 /* the encap entry will be made valid on neigh update event 411 414 * and not used before that. 
412 415 */ 413 - goto free_encap; 416 + goto release_neigh; 414 417 } 415 418 416 419 memset(&reformat_params, 0, sizeof(reformat_params)); 417 420 reformat_params.type = e->reformat_type; 418 - reformat_params.size = ipv4_encap_size; 419 - reformat_params.data = encap_header; 421 + reformat_params.size = e->encap_size; 422 + reformat_params.data = e->encap_header; 420 423 e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, 421 424 MLX5_FLOW_NAMESPACE_FDB); 422 425 if (IS_ERR(e->pkt_reformat)) { 423 426 err = PTR_ERR(e->pkt_reformat); 424 427 goto free_encap; 425 428 } 426 - 427 - e->encap_size = ipv4_encap_size; 428 - kfree(e->encap_header); 429 - e->encap_header = encap_header; 430 429 431 430 e->flags |= MLX5_ENCAP_ENTRY_VALID; 432 431 mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); ··· 570 567 if (err) 571 568 goto destroy_neigh_entry; 572 569 570 + e->encap_size = ipv6_encap_size; 571 + e->encap_header = encap_header; 572 + encap_header = NULL; 573 + 573 574 if (!(nud_state & NUD_VALID)) { 574 575 neigh_event_send(attr.n, NULL); 575 576 /* the encap entry will be made valid on neigh update event ··· 584 577 585 578 memset(&reformat_params, 0, sizeof(reformat_params)); 586 579 reformat_params.type = e->reformat_type; 587 - reformat_params.size = ipv6_encap_size; 588 - reformat_params.data = encap_header; 580 + reformat_params.size = e->encap_size; 581 + reformat_params.data = e->encap_header; 589 582 e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, 590 583 MLX5_FLOW_NAMESPACE_FDB); 591 584 if (IS_ERR(e->pkt_reformat)) { ··· 593 586 goto destroy_neigh_entry; 594 587 } 595 588 596 - e->encap_size = ipv6_encap_size; 597 - e->encap_header = encap_header; 598 589 e->flags |= MLX5_ENCAP_ENTRY_VALID; 599 590 mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); 600 591 mlx5e_route_lookup_ipv6_put(&attr); ··· 674 669 if (err) 675 670 goto free_encap; 676 671 672 + e->encap_size = ipv6_encap_size; 673 
+ kfree(e->encap_header); 674 + e->encap_header = encap_header; 675 + encap_header = NULL; 676 + 677 677 if (!(nud_state & NUD_VALID)) { 678 678 neigh_event_send(attr.n, NULL); 679 679 /* the encap entry will be made valid on neigh update event 680 680 * and not used before that. 681 681 */ 682 - goto free_encap; 682 + goto release_neigh; 683 683 } 684 684 685 685 memset(&reformat_params, 0, sizeof(reformat_params)); 686 686 reformat_params.type = e->reformat_type; 687 - reformat_params.size = ipv6_encap_size; 688 - reformat_params.data = encap_header; 687 + reformat_params.size = e->encap_size; 688 + reformat_params.data = e->encap_header; 689 689 e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, 690 690 MLX5_FLOW_NAMESPACE_FDB); 691 691 if (IS_ERR(e->pkt_reformat)) { 692 692 err = PTR_ERR(e->pkt_reformat); 693 693 goto free_encap; 694 694 } 695 - 696 - e->encap_size = ipv6_encap_size; 697 - kfree(e->encap_header); 698 - e->encap_header = encap_header; 699 695 700 696 e->flags |= MLX5_ENCAP_ENTRY_VALID; 701 697 mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
··· 1064 1064 1065 1065 out_priv = netdev_priv(encap_dev); 1066 1066 rpriv = out_priv->ppriv; 1067 - esw_attr->dests[out_index].rep = rpriv->rep; 1067 + esw_attr->dests[out_index].vport_valid = true; 1068 + esw_attr->dests[out_index].vport = rpriv->rep->vport; 1068 1069 esw_attr->dests[out_index].mdev = out_priv->mdev; 1069 1070 } 1070 1071
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
··· 493 493 dma_addr_t dma_addr = xdptxd->dma_addr; 494 494 u32 dma_len = xdptxd->len; 495 495 u16 ds_cnt, inline_hdr_sz; 496 + unsigned int frags_size; 496 497 u8 num_wqebbs = 1; 497 498 int num_frags = 0; 498 499 bool inline_ok; ··· 504 503 505 504 inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE || 506 505 dma_len >= MLX5E_XDP_MIN_INLINE; 506 + frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0; 507 507 508 - if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) { 508 + if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) { 509 509 stats->err++; 510 510 return false; 511 511 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 2142 2142 2143 2143 static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev) 2144 2144 { 2145 - mdev->num_block_tc++; 2145 + mdev->num_block_tc--; 2146 2146 } 2147 2147 2148 2148 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 49 49 count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 50 50 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), 51 51 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id); 52 - if (count == sizeof(drvinfo->fw_version)) 52 + if (count >= sizeof(drvinfo->fw_version)) 53 53 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 54 54 "%d.%d.%04d", fw_rev_maj(mdev), 55 55 fw_rev_min(mdev), fw_rev_sub(mdev));
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 78 78 count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 79 79 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), 80 80 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id); 81 - if (count == sizeof(drvinfo->fw_version)) 81 + if (count >= sizeof(drvinfo->fw_version)) 82 82 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 83 83 "%d.%d.%04d", fw_rev_maj(mdev), 84 84 fw_rev_min(mdev), fw_rev_sub(mdev));
+7 -3
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 3778 3778 break; 3779 3779 case FLOW_ACTION_ACCEPT: 3780 3780 case FLOW_ACTION_PIPE: 3781 - if (set_branch_dest_ft(flow->priv, attr)) 3781 + err = set_branch_dest_ft(flow->priv, attr); 3782 + if (err) 3782 3783 goto out_err; 3783 3784 break; 3784 3785 case FLOW_ACTION_JUMP: ··· 3789 3788 goto out_err; 3790 3789 } 3791 3790 *jump_count = cond->extval; 3792 - if (set_branch_dest_ft(flow->priv, attr)) 3791 + err = set_branch_dest_ft(flow->priv, attr); 3792 + if (err) 3793 3793 goto out_err; 3794 3794 break; 3795 3795 default: ··· 5738 5736 5739 5737 esw = priv->mdev->priv.eswitch; 5740 5738 attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping); 5741 - if (IS_ERR(attr->act_id_restore_rule)) 5739 + if (IS_ERR(attr->act_id_restore_rule)) { 5740 + err = PTR_ERR(attr->act_id_restore_rule); 5742 5741 goto err_rule; 5742 + } 5743 5743 5744 5744 return 0; 5745 5745
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 526 526 u8 total_vlan; 527 527 struct { 528 528 u32 flags; 529 - struct mlx5_eswitch_rep *rep; 529 + bool vport_valid; 530 + u16 vport; 530 531 struct mlx5_pkt_reformat *pkt_reformat; 531 532 struct mlx5_core_dev *mdev; 532 533 struct mlx5_termtbl_handle *termtbl;
+19 -12
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 287 287 for (i = from; i < to; i++) 288 288 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) 289 289 mlx5_chains_put_table(chains, 0, 1, 0); 290 - else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, 290 + else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, 291 291 esw_attr->dests[i].mdev)) 292 - mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport, 293 - false); 292 + mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false); 294 293 } 295 294 296 295 static bool ··· 357 358 * this criteria. 358 359 */ 359 360 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { 360 - if (esw_attr->dests[i].rep && 361 - mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, 361 + if (esw_attr->dests[i].vport_valid && 362 + mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, 362 363 esw_attr->dests[i].mdev)) { 363 364 result = true; 364 365 } else { ··· 387 388 dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 388 389 389 390 dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, 390 - esw_attr->dests[j].rep->vport, false); 391 + esw_attr->dests[j].vport, false); 391 392 if (IS_ERR(dest[*i].ft)) { 392 393 err = PTR_ERR(dest[*i].ft); 393 394 goto err_indir_tbl_get; ··· 431 432 int attr_idx) 432 433 { 433 434 if (esw->offloads.ft_ipsec_tx_pol && 434 - esw_attr->dests[attr_idx].rep && 435 - esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK && 435 + esw_attr->dests[attr_idx].vport_valid && 436 + esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK && 436 437 /* To be aligned with software, encryption is needed only for tunnel device */ 437 438 (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) && 438 - esw_attr->dests[attr_idx].rep != esw_attr->in_rep && 439 + esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport && 439 440 esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev)) 440 441 return true; 441 442 ··· 468 469 int attr_idx, 
int dest_idx, bool pkt_reformat) 469 470 { 470 471 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 471 - dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport; 472 + dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport; 472 473 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { 473 474 dest[dest_idx].vport.vhca_id = 474 475 MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id); ··· 1176 1177 struct mlx5_flow_handle *flow; 1177 1178 struct mlx5_flow_spec *spec; 1178 1179 struct mlx5_vport *vport; 1180 + int err, pfindex; 1179 1181 unsigned long i; 1180 1182 void *misc; 1181 - int err; 1182 1183 1183 1184 if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev)) 1184 1185 return 0; ··· 1254 1255 flows[vport->index] = flow; 1255 1256 } 1256 1257 } 1257 - esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows; 1258 + 1259 + pfindex = mlx5_get_dev_index(peer_dev); 1260 + if (pfindex >= MLX5_MAX_PORTS) { 1261 + esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n", 1262 + pfindex, MLX5_MAX_PORTS); 1263 + err = -EINVAL; 1264 + goto add_ec_vf_flow_err; 1265 + } 1266 + esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows; 1258 1267 1259 1268 kvfree(spec); 1260 1269 return 0;
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
··· 233 233 234 234 /* hairpin */ 235 235 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) 236 - if (!esw_attr->dest_int_port && esw_attr->dests[i].rep && 237 - esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK) 236 + if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid && 237 + esw_attr->dests[i].vport == MLX5_VPORT_UPLINK) 238 238 return true; 239 239 240 240 return false;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 277 277 req_list_size = max_list_size; 278 278 } 279 279 280 - out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) + 280 + out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) + 281 281 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); 282 282 283 283 out = kvzalloc(out_sz, GFP_KERNEL);
+3
drivers/net/ethernet/micrel/ks8851.h
··· 350 350 * @rxd: Space for receiving SPI data, in DMA-able space. 351 351 * @txd: Space for transmitting SPI data, in DMA-able space. 352 352 * @msg_enable: The message flags controlling driver output (see ethtool). 353 + * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR). 354 + * @queued_len: Space required in hardware TX buffer for queued packets in txq. 353 355 * @fid: Incrementing frame id tag. 354 356 * @rc_ier: Cached copy of KS_IER. 355 357 * @rc_ccr: Cached copy of KS_CCR. ··· 401 399 struct work_struct rxctrl_work; 402 400 403 401 struct sk_buff_head txq; 402 + unsigned int queued_len; 404 403 405 404 struct eeprom_93cx6 eeprom; 406 405 struct regulator *vdd_reg;
+11 -11
drivers/net/ethernet/micrel/ks8851_common.c
··· 362 362 handled |= IRQ_RXPSI; 363 363 364 364 if (status & IRQ_TXI) { 365 - handled |= IRQ_TXI; 366 - 367 - /* no lock here, tx queue should have been stopped */ 368 - 369 - /* update our idea of how much tx space is available to the 370 - * system */ 371 - ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); 365 + unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR); 372 366 373 367 netif_dbg(ks, intr, ks->netdev, 374 - "%s: txspace %d\n", __func__, ks->tx_space); 368 + "%s: txspace %d\n", __func__, tx_space); 369 + 370 + spin_lock(&ks->statelock); 371 + ks->tx_space = tx_space; 372 + if (netif_queue_stopped(ks->netdev)) 373 + netif_wake_queue(ks->netdev); 374 + spin_unlock(&ks->statelock); 375 + 376 + handled |= IRQ_TXI; 375 377 } 376 378 377 379 if (status & IRQ_RXI) ··· 415 413 416 414 if (status & IRQ_LCI) 417 415 mii_check_link(&ks->mii); 418 - 419 - if (status & IRQ_TXI) 420 - netif_wake_queue(ks->netdev); 421 416 422 417 return IRQ_HANDLED; 423 418 } ··· 499 500 ks8851_wrreg16(ks, KS_ISR, ks->rc_ier); 500 501 ks8851_wrreg16(ks, KS_IER, ks->rc_ier); 501 502 503 + ks->queued_len = 0; 502 504 netif_start_queue(ks->netdev); 503 505 504 506 netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+27 -15
drivers/net/ethernet/micrel/ks8851_spi.c
··· 287 287 } 288 288 289 289 /** 290 + * calc_txlen - calculate size of message to send packet 291 + * @len: Length of data 292 + * 293 + * Returns the size of the TXFIFO message needed to send 294 + * this packet. 295 + */ 296 + static unsigned int calc_txlen(unsigned int len) 297 + { 298 + return ALIGN(len + 4, 4); 299 + } 300 + 301 + /** 290 302 * ks8851_rx_skb_spi - receive skbuff 291 303 * @ks: The device state 292 304 * @skb: The skbuff ··· 317 305 */ 318 306 static void ks8851_tx_work(struct work_struct *work) 319 307 { 308 + unsigned int dequeued_len = 0; 320 309 struct ks8851_net_spi *kss; 310 + unsigned short tx_space; 321 311 struct ks8851_net *ks; 322 312 unsigned long flags; 323 313 struct sk_buff *txb; ··· 336 322 last = skb_queue_empty(&ks->txq); 337 323 338 324 if (txb) { 325 + dequeued_len += calc_txlen(txb->len); 326 + 339 327 ks8851_wrreg16_spi(ks, KS_RXQCR, 340 328 ks->rc_rxqcr | RXQCR_SDA); 341 329 ks8851_wrfifo_spi(ks, txb, last); ··· 347 331 ks8851_done_tx(ks, txb); 348 332 } 349 333 } 334 + 335 + tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR); 336 + 337 + spin_lock(&ks->statelock); 338 + ks->queued_len -= dequeued_len; 339 + ks->tx_space = tx_space; 340 + spin_unlock(&ks->statelock); 350 341 351 342 ks8851_unlock_spi(ks, &flags); 352 343 } ··· 367 344 struct ks8851_net_spi *kss = to_ks8851_spi(ks); 368 345 369 346 flush_work(&kss->tx_work); 370 - } 371 - 372 - /** 373 - * calc_txlen - calculate size of message to send packet 374 - * @len: Length of data 375 - * 376 - * Returns the size of the TXFIFO message needed to send 377 - * this packet. 
378 - */ 379 - static unsigned int calc_txlen(unsigned int len) 380 - { 381 - return ALIGN(len + 4, 4); 382 347 } 383 348 384 349 /** ··· 397 386 398 387 spin_lock(&ks->statelock); 399 388 400 - if (needed > ks->tx_space) { 389 + if (ks->queued_len + needed > ks->tx_space) { 401 390 netif_stop_queue(dev); 402 391 ret = NETDEV_TX_BUSY; 403 392 } else { 404 - ks->tx_space -= needed; 393 + ks->queued_len += needed; 405 394 skb_queue_tail(&ks->txq, skb); 406 395 } 407 396 408 397 spin_unlock(&ks->statelock); 409 - schedule_work(&kss->tx_work); 398 + if (ret == NETDEV_TX_OK) 399 + schedule_work(&kss->tx_work); 410 400 411 401 return ret; 412 402 }
+1
drivers/net/ethernet/microsoft/Kconfig
··· 20 20 depends on PCI_MSI && X86_64 21 21 depends on PCI_HYPERV 22 22 select AUXILIARY_BUS 23 + select PAGE_POOL 23 24 help 24 25 This driver supports Microsoft Azure Network Adapter (MANA). 25 26 So far, the driver is only supported on X86_64.
+8 -8
drivers/net/ethernet/mscc/ocelot_stats.c
··· 582 582 rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64]; 583 583 rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127]; 584 584 rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255]; 585 - rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255]; 586 - rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511]; 587 - rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023]; 588 - rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526]; 585 + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511]; 586 + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023]; 587 + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526]; 588 + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX]; 589 589 } 590 590 591 591 static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port, ··· 610 610 rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64]; 611 611 rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127]; 612 612 rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255]; 613 - rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_128_255]; 614 - rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_256_511]; 615 - rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_512_1023]; 616 - rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1024_1526]; 613 + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511]; 614 + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023]; 615 + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526]; 616 + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX]; 617 617 } 618 618 619 619 void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
··· 237 237 */ 238 238 ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); 239 239 240 - if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN) 240 + if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)) 241 241 return; 242 242 243 243 num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
+6 -76
drivers/net/ethernet/wangxun/libwx/wx_lib.c
··· 160 160 return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); 161 161 } 162 162 163 - static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer, 164 - int rx_buffer_pgcnt) 165 - { 166 - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 167 - struct page *page = rx_buffer->page; 168 - 169 - /* avoid re-using remote and pfmemalloc pages */ 170 - if (!dev_page_is_reusable(page)) 171 - return false; 172 - 173 - #if (PAGE_SIZE < 8192) 174 - /* if we are only owner of page we can reuse it */ 175 - if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 176 - return false; 177 - #endif 178 - 179 - /* If we have drained the page fragment pool we need to update 180 - * the pagecnt_bias and page count so that we fully restock the 181 - * number of references the driver holds. 182 - */ 183 - if (unlikely(pagecnt_bias == 1)) { 184 - page_ref_add(page, USHRT_MAX - 1); 185 - rx_buffer->pagecnt_bias = USHRT_MAX; 186 - } 187 - 188 - return true; 189 - } 190 - 191 - /** 192 - * wx_reuse_rx_page - page flip buffer and store it back on the ring 193 - * @rx_ring: rx descriptor ring to store buffers on 194 - * @old_buff: donor buffer to have page reused 195 - * 196 - * Synchronizes page for reuse by the adapter 197 - **/ 198 - static void wx_reuse_rx_page(struct wx_ring *rx_ring, 199 - struct wx_rx_buffer *old_buff) 200 - { 201 - u16 nta = rx_ring->next_to_alloc; 202 - struct wx_rx_buffer *new_buff; 203 - 204 - new_buff = &rx_ring->rx_buffer_info[nta]; 205 - 206 - /* update, and store next to alloc */ 207 - nta++; 208 - rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; 209 - 210 - /* transfer page from old buffer to new buffer */ 211 - new_buff->page = old_buff->page; 212 - new_buff->page_dma = old_buff->page_dma; 213 - new_buff->page_offset = old_buff->page_offset; 214 - new_buff->pagecnt_bias = old_buff->pagecnt_bias; 215 - } 216 - 217 163 static void wx_dma_sync_frag(struct wx_ring *rx_ring, 218 164 struct wx_rx_buffer *rx_buffer) 219 165 { ··· 216 270 size, 217 271 DMA_FROM_DEVICE); 218 272 skip_sync: 219 - rx_buffer->pagecnt_bias--; 220 - 221 273 return rx_buffer; 222 274 } 223 275 ··· 224 280 struct sk_buff *skb, 225 281 int rx_buffer_pgcnt) 226 282 { 227 - if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 228 - /* hand second half of page back to the ring */ 229 - wx_reuse_rx_page(rx_ring, rx_buffer); 230 - } else { 231 - if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) 232 - /* the page has been released from the ring */ 233 - WX_CB(skb)->page_released = true; 234 - else 235 - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); 236 - 237 - __page_frag_cache_drain(rx_buffer->page, 238 - rx_buffer->pagecnt_bias); 239 - } 283 + if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) 284 + /* the page has been released from the ring */ 285 + WX_CB(skb)->page_released = true; 240 286 241 287 /* clear contents of rx_buffer */ 242 288 rx_buffer->page = NULL; ··· 269 335 if (size <= WX_RXBUFFER_256) { 270 336 memcpy(__skb_put(skb, size), page_addr, 271 337 ALIGN(size, sizeof(long))); 272 - rx_buffer->pagecnt_bias++; 273 - 338 + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); 274 339 return skb; 275 340 } 341 + 342 + skb_mark_for_recycle(skb); 276 343 277 344 if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) 278 345 WX_CB(skb)->dma = rx_buffer->dma; ··· 317 382 bi->page_dma = dma; 318 383 bi->page = page; 319 384 bi->page_offset = 0; 320 - page_ref_add(page, USHRT_MAX - 1); 321 - bi->pagecnt_bias = USHRT_MAX; 322 385 323 386 return true; 324 387 } ··· 656 723 /* exit if we 
failed to retrieve a buffer */ 657 724 if (!skb) { 658 725 rx_ring->rx_stats.alloc_rx_buff_failed++; 659 - rx_buffer->pagecnt_bias++; 660 726 break; 661 727 } 662 728 ··· 2180 2248 2181 2249 /* free resources associated with mapping */ 2182 2250 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); 2183 - __page_frag_cache_drain(rx_buffer->page, 2184 - rx_buffer->pagecnt_bias); 2185 2251 2186 2252 i++; 2187 2253 rx_buffer++;
-1
drivers/net/ethernet/wangxun/libwx/wx_type.h
··· 787 787 dma_addr_t page_dma; 788 788 struct page *page; 789 789 unsigned int page_offset; 790 - u16 pagecnt_bias; 791 790 }; 792 791 793 792 struct wx_queue_stats {
+4 -2
drivers/net/phy/phy_device.c
··· 1548 1548 goto error; 1549 1549 1550 1550 phy_resume(phydev); 1551 - phy_led_triggers_register(phydev); 1551 + if (!phydev->is_on_sfp_module) 1552 + phy_led_triggers_register(phydev); 1552 1553 1553 1554 /** 1554 1555 * If the external phy used by current mac interface is managed by ··· 1818 1817 } 1819 1818 phydev->phylink = NULL; 1820 1819 1821 - phy_led_triggers_unregister(phydev); 1820 + if (!phydev->is_on_sfp_module) 1821 + phy_led_triggers_unregister(phydev); 1822 1822 1823 1823 if (phydev->mdio.dev.driver) 1824 1824 module_put(phydev->mdio.dev.driver->owner);
+20 -3
drivers/net/usb/ax88179_178a.c
··· 173 173 u8 in_pm; 174 174 u32 wol_supported; 175 175 u32 wolopts; 176 + u8 disconnecting; 176 177 }; 177 178 178 179 struct ax88179_int_data { ··· 209 208 { 210 209 int ret; 211 210 int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16); 211 + struct ax88179_data *ax179_data = dev->driver_priv; 212 212 213 213 BUG_ON(!dev); 214 214 ··· 221 219 ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 222 220 value, index, data, size); 223 221 224 - if (unlikely(ret < 0)) 222 + if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting))) 225 223 netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n", 226 224 index, ret); 227 225 ··· 233 231 { 234 232 int ret; 235 233 int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); 234 + struct ax88179_data *ax179_data = dev->driver_priv; 236 235 237 236 BUG_ON(!dev); 238 237 ··· 245 242 ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 246 243 value, index, data, size); 247 244 248 - if (unlikely(ret < 0)) 245 + if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting))) 249 246 netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n", 250 247 index, ret); 251 248 ··· 493 490 ax88179_set_pm_mode(dev, false); 494 491 495 492 return usbnet_resume(intf); 493 + } 494 + 495 + static void ax88179_disconnect(struct usb_interface *intf) 496 + { 497 + struct usbnet *dev = usb_get_intfdata(intf); 498 + struct ax88179_data *ax179_data; 499 + 500 + if (!dev) 501 + return; 502 + 503 + ax179_data = dev->driver_priv; 504 + ax179_data->disconnecting = 1; 505 + 506 + usbnet_disconnect(intf); 496 507 } 497 508 498 509 static void ··· 1923 1906 .suspend = ax88179_suspend, 1924 1907 .resume = ax88179_resume, 1925 1908 .reset_resume = ax88179_resume, 1926 - .disconnect = usbnet_disconnect, 1909 + .disconnect = ax88179_disconnect, 1927 1910 .supports_autosuspend = 1, 1928 1911 .disable_hub_initiated_lpm = 1, 1929 1912 };
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
··· 1385 1385 * if it is true then one of the handlers took the page. 1386 1386 */ 1387 1387 1388 - if (reclaim) { 1388 + if (reclaim && txq) { 1389 1389 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1390 1390 int index = SEQ_TO_INDEX(sequence); 1391 1391 int cmd_index = iwl_txq_get_cmd_index(txq, index);
+2 -2
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 3106 3106 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3107 3107 u32 i, r, j, rb_len = 0; 3108 3108 3109 - spin_lock(&rxq->lock); 3109 + spin_lock_bh(&rxq->lock); 3110 3110 3111 3111 r = iwl_get_closed_rb_stts(trans, rxq); 3112 3112 ··· 3130 3130 *data = iwl_fw_error_next_data(*data); 3131 3131 } 3132 3132 3133 - spin_unlock(&rxq->lock); 3133 + spin_unlock_bh(&rxq->lock); 3134 3134 3135 3135 return rb_len; 3136 3136 }
+6 -4
drivers/net/wireless/mediatek/mt76/dma.c
··· 783 783 784 784 static void 785 785 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, 786 - int len, bool more, u32 info) 786 + int len, bool more, u32 info, bool allow_direct) 787 787 { 788 788 struct sk_buff *skb = q->rx_head; 789 789 struct skb_shared_info *shinfo = skb_shinfo(skb); ··· 795 795 796 796 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); 797 797 } else { 798 - mt76_put_page_pool_buf(data, true); 798 + mt76_put_page_pool_buf(data, allow_direct); 799 799 } 800 800 801 801 if (more) ··· 815 815 struct sk_buff *skb; 816 816 unsigned char *data; 817 817 bool check_ddone = false; 818 + bool allow_direct = !mt76_queue_is_wed_rx(q); 818 819 bool more; 819 820 820 821 if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) && ··· 856 855 } 857 856 858 857 if (q->rx_head) { 859 - mt76_add_fragment(dev, q, data, len, more, info); 858 + mt76_add_fragment(dev, q, data, len, more, info, 859 + allow_direct); 860 860 continue; 861 861 } 862 862 ··· 886 884 continue; 887 885 888 886 free_frag: 889 - mt76_put_page_pool_buf(data, true); 887 + mt76_put_page_pool_buf(data, allow_direct); 890 888 } 891 889 892 890 mt76_dma_rx_fill(dev, q, true);
+2 -1
drivers/nvme/host/core.c
··· 4137 4137 struct nvme_ctrl, fw_act_work); 4138 4138 unsigned long fw_act_timeout; 4139 4139 4140 + nvme_auth_stop(ctrl); 4141 + 4140 4142 if (ctrl->mtfa) 4141 4143 fw_act_timeout = jiffies + 4142 4144 msecs_to_jiffies(ctrl->mtfa * 100); ··· 4194 4192 * firmware activation. 4195 4193 */ 4196 4194 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4197 - nvme_auth_stop(ctrl); 4198 4195 requeue = false; 4199 4196 queue_work(nvme_wq, &ctrl->fw_act_work); 4200 4197 }
+5 -16
drivers/nvme/host/fc.c
··· 2548 2548 * the controller. Abort any ios on the association and let the 2549 2549 * create_association error path resolve things. 2550 2550 */ 2551 - enum nvme_ctrl_state state; 2552 - unsigned long flags; 2553 - 2554 - spin_lock_irqsave(&ctrl->lock, flags); 2555 - state = ctrl->ctrl.state; 2556 - if (state == NVME_CTRL_CONNECTING) { 2557 - set_bit(ASSOC_FAILED, &ctrl->flags); 2558 - spin_unlock_irqrestore(&ctrl->lock, flags); 2551 + if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { 2559 2552 __nvme_fc_abort_outstanding_ios(ctrl, true); 2553 + set_bit(ASSOC_FAILED, &ctrl->flags); 2560 2554 dev_warn(ctrl->ctrl.device, 2561 2555 "NVME-FC{%d}: transport error during (re)connect\n", 2562 2556 ctrl->cnum); 2563 2557 return; 2564 2558 } 2565 - spin_unlock_irqrestore(&ctrl->lock, flags); 2566 2559 2567 2560 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ 2568 - if (state != NVME_CTRL_LIVE) 2561 + if (ctrl->ctrl.state != NVME_CTRL_LIVE) 2569 2562 return; 2570 2563 2571 2564 dev_warn(ctrl->ctrl.device, ··· 3173 3180 else 3174 3181 ret = nvme_fc_recreate_io_queues(ctrl); 3175 3182 } 3176 - 3177 - spin_lock_irqsave(&ctrl->lock, flags); 3178 3183 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) 3179 3184 ret = -EIO; 3180 - if (ret) { 3181 - spin_unlock_irqrestore(&ctrl->lock, flags); 3185 + if (ret) 3182 3186 goto out_term_aen_ops; 3183 - } 3187 + 3184 3188 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 3185 - spin_unlock_irqrestore(&ctrl->lock, flags); 3186 3189 3187 3190 ctrl->ctrl.nr_reconnects = 0; 3188 3191
+94 -40
drivers/nvmem/brcm_nvram.c
··· 17 17 18 18 #define NVRAM_MAGIC "FLSH" 19 19 20 + /** 21 + * struct brcm_nvram - driver state internal struct 22 + * 23 + * @dev: NVMEM device pointer 24 + * @nvmem_size: Size of the whole space available for NVRAM 25 + * @data: NVRAM data copy stored to avoid poking underlaying flash controller 26 + * @data_len: NVRAM data size 27 + * @padding_byte: Padding value used to fill remaining space 28 + * @cells: Array of discovered NVMEM cells 29 + * @ncells: Number of elements in cells 30 + */ 20 31 struct brcm_nvram { 21 32 struct device *dev; 22 - void __iomem *base; 33 + size_t nvmem_size; 34 + uint8_t *data; 35 + size_t data_len; 36 + uint8_t padding_byte; 23 37 struct nvmem_cell_info *cells; 24 38 int ncells; 25 39 }; ··· 50 36 size_t bytes) 51 37 { 52 38 struct brcm_nvram *priv = context; 53 - u8 *dst = val; 39 + size_t to_copy; 54 40 55 - while (bytes--) 56 - *dst++ = readb(priv->base + offset++); 41 + if (offset + bytes > priv->data_len) 42 + to_copy = max_t(ssize_t, (ssize_t)priv->data_len - offset, 0); 43 + else 44 + to_copy = bytes; 45 + 46 + memcpy(val, priv->data + offset, to_copy); 47 + 48 + memset((uint8_t *)val + to_copy, priv->padding_byte, bytes - to_copy); 49 + 50 + return 0; 51 + } 52 + 53 + static int brcm_nvram_copy_data(struct brcm_nvram *priv, struct platform_device *pdev) 54 + { 55 + struct resource *res; 56 + void __iomem *base; 57 + 58 + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 59 + if (IS_ERR(base)) 60 + return PTR_ERR(base); 61 + 62 + priv->nvmem_size = resource_size(res); 63 + 64 + priv->padding_byte = readb(base + priv->nvmem_size - 1); 65 + for (priv->data_len = priv->nvmem_size; 66 + priv->data_len; 67 + priv->data_len--) { 68 + if (readb(base + priv->data_len - 1) != priv->padding_byte) 69 + break; 70 + } 71 + WARN(priv->data_len > SZ_128K, "Unexpected (big) NVRAM size: %zu B\n", priv->data_len); 72 + 73 + priv->data = devm_kzalloc(priv->dev, priv->data_len, GFP_KERNEL); 74 + if (!priv->data) 75 + return 
-ENOMEM; 76 + 77 + memcpy_fromio(priv->data, base, priv->data_len); 78 + 79 + bcm47xx_nvram_init_from_iomem(base, priv->data_len); 57 80 58 81 return 0; 59 82 } ··· 118 67 size_t len) 119 68 { 120 69 struct device *dev = priv->dev; 121 - char *var, *value, *eq; 70 + char *var, *value; 71 + uint8_t tmp; 122 72 int idx; 73 + int err = 0; 74 + 75 + tmp = priv->data[len - 1]; 76 + priv->data[len - 1] = '\0'; 123 77 124 78 priv->ncells = 0; 125 79 for (var = data + sizeof(struct brcm_nvram_header); ··· 134 78 } 135 79 136 80 priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL); 137 - if (!priv->cells) 138 - return -ENOMEM; 81 + if (!priv->cells) { 82 + err = -ENOMEM; 83 + goto out; 84 + } 139 85 140 86 for (var = data + sizeof(struct brcm_nvram_header), idx = 0; 141 87 var < (char *)data + len && *var; 142 88 var = value + strlen(value) + 1, idx++) { 89 + char *eq, *name; 90 + 143 91 eq = strchr(var, '='); 144 92 if (!eq) 145 93 break; 146 94 *eq = '\0'; 95 + name = devm_kstrdup(dev, var, GFP_KERNEL); 96 + *eq = '='; 97 + if (!name) { 98 + err = -ENOMEM; 99 + goto out; 100 + } 147 101 value = eq + 1; 148 102 149 - priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL); 150 - if (!priv->cells[idx].name) 151 - return -ENOMEM; 103 + priv->cells[idx].name = name; 152 104 priv->cells[idx].offset = value - (char *)data; 153 105 priv->cells[idx].bytes = strlen(value); 154 106 priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name); 155 - if (!strcmp(var, "et0macaddr") || 156 - !strcmp(var, "et1macaddr") || 157 - !strcmp(var, "et2macaddr")) { 107 + if (!strcmp(name, "et0macaddr") || 108 + !strcmp(name, "et1macaddr") || 109 + !strcmp(name, "et2macaddr")) { 158 110 priv->cells[idx].raw_len = strlen(value); 159 111 priv->cells[idx].bytes = ETH_ALEN; 160 112 priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr; 161 113 } 162 114 } 163 115 164 - return 0; 116 + out: 117 + priv->data[len - 1] = tmp; 118 + 
return err; 165 119 } 166 120 167 121 static int brcm_nvram_parse(struct brcm_nvram *priv) 168 122 { 123 + struct brcm_nvram_header *header = (struct brcm_nvram_header *)priv->data; 169 124 struct device *dev = priv->dev; 170 - struct brcm_nvram_header header; 171 - uint8_t *data; 172 125 size_t len; 173 126 int err; 174 127 175 - memcpy_fromio(&header, priv->base, sizeof(header)); 176 - 177 - if (memcmp(header.magic, NVRAM_MAGIC, 4)) { 128 + if (memcmp(header->magic, NVRAM_MAGIC, 4)) { 178 129 dev_err(dev, "Invalid NVRAM magic\n"); 179 130 return -EINVAL; 180 131 } 181 132 182 - len = le32_to_cpu(header.len); 183 - 184 - data = kzalloc(len, GFP_KERNEL); 185 - if (!data) 186 - return -ENOMEM; 187 - 188 - memcpy_fromio(data, priv->base, len); 189 - data[len - 1] = '\0'; 190 - 191 - err = brcm_nvram_add_cells(priv, data, len); 192 - if (err) { 193 - dev_err(dev, "Failed to add cells: %d\n", err); 194 - return err; 133 + len = le32_to_cpu(header->len); 134 + if (len > priv->nvmem_size) { 135 + dev_err(dev, "NVRAM length (%zd) exceeds mapped size (%zd)\n", len, 136 + priv->nvmem_size); 137 + return -EINVAL; 195 138 } 196 139 197 - kfree(data); 140 + err = brcm_nvram_add_cells(priv, priv->data, len); 141 + if (err) 142 + dev_err(dev, "Failed to add cells: %d\n", err); 198 143 199 144 return 0; 200 145 } ··· 207 150 .reg_read = brcm_nvram_read, 208 151 }; 209 152 struct device *dev = &pdev->dev; 210 - struct resource *res; 211 153 struct brcm_nvram *priv; 212 154 int err; 213 155 ··· 215 159 return -ENOMEM; 216 160 priv->dev = dev; 217 161 218 - priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 219 - if (IS_ERR(priv->base)) 220 - return PTR_ERR(priv->base); 162 + err = brcm_nvram_copy_data(priv, pdev); 163 + if (err) 164 + return err; 221 165 222 166 err = brcm_nvram_parse(priv); 223 167 if (err) 224 168 return err; 225 169 226 - bcm47xx_nvram_init_from_iomem(priv->base, resource_size(res)); 227 - 228 170 config.dev = dev; 229 171 config.cells = 
priv->cells; 230 172 config.ncells = priv->ncells; 231 173 config.priv = priv; 232 - config.size = resource_size(res); 174 + config.size = priv->nvmem_size; 233 175 234 176 return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config)); 235 177 }
+9
drivers/pinctrl/pinctrl-amd.c
··· 923 923 924 924 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 925 925 gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING; 926 + 927 + /* mask any interrupts not intended to be a wake source */ 928 + if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) { 929 + writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF), 930 + gpio_dev->base + pin * 4); 931 + pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n", 932 + pin); 933 + } 934 + 926 935 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 927 936 } 928 937
+5
drivers/pinctrl/pinctrl-amd.h
··· 80 80 #define FUNCTION_MASK GENMASK(1, 0) 81 81 #define FUNCTION_INVALID GENMASK(7, 0) 82 82 83 + #define WAKE_SOURCE (BIT(WAKE_CNTRL_OFF_S0I3) | \ 84 + BIT(WAKE_CNTRL_OFF_S3) | \ 85 + BIT(WAKE_CNTRL_OFF_S4) | \ 86 + BIT(WAKECNTRL_Z_OFF)) 87 + 83 88 struct amd_function { 84 89 const char *name; 85 90 const char * const groups[NSELECTS];
+8
drivers/pinctrl/pinctrl-at91-pio4.c
··· 1068 1068 } 1069 1069 }; 1070 1070 1071 + /* 1072 + * This lock class allows to tell lockdep that parent IRQ and children IRQ do 1073 + * not share the same class so it does not raise false positive 1074 + */ 1075 + static struct lock_class_key atmel_lock_key; 1076 + static struct lock_class_key atmel_request_key; 1077 + 1071 1078 static int atmel_pinctrl_probe(struct platform_device *pdev) 1072 1079 { 1073 1080 struct device *dev = &pdev->dev; ··· 1221 1214 irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip, 1222 1215 handle_simple_irq); 1223 1216 irq_set_chip_data(irq, atmel_pioctrl); 1217 + irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key); 1224 1218 dev_dbg(dev, 1225 1219 "atmel gpio irq domain: hwirq: %d, linux irq: %d\n", 1226 1220 i, irq);
+13 -1
drivers/pinctrl/pinctrl-cy8c95x0.c
··· 308 308 "gp77", 309 309 }; 310 310 311 + static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip, 312 + unsigned int pin, bool input); 313 + 311 314 static inline u8 cypress_get_port(struct cy8c95x0_pinctrl *chip, unsigned int pin) 312 315 { 313 316 /* Account for GPORT2 which only has 4 bits */ ··· 715 712 ret = regmap_read(chip->regmap, reg, &reg_val); 716 713 if (reg_val & bit) 717 714 arg = 1; 715 + if (param == PIN_CONFIG_OUTPUT_ENABLE) 716 + arg = !arg; 718 717 719 718 *config = pinconf_to_config_packed(param, (u16)arg); 720 719 out: ··· 732 727 u8 port = cypress_get_port(chip, off); 733 728 u8 bit = cypress_get_pin_mask(chip, off); 734 729 unsigned long param = pinconf_to_config_param(config); 730 + unsigned long arg = pinconf_to_config_argument(config); 735 731 unsigned int reg; 736 732 int ret; 737 733 ··· 771 765 case PIN_CONFIG_MODE_PWM: 772 766 reg = CY8C95X0_PWMSEL; 773 767 break; 768 + case PIN_CONFIG_OUTPUT_ENABLE: 769 + ret = cy8c95x0_pinmux_direction(chip, off, !arg); 770 + goto out; 771 + case PIN_CONFIG_INPUT_ENABLE: 772 + ret = cy8c95x0_pinmux_direction(chip, off, arg); 773 + goto out; 774 774 default: 775 775 ret = -ENOTSUPP; 776 776 goto out; ··· 834 822 gc->get_direction = cy8c95x0_gpio_get_direction; 835 823 gc->get_multiple = cy8c95x0_gpio_get_multiple; 836 824 gc->set_multiple = cy8c95x0_gpio_set_multiple; 837 - gc->set_config = gpiochip_generic_config, 825 + gc->set_config = gpiochip_generic_config; 838 826 gc->can_sleep = true; 839 827 gc->add_pin_ranges = cy8c95x0_add_pin_ranges; 840 828
+2 -2
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
··· 492 492 493 493 nmaps = 0; 494 494 ngroups = 0; 495 - for_each_child_of_node(np, child) { 495 + for_each_available_child_of_node(np, child) { 496 496 int npinmux = of_property_count_u32_elems(child, "pinmux"); 497 497 int npins = of_property_count_u32_elems(child, "pins"); 498 498 ··· 527 527 nmaps = 0; 528 528 ngroups = 0; 529 529 mutex_lock(&sfp->mutex); 530 - for_each_child_of_node(np, child) { 530 + for_each_available_child_of_node(np, child) { 531 531 int npins; 532 532 int i; 533 533
+2 -2
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
··· 135 135 int ret; 136 136 137 137 ngroups = 0; 138 - for_each_child_of_node(np, child) 138 + for_each_available_child_of_node(np, child) 139 139 ngroups += 1; 140 140 nmaps = 2 * ngroups; 141 141 ··· 150 150 nmaps = 0; 151 151 ngroups = 0; 152 152 mutex_lock(&sfp->mutex); 153 - for_each_child_of_node(np, child) { 153 + for_each_available_child_of_node(np, child) { 154 154 int npins = of_property_count_u32_elems(child, "pinmux"); 155 155 int *pins; 156 156 u32 *pinmux;
+20
drivers/platform/x86/amd/pmc/pmc-quirks.c
··· 16 16 17 17 struct quirk_entry { 18 18 u32 s2idle_bug_mmio; 19 + bool spurious_8042; 19 20 }; 20 21 21 22 static struct quirk_entry quirk_s2idle_bug = { 22 23 .s2idle_bug_mmio = 0xfed80380, 24 + }; 25 + 26 + static struct quirk_entry quirk_spurious_8042 = { 27 + .spurious_8042 = true, 23 28 }; 24 29 25 30 static const struct dmi_system_id fwbug_list[] = { ··· 198 193 DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"), 199 194 } 200 195 }, 196 + /* https://community.frame.work/t/tracking-framework-amd-ryzen-7040-series-lid-wakeup-behavior-feedback/39128 */ 197 + { 198 + .ident = "Framework Laptop 13 (Phoenix)", 199 + .driver_data = &quirk_spurious_8042, 200 + .matches = { 201 + DMI_MATCH(DMI_SYS_VENDOR, "Framework"), 202 + DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"), 203 + DMI_MATCH(DMI_BIOS_VERSION, "03.03"), 204 + } 205 + }, 201 206 {} 202 207 }; 203 208 ··· 250 235 { 251 236 const struct dmi_system_id *dmi_id; 252 237 238 + if (dev->cpu_id == AMD_CPU_ID_CZN) 239 + dev->disable_8042_wakeup = true; 240 + 253 241 dmi_id = dmi_first_match(fwbug_list); 254 242 if (!dmi_id) 255 243 return; ··· 260 242 if (dev->quirks->s2idle_bug_mmio) 261 243 pr_info("Using s2idle quirk to avoid %s platform firmware bug\n", 262 244 dmi_id->ident); 245 + if (dev->quirks->spurious_8042) 246 + dev->disable_8042_wakeup = true; 263 247 }
+13 -20
drivers/platform/x86/amd/pmc/pmc.c
··· 91 91 #define SMU_MSG_LOG_RESET 0x07 92 92 #define SMU_MSG_LOG_DUMP_DATA 0x08 93 93 #define SMU_MSG_GET_SUP_CONSTRAINTS 0x09 94 - /* List of supported CPU ids */ 95 - #define AMD_CPU_ID_RV 0x15D0 96 - #define AMD_CPU_ID_RN 0x1630 97 - #define AMD_CPU_ID_PCO AMD_CPU_ID_RV 98 - #define AMD_CPU_ID_CZN AMD_CPU_ID_RN 99 - #define AMD_CPU_ID_YC 0x14B5 100 - #define AMD_CPU_ID_CB 0x14D8 101 - #define AMD_CPU_ID_PS 0x14E8 102 - #define AMD_CPU_ID_SP 0x14A4 103 - #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 104 94 105 95 #define PMC_MSG_DELAY_MIN_US 50 106 96 #define RESPONSE_REGISTER_LOOP_MAX 20000 ··· 756 766 return -EINVAL; 757 767 } 758 768 759 - static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev) 769 + static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev) 760 770 { 761 771 struct device *d; 762 772 int rc; 763 773 764 - if (!pdev->major) { 765 - rc = amd_pmc_get_smu_version(pdev); 766 - if (rc) 767 - return rc; 768 - } 774 + /* cezanne platform firmware has a fix in 64.66.0 */ 775 + if (pdev->cpu_id == AMD_CPU_ID_CZN) { 776 + if (!pdev->major) { 777 + rc = amd_pmc_get_smu_version(pdev); 778 + if (rc) 779 + return rc; 780 + } 769 781 770 - if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65)) 771 - return 0; 782 + if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65)) 783 + return 0; 784 + } 772 785 773 786 d = bus_find_device_by_name(&serio_bus, NULL, "serio0"); 774 787 if (!d) ··· 930 937 { 931 938 struct amd_pmc_dev *pdev = dev_get_drvdata(dev); 932 939 933 - if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) { 934 - int rc = amd_pmc_czn_wa_irq1(pdev); 940 + if (pdev->disable_8042_wakeup && !disable_workarounds) { 941 + int rc = amd_pmc_wa_irq1(pdev); 935 942 936 943 if (rc) { 937 944 dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
+12
drivers/platform/x86/amd/pmc/pmc.h
··· 36 36 struct mutex lock; /* generic mutex lock */ 37 37 struct dentry *dbgfs_dir; 38 38 struct quirk_entry *quirks; 39 + bool disable_8042_wakeup; 39 40 }; 40 41 41 42 void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev); 42 43 void amd_pmc_quirks_init(struct amd_pmc_dev *dev); 44 + 45 + /* List of supported CPU ids */ 46 + #define AMD_CPU_ID_RV 0x15D0 47 + #define AMD_CPU_ID_RN 0x1630 48 + #define AMD_CPU_ID_PCO AMD_CPU_ID_RV 49 + #define AMD_CPU_ID_CZN AMD_CPU_ID_RN 50 + #define AMD_CPU_ID_YC 0x14B5 51 + #define AMD_CPU_ID_CB 0x14D8 52 + #define AMD_CPU_ID_PS 0x14E8 53 + #define AMD_CPU_ID_SP 0x14A4 54 + #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 43 55 44 56 #endif /* PMC_H */
+1 -1
drivers/platform/x86/intel/pmc/core.c
··· 472 472 * is based on the contiguous indexes from ltr_show output. 473 473 * pmc index and ltr index needs to be calculated from it. 474 474 */ 475 - for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index > 0; pmc_index++) { 475 + for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) { 476 476 pmc = pmcdev->pmcs[pmc_index]; 477 477 478 478 if (!pmc)
+85 -13
drivers/platform/x86/thinkpad_acpi.c
··· 7948 7948 * TPACPI_FAN_WR_TPEC is also available and should be used to 7949 7949 * command the fan. The X31/X40/X41 seems to have 8 fan levels, 7950 7950 * but the ACPI tables just mention level 7. 7951 + * 7952 + * TPACPI_FAN_RD_TPEC_NS: 7953 + * This mode is used for a few ThinkPads (L13 Yoga Gen2, X13 Yoga Gen2 etc.) 7954 + * that are using non-standard EC locations for reporting fan speeds. 7955 + * Currently these platforms only provide fan rpm reporting. 7956 + * 7951 7957 */ 7958 + 7959 + #define FAN_RPM_CAL_CONST 491520 /* FAN RPM calculation offset for some non-standard ECFW */ 7960 + 7961 + #define FAN_NS_CTRL_STATUS BIT(2) /* Bit which determines control is enabled or not */ 7962 + #define FAN_NS_CTRL BIT(4) /* Bit which determines control is by host or EC */ 7952 7963 7953 7964 enum { /* Fan control constants */ 7954 7965 fan_status_offset = 0x2f, /* EC register 0x2f */ ··· 7967 7956 * 0x84 must be read before 0x85 */ 7968 7957 fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M) 7969 7958 bit 0 selects which fan is active */ 7959 + 7960 + fan_status_offset_ns = 0x93, /* Special status/control offset for non-standard EC Fan1 */ 7961 + fan2_status_offset_ns = 0x96, /* Special status/control offset for non-standard EC Fan2 */ 7962 + fan_rpm_status_ns = 0x95, /* Special offset for Fan1 RPM status for non-standard EC */ 7963 + fan2_rpm_status_ns = 0x98, /* Special offset for Fan2 RPM status for non-standard EC */ 7970 7964 7971 7965 TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */ 7972 7966 TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */ ··· 7983 7967 TPACPI_FAN_NONE = 0, /* No fan status or control */ 7984 7968 TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */ 7985 7969 TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */ 7970 + TPACPI_FAN_RD_TPEC_NS, /* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) 
*/ 7986 7971 }; 7987 7972 7988 7973 enum fan_control_access_mode { ··· 8010 7993 static u8 fan_control_desired_level; 8011 7994 static u8 fan_control_resume_level; 8012 7995 static int fan_watchdog_maxinterval; 7996 + 7997 + static bool fan_with_ns_addr; 8013 7998 8014 7999 static struct mutex fan_mutex; 8015 8000 ··· 8142 8123 } 8143 8124 8144 8125 break; 8126 + case TPACPI_FAN_RD_TPEC_NS: 8127 + /* Default mode is AUTO which means controlled by EC */ 8128 + if (!acpi_ec_read(fan_status_offset_ns, &s)) 8129 + return -EIO; 8130 + 8131 + if (status) 8132 + *status = s; 8133 + 8134 + break; 8145 8135 8146 8136 default: 8147 8137 return -ENXIO; ··· 8167 8139 if (mutex_lock_killable(&fan_mutex)) 8168 8140 return -ERESTARTSYS; 8169 8141 rc = fan_get_status(&s); 8170 - if (!rc) 8142 + /* NS EC doesn't have register with level settings */ 8143 + if (!rc && !fan_with_ns_addr) 8171 8144 fan_update_desired_level(s); 8172 8145 mutex_unlock(&fan_mutex); 8173 8146 ··· 8195 8166 8196 8167 if (likely(speed)) 8197 8168 *speed = (hi << 8) | lo; 8169 + break; 8170 + case TPACPI_FAN_RD_TPEC_NS: 8171 + if (!acpi_ec_read(fan_rpm_status_ns, &lo)) 8172 + return -EIO; 8198 8173 8174 + if (speed) 8175 + *speed = lo ? FAN_RPM_CAL_CONST / lo : 0; 8199 8176 break; 8200 8177 8201 8178 default: ··· 8213 8178 8214 8179 static int fan2_get_speed(unsigned int *speed) 8215 8180 { 8216 - u8 hi, lo; 8181 + u8 hi, lo, status; 8217 8182 bool rc; 8218 8183 8219 8184 switch (fan_status_access_mode) { ··· 8229 8194 8230 8195 if (likely(speed)) 8231 8196 *speed = (hi << 8) | lo; 8197 + break; 8232 8198 8199 + case TPACPI_FAN_RD_TPEC_NS: 8200 + rc = !acpi_ec_read(fan2_status_offset_ns, &status); 8201 + if (rc) 8202 + return -EIO; 8203 + if (!(status & FAN_NS_CTRL_STATUS)) { 8204 + pr_info("secondary fan control not supported\n"); 8205 + return -EIO; 8206 + } 8207 + rc = !acpi_ec_read(fan2_rpm_status_ns, &lo); 8208 + if (rc) 8209 + return -EIO; 8210 + if (speed) 8211 + *speed = lo ? 
FAN_RPM_CAL_CONST / lo : 0; 8233 8212 break; 8234 8213 8235 8214 default: ··· 8746 8697 #define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ 8747 8698 #define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ 8748 8699 #define TPACPI_FAN_NOFAN 0x0008 /* no fan available */ 8700 + #define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */ 8749 8701 8750 8702 static const struct tpacpi_quirk fan_quirk_table[] __initconst = { 8751 8703 TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), ··· 8765 8715 TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */ 8766 8716 TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ 8767 8717 TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */ 8718 + TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */ 8719 + TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2*/ 8768 8720 TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ 8769 8721 }; 8770 8722 ··· 8801 8749 return -ENODEV; 8802 8750 } 8803 8751 8752 + if (quirks & TPACPI_FAN_NS) { 8753 + pr_info("ECFW with non-standard fan reg control found\n"); 8754 + fan_with_ns_addr = 1; 8755 + /* Fan ctrl support from host is undefined for now */ 8756 + tp_features.fan_ctrl_status_undef = 1; 8757 + } 8758 + 8804 8759 if (gfan_handle) { 8805 8760 /* 570, 600e/x, 770e, 770x */ 8806 8761 fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; 8807 8762 } else { 8808 8763 /* all other ThinkPads: note that even old-style 8809 8764 * ThinkPad ECs supports the fan control register */ 8810 - if (likely(acpi_ec_read(fan_status_offset, 8811 - &fan_control_initial_status))) { 8765 + if (fan_with_ns_addr || 8766 + likely(acpi_ec_read(fan_status_offset, &fan_control_initial_status))) { 8812 8767 int res; 8813 8768 unsigned int speed; 8814 8769 8815 - fan_status_access_mode = TPACPI_FAN_RD_TPEC; 8770 + fan_status_access_mode = fan_with_ns_addr ? 
8771 + TPACPI_FAN_RD_TPEC_NS : TPACPI_FAN_RD_TPEC; 8772 + 8816 8773 if (quirks & TPACPI_FAN_Q1) 8817 8774 fan_quirk1_setup(); 8818 8775 /* Try and probe the 2nd fan */ ··· 8830 8769 if (res >= 0 && speed != FAN_NOT_PRESENT) { 8831 8770 /* It responded - so let's assume it's there */ 8832 8771 tp_features.second_fan = 1; 8833 - tp_features.second_fan_ctl = 1; 8772 + /* fan control not currently available for ns ECFW */ 8773 + tp_features.second_fan_ctl = !fan_with_ns_addr; 8834 8774 pr_info("secondary fan control detected & enabled\n"); 8835 8775 } else { 8836 8776 /* Fan not auto-detected */ ··· 9006 8944 str_enabled_disabled(status), status); 9007 8945 break; 9008 8946 8947 + case TPACPI_FAN_RD_TPEC_NS: 9009 8948 case TPACPI_FAN_RD_TPEC: 9010 8949 /* all except 570, 600e/x, 770e, 770x */ 9011 8950 rc = fan_get_status_safe(&status); ··· 9021 8958 9022 8959 seq_printf(m, "speed:\t\t%d\n", speed); 9023 8960 9024 - if (status & TP_EC_FAN_FULLSPEED) 9025 - /* Disengaged mode takes precedence */ 9026 - seq_printf(m, "level:\t\tdisengaged\n"); 9027 - else if (status & TP_EC_FAN_AUTO) 9028 - seq_printf(m, "level:\t\tauto\n"); 9029 - else 9030 - seq_printf(m, "level:\t\t%d\n", status); 8961 + if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) { 8962 + /* 8963 + * No full speed bit in NS EC 8964 + * EC Auto mode is set by default. 8965 + * No other levels settings available 8966 + */ 8967 + seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? "unknown" : "auto"); 8968 + } else { 8969 + if (status & TP_EC_FAN_FULLSPEED) 8970 + /* Disengaged mode takes precedence */ 8971 + seq_printf(m, "level:\t\tdisengaged\n"); 8972 + else if (status & TP_EC_FAN_AUTO) 8973 + seq_printf(m, "level:\t\tauto\n"); 8974 + else 8975 + seq_printf(m, "level:\t\t%d\n", status); 8976 + } 9031 8977 break; 9032 8978 9033 8979 case TPACPI_FAN_NONE:
+4 -4
drivers/reset/core.c
··· 807 807 { 808 808 lockdep_assert_held(&reset_list_mutex); 809 809 810 + if (IS_ERR_OR_NULL(rstc)) 811 + return; 812 + 810 813 kref_put(&rstc->refcnt, __reset_control_release); 811 814 } 812 815 ··· 1020 1017 void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs) 1021 1018 { 1022 1019 mutex_lock(&reset_list_mutex); 1023 - while (num_rstcs--) { 1024 - if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc)) 1025 - continue; 1020 + while (num_rstcs--) 1026 1021 __reset_control_put_internal(rstcs[num_rstcs].rstc); 1027 - } 1028 1022 mutex_unlock(&reset_list_mutex); 1029 1023 } 1030 1024 EXPORT_SYMBOL_GPL(reset_control_bulk_put);
+1 -1
drivers/reset/hisilicon/hi6220_reset.c
··· 163 163 if (!data) 164 164 return -ENOMEM; 165 165 166 - type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev); 166 + type = (uintptr_t)of_device_get_match_data(dev); 167 167 168 168 regmap = syscon_node_to_regmap(np); 169 169 if (IS_ERR(regmap)) {
+4 -3
drivers/s390/block/scm_blk.c
··· 17 17 #include <linux/blk-mq.h> 18 18 #include <linux/slab.h> 19 19 #include <linux/list.h> 20 + #include <linux/io.h> 20 21 #include <asm/eadm.h> 21 22 #include "scm_blk.h" 22 23 ··· 131 130 132 131 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { 133 132 msb = &scmrq->aob->msb[i]; 134 - aidaw = msb->data_addr; 133 + aidaw = (u64)phys_to_virt(msb->data_addr); 135 134 136 135 if ((msb->flags & MSB_FLAG_IDA) && aidaw && 137 136 IS_ALIGNED(aidaw, PAGE_SIZE)) ··· 196 195 msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); 197 196 msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE; 198 197 msb->flags |= MSB_FLAG_IDA; 199 - msb->data_addr = (u64) aidaw; 198 + msb->data_addr = (u64)virt_to_phys(aidaw); 200 199 201 200 rq_for_each_segment(bv, req, iter) { 202 201 WARN_ON(bv.bv_offset); 203 202 msb->blk_count += bv.bv_len >> 12; 204 - aidaw->data_addr = (u64) page_address(bv.bv_page); 203 + aidaw->data_addr = virt_to_phys(page_address(bv.bv_page)); 205 204 aidaw++; 206 205 } 207 206
-1
drivers/scsi/aacraid/aacraid.h
··· 1678 1678 u32 handle_pci_error; 1679 1679 bool init_reset; 1680 1680 u8 soft_reset_support; 1681 - u8 use_map_queue; 1682 1681 }; 1683 1682 1684 1683 #define aac_adapter_interrupt(dev) \
+1 -5
drivers/scsi/aacraid/commsup.c
··· 223 223 struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) 224 224 { 225 225 struct fib *fibptr; 226 - u32 blk_tag; 227 - int i; 228 226 229 - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 230 - i = blk_mq_unique_tag_to_tag(blk_tag); 231 - fibptr = &dev->fibs[i]; 227 + fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag]; 232 228 /* 233 229 * Null out fields that depend on being zero at the start of 234 230 * each I/O
-14
drivers/scsi/aacraid/linit.c
··· 19 19 20 20 #include <linux/compat.h> 21 21 #include <linux/blkdev.h> 22 - #include <linux/blk-mq-pci.h> 23 22 #include <linux/completion.h> 24 23 #include <linux/init.h> 25 24 #include <linux/interrupt.h> ··· 502 503 sdev->tagged_supported = 1; 503 504 504 505 return 0; 505 - } 506 - 507 - static void aac_map_queues(struct Scsi_Host *shost) 508 - { 509 - struct aac_dev *aac = (struct aac_dev *)shost->hostdata; 510 - 511 - blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 512 - aac->pdev, 0); 513 - aac->use_map_queue = true; 514 506 } 515 507 516 508 /** ··· 1488 1498 .bios_param = aac_biosparm, 1489 1499 .shost_groups = aac_host_groups, 1490 1500 .slave_configure = aac_slave_configure, 1491 - .map_queues = aac_map_queues, 1492 1501 .change_queue_depth = aac_change_queue_depth, 1493 1502 .sdev_groups = aac_dev_groups, 1494 1503 .eh_abort_handler = aac_eh_abort, ··· 1775 1786 shost->max_lun = AAC_MAX_LUN; 1776 1787 1777 1788 pci_set_drvdata(pdev, shost); 1778 - shost->nr_hw_queues = aac->max_msix; 1779 - shost->host_tagset = 1; 1780 1789 1781 1790 error = scsi_add_host(shost, &pdev->dev); 1782 1791 if (error) ··· 1906 1919 struct aac_dev *aac = (struct aac_dev *)shost->hostdata; 1907 1920 1908 1921 aac_cancel_rescan_worker(aac); 1909 - aac->use_map_queue = false; 1910 1922 scsi_remove_host(shost); 1911 1923 1912 1924 __aac_shutdown(aac);
+2 -23
drivers/scsi/aacraid/src.c
··· 493 493 #endif 494 494 495 495 u16 vector_no; 496 - struct scsi_cmnd *scmd; 497 - u32 blk_tag; 498 - struct Scsi_Host *shost = dev->scsi_host_ptr; 499 - struct blk_mq_queue_map *qmap; 500 496 501 497 atomic_inc(&q->numpending); 502 498 ··· 505 509 if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) 506 510 && dev->sa_firmware) 507 511 vector_no = aac_get_vector(dev); 508 - else { 509 - if (!fib->vector_no || !fib->callback_data) { 510 - if (shost && dev->use_map_queue) { 511 - qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 512 - vector_no = qmap->mq_map[raw_smp_processor_id()]; 513 - } 514 - /* 515 - * We hardcode the vector_no for 516 - * reserved commands as a valid shost is 517 - * absent during the init 518 - */ 519 - else 520 - vector_no = 0; 521 - } else { 522 - scmd = (struct scsi_cmnd *)fib->callback_data; 523 - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 524 - vector_no = blk_mq_unique_tag_to_hwq(blk_tag); 525 - } 526 - } 512 + else 513 + vector_no = fib->vector_no; 527 514 528 515 if (native_hba) { 529 516 if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+3 -6
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 429 429 struct fcoe_ctlr *ctlr; 430 430 struct fcoe_rcv_info *fr; 431 431 struct fcoe_percpu_s *bg; 432 - struct sk_buff *tmp_skb; 433 432 434 433 interface = container_of(ptype, struct bnx2fc_interface, 435 434 fcoe_packet_type); ··· 440 441 goto err; 441 442 } 442 443 443 - tmp_skb = skb_share_check(skb, GFP_ATOMIC); 444 - if (!tmp_skb) 445 - goto err; 446 - 447 - skb = tmp_skb; 444 + skb = skb_share_check(skb, GFP_ATOMIC); 445 + if (!skb) 446 + return -1; 448 447 449 448 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 450 449 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
+2
drivers/scsi/scsi_error.c
··· 1152 1152 1153 1153 scsi_log_send(scmd); 1154 1154 scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER; 1155 + scmd->flags |= SCMD_LAST; 1155 1156 1156 1157 /* 1157 1158 * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can ··· 2460 2459 scsi_init_command(dev, scmd); 2461 2460 2462 2461 scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL; 2462 + scmd->flags |= SCMD_LAST; 2463 2463 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 2464 2464 2465 2465 scmd->cmd_len = 0;
+85 -10
drivers/spi/spi-atmel.c
··· 22 22 #include <linux/gpio/consumer.h> 23 23 #include <linux/pinctrl/consumer.h> 24 24 #include <linux/pm_runtime.h> 25 + #include <linux/iopoll.h> 25 26 #include <trace/events/spi.h> 26 27 27 28 /* SPI register offsets */ ··· 234 233 */ 235 234 #define DMA_MIN_BYTES 16 236 235 237 - #define SPI_DMA_MIN_TIMEOUT (msecs_to_jiffies(1000)) 238 - #define SPI_DMA_TIMEOUT_PER_10K (msecs_to_jiffies(4)) 239 - 240 236 #define AUTOSUSPEND_TIMEOUT 2000 241 237 242 238 struct atmel_spi_caps { ··· 277 279 bool keep_cs; 278 280 279 281 u32 fifo_size; 282 + bool last_polarity; 280 283 u8 native_cs_free; 281 284 u8 native_cs_for_gpio; 282 285 }; ··· 291 292 #define INVALID_DMA_ADDRESS 0xffffffff 292 293 293 294 /* 295 + * This frequency can be anything supported by the controller, but to avoid 296 + * unnecessary delay, the highest possible frequency is chosen. 297 + * 298 + * This frequency is the highest possible which is not interfering with other 299 + * chip select registers (see Note for Serial Clock Bit Rate configuration in 300 + * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283) 301 + */ 302 + #define DUMMY_MSG_FREQUENCY 0x02 303 + /* 304 + * 8 bits is the minimum data the controller is capable of sending. 305 + * 306 + * This message can be anything as it should not be treated by any SPI device. 307 + */ 308 + #define DUMMY_MSG 0xAA 309 + 310 + /* 294 311 * Version 2 of the SPI controller has 295 312 * - CR.LASTXFER 296 313 * - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero) ··· 318 303 { 319 304 return as->caps.is_spi2; 320 305 } 306 + 307 + /* 308 + * Send a dummy message. 309 + * 310 + * This is sometimes needed when using a CS GPIO to force clock transition when 311 + * switching between devices with different polarities. 
312 + */ 313 + static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select) 314 + { 315 + u32 status; 316 + u32 csr; 317 + 318 + /* 319 + * Set a clock frequency to allow sending message on SPI bus. 320 + * The frequency here can be anything, but is needed for 321 + * the controller to send the data. 322 + */ 323 + csr = spi_readl(as, CSR0 + 4 * chip_select); 324 + csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr); 325 + spi_writel(as, CSR0 + 4 * chip_select, csr); 326 + 327 + /* 328 + * Read all data coming from SPI bus, needed to be able to send 329 + * the message. 330 + */ 331 + spi_readl(as, RDR); 332 + while (spi_readl(as, SR) & SPI_BIT(RDRF)) { 333 + spi_readl(as, RDR); 334 + cpu_relax(); 335 + } 336 + 337 + spi_writel(as, TDR, DUMMY_MSG); 338 + 339 + readl_poll_timeout_atomic(as->regs + SPI_SR, status, 340 + (status & SPI_BIT(TXEMPTY)), 1, 1000); 341 + } 342 + 321 343 322 344 /* 323 345 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby ··· 372 320 * Master on Chip Select 0.") No workaround exists for that ... so for 373 321 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH, 374 322 * and (c) will trigger that first erratum in some cases. 323 + * 324 + * When changing the clock polarity, the SPI controller waits for the next 325 + * transmission to enforce the default clock state. This may be an issue when 326 + * using a GPIO as Chip Select: the clock level is applied only when the first 327 + * packet is sent, once the CS has already been asserted. The workaround is to 328 + * avoid this by sending a first (dummy) message before toggling the CS state. 
375 329 */ 376 - 377 330 static void cs_activate(struct atmel_spi *as, struct spi_device *spi) 378 331 { 379 332 struct atmel_spi_device *asd = spi->controller_state; 333 + bool new_polarity; 380 334 int chip_select; 381 335 u32 mr; 382 336 ··· 411 353 } 412 354 413 355 mr = spi_readl(as, MR); 356 + 357 + /* 358 + * Ensures the clock polarity is valid before we actually 359 + * assert the CS to avoid spurious clock edges to be 360 + * processed by the spi devices. 361 + */ 362 + if (spi_get_csgpiod(spi, 0)) { 363 + new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0; 364 + if (new_polarity != as->last_polarity) { 365 + /* 366 + * Need to disable the GPIO before sending the dummy 367 + * message because it is already set by the spi core. 368 + */ 369 + gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0); 370 + atmel_spi_send_dummy(as, spi, chip_select); 371 + as->last_polarity = new_polarity; 372 + gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1); 373 + } 374 + } 414 375 } else { 415 376 u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0; 416 377 int i; ··· 1413 1336 } 1414 1337 1415 1338 dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer)); 1416 - ret_timeout = wait_for_completion_interruptible_timeout(&as->xfer_completion, 1417 - dma_timeout); 1418 - if (ret_timeout <= 0) { 1419 - dev_err(&spi->dev, "spi transfer %s\n", 1420 - !ret_timeout ? "timeout" : "canceled"); 1421 - as->done_status = ret_timeout < 0 ? ret_timeout : -EIO; 1339 + ret_timeout = wait_for_completion_timeout(&as->xfer_completion, dma_timeout); 1340 + if (!ret_timeout) { 1341 + dev_err(&spi->dev, "spi transfer timeout\n"); 1342 + as->done_status = -EIO; 1422 1343 } 1423 1344 1424 1345 if (as->done_status)
-1
drivers/spi/spi-cadence.c
··· 451 451 udelay(10); 452 452 453 453 cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0); 454 - spi_transfer_delay_exec(transfer); 455 454 456 455 cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT); 457 456 return transfer->len;
+11 -4
drivers/spi/spi-imx.c
··· 659 659 ctrl |= (spi_imx->target_burst * 8 - 1) 660 660 << MX51_ECSPI_CTRL_BL_OFFSET; 661 661 else { 662 - if (spi_imx->count >= 512) 663 - ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET; 664 - else 665 - ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1) 662 + if (spi_imx->usedma) { 663 + ctrl |= (spi_imx->bits_per_word * 664 + spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1) 666 665 << MX51_ECSPI_CTRL_BL_OFFSET; 666 + } else { 667 + if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST) 668 + ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1) 669 + << MX51_ECSPI_CTRL_BL_OFFSET; 670 + else 671 + ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1) 672 + << MX51_ECSPI_CTRL_BL_OFFSET; 673 + } 667 674 } 668 675 669 676 /* set clock speed */
+1 -1
drivers/thunderbolt/debugfs.c
··· 959 959 snprintf(dir_name, sizeof(dir_name), "port%d", port->port); 960 960 parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); 961 961 if (parent) 962 - debugfs_remove_recursive(debugfs_lookup("margining", parent)); 962 + debugfs_lookup_and_remove("margining", parent); 963 963 964 964 kfree(port->usb4->margining); 965 965 port->usb4->margining = NULL;
+5 -5
drivers/thunderbolt/usb4.c
··· 2311 2311 goto err_request; 2312 2312 2313 2313 /* 2314 - * Always keep 1000 Mb/s to make sure xHCI has at least some 2314 + * Always keep 900 Mb/s to make sure xHCI has at least some 2315 2315 * bandwidth available for isochronous traffic. 2316 2316 */ 2317 - if (consumed_up < 1000) 2318 - consumed_up = 1000; 2319 - if (consumed_down < 1000) 2320 - consumed_down = 1000; 2317 + if (consumed_up < 900) 2318 + consumed_up = 900; 2319 + if (consumed_down < 900) 2320 + consumed_down = 900; 2321 2321 2322 2322 ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up, 2323 2323 consumed_down);
+2 -1
drivers/ufs/core/ufshcd.c
··· 2274 2274 if (is_mcq_enabled(hba)) { 2275 2275 int utrd_size = sizeof(struct utp_transfer_req_desc); 2276 2276 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr; 2277 - struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot; 2277 + struct utp_transfer_req_desc *dest; 2278 2278 2279 2279 spin_lock(&hwq->sq_lock); 2280 + dest = hwq->sqe_base_addr + hwq->sq_tail_slot; 2280 2281 memcpy(dest, src, utrd_size); 2281 2282 ufshcd_inc_sq_tail(hwq); 2282 2283 spin_unlock(&hwq->sq_lock);
+4 -2
drivers/ufs/host/ufs-qcom.c
··· 1516 1516 err = ufs_qcom_clk_scale_up_pre_change(hba); 1517 1517 else 1518 1518 err = ufs_qcom_clk_scale_down_pre_change(hba); 1519 - if (err) 1520 - ufshcd_uic_hibern8_exit(hba); 1521 1519 1520 + if (err) { 1521 + ufshcd_uic_hibern8_exit(hba); 1522 + return err; 1523 + } 1522 1524 } else { 1523 1525 if (scale_up) 1524 1526 err = ufs_qcom_clk_scale_up_post_change(hba);
+54
drivers/ufs/host/ufshcd-pltfrm.c
··· 8 8 * Vinayak Holikatti <h.vinayak@samsung.com> 9 9 */ 10 10 11 + #include <linux/clk.h> 11 12 #include <linux/module.h> 12 13 #include <linux/platform_device.h> 13 14 #include <linux/pm_opp.h> ··· 214 213 } 215 214 } 216 215 216 + /** 217 + * ufshcd_parse_clock_min_max_freq - Parse MIN and MAX clocks freq 218 + * @hba: per adapter instance 219 + * 220 + * This function parses MIN and MAX frequencies of all clocks required 221 + * by the host drivers. 222 + * 223 + * Returns 0 for success and non-zero for failure 224 + */ 225 + static int ufshcd_parse_clock_min_max_freq(struct ufs_hba *hba) 226 + { 227 + struct list_head *head = &hba->clk_list_head; 228 + struct ufs_clk_info *clki; 229 + struct dev_pm_opp *opp; 230 + unsigned long freq; 231 + u8 idx = 0; 232 + 233 + list_for_each_entry(clki, head, list) { 234 + if (!clki->name) 235 + continue; 236 + 237 + clki->clk = devm_clk_get(hba->dev, clki->name); 238 + if (IS_ERR(clki->clk)) 239 + continue; 240 + 241 + /* Find Max Freq */ 242 + freq = ULONG_MAX; 243 + opp = dev_pm_opp_find_freq_floor_indexed(hba->dev, &freq, idx); 244 + if (IS_ERR(opp)) { 245 + dev_err(hba->dev, "Failed to find OPP for MAX frequency\n"); 246 + return PTR_ERR(opp); 247 + } 248 + clki->max_freq = dev_pm_opp_get_freq_indexed(opp, idx); 249 + dev_pm_opp_put(opp); 250 + 251 + /* Find Min Freq */ 252 + freq = 0; 253 + opp = dev_pm_opp_find_freq_ceil_indexed(hba->dev, &freq, idx); 254 + if (IS_ERR(opp)) { 255 + dev_err(hba->dev, "Failed to find OPP for MIN frequency\n"); 256 + return PTR_ERR(opp); 257 + } 258 + clki->min_freq = dev_pm_opp_get_freq_indexed(opp, idx++); 259 + dev_pm_opp_put(opp); 260 + } 261 + 262 + return 0; 263 + } 264 + 217 265 static int ufshcd_parse_operating_points(struct ufs_hba *hba) 218 266 { 219 267 struct device *dev = hba->dev; ··· 328 278 dev_err(dev, "Failed to add OPP table: %d\n", ret); 329 279 return ret; 330 280 } 281 + 282 + ret = ufshcd_parse_clock_min_max_freq(hba); 283 + if (ret) 284 + return ret; 331 285 332 
286 hba->use_pm_opp = true; 333 287
-3
drivers/usb/fotg210/fotg210-hcd.c
··· 428 428 temp = size; 429 429 size -= temp; 430 430 next += temp; 431 - if (temp == size) 432 - goto done; 433 431 } 434 432 435 433 temp = snprintf(next, size, "\n"); ··· 437 439 size -= temp; 438 440 next += temp; 439 441 440 - done: 441 442 *sizep = size; 442 443 *nextp = next; 443 444 }
+3 -3
drivers/usb/serial/ftdi_sio.c
··· 1033 1033 { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, 1034 1034 { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, 1035 1035 { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, 1036 - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, 1037 - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, 1038 - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, 1036 + { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) }, 1037 + { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) }, 1038 + { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) }, 1039 1039 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, 1040 1040 { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, 1041 1041 { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
+3 -3
drivers/usb/serial/ftdi_sio_ids.h
··· 1568 1568 #define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ 1569 1569 #define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ 1570 1570 #define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ 1571 - #define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ 1572 - #define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ 1573 - #define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ 1571 + #define ACTISENSE_UID_PID 0xD9AC /* USB Isolating Device */ 1572 + #define ACTISENSE_USA_PID 0xD9AD /* USB to Serial Adapter */ 1573 + #define ACTISENSE_NGX_PID 0xD9AE /* NGX NMEA2000 Gateway */ 1574 1574 #define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ 1575 1575 #define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ 1576 1576 #define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
+5
drivers/usb/serial/option.c
··· 272 272 #define QUECTEL_PRODUCT_RM500Q 0x0800 273 273 #define QUECTEL_PRODUCT_RM520N 0x0801 274 274 #define QUECTEL_PRODUCT_EC200U 0x0901 275 + #define QUECTEL_PRODUCT_EG912Y 0x6001 275 276 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 276 277 #define QUECTEL_PRODUCT_EC200A 0x6005 277 278 #define QUECTEL_PRODUCT_EM061K_LWW 0x6008 ··· 1233 1232 { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */ 1234 1233 .driver_info = RSVD(3) | ZLP }, 1235 1234 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, 1235 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) }, 1236 1236 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, 1237 1237 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), 1238 1238 .driver_info = ZLP }, ··· 1246 1244 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, 1247 1245 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, 1248 1246 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, 1247 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) }, 1249 1248 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, 1250 1249 1251 1250 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, ··· 2245 2242 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, 2246 2243 { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ 2247 2244 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, 2245 + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */ 2246 + .driver_info = RSVD(3) | RSVD(5) }, 2248 2247 { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */ 2249 2248 .driver_info = RSVD(3) }, 2250 2249 { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* 
Foxconn T99W368 MBIM */
+11
drivers/usb/storage/unusual_devs.h
··· 1306 1306 US_FL_INITIAL_READ10 ), 1307 1307 1308 1308 /* 1309 + * Patch by Tasos Sahanidis <tasos@tasossah.com> 1310 + * This flash drive always shows up with write protect enabled 1311 + * during the first mode sense. 1312 + */ 1313 + UNUSUAL_DEV(0x0951, 0x1697, 0x0100, 0x0100, 1314 + "Kingston", 1315 + "DT Ultimate G3", 1316 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1317 + US_FL_NO_WP_DETECT), 1318 + 1319 + /* 1309 1320 * This Pentax still camera is not conformant 1310 1321 * to the USB storage specification: - 1311 1322 * - It does not like the INQUIRY command. So we must handle this command
+1 -1
drivers/usb/typec/ucsi/ucsi_glink.c
··· 228 228 229 229 con_num = UCSI_CCI_CONNECTOR(cci); 230 230 if (con_num) { 231 - if (con_num < PMIC_GLINK_MAX_PORTS && 231 + if (con_num <= PMIC_GLINK_MAX_PORTS && 232 232 ucsi->port_orientation[con_num - 1]) { 233 233 int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]); 234 234
+4 -2
fs/afs/cell.c
··· 409 409 if (ret == -ENOMEM) 410 410 goto out_wake; 411 411 412 - ret = -ENOMEM; 413 412 vllist = afs_alloc_vlserver_list(0); 414 - if (!vllist) 413 + if (!vllist) { 414 + if (ret >= 0) 415 + ret = -ENOMEM; 415 416 goto out_wake; 417 + } 416 418 417 419 switch (ret) { 418 420 case -ENODATA:
+17 -14
fs/afs/dynroot.c
··· 114 114 struct afs_net *net = afs_d2net(dentry); 115 115 const char *name = dentry->d_name.name; 116 116 size_t len = dentry->d_name.len; 117 + char *result = NULL; 117 118 int ret; 118 119 119 120 /* Names prefixed with a dot are R/W mounts. */ ··· 132 131 } 133 132 134 133 ret = dns_query(net->net, "afsdb", name, len, "srv=1", 135 - NULL, NULL, false); 136 - if (ret == -ENODATA || ret == -ENOKEY) 134 + &result, NULL, false); 135 + if (ret == -ENODATA || ret == -ENOKEY || ret == 0) 137 136 ret = -ENOENT; 137 + if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) { 138 + struct dns_server_list_v1_header *v1 = (void *)result; 139 + 140 + if (v1->hdr.zero == 0 && 141 + v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST && 142 + v1->hdr.version == 1 && 143 + (v1->status != DNS_LOOKUP_GOOD && 144 + v1->status != DNS_LOOKUP_GOOD_WITH_BAD)) 145 + return -ENOENT; 146 + 147 + } 148 + 149 + kfree(result); 138 150 return ret; 139 151 } 140 152 ··· 266 252 return 1; 267 253 } 268 254 269 - /* 270 - * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't 271 - * sleep) 272 - * - called from dput() when d_count is going to 0. 273 - * - return 1 to request dentry be unhashed, 0 otherwise 274 - */ 275 - static int afs_dynroot_d_delete(const struct dentry *dentry) 276 - { 277 - return d_really_is_positive(dentry); 278 - } 279 - 280 255 const struct dentry_operations afs_dynroot_dentry_operations = { 281 256 .d_revalidate = afs_dynroot_d_revalidate, 282 - .d_delete = afs_dynroot_d_delete, 257 + .d_delete = always_delete_dentry, 283 258 .d_release = afs_d_release, 284 259 .d_automount = afs_d_automount, 285 260 };
+2
fs/afs/internal.h
··· 586 586 #define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */ 587 587 #define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */ 588 588 #define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */ 589 + #define AFS_VOLUME_RM_TREE 7 /* - Set if volume removed from cell->volumes */ 589 590 #ifdef CONFIG_AFS_FSCACHE 590 591 struct fscache_volume *cache; /* Caching cookie */ 591 592 #endif ··· 1514 1513 extern struct afs_volume *afs_create_volume(struct afs_fs_context *); 1515 1514 extern int afs_activate_volume(struct afs_volume *); 1516 1515 extern void afs_deactivate_volume(struct afs_volume *); 1516 + bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason); 1517 1517 extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace); 1518 1518 extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace); 1519 1519 extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
+23 -3
fs/afs/volume.c
··· 32 32 } else if (p->vid > volume->vid) { 33 33 pp = &(*pp)->rb_right; 34 34 } else { 35 - volume = afs_get_volume(p, afs_volume_trace_get_cell_insert); 36 - goto found; 35 + if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) { 36 + volume = p; 37 + goto found; 38 + } 39 + 40 + set_bit(AFS_VOLUME_RM_TREE, &volume->flags); 41 + rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes); 37 42 } 38 43 } 39 44 ··· 61 56 afs_volume_trace_remove); 62 57 write_seqlock(&cell->volume_lock); 63 58 hlist_del_rcu(&volume->proc_link); 64 - rb_erase(&volume->cell_node, &cell->volumes); 59 + if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags)) 60 + rb_erase(&volume->cell_node, &cell->volumes); 65 61 write_sequnlock(&cell->volume_lock); 66 62 } 67 63 } ··· 235 229 kfree_rcu(volume, rcu); 236 230 237 231 _leave(" [destroyed]"); 232 + } 233 + 234 + /* 235 + * Try to get a reference on a volume record. 236 + */ 237 + bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason) 238 + { 239 + int r; 240 + 241 + if (__refcount_inc_not_zero(&volume->ref, &r)) { 242 + trace_afs_volume(volume->vid, r + 1, reason); 243 + return true; 244 + } 245 + return false; 238 246 } 239 247 240 248 /*
+11 -3
fs/bcachefs/alloc_foreground.c
··· 1374 1374 goto alloc_done; 1375 1375 1376 1376 /* Don't retry from all devices if we're out of open buckets: */ 1377 - if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) 1378 - goto allocate_blocking; 1377 + if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) { 1378 + int ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, 1379 + target, erasure_code, 1380 + nr_replicas, &nr_effective, 1381 + &have_cache, watermark, 1382 + flags, cl); 1383 + if (!ret || 1384 + bch2_err_matches(ret, BCH_ERR_transaction_restart) || 1385 + bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) 1386 + goto alloc_done; 1387 + } 1379 1388 1380 1389 /* 1381 1390 * Only try to allocate cache (durability = 0 devices) from the ··· 1398 1389 &have_cache, watermark, 1399 1390 flags, cl); 1400 1391 } else { 1401 - allocate_blocking: 1402 1392 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, 1403 1393 target, erasure_code, 1404 1394 nr_replicas, &nr_effective,
+6 -2
fs/bcachefs/btree_iter.c
··· 3214 3214 mempool_exit(&c->btree_trans_pool); 3215 3215 } 3216 3216 3217 - int bch2_fs_btree_iter_init(struct bch_fs *c) 3217 + void bch2_fs_btree_iter_init_early(struct bch_fs *c) 3218 3218 { 3219 3219 struct btree_transaction_stats *s; 3220 - int ret; 3221 3220 3222 3221 for (s = c->btree_transaction_stats; 3223 3222 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats); ··· 3227 3228 3228 3229 INIT_LIST_HEAD(&c->btree_trans_list); 3229 3230 seqmutex_init(&c->btree_trans_lock); 3231 + } 3232 + 3233 + int bch2_fs_btree_iter_init(struct bch_fs *c) 3234 + { 3235 + int ret; 3230 3236 3231 3237 c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf); 3232 3238 if (!c->btree_trans_bufs)
+1
fs/bcachefs/btree_iter.h
··· 938 938 void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *); 939 939 940 940 void bch2_fs_btree_iter_exit(struct bch_fs *); 941 + void bch2_fs_btree_iter_init_early(struct bch_fs *); 941 942 int bch2_fs_btree_iter_init(struct bch_fs *); 942 943 943 944 #endif /* _BCACHEFS_BTREE_ITER_H */
+26 -13
fs/bcachefs/btree_update_interior.c
··· 99 99 100 100 /* Calculate ideal packed bkey format for new btree nodes: */ 101 101 102 - void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) 102 + static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) 103 103 { 104 104 struct bkey_packed *k; 105 105 struct bset_tree *t; ··· 125 125 return bch2_bkey_format_done(&s); 126 126 } 127 127 128 - static size_t btree_node_u64s_with_format(struct btree *b, 128 + static size_t btree_node_u64s_with_format(struct btree_nr_keys nr, 129 + struct bkey_format *old_f, 129 130 struct bkey_format *new_f) 130 131 { 131 - struct bkey_format *old_f = &b->format; 132 - 133 132 /* stupid integer promotion rules */ 134 133 ssize_t delta = 135 134 (((int) new_f->key_u64s - old_f->key_u64s) * 136 - (int) b->nr.packed_keys) + 135 + (int) nr.packed_keys) + 137 136 (((int) new_f->key_u64s - BKEY_U64s) * 138 - (int) b->nr.unpacked_keys); 137 + (int) nr.unpacked_keys); 139 138 140 - BUG_ON(delta + b->nr.live_u64s < 0); 139 + BUG_ON(delta + nr.live_u64s < 0); 141 140 142 - return b->nr.live_u64s + delta; 141 + return nr.live_u64s + delta; 143 142 } 144 143 145 144 /** ··· 146 147 * 147 148 * @c: filesystem handle 148 149 * @b: btree node to rewrite 150 + * @nr: number of keys for new node (i.e. b->nr) 149 151 * @new_f: bkey format to translate keys to 150 152 * 151 153 * Returns: true if all re-packed keys will be able to fit in a new node. 152 154 * 153 155 * Assumes all keys will successfully pack with the new format. 
154 156 */ 155 - bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, 157 + static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, 158 + struct btree_nr_keys nr, 156 159 struct bkey_format *new_f) 157 160 { 158 - size_t u64s = btree_node_u64s_with_format(b, new_f); 161 + size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f); 159 162 160 163 return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c); 161 164 } ··· 392 391 * The keys might expand with the new format - if they wouldn't fit in 393 392 * the btree node anymore, use the old format for now: 394 393 */ 395 - if (!bch2_btree_node_format_fits(as->c, b, &format)) 394 + if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format)) 396 395 format = b->format; 397 396 398 397 SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1); ··· 1346 1345 struct bkey_packed *out[2]; 1347 1346 struct bkey uk; 1348 1347 unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5; 1348 + struct { unsigned nr_keys, val_u64s; } nr_keys[2]; 1349 1349 int i; 1350 + 1351 + memset(&nr_keys, 0, sizeof(nr_keys)); 1350 1352 1351 1353 for (i = 0; i < 2; i++) { 1352 1354 BUG_ON(n[i]->nsets != 1); ··· 1372 1368 if (!i) 1373 1369 n1_pos = uk.p; 1374 1370 bch2_bkey_format_add_key(&format[i], &uk); 1371 + 1372 + nr_keys[i].nr_keys++; 1373 + nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k); 1375 1374 } 1376 1375 1377 1376 btree_set_min(n[0], b->data->min_key); ··· 1387 1380 bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key); 1388 1381 1389 1382 n[i]->data->format = bch2_bkey_format_done(&format[i]); 1383 + 1384 + unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s + 1385 + nr_keys[i].val_u64s; 1386 + if (__vstruct_bytes(struct btree_node, u64s) > btree_bytes(as->c)) 1387 + n[i]->data->format = b->format; 1388 + 1390 1389 btree_node_set_format(n[i], n[i]->data->format); 1391 1390 } 1392 1391 ··· 1835 1822 bch2_bkey_format_add_pos(&new_s, next->data->max_key); 1836 1823 new_f = 
bch2_bkey_format_done(&new_s); 1837 1824 1838 - sib_u64s = btree_node_u64s_with_format(b, &new_f) + 1839 - btree_node_u64s_with_format(m, &new_f); 1825 + sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) + 1826 + btree_node_u64s_with_format(m->nr, &m->format, &new_f); 1840 1827 1841 1828 if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) { 1842 1829 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
-4
fs/bcachefs/btree_update_interior.h
··· 6 6 #include "btree_locking.h" 7 7 #include "btree_update.h" 8 8 9 - void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *); 10 - bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *, 11 - struct bkey_format *); 12 - 13 9 #define BTREE_UPDATE_NODES_MAX ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES) 14 10 15 11 #define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
+2 -1
fs/bcachefs/data_update.c
··· 560 560 move_ctxt_wait_event(ctxt, 561 561 (locked = bch2_bucket_nocow_trylock(&c->nocow_locks, 562 562 PTR_BUCKET_POS(c, &p.ptr), 0)) || 563 - !atomic_read(&ctxt->read_sectors)); 563 + (!atomic_read(&ctxt->read_sectors) && 564 + !atomic_read(&ctxt->write_sectors))); 564 565 565 566 if (!locked) 566 567 bch2_bucket_nocow_lock(&c->nocow_locks,
+14 -5
fs/bcachefs/fs.c
··· 1143 1143 { 1144 1144 struct bch_inode_info *inode = to_bch_ei(vinode); 1145 1145 struct bch_inode_info *dir = to_bch_ei(vdir); 1146 - 1147 - if (*len < sizeof(struct bcachefs_fid_with_parent) / sizeof(u32)) 1148 - return FILEID_INVALID; 1146 + int min_len; 1149 1147 1150 1148 if (!S_ISDIR(inode->v.i_mode) && dir) { 1151 1149 struct bcachefs_fid_with_parent *fid = (void *) fh; 1152 1150 1151 + min_len = sizeof(*fid) / sizeof(u32); 1152 + if (*len < min_len) { 1153 + *len = min_len; 1154 + return FILEID_INVALID; 1155 + } 1156 + 1153 1157 fid->fid = bch2_inode_to_fid(inode); 1154 1158 fid->dir = bch2_inode_to_fid(dir); 1155 1159 1156 - *len = sizeof(*fid) / sizeof(u32); 1160 + *len = min_len; 1157 1161 return FILEID_BCACHEFS_WITH_PARENT; 1158 1162 } else { 1159 1163 struct bcachefs_fid *fid = (void *) fh; 1160 1164 1165 + min_len = sizeof(*fid) / sizeof(u32); 1166 + if (*len < min_len) { 1167 + *len = min_len; 1168 + return FILEID_INVALID; 1169 + } 1161 1170 *fid = bch2_inode_to_fid(inode); 1162 1171 1163 - *len = sizeof(*fid) / sizeof(u32); 1172 + *len = min_len; 1164 1173 return FILEID_BCACHEFS_WITHOUT_PARENT; 1165 1174 } 1166 1175 }
+3
fs/bcachefs/recovery.h
··· 10 10 static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c, 11 11 enum bch_recovery_pass pass) 12 12 { 13 + if (c->recovery_passes_explicit & BIT_ULL(pass)) 14 + return 0; 15 + 13 16 bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)", 14 17 bch2_recovery_passes[pass], pass, 15 18 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
+7
fs/bcachefs/super.c
··· 72 72 MODULE_LICENSE("GPL"); 73 73 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); 74 74 MODULE_DESCRIPTION("bcachefs filesystem"); 75 + MODULE_SOFTDEP("pre: crc32c"); 76 + MODULE_SOFTDEP("pre: crc64"); 77 + MODULE_SOFTDEP("pre: sha256"); 78 + MODULE_SOFTDEP("pre: chacha20"); 79 + MODULE_SOFTDEP("pre: poly1305"); 80 + MODULE_SOFTDEP("pre: xxhash"); 75 81 76 82 #define KTYPE(type) \ 77 83 static const struct attribute_group type ## _group = { \ ··· 720 714 721 715 bch2_fs_copygc_init(c); 722 716 bch2_fs_btree_key_cache_init_early(&c->btree_key_cache); 717 + bch2_fs_btree_iter_init_early(c); 723 718 bch2_fs_btree_interior_update_init_early(c); 724 719 bch2_fs_allocator_background_init(c); 725 720 bch2_fs_allocator_foreground_init(c);
+4 -2
fs/debugfs/file.c
··· 104 104 ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); 105 105 refcount_set(&fsd->active_users, 1); 106 106 init_completion(&fsd->active_users_drained); 107 + INIT_LIST_HEAD(&fsd->cancellations); 108 + mutex_init(&fsd->cancellations_mtx); 109 + 107 110 if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) { 111 + mutex_destroy(&fsd->cancellations_mtx); 108 112 kfree(fsd); 109 113 fsd = READ_ONCE(dentry->d_fsdata); 110 114 } 111 - INIT_LIST_HEAD(&fsd->cancellations); 112 - mutex_init(&fsd->cancellations_mtx); 113 115 } 114 116 115 117 /*
+1 -96
fs/nfsd/nfs4callback.c
··· 84 84 static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap, 85 85 size_t len) 86 86 { 87 - xdr_stream_encode_uint32_array(xdr, bitmap, len); 88 - } 89 - 90 - static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap, 91 - struct nfs4_cb_fattr *fattr) 92 - { 93 - fattr->ncf_cb_change = 0; 94 - fattr->ncf_cb_fsize = 0; 95 - if (bitmap[0] & FATTR4_WORD0_CHANGE) 96 - if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0) 97 - return -NFSERR_BAD_XDR; 98 - if (bitmap[0] & FATTR4_WORD0_SIZE) 99 - if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0) 100 - return -NFSERR_BAD_XDR; 101 - return 0; 87 + WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0); 102 88 } 103 89 104 90 /* ··· 358 372 } 359 373 360 374 /* 361 - * CB_GETATTR4args 362 - * struct CB_GETATTR4args { 363 - * nfs_fh4 fh; 364 - * bitmap4 attr_request; 365 - * }; 366 - * 367 - * The size and change attributes are the only one 368 - * guaranteed to be serviced by the client. 369 - */ 370 - static void 371 - encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr, 372 - struct nfs4_cb_fattr *fattr) 373 - { 374 - struct nfs4_delegation *dp = 375 - container_of(fattr, struct nfs4_delegation, dl_cb_fattr); 376 - struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle; 377 - 378 - encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR); 379 - encode_nfs_fh4(xdr, fh); 380 - encode_bitmap4(xdr, fattr->ncf_cb_bmap, ARRAY_SIZE(fattr->ncf_cb_bmap)); 381 - hdr->nops++; 382 - } 383 - 384 - /* 385 375 * CB_SEQUENCE4args 386 376 * 387 377 * struct CB_SEQUENCE4args { ··· 493 531 } 494 532 495 533 /* 496 - * 20.1. 
Operation 3: CB_GETATTR - Get Attributes 497 - */ 498 - static void nfs4_xdr_enc_cb_getattr(struct rpc_rqst *req, 499 - struct xdr_stream *xdr, const void *data) 500 - { 501 - const struct nfsd4_callback *cb = data; 502 - struct nfs4_cb_fattr *ncf = 503 - container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 504 - struct nfs4_cb_compound_hdr hdr = { 505 - .ident = cb->cb_clp->cl_cb_ident, 506 - .minorversion = cb->cb_clp->cl_minorversion, 507 - }; 508 - 509 - encode_cb_compound4args(xdr, &hdr); 510 - encode_cb_sequence4args(xdr, cb, &hdr); 511 - encode_cb_getattr4args(xdr, &hdr, ncf); 512 - encode_cb_nops(&hdr); 513 - } 514 - 515 - /* 516 534 * 20.2. Operation 4: CB_RECALL - Recall a Delegation 517 535 */ 518 536 static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr, ··· 545 603 void *__unused) 546 604 { 547 605 return 0; 548 - } 549 - 550 - /* 551 - * 20.1. Operation 3: CB_GETATTR - Get Attributes 552 - */ 553 - static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp, 554 - struct xdr_stream *xdr, 555 - void *data) 556 - { 557 - struct nfsd4_callback *cb = data; 558 - struct nfs4_cb_compound_hdr hdr; 559 - int status; 560 - u32 bitmap[3] = {0}; 561 - u32 attrlen; 562 - struct nfs4_cb_fattr *ncf = 563 - container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 564 - 565 - status = decode_cb_compound4res(xdr, &hdr); 566 - if (unlikely(status)) 567 - return status; 568 - 569 - status = decode_cb_sequence4res(xdr, cb); 570 - if (unlikely(status || cb->cb_seq_status)) 571 - return status; 572 - 573 - status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status); 574 - if (status) 575 - return status; 576 - if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0) 577 - return -NFSERR_BAD_XDR; 578 - if (xdr_stream_decode_u32(xdr, &attrlen) < 0) 579 - return -NFSERR_BAD_XDR; 580 - if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize))) 581 - return -NFSERR_BAD_XDR; 582 - status = decode_cb_fattr4(xdr, bitmap, ncf); 583 - return 
status; 584 606 } 585 607 586 608 /* ··· 855 949 PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock), 856 950 PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload), 857 951 PROC(CB_RECALL_ANY, COMPOUND, cb_recall_any, cb_recall_any), 858 - PROC(CB_GETATTR, COMPOUND, cb_getattr, cb_getattr), 859 952 }; 860 953 861 954 static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
+12 -104
fs/nfsd/nfs4state.c
··· 127 127 128 128 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops; 129 129 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops; 130 - static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops; 131 130 132 131 static struct workqueue_struct *laundry_wq; 133 132 ··· 1189 1190 dp->dl_recalled = false; 1190 1191 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, 1191 1192 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL); 1192 - nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client, 1193 - &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR); 1194 - dp->dl_cb_fattr.ncf_file_modified = false; 1195 - dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE; 1196 1193 get_nfs4_file(fp); 1197 1194 dp->dl_stid.sc_file = fp; 1198 1195 return dp; ··· 2896 2901 spin_unlock(&nn->client_lock); 2897 2902 } 2898 2903 2899 - static int 2900 - nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task) 2901 - { 2902 - struct nfs4_cb_fattr *ncf = 2903 - container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 2904 - 2905 - ncf->ncf_cb_status = task->tk_status; 2906 - switch (task->tk_status) { 2907 - case -NFS4ERR_DELAY: 2908 - rpc_delay(task, 2 * HZ); 2909 - return 0; 2910 - default: 2911 - return 1; 2912 - } 2913 - } 2914 - 2915 - static void 2916 - nfsd4_cb_getattr_release(struct nfsd4_callback *cb) 2917 - { 2918 - struct nfs4_cb_fattr *ncf = 2919 - container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 2920 - struct nfs4_delegation *dp = 2921 - container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 2922 - 2923 - nfs4_put_stid(&dp->dl_stid); 2924 - clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags); 2925 - wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY); 2926 - } 2927 - 2928 2904 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = { 2929 2905 .done = nfsd4_cb_recall_any_done, 2930 2906 .release = nfsd4_cb_recall_any_release, 2931 2907 }; 2932 - 2933 - static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = { 
2934 - .done = nfsd4_cb_getattr_done, 2935 - .release = nfsd4_cb_getattr_release, 2936 - }; 2937 - 2938 - void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf) 2939 - { 2940 - struct nfs4_delegation *dp = 2941 - container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 2942 - 2943 - if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags)) 2944 - return; 2945 - refcount_inc(&dp->dl_stid.sc_count); 2946 - nfsd4_run_cb(&ncf->ncf_getattr); 2947 - } 2948 2908 2949 2909 static struct nfs4_client *create_client(struct xdr_netobj name, 2950 2910 struct svc_rqst *rqstp, nfs4_verifier *verf) ··· 5635 5685 struct svc_fh *parent = NULL; 5636 5686 int cb_up; 5637 5687 int status = 0; 5638 - struct kstat stat; 5639 - struct path path; 5640 5688 5641 5689 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 5642 5690 open->op_recall = false; ··· 5672 5724 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { 5673 5725 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE; 5674 5726 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); 5675 - path.mnt = currentfh->fh_export->ex_path.mnt; 5676 - path.dentry = currentfh->fh_dentry; 5677 - if (vfs_getattr(&path, &stat, 5678 - (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE), 5679 - AT_STATX_SYNC_AS_STAT)) { 5680 - nfs4_put_stid(&dp->dl_stid); 5681 - destroy_delegation(dp); 5682 - goto out_no_deleg; 5683 - } 5684 - dp->dl_cb_fattr.ncf_cur_fsize = stat.size; 5685 - dp->dl_cb_fattr.ncf_initial_cinfo = 5686 - nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry)); 5687 5727 } else { 5688 5728 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 5689 5729 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); ··· 8428 8492 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict 8429 8493 * @rqstp: RPC transaction context 8430 8494 * @inode: file to be checked for a conflict 8431 - * @modified: return true if file was modified 8432 - * @size: new size of file if modified is true 8433 8495 * 8434 8496 * This function is called when there is a 
conflict between a write 8435 8497 * delegation and a change/size GETATTR from another client. The server ··· 8436 8502 * delegation before replying to the GETATTR. See RFC 8881 section 8437 8503 * 18.7.4. 8438 8504 * 8505 + * The current implementation does not support CB_GETATTR yet. However 8506 + * this can avoid recalling the delegation could be added in follow up 8507 + * work. 8508 + * 8439 8509 * Returns 0 if there is no conflict; otherwise an nfs_stat 8440 8510 * code is returned. 8441 8511 */ 8442 8512 __be32 8443 - nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode, 8444 - bool *modified, u64 *size) 8513 + nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) 8445 8514 { 8446 - struct file_lock_context *ctx; 8447 - struct nfs4_delegation *dp; 8448 - struct nfs4_cb_fattr *ncf; 8449 - struct file_lock *fl; 8450 - struct iattr attrs; 8451 8515 __be32 status; 8516 + struct file_lock_context *ctx; 8517 + struct file_lock *fl; 8518 + struct nfs4_delegation *dp; 8452 8519 8453 - might_sleep(); 8454 - 8455 - *modified = false; 8456 8520 ctx = locks_inode_context(inode); 8457 8521 if (!ctx) 8458 8522 return 0; ··· 8477 8545 break_lease: 8478 8546 spin_unlock(&ctx->flc_lock); 8479 8547 nfsd_stats_wdeleg_getattr_inc(); 8480 - 8481 - dp = fl->fl_owner; 8482 - ncf = &dp->dl_cb_fattr; 8483 - nfs4_cb_getattr(&dp->dl_cb_fattr); 8484 - wait_on_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, TASK_INTERRUPTIBLE); 8485 - if (ncf->ncf_cb_status) { 8486 - status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 8487 - if (status != nfserr_jukebox || 8488 - !nfsd_wait_for_delegreturn(rqstp, inode)) 8489 - return status; 8490 - } 8491 - if (!ncf->ncf_file_modified && 8492 - (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || 8493 - ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) 8494 - ncf->ncf_file_modified = true; 8495 - if (ncf->ncf_file_modified) { 8496 - /* 8497 - * The server would not update the file's metadata 8498 - * with the client's 
modified size. 8499 - */ 8500 - attrs.ia_mtime = attrs.ia_ctime = current_time(inode); 8501 - attrs.ia_valid = ATTR_MTIME | ATTR_CTIME; 8502 - setattr_copy(&nop_mnt_idmap, inode, &attrs); 8503 - mark_inode_dirty(inode); 8504 - ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; 8505 - *size = ncf->ncf_cur_fsize; 8506 - *modified = true; 8507 - } 8548 + status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 8549 + if (status != nfserr_jukebox || 8550 + !nfsd_wait_for_delegreturn(rqstp, inode)) 8551 + return status; 8508 8552 return 0; 8509 8553 } 8510 8554 break;
+2 -5
fs/nfsd/nfs4xdr.c
··· 3505 3505 u32 attrmask[3]; 3506 3506 unsigned long mask[2]; 3507 3507 } u; 3508 - bool file_modified; 3509 3508 unsigned long bit; 3510 - u64 size = 0; 3511 3509 3512 3510 WARN_ON_ONCE(bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1); 3513 3511 WARN_ON_ONCE(!nfsd_attrs_supported(minorversion, bmval)); ··· 3532 3534 } 3533 3535 args.size = 0; 3534 3536 if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) { 3535 - status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry), 3536 - &file_modified, &size); 3537 + status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry)); 3537 3538 if (status) 3538 3539 goto out; 3539 3540 } ··· 3542 3545 AT_STATX_SYNC_AS_STAT); 3543 3546 if (err) 3544 3547 goto out_nfserr; 3545 - args.size = file_modified ? size : args.stat.size; 3548 + args.size = args.stat.size; 3546 3549 3547 3550 if (!(args.stat.result_mask & STATX_BTIME)) 3548 3551 /* underlying FS does not offer btime so we can't share it */
+10 -8
fs/nfsd/nfsctl.c
··· 705 705 706 706 err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); 707 707 708 - if (err >= 0 && 709 - !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1)) 708 + if (err < 0 && !nn->nfsd_serv->sv_nrthreads && !nn->keep_active) 709 + nfsd_last_thread(net); 710 + else if (err >= 0 && 711 + !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1)) 710 712 svc_get(nn->nfsd_serv); 711 713 712 714 nfsd_put(net); ··· 759 757 svc_xprt_put(xprt); 760 758 } 761 759 out_err: 760 + if (!nn->nfsd_serv->sv_nrthreads && !nn->keep_active) 761 + nfsd_last_thread(net); 762 + 762 763 nfsd_put(net); 763 764 return err; 764 765 } ··· 1515 1510 int ret = -ENODEV; 1516 1511 1517 1512 mutex_lock(&nfsd_mutex); 1518 - if (nn->nfsd_serv) { 1519 - svc_get(nn->nfsd_serv); 1513 + if (nn->nfsd_serv) 1520 1514 ret = 0; 1521 - } 1522 - mutex_unlock(&nfsd_mutex); 1515 + else 1516 + mutex_unlock(&nfsd_mutex); 1523 1517 1524 1518 return ret; 1525 1519 } ··· 1690 1686 */ 1691 1687 int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb) 1692 1688 { 1693 - mutex_lock(&nfsd_mutex); 1694 - nfsd_put(sock_net(cb->skb->sk)); 1695 1689 mutex_unlock(&nfsd_mutex); 1696 1690 1697 1691 return 0;
+1
fs/nfsd/nfsd.h
··· 155 155 int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change); 156 156 void nfsd_reset_versions(struct nfsd_net *nn); 157 157 int nfsd_create_serv(struct net *net); 158 + void nfsd_last_thread(struct net *net); 158 159 159 160 extern int nfsd_max_blksize; 160 161
+1 -1
fs/nfsd/nfssvc.c
··· 542 542 /* Only used under nfsd_mutex, so this atomic may be overkill: */ 543 543 static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0); 544 544 545 - static void nfsd_last_thread(struct net *net) 545 + void nfsd_last_thread(struct net *net) 546 546 { 547 547 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 548 548 struct svc_serv *serv = nn->nfsd_serv;
+1 -24
fs/nfsd/state.h
··· 117 117 time64_t cpntf_time; /* last time stateid used */ 118 118 }; 119 119 120 - struct nfs4_cb_fattr { 121 - struct nfsd4_callback ncf_getattr; 122 - u32 ncf_cb_status; 123 - u32 ncf_cb_bmap[1]; 124 - 125 - /* from CB_GETATTR reply */ 126 - u64 ncf_cb_change; 127 - u64 ncf_cb_fsize; 128 - 129 - unsigned long ncf_cb_flags; 130 - bool ncf_file_modified; 131 - u64 ncf_initial_cinfo; 132 - u64 ncf_cur_fsize; 133 - }; 134 - 135 - /* bits for ncf_cb_flags */ 136 - #define CB_GETATTR_BUSY 0 137 - 138 120 /* 139 121 * Represents a delegation stateid. The nfs4_client holds references to these 140 122 * and they are put when it is being destroyed or when the delegation is ··· 150 168 int dl_retries; 151 169 struct nfsd4_callback dl_recall; 152 170 bool dl_recalled; 153 - 154 - /* for CB_GETATTR */ 155 - struct nfs4_cb_fattr dl_cb_fattr; 156 171 }; 157 172 158 173 #define cb_to_delegation(cb) \ ··· 640 661 NFSPROC4_CLNT_CB_SEQUENCE, 641 662 NFSPROC4_CLNT_CB_NOTIFY_LOCK, 642 663 NFSPROC4_CLNT_CB_RECALL_ANY, 643 - NFSPROC4_CLNT_CB_GETATTR, 644 664 }; 645 665 646 666 /* Returns true iff a is later than b: */ ··· 732 754 } 733 755 734 756 extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, 735 - struct inode *inode, bool *file_modified, u64 *size); 736 - extern void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf); 757 + struct inode *inode); 737 758 #endif /* NFSD4_STATE_H */
-18
fs/nfsd/xdr4cb.h
··· 54 54 #define NFS4_dec_cb_recall_any_sz (cb_compound_dec_hdr_sz + \ 55 55 cb_sequence_dec_sz + \ 56 56 op_dec_sz) 57 - 58 - /* 59 - * 1: CB_GETATTR opcode (32-bit) 60 - * N: file_handle 61 - * 1: number of entry in attribute array (32-bit) 62 - * 1: entry 0 in attribute array (32-bit) 63 - */ 64 - #define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \ 65 - cb_sequence_enc_sz + \ 66 - 1 + enc_nfs4_fh_sz + 1 + 1) 67 - /* 68 - * 4: fattr_bitmap_maxsz 69 - * 1: attribute array len 70 - * 2: change attr (64-bit) 71 - * 2: size (64-bit) 72 - */ 73 - #define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \ 74 - cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz)
+3 -2
fs/overlayfs/copy_up.c
··· 753 753 path.dentry = temp; 754 754 err = ovl_copy_up_data(c, &path); 755 755 /* 756 - * We cannot hold lock_rename() throughout this helper, because or 756 + * We cannot hold lock_rename() throughout this helper, because of 757 757 * lock ordering with sb_writers, which shouldn't be held when calling 758 758 * ovl_copy_up_data(), so lock workdir and destdir and make sure that 759 759 * temp wasn't moved before copy up completion or cleanup. 760 - * If temp was moved, abort without the cleanup. 761 760 */ 762 761 ovl_start_write(c->dentry); 763 762 if (lock_rename(c->workdir, c->destdir) != NULL || 764 763 temp->d_parent != c->workdir) { 764 + /* temp or workdir moved underneath us? abort without cleanup */ 765 + dput(temp); 765 766 err = -EIO; 766 767 goto unlock; 767 768 } else if (err) {
+7 -5
fs/smb/client/cifs_debug.c
··· 40 40 #ifdef CONFIG_CIFS_DEBUG2 41 41 struct smb_hdr *smb = buf; 42 42 43 - cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n", 44 - smb->Command, smb->Status.CifsError, 45 - smb->Flags, smb->Flags2, smb->Mid, smb->Pid); 46 - cifs_dbg(VFS, "smb buf %p len %u\n", smb, 47 - server->ops->calc_smb_size(smb)); 43 + cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n", 44 + smb->Command, smb->Status.CifsError, smb->Flags, 45 + smb->Flags2, smb->Mid, smb->Pid, smb->WordCount); 46 + if (!server->ops->check_message(buf, server->total_read, server)) { 47 + cifs_dbg(VFS, "smb buf %p len %u\n", smb, 48 + server->ops->calc_smb_size(smb)); 49 + } 48 50 #endif /* CONFIG_CIFS_DEBUG2 */ 49 51 } 50 52
+2 -1
fs/smb/client/cifsglob.h
··· 532 532 struct mid_q_entry **, char **, int *); 533 533 enum securityEnum (*select_sectype)(struct TCP_Server_Info *, 534 534 enum securityEnum); 535 - int (*next_header)(char *); 535 + int (*next_header)(struct TCP_Server_Info *server, char *buf, 536 + unsigned int *noff); 536 537 /* ioctl passthrough for query_info */ 537 538 int (*ioctl_query_info)(const unsigned int xid, 538 539 struct cifs_tcon *tcon,
+6 -1
fs/smb/client/connect.c
··· 1201 1201 server->total_read += length; 1202 1202 1203 1203 if (server->ops->next_header) { 1204 - next_offset = server->ops->next_header(buf); 1204 + if (server->ops->next_header(server, buf, &next_offset)) { 1205 + cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n", 1206 + __func__, next_offset); 1207 + cifs_reconnect(server, true); 1208 + continue; 1209 + } 1205 1210 if (next_offset) 1206 1211 server->pdu_size = next_offset; 1207 1212 }
+1 -1
fs/smb/client/file.c
··· 4671 4671 /* we do not want atime to be less than mtime, it broke some apps */ 4672 4672 atime = inode_set_atime_to_ts(inode, current_time(inode)); 4673 4673 mtime = inode_get_mtime(inode); 4674 - if (timespec64_compare(&atime, &mtime)) 4674 + if (timespec64_compare(&atime, &mtime) < 0) 4675 4675 inode_set_atime_to_ts(inode, inode_get_mtime(inode)); 4676 4676 4677 4677 if (PAGE_SIZE > rc)
+4
fs/smb/client/misc.c
··· 363 363 cifs_dbg(VFS, "Length less than smb header size\n"); 364 364 } 365 365 return -EIO; 366 + } else if (total_read < sizeof(*smb) + 2 * smb->WordCount) { 367 + cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n", 368 + __func__, smb->WordCount); 369 + return -EIO; 366 370 } 367 371 368 372 /* otherwise, there is enough to get to the BCC */
+19 -31
fs/smb/client/sess.c
··· 439 439 cifs_dbg(FYI, "unable to find a suitable iface\n"); 440 440 } 441 441 442 - if (!chan_index && !iface) { 442 + if (!iface) { 443 443 cifs_dbg(FYI, "unable to get the interface matching: %pIS\n", 444 444 &ss); 445 445 spin_unlock(&ses->iface_lock); ··· 447 447 } 448 448 449 449 /* now drop the ref to the current iface */ 450 - if (old_iface && iface) { 450 + if (old_iface) { 451 451 cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n", 452 452 &old_iface->sockaddr, 453 453 &iface->sockaddr); ··· 460 460 461 461 kref_put(&old_iface->refcount, release_iface); 462 462 } else if (old_iface) { 463 - cifs_dbg(FYI, "releasing ref to iface: %pIS\n", 463 + /* if a new candidate is not found, keep things as is */ 464 + cifs_dbg(FYI, "could not replace iface: %pIS\n", 464 465 &old_iface->sockaddr); 465 - 466 - old_iface->num_channels--; 467 - if (old_iface->weight_fulfilled) 468 - old_iface->weight_fulfilled--; 469 - 470 - kref_put(&old_iface->refcount, release_iface); 471 466 } else if (!chan_index) { 472 467 /* special case: update interface for primary channel */ 473 - cifs_dbg(FYI, "referencing primary channel iface: %pIS\n", 474 - &iface->sockaddr); 475 - iface->num_channels++; 476 - iface->weight_fulfilled++; 477 - } else { 478 - WARN_ON(!iface); 479 - cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr); 468 + if (iface) { 469 + cifs_dbg(FYI, "referencing primary channel iface: %pIS\n", 470 + &iface->sockaddr); 471 + iface->num_channels++; 472 + iface->weight_fulfilled++; 473 + } 480 474 } 481 475 spin_unlock(&ses->iface_lock); 482 476 483 - spin_lock(&ses->chan_lock); 484 - chan_index = cifs_ses_get_chan_index(ses, server); 485 - if (chan_index == CIFS_INVAL_CHAN_INDEX) { 477 + if (iface) { 478 + spin_lock(&ses->chan_lock); 479 + chan_index = cifs_ses_get_chan_index(ses, server); 480 + if (chan_index == CIFS_INVAL_CHAN_INDEX) { 481 + spin_unlock(&ses->chan_lock); 482 + return 0; 483 + } 484 + 485 + ses->chans[chan_index].iface = iface; 486 486 
spin_unlock(&ses->chan_lock); 487 - return 0; 488 487 } 489 - 490 - ses->chans[chan_index].iface = iface; 491 - 492 - /* No iface is found. if secondary chan, drop connection */ 493 - if (!iface && SERVER_IS_CHAN(server)) 494 - ses->chans[chan_index].server = NULL; 495 - 496 - spin_unlock(&ses->chan_lock); 497 - 498 - if (!iface && SERVER_IS_CHAN(server)) 499 - cifs_put_tcp_session(server, false); 500 488 501 489 return rc; 502 490 }
+15 -15
fs/smb/client/smb2misc.c
··· 173 173 } 174 174 175 175 mid = le64_to_cpu(shdr->MessageId); 176 + if (check_smb2_hdr(shdr, mid)) 177 + return 1; 178 + 179 + if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { 180 + cifs_dbg(VFS, "Invalid structure size %u\n", 181 + le16_to_cpu(shdr->StructureSize)); 182 + return 1; 183 + } 184 + 185 + command = le16_to_cpu(shdr->Command); 186 + if (command >= NUMBER_OF_SMB2_COMMANDS) { 187 + cifs_dbg(VFS, "Invalid SMB2 command %d\n", command); 188 + return 1; 189 + } 190 + 176 191 if (len < pdu_size) { 177 192 if ((len >= hdr_size) 178 193 && (shdr->Status != 0)) { ··· 205 190 if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE) { 206 191 cifs_dbg(VFS, "SMB length greater than maximum, mid=%llu\n", 207 192 mid); 208 - return 1; 209 - } 210 - 211 - if (check_smb2_hdr(shdr, mid)) 212 - return 1; 213 - 214 - if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { 215 - cifs_dbg(VFS, "Invalid structure size %u\n", 216 - le16_to_cpu(shdr->StructureSize)); 217 - return 1; 218 - } 219 - 220 - command = le16_to_cpu(shdr->Command); 221 - if (command >= NUMBER_OF_SMB2_COMMANDS) { 222 - cifs_dbg(VFS, "Invalid SMB2 command %d\n", command); 223 193 return 1; 224 194 } 225 195
+16 -9
fs/smb/client/smb2ops.c
··· 403 403 cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n", 404 404 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId, 405 405 shdr->Id.SyncId.ProcessId); 406 - cifs_server_dbg(VFS, "smb buf %p len %u\n", buf, 407 - server->ops->calc_smb_size(buf)); 406 + if (!server->ops->check_message(buf, server->total_read, server)) { 407 + cifs_server_dbg(VFS, "smb buf %p len %u\n", buf, 408 + server->ops->calc_smb_size(buf)); 409 + } 408 410 #endif 409 411 } 410 412 ··· 5076 5074 NULL, 0, false); 5077 5075 } 5078 5076 5079 - static int 5080 - smb2_next_header(char *buf) 5077 + static int smb2_next_header(struct TCP_Server_Info *server, char *buf, 5078 + unsigned int *noff) 5081 5079 { 5082 5080 struct smb2_hdr *hdr = (struct smb2_hdr *)buf; 5083 5081 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf; 5084 5082 5085 - if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) 5086 - return sizeof(struct smb2_transform_hdr) + 5087 - le32_to_cpu(t_hdr->OriginalMessageSize); 5088 - 5089 - return le32_to_cpu(hdr->NextCommand); 5083 + if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { 5084 + *noff = le32_to_cpu(t_hdr->OriginalMessageSize); 5085 + if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff))) 5086 + return -EINVAL; 5087 + } else { 5088 + *noff = le32_to_cpu(hdr->NextCommand); 5089 + } 5090 + if (unlikely(*noff && *noff < MID_HEADER_SIZE(server))) 5091 + return -EINVAL; 5092 + return 0; 5090 5093 } 5091 5094 5092 5095 int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+23 -9
fs/smb/client/smb2pdu.c
··· 411 411 } 412 412 413 413 if (smb2_command != SMB2_INTERNAL_CMD) 414 - if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0)) 415 - cifs_put_tcp_session(server, false); 414 + mod_delayed_work(cifsiod_wq, &server->reconnect, 0); 416 415 417 416 atomic_inc(&tconInfoReconnectCount); 418 417 out: ··· 470 471 void **request_buf, unsigned int *total_len) 471 472 { 472 473 /* BB eventually switch this to SMB2 specific small buf size */ 473 - if (smb2_command == SMB2_SET_INFO) 474 + switch (smb2_command) { 475 + case SMB2_SET_INFO: 476 + case SMB2_QUERY_INFO: 474 477 *request_buf = cifs_buf_get(); 475 - else 478 + break; 479 + default: 476 480 *request_buf = cifs_small_buf_get(); 481 + break; 482 + } 477 483 if (*request_buf == NULL) { 478 484 /* BB should we add a retry in here if not a writepage? */ 479 485 return -ENOMEM; ··· 3591 3587 struct smb2_query_info_req *req; 3592 3588 struct kvec *iov = rqst->rq_iov; 3593 3589 unsigned int total_len; 3590 + size_t len; 3594 3591 int rc; 3592 + 3593 + if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) || 3594 + len > CIFSMaxBufSize)) 3595 + return -EINVAL; 3595 3596 3596 3597 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, 3597 3598 (void **) &req, &total_len); ··· 3619 3610 3620 3611 iov[0].iov_base = (char *)req; 3621 3612 /* 1 for Buffer */ 3622 - iov[0].iov_len = total_len - 1 + input_len; 3613 + iov[0].iov_len = len; 3623 3614 return 0; 3624 3615 } 3625 3616 ··· 3627 3618 SMB2_query_info_free(struct smb_rqst *rqst) 3628 3619 { 3629 3620 if (rqst && rqst->rq_iov) 3630 - cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3621 + cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3631 3622 } 3632 3623 3633 3624 static int ··· 5502 5493 return 0; 5503 5494 } 5504 5495 5496 + static inline void free_qfs_info_req(struct kvec *iov) 5497 + { 5498 + cifs_buf_release(iov->iov_base); 5499 + } 5500 + 5505 5501 int 5506 5502 SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon 
*tcon, 5507 5503 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) ··· 5538 5524 5539 5525 rc = cifs_send_recv(xid, ses, server, 5540 5526 &rqst, &resp_buftype, flags, &rsp_iov); 5541 - cifs_small_buf_release(iov.iov_base); 5527 + free_qfs_info_req(&iov); 5542 5528 if (rc) { 5543 5529 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 5544 5530 goto posix_qfsinf_exit; ··· 5589 5575 5590 5576 rc = cifs_send_recv(xid, ses, server, 5591 5577 &rqst, &resp_buftype, flags, &rsp_iov); 5592 - cifs_small_buf_release(iov.iov_base); 5578 + free_qfs_info_req(&iov); 5593 5579 if (rc) { 5594 5580 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 5595 5581 goto qfsinf_exit; ··· 5656 5642 5657 5643 rc = cifs_send_recv(xid, ses, server, 5658 5644 &rqst, &resp_buftype, flags, &rsp_iov); 5659 - cifs_small_buf_release(iov.iov_base); 5645 + free_qfs_info_req(&iov); 5660 5646 if (rc) { 5661 5647 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 5662 5648 goto qfsattr_exit;
+9 -3
fs/tracefs/event_inode.c
··· 148 148 .release = eventfs_release, 149 149 }; 150 150 151 - static void update_inode_attr(struct inode *inode, struct eventfs_attr *attr, umode_t mode) 151 + static void update_inode_attr(struct dentry *dentry, struct inode *inode, 152 + struct eventfs_attr *attr, umode_t mode) 152 153 { 153 154 if (!attr) { 154 155 inode->i_mode = mode; ··· 163 162 164 163 if (attr->mode & EVENTFS_SAVE_UID) 165 164 inode->i_uid = attr->uid; 165 + else 166 + inode->i_uid = d_inode(dentry->d_parent)->i_uid; 166 167 167 168 if (attr->mode & EVENTFS_SAVE_GID) 168 169 inode->i_gid = attr->gid; 170 + else 171 + inode->i_gid = d_inode(dentry->d_parent)->i_gid; 169 172 } 170 173 171 174 /** ··· 211 206 return eventfs_failed_creating(dentry); 212 207 213 208 /* If the user updated the directory's attributes, use them */ 214 - update_inode_attr(inode, attr, mode); 209 + update_inode_attr(dentry, inode, attr, mode); 215 210 216 211 inode->i_op = &eventfs_file_inode_operations; 217 212 inode->i_fop = fop; ··· 247 242 return eventfs_failed_creating(dentry); 248 243 249 244 /* If the user updated the directory's attributes, use them */ 250 - update_inode_attr(inode, &ei->attr, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); 245 + update_inode_attr(dentry, inode, &ei->attr, 246 + S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); 251 247 252 248 inode->i_op = &eventfs_root_dir_inode_operations; 253 249 inode->i_fop = &eventfs_file_operations;
+4
include/linux/bpf_types.h
··· 142 142 #ifdef CONFIG_NET 143 143 BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) 144 144 BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) 145 + BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter) 146 + BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx) 147 + BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit) 145 148 #endif 146 149 #ifdef CONFIG_PERF_EVENTS 147 150 BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) 148 151 #endif 149 152 BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) 150 153 BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops) 154 + BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
+2
include/linux/device.h
··· 1007 1007 mutex_unlock(&dev->mutex); 1008 1008 } 1009 1009 1010 + DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T)) 1011 + 1010 1012 static inline void device_lock_assert(struct device *dev) 1011 1013 { 1012 1014 lockdep_assert_held(&dev->mutex);
-4
include/linux/hid-sensor-ids.h
··· 21 21 #define HID_USAGE_SENSOR_ALS 0x200041 22 22 #define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0 23 23 #define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1 24 - #define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2 25 - #define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3 26 - #define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4 27 - #define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5 28 24 29 25 /* PROX (200011) */ 30 26 #define HID_USAGE_SENSOR_PROX 0x200011
+2 -1
include/linux/ieee80211.h
··· 4447 4447 action != WLAN_PUB_ACTION_LOC_TRACK_NOTI && 4448 4448 action != WLAN_PUB_ACTION_FTM_REQUEST && 4449 4449 action != WLAN_PUB_ACTION_FTM_RESPONSE && 4450 - action != WLAN_PUB_ACTION_FILS_DISCOVERY; 4450 + action != WLAN_PUB_ACTION_FILS_DISCOVERY && 4451 + action != WLAN_PUB_ACTION_VENDOR_SPECIFIC; 4451 4452 } 4452 4453 4453 4454 /**
+1
include/linux/key-type.h
··· 73 73 74 74 unsigned int flags; 75 75 #define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */ 76 + #define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */ 76 77 77 78 /* vet a description */ 78 79 int (*vet_description)(const char *description);
+7 -2
include/net/bluetooth/hci_core.h
··· 189 189 struct smp_csrk { 190 190 bdaddr_t bdaddr; 191 191 u8 bdaddr_type; 192 + u8 link_type; 192 193 u8 type; 193 194 u8 val[16]; 194 195 }; ··· 199 198 struct rcu_head rcu; 200 199 bdaddr_t bdaddr; 201 200 u8 bdaddr_type; 201 + u8 link_type; 202 202 u8 authenticated; 203 203 u8 type; 204 204 u8 enc_size; ··· 214 212 bdaddr_t rpa; 215 213 bdaddr_t bdaddr; 216 214 u8 addr_type; 215 + u8 link_type; 217 216 u8 val[16]; 218 217 }; 219 218 ··· 222 219 struct list_head list; 223 220 struct rcu_head rcu; 224 221 bdaddr_t bdaddr; 222 + u8 bdaddr_type; 223 + u8 link_type; 225 224 u8 type; 226 225 u8 val[HCI_LINK_KEY_SIZE]; 227 226 u8 pin_len; ··· 1232 1227 continue; 1233 1228 1234 1229 /* Match CIG ID if set */ 1235 - if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig) 1230 + if (cig != c->iso_qos.ucast.cig) 1236 1231 continue; 1237 1232 1238 1233 /* Match CIS ID if set */ 1239 - if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis) 1234 + if (id != c->iso_qos.ucast.cis) 1240 1235 continue; 1241 1236 1242 1237 /* Match destination address if set */
+13 -51
include/net/ip6_fib.h
··· 179 179 180 180 refcount_t fib6_ref; 181 181 unsigned long expires; 182 - 183 - struct hlist_node gc_link; 184 - 185 182 struct dst_metrics *fib6_metrics; 186 183 #define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1] 187 184 ··· 247 250 return rt->fib6_src.plen > 0; 248 251 } 249 252 253 + static inline void fib6_clean_expires(struct fib6_info *f6i) 254 + { 255 + f6i->fib6_flags &= ~RTF_EXPIRES; 256 + f6i->expires = 0; 257 + } 258 + 259 + static inline void fib6_set_expires(struct fib6_info *f6i, 260 + unsigned long expires) 261 + { 262 + f6i->expires = expires; 263 + f6i->fib6_flags |= RTF_EXPIRES; 264 + } 265 + 250 266 static inline bool fib6_check_expired(const struct fib6_info *f6i) 251 267 { 252 268 if (f6i->fib6_flags & RTF_EXPIRES) 253 269 return time_after(jiffies, f6i->expires); 254 270 return false; 255 - } 256 - 257 - static inline bool fib6_has_expires(const struct fib6_info *f6i) 258 - { 259 - return f6i->fib6_flags & RTF_EXPIRES; 260 271 } 261 272 262 273 /* Function to safely get fn->fn_sernum for passed in rt ··· 388 383 struct inet_peer_base tb6_peers; 389 384 unsigned int flags; 390 385 unsigned int fib_seq; 391 - struct hlist_head tb6_gc_hlist; /* GC candidates */ 392 386 #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) 393 387 }; 394 388 ··· 503 499 void fib6_gc_cleanup(void); 504 500 505 501 int fib6_init(void); 506 - 507 - /* fib6_info must be locked by the caller, and fib6_info->fib6_table can be 508 - * NULL. 509 - */ 510 - static inline void fib6_set_expires_locked(struct fib6_info *f6i, 511 - unsigned long expires) 512 - { 513 - struct fib6_table *tb6; 514 - 515 - tb6 = f6i->fib6_table; 516 - f6i->expires = expires; 517 - if (tb6 && !fib6_has_expires(f6i)) 518 - hlist_add_head(&f6i->gc_link, &tb6->tb6_gc_hlist); 519 - f6i->fib6_flags |= RTF_EXPIRES; 520 - } 521 - 522 - /* fib6_info must be locked by the caller, and fib6_info->fib6_table can be 523 - * NULL. 
If fib6_table is NULL, the fib6_info will no be inserted into the 524 - * list of GC candidates until it is inserted into a table. 525 - */ 526 - static inline void fib6_set_expires(struct fib6_info *f6i, 527 - unsigned long expires) 528 - { 529 - spin_lock_bh(&f6i->fib6_table->tb6_lock); 530 - fib6_set_expires_locked(f6i, expires); 531 - spin_unlock_bh(&f6i->fib6_table->tb6_lock); 532 - } 533 - 534 - static inline void fib6_clean_expires_locked(struct fib6_info *f6i) 535 - { 536 - if (fib6_has_expires(f6i)) 537 - hlist_del_init(&f6i->gc_link); 538 - f6i->fib6_flags &= ~RTF_EXPIRES; 539 - f6i->expires = 0; 540 - } 541 - 542 - static inline void fib6_clean_expires(struct fib6_info *f6i) 543 - { 544 - spin_lock_bh(&f6i->fib6_table->tb6_lock); 545 - fib6_clean_expires_locked(f6i); 546 - spin_unlock_bh(&f6i->fib6_table->tb6_lock); 547 - } 548 502 549 503 struct ipv6_route_iter { 550 504 struct seq_net_private p;
+5
include/net/sock.h
··· 2799 2799 return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; 2800 2800 } 2801 2801 2802 + static inline bool sk_is_stream_unix(const struct sock *sk) 2803 + { 2804 + return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM; 2805 + } 2806 + 2802 2807 /** 2803 2808 * sk_eat_skb - Release a skb if it is no longer needed 2804 2809 * @sk: socket to eat this skb from
+7 -4
include/trace/events/9p.h
··· 178 178 __field( void *, clnt ) 179 179 __field( __u8, type ) 180 180 __field( __u16, tag ) 181 - __array( unsigned char, line, P9_PROTO_DUMP_SZ ) 181 + __dynamic_array(unsigned char, line, 182 + min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ)) 182 183 ), 183 184 184 185 TP_fast_assign( 185 186 __entry->clnt = clnt; 186 187 __entry->type = pdu->id; 187 188 __entry->tag = pdu->tag; 188 - memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ); 189 + memcpy(__get_dynamic_array(line), pdu->sdata, 190 + __get_dynamic_array_len(line)); 189 191 ), 190 - TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n", 192 + TP_printk("clnt %lu %s(tag = %d)\n%*ph\n", 191 193 (unsigned long)__entry->clnt, show_9p_op(__entry->type), 192 - __entry->tag, 0, __entry->line, 16, __entry->line + 16) 194 + __entry->tag, __get_dynamic_array_len(line), 195 + __get_dynamic_array(line)) 193 196 ); 194 197 195 198
+14
kernel/sys_ni.c
··· 201 201 COND_SYSCALL_COMPAT(recvmmsg_time32); 202 202 COND_SYSCALL_COMPAT(recvmmsg_time64); 203 203 204 + /* Posix timer syscalls may be configured out */ 205 + COND_SYSCALL(timer_create); 206 + COND_SYSCALL(timer_gettime); 207 + COND_SYSCALL(timer_getoverrun); 208 + COND_SYSCALL(timer_settime); 209 + COND_SYSCALL(timer_delete); 210 + COND_SYSCALL(clock_adjtime); 211 + COND_SYSCALL(getitimer); 212 + COND_SYSCALL(setitimer); 213 + COND_SYSCALL(alarm); 214 + COND_SYSCALL_COMPAT(timer_create); 215 + COND_SYSCALL_COMPAT(getitimer); 216 + COND_SYSCALL_COMPAT(setitimer); 217 + 204 218 /* 205 219 * Architecture specific syscalls: see further below 206 220 */
-45
kernel/time/posix-stubs.c
··· 17 17 #include <linux/time_namespace.h> 18 18 #include <linux/compat.h> 19 19 20 - #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER 21 - /* Architectures may override SYS_NI and COMPAT_SYS_NI */ 22 - #include <asm/syscall_wrapper.h> 23 - #endif 24 - 25 - asmlinkage long sys_ni_posix_timers(void) 26 - { 27 - pr_err_once("process %d (%s) attempted a POSIX timer syscall " 28 - "while CONFIG_POSIX_TIMERS is not set\n", 29 - current->pid, current->comm); 30 - return -ENOSYS; 31 - } 32 - 33 - #ifndef SYS_NI 34 - #define SYS_NI(name) SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers) 35 - #endif 36 - 37 - #ifndef COMPAT_SYS_NI 38 - #define COMPAT_SYS_NI(name) SYSCALL_ALIAS(compat_sys_##name, sys_ni_posix_timers) 39 - #endif 40 - 41 - SYS_NI(timer_create); 42 - SYS_NI(timer_gettime); 43 - SYS_NI(timer_getoverrun); 44 - SYS_NI(timer_settime); 45 - SYS_NI(timer_delete); 46 - SYS_NI(clock_adjtime); 47 - SYS_NI(getitimer); 48 - SYS_NI(setitimer); 49 - SYS_NI(clock_adjtime32); 50 - #ifdef __ARCH_WANT_SYS_ALARM 51 - SYS_NI(alarm); 52 - #endif 53 - 54 20 /* 55 21 * We preserve minimal support for CLOCK_REALTIME and CLOCK_MONOTONIC 56 22 * as it is easy to remain compatible with little code. CLOCK_BOOTTIME ··· 124 158 which_clock); 125 159 } 126 160 127 - #ifdef CONFIG_COMPAT 128 - COMPAT_SYS_NI(timer_create); 129 - #endif 130 - 131 - #if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA) 132 - COMPAT_SYS_NI(getitimer); 133 - COMPAT_SYS_NI(setitimer); 134 - #endif 135 - 136 161 #ifdef CONFIG_COMPAT_32BIT_TIME 137 - SYS_NI(timer_settime32); 138 - SYS_NI(timer_gettime32); 139 162 140 163 SYSCALL_DEFINE2(clock_settime32, const clockid_t, which_clock, 141 164 struct old_timespec32 __user *, tp)
+24 -55
kernel/trace/ring_buffer.c
··· 700 700 return local_try_cmpxchg(l, &expect, set); 701 701 } 702 702 703 - static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set) 704 - { 705 - unsigned long cnt, top, bottom, msb; 706 - unsigned long cnt2, top2, bottom2, msb2; 707 - u64 val; 708 - 709 - /* Any interruptions in this function should cause a failure */ 710 - cnt = local_read(&t->cnt); 711 - 712 - /* The cmpxchg always fails if it interrupted an update */ 713 - if (!__rb_time_read(t, &val, &cnt2)) 714 - return false; 715 - 716 - if (val != expect) 717 - return false; 718 - 719 - if ((cnt & 3) != cnt2) 720 - return false; 721 - 722 - cnt2 = cnt + 1; 723 - 724 - rb_time_split(val, &top, &bottom, &msb); 725 - msb = rb_time_val_cnt(msb, cnt); 726 - top = rb_time_val_cnt(top, cnt); 727 - bottom = rb_time_val_cnt(bottom, cnt); 728 - 729 - rb_time_split(set, &top2, &bottom2, &msb2); 730 - msb2 = rb_time_val_cnt(msb2, cnt); 731 - top2 = rb_time_val_cnt(top2, cnt2); 732 - bottom2 = rb_time_val_cnt(bottom2, cnt2); 733 - 734 - if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2)) 735 - return false; 736 - if (!rb_time_read_cmpxchg(&t->msb, msb, msb2)) 737 - return false; 738 - if (!rb_time_read_cmpxchg(&t->top, top, top2)) 739 - return false; 740 - if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2)) 741 - return false; 742 - return true; 743 - } 744 - 745 703 #else /* 64 bits */ 746 704 747 705 /* local64_t always succeeds */ ··· 712 754 static void rb_time_set(rb_time_t *t, u64 val) 713 755 { 714 756 local64_set(&t->time, val); 715 - } 716 - 717 - static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set) 718 - { 719 - return local64_try_cmpxchg(&t->time, &expect, set); 720 757 } 721 758 #endif 722 759 ··· 3563 3610 } else { 3564 3611 u64 ts; 3565 3612 /* SLOW PATH - Interrupted between A and C */ 3566 - a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3613 + 3614 + /* Save the old before_stamp */ 3615 + a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3616 + 
RB_WARN_ON(cpu_buffer, !a_ok); 3617 + 3618 + /* 3619 + * Read a new timestamp and update the before_stamp to make 3620 + * the next event after this one force using an absolute 3621 + * timestamp. This is in case an interrupt were to come in 3622 + * between E and F. 3623 + */ 3624 + ts = rb_time_stamp(cpu_buffer->buffer); 3625 + rb_time_set(&cpu_buffer->before_stamp, ts); 3626 + 3627 + barrier(); 3628 + /*E*/ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3567 3629 /* Was interrupted before here, write_stamp must be valid */ 3568 3630 RB_WARN_ON(cpu_buffer, !a_ok); 3569 - ts = rb_time_stamp(cpu_buffer->buffer); 3570 3631 barrier(); 3571 - /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3572 - info->after < ts && 3573 - rb_time_cmpxchg(&cpu_buffer->write_stamp, 3574 - info->after, ts)) { 3575 - /* Nothing came after this event between C and E */ 3632 + /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3633 + info->after == info->before && info->after < ts) { 3634 + /* 3635 + * Nothing came after this event between C and F, it is 3636 + * safe to use info->after for the delta as it 3637 + * matched info->before and is still valid. 3638 + */ 3576 3639 info->delta = ts - info->after; 3577 3640 } else { 3578 3641 /* 3579 - * Interrupted between C and E: 3642 + * Interrupted between C and F: 3580 3643 * Lost the previous events time stamp. Just set the 3581 3644 * delta to zero, and this will be the same time as 3582 3645 * the event this event interrupted. And the events that
+11
kernel/trace/synth_event_gen_test.c
··· 477 477 478 478 ret = test_trace_synth_event(); 479 479 WARN_ON(ret); 480 + 481 + /* Disable when done */ 482 + trace_array_set_clr_event(gen_synth_test->tr, 483 + "synthetic", 484 + "gen_synth_test", false); 485 + trace_array_set_clr_event(empty_synth_test->tr, 486 + "synthetic", 487 + "empty_synth_test", false); 488 + trace_array_set_clr_event(create_synth_test->tr, 489 + "synthetic", 490 + "create_synth_test", false); 480 491 out: 481 492 return ret; 482 493 }
+2 -2
kernel/trace/trace_events_synth.c
··· 1137 1137 * @cmd: A pointer to the dynevent_cmd struct representing the new event 1138 1138 * @name: The name of the synthetic event 1139 1139 * @mod: The module creating the event, NULL if not created from a module 1140 - * @args: Variable number of arg (pairs), one pair for each field 1140 + * @...: Variable number of arg (pairs), one pair for each field 1141 1141 * 1142 1142 * NOTE: Users normally won't want to call this function directly, but 1143 1143 * rather use the synth_event_gen_cmd_start() wrapper, which ··· 1695 1695 * synth_event_trace - Trace a synthetic event 1696 1696 * @file: The trace_event_file representing the synthetic event 1697 1697 * @n_vals: The number of values in vals 1698 - * @args: Variable number of args containing the event values 1698 + * @...: Variable number of args containing the event values 1699 1699 * 1700 1700 * Trace a synthetic event using the values passed in the variable 1701 1701 * argument list.
+1 -1
lib/idr.c
··· 508 508 goto delete; 509 509 xas_store(&xas, xa_mk_value(v)); 510 510 } else { 511 - if (!test_bit(bit, bitmap->bitmap)) 511 + if (!bitmap || !test_bit(bit, bitmap->bitmap)) 512 512 goto err; 513 513 __clear_bit(bit, bitmap->bitmap); 514 514 xas_set_mark(&xas, XA_FREE_MARK);
+40
lib/test_ida.c
··· 150 150 IDA_BUG_ON(ida, !ida_is_empty(ida)); 151 151 } 152 152 153 + /* 154 + * Check various situations where we attempt to free an ID we don't own. 155 + */ 156 + static void ida_check_bad_free(struct ida *ida) 157 + { 158 + unsigned long i; 159 + 160 + printk("vvv Ignore \"not allocated\" warnings\n"); 161 + /* IDA is empty; all of these will fail */ 162 + ida_free(ida, 0); 163 + for (i = 0; i < 31; i++) 164 + ida_free(ida, 1 << i); 165 + 166 + /* IDA contains a single value entry */ 167 + IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3); 168 + ida_free(ida, 0); 169 + for (i = 0; i < 31; i++) 170 + ida_free(ida, 1 << i); 171 + 172 + /* IDA contains a single bitmap */ 173 + IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023); 174 + ida_free(ida, 0); 175 + for (i = 0; i < 31; i++) 176 + ida_free(ida, 1 << i); 177 + 178 + /* IDA contains a tree */ 179 + IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1); 180 + ida_free(ida, 0); 181 + for (i = 0; i < 31; i++) 182 + ida_free(ida, 1 << i); 183 + printk("^^^ \"not allocated\" warnings over\n"); 184 + 185 + ida_free(ida, 3); 186 + ida_free(ida, 1023); 187 + ida_free(ida, (1 << 20) - 1); 188 + 189 + IDA_BUG_ON(ida, !ida_is_empty(ida)); 190 + } 191 + 153 192 static DEFINE_IDA(ida); 154 193 155 194 static int ida_checks(void) ··· 201 162 ida_check_leaf(&ida, 1024 * 64); 202 163 ida_check_max(&ida); 203 164 ida_check_conv(&ida); 165 + ida_check_bad_free(&ida); 204 166 205 167 printk("IDA: %u of %u tests passed\n", tests_passed, tests_run); 206 168 return (tests_run != tests_passed) ? 0 : -EINVAL;
+8 -3
lib/vsprintf.c
··· 2111 2111 2112 2112 /* Loop starting from the root node to the current node. */ 2113 2113 for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) { 2114 - struct fwnode_handle *__fwnode = 2115 - fwnode_get_nth_parent(fwnode, depth); 2114 + /* 2115 + * Only get a reference for other nodes (i.e. parent nodes). 2116 + * fwnode refcount may be 0 here. 2117 + */ 2118 + struct fwnode_handle *__fwnode = depth ? 2119 + fwnode_get_nth_parent(fwnode, depth) : fwnode; 2116 2120 2117 2121 buf = string(buf, end, fwnode_get_name_prefix(__fwnode), 2118 2122 default_str_spec); 2119 2123 buf = string(buf, end, fwnode_get_name(__fwnode), 2120 2124 default_str_spec); 2121 2125 2122 - fwnode_handle_put(__fwnode); 2126 + if (depth) 2127 + fwnode_handle_put(__fwnode); 2123 2128 } 2124 2129 2125 2130 return buf;
+8 -1
net/8021q/vlan_core.c
··· 407 407 return 0; 408 408 409 409 list_for_each_entry(vid_info, &vlan_info->vid_list, list) { 410 + if (!vlan_hw_filter_capable(by_dev, vid_info->proto)) 411 + continue; 410 412 err = vlan_vid_add(dev, vid_info->proto, vid_info->vid); 411 413 if (err) 412 414 goto unwind; ··· 419 417 list_for_each_entry_continue_reverse(vid_info, 420 418 &vlan_info->vid_list, 421 419 list) { 420 + if (!vlan_hw_filter_capable(by_dev, vid_info->proto)) 421 + continue; 422 422 vlan_vid_del(dev, vid_info->proto, vid_info->vid); 423 423 } 424 424 ··· 440 436 if (!vlan_info) 441 437 return; 442 438 443 - list_for_each_entry(vid_info, &vlan_info->vid_list, list) 439 + list_for_each_entry(vid_info, &vlan_info->vid_list, list) { 440 + if (!vlan_hw_filter_capable(by_dev, vid_info->proto)) 441 + continue; 444 442 vlan_vid_del(dev, vid_info->proto, vid_info->vid); 443 + } 445 444 } 446 445 EXPORT_SYMBOL(vlan_vids_del_by_dev); 447 446
+13 -4
net/9p/protocol.c
··· 394 394 uint16_t *nwname = va_arg(ap, uint16_t *); 395 395 char ***wnames = va_arg(ap, char ***); 396 396 397 + *wnames = NULL; 398 + 397 399 errcode = p9pdu_readf(pdu, proto_version, 398 400 "w", nwname); 399 401 if (!errcode) { ··· 405 403 GFP_NOFS); 406 404 if (!*wnames) 407 405 errcode = -ENOMEM; 406 + else 407 + (*wnames)[0] = NULL; 408 408 } 409 409 410 410 if (!errcode) { ··· 418 414 proto_version, 419 415 "s", 420 416 &(*wnames)[i]); 421 - if (errcode) 417 + if (errcode) { 418 + (*wnames)[i] = NULL; 422 419 break; 420 + } 423 421 } 424 422 } 425 423 ··· 429 423 if (*wnames) { 430 424 int i; 431 425 432 - for (i = 0; i < *nwname; i++) 426 + for (i = 0; i < *nwname; i++) { 427 + if (!(*wnames)[i]) 428 + break; 433 429 kfree((*wnames)[i]); 430 + } 431 + kfree(*wnames); 432 + *wnames = NULL; 434 433 } 435 - kfree(*wnames); 436 - *wnames = NULL; 437 434 } 438 435 } 439 436 break;
+6 -1
net/bluetooth/af_bluetooth.c
··· 309 309 if (flags & MSG_OOB) 310 310 return -EOPNOTSUPP; 311 311 312 + lock_sock(sk); 313 + 312 314 skb = skb_recv_datagram(sk, flags, &err); 313 315 if (!skb) { 314 316 if (sk->sk_shutdown & RCV_SHUTDOWN) 315 - return 0; 317 + err = 0; 316 318 319 + release_sock(sk); 317 320 return err; 318 321 } 319 322 ··· 345 342 } 346 343 347 344 skb_free_datagram(sk, skb); 345 + 346 + release_sock(sk); 348 347 349 348 if (flags & MSG_TRUNC) 350 349 copied = skblen;
+21 -9
net/bluetooth/hci_event.c
··· 516 516 { 517 517 struct hci_rp_read_class_of_dev *rp = data; 518 518 519 + if (WARN_ON(!hdev)) 520 + return HCI_ERROR_UNSPECIFIED; 521 + 519 522 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 520 523 521 524 if (rp->status) ··· 750 747 } else { 751 748 conn->enc_key_size = rp->key_size; 752 749 status = 0; 750 + 751 + if (conn->enc_key_size < hdev->min_enc_key_size) { 752 + /* As slave role, the conn->state has been set to 753 + * BT_CONNECTED and l2cap conn req might not be received 754 + * yet, at this moment the l2cap layer almost does 755 + * nothing with the non-zero status. 756 + * So we also clear encrypt related bits, and then the 757 + * handler of l2cap conn req will get the right secure 758 + * state at a later time. 759 + */ 760 + status = HCI_ERROR_AUTH_FAILURE; 761 + clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 762 + clear_bit(HCI_CONN_AES_CCM, &conn->flags); 763 + } 753 764 } 754 765 755 - hci_encrypt_cfm(conn, 0); 766 + hci_encrypt_cfm(conn, status); 756 767 757 768 done: 758 769 hci_dev_unlock(hdev); ··· 836 819 837 820 if (!rp->status) 838 821 conn->auth_payload_timeout = get_unaligned_le16(sent + 2); 839 - 840 - hci_encrypt_cfm(conn, 0); 841 822 842 823 unlock: 843 824 hci_dev_unlock(hdev); ··· 2319 2304 return; 2320 2305 } 2321 2306 2322 - set_bit(HCI_INQUIRY, &hdev->flags); 2307 + if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY)) 2308 + set_bit(HCI_INQUIRY, &hdev->flags); 2323 2309 } 2324 2310 2325 2311 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) ··· 3699 3683 cp.handle = cpu_to_le16(conn->handle); 3700 3684 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3701 3685 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3702 - sizeof(cp), &cp)) { 3686 + sizeof(cp), &cp)) 3703 3687 bt_dev_err(hdev, "write auth payload timeout failed"); 3704 - goto notify; 3705 - } 3706 - 3707 - goto unlock; 3708 3688 } 3709 3689 3710 3690 notify:
+15 -6
net/bluetooth/l2cap_core.c
··· 6492 6492 kfree_skb(skb); 6493 6493 } 6494 6494 6495 + static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident) 6496 + { 6497 + struct l2cap_cmd_rej_unk rej; 6498 + 6499 + rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 6500 + l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 6501 + } 6502 + 6495 6503 static inline void l2cap_sig_channel(struct l2cap_conn *conn, 6496 6504 struct sk_buff *skb) 6497 6505 { ··· 6525 6517 6526 6518 if (len > skb->len || !cmd->ident) { 6527 6519 BT_DBG("corrupted command"); 6520 + l2cap_sig_send_rej(conn, cmd->ident); 6528 6521 break; 6529 6522 } 6530 6523 6531 6524 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data); 6532 6525 if (err) { 6533 - struct l2cap_cmd_rej_unk rej; 6534 - 6535 6526 BT_ERR("Wrong link type (%d)", err); 6536 - 6537 - rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 6538 - l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, 6539 - sizeof(rej), &rej); 6527 + l2cap_sig_send_rej(conn, cmd->ident); 6540 6528 } 6541 6529 6542 6530 skb_pull(skb, len); 6531 + } 6532 + 6533 + if (skb->len > 0) { 6534 + BT_DBG("corrupted command"); 6535 + l2cap_sig_send_rej(conn, 0); 6543 6536 } 6544 6537 6545 6538 drop:
+18 -7
net/bluetooth/mgmt.c
··· 2897 2897 for (i = 0; i < key_count; i++) { 2898 2898 struct mgmt_link_key_info *key = &cp->keys[i]; 2899 2899 2900 - if (key->addr.type != BDADDR_BREDR || key->type > 0x08) 2900 + /* Considering SMP over BREDR/LE, there is no need to check addr_type */ 2901 + if (key->type > 0x08) 2901 2902 return mgmt_cmd_status(sk, hdev->id, 2902 2903 MGMT_OP_LOAD_LINK_KEYS, 2903 2904 MGMT_STATUS_INVALID_PARAMS); ··· 7131 7130 7132 7131 for (i = 0; i < irk_count; i++) { 7133 7132 struct mgmt_irk_info *irk = &cp->irks[i]; 7133 + u8 addr_type = le_addr_type(irk->addr.type); 7134 7134 7135 7135 if (hci_is_blocked_key(hdev, 7136 7136 HCI_BLOCKED_KEY_TYPE_IRK, ··· 7141 7139 continue; 7142 7140 } 7143 7141 7142 + /* When using SMP over BR/EDR, the addr type should be set to BREDR */ 7143 + if (irk->addr.type == BDADDR_BREDR) 7144 + addr_type = BDADDR_BREDR; 7145 + 7144 7146 hci_add_irk(hdev, &irk->addr.bdaddr, 7145 - le_addr_type(irk->addr.type), irk->val, 7147 + addr_type, irk->val, 7146 7148 BDADDR_ANY); 7147 7149 } 7148 7150 ··· 7227 7221 for (i = 0; i < key_count; i++) { 7228 7222 struct mgmt_ltk_info *key = &cp->keys[i]; 7229 7223 u8 type, authenticated; 7224 + u8 addr_type = le_addr_type(key->addr.type); 7230 7225 7231 7226 if (hci_is_blocked_key(hdev, 7232 7227 HCI_BLOCKED_KEY_TYPE_LTK, ··· 7262 7255 continue; 7263 7256 } 7264 7257 7258 + /* When using SMP over BR/EDR, the addr type should be set to BREDR */ 7259 + if (key->addr.type == BDADDR_BREDR) 7260 + addr_type = BDADDR_BREDR; 7261 + 7265 7262 hci_add_ltk(hdev, &key->addr.bdaddr, 7266 - le_addr_type(key->addr.type), type, authenticated, 7263 + addr_type, type, authenticated, 7267 7264 key->val, key->enc_size, key->ediv, key->rand); 7268 7265 } 7269 7266 ··· 9534 9523 9535 9524 ev.store_hint = persistent; 9536 9525 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 9537 - ev.key.addr.type = BDADDR_BREDR; 9526 + ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); 9538 9527 ev.key.type = key->type; 9539 9528 
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); 9540 9529 ev.key.pin_len = key->pin_len; ··· 9585 9574 ev.store_hint = persistent; 9586 9575 9587 9576 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 9588 - ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); 9577 + ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); 9589 9578 ev.key.type = mgmt_ltk_type(key); 9590 9579 ev.key.enc_size = key->enc_size; 9591 9580 ev.key.ediv = key->ediv; ··· 9614 9603 9615 9604 bacpy(&ev.rpa, &irk->rpa); 9616 9605 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); 9617 - ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type); 9606 + ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type); 9618 9607 memcpy(ev.irk.val, irk->val, sizeof(irk->val)); 9619 9608 9620 9609 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL); ··· 9643 9632 ev.store_hint = persistent; 9644 9633 9645 9634 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); 9646 - ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type); 9635 + ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type); 9647 9636 ev.key.type = csrk->type; 9648 9637 memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); 9649 9638
+7
net/bluetooth/smp.c
··· 1059 1059 } 1060 1060 1061 1061 if (smp->remote_irk) { 1062 + smp->remote_irk->link_type = hcon->type; 1062 1063 mgmt_new_irk(hdev, smp->remote_irk, persistent); 1063 1064 1064 1065 /* Now that user space can be considered to know the ··· 1079 1078 } 1080 1079 1081 1080 if (smp->csrk) { 1081 + smp->csrk->link_type = hcon->type; 1082 1082 smp->csrk->bdaddr_type = hcon->dst_type; 1083 1083 bacpy(&smp->csrk->bdaddr, &hcon->dst); 1084 1084 mgmt_new_csrk(hdev, smp->csrk, persistent); 1085 1085 } 1086 1086 1087 1087 if (smp->responder_csrk) { 1088 + smp->responder_csrk->link_type = hcon->type; 1088 1089 smp->responder_csrk->bdaddr_type = hcon->dst_type; 1089 1090 bacpy(&smp->responder_csrk->bdaddr, &hcon->dst); 1090 1091 mgmt_new_csrk(hdev, smp->responder_csrk, persistent); 1091 1092 } 1092 1093 1093 1094 if (smp->ltk) { 1095 + smp->ltk->link_type = hcon->type; 1094 1096 smp->ltk->bdaddr_type = hcon->dst_type; 1095 1097 bacpy(&smp->ltk->bdaddr, &hcon->dst); 1096 1098 mgmt_new_ltk(hdev, smp->ltk, persistent); 1097 1099 } 1098 1100 1099 1101 if (smp->responder_ltk) { 1102 + smp->responder_ltk->link_type = hcon->type; 1100 1103 smp->responder_ltk->bdaddr_type = hcon->dst_type; 1101 1104 bacpy(&smp->responder_ltk->bdaddr, &hcon->dst); 1102 1105 mgmt_new_ltk(hdev, smp->responder_ltk, persistent); ··· 1120 1115 key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst, 1121 1116 smp->link_key, type, 0, &persistent); 1122 1117 if (key) { 1118 + key->link_type = hcon->type; 1119 + key->bdaddr_type = hcon->dst_type; 1123 1120 mgmt_new_link_key(hdev, key, persistent); 1124 1121 1125 1122 /* Don't keep debug keys around if the relevant
+3
net/core/dev.c
··· 3472 3472 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3473 3473 return features & ~NETIF_F_GSO_MASK; 3474 3474 3475 + if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size))) 3476 + return features & ~NETIF_F_GSO_MASK; 3477 + 3475 3478 if (!skb_shinfo(skb)->gso_type) { 3476 3479 skb_warn_bad_offload(skb); 3477 3480 return features & ~NETIF_F_GSO_MASK;
+2
net/core/skbuff.c
··· 4825 4825 static void skb_extensions_init(void) 4826 4826 { 4827 4827 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4828 + #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 4828 4829 BUILD_BUG_ON(skb_ext_total_length() > 255); 4830 + #endif 4829 4831 4830 4832 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4831 4833 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
+2
net/core/sock_map.c
··· 536 536 { 537 537 if (sk_is_tcp(sk)) 538 538 return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN); 539 + if (sk_is_stream_unix(sk)) 540 + return (1 << sk->sk_state) & TCPF_ESTABLISHED; 539 541 return true; 540 542 } 541 543
+1 -1
net/core/stream.c
··· 79 79 remove_wait_queue(sk_sleep(sk), &wait); 80 80 sk->sk_write_pending--; 81 81 } while (!done); 82 - return 0; 82 + return done < 0 ? done : 0; 83 83 } 84 84 EXPORT_SYMBOL(sk_stream_wait_connect); 85 85
+9 -1
net/dns_resolver/dns_key.c
··· 91 91 static int 92 92 dns_resolver_preparse(struct key_preparsed_payload *prep) 93 93 { 94 + const struct dns_server_list_v1_header *v1; 94 95 const struct dns_payload_header *bin; 95 96 struct user_key_payload *upayload; 96 97 unsigned long derrno; ··· 121 120 "dns_resolver: Unsupported server list version (%u)\n", 122 121 bin->version); 123 122 return -EINVAL; 123 + } 124 + 125 + v1 = (const struct dns_server_list_v1_header *)bin; 126 + if ((v1->status != DNS_LOOKUP_GOOD && 127 + v1->status != DNS_LOOKUP_GOOD_WITH_BAD)) { 128 + if (prep->expiry == TIME64_MAX) 129 + prep->expiry = ktime_get_real_seconds() + 1; 124 130 } 125 131 126 132 result_len = datalen; ··· 322 314 323 315 struct key_type key_type_dns_resolver = { 324 316 .name = "dns_resolver", 325 - .flags = KEY_TYPE_NET_DOMAIN, 317 + .flags = KEY_TYPE_NET_DOMAIN | KEY_TYPE_INSTANT_REAP, 326 318 .preparse = dns_resolver_preparse, 327 319 .free_preparse = dns_resolver_free_preparse, 328 320 .instantiate = generic_key_instantiate,
+1
net/ife/ife.c
··· 82 82 if (unlikely(!pskb_may_pull(skb, total_pull))) 83 83 return NULL; 84 84 85 + ifehdr = (struct ifeheadr *)(skb->data + skb->dev->hard_header_len); 85 86 skb_set_mac_header(skb, total_pull); 86 87 __skb_pull(skb, total_pull); 87 88 *metalen = ifehdrln - IFE_METAHDRLEN;
+6 -49
net/ipv6/ip6_fib.c
··· 160 160 INIT_LIST_HEAD(&f6i->fib6_siblings); 161 161 refcount_set(&f6i->fib6_ref, 1); 162 162 163 - INIT_HLIST_NODE(&f6i->gc_link); 164 - 165 163 return f6i; 166 164 } 167 165 ··· 246 248 net->ipv6.fib6_null_entry); 247 249 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 248 250 inet_peer_base_init(&table->tb6_peers); 249 - INIT_HLIST_HEAD(&table->tb6_gc_hlist); 250 251 } 251 252 252 253 return table; ··· 1057 1060 lockdep_is_held(&table->tb6_lock)); 1058 1061 } 1059 1062 } 1060 - 1061 - fib6_clean_expires_locked(rt); 1062 1063 } 1063 1064 1064 1065 /* ··· 1118 1123 if (!(iter->fib6_flags & RTF_EXPIRES)) 1119 1124 return -EEXIST; 1120 1125 if (!(rt->fib6_flags & RTF_EXPIRES)) 1121 - fib6_clean_expires_locked(iter); 1126 + fib6_clean_expires(iter); 1122 1127 else 1123 - fib6_set_expires_locked(iter, 1124 - rt->expires); 1128 + fib6_set_expires(iter, rt->expires); 1125 1129 1126 1130 if (rt->fib6_pmtu) 1127 1131 fib6_metric_set(iter, RTAX_MTU, ··· 1479 1485 if (rt->nh) 1480 1486 list_add(&rt->nh_list, &rt->nh->f6i_list); 1481 1487 __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net)); 1482 - 1483 - if (fib6_has_expires(rt)) 1484 - hlist_add_head(&rt->gc_link, &table->tb6_gc_hlist); 1485 - 1486 1488 fib6_start_gc(info->nl_net, rt); 1487 1489 } 1488 1490 ··· 2281 2291 * Garbage collection 2282 2292 */ 2283 2293 2284 - static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args) 2294 + static int fib6_age(struct fib6_info *rt, void *arg) 2285 2295 { 2296 + struct fib6_gc_args *gc_args = arg; 2286 2297 unsigned long now = jiffies; 2287 2298 2288 2299 /* ··· 2291 2300 * Routes are expired even if they are in use. 
2292 2301 */ 2293 2302 2294 - if (fib6_has_expires(rt) && rt->expires) { 2303 + if (rt->fib6_flags & RTF_EXPIRES && rt->expires) { 2295 2304 if (time_after(now, rt->expires)) { 2296 2305 RT6_TRACE("expiring %p\n", rt); 2297 2306 return -1; ··· 2306 2315 rt6_age_exceptions(rt, gc_args, now); 2307 2316 2308 2317 return 0; 2309 - } 2310 - 2311 - static void fib6_gc_table(struct net *net, 2312 - struct fib6_table *tb6, 2313 - struct fib6_gc_args *gc_args) 2314 - { 2315 - struct fib6_info *rt; 2316 - struct hlist_node *n; 2317 - struct nl_info info = { 2318 - .nl_net = net, 2319 - .skip_notify = false, 2320 - }; 2321 - 2322 - hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link) 2323 - if (fib6_age(rt, gc_args) == -1) 2324 - fib6_del(rt, &info); 2325 - } 2326 - 2327 - static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args) 2328 - { 2329 - struct fib6_table *table; 2330 - struct hlist_head *head; 2331 - unsigned int h; 2332 - 2333 - rcu_read_lock(); 2334 - for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 2335 - head = &net->ipv6.fib_table_hash[h]; 2336 - hlist_for_each_entry_rcu(table, head, tb6_hlist) { 2337 - spin_lock_bh(&table->tb6_lock); 2338 - fib6_gc_table(net, table, gc_args); 2339 - spin_unlock_bh(&table->tb6_lock); 2340 - } 2341 - } 2342 - rcu_read_unlock(); 2343 2318 } 2344 2319 2345 2320 void fib6_run_gc(unsigned long expires, struct net *net, bool force) ··· 2323 2366 net->ipv6.sysctl.ip6_rt_gc_interval; 2324 2367 gc_args.more = 0; 2325 2368 2326 - fib6_gc_all(net, &gc_args); 2369 + fib6_clean_all(net, fib6_age, &gc_args); 2327 2370 now = jiffies; 2328 2371 net->ipv6.ip6_rt_last_gc = now; 2329 2372
+3 -3
net/ipv6/route.c
··· 3763 3763 rt->dst_nocount = true; 3764 3764 3765 3765 if (cfg->fc_flags & RTF_EXPIRES) 3766 - fib6_set_expires_locked(rt, jiffies + 3767 - clock_t_to_jiffies(cfg->fc_expires)); 3766 + fib6_set_expires(rt, jiffies + 3767 + clock_t_to_jiffies(cfg->fc_expires)); 3768 3768 else 3769 - fib6_clean_expires_locked(rt); 3769 + fib6_clean_expires(rt); 3770 3770 3771 3771 if (cfg->fc_protocol == RTPROT_UNSPEC) 3772 3772 cfg->fc_protocol = RTPROT_BOOT;
+2 -2
net/mac80211/cfg.c
··· 1788 1788 lockdep_is_held(&local->hw.wiphy->mtx)); 1789 1789 1790 1790 /* 1791 - * If there are no changes, then accept a link that doesn't exist, 1791 + * If there are no changes, then accept a link that exist, 1792 1792 * unless it's a new link. 1793 1793 */ 1794 - if (params->link_id < 0 && !new_link && 1794 + if (params->link_id >= 0 && !new_link && 1795 1795 !params->link_mac && !params->txpwr_set && 1796 1796 !params->supported_rates_len && 1797 1797 !params->ht_capa && !params->vht_capa &&
+5 -1
net/mac80211/driver-ops.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 3 * Copyright 2015 Intel Deutschland GmbH 4 - * Copyright (C) 2022 Intel Corporation 4 + * Copyright (C) 2022-2023 Intel Corporation 5 5 */ 6 6 #include <net/mac80211.h> 7 7 #include "ieee80211_i.h" ··· 588 588 589 589 if (ret) 590 590 return ret; 591 + 592 + /* during reconfig don't add it to debugfs again */ 593 + if (local->in_reconfig) 594 + return 0; 591 595 592 596 for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { 593 597 link_sta = rcu_dereference_protected(info->link[link_id],
+9 -7
net/mac80211/mesh_plink.c
··· 1068 1068 case WLAN_SP_MESH_PEERING_OPEN: 1069 1069 if (!matches_local) 1070 1070 event = OPN_RJCT; 1071 - if (!mesh_plink_free_count(sdata) || 1072 - (sta->mesh->plid && sta->mesh->plid != plid)) 1071 + else if (!mesh_plink_free_count(sdata) || 1072 + (sta->mesh->plid && sta->mesh->plid != plid)) 1073 1073 event = OPN_IGNR; 1074 1074 else 1075 1075 event = OPN_ACPT; ··· 1077 1077 case WLAN_SP_MESH_PEERING_CONFIRM: 1078 1078 if (!matches_local) 1079 1079 event = CNF_RJCT; 1080 - if (!mesh_plink_free_count(sdata) || 1081 - sta->mesh->llid != llid || 1082 - (sta->mesh->plid && sta->mesh->plid != plid)) 1080 + else if (!mesh_plink_free_count(sdata) || 1081 + sta->mesh->llid != llid || 1082 + (sta->mesh->plid && sta->mesh->plid != plid)) 1083 1083 event = CNF_IGNR; 1084 1084 else 1085 1085 event = CNF_ACPT; ··· 1247 1247 return; 1248 1248 } 1249 1249 elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL); 1250 - mesh_process_plink_frame(sdata, mgmt, elems, rx_status); 1251 - kfree(elems); 1250 + if (elems) { 1251 + mesh_process_plink_frame(sdata, mgmt, elems, rx_status); 1252 + kfree(elems); 1253 + } 1252 1254 }
+3 -1
net/mac80211/mlme.c
··· 5782 5782 { 5783 5783 const struct ieee80211_multi_link_elem *ml; 5784 5784 const struct element *sub; 5785 - size_t ml_len; 5785 + ssize_t ml_len; 5786 5786 unsigned long removed_links = 0; 5787 5787 u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {}; 5788 5788 u8 link_id; ··· 5798 5798 elems->scratch + elems->scratch_len - 5799 5799 elems->scratch_pos, 5800 5800 WLAN_EID_FRAGMENT); 5801 + if (ml_len < 0) 5802 + return; 5801 5803 5802 5804 elems->ml_reconf = (const void *)elems->scratch_pos; 5803 5805 elems->ml_reconf_len = ml_len;
+1
net/mptcp/crypto_test.c
··· 70 70 kunit_test_suite(mptcp_crypto_suite); 71 71 72 72 MODULE_LICENSE("GPL"); 73 + MODULE_DESCRIPTION("KUnit tests for MPTCP Crypto");
+3 -3
net/mptcp/protocol.c
··· 3402 3402 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) 3403 3403 __mptcp_clean_una_wakeup(sk); 3404 3404 if (unlikely(msk->cb_flags)) { 3405 - /* be sure to set the current sk state before taking actions 3405 + /* be sure to sync the msk state before taking actions 3406 3406 * depending on sk_state (MPTCP_ERROR_REPORT) 3407 3407 * On sk release avoid actions depending on the first subflow 3408 3408 */ 3409 - if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags) && msk->first) 3410 - __mptcp_set_connected(sk); 3409 + if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) 3410 + __mptcp_sync_state(sk, msk->pending_state); 3411 3411 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) 3412 3412 __mptcp_error_report(sk); 3413 3413 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
+6 -3
net/mptcp/protocol.h
··· 124 124 #define MPTCP_ERROR_REPORT 3 125 125 #define MPTCP_RETRANSMIT 4 126 126 #define MPTCP_FLUSH_JOIN_LIST 5 127 - #define MPTCP_CONNECTED 6 127 + #define MPTCP_SYNC_STATE 6 128 128 #define MPTCP_SYNC_SNDBUF 7 129 129 130 130 struct mptcp_skb_cb { ··· 296 296 bool use_64bit_ack; /* Set when we received a 64-bit DSN */ 297 297 bool csum_enabled; 298 298 bool allow_infinite_fallback; 299 + u8 pending_state; /* A subflow asked to set this sk_state, 300 + * protected by the msk data lock 301 + */ 299 302 u8 mpc_endpoint_id; 300 303 u8 recvmsg_inq:1, 301 304 cork:1, ··· 731 728 struct mptcp_options_received *mp_opt); 732 729 733 730 void mptcp_finish_connect(struct sock *sk); 734 - void __mptcp_set_connected(struct sock *sk); 731 + void __mptcp_sync_state(struct sock *sk, int state); 735 732 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout); 736 733 737 734 static inline void mptcp_stop_tout_timer(struct sock *sk) ··· 1118 1115 { 1119 1116 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1120 1117 1121 - return sk->sk_state == TCP_ESTABLISHED && 1118 + return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1) && 1122 1119 is_active_ssk(subflow) && 1123 1120 !subflow->conn_finished; 1124 1121 }
+17 -11
net/mptcp/subflow.c
··· 419 419 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport; 420 420 } 421 421 422 - void __mptcp_set_connected(struct sock *sk) 422 + void __mptcp_sync_state(struct sock *sk, int state) 423 423 { 424 - __mptcp_propagate_sndbuf(sk, mptcp_sk(sk)->first); 424 + struct mptcp_sock *msk = mptcp_sk(sk); 425 + 426 + __mptcp_propagate_sndbuf(sk, msk->first); 425 427 if (sk->sk_state == TCP_SYN_SENT) { 426 - inet_sk_state_store(sk, TCP_ESTABLISHED); 428 + inet_sk_state_store(sk, state); 427 429 sk->sk_state_change(sk); 428 430 } 429 431 } 430 432 431 - static void mptcp_set_connected(struct sock *sk) 433 + static void mptcp_propagate_state(struct sock *sk, struct sock *ssk) 432 434 { 435 + struct mptcp_sock *msk = mptcp_sk(sk); 436 + 433 437 mptcp_data_lock(sk); 434 - if (!sock_owned_by_user(sk)) 435 - __mptcp_set_connected(sk); 436 - else 437 - __set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags); 438 + if (!sock_owned_by_user(sk)) { 439 + __mptcp_sync_state(sk, ssk->sk_state); 440 + } else { 441 + msk->pending_state = ssk->sk_state; 442 + __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags); 443 + } 438 444 mptcp_data_unlock(sk); 439 445 } 440 446 ··· 502 496 subflow_set_remote_key(msk, subflow, &mp_opt); 503 497 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK); 504 498 mptcp_finish_connect(sk); 505 - mptcp_set_connected(parent); 499 + mptcp_propagate_state(parent, sk); 506 500 } else if (subflow->request_join) { 507 501 u8 hmac[SHA256_DIGEST_SIZE]; 508 502 ··· 546 540 } else if (mptcp_check_fallback(sk)) { 547 541 fallback: 548 542 mptcp_rcv_space_init(msk, sk); 549 - mptcp_set_connected(parent); 543 + mptcp_propagate_state(parent, sk); 550 544 } 551 545 return; 552 546 ··· 1746 1740 mptcp_rcv_space_init(msk, sk); 1747 1741 pr_fallback(msk); 1748 1742 subflow->conn_finished = 1; 1749 - mptcp_set_connected(parent); 1743 + mptcp_propagate_state(parent, sk); 1750 1744 } 1751 1745 1752 1746 /* as recvmsg() does not acquire the subflow socket for 
ssk selection
+1
net/mptcp/token_test.c
··· 143 143 kunit_test_suite(mptcp_token_suite); 144 144 145 145 MODULE_LICENSE("GPL"); 146 + MODULE_DESCRIPTION("KUnit tests for MPTCP Token");
+8
net/rfkill/rfkill-gpio.c
··· 126 126 return -EINVAL; 127 127 } 128 128 129 + ret = gpiod_direction_output(rfkill->reset_gpio, true); 130 + if (ret) 131 + return ret; 132 + 133 + ret = gpiod_direction_output(rfkill->shutdown_gpio, true); 134 + if (ret) 135 + return ret; 136 + 129 137 rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev, 130 138 rfkill->type, &rfkill_gpio_ops, 131 139 rfkill);
+35 -6
net/rose/af_rose.c
··· 182 182 */ 183 183 static void rose_kill_by_device(struct net_device *dev) 184 184 { 185 - struct sock *s; 185 + struct sock *sk, *array[16]; 186 + struct rose_sock *rose; 187 + bool rescan; 188 + int i, cnt; 186 189 190 + start: 191 + rescan = false; 192 + cnt = 0; 187 193 spin_lock_bh(&rose_list_lock); 188 - sk_for_each(s, &rose_list) { 189 - struct rose_sock *rose = rose_sk(s); 190 - 194 + sk_for_each(sk, &rose_list) { 195 + rose = rose_sk(sk); 191 196 if (rose->device == dev) { 192 - rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); 197 + if (cnt == ARRAY_SIZE(array)) { 198 + rescan = true; 199 + break; 200 + } 201 + sock_hold(sk); 202 + array[cnt++] = sk; 203 + } 204 + } 205 + spin_unlock_bh(&rose_list_lock); 206 + 207 + for (i = 0; i < cnt; i++) { 208 + sk = array[cnt]; 209 + rose = rose_sk(sk); 210 + lock_sock(sk); 211 + spin_lock_bh(&rose_list_lock); 212 + if (rose->device == dev) { 213 + rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); 193 214 if (rose->neighbour) 194 215 rose->neighbour->use--; 195 216 netdev_put(rose->device, &rose->dev_tracker); 196 217 rose->device = NULL; 197 218 } 219 + spin_unlock_bh(&rose_list_lock); 220 + release_sock(sk); 221 + sock_put(sk); 222 + cond_resched(); 198 223 } 199 - spin_unlock_bh(&rose_list_lock); 224 + if (rescan) 225 + goto start; 200 226 } 201 227 202 228 /* ··· 682 656 break; 683 657 } 684 658 659 + spin_lock_bh(&rose_list_lock); 685 660 netdev_put(rose->device, &rose->dev_tracker); 661 + rose->device = NULL; 662 + spin_unlock_bh(&rose_list_lock); 686 663 sock->sk = NULL; 687 664 release_sock(sk); 688 665 sock_put(sk);
+2 -3
net/sunrpc/svc_xprt.c
··· 654 654 } 655 655 656 656 for (filled = 0; filled < pages; filled = ret) { 657 - ret = alloc_pages_bulk_array_node(GFP_KERNEL, 658 - rqstp->rq_pool->sp_id, 659 - pages, rqstp->rq_pages); 657 + ret = alloc_pages_bulk_array(GFP_KERNEL, pages, 658 + rqstp->rq_pages); 660 659 if (ret > filled) 661 660 /* Made progress, don't sleep yet */ 662 661 continue;
+87
net/wireless/certs/wens.hex
··· 1 + /* Chen-Yu Tsai's regdb certificate */ 2 + 0x30, 0x82, 0x02, 0xa7, 0x30, 0x82, 0x01, 0x8f, 3 + 0x02, 0x14, 0x61, 0xc0, 0x38, 0x65, 0x1a, 0xab, 4 + 0xdc, 0xf9, 0x4b, 0xd0, 0xac, 0x7f, 0xf0, 0x6c, 5 + 0x72, 0x48, 0xdb, 0x18, 0xc6, 0x00, 0x30, 0x0d, 6 + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 7 + 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x0f, 0x31, 8 + 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x03, 9 + 0x0c, 0x04, 0x77, 0x65, 0x6e, 0x73, 0x30, 0x20, 10 + 0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x31, 11 + 0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a, 0x18, 12 + 0x0f, 0x32, 0x31, 0x32, 0x33, 0x31, 0x31, 0x30, 13 + 0x37, 0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a, 14 + 0x30, 0x0f, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 15 + 0x55, 0x04, 0x03, 0x0c, 0x04, 0x77, 0x65, 0x6e, 16 + 0x73, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 17 + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 18 + 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 19 + 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 20 + 0x01, 0x00, 0xa9, 0x7a, 0x2c, 0x78, 0x4d, 0xa7, 21 + 0x19, 0x2d, 0x32, 0x52, 0xa0, 0x2e, 0x6c, 0xef, 22 + 0x88, 0x7f, 0x15, 0xc5, 0xb6, 0x69, 0x54, 0x16, 23 + 0x43, 0x14, 0x79, 0x53, 0xb7, 0xae, 0x88, 0xfe, 24 + 0xc0, 0xb7, 0x5d, 0x47, 0x8e, 0x1a, 0xe1, 0xef, 25 + 0xb3, 0x90, 0x86, 0xda, 0xd3, 0x64, 0x81, 0x1f, 26 + 0xce, 0x5d, 0x9e, 0x4b, 0x6e, 0x58, 0x02, 0x3e, 27 + 0xb2, 0x6f, 0x5e, 0x42, 0x47, 0x41, 0xf4, 0x2c, 28 + 0xb8, 0xa8, 0xd4, 0xaa, 0xc0, 0x0e, 0xe6, 0x48, 29 + 0xf0, 0xa8, 0xce, 0xcb, 0x08, 0xae, 0x37, 0xaf, 30 + 0xf6, 0x40, 0x39, 0xcb, 0x55, 0x6f, 0x5b, 0x4f, 31 + 0x85, 0x34, 0xe6, 0x69, 0x10, 0x50, 0x72, 0x5e, 32 + 0x4e, 0x9d, 0x4c, 0xba, 0x38, 0x36, 0x0d, 0xce, 33 + 0x73, 0x38, 0xd7, 0x27, 0x02, 0x2a, 0x79, 0x03, 34 + 0xe1, 0xac, 0xcf, 0xb0, 0x27, 0x85, 0x86, 0x93, 35 + 0x17, 0xab, 0xec, 0x42, 0x77, 0x37, 0x65, 0x8a, 36 + 0x44, 0xcb, 0xd6, 0x42, 0x93, 0x92, 0x13, 0xe3, 37 + 0x39, 0x45, 0xc5, 0x6e, 0x00, 0x4a, 0x7f, 0xcb, 38 + 0x42, 0x17, 0x2b, 0x25, 0x8c, 0xb8, 0x17, 0x3b, 
39 + 0x15, 0x36, 0x59, 0xde, 0x42, 0xce, 0x21, 0xe6, 40 + 0xb6, 0xc7, 0x6e, 0x5e, 0x26, 0x1f, 0xf7, 0x8a, 41 + 0x57, 0x9e, 0xa5, 0x96, 0x72, 0xb7, 0x02, 0x32, 42 + 0xeb, 0x07, 0x2b, 0x73, 0xe2, 0x4f, 0x66, 0x58, 43 + 0x9a, 0xeb, 0x0f, 0x07, 0xb6, 0xab, 0x50, 0x8b, 44 + 0xc3, 0x8f, 0x17, 0xfa, 0x0a, 0x99, 0xc2, 0x16, 45 + 0x25, 0xbf, 0x2d, 0x6b, 0x1a, 0xaa, 0xe6, 0x3e, 46 + 0x5f, 0xeb, 0x6d, 0x9b, 0x5d, 0x4d, 0x42, 0x83, 47 + 0x2d, 0x39, 0xb8, 0xc9, 0xac, 0xdb, 0x3a, 0x91, 48 + 0x50, 0xdf, 0xbb, 0xb1, 0x76, 0x6d, 0x15, 0x73, 49 + 0xfd, 0xc6, 0xe6, 0x6b, 0x71, 0x9e, 0x67, 0x36, 50 + 0x22, 0x83, 0x79, 0xb1, 0xd6, 0xb8, 0x84, 0x52, 51 + 0xaf, 0x96, 0x5b, 0xc3, 0x63, 0x02, 0x4e, 0x78, 52 + 0x70, 0x57, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30, 53 + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 54 + 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82, 55 + 0x01, 0x01, 0x00, 0x24, 0x28, 0xee, 0x22, 0x74, 56 + 0x7f, 0x7c, 0xfa, 0x6c, 0x1f, 0xb3, 0x18, 0xd1, 57 + 0xc2, 0x3d, 0x7d, 0x29, 0x42, 0x88, 0xad, 0x82, 58 + 0xa5, 0xb1, 0x8a, 0x05, 0xd0, 0xec, 0x5c, 0x91, 59 + 0x20, 0xf6, 0x82, 0xfd, 0xd5, 0x67, 0x60, 0x5f, 60 + 0x31, 0xf5, 0xbd, 0x88, 0x91, 0x70, 0xbd, 0xb8, 61 + 0xb9, 0x8c, 0x88, 0xfe, 0x53, 0xc9, 0x54, 0x9b, 62 + 0x43, 0xc4, 0x7a, 0x43, 0x74, 0x6b, 0xdd, 0xb0, 63 + 0xb1, 0x3b, 0x33, 0x45, 0x46, 0x78, 0xa3, 0x1c, 64 + 0xef, 0x54, 0x68, 0xf7, 0x85, 0x9c, 0xe4, 0x51, 65 + 0x6f, 0x06, 0xaf, 0x81, 0xdb, 0x2a, 0x7b, 0x7b, 66 + 0x6f, 0xa8, 0x9c, 0x67, 0xd8, 0xcb, 0xc9, 0x91, 67 + 0x40, 0x00, 0xae, 0xd9, 0xa1, 0x9f, 0xdd, 0xa6, 68 + 0x43, 0x0e, 0x28, 0x7b, 0xaa, 0x1b, 0xe9, 0x84, 69 + 0xdb, 0x76, 0x64, 0x42, 0x70, 0xc9, 0xc0, 0xeb, 70 + 0xae, 0x84, 0x11, 0x16, 0x68, 0x4e, 0x84, 0x9e, 71 + 0x7e, 0x92, 0x36, 0xee, 0x1c, 0x3b, 0x08, 0x63, 72 + 0xeb, 0x79, 0x84, 0x15, 0x08, 0x9d, 0xaf, 0xc8, 73 + 0x9a, 0xc7, 0x34, 0xd3, 0x94, 0x4b, 0xd1, 0x28, 74 + 0x97, 0xbe, 0xd1, 0x45, 0x75, 0xdc, 0x35, 0x62, 75 + 0xac, 0x1d, 0x1f, 0xb7, 0xb7, 0x15, 0x87, 0xc8, 76 + 0x98, 0xc0, 0x24, 0x31, 0x56, 
0x8d, 0xed, 0xdb, 77 + 0x06, 0xc6, 0x46, 0xbf, 0x4b, 0x6d, 0xa6, 0xd5, 78 + 0xab, 0xcc, 0x60, 0xfc, 0xe5, 0x37, 0xb6, 0x53, 79 + 0x7d, 0x58, 0x95, 0xa9, 0x56, 0xc7, 0xf7, 0xee, 80 + 0xc3, 0xa0, 0x76, 0xf7, 0x65, 0x4d, 0x53, 0xfa, 81 + 0xff, 0x5f, 0x76, 0x33, 0x5a, 0x08, 0xfa, 0x86, 82 + 0x92, 0x5a, 0x13, 0xfa, 0x1a, 0xfc, 0xf2, 0x1b, 83 + 0x8c, 0x7f, 0x42, 0x6d, 0xb7, 0x7e, 0xb7, 0xb4, 84 + 0xf0, 0xc7, 0x83, 0xbb, 0xa2, 0x81, 0x03, 0x2d, 85 + 0xd4, 0x2a, 0x63, 0x3f, 0xf7, 0x31, 0x2e, 0x40, 86 + 0x33, 0x5c, 0x46, 0xbc, 0x9b, 0xc1, 0x05, 0xa5, 87 + 0x45, 0x4e, 0xc3,
+21 -10
security/keys/gc.c
··· 67 67 } 68 68 69 69 /* 70 + * Set the expiration time on a key. 71 + */ 72 + void key_set_expiry(struct key *key, time64_t expiry) 73 + { 74 + key->expiry = expiry; 75 + if (expiry != TIME64_MAX) { 76 + if (!(key->type->flags & KEY_TYPE_INSTANT_REAP)) 77 + expiry += key_gc_delay; 78 + key_schedule_gc(expiry); 79 + } 80 + } 81 + 82 + /* 70 83 * Schedule a dead links collection run. 71 84 */ 72 85 void key_schedule_gc_links(void) ··· 189 176 static u8 gc_state; /* Internal persistent state */ 190 177 #define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */ 191 178 #define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */ 192 - #define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */ 193 179 #define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */ 194 180 #define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */ 195 181 #define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */ ··· 196 184 197 185 struct rb_node *cursor; 198 186 struct key *key; 199 - time64_t new_timer, limit; 187 + time64_t new_timer, limit, expiry; 200 188 201 189 kenter("[%lx,%x]", key_gc_flags, gc_state); 202 190 203 191 limit = ktime_get_real_seconds(); 204 - if (limit > key_gc_delay) 205 - limit -= key_gc_delay; 206 - else 207 - limit = key_gc_delay; 208 192 209 193 /* Work out what we're going to be doing in this pass */ 210 194 gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2; 211 195 gc_state <<= 1; 212 196 if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags)) 213 - gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER; 197 + gc_state |= KEY_GC_REAPING_LINKS; 214 198 215 199 if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) 216 200 gc_state |= KEY_GC_REAPING_DEAD_1; ··· 241 233 } 242 234 } 243 235 244 - if (gc_state & KEY_GC_SET_TIMER) { 245 - if (key->expiry > limit && key->expiry < new_timer) { 236 + expiry = key->expiry; 237 + if (expiry != TIME64_MAX) { 238 + if (!(key->type->flags & KEY_TYPE_INSTANT_REAP)) 
239 + expiry += key_gc_delay; 240 + if (expiry > limit && expiry < new_timer) { 246 241 kdebug("will expire %x in %lld", 247 242 key_serial(key), key->expiry - limit); 248 243 new_timer = key->expiry; ··· 287 276 */ 288 277 kdebug("pass complete"); 289 278 290 - if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) { 279 + if (new_timer != TIME64_MAX) { 291 280 new_timer += key_gc_delay; 292 281 key_schedule_gc(new_timer); 293 282 }
+10 -1
security/keys/internal.h
··· 167 167 extern void keyring_gc(struct key *keyring, time64_t limit); 168 168 extern void keyring_restriction_gc(struct key *keyring, 169 169 struct key_type *dead_type); 170 + void key_set_expiry(struct key *key, time64_t expiry); 170 171 extern void key_schedule_gc(time64_t gc_at); 171 172 extern void key_schedule_gc_links(void); 172 173 extern void key_gc_keytype(struct key_type *ktype); ··· 216 215 */ 217 216 static inline bool key_is_dead(const struct key *key, time64_t limit) 218 217 { 218 + time64_t expiry = key->expiry; 219 + 220 + if (expiry != TIME64_MAX) { 221 + if (!(key->type->flags & KEY_TYPE_INSTANT_REAP)) 222 + expiry += key_gc_delay; 223 + if (expiry <= limit) 224 + return true; 225 + } 226 + 219 227 return 220 228 key->flags & ((1 << KEY_FLAG_DEAD) | 221 229 (1 << KEY_FLAG_INVALIDATED)) || 222 - (key->expiry > 0 && key->expiry <= limit) || 223 230 key->domain_tag->removed; 224 231 } 225 232
+5 -10
security/keys/key.c
··· 294 294 key->uid = uid; 295 295 key->gid = gid; 296 296 key->perm = perm; 297 + key->expiry = TIME64_MAX; 297 298 key->restrict_link = restrict_link; 298 299 key->last_used_at = ktime_get_real_seconds(); 299 300 ··· 464 463 if (authkey) 465 464 key_invalidate(authkey); 466 465 467 - if (prep->expiry != TIME64_MAX) { 468 - key->expiry = prep->expiry; 469 - key_schedule_gc(prep->expiry + key_gc_delay); 470 - } 466 + key_set_expiry(key, prep->expiry); 471 467 } 472 468 } 473 469 ··· 604 606 atomic_inc(&key->user->nikeys); 605 607 mark_key_instantiated(key, -error); 606 608 notify_key(key, NOTIFY_KEY_INSTANTIATED, -error); 607 - key->expiry = ktime_get_real_seconds() + timeout; 608 - key_schedule_gc(key->expiry + key_gc_delay); 609 + key_set_expiry(key, ktime_get_real_seconds() + timeout); 609 610 610 611 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) 611 612 awaken = 1; ··· 720 723 721 724 void key_set_timeout(struct key *key, unsigned timeout) 722 725 { 723 - time64_t expiry = 0; 726 + time64_t expiry = TIME64_MAX; 724 727 725 728 /* make the changes with the locks held to prevent races */ 726 729 down_write(&key->sem); 727 730 728 731 if (timeout > 0) 729 732 expiry = ktime_get_real_seconds() + timeout; 730 - 731 - key->expiry = expiry; 732 - key_schedule_gc(key->expiry + key_gc_delay); 733 + key_set_expiry(key, expiry); 733 734 734 735 up_write(&key->sem); 735 736 }
+1 -1
security/keys/proc.c
··· 198 198 199 199 /* come up with a suitable timeout value */ 200 200 expiry = READ_ONCE(key->expiry); 201 - if (expiry == 0) { 201 + if (expiry == TIME64_MAX) { 202 202 memcpy(xbuf, "perm", 5); 203 203 } else if (now >= expiry) { 204 204 memcpy(xbuf, "expd", 5);
+2
sound/pci/hda/cs35l41_hda.c
··· 1826 1826 if (cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type)) 1827 1827 gpiod_set_value_cansleep(cs35l41->reset_gpio, 0); 1828 1828 gpiod_put(cs35l41->reset_gpio); 1829 + gpiod_put(cs35l41->cs_gpio); 1829 1830 acpi_dev_put(cs35l41->dacpi); 1830 1831 kfree(cs35l41->acpi_subsystem_id); 1831 1832 ··· 1854 1853 if (cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type)) 1855 1854 gpiod_set_value_cansleep(cs35l41->reset_gpio, 0); 1856 1855 gpiod_put(cs35l41->reset_gpio); 1856 + gpiod_put(cs35l41->cs_gpio); 1857 1857 kfree(cs35l41->acpi_subsystem_id); 1858 1858 } 1859 1859 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
+3 -2
sound/pci/hda/cs35l41_hda.h
··· 35 35 } __packed; 36 36 37 37 enum cs35l41_hda_spk_pos { 38 - CS35l41_LEFT, 39 - CS35l41_RIGHT, 38 + CS35L41_LEFT, 39 + CS35L41_RIGHT, 40 40 }; 41 41 42 42 enum cs35l41_hda_gpio_function { ··· 50 50 struct device *dev; 51 51 struct regmap *regmap; 52 52 struct gpio_desc *reset_gpio; 53 + struct gpio_desc *cs_gpio; 53 54 struct cs35l41_hw_cfg hw_cfg; 54 55 struct hda_codec *codec; 55 56
+325 -40
sound/pci/hda/cs35l41_hda_property.c
··· 6 6 // 7 7 // Author: Stefan Binding <sbinding@opensource.cirrus.com> 8 8 9 + #include <linux/acpi.h> 9 10 #include <linux/gpio/consumer.h> 10 11 #include <linux/string.h> 11 12 #include "cs35l41_hda_property.h" 13 + #include <linux/spi/spi.h> 14 + 15 + #define MAX_AMPS 4 16 + 17 + struct cs35l41_config { 18 + const char *ssid; 19 + enum { 20 + SPI, 21 + I2C 22 + } bus; 23 + int num_amps; 24 + enum { 25 + INTERNAL, 26 + EXTERNAL 27 + } boost_type; 28 + u8 channel[MAX_AMPS]; 29 + int reset_gpio_index; /* -1 if no reset gpio */ 30 + int spkid_gpio_index; /* -1 if no spkid gpio */ 31 + int cs_gpio_index; /* -1 if no cs gpio, or cs-gpios already exists, max num amps == 2 */ 32 + int boost_ind_nanohenry; /* Required if boost_type == Internal */ 33 + int boost_peak_milliamp; /* Required if boost_type == Internal */ 34 + int boost_cap_microfarad; /* Required if boost_type == Internal */ 35 + }; 36 + 37 + static const struct cs35l41_config cs35l41_config_table[] = { 38 + /* 39 + * Device 103C89C6 does have _DSD, however it is setup to use the wrong boost type. 40 + * We can override the _DSD to correct the boost type here. 41 + * Since this laptop has valid ACPI, we do not need to handle cs-gpios, since that already exists 42 + * in the ACPI. The Reset GPIO is also valid, so we can use the Reset defined in _DSD. 
43 + */ 44 + { "103C89C6", SPI, 2, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, -1, -1, -1, 1000, 4500, 24 }, 45 + { "104312AF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 46 + { "10431433", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 47 + { "10431463", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 48 + { "10431473", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 }, 49 + { "10431483", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 }, 50 + { "10431493", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 51 + { "104314D3", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 52 + { "104314E3", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 53 + { "10431503", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 54 + { "10431533", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 55 + { "10431573", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 56 + { "10431663", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 }, 57 + { "104316D3", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 }, 58 + { "104316F3", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 }, 59 + { "104317F3", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 60 + { "10431863", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 61 + { "104318D3", I2C, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 }, 62 + { "10431C9F", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 63 + { "10431CAF", SPI, 2, INTERNAL, { 
CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 64 + { "10431CCF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 65 + { "10431CDF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 66 + { "10431CEF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, 67 + { "10431D1F", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 68 + { "10431DA2", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 }, 69 + { "10431E02", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 }, 70 + { "10431EE2", I2C, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 }, 71 + { "10431F12", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 }, 72 + { "10431F1F", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 0, 0, 0 }, 73 + { "10431F62", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 }, 74 + {} 75 + }; 76 + 77 + static int cs35l41_add_gpios(struct cs35l41_hda *cs35l41, struct device *physdev, int reset_gpio, 78 + int spkid_gpio, int cs_gpio_index, int num_amps) 79 + { 80 + struct acpi_gpio_mapping *gpio_mapping = NULL; 81 + struct acpi_gpio_params *reset_gpio_params = NULL; 82 + struct acpi_gpio_params *spkid_gpio_params = NULL; 83 + struct acpi_gpio_params *cs_gpio_params = NULL; 84 + unsigned int num_entries = 0; 85 + unsigned int reset_index, spkid_index, csgpio_index; 86 + int i; 87 + 88 + /* 89 + * GPIO Mapping only needs to be done once, since it would be available for subsequent amps 90 + */ 91 + if (cs35l41->dacpi->driver_gpios) 92 + return 0; 93 + 94 + if (reset_gpio >= 0) { 95 + reset_index = num_entries; 96 + num_entries++; 97 + } 98 + 99 + if (spkid_gpio >= 0) { 100 + spkid_index = num_entries; 101 + num_entries++; 102 + } 103 + 104 + if ((cs_gpio_index >= 0) && (num_amps == 2)) { 105 + 
csgpio_index = num_entries; 106 + num_entries++; 107 + } 108 + 109 + if (!num_entries) 110 + return 0; 111 + 112 + /* must include termination entry */ 113 + num_entries++; 114 + 115 + gpio_mapping = devm_kcalloc(physdev, num_entries, sizeof(struct acpi_gpio_mapping), 116 + GFP_KERNEL); 117 + 118 + if (!gpio_mapping) 119 + goto err; 120 + 121 + if (reset_gpio >= 0) { 122 + gpio_mapping[reset_index].name = "reset-gpios"; 123 + reset_gpio_params = devm_kcalloc(physdev, num_amps, sizeof(struct acpi_gpio_params), 124 + GFP_KERNEL); 125 + if (!reset_gpio_params) 126 + goto err; 127 + 128 + for (i = 0; i < num_amps; i++) 129 + reset_gpio_params[i].crs_entry_index = reset_gpio; 130 + 131 + gpio_mapping[reset_index].data = reset_gpio_params; 132 + gpio_mapping[reset_index].size = num_amps; 133 + } 134 + 135 + if (spkid_gpio >= 0) { 136 + gpio_mapping[spkid_index].name = "spk-id-gpios"; 137 + spkid_gpio_params = devm_kcalloc(physdev, num_amps, sizeof(struct acpi_gpio_params), 138 + GFP_KERNEL); 139 + if (!spkid_gpio_params) 140 + goto err; 141 + 142 + for (i = 0; i < num_amps; i++) 143 + spkid_gpio_params[i].crs_entry_index = spkid_gpio; 144 + 145 + gpio_mapping[spkid_index].data = spkid_gpio_params; 146 + gpio_mapping[spkid_index].size = num_amps; 147 + } 148 + 149 + if ((cs_gpio_index >= 0) && (num_amps == 2)) { 150 + gpio_mapping[csgpio_index].name = "cs-gpios"; 151 + /* only one GPIO CS is supported without using _DSD, obtained using index 0 */ 152 + cs_gpio_params = devm_kzalloc(physdev, sizeof(struct acpi_gpio_params), GFP_KERNEL); 153 + if (!cs_gpio_params) 154 + goto err; 155 + 156 + cs_gpio_params->crs_entry_index = cs_gpio_index; 157 + 158 + gpio_mapping[csgpio_index].data = cs_gpio_params; 159 + gpio_mapping[csgpio_index].size = 1; 160 + } 161 + 162 + return devm_acpi_dev_add_driver_gpios(physdev, gpio_mapping); 163 + err: 164 + devm_kfree(physdev, gpio_mapping); 165 + devm_kfree(physdev, reset_gpio_params); 166 + devm_kfree(physdev, spkid_gpio_params); 167 + 
devm_kfree(physdev, cs_gpio_params); 168 + return -ENOMEM; 169 + } 170 + 171 + static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physdev, int id, 172 + const char *hid) 173 + { 174 + struct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg; 175 + const struct cs35l41_config *cfg; 176 + struct gpio_desc *cs_gpiod; 177 + struct spi_device *spi; 178 + bool dsd_found; 179 + int ret; 180 + 181 + for (cfg = cs35l41_config_table; cfg->ssid; cfg++) { 182 + if (!strcasecmp(cfg->ssid, cs35l41->acpi_subsystem_id)) 183 + break; 184 + } 185 + 186 + if (!cfg->ssid) 187 + return -ENOENT; 188 + 189 + if (!cs35l41->dacpi || cs35l41->dacpi != ACPI_COMPANION(physdev)) { 190 + dev_err(cs35l41->dev, "ACPI Device does not match, cannot override _DSD.\n"); 191 + return -ENODEV; 192 + } 193 + 194 + dev_info(cs35l41->dev, "Adding DSD properties for %s\n", cs35l41->acpi_subsystem_id); 195 + 196 + dsd_found = acpi_dev_has_props(cs35l41->dacpi); 197 + 198 + if (!dsd_found) { 199 + ret = cs35l41_add_gpios(cs35l41, physdev, cfg->reset_gpio_index, 200 + cfg->spkid_gpio_index, cfg->cs_gpio_index, 201 + cfg->num_amps); 202 + if (ret) { 203 + dev_err(cs35l41->dev, "Error adding GPIO mapping: %d\n", ret); 204 + return ret; 205 + } 206 + } else if (cfg->reset_gpio_index >= 0 || cfg->spkid_gpio_index >= 0) { 207 + dev_warn(cs35l41->dev, "Cannot add Reset/Speaker ID/SPI CS GPIO Mapping, " 208 + "_DSD already exists.\n"); 209 + } 210 + 211 + if (cfg->bus == SPI) { 212 + cs35l41->index = id; 213 + 214 + #if IS_ENABLED(CONFIG_SPI) 215 + /* 216 + * Manually set the Chip Select for the second amp <cs_gpio_index> in the node. 217 + * This is only supported for systems with 2 amps, since we cannot expand the 218 + * default number of chip selects without using cs-gpios 219 + * The CS GPIO must be set high prior to communicating with the first amp (which 220 + * uses a native chip select), to ensure the second amp does not clash with the 221 + * first. 
222 + */ 223 + if (cfg->cs_gpio_index >= 0) { 224 + spi = to_spi_device(cs35l41->dev); 225 + 226 + if (cfg->num_amps != 2) { 227 + dev_warn(cs35l41->dev, 228 + "Cannot update SPI CS, Number of Amps (%d) != 2\n", 229 + cfg->num_amps); 230 + } else if (dsd_found) { 231 + dev_warn(cs35l41->dev, 232 + "Cannot update SPI CS, _DSD already exists.\n"); 233 + } else { 234 + /* 235 + * This is obtained using driver_gpios, since only one GPIO for CS 236 + * exists, this can be obtained using index 0. 237 + */ 238 + cs_gpiod = gpiod_get_index(physdev, "cs", 0, GPIOD_OUT_LOW); 239 + if (IS_ERR(cs_gpiod)) { 240 + dev_err(cs35l41->dev, 241 + "Unable to get Chip Select GPIO descriptor\n"); 242 + return PTR_ERR(cs_gpiod); 243 + } 244 + if (id == 1) { 245 + spi_set_csgpiod(spi, 0, cs_gpiod); 246 + cs35l41->cs_gpio = cs_gpiod; 247 + } else { 248 + gpiod_set_value_cansleep(cs_gpiod, true); 249 + gpiod_put(cs_gpiod); 250 + } 251 + spi_setup(spi); 252 + } 253 + } 254 + #endif 255 + } else { 256 + if (cfg->num_amps > 2) 257 + /* 258 + * i2c addresses for 3/4 amps are used in order: 0x40, 0x41, 0x42, 0x43, 259 + * subtracting 0x40 would give zero-based index 260 + */ 261 + cs35l41->index = id - 0x40; 262 + else 263 + /* i2c addr 0x40 for first amp (always), 0x41/0x42 for 2nd amp */ 264 + cs35l41->index = id == 0x40 ? 
0 : 1; 265 + } 266 + 267 + if (cfg->num_amps == 3) 268 + /* 3 amps means a center channel, so no duplicate channels */ 269 + cs35l41->channel_index = 0; 270 + else 271 + /* 272 + * if 4 amps, there are duplicate channels, so they need different indexes 273 + * if 2 amps, no duplicate channels, channel_index would be 0 274 + */ 275 + cs35l41->channel_index = cs35l41->index / 2; 276 + 277 + cs35l41->reset_gpio = fwnode_gpiod_get_index(acpi_fwnode_handle(cs35l41->dacpi), "reset", 278 + cs35l41->index, GPIOD_OUT_LOW, 279 + "cs35l41-reset"); 280 + cs35l41->speaker_id = cs35l41_get_speaker_id(physdev, cs35l41->index, cfg->num_amps, -1); 281 + 282 + hw_cfg->spk_pos = cfg->channel[cs35l41->index]; 283 + 284 + if (cfg->boost_type == INTERNAL) { 285 + hw_cfg->bst_type = CS35L41_INT_BOOST; 286 + hw_cfg->bst_ind = cfg->boost_ind_nanohenry; 287 + hw_cfg->bst_ipk = cfg->boost_peak_milliamp; 288 + hw_cfg->bst_cap = cfg->boost_cap_microfarad; 289 + hw_cfg->gpio1.func = CS35L41_NOT_USED; 290 + hw_cfg->gpio1.valid = true; 291 + } else { 292 + hw_cfg->bst_type = CS35L41_EXT_BOOST; 293 + hw_cfg->bst_ind = -1; 294 + hw_cfg->bst_ipk = -1; 295 + hw_cfg->bst_cap = -1; 296 + hw_cfg->gpio1.func = CS35l41_VSPK_SWITCH; 297 + hw_cfg->gpio1.valid = true; 298 + } 299 + 300 + hw_cfg->gpio2.func = CS35L41_INTERRUPT; 301 + hw_cfg->gpio2.valid = true; 302 + hw_cfg->valid = true; 303 + 304 + return 0; 305 + } 12 306 13 307 /* 14 308 * Device CLSA010(0/1) doesn't have _DSD so a gpiod_get by the label reset won't work. ··· 337 43 return 0; 338 44 } 339 45 340 - /* 341 - * Device 103C89C6 does have _DSD, however it is setup to use the wrong boost type. 342 - * We can override the _DSD to correct the boost type here. 343 - * Since this laptop has valid ACPI, we do not need to handle cs-gpios, since that already exists 344 - * in the ACPI. 
345 - */ 346 - static int hp_vision_acpi_fix(struct cs35l41_hda *cs35l41, struct device *physdev, int id, 347 - const char *hid) 348 - { 349 - struct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg; 350 - 351 - dev_info(cs35l41->dev, "Adding DSD properties for %s\n", cs35l41->acpi_subsystem_id); 352 - 353 - cs35l41->index = id; 354 - cs35l41->channel_index = 0; 355 - 356 - /* 357 - * This system has _DSD, it just contains an error, so we can still get the reset using 358 - * the "reset" label. 359 - */ 360 - cs35l41->reset_gpio = fwnode_gpiod_get_index(acpi_fwnode_handle(cs35l41->dacpi), "reset", 361 - cs35l41->index, GPIOD_OUT_LOW, 362 - "cs35l41-reset"); 363 - cs35l41->speaker_id = -ENOENT; 364 - hw_cfg->spk_pos = cs35l41->index ? 0 : 1; // right:left 365 - hw_cfg->gpio1.func = CS35L41_NOT_USED; 366 - hw_cfg->gpio1.valid = true; 367 - hw_cfg->gpio2.func = CS35L41_INTERRUPT; 368 - hw_cfg->gpio2.valid = true; 369 - hw_cfg->bst_type = CS35L41_INT_BOOST; 370 - hw_cfg->bst_ind = 1000; 371 - hw_cfg->bst_ipk = 4500; 372 - hw_cfg->bst_cap = 24; 373 - hw_cfg->valid = true; 374 - 375 - return 0; 376 - } 377 - 378 46 struct cs35l41_prop_model { 379 47 const char *hid; 380 48 const char *ssid; ··· 347 91 static const struct cs35l41_prop_model cs35l41_prop_model_table[] = { 348 92 { "CLSA0100", NULL, lenovo_legion_no_acpi }, 349 93 { "CLSA0101", NULL, lenovo_legion_no_acpi }, 350 - { "CSC3551", "103C89C6", hp_vision_acpi_fix }, 94 + { "CSC3551", "103C89C6", generic_dsd_config }, 95 + { "CSC3551", "104312AF", generic_dsd_config }, 96 + { "CSC3551", "10431433", generic_dsd_config }, 97 + { "CSC3551", "10431463", generic_dsd_config }, 98 + { "CSC3551", "10431473", generic_dsd_config }, 99 + { "CSC3551", "10431483", generic_dsd_config }, 100 + { "CSC3551", "10431493", generic_dsd_config }, 101 + { "CSC3551", "104314D3", generic_dsd_config }, 102 + { "CSC3551", "104314E3", generic_dsd_config }, 103 + { "CSC3551", "10431503", generic_dsd_config }, 104 + { "CSC3551", "10431533", 
generic_dsd_config }, 105 + { "CSC3551", "10431573", generic_dsd_config }, 106 + { "CSC3551", "10431663", generic_dsd_config }, 107 + { "CSC3551", "104316D3", generic_dsd_config }, 108 + { "CSC3551", "104316F3", generic_dsd_config }, 109 + { "CSC3551", "104317F3", generic_dsd_config }, 110 + { "CSC3551", "10431863", generic_dsd_config }, 111 + { "CSC3551", "104318D3", generic_dsd_config }, 112 + { "CSC3551", "10431C9F", generic_dsd_config }, 113 + { "CSC3551", "10431CAF", generic_dsd_config }, 114 + { "CSC3551", "10431CCF", generic_dsd_config }, 115 + { "CSC3551", "10431CDF", generic_dsd_config }, 116 + { "CSC3551", "10431CEF", generic_dsd_config }, 117 + { "CSC3551", "10431D1F", generic_dsd_config }, 118 + { "CSC3551", "10431DA2", generic_dsd_config }, 119 + { "CSC3551", "10431E02", generic_dsd_config }, 120 + { "CSC3551", "10431EE2", generic_dsd_config }, 121 + { "CSC3551", "10431F12", generic_dsd_config }, 122 + { "CSC3551", "10431F1F", generic_dsd_config }, 123 + { "CSC3551", "10431F62", generic_dsd_config }, 351 124 {} 352 125 }; 353 126 ··· 389 104 if (!strcmp(model->hid, hid) && 390 105 (!model->ssid || 391 106 (cs35l41->acpi_subsystem_id && 392 - !strcmp(model->ssid, cs35l41->acpi_subsystem_id)))) 107 + !strcasecmp(model->ssid, cs35l41->acpi_subsystem_id)))) 393 108 return model->add_prop(cs35l41, physdev, id, hid); 394 109 } 395 110
+26 -12
sound/pci/hda/patch_realtek.c
··· 9948 9948 SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), 9949 9949 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), 9950 9950 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), 9951 - SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), 9952 - SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), 9953 - SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC), 9954 - SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC), 9955 - SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601V", ALC285_FIXUP_ASUS_HEADSET_MIC), 9951 + SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), 9952 + SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), 9953 + SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC), 9954 + SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), 9955 + SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), 9956 + SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), 9957 + SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2), 9958 + SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2), 9956 9959 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 9957 - SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC), 9960 + SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2), 9961 + SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301VV/VQ/VU/VJ/VA/VC/VE/VVC/VQC/VUC/VJC/VEC/VCC", ALC285_FIXUP_ASUS_HEADSET_MIC), 9958 9962 SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), 9959 - SND_PCI_QUIRK(0x1043, 
0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC), 9963 + SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZI/ZJ/ZQ/ZU/ZV", ALC285_FIXUP_ASUS_HEADSET_MIC), 9960 9964 SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2), 9961 9965 SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), 9966 + SND_PCI_QUIRK(0x1043, 0x16d3, "ASUS UX5304VA", ALC245_FIXUP_CS35L41_SPI_2), 9962 9967 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 9968 + SND_PCI_QUIRK(0x1043, 0x16f3, "ASUS UX7602VI/BZ", ALC245_FIXUP_CS35L41_SPI_2), 9963 9969 SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), 9964 9970 SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), 9965 - SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY), 9971 + SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY), 9972 + SND_PCI_QUIRK(0x1043, 0x1863, "ASUS UX6404VI/VV", ALC245_FIXUP_CS35L41_SPI_2), 9966 9973 SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), 9967 9974 SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), 9968 9975 SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS UM3504DA", ALC294_FIXUP_CS35L41_I2C_2), ··· 9994 9987 SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2), 9995 9988 SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), 9996 9989 SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), 9997 - SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC), 9998 - SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), 9990 + SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC), 9991 + SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), 9999 9992 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 10000 - SND_PCI_QUIRK(0x1043, 
0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2), 9993 + SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2), 9994 + SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), 9995 + SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC), 9996 + SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS G713PI/PU/PV/PVN", ALC287_FIXUP_CS35L41_I2C_2), 10001 9997 SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), 10002 9998 SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), 9999 + SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2), 10003 10000 SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), 10004 10001 SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2), 10005 10002 SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), 10006 10003 SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), 10007 - SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), 10004 + SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), 10008 10005 SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), 10009 10006 SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), 10010 10007 SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), 10008 + SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), 10011 10009 SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), 10012 10010 SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), 10013 10011 SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), 10012 + SND_PCI_QUIRK(0x1043, 0x1f1f, "ASUS H7604JI/JV/J3D", ALC245_FIXUP_CS35L41_SPI_2), 10013 + SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", 
ALC245_FIXUP_CS35L41_SPI_2), 10014 10014 SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), 10015 10015 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), 10016 10016 SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
+4
sound/pci/hda/tas2781_hda_i2c.c
··· 543 543 544 544 tas_priv->fw_state = TASDEVICE_DSP_FW_ALL_OK; 545 545 tasdevice_prmg_load(tas_priv, 0); 546 + if (tas_priv->fmw->nr_programs > 0) 547 + tas_priv->cur_prog = 0; 548 + if (tas_priv->fmw->nr_configurations > 0) 549 + tas_priv->cur_conf = 0; 546 550 547 551 /* If calibrated data occurs error, dsp will still works with default 548 552 * calibrated data inside algo.
+1 -1
sound/soc/codecs/cs35l45-i2c.c
··· 62 62 .driver = { 63 63 .name = "cs35l45", 64 64 .of_match_table = cs35l45_of_match, 65 - .pm = &cs35l45_pm_ops, 65 + .pm = pm_ptr(&cs35l45_pm_ops), 66 66 }, 67 67 .id_table = cs35l45_id_i2c, 68 68 .probe = cs35l45_i2c_probe,
+1 -1
sound/soc/codecs/cs35l45-spi.c
··· 64 64 .driver = { 65 65 .name = "cs35l45", 66 66 .of_match_table = cs35l45_of_match, 67 - .pm = &cs35l45_pm_ops, 67 + .pm = pm_ptr(&cs35l45_pm_ops), 68 68 }, 69 69 .id_table = cs35l45_id_spi, 70 70 .probe = cs35l45_spi_probe,
+51 -5
sound/soc/codecs/cs35l45.c
··· 947 947 948 948 cs35l45_setup_hibernate(cs35l45); 949 949 950 + regmap_set_bits(cs35l45->regmap, CS35L45_IRQ1_MASK_2, CS35L45_DSP_VIRT2_MBOX_MASK); 951 + 950 952 // Don't wait for ACK since bus activity would wake the device 951 953 regmap_write(cs35l45->regmap, CS35L45_DSP_VIRT1_MBOX_1, CSPL_MBOX_CMD_HIBERNATE); 952 954 ··· 969 967 CSPL_MBOX_CMD_OUT_OF_HIBERNATE); 970 968 if (!ret) { 971 969 dev_dbg(cs35l45->dev, "Wake success at cycle: %d\n", j); 970 + regmap_clear_bits(cs35l45->regmap, CS35L45_IRQ1_MASK_2, 971 + CS35L45_DSP_VIRT2_MBOX_MASK); 972 972 return 0; 973 973 } 974 974 usleep_range(100, 200); ··· 986 982 return -ETIMEDOUT; 987 983 } 988 984 989 - static int __maybe_unused cs35l45_runtime_suspend(struct device *dev) 985 + static int cs35l45_runtime_suspend(struct device *dev) 990 986 { 991 987 struct cs35l45_private *cs35l45 = dev_get_drvdata(dev); 992 988 ··· 1003 999 return 0; 1004 1000 } 1005 1001 1006 - static int __maybe_unused cs35l45_runtime_resume(struct device *dev) 1002 + static int cs35l45_runtime_resume(struct device *dev) 1007 1003 { 1008 1004 struct cs35l45_private *cs35l45 = dev_get_drvdata(dev); 1009 1005 int ret; ··· 1028 1024 regmap_set_bits(cs35l45->regmap, CS35L45_ERROR_RELEASE, CS35L45_GLOBAL_ERR_RLS_MASK); 1029 1025 regmap_clear_bits(cs35l45->regmap, CS35L45_ERROR_RELEASE, CS35L45_GLOBAL_ERR_RLS_MASK); 1030 1026 return ret; 1027 + } 1028 + 1029 + static int cs35l45_sys_suspend(struct device *dev) 1030 + { 1031 + struct cs35l45_private *cs35l45 = dev_get_drvdata(dev); 1032 + 1033 + dev_dbg(cs35l45->dev, "System suspend, disabling IRQ\n"); 1034 + disable_irq(cs35l45->irq); 1035 + 1036 + return 0; 1037 + } 1038 + 1039 + static int cs35l45_sys_suspend_noirq(struct device *dev) 1040 + { 1041 + struct cs35l45_private *cs35l45 = dev_get_drvdata(dev); 1042 + 1043 + dev_dbg(cs35l45->dev, "Late system suspend, reenabling IRQ\n"); 1044 + enable_irq(cs35l45->irq); 1045 + 1046 + return 0; 1047 + } 1048 + 1049 + static int 
cs35l45_sys_resume_noirq(struct device *dev) 1050 + { 1051 + struct cs35l45_private *cs35l45 = dev_get_drvdata(dev); 1052 + 1053 + dev_dbg(cs35l45->dev, "Early system resume, disabling IRQ\n"); 1054 + disable_irq(cs35l45->irq); 1055 + 1056 + return 0; 1057 + } 1058 + 1059 + static int cs35l45_sys_resume(struct device *dev) 1060 + { 1061 + struct cs35l45_private *cs35l45 = dev_get_drvdata(dev); 1062 + 1063 + dev_dbg(cs35l45->dev, "System resume, reenabling IRQ\n"); 1064 + enable_irq(cs35l45->irq); 1065 + 1066 + return 0; 1031 1067 } 1032 1068 1033 1069 static int cs35l45_apply_property_config(struct cs35l45_private *cs35l45) ··· 1510 1466 } 1511 1467 EXPORT_SYMBOL_NS_GPL(cs35l45_remove, SND_SOC_CS35L45); 1512 1468 1513 - const struct dev_pm_ops cs35l45_pm_ops = { 1514 - SET_RUNTIME_PM_OPS(cs35l45_runtime_suspend, cs35l45_runtime_resume, NULL) 1469 + EXPORT_GPL_DEV_PM_OPS(cs35l45_pm_ops) = { 1470 + RUNTIME_PM_OPS(cs35l45_runtime_suspend, cs35l45_runtime_resume, NULL) 1471 + 1472 + SYSTEM_SLEEP_PM_OPS(cs35l45_sys_suspend, cs35l45_sys_resume) 1473 + NOIRQ_SYSTEM_SLEEP_PM_OPS(cs35l45_sys_suspend_noirq, cs35l45_sys_resume_noirq) 1515 1474 }; 1516 - EXPORT_SYMBOL_NS_GPL(cs35l45_pm_ops, SND_SOC_CS35L45); 1517 1475 1518 1476 MODULE_DESCRIPTION("ASoC CS35L45 driver"); 1519 1477 MODULE_AUTHOR("James Schulman, Cirrus Logic Inc, <james.schulman@cirrus.com>");
+11 -10
sound/soc/codecs/cs42l43-jack.c
··· 237 237 return ret; 238 238 } 239 239 240 - static void cs42l43_start_hs_bias(struct cs42l43_codec *priv, bool force_high) 240 + static void cs42l43_start_hs_bias(struct cs42l43_codec *priv, bool type_detect) 241 241 { 242 242 struct cs42l43 *cs42l43 = priv->core; 243 243 unsigned int val = 0x3 << CS42L43_HSBIAS_MODE_SHIFT; ··· 247 247 regmap_update_bits(cs42l43->regmap, CS42L43_HS2, 248 248 CS42L43_HS_CLAMP_DISABLE_MASK, CS42L43_HS_CLAMP_DISABLE_MASK); 249 249 250 - if (!force_high && priv->bias_low) 251 - val = 0x2 << CS42L43_HSBIAS_MODE_SHIFT; 250 + if (!type_detect) { 251 + if (priv->bias_low) 252 + val = 0x2 << CS42L43_HSBIAS_MODE_SHIFT; 252 253 253 - if (priv->bias_sense_ua) { 254 - regmap_update_bits(cs42l43->regmap, 255 - CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL, 256 - CS42L43_HSBIAS_SENSE_EN_MASK | 257 - CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK, 258 - CS42L43_HSBIAS_SENSE_EN_MASK | 259 - CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK); 254 + if (priv->bias_sense_ua) 255 + regmap_update_bits(cs42l43->regmap, 256 + CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL, 257 + CS42L43_HSBIAS_SENSE_EN_MASK | 258 + CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK, 259 + CS42L43_HSBIAS_SENSE_EN_MASK | 260 + CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK); 260 261 } 261 262 262 263 regmap_update_bits(cs42l43->regmap, CS42L43_MIC_DETECT_CONTROL_1,
+10 -2
sound/soc/codecs/hdmi-codec.c
··· 850 850 static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp, 851 851 unsigned int jack_status) 852 852 { 853 - if (hcp->jack && jack_status != hcp->jack_status) { 854 - snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT); 853 + if (jack_status != hcp->jack_status) { 854 + if (hcp->jack) 855 + snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT); 855 856 hcp->jack_status = jack_status; 856 857 } 857 858 } ··· 881 880 882 881 if (hcp->hcd.ops->hook_plugged_cb) { 883 882 hcp->jack = jack; 883 + 884 + /* 885 + * Report the initial jack status which may have been provided 886 + * by the parent hdmi driver while the hpd hook was registered. 887 + */ 888 + snd_soc_jack_report(jack, hcp->jack_status, SND_JACK_LINEOUT); 889 + 884 890 return 0; 885 891 } 886 892
+8 -6
sound/soc/codecs/tas2781-fmwlib.c
··· 2189 2189 goto out; 2190 2190 } 2191 2191 2192 - conf = &(tas_fmw->configs[cfg_no]); 2193 2192 for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) { 2194 2193 if (cfg_info[rca_conf_no]->active_dev & (1 << i)) { 2195 - if (tas_priv->tasdevice[i].cur_prog != prm_no 2196 - || tas_priv->force_fwload_status) { 2194 + if (prm_no >= 0 2195 + && (tas_priv->tasdevice[i].cur_prog != prm_no 2196 + || tas_priv->force_fwload_status)) { 2197 2197 tas_priv->tasdevice[i].cur_conf = -1; 2198 2198 tas_priv->tasdevice[i].is_loading = true; 2199 2199 prog_status++; ··· 2228 2228 } 2229 2229 2230 2230 for (i = 0, status = 0; i < tas_priv->ndev; i++) { 2231 - if (tas_priv->tasdevice[i].cur_conf != cfg_no 2231 + if (cfg_no >= 0 2232 + && tas_priv->tasdevice[i].cur_conf != cfg_no 2232 2233 && (cfg_info[rca_conf_no]->active_dev & (1 << i)) 2233 2234 && (tas_priv->tasdevice[i].is_loaderr == false)) { 2234 2235 status++; ··· 2239 2238 } 2240 2239 2241 2240 if (status) { 2241 + conf = &(tas_fmw->configs[cfg_no]); 2242 2242 status = 0; 2243 2243 tasdevice_load_data(tas_priv, &(conf->dev_data)); 2244 2244 for (i = 0; i < tas_priv->ndev; i++) { ··· 2283 2281 } 2284 2282 2285 2283 for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) { 2286 - if (tas_priv->tasdevice[i].cur_prog != prm_no) { 2284 + if (prm_no >= 0 && tas_priv->tasdevice[i].cur_prog != prm_no) { 2287 2285 tas_priv->tasdevice[i].cur_conf = -1; 2288 2286 tas_priv->tasdevice[i].is_loading = true; 2289 2287 prog_status++; ··· 2328 2326 } 2329 2327 2330 2328 for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) { 2331 - if (tas_priv->tasdevice[i].cur_prog != prm_no) { 2329 + if (prm_no >= 0 && tas_priv->tasdevice[i].cur_prog != prm_no) { 2332 2330 tas_priv->tasdevice[i].cur_conf = -1; 2333 2331 tas_priv->tasdevice[i].is_loading = true; 2334 2332 prog_status++;
+3
sound/soc/fsl/fsl_sai.c
··· 714 714 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; 715 715 unsigned int ofs = sai->soc_data->reg_offset; 716 716 717 + /* Clear xMR to avoid channel swap with mclk_with_tere enabled case */ 718 + regmap_write(sai->regmap, FSL_SAI_xMR(tx), 0); 719 + 717 720 regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx, ofs), 718 721 FSL_SAI_CR3_TRCE_MASK, 0); 719 722
+25 -6
sound/soc/intel/boards/bytcr_rt5640.c
··· 83 83 #define BYT_RT5640_HSMIC2_ON_IN1 BIT(27) 84 84 #define BYT_RT5640_JD_HP_ELITEP_1000G2 BIT(28) 85 85 #define BYT_RT5640_USE_AMCR0F28 BIT(29) 86 + #define BYT_RT5640_SWAPPED_SPEAKERS BIT(30) 86 87 87 88 #define BYTCR_INPUT_DEFAULTS \ 88 89 (BYT_RT5640_IN3_MAP | \ ··· 158 157 dev_info(dev, "quirk MONO_SPEAKER enabled\n"); 159 158 if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS) 160 159 dev_info(dev, "quirk NO_SPEAKERS enabled\n"); 160 + if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS) 161 + dev_info(dev, "quirk SWAPPED_SPEAKERS enabled\n"); 161 162 if (byt_rt5640_quirk & BYT_RT5640_LINEOUT) 162 163 dev_info(dev, "quirk LINEOUT enabled\n"); 163 164 if (byt_rt5640_quirk & BYT_RT5640_LINEOUT_AS_HP2) ··· 897 894 BYT_RT5640_SSP0_AIF1 | 898 895 BYT_RT5640_MCLK_EN), 899 896 }, 897 + { 898 + /* Medion Lifetab S10346 */ 899 + .matches = { 900 + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), 901 + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), 902 + /* Above strings are much too generic, also match on BIOS date */ 903 + DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"), 904 + }, 905 + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | 906 + BYT_RT5640_SWAPPED_SPEAKERS | 907 + BYT_RT5640_SSP0_AIF1 | 908 + BYT_RT5640_MCLK_EN), 909 + }, 900 910 { /* Mele PCG03 Mini PC */ 901 911 .matches = { 902 912 DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"), ··· 1635 1619 const char *platform_name; 1636 1620 struct acpi_device *adev; 1637 1621 struct device *codec_dev; 1622 + const char *cfg_spk; 1638 1623 bool sof_parent; 1639 1624 int ret_val = 0; 1640 1625 int dai_index = 0; 1641 - int i, cfg_spk; 1642 - int aif; 1626 + int i, aif; 1643 1627 1644 1628 is_bytcr = false; 1645 1629 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ··· 1799 1783 } 1800 1784 1801 1785 if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS) { 1802 - cfg_spk = 0; 1786 + cfg_spk = "0"; 1803 1787 spk_type = "none"; 1804 1788 } else if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) { 1805 - cfg_spk = 1; 1789 + cfg_spk = "1"; 1806 1790 
spk_type = "mono"; 1791 + } else if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS) { 1792 + cfg_spk = "swapped"; 1793 + spk_type = "swapped"; 1807 1794 } else { 1808 - cfg_spk = 2; 1795 + cfg_spk = "2"; 1809 1796 spk_type = "stereo"; 1810 1797 } 1811 1798 ··· 1823 1804 headset2_string = " cfg-hs2:in1"; 1824 1805 1825 1806 snprintf(byt_rt5640_components, sizeof(byt_rt5640_components), 1826 - "cfg-spk:%d cfg-mic:%s aif:%d%s%s", cfg_spk, 1807 + "cfg-spk:%s cfg-mic:%s aif:%d%s%s", cfg_spk, 1827 1808 map_name[BYT_RT5640_MAP(byt_rt5640_quirk)], aif, 1828 1809 lineout_string, headset2_string); 1829 1810 byt_rt5640_card.components = byt_rt5640_components;
+4 -4
sound/soc/intel/common/soc-acpi-intel-mtl-match.c
··· 306 306 .adr = 0x00013701FA355601ull, 307 307 .num_endpoints = 1, 308 308 .endpoints = &spk_r_endpoint, 309 - .name_prefix = "cs35l56-8" 309 + .name_prefix = "AMP8" 310 310 }, 311 311 { 312 312 .adr = 0x00013601FA355601ull, 313 313 .num_endpoints = 1, 314 314 .endpoints = &spk_3_endpoint, 315 - .name_prefix = "cs35l56-7" 315 + .name_prefix = "AMP7" 316 316 } 317 317 }; 318 318 ··· 321 321 .adr = 0x00023301FA355601ull, 322 322 .num_endpoints = 1, 323 323 .endpoints = &spk_l_endpoint, 324 - .name_prefix = "cs35l56-1" 324 + .name_prefix = "AMP1" 325 325 }, 326 326 { 327 327 .adr = 0x00023201FA355601ull, 328 328 .num_endpoints = 1, 329 329 .endpoints = &spk_2_endpoint, 330 - .name_prefix = "cs35l56-2" 330 + .name_prefix = "AMP2" 331 331 } 332 332 }; 333 333
-3
sound/soc/sof/mediatek/mt8186/mt8186.c
··· 597 597 598 598 static struct snd_sof_of_mach sof_mt8186_machs[] = { 599 599 { 600 - .compatible = "google,steelix", 601 - .sof_tplg_filename = "sof-mt8186-google-steelix.tplg" 602 - }, { 603 600 .compatible = "mediatek,mt8186", 604 601 .sof_tplg_filename = "sof-mt8186.tplg", 605 602 },
+2 -2
sound/usb/quirks.c
··· 1387 1387 1388 1388 static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev) 1389 1389 { 1390 - msleep(2000); 1390 + msleep(4000); 1391 1391 1392 1392 return 0; 1393 1393 } ··· 1630 1630 unsigned int id) 1631 1631 { 1632 1632 switch (id) { 1633 - case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ 1633 + case USB_ID(0x07fd, 0x0008): /* MOTU M Series, 1st hardware version */ 1634 1634 return snd_usb_motu_m_series_boot_quirk(dev); 1635 1635 } 1636 1636
+1 -1
tools/testing/selftests/alsa/mixer-test.c
··· 138 138 err = snd_ctl_elem_info(card_data->handle, 139 139 ctl_data->info); 140 140 if (err < 0) { 141 - ksft_print_msg("%s getting info for %d\n", 141 + ksft_print_msg("%s getting info for %s\n", 142 142 snd_strerror(err), 143 143 ctl_data->name); 144 144 }
+34
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
··· 524 524 test_sockmap_pass_prog__destroy(pass); 525 525 } 526 526 527 + static void test_sockmap_unconnected_unix(void) 528 + { 529 + int err, map, stream = 0, dgram = 0, zero = 0; 530 + struct test_sockmap_pass_prog *skel; 531 + 532 + skel = test_sockmap_pass_prog__open_and_load(); 533 + if (!ASSERT_OK_PTR(skel, "open_and_load")) 534 + return; 535 + 536 + map = bpf_map__fd(skel->maps.sock_map_rx); 537 + 538 + stream = xsocket(AF_UNIX, SOCK_STREAM, 0); 539 + if (stream < 0) 540 + return; 541 + 542 + dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0); 543 + if (dgram < 0) { 544 + close(stream); 545 + return; 546 + } 547 + 548 + err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY); 549 + ASSERT_ERR(err, "bpf_map_update_elem(stream)"); 550 + 551 + err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY); 552 + ASSERT_OK(err, "bpf_map_update_elem(dgram)"); 553 + 554 + close(stream); 555 + close(dgram); 556 + } 557 + 527 558 void test_sockmap_basic(void) 528 559 { 529 560 if (test__start_subtest("sockmap create_update_free")) ··· 597 566 test_sockmap_skb_verdict_fionread(false); 598 567 if (test__start_subtest("sockmap skb_verdict msg_f_peek")) 599 568 test_sockmap_skb_verdict_peek(); 569 + 570 + if (test__start_subtest("sockmap unconnected af_unix")) 571 + test_sockmap_unconnected_unix(); 600 572 }
+19 -12
tools/testing/selftests/kvm/Makefile
··· 17 17 ARCH_DIR := $(ARCH) 18 18 endif 19 19 20 - ifeq ($(ARCH),arm64) 21 - tools_dir := $(top_srcdir)/tools 22 - arm64_tools_dir := $(tools_dir)/arch/arm64/tools/ 23 - GEN_HDRS := $(top_srcdir)/tools/arch/arm64/include/generated/ 24 - CFLAGS += -I$(GEN_HDRS) 25 - 26 - $(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*) 27 - $(MAKE) -C $(arm64_tools_dir) O=$(tools_dir) 28 - endif 29 - 30 20 LIBKVM += lib/assert.c 31 21 LIBKVM += lib/elf.c 32 22 LIBKVM += lib/guest_modes.c ··· 186 196 187 197 TEST_GEN_PROGS_riscv += demand_paging_test 188 198 TEST_GEN_PROGS_riscv += dirty_log_test 189 - TEST_GEN_PROGS_riscv += guest_print_test 190 199 TEST_GEN_PROGS_riscv += get-reg-list 200 + TEST_GEN_PROGS_riscv += guest_print_test 201 + TEST_GEN_PROGS_riscv += kvm_binary_stats_test 191 202 TEST_GEN_PROGS_riscv += kvm_create_max_vcpus 192 203 TEST_GEN_PROGS_riscv += kvm_page_table_test 193 204 TEST_GEN_PROGS_riscv += set_memory_region_test 194 - TEST_GEN_PROGS_riscv += kvm_binary_stats_test 205 + TEST_GEN_PROGS_riscv += steal_time 195 206 196 207 SPLIT_TESTS += get-reg-list 197 208 ··· 226 235 $(KHDR_INCLUDES) 227 236 ifeq ($(ARCH),s390) 228 237 CFLAGS += -march=z10 238 + endif 239 + ifeq ($(ARCH),arm64) 240 + tools_dir := $(top_srcdir)/tools 241 + arm64_tools_dir := $(tools_dir)/arch/arm64/tools/ 242 + 243 + ifneq ($(abs_objdir),) 244 + arm64_hdr_outdir := $(abs_objdir)/tools/ 245 + else 246 + arm64_hdr_outdir := $(tools_dir)/ 247 + endif 248 + 249 + GEN_HDRS := $(arm64_hdr_outdir)arch/arm64/include/generated/ 250 + CFLAGS += -I$(GEN_HDRS) 251 + 252 + $(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*) 253 + $(MAKE) -C $(arm64_tools_dir) OUTPUT=$(arm64_hdr_outdir) 229 254 endif 230 255 231 256 no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \
+5 -4
tools/testing/selftests/kvm/get-reg-list.c
··· 71 71 for_each_sublist(c, s) { 72 72 if (!strcmp(s->name, "base")) 73 73 continue; 74 - strcat(c->name + len, s->name); 75 - len += strlen(s->name) + 1; 76 - c->name[len - 1] = '+'; 74 + if (len) 75 + c->name[len++] = '+'; 76 + strcpy(c->name + len, s->name); 77 + len += strlen(s->name); 77 78 } 78 - c->name[len - 1] = '\0'; 79 + c->name[len] = '\0'; 79 80 80 81 return c->name; 81 82 }
+1
tools/testing/selftests/kvm/include/kvm_util_base.h
··· 129 129 const char *name; 130 130 long capability; 131 131 int feature; 132 + int feature_type; 132 133 bool finalize; 133 134 __u64 *regs; 134 135 __u64 regs_n;
+45 -17
tools/testing/selftests/kvm/include/riscv/processor.h
··· 10 10 #include "kvm_util.h" 11 11 #include <linux/stringify.h> 12 12 13 - static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx, 14 - uint64_t size) 13 + static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, 14 + uint64_t idx, uint64_t size) 15 15 { 16 - return KVM_REG_RISCV | type | idx | size; 16 + return KVM_REG_RISCV | type | subtype | idx | size; 17 17 } 18 18 19 19 #if __riscv_xlen == 64 ··· 22 22 #define KVM_REG_SIZE_ULONG KVM_REG_SIZE_U32 23 23 #endif 24 24 25 - #define RISCV_CONFIG_REG(name) __kvm_reg_id(KVM_REG_RISCV_CONFIG, \ 26 - KVM_REG_RISCV_CONFIG_REG(name), \ 27 - KVM_REG_SIZE_ULONG) 25 + #define RISCV_CONFIG_REG(name) __kvm_reg_id(KVM_REG_RISCV_CONFIG, 0, \ 26 + KVM_REG_RISCV_CONFIG_REG(name), \ 27 + KVM_REG_SIZE_ULONG) 28 28 29 - #define RISCV_CORE_REG(name) __kvm_reg_id(KVM_REG_RISCV_CORE, \ 30 - KVM_REG_RISCV_CORE_REG(name), \ 31 - KVM_REG_SIZE_ULONG) 29 + #define RISCV_CORE_REG(name) __kvm_reg_id(KVM_REG_RISCV_CORE, 0, \ 30 + KVM_REG_RISCV_CORE_REG(name), \ 31 + KVM_REG_SIZE_ULONG) 32 32 33 - #define RISCV_CSR_REG(name) __kvm_reg_id(KVM_REG_RISCV_CSR, \ 34 - KVM_REG_RISCV_CSR_REG(name), \ 35 - KVM_REG_SIZE_ULONG) 33 + #define RISCV_GENERAL_CSR_REG(name) __kvm_reg_id(KVM_REG_RISCV_CSR, \ 34 + KVM_REG_RISCV_CSR_GENERAL, \ 35 + KVM_REG_RISCV_CSR_REG(name), \ 36 + KVM_REG_SIZE_ULONG) 36 37 37 - #define RISCV_TIMER_REG(name) __kvm_reg_id(KVM_REG_RISCV_TIMER, \ 38 - KVM_REG_RISCV_TIMER_REG(name), \ 39 - KVM_REG_SIZE_U64) 38 + #define RISCV_TIMER_REG(name) __kvm_reg_id(KVM_REG_RISCV_TIMER, 0, \ 39 + KVM_REG_RISCV_TIMER_REG(name), \ 40 + KVM_REG_SIZE_U64) 40 41 41 - #define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \ 42 - idx, KVM_REG_SIZE_ULONG) 42 + #define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \ 43 + KVM_REG_RISCV_ISA_SINGLE, \ 44 + idx, KVM_REG_SIZE_ULONG) 45 + 46 + #define RISCV_SBI_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_SBI_EXT, \ 47 + KVM_REG_RISCV_SBI_SINGLE, \ 48 + idx, 
KVM_REG_SIZE_ULONG) 43 49 44 50 /* L3 index Bit[47:39] */ 45 51 #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL ··· 108 102 #define SATP_ASID_SHIFT 44 109 103 #define SATP_ASID_MASK _AC(0xFFFF, UL) 110 104 105 + /* SBI return error codes */ 106 + #define SBI_SUCCESS 0 107 + #define SBI_ERR_FAILURE -1 108 + #define SBI_ERR_NOT_SUPPORTED -2 109 + #define SBI_ERR_INVALID_PARAM -3 110 + #define SBI_ERR_DENIED -4 111 + #define SBI_ERR_INVALID_ADDRESS -5 112 + #define SBI_ERR_ALREADY_AVAILABLE -6 113 + #define SBI_ERR_ALREADY_STARTED -7 114 + #define SBI_ERR_ALREADY_STOPPED -8 115 + 111 116 #define SBI_EXT_EXPERIMENTAL_START 0x08000000 112 117 #define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF 113 118 114 119 #define KVM_RISCV_SELFTESTS_SBI_EXT SBI_EXT_EXPERIMENTAL_END 115 120 #define KVM_RISCV_SELFTESTS_SBI_UCALL 0 116 121 #define KVM_RISCV_SELFTESTS_SBI_UNEXP 1 122 + 123 + enum sbi_ext_id { 124 + SBI_EXT_BASE = 0x10, 125 + SBI_EXT_STA = 0x535441, 126 + }; 127 + 128 + enum sbi_ext_base_fid { 129 + SBI_EXT_BASE_PROBE_EXT = 3, 130 + }; 117 131 118 132 struct sbiret { 119 133 long error; ··· 144 118 unsigned long arg1, unsigned long arg2, 145 119 unsigned long arg3, unsigned long arg4, 146 120 unsigned long arg5); 121 + 122 + bool guest_sbi_probe_extension(int extid, long *out_val); 147 123 148 124 #endif /* SELFTEST_KVM_PROCESSOR_H */
+47 -2
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 201 201 satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN; 202 202 satp |= SATP_MODE_48; 203 203 204 - vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp); 204 + vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp); 205 205 } 206 206 207 207 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) ··· 315 315 vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); 316 316 317 317 /* Setup default exception vector of guest */ 318 - vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap); 318 + vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap); 319 319 320 320 return vcpu; 321 321 } ··· 366 366 367 367 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) 368 368 { 369 + } 370 + 371 + struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, 372 + unsigned long arg1, unsigned long arg2, 373 + unsigned long arg3, unsigned long arg4, 374 + unsigned long arg5) 375 + { 376 + register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); 377 + register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); 378 + register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); 379 + register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); 380 + register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4); 381 + register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5); 382 + register uintptr_t a6 asm ("a6") = (uintptr_t)(fid); 383 + register uintptr_t a7 asm ("a7") = (uintptr_t)(ext); 384 + struct sbiret ret; 385 + 386 + asm volatile ( 387 + "ecall" 388 + : "+r" (a0), "+r" (a1) 389 + : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7) 390 + : "memory"); 391 + ret.error = a0; 392 + ret.value = a1; 393 + 394 + return ret; 395 + } 396 + 397 + bool guest_sbi_probe_extension(int extid, long *out_val) 398 + { 399 + struct sbiret ret; 400 + 401 + ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid, 402 + 0, 0, 0, 0, 0); 403 + 404 + __GUEST_ASSERT(!ret.error || ret.error == SBI_ERR_NOT_SUPPORTED, 405 + "ret.error=%ld, 
ret.value=%ld\n", ret.error, ret.value); 406 + 407 + if (ret.error == SBI_ERR_NOT_SUPPORTED) 408 + return false; 409 + 410 + if (out_val) 411 + *out_val = ret.value; 412 + 413 + return true; 369 414 }
-26
tools/testing/selftests/kvm/lib/riscv/ucall.c
··· 10 10 #include "kvm_util.h" 11 11 #include "processor.h" 12 12 13 - struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, 14 - unsigned long arg1, unsigned long arg2, 15 - unsigned long arg3, unsigned long arg4, 16 - unsigned long arg5) 17 - { 18 - register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); 19 - register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); 20 - register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); 21 - register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); 22 - register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4); 23 - register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5); 24 - register uintptr_t a6 asm ("a6") = (uintptr_t)(fid); 25 - register uintptr_t a7 asm ("a7") = (uintptr_t)(ext); 26 - struct sbiret ret; 27 - 28 - asm volatile ( 29 - "ecall" 30 - : "+r" (a0), "+r" (a1) 31 - : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7) 32 - : "memory"); 33 - ret.error = a0; 34 - ret.value = a1; 35 - 36 - return ret; 37 - } 38 - 39 13 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) 40 14 { 41 15 struct kvm_run *run = vcpu->run;
+280 -306
tools/testing/selftests/kvm/riscv/get-reg-list.c
··· 12 12 13 13 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK) 14 14 15 + enum { 16 + VCPU_FEATURE_ISA_EXT = 0, 17 + VCPU_FEATURE_SBI_EXT, 18 + }; 19 + 15 20 static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX]; 16 21 17 22 bool filter_reg(__u64 reg) ··· 33 28 * 34 29 * Note: The below list is alphabetically sorted. 35 30 */ 36 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A: 37 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C: 38 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D: 39 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F: 40 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H: 41 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I: 42 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M: 43 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V: 44 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN: 45 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA: 46 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC: 47 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL: 48 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT: 49 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT: 50 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA: 51 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB: 52 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS: 53 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM: 54 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ: 55 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR: 56 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND: 57 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR: 58 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI: 59 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE: 60 - case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM: 31 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_A: 32 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_C: 33 + case KVM_REG_RISCV_ISA_EXT | 
KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D: 34 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F: 35 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_H: 36 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_I: 37 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_M: 38 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V: 39 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN: 40 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA: 41 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSTC: 42 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL: 43 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT: 44 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT: 45 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA: 46 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB: 47 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBS: 48 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM: 49 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ: 50 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR: 51 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICOND: 52 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICSR: 53 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIFENCEI: 54 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE: 55 + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM: 56 + /* 57 + * Like ISA_EXT registers, SBI_EXT registers are 
only visible when the 58 + * host supports them and disabling them does not affect the visibility 59 + * of the SBI_EXT register itself. 60 + */ 61 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01: 62 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME: 63 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI: 64 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE: 65 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST: 66 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM: 67 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU: 68 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN: 69 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA: 70 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL: 71 + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR: 61 72 return true; 62 73 /* AIA registers are always available when Ssaia can't be disabled */ 63 74 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect): ··· 96 75 return err == EINVAL; 97 76 } 98 77 99 - static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext) 78 + static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_id) 100 79 { 101 80 int ret; 102 81 unsigned long value; 103 82 104 - ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value); 83 + ret = __vcpu_get_reg(vcpu, ext_id, &value); 105 84 return (ret) ? 
false : !!value; 106 85 } 107 86 ··· 109 88 { 110 89 unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; 111 90 struct vcpu_reg_sublist *s; 91 + uint64_t feature; 112 92 int rc; 113 93 114 94 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) ··· 125 103 isa_ext_cant_disable[i] = true; 126 104 } 127 105 106 + for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) { 107 + rc = __vcpu_set_reg(vcpu, RISCV_SBI_EXT_REG(i), 0); 108 + TEST_ASSERT(!rc || (rc == -1 && errno == ENOENT), "Unexpected error"); 109 + } 110 + 128 111 for_each_sublist(c, s) { 129 112 if (!s->feature) 130 113 continue; 131 114 115 + switch (s->feature_type) { 116 + case VCPU_FEATURE_ISA_EXT: 117 + feature = RISCV_ISA_EXT_REG(s->feature); 118 + break; 119 + case VCPU_FEATURE_SBI_EXT: 120 + feature = RISCV_SBI_EXT_REG(s->feature); 121 + break; 122 + default: 123 + TEST_FAIL("Unknown feature type"); 124 + } 125 + 132 126 /* Try to enable the desired extension */ 133 - __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1); 127 + __vcpu_set_reg(vcpu, feature, 1); 134 128 135 129 /* Double check whether the desired extension was enabled */ 136 - __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature), 130 + __TEST_REQUIRE(vcpu_has_ext(vcpu, feature), 137 131 "%s not available, skipping tests\n", s->name); 138 132 } 139 133 } ··· 373 335 } 374 336 375 337 #define KVM_ISA_EXT_ARR(ext) \ 376 - [KVM_RISCV_ISA_EXT_##ext] = "KVM_RISCV_ISA_EXT_" #ext 338 + [KVM_RISCV_ISA_EXT_##ext] = "KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_" #ext 377 339 378 - static const char *isa_ext_id_to_str(const char *prefix, __u64 id) 340 + static const char *isa_ext_single_id_to_str(__u64 reg_off) 379 341 { 380 - /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */ 381 - __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT); 382 - 383 - assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT); 384 - 385 342 static const char * const kvm_isa_ext_reg_name[] = { 386 343 KVM_ISA_EXT_ARR(A), 387 344 KVM_ISA_EXT_ARR(C), ··· 
406 373 }; 407 374 408 375 if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) 409 - return strdup_printf("%lld /* UNKNOWN */", reg_off); 376 + return strdup_printf("KVM_REG_RISCV_ISA_SINGLE | %lld /* UNKNOWN */", reg_off); 410 377 411 378 return kvm_isa_ext_reg_name[reg_off]; 379 + } 380 + 381 + static const char *isa_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off) 382 + { 383 + const char *unknown = ""; 384 + 385 + if (reg_off > KVM_REG_RISCV_ISA_MULTI_REG_LAST) 386 + unknown = " /* UNKNOWN */"; 387 + 388 + switch (reg_subtype) { 389 + case KVM_REG_RISCV_ISA_MULTI_EN: 390 + return strdup_printf("KVM_REG_RISCV_ISA_MULTI_EN | %lld%s", reg_off, unknown); 391 + case KVM_REG_RISCV_ISA_MULTI_DIS: 392 + return strdup_printf("KVM_REG_RISCV_ISA_MULTI_DIS | %lld%s", reg_off, unknown); 393 + } 394 + 395 + return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); 396 + } 397 + 398 + static const char *isa_ext_id_to_str(const char *prefix, __u64 id) 399 + { 400 + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT); 401 + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK; 402 + 403 + assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT); 404 + 405 + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK; 406 + 407 + switch (reg_subtype) { 408 + case KVM_REG_RISCV_ISA_SINGLE: 409 + return isa_ext_single_id_to_str(reg_off); 410 + case KVM_REG_RISCV_ISA_MULTI_EN: 411 + case KVM_REG_RISCV_ISA_MULTI_DIS: 412 + return isa_ext_multi_id_to_str(reg_subtype, reg_off); 413 + } 414 + 415 + return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); 412 416 } 413 417 414 418 #define KVM_SBI_EXT_ARR(ext) \ ··· 462 392 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST), 463 393 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM), 464 394 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU), 395 + KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA), 465 396 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL), 466 397 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR), 467 398 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN), 
··· 511 440 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); 512 441 } 513 442 443 + static const char *sbi_sta_id_to_str(__u64 reg_off) 444 + { 445 + switch (reg_off) { 446 + case 0: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo)"; 447 + case 1: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi)"; 448 + } 449 + return strdup_printf("KVM_REG_RISCV_SBI_STA | %lld /* UNKNOWN */", reg_off); 450 + } 451 + 452 + static const char *sbi_id_to_str(const char *prefix, __u64 id) 453 + { 454 + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_STATE); 455 + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK; 456 + 457 + assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_STATE); 458 + 459 + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK; 460 + 461 + switch (reg_subtype) { 462 + case KVM_REG_RISCV_SBI_STA: 463 + return sbi_sta_id_to_str(reg_off); 464 + } 465 + 466 + return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); 467 + } 468 + 514 469 void print_reg(const char *prefix, __u64 id) 515 470 { 516 471 const char *reg_size = NULL; ··· 555 458 reg_size = "KVM_REG_SIZE_U128"; 556 459 break; 557 460 default: 558 - printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,", 559 - (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & REG_MASK); 461 + printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,\n", 462 + (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & ~REG_MASK); 463 + return; 560 464 } 561 465 562 466 switch (id & KVM_REG_RISCV_TYPE_MASK) { ··· 593 495 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n", 594 496 reg_size, sbi_ext_id_to_str(prefix, id)); 595 497 break; 498 + case KVM_REG_RISCV_SBI_STATE: 499 + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_STATE | %s,\n", 500 + reg_size, sbi_id_to_str(prefix, id)); 501 + break; 596 502 default: 597 - printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,", 598 - reg_size, id & 
REG_MASK); 503 + printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,\n", 504 + reg_size, id & ~REG_MASK); 505 + return; 599 506 } 600 507 } 601 508 ··· 662 559 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time), 663 560 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare), 664 561 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state), 665 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01, 666 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME, 667 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI, 668 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE, 669 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST, 670 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM, 671 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU, 672 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL, 673 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR, 674 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN, 675 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0, 676 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0, 677 562 }; 678 563 679 564 /* ··· 672 581 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state), 673 582 }; 674 583 675 - static 
__u64 h_regs[] = { 676 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H, 584 + static __u64 sbi_base_regs[] = { 585 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01, 586 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME, 587 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI, 588 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE, 589 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST, 590 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM, 591 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL, 592 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR, 593 + }; 594 + 595 + static __u64 sbi_sta_regs[] = { 596 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA, 597 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo), 598 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi), 677 599 }; 678 600 679 601 static __u64 zicbom_regs[] = { 680 602 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size), 681 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM, 603 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM, 682 604 }; 683 605 684 606 static __u64 zicboz_regs[] = { 685 607 KVM_REG_RISCV | 
KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size), 686 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ, 687 - }; 688 - 689 - static __u64 svpbmt_regs[] = { 690 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT, 691 - }; 692 - 693 - static __u64 sstc_regs[] = { 694 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC, 695 - }; 696 - 697 - static __u64 svinval_regs[] = { 698 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL, 699 - }; 700 - 701 - static __u64 zihintpause_regs[] = { 702 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE, 703 - }; 704 - 705 - static __u64 zba_regs[] = { 706 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA, 707 - }; 708 - 709 - static __u64 zbb_regs[] = { 710 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB, 711 - }; 712 - 713 - static __u64 zbs_regs[] = { 714 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS, 715 - }; 716 - 717 - static __u64 zicntr_regs[] = { 718 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR, 719 - }; 720 - 721 - static __u64 zicond_regs[] = { 722 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND, 723 - }; 724 - 725 - static __u64 zicsr_regs[] = { 726 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR, 727 - }; 728 - 729 - static __u64 zifencei_regs[] = { 730 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI, 731 - }; 732 - 733 - static __u64 zihpm_regs[] = { 734 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM, 608 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | 
KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ, 735 609 }; 736 610 737 611 static __u64 aia_regs[] = { ··· 707 651 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph), 708 652 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h), 709 653 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h), 710 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA, 654 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA, 711 655 }; 712 656 713 657 static __u64 smstateen_regs[] = { 714 658 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0), 715 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN, 659 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN, 716 660 }; 717 661 718 662 static __u64 fp_f_regs[] = { ··· 749 693 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]), 750 694 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]), 751 695 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr), 752 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F, 696 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F, 753 697 }; 754 698 755 699 static __u64 fp_d_regs[] = { ··· 786 730 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]), 787 731 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]), 788 732 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | 
KVM_REG_RISCV_FP_D_REG(fcsr), 789 - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D, 733 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D, 790 734 }; 791 735 792 - #define BASE_SUBLIST \ 736 + #define SUBLIST_BASE \ 793 737 {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \ 794 738 .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),} 795 - #define H_REGS_SUBLIST \ 796 - {"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),} 797 - #define ZICBOM_REGS_SUBLIST \ 739 + #define SUBLIST_SBI_BASE \ 740 + {"sbi-base", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_V01, \ 741 + .regs = sbi_base_regs, .regs_n = ARRAY_SIZE(sbi_base_regs),} 742 + #define SUBLIST_SBI_STA \ 743 + {"sbi-sta", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_STA, \ 744 + .regs = sbi_sta_regs, .regs_n = ARRAY_SIZE(sbi_sta_regs),} 745 + #define SUBLIST_ZICBOM \ 798 746 {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),} 799 - #define ZICBOZ_REGS_SUBLIST \ 747 + #define SUBLIST_ZICBOZ \ 800 748 {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),} 801 - #define SVPBMT_REGS_SUBLIST \ 802 - {"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),} 803 - #define SSTC_REGS_SUBLIST \ 804 - {"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),} 805 - #define SVINVAL_REGS_SUBLIST \ 806 - {"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),} 807 - #define ZIHINTPAUSE_REGS_SUBLIST \ 808 - {"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),} 809 - #define ZBA_REGS_SUBLIST \ 810 - {"zba", .feature = 
KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),} 811 - #define ZBB_REGS_SUBLIST \ 812 - {"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),} 813 - #define ZBS_REGS_SUBLIST \ 814 - {"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),} 815 - #define ZICNTR_REGS_SUBLIST \ 816 - {"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),} 817 - #define ZICOND_REGS_SUBLIST \ 818 - {"zicond", .feature = KVM_RISCV_ISA_EXT_ZICOND, .regs = zicond_regs, .regs_n = ARRAY_SIZE(zicond_regs),} 819 - #define ZICSR_REGS_SUBLIST \ 820 - {"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),} 821 - #define ZIFENCEI_REGS_SUBLIST \ 822 - {"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),} 823 - #define ZIHPM_REGS_SUBLIST \ 824 - {"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),} 825 - #define AIA_REGS_SUBLIST \ 749 + #define SUBLIST_AIA \ 826 750 {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),} 827 - #define SMSTATEEN_REGS_SUBLIST \ 751 + #define SUBLIST_SMSTATEEN \ 828 752 {"smstateen", .feature = KVM_RISCV_ISA_EXT_SMSTATEEN, .regs = smstateen_regs, .regs_n = ARRAY_SIZE(smstateen_regs),} 829 - #define FP_F_REGS_SUBLIST \ 753 + #define SUBLIST_FP_F \ 830 754 {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \ 831 755 .regs_n = ARRAY_SIZE(fp_f_regs),} 832 - #define FP_D_REGS_SUBLIST \ 756 + #define SUBLIST_FP_D \ 833 757 {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \ 834 758 .regs_n = ARRAY_SIZE(fp_d_regs),} 835 759 836 - static struct vcpu_reg_list h_config = { 837 - .sublists = { 838 - BASE_SUBLIST, 839 - H_REGS_SUBLIST, 840 - {0}, 841 - }, 842 - }; 760 + #define KVM_ISA_EXT_SIMPLE_CONFIG(ext, extu) \ 761 + static 
__u64 regs_##ext[] = { \ 762 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | \ 763 + KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | \ 764 + KVM_RISCV_ISA_EXT_##extu, \ 765 + }; \ 766 + static struct vcpu_reg_list config_##ext = { \ 767 + .sublists = { \ 768 + SUBLIST_BASE, \ 769 + { \ 770 + .name = #ext, \ 771 + .feature = KVM_RISCV_ISA_EXT_##extu, \ 772 + .regs = regs_##ext, \ 773 + .regs_n = ARRAY_SIZE(regs_##ext), \ 774 + }, \ 775 + {0}, \ 776 + }, \ 777 + } \ 843 778 844 - static struct vcpu_reg_list zicbom_config = { 845 - .sublists = { 846 - BASE_SUBLIST, 847 - ZICBOM_REGS_SUBLIST, 848 - {0}, 849 - }, 850 - }; 779 + #define KVM_SBI_EXT_SIMPLE_CONFIG(ext, extu) \ 780 + static __u64 regs_sbi_##ext[] = { \ 781 + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | \ 782 + KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | \ 783 + KVM_RISCV_SBI_EXT_##extu, \ 784 + }; \ 785 + static struct vcpu_reg_list config_sbi_##ext = { \ 786 + .sublists = { \ 787 + SUBLIST_BASE, \ 788 + { \ 789 + .name = "sbi-"#ext, \ 790 + .feature_type = VCPU_FEATURE_SBI_EXT, \ 791 + .feature = KVM_RISCV_SBI_EXT_##extu, \ 792 + .regs = regs_sbi_##ext, \ 793 + .regs_n = ARRAY_SIZE(regs_sbi_##ext), \ 794 + }, \ 795 + {0}, \ 796 + }, \ 797 + } \ 851 798 852 - static struct vcpu_reg_list zicboz_config = { 853 - .sublists = { 854 - BASE_SUBLIST, 855 - ZICBOZ_REGS_SUBLIST, 856 - {0}, 857 - }, 858 - }; 799 + #define KVM_ISA_EXT_SUBLIST_CONFIG(ext, extu) \ 800 + static struct vcpu_reg_list config_##ext = { \ 801 + .sublists = { \ 802 + SUBLIST_BASE, \ 803 + SUBLIST_##extu, \ 804 + {0}, \ 805 + }, \ 806 + } \ 859 807 860 - static struct vcpu_reg_list svpbmt_config = { 861 - .sublists = { 862 - BASE_SUBLIST, 863 - SVPBMT_REGS_SUBLIST, 864 - {0}, 865 - }, 866 - }; 808 + #define KVM_SBI_EXT_SUBLIST_CONFIG(ext, extu) \ 809 + static struct vcpu_reg_list config_sbi_##ext = { \ 810 + .sublists = { \ 811 + SUBLIST_BASE, \ 812 + SUBLIST_SBI_##extu, \ 813 + {0}, \ 814 + }, \ 815 + } \ 867 816 868 - static struct vcpu_reg_list 
sstc_config = { 869 - .sublists = { 870 - BASE_SUBLIST, 871 - SSTC_REGS_SUBLIST, 872 - {0}, 873 - }, 874 - }; 817 + /* Note: The below list is alphabetically sorted. */ 875 818 876 - static struct vcpu_reg_list svinval_config = { 877 - .sublists = { 878 - BASE_SUBLIST, 879 - SVINVAL_REGS_SUBLIST, 880 - {0}, 881 - }, 882 - }; 819 + KVM_SBI_EXT_SUBLIST_CONFIG(base, BASE); 820 + KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA); 821 + KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU); 822 + KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN); 883 823 884 - static struct vcpu_reg_list zihintpause_config = { 885 - .sublists = { 886 - BASE_SUBLIST, 887 - ZIHINTPAUSE_REGS_SUBLIST, 888 - {0}, 889 - }, 890 - }; 891 - 892 - static struct vcpu_reg_list zba_config = { 893 - .sublists = { 894 - BASE_SUBLIST, 895 - ZBA_REGS_SUBLIST, 896 - {0}, 897 - }, 898 - }; 899 - 900 - static struct vcpu_reg_list zbb_config = { 901 - .sublists = { 902 - BASE_SUBLIST, 903 - ZBB_REGS_SUBLIST, 904 - {0}, 905 - }, 906 - }; 907 - 908 - static struct vcpu_reg_list zbs_config = { 909 - .sublists = { 910 - BASE_SUBLIST, 911 - ZBS_REGS_SUBLIST, 912 - {0}, 913 - }, 914 - }; 915 - 916 - static struct vcpu_reg_list zicntr_config = { 917 - .sublists = { 918 - BASE_SUBLIST, 919 - ZICNTR_REGS_SUBLIST, 920 - {0}, 921 - }, 922 - }; 923 - 924 - static struct vcpu_reg_list zicond_config = { 925 - .sublists = { 926 - BASE_SUBLIST, 927 - ZICOND_REGS_SUBLIST, 928 - {0}, 929 - }, 930 - }; 931 - 932 - static struct vcpu_reg_list zicsr_config = { 933 - .sublists = { 934 - BASE_SUBLIST, 935 - ZICSR_REGS_SUBLIST, 936 - {0}, 937 - }, 938 - }; 939 - 940 - static struct vcpu_reg_list zifencei_config = { 941 - .sublists = { 942 - BASE_SUBLIST, 943 - ZIFENCEI_REGS_SUBLIST, 944 - {0}, 945 - }, 946 - }; 947 - 948 - static struct vcpu_reg_list zihpm_config = { 949 - .sublists = { 950 - BASE_SUBLIST, 951 - ZIHPM_REGS_SUBLIST, 952 - {0}, 953 - }, 954 - }; 955 - 956 - static struct vcpu_reg_list aia_config = { 957 - .sublists = { 958 - BASE_SUBLIST, 959 - 
AIA_REGS_SUBLIST, 960 - {0}, 961 - }, 962 - }; 963 - 964 - static struct vcpu_reg_list smstateen_config = { 965 - .sublists = { 966 - BASE_SUBLIST, 967 - SMSTATEEN_REGS_SUBLIST, 968 - {0}, 969 - }, 970 - }; 971 - 972 - static struct vcpu_reg_list fp_f_config = { 973 - .sublists = { 974 - BASE_SUBLIST, 975 - FP_F_REGS_SUBLIST, 976 - {0}, 977 - }, 978 - }; 979 - 980 - static struct vcpu_reg_list fp_d_config = { 981 - .sublists = { 982 - BASE_SUBLIST, 983 - FP_D_REGS_SUBLIST, 984 - {0}, 985 - }, 986 - }; 824 + KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA); 825 + KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F); 826 + KVM_ISA_EXT_SUBLIST_CONFIG(fp_d, FP_D); 827 + KVM_ISA_EXT_SIMPLE_CONFIG(h, H); 828 + KVM_ISA_EXT_SUBLIST_CONFIG(smstateen, SMSTATEEN); 829 + KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC); 830 + KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL); 831 + KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT); 832 + KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT); 833 + KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA); 834 + KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB); 835 + KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS); 836 + KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM); 837 + KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ); 838 + KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR); 839 + KVM_ISA_EXT_SIMPLE_CONFIG(zicond, ZICOND); 840 + KVM_ISA_EXT_SIMPLE_CONFIG(zicsr, ZICSR); 841 + KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI); 842 + KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE); 843 + KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM); 987 844 988 845 struct vcpu_reg_list *vcpu_configs[] = { 989 - &h_config, 990 - &zicbom_config, 991 - &zicboz_config, 992 - &svpbmt_config, 993 - &sstc_config, 994 - &svinval_config, 995 - &zihintpause_config, 996 - &zba_config, 997 - &zbb_config, 998 - &zbs_config, 999 - &zicntr_config, 1000 - &zicond_config, 1001 - &zicsr_config, 1002 - &zifencei_config, 1003 - &zihpm_config, 1004 - &aia_config, 1005 - &smstateen_config, 1006 - &fp_f_config, 1007 - &fp_d_config, 846 + &config_sbi_base, 847 + &config_sbi_sta, 848 + 
&config_sbi_pmu, 849 + &config_sbi_dbcn, 850 + &config_aia, 851 + &config_fp_f, 852 + &config_fp_d, 853 + &config_h, 854 + &config_smstateen, 855 + &config_sstc, 856 + &config_svinval, 857 + &config_svnapot, 858 + &config_svpbmt, 859 + &config_zba, 860 + &config_zbb, 861 + &config_zbs, 862 + &config_zicbom, 863 + &config_zicboz, 864 + &config_zicntr, 865 + &config_zicond, 866 + &config_zicsr, 867 + &config_zifencei, 868 + &config_zihintpause, 869 + &config_zihpm, 1008 870 }; 1009 871 int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
+99
tools/testing/selftests/kvm/steal_time.c
··· 11 11 #include <pthread.h> 12 12 #include <linux/kernel.h> 13 13 #include <asm/kvm.h> 14 + #ifndef __riscv 14 15 #include <asm/kvm_para.h> 16 + #endif 15 17 16 18 #include "test_util.h" 17 19 #include "kvm_util.h" ··· 203 201 pr_info(" rev: %d\n", st->rev); 204 202 pr_info(" attr: %d\n", st->attr); 205 203 pr_info(" st_time: %ld\n", st->st_time); 204 + } 205 + 206 + #elif defined(__riscv) 207 + 208 + /* SBI STA shmem must have 64-byte alignment */ 209 + #define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63) 210 + 211 + static vm_paddr_t st_gpa[NR_VCPUS]; 212 + 213 + struct sta_struct { 214 + uint32_t sequence; 215 + uint32_t flags; 216 + uint64_t steal; 217 + uint8_t preempted; 218 + uint8_t pad[47]; 219 + } __packed; 220 + 221 + static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags) 222 + { 223 + unsigned long lo = (unsigned long)gpa; 224 + #if __riscv_xlen == 32 225 + unsigned long hi = (unsigned long)(gpa >> 32); 226 + #else 227 + unsigned long hi = gpa == -1 ? -1 : 0; 228 + #endif 229 + struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0); 230 + 231 + GUEST_ASSERT(ret.value == 0 && ret.error == 0); 232 + } 233 + 234 + static void check_status(struct sta_struct *st) 235 + { 236 + GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1)); 237 + GUEST_ASSERT(READ_ONCE(st->flags) == 0); 238 + GUEST_ASSERT(READ_ONCE(st->preempted) == 0); 239 + } 240 + 241 + static void guest_code(int cpu) 242 + { 243 + struct sta_struct *st = st_gva[cpu]; 244 + uint32_t sequence; 245 + long out_val = 0; 246 + bool probe; 247 + 248 + probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val); 249 + GUEST_ASSERT(probe && out_val == 1); 250 + 251 + sta_set_shmem(st_gpa[cpu], 0); 252 + GUEST_SYNC(0); 253 + 254 + check_status(st); 255 + WRITE_ONCE(guest_stolen_time[cpu], st->steal); 256 + sequence = READ_ONCE(st->sequence); 257 + check_status(st); 258 + GUEST_SYNC(1); 259 + 260 + check_status(st); 261 + GUEST_ASSERT(sequence < READ_ONCE(st->sequence)); 262 + 
WRITE_ONCE(guest_stolen_time[cpu], st->steal); 263 + check_status(st); 264 + GUEST_DONE(); 265 + } 266 + 267 + static bool is_steal_time_supported(struct kvm_vcpu *vcpu) 268 + { 269 + uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); 270 + unsigned long enabled; 271 + 272 + vcpu_get_reg(vcpu, id, &enabled); 273 + TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result"); 274 + 275 + return enabled; 276 + } 277 + 278 + static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 279 + { 280 + /* ST_GPA_BASE is identity mapped */ 281 + st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); 282 + st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]); 283 + sync_global_to_guest(vcpu->vm, st_gva[i]); 284 + sync_global_to_guest(vcpu->vm, st_gpa[i]); 285 + } 286 + 287 + static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 288 + { 289 + struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 290 + int i; 291 + 292 + pr_info("VCPU%d:\n", vcpu_idx); 293 + pr_info(" sequence: %d\n", st->sequence); 294 + pr_info(" flags: %d\n", st->flags); 295 + pr_info(" steal: %"PRIu64"\n", st->steal); 296 + pr_info(" preempted: %d\n", st->preempted); 297 + pr_info(" pad: "); 298 + for (i = 0; i < 47; ++i) 299 + pr_info("%d", st->pad[i]); 300 + pr_info("\n"); 206 301 } 207 302 208 303 #endif
+1
tools/testing/selftests/net/Makefile
··· 91 91 TEST_PROGS += test_vxlan_nolocalbypass.sh 92 92 TEST_PROGS += test_bridge_backup_port.sh 93 93 TEST_PROGS += fdb_flush.sh 94 + TEST_PROGS += vlan_hw_filter.sh 94 95 95 96 TEST_FILES := settings 96 97
+4 -4
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 2776 2776 fi 2777 2777 2778 2778 if reset "mpc backup" && 2779 - continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then 2779 + continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then 2780 2780 pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup 2781 2781 speed=slow \ 2782 2782 run_tests $ns1 $ns2 10.0.1.1 ··· 2785 2785 fi 2786 2786 2787 2787 if reset "mpc backup both sides" && 2788 - continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then 2788 + continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then 2789 2789 pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup 2790 2790 pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup 2791 2791 speed=slow \ ··· 2795 2795 fi 2796 2796 2797 2797 if reset "mpc switch to backup" && 2798 - continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then 2798 + continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then 2799 2799 pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow 2800 2800 sflags=backup speed=slow \ 2801 2801 run_tests $ns1 $ns2 10.0.1.1 ··· 2804 2804 fi 2805 2805 2806 2806 if reset "mpc switch to backup both sides" && 2807 - continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then 2807 + continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then 2808 2808 pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow 2809 2809 pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow 2810 2810 sflags=backup speed=slow \
+1 -1
tools/testing/selftests/net/rtnetlink.sh
··· 297 297 done 298 298 299 299 sleep 5 300 - run_cmd_grep "10.23.11." ip addr show dev "$devdummy" 300 + run_cmd_grep_fail "10.23.11." ip addr show dev "$devdummy" 301 301 if [ $? -eq 0 ]; then 302 302 check_err 1 303 303 end_test "FAIL: preferred_lft addresses remaining"
+29
tools/testing/selftests/net/vlan_hw_filter.sh
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + readonly NETNS="ns-$(mktemp -u XXXXXX)" 5 + 6 + ret=0 7 + 8 + cleanup() { 9 + ip netns del $NETNS 10 + } 11 + 12 + trap cleanup EXIT 13 + 14 + fail() { 15 + echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2 16 + ret=1 17 + } 18 + 19 + ip netns add ${NETNS} 20 + ip netns exec ${NETNS} ip link add bond0 type bond mode 0 21 + ip netns exec ${NETNS} ip link add bond_slave_1 type veth peer veth2 22 + ip netns exec ${NETNS} ip link set bond_slave_1 master bond0 23 + ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off 24 + ip netns exec ${NETNS} ip link add link bond_slave_1 name bond_slave_1.0 type vlan id 0 25 + ip netns exec ${NETNS} ip link add link bond0 name bond0.0 type vlan id 0 26 + ip netns exec ${NETNS} ip link set bond_slave_1 nomaster 27 + ip netns exec ${NETNS} ip link del veth2 || fail "Please check vlan HW filter function" 28 + 29 + exit $ret
+2 -1
virt/kvm/kvm_main.c
··· 5888 5888 return r < 0 ? r : 0; 5889 5889 } 5890 5890 5891 - /* Caller must hold slots_lock. */ 5892 5891 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5893 5892 int len, struct kvm_io_device *dev) 5894 5893 { 5895 5894 int i; 5896 5895 struct kvm_io_bus *new_bus, *bus; 5897 5896 struct kvm_io_range range; 5897 + 5898 + lockdep_assert_held(&kvm->slots_lock); 5898 5899 5899 5900 bus = kvm_get_bus(kvm, bus_idx); 5900 5901 if (!bus)