Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 5.12-rc4 into usb-next

We need the usb/thunderbolt fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+2475 -1807
+1 -1
Documentation/ABI/testing/sysfs-fs-xfs
··· 33 33 Description: 34 34 The current state of the log write grant head. It 35 35 represents the total log reservation of all currently 36 - oustanding transactions, including regrants due to 36 + outstanding transactions, including regrants due to 37 37 rolling transactions. The grant head is exported in 38 38 "cycle:bytes" format. 39 39 Users: xfstests
+4
Documentation/devicetree/bindings/sound/fsl,spdif.yaml
··· 21 21 - fsl,vf610-spdif 22 22 - fsl,imx6sx-spdif 23 23 - fsl,imx8qm-spdif 24 + - fsl,imx8qxp-spdif 25 + - fsl,imx8mq-spdif 26 + - fsl,imx8mm-spdif 27 + - fsl,imx8mn-spdif 24 28 25 29 reg: 26 30 maxItems: 1
+6 -3
Documentation/virt/kvm/api.rst
··· 1495 1495 1496 1496 Define which vcpu is the Bootstrap Processor (BSP). Values are the same 1497 1497 as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default 1498 - is vcpu 0. 1498 + is vcpu 0. This ioctl has to be called before vcpu creation, 1499 + otherwise it will return EBUSY error. 1499 1500 1500 1501 1501 1502 4.42 KVM_GET_XSAVE ··· 4807 4806 allows user space to deflect and potentially handle various MSR accesses 4808 4807 into user space. 4809 4808 4810 - If a vCPU is in running state while this ioctl is invoked, the vCPU may 4811 - experience inconsistent filtering behavior on MSR accesses. 4809 + Note, invoking this ioctl while a vCPU is running is inherently racy. However, 4810 + KVM does guarantee that vCPUs will see either the previous filter or the new 4811 + filter, e.g. MSRs with identical settings in both the old and new filter will 4812 + have deterministic behavior. 4812 4813 4813 4814 4.127 KVM_XEN_HVM_SET_ATTR 4814 4815 --------------------------
+3 -4
MAINTAINERS
··· 1181 1181 M: Christian Brauner <christian@brauner.io> 1182 1182 M: Hridya Valsaraju <hridya@google.com> 1183 1183 M: Suren Baghdasaryan <surenb@google.com> 1184 - L: devel@driverdev.osuosl.org 1184 + L: linux-kernel@vger.kernel.org 1185 1185 S: Supported 1186 1186 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git 1187 1187 F: drivers/android/ ··· 8116 8116 8117 8117 HISILICON STAGING DRIVERS FOR HIKEY 960/970 8118 8118 M: Mauro Carvalho Chehab <mchehab+huawei@kernel.org> 8119 - L: devel@driverdev.osuosl.org 8120 8119 S: Maintained 8121 8120 F: drivers/staging/hikey9xx/ 8122 8121 ··· 17039 17040 17040 17041 STAGING SUBSYSTEM 17041 17042 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 17042 - L: devel@driverdev.osuosl.org 17043 + L: linux-staging@lists.linux.dev 17043 17044 S: Supported 17044 17045 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git 17045 17046 F: drivers/staging/ ··· 19134 19135 M: Martyn Welch <martyn@welchs.me.uk> 19135 19136 M: Manohar Vanga <manohar.vanga@gmail.com> 19136 19137 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 19137 - L: devel@driverdev.osuosl.org 19138 + L: linux-kernel@vger.kernel.org 19138 19139 S: Maintained 19139 19140 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git 19140 19141 F: Documentation/driver-api/vme.rst
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 12 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Frozen Wasteland 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/csky/kernel/probes/ftrace.c
··· 9 9 return 0; 10 10 } 11 11 12 - /* Ftrace callback handler for kprobes -- called under preepmt disabed */ 12 + /* Ftrace callback handler for kprobes -- called under preepmt disabled */ 13 13 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 14 14 struct ftrace_ops *ops, struct ftrace_regs *fregs) 15 15 {
+1 -1
arch/mips/kernel/vmlinux.lds.S
··· 176 176 .fill : { 177 177 FILL(0); 178 178 BYTE(0); 179 - . = ALIGN(8); 179 + STRUCT_ALIGN(); 180 180 } 181 181 __appended_dtb = .; 182 182 /* leave space for appended DTB */
+2 -2
arch/powerpc/include/asm/cpu_has_feature.h
··· 7 7 #include <linux/bug.h> 8 8 #include <asm/cputable.h> 9 9 10 - static inline bool early_cpu_has_feature(unsigned long feature) 10 + static __always_inline bool early_cpu_has_feature(unsigned long feature) 11 11 { 12 12 return !!((CPU_FTRS_ALWAYS & feature) || 13 13 (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); ··· 46 46 return static_branch_likely(&cpu_feature_keys[i]); 47 47 } 48 48 #else 49 - static inline bool cpu_has_feature(unsigned long feature) 49 + static __always_inline bool cpu_has_feature(unsigned long feature) 50 50 { 51 51 return early_cpu_has_feature(feature); 52 52 }
+11
arch/powerpc/kernel/vdso32/gettimeofday.S
··· 65 65 V_FUNCTION_BEGIN(__kernel_time) 66 66 cvdso_call_time __c_kernel_time 67 67 V_FUNCTION_END(__kernel_time) 68 + 69 + /* Routines for restoring integer registers, called by the compiler. */ 70 + /* Called with r11 pointing to the stack header word of the caller of the */ 71 + /* function, just beyond the end of the integer restore area. */ 72 + _GLOBAL(_restgpr_31_x) 73 + _GLOBAL(_rest32gpr_31_x) 74 + lwz r0,4(r11) 75 + lwz r31,-4(r11) 76 + mtlr r0 77 + mr r1,r11 78 + blr
+2 -2
arch/riscv/Kconfig
··· 93 93 select PCI_MSI if PCI 94 94 select RISCV_INTC 95 95 select RISCV_TIMER if RISCV_SBI 96 - select SPARSEMEM_STATIC if 32BIT 97 96 select SPARSE_IRQ 98 97 select SYSCTL_EXCEPTION_TRACE 99 98 select THREAD_INFO_IN_TASK ··· 153 154 config ARCH_SPARSEMEM_ENABLE 154 155 def_bool y 155 156 depends on MMU 156 - select SPARSEMEM_VMEMMAP_ENABLE 157 + select SPARSEMEM_STATIC if 32BIT && SPARSMEM 158 + select SPARSEMEM_VMEMMAP_ENABLE if 64BIT 157 159 158 160 config ARCH_SELECT_MEMORY_MODEL 159 161 def_bool ARCH_SPARSEMEM_ENABLE
+2
arch/riscv/Kconfig.socs
··· 31 31 select SIFIVE_PLIC 32 32 select ARCH_HAS_RESET_CONTROLLER 33 33 select PINCTRL 34 + select COMMON_CLK 35 + select COMMON_CLK_K210 34 36 help 35 37 This enables support for Canaan Kendryte K210 SoC platform hardware. 36 38
+16
arch/riscv/include/asm/asm-prototypes.h
··· 9 9 long long __ashrti3(long long a, int b); 10 10 long long __ashlti3(long long a, int b); 11 11 12 + 13 + #define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs) 14 + 15 + DECLARE_DO_ERROR_INFO(do_trap_unknown); 16 + DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned); 17 + DECLARE_DO_ERROR_INFO(do_trap_insn_fault); 18 + DECLARE_DO_ERROR_INFO(do_trap_insn_illegal); 19 + DECLARE_DO_ERROR_INFO(do_trap_load_fault); 20 + DECLARE_DO_ERROR_INFO(do_trap_load_misaligned); 21 + DECLARE_DO_ERROR_INFO(do_trap_store_misaligned); 22 + DECLARE_DO_ERROR_INFO(do_trap_store_fault); 23 + DECLARE_DO_ERROR_INFO(do_trap_ecall_u); 24 + DECLARE_DO_ERROR_INFO(do_trap_ecall_s); 25 + DECLARE_DO_ERROR_INFO(do_trap_ecall_m); 26 + DECLARE_DO_ERROR_INFO(do_trap_break); 27 + 12 28 #endif /* _ASM_RISCV_PROTOTYPES_H */
+2
arch/riscv/include/asm/irq.h
··· 12 12 13 13 #include <asm-generic/irq.h> 14 14 15 + extern void __init init_IRQ(void); 16 + 15 17 #endif /* _ASM_RISCV_IRQ_H */
+1
arch/riscv/include/asm/processor.h
··· 71 71 int riscv_of_parent_hartid(struct device_node *node); 72 72 73 73 extern void riscv_fill_hwcap(void); 74 + extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 74 75 75 76 #endif /* __ASSEMBLY__ */ 76 77
+5
arch/riscv/include/asm/ptrace.h
··· 119 119 extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, 120 120 unsigned int n); 121 121 122 + void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 123 + unsigned long frame_pointer); 124 + int do_syscall_trace_enter(struct pt_regs *regs); 125 + void do_syscall_trace_exit(struct pt_regs *regs); 126 + 122 127 /** 123 128 * regs_get_register() - get register value from its offset 124 129 * @regs: pt_regs from which register value is gotten
+2 -2
arch/riscv/include/asm/sbi.h
··· 51 51 SBI_EXT_RFENCE_REMOTE_FENCE_I = 0, 52 52 SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, 53 53 SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, 54 - SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA, 55 54 SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID, 56 - SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA, 55 + SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA, 57 56 SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID, 57 + SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA, 58 58 }; 59 59 60 60 enum sbi_ext_hsm_fid {
+2
arch/riscv/include/asm/timex.h
··· 88 88 return 0; 89 89 } 90 90 91 + extern void time_init(void); 92 + 91 93 #endif /* _ASM_RISCV_TIMEX_H */
+1
arch/riscv/kernel/Makefile
··· 8 8 CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) 9 9 CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) 10 10 endif 11 + CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) 11 12 12 13 extra-y += head.o 13 14 extra-y += vmlinux.lds
+10 -8
arch/riscv/kernel/probes/ftrace.c
··· 2 2 3 3 #include <linux/kprobes.h> 4 4 5 - /* Ftrace callback handler for kprobes -- called under preepmt disabed */ 5 + /* Ftrace callback handler for kprobes -- called under preepmt disabled */ 6 6 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 7 - struct ftrace_ops *ops, struct ftrace_regs *regs) 7 + struct ftrace_ops *ops, struct ftrace_regs *fregs) 8 8 { 9 9 struct kprobe *p; 10 + struct pt_regs *regs; 10 11 struct kprobe_ctlblk *kcb; 11 12 12 13 p = get_kprobe((kprobe_opcode_t *)ip); 13 14 if (unlikely(!p) || kprobe_disabled(p)) 14 15 return; 15 16 17 + regs = ftrace_get_regs(fregs); 16 18 kcb = get_kprobe_ctlblk(); 17 19 if (kprobe_running()) { 18 20 kprobes_inc_nmissed_count(p); 19 21 } else { 20 - unsigned long orig_ip = instruction_pointer(&(regs->regs)); 22 + unsigned long orig_ip = instruction_pointer(regs); 21 23 22 - instruction_pointer_set(&(regs->regs), ip); 24 + instruction_pointer_set(regs, ip); 23 25 24 26 __this_cpu_write(current_kprobe, p); 25 27 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 26 - if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) { 28 + if (!p->pre_handler || !p->pre_handler(p, regs)) { 27 29 /* 28 30 * Emulate singlestep (and also recover regs->pc) 29 31 * as if there is a nop 30 32 */ 31 - instruction_pointer_set(&(regs->regs), 33 + instruction_pointer_set(regs, 32 34 (unsigned long)p->addr + MCOUNT_INSN_SIZE); 33 35 if (unlikely(p->post_handler)) { 34 36 kcb->kprobe_status = KPROBE_HIT_SSDONE; 35 - p->post_handler(p, &(regs->regs), 0); 37 + p->post_handler(p, regs, 0); 36 38 } 37 - instruction_pointer_set(&(regs->regs), orig_ip); 39 + instruction_pointer_set(regs, orig_ip); 38 40 } 39 41 40 42 /*
+1 -2
arch/riscv/kernel/probes/kprobes.c
··· 256 256 * normal page fault. 257 257 */ 258 258 regs->epc = (unsigned long) cur->addr; 259 - if (!instruction_pointer(regs)) 260 - BUG(); 259 + BUG_ON(!instruction_pointer(regs)); 261 260 262 261 if (kcb->kprobe_status == KPROBE_REENTER) 263 262 restore_previous_kprobe(kcb);
+1
arch/riscv/kernel/process.c
··· 10 10 #include <linux/cpu.h> 11 11 #include <linux/kernel.h> 12 12 #include <linux/sched.h> 13 + #include <linux/sched/debug.h> 13 14 #include <linux/sched/task_stack.h> 14 15 #include <linux/tick.h> 15 16 #include <linux/ptrace.h>
+1 -1
arch/riscv/kernel/sbi.c
··· 116 116 EXPORT_SYMBOL(sbi_clear_ipi); 117 117 118 118 /** 119 - * sbi_set_timer_v01() - Program the timer for next timer event. 119 + * __sbi_set_timer_v01() - Program the timer for next timer event. 120 120 * @stime_value: The value after which next timer event should fire. 121 121 * 122 122 * Return: None
+2 -1
arch/riscv/kernel/setup.c
··· 147 147 bss_res.end = __pa_symbol(__bss_stop) - 1; 148 148 bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 149 149 150 - mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res); 150 + /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ 151 + mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res); 151 152 mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES); 152 153 if (!mem_res) 153 154 panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
+1
arch/riscv/kernel/time.c
··· 9 9 #include <linux/delay.h> 10 10 #include <asm/sbi.h> 11 11 #include <asm/processor.h> 12 + #include <asm/timex.h> 12 13 13 14 unsigned long riscv_timebase; 14 15 EXPORT_SYMBOL_GPL(riscv_timebase);
+1
arch/riscv/kernel/traps.c
··· 17 17 #include <linux/module.h> 18 18 #include <linux/irq.h> 19 19 20 + #include <asm/asm-prototypes.h> 20 21 #include <asm/bug.h> 21 22 #include <asm/processor.h> 22 23 #include <asm/ptrace.h>
+3 -1
arch/riscv/mm/kasan_init.c
··· 155 155 memset(start, KASAN_SHADOW_INIT, end - start); 156 156 } 157 157 158 - void __init kasan_shallow_populate(void *start, void *end) 158 + static void __init kasan_shallow_populate(void *start, void *end) 159 159 { 160 160 unsigned long vaddr = (unsigned long)start & PAGE_MASK; 161 161 unsigned long vend = PAGE_ALIGN((unsigned long)end); ··· 187 187 } 188 188 vaddr += PAGE_SIZE; 189 189 } 190 + 191 + local_flush_tlb_all(); 190 192 } 191 193 192 194 void __init kasan_init(void)
+1 -1
arch/s390/include/asm/pci.h
··· 202 202 ----------------------------------------------------------------------------- */ 203 203 /* Base stuff */ 204 204 int zpci_create_device(u32 fid, u32 fh, enum zpci_state state); 205 - void zpci_remove_device(struct zpci_dev *zdev); 205 + void zpci_remove_device(struct zpci_dev *zdev, bool set_error); 206 206 int zpci_enable_device(struct zpci_dev *); 207 207 int zpci_disable_device(struct zpci_dev *); 208 208 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
+2 -1
arch/s390/kernel/perf_cpum_cf_diag.c
··· 968 968 */ 969 969 static size_t cf_diag_needspace(unsigned int sets) 970 970 { 971 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 971 + struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events); 972 972 size_t bytes = 0; 973 973 int i; 974 974 ··· 984 984 sizeof(((struct s390_ctrset_cpudata *)0)->no_sets)); 985 985 debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__, 986 986 bytes); 987 + put_cpu_ptr(&cpu_cf_events); 987 988 return bytes; 988 989 } 989 990
+1 -1
arch/s390/kernel/vtime.c
··· 214 214 avg_steal = S390_lowcore.avg_steal_timer / 2; 215 215 if ((s64) steal > 0) { 216 216 S390_lowcore.steal_timer = 0; 217 - account_steal_time(steal); 217 + account_steal_time(cputime_to_nsecs(steal)); 218 218 avg_steal += steal; 219 219 } 220 220 S390_lowcore.avg_steal_timer = avg_steal;
+24 -4
arch/s390/pci/pci.c
··· 682 682 } 683 683 EXPORT_SYMBOL_GPL(zpci_disable_device); 684 684 685 - void zpci_remove_device(struct zpci_dev *zdev) 685 + /* zpci_remove_device - Removes the given zdev from the PCI core 686 + * @zdev: the zdev to be removed from the PCI core 687 + * @set_error: if true the device's error state is set to permanent failure 688 + * 689 + * Sets a zPCI device to a configured but offline state; the zPCI 690 + * device is still accessible through its hotplug slot and the zPCI 691 + * API but is removed from the common code PCI bus, making it 692 + * no longer available to drivers. 693 + */ 694 + void zpci_remove_device(struct zpci_dev *zdev, bool set_error) 686 695 { 687 696 struct zpci_bus *zbus = zdev->zbus; 688 697 struct pci_dev *pdev; 689 698 699 + if (!zdev->zbus->bus) 700 + return; 701 + 690 702 pdev = pci_get_slot(zbus->bus, zdev->devfn); 691 703 if (pdev) { 692 - if (pdev->is_virtfn) 693 - return zpci_iov_remove_virtfn(pdev, zdev->vfn); 704 + if (set_error) 705 + pdev->error_state = pci_channel_io_perm_failure; 706 + if (pdev->is_virtfn) { 707 + zpci_iov_remove_virtfn(pdev, zdev->vfn); 708 + /* balance pci_get_slot */ 709 + pci_dev_put(pdev); 710 + return; 711 + } 694 712 pci_stop_and_remove_bus_device_locked(pdev); 713 + /* balance pci_get_slot */ 714 + pci_dev_put(pdev); 695 715 } 696 716 } 697 717 ··· 785 765 struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref); 786 766 787 767 if (zdev->zbus->bus) 788 - zpci_remove_device(zdev); 768 + zpci_remove_device(zdev, false); 789 769 790 770 switch (zdev->state) { 791 771 case ZPCI_FN_STATE_ONLINE:
+6 -12
arch/s390/pci/pci_event.c
··· 76 76 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) 77 77 { 78 78 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); 79 - struct pci_dev *pdev = NULL; 80 79 enum zpci_state state; 80 + struct pci_dev *pdev; 81 81 int ret; 82 - 83 - if (zdev && zdev->zbus->bus) 84 - pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); 85 82 86 83 zpci_err("avail CCDF:\n"); 87 84 zpci_err_hex(ccdf, sizeof(*ccdf)); ··· 121 124 case 0x0303: /* Deconfiguration requested */ 122 125 if (!zdev) 123 126 break; 124 - if (pdev) 125 - zpci_remove_device(zdev); 127 + zpci_remove_device(zdev, false); 126 128 127 129 ret = zpci_disable_device(zdev); 128 130 if (ret) ··· 136 140 case 0x0304: /* Configured -> Standby|Reserved */ 137 141 if (!zdev) 138 142 break; 139 - if (pdev) { 140 - /* Give the driver a hint that the function is 141 - * already unusable. */ 142 - pdev->error_state = pci_channel_io_perm_failure; 143 - zpci_remove_device(zdev); 144 - } 143 + /* Give the driver a hint that the function is 144 + * already unusable. 145 + */ 146 + zpci_remove_device(zdev, true); 145 147 146 148 zdev->fh = ccdf->fh; 147 149 zpci_disable_device(zdev);
+3
arch/x86/events/intel/core.c
··· 3659 3659 return ret; 3660 3660 3661 3661 if (event->attr.precise_ip) { 3662 + if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) 3663 + return -EINVAL; 3664 + 3662 3665 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { 3663 3666 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 3664 3667 if (!(event->attr.sample_type &
+1 -1
arch/x86/events/intel/ds.c
··· 2010 2010 */ 2011 2011 if (!pebs_status && cpuc->pebs_enabled && 2012 2012 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) 2013 - pebs_status = cpuc->pebs_enabled; 2013 + pebs_status = p->status = cpuc->pebs_enabled; 2014 2014 2015 2015 bit = find_first_bit((unsigned long *)&pebs_status, 2016 2016 x86_pmu.max_pebs_events);
+26 -8
arch/x86/include/asm/kvm_host.h
··· 884 884 u64 options; 885 885 }; 886 886 887 + /* Current state of Hyper-V TSC page clocksource */ 888 + enum hv_tsc_page_status { 889 + /* TSC page was not set up or disabled */ 890 + HV_TSC_PAGE_UNSET = 0, 891 + /* TSC page MSR was written by the guest, update pending */ 892 + HV_TSC_PAGE_GUEST_CHANGED, 893 + /* TSC page MSR was written by KVM userspace, update pending */ 894 + HV_TSC_PAGE_HOST_CHANGED, 895 + /* TSC page was properly set up and is currently active */ 896 + HV_TSC_PAGE_SET, 897 + /* TSC page is currently being updated and therefore is inactive */ 898 + HV_TSC_PAGE_UPDATING, 899 + /* TSC page was set up with an inaccessible GPA */ 900 + HV_TSC_PAGE_BROKEN, 901 + }; 902 + 887 903 /* Hyper-V emulation context */ 888 904 struct kvm_hv { 889 905 struct mutex hv_lock; 890 906 u64 hv_guest_os_id; 891 907 u64 hv_hypercall; 892 908 u64 hv_tsc_page; 909 + enum hv_tsc_page_status hv_tsc_page_status; 893 910 894 911 /* Hyper-v based guest crash (NT kernel bugcheck) parameters */ 895 912 u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS]; ··· 946 929 KVM_IRQCHIP_NONE, 947 930 KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */ 948 931 KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */ 932 + }; 933 + 934 + struct kvm_x86_msr_filter { 935 + u8 count; 936 + bool default_allow:1; 937 + struct msr_bitmap_range ranges[16]; 949 938 }; 950 939 951 940 #define APICV_INHIBIT_REASON_DISABLE 0 ··· 1048 1025 bool guest_can_read_msr_platform_info; 1049 1026 bool exception_payload_enabled; 1050 1027 1028 + bool bus_lock_detection_enabled; 1029 + 1051 1030 /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */ 1052 1031 u32 user_space_msr_mask; 1053 - 1054 - struct { 1055 - u8 count; 1056 - bool default_allow:1; 1057 - struct msr_bitmap_range ranges[16]; 1058 - } msr_filter; 1059 - 1060 - bool bus_lock_detection_enabled; 1032 + struct kvm_x86_msr_filter __rcu *msr_filter; 1061 1033 1062 1034 struct kvm_pmu_event_filter __rcu *pmu_event_filter; 1063 
1035 struct task_struct *nx_lpage_recovery_thread;
-9
arch/x86/include/asm/processor.h
··· 551 551 *size = fpu_kernel_xstate_size; 552 552 } 553 553 554 - /* 555 - * Thread-synchronous status. 556 - * 557 - * This is different from the flags in that nobody else 558 - * ever touches our thread-synchronous status, so we don't 559 - * have to worry about atomic accesses. 560 - */ 561 - #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ 562 - 563 554 static inline void 564 555 native_load_sp0(unsigned long sp0) 565 556 {
+14 -1
arch/x86/include/asm/thread_info.h
··· 205 205 206 206 #endif 207 207 208 + /* 209 + * Thread-synchronous status. 210 + * 211 + * This is different from the flags in that nobody else 212 + * ever touches our thread-synchronous status, so we don't 213 + * have to worry about atomic accesses. 214 + */ 215 + #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ 216 + 217 + #ifndef __ASSEMBLY__ 208 218 #ifdef CONFIG_COMPAT 209 219 #define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */ 220 + 221 + #define arch_set_restart_data(restart) \ 222 + do { restart->arch_data = current_thread_info()->status; } while (0) 223 + 210 224 #endif 211 - #ifndef __ASSEMBLY__ 212 225 213 226 #ifdef CONFIG_X86_32 214 227 #define in_ia32_syscall() true
+5
arch/x86/kernel/apic/apic.c
··· 2342 2342 [0 ... NR_CPUS - 1] = -1, 2343 2343 }; 2344 2344 2345 + bool arch_match_cpu_phys_id(int cpu, u64 phys_id) 2346 + { 2347 + return phys_id == cpuid_to_apicid[cpu]; 2348 + } 2349 + 2345 2350 #ifdef CONFIG_SMP 2346 2351 /** 2347 2352 * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
+10
arch/x86/kernel/apic/io_apic.c
··· 1032 1032 if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) { 1033 1033 irq = mp_irqs[idx].srcbusirq; 1034 1034 legacy = mp_is_legacy_irq(irq); 1035 + /* 1036 + * IRQ2 is unusable for historical reasons on systems which 1037 + * have a legacy PIC. See the comment vs. IRQ2 further down. 1038 + * 1039 + * If this gets removed at some point then the related code 1040 + * in lapic_assign_system_vectors() needs to be adjusted as 1041 + * well. 1042 + */ 1043 + if (legacy && irq == PIC_CASCADE_IR) 1044 + return -EINVAL; 1035 1045 } 1036 1046 1037 1047 mutex_lock(&ioapic_mutex);
+1 -1
arch/x86/kernel/kprobes/ftrace.c
··· 12 12 13 13 #include "common.h" 14 14 15 - /* Ftrace callback handler for kprobes -- called under preepmt disabed */ 15 + /* Ftrace callback handler for kprobes -- called under preepmt disabled */ 16 16 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 17 17 struct ftrace_ops *ops, struct ftrace_regs *fregs) 18 18 {
+10 -13
arch/x86/kernel/kvm.c
··· 836 836 837 837 static void kvm_wait(u8 *ptr, u8 val) 838 838 { 839 - unsigned long flags; 840 - 841 839 if (in_nmi()) 842 840 return; 843 - 844 - local_irq_save(flags); 845 - 846 - if (READ_ONCE(*ptr) != val) 847 - goto out; 848 841 849 842 /* 850 843 * halt until it's our turn and kicked. Note that we do safe halt 851 844 * for irq enabled case to avoid hang when lock info is overwritten 852 845 * in irq spinlock slowpath and no spurious interrupt occur to save us. 853 846 */ 854 - if (arch_irqs_disabled_flags(flags)) 855 - halt(); 856 - else 857 - safe_halt(); 847 + if (irqs_disabled()) { 848 + if (READ_ONCE(*ptr) == val) 849 + halt(); 850 + } else { 851 + local_irq_disable(); 858 852 859 - out: 860 - local_irq_restore(flags); 853 + if (READ_ONCE(*ptr) == val) 854 + safe_halt(); 855 + 856 + local_irq_enable(); 857 + } 861 858 } 862 859 863 860 #ifdef CONFIG_X86_32
+1 -23
arch/x86/kernel/signal.c
··· 766 766 767 767 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) 768 768 { 769 - /* 770 - * This function is fundamentally broken as currently 771 - * implemented. 772 - * 773 - * The idea is that we want to trigger a call to the 774 - * restart_block() syscall and that we want in_ia32_syscall(), 775 - * in_x32_syscall(), etc. to match whatever they were in the 776 - * syscall being restarted. We assume that the syscall 777 - * instruction at (regs->ip - 2) matches whatever syscall 778 - * instruction we used to enter in the first place. 779 - * 780 - * The problem is that we can get here when ptrace pokes 781 - * syscall-like values into regs even if we're not in a syscall 782 - * at all. 783 - * 784 - * For now, we maintain historical behavior and guess based on 785 - * stored state. We could do better by saving the actual 786 - * syscall arch in restart_block or (with caveats on x32) by 787 - * checking if regs->ip points to 'int $0x80'. The current 788 - * behavior is incorrect if a tracer has a different bitness 789 - * than the tracee. 790 - */ 791 769 #ifdef CONFIG_IA32_EMULATION 792 - if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED)) 770 + if (current->restart_block.arch_data & TS_COMPAT) 793 771 return __NR_ia32_restart_syscall; 794 772 #endif 795 773 #ifdef CONFIG_X86_X32_ABI
+81 -10
arch/x86/kvm/hyperv.c
··· 520 520 u64 tsc; 521 521 522 522 /* 523 - * The guest has not set up the TSC page or the clock isn't 524 - * stable, fall back to get_kvmclock_ns. 523 + * Fall back to get_kvmclock_ns() when TSC page hasn't been set up, 524 + * is broken, disabled or being updated. 525 525 */ 526 - if (!hv->tsc_ref.tsc_sequence) 526 + if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET) 527 527 return div_u64(get_kvmclock_ns(kvm), 100); 528 528 529 529 vcpu = kvm_get_vcpu(kvm, 0); ··· 1077 1077 return true; 1078 1078 } 1079 1079 1080 + /* 1081 + * Don't touch TSC page values if the guest has opted for TSC emulation after 1082 + * migration. KVM doesn't fully support reenlightenment notifications and TSC 1083 + * access emulation and Hyper-V is known to expect the values in TSC page to 1084 + * stay constant before TSC access emulation is disabled from guest side 1085 + * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC 1086 + * frequency and guest visible TSC value across migration (and prevent it when 1087 + * TSC scaling is unsupported). 
1088 + */ 1089 + static inline bool tsc_page_update_unsafe(struct kvm_hv *hv) 1090 + { 1091 + return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) && 1092 + hv->hv_tsc_emulation_control; 1093 + } 1094 + 1080 1095 void kvm_hv_setup_tsc_page(struct kvm *kvm, 1081 1096 struct pvclock_vcpu_time_info *hv_clock) 1082 1097 { ··· 1102 1087 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); 1103 1088 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0); 1104 1089 1105 - if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) 1090 + if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || 1091 + hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET) 1106 1092 return; 1107 1093 1108 1094 mutex_lock(&hv->hv_lock); ··· 1117 1101 */ 1118 1102 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn), 1119 1103 &tsc_seq, sizeof(tsc_seq)))) 1104 + goto out_err; 1105 + 1106 + if (tsc_seq && tsc_page_update_unsafe(hv)) { 1107 + if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) 1108 + goto out_err; 1109 + 1110 + hv->hv_tsc_page_status = HV_TSC_PAGE_SET; 1120 1111 goto out_unlock; 1112 + } 1121 1113 1122 1114 /* 1123 1115 * While we're computing and writing the parameters, force the ··· 1134 1110 hv->tsc_ref.tsc_sequence = 0; 1135 1111 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), 1136 1112 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) 1137 - goto out_unlock; 1113 + goto out_err; 1138 1114 1139 1115 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) 1140 - goto out_unlock; 1116 + goto out_err; 1141 1117 1142 1118 /* Ensure sequence is zero before writing the rest of the struct. */ 1143 1119 smp_wmb(); 1144 1120 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) 1145 - goto out_unlock; 1121 + goto out_err; 1146 1122 1147 1123 /* 1148 1124 * Now switch to the TSC page mechanism by writing the sequence. 
··· 1155 1131 smp_wmb(); 1156 1132 1157 1133 hv->tsc_ref.tsc_sequence = tsc_seq; 1158 - kvm_write_guest(kvm, gfn_to_gpa(gfn), 1159 - &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)); 1134 + if (kvm_write_guest(kvm, gfn_to_gpa(gfn), 1135 + &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) 1136 + goto out_err; 1137 + 1138 + hv->hv_tsc_page_status = HV_TSC_PAGE_SET; 1139 + goto out_unlock; 1140 + 1141 + out_err: 1142 + hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; 1143 + out_unlock: 1144 + mutex_unlock(&hv->hv_lock); 1145 + } 1146 + 1147 + void kvm_hv_invalidate_tsc_page(struct kvm *kvm) 1148 + { 1149 + struct kvm_hv *hv = to_kvm_hv(kvm); 1150 + u64 gfn; 1151 + 1152 + if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || 1153 + hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET || 1154 + tsc_page_update_unsafe(hv)) 1155 + return; 1156 + 1157 + mutex_lock(&hv->hv_lock); 1158 + 1159 + if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) 1160 + goto out_unlock; 1161 + 1162 + /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */ 1163 + if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET) 1164 + hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING; 1165 + 1166 + gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; 1167 + 1168 + hv->tsc_ref.tsc_sequence = 0; 1169 + if (kvm_write_guest(kvm, gfn_to_gpa(gfn), 1170 + &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) 1171 + hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; 1172 + 1160 1173 out_unlock: 1161 1174 mutex_unlock(&hv->hv_lock); 1162 1175 } ··· 1254 1193 } 1255 1194 case HV_X64_MSR_REFERENCE_TSC: 1256 1195 hv->hv_tsc_page = data; 1257 - if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) 1196 + if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) { 1197 + if (!host) 1198 + hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED; 1199 + else 1200 + hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; 1258 1201 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 1202 + } else { 1203 + hv->hv_tsc_page_status = 
HV_TSC_PAGE_UNSET; 1204 + } 1259 1205 break; 1260 1206 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 1261 1207 return kvm_hv_msr_set_crash_data(kvm, ··· 1297 1229 hv->hv_tsc_emulation_control = data; 1298 1230 break; 1299 1231 case HV_X64_MSR_TSC_EMULATION_STATUS: 1232 + if (data && !host) 1233 + return 1; 1234 + 1300 1235 hv->hv_tsc_emulation_status = data; 1301 1236 break; 1302 1237 case HV_X64_MSR_TIME_REF_COUNT:
+1
arch/x86/kvm/hyperv.h
··· 133 133 134 134 void kvm_hv_setup_tsc_page(struct kvm *kvm, 135 135 struct pvclock_vcpu_time_info *hv_clock); 136 + void kvm_hv_invalidate_tsc_page(struct kvm *kvm); 136 137 137 138 void kvm_hv_init_vm(struct kvm *kvm); 138 139 void kvm_hv_destroy_vm(struct kvm *kvm);
+5
arch/x86/kvm/mmu/mmu_internal.h
··· 78 78 return to_shadow_page(__pa(sptep)); 79 79 } 80 80 81 + static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) 82 + { 83 + return sp->role.smm ? 1 : 0; 84 + } 85 + 81 86 static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) 82 87 { 83 88 /*
+18 -12
arch/x86/kvm/mmu/tdp_iter.c
··· 21 21 } 22 22 23 23 /* 24 + * Return the TDP iterator to the root PT and allow it to continue its 25 + * traversal over the paging structure from there. 26 + */ 27 + void tdp_iter_restart(struct tdp_iter *iter) 28 + { 29 + iter->yielded_gfn = iter->next_last_level_gfn; 30 + iter->level = iter->root_level; 31 + 32 + iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); 33 + tdp_iter_refresh_sptep(iter); 34 + 35 + iter->valid = true; 36 + } 37 + 38 + /* 24 39 * Sets a TDP iterator to walk a pre-order traversal of the paging structure 25 40 * rooted at root_pt, starting with the walk to translate next_last_level_gfn. 26 41 */ ··· 46 31 WARN_ON(root_level > PT64_ROOT_MAX_LEVEL); 47 32 48 33 iter->next_last_level_gfn = next_last_level_gfn; 49 - iter->yielded_gfn = iter->next_last_level_gfn; 50 34 iter->root_level = root_level; 51 35 iter->min_level = min_level; 52 - iter->level = root_level; 53 - iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt; 36 + iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt; 37 + iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt)); 54 38 55 - iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); 56 - tdp_iter_refresh_sptep(iter); 57 - 58 - iter->valid = true; 39 + tdp_iter_restart(iter); 59 40 } 60 41 61 42 /* ··· 168 157 return; 169 158 } while (try_step_up(iter)); 170 159 iter->valid = false; 171 - } 172 - 173 - tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter) 174 - { 175 - return iter->pt_path[iter->root_level - 1]; 176 160 } 177 161
+3 -1
arch/x86/kvm/mmu/tdp_iter.h
··· 36 36 int min_level; 37 37 /* The iterator's current level within the paging structure */ 38 38 int level; 39 + /* The address space ID, i.e. SMM vs. regular. */ 40 + int as_id; 39 41 /* A snapshot of the value at sptep */ 40 42 u64 old_spte; 41 43 /* ··· 64 62 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, 65 63 int min_level, gfn_t next_last_level_gfn); 66 64 void tdp_iter_next(struct tdp_iter *iter); 67 - tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter); 65 + void tdp_iter_restart(struct tdp_iter *iter); 68 66 69 67 #endif /* __KVM_X86_MMU_TDP_ITER_H */
+15 -25
arch/x86/kvm/mmu/tdp_mmu.c
··· 203 203 u64 old_spte, u64 new_spte, int level, 204 204 bool shared); 205 205 206 - static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) 207 - { 208 - return sp->role.smm ? 1 : 0; 209 - } 210 - 211 206 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level) 212 207 { 213 208 bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte); ··· 296 301 * 297 302 * Given a page table that has been removed from the TDP paging structure, 298 303 * iterates through the page table to clear SPTEs and free child page tables. 304 + * 305 + * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU 306 + * protection. Since this thread removed it from the paging structure, 307 + * this thread will be responsible for ensuring the page is freed. Hence the 308 + * early rcu_dereferences in the function. 299 309 */ 300 - static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt, 310 + static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, 301 311 bool shared) 302 312 { 303 - struct kvm_mmu_page *sp = sptep_to_sp(pt); 313 + struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); 304 314 int level = sp->role.level; 305 315 gfn_t base_gfn = sp->gfn; 306 316 u64 old_child_spte; ··· 318 318 tdp_mmu_unlink_page(kvm, sp, shared); 319 319 320 320 for (i = 0; i < PT64_ENT_PER_PAGE; i++) { 321 - sptep = pt + i; 321 + sptep = rcu_dereference(pt) + i; 322 322 gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)); 323 323 324 324 if (shared) { ··· 492 492 struct tdp_iter *iter, 493 493 u64 new_spte) 494 494 { 495 - u64 *root_pt = tdp_iter_root_pt(iter); 496 - struct kvm_mmu_page *root = sptep_to_sp(root_pt); 497 - int as_id = kvm_mmu_page_as_id(root); 498 - 499 495 lockdep_assert_held_read(&kvm->mmu_lock); 500 496 501 497 /* ··· 505 509 new_spte) != iter->old_spte) 506 510 return false; 507 511 508 - handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, 509 - iter->level, true); 512 + 
handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, 513 + new_spte, iter->level, true); 510 514 511 515 return true; 512 516 } ··· 534 538 * here since the SPTE is going from non-present 535 539 * to non-present. 536 540 */ 537 - WRITE_ONCE(*iter->sptep, 0); 541 + WRITE_ONCE(*rcu_dereference(iter->sptep), 0); 538 542 539 543 return true; 540 544 } ··· 560 564 u64 new_spte, bool record_acc_track, 561 565 bool record_dirty_log) 562 566 { 563 - tdp_ptep_t root_pt = tdp_iter_root_pt(iter); 564 - struct kvm_mmu_page *root = sptep_to_sp(root_pt); 565 - int as_id = kvm_mmu_page_as_id(root); 566 - 567 567 lockdep_assert_held_write(&kvm->mmu_lock); 568 568 569 569 /* ··· 573 581 574 582 WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); 575 583 576 - __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, 577 - iter->level, false); 584 + __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, 585 + new_spte, iter->level, false); 578 586 if (record_acc_track) 579 587 handle_changed_spte_acc_track(iter->old_spte, new_spte, 580 588 iter->level); 581 589 if (record_dirty_log) 582 - handle_changed_spte_dirty_log(kvm, as_id, iter->gfn, 590 + handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn, 583 591 iter->old_spte, new_spte, 584 592 iter->level); 585 593 } ··· 651 659 652 660 WARN_ON(iter->gfn > iter->next_last_level_gfn); 653 661 654 - tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], 655 - iter->root_level, iter->min_level, 656 - iter->next_last_level_gfn); 662 + tdp_iter_restart(iter); 657 663 658 664 return true; 659 665 }
+69 -46
arch/x86/kvm/x86.c
··· 1526 1526 1527 1527 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) 1528 1528 { 1529 + struct kvm_x86_msr_filter *msr_filter; 1530 + struct msr_bitmap_range *ranges; 1529 1531 struct kvm *kvm = vcpu->kvm; 1530 - struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges; 1531 - u32 count = kvm->arch.msr_filter.count; 1532 - u32 i; 1533 - bool r = kvm->arch.msr_filter.default_allow; 1532 + bool allowed; 1534 1533 int idx; 1534 + u32 i; 1535 1535 1536 - /* MSR filtering not set up or x2APIC enabled, allow everything */ 1537 - if (!count || (index >= 0x800 && index <= 0x8ff)) 1536 + /* x2APIC MSRs do not support filtering. */ 1537 + if (index >= 0x800 && index <= 0x8ff) 1538 1538 return true; 1539 1539 1540 - /* Prevent collision with set_msr_filter */ 1541 1540 idx = srcu_read_lock(&kvm->srcu); 1542 1541 1543 - for (i = 0; i < count; i++) { 1542 + msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); 1543 + if (!msr_filter) { 1544 + allowed = true; 1545 + goto out; 1546 + } 1547 + 1548 + allowed = msr_filter->default_allow; 1549 + ranges = msr_filter->ranges; 1550 + 1551 + for (i = 0; i < msr_filter->count; i++) { 1544 1552 u32 start = ranges[i].base; 1545 1553 u32 end = start + ranges[i].nmsrs; 1546 1554 u32 flags = ranges[i].flags; 1547 1555 unsigned long *bitmap = ranges[i].bitmap; 1548 1556 1549 1557 if ((index >= start) && (index < end) && (flags & type)) { 1550 - r = !!test_bit(index - start, bitmap); 1558 + allowed = !!test_bit(index - start, bitmap); 1551 1559 break; 1552 1560 } 1553 1561 } 1554 1562 1563 + out: 1555 1564 srcu_read_unlock(&kvm->srcu, idx); 1556 1565 1557 - return r; 1566 + return allowed; 1558 1567 } 1559 1568 EXPORT_SYMBOL_GPL(kvm_msr_allowed); 1560 1569 ··· 2559 2550 int i; 2560 2551 struct kvm_vcpu *vcpu; 2561 2552 struct kvm_arch *ka = &kvm->arch; 2553 + 2554 + kvm_hv_invalidate_tsc_page(kvm); 2562 2555 2563 2556 spin_lock(&ka->pvclock_gtod_sync_lock); 2564 2557 kvm_make_mclock_inprogress_request(kvm); 
··· 5363 5352 return r; 5364 5353 } 5365 5354 5366 - static void kvm_clear_msr_filter(struct kvm *kvm) 5355 + static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 5367 5356 { 5368 - u32 i; 5369 - u32 count = kvm->arch.msr_filter.count; 5370 - struct msr_bitmap_range ranges[16]; 5357 + struct kvm_x86_msr_filter *msr_filter; 5371 5358 5372 - mutex_lock(&kvm->lock); 5373 - kvm->arch.msr_filter.count = 0; 5374 - memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0])); 5375 - mutex_unlock(&kvm->lock); 5376 - synchronize_srcu(&kvm->srcu); 5359 + msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 5360 + if (!msr_filter) 5361 + return NULL; 5377 5362 5378 - for (i = 0; i < count; i++) 5379 - kfree(ranges[i].bitmap); 5363 + msr_filter->default_allow = default_allow; 5364 + return msr_filter; 5380 5365 } 5381 5366 5382 - static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range) 5367 + static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 5383 5368 { 5384 - struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges; 5369 + u32 i; 5370 + 5371 + if (!msr_filter) 5372 + return; 5373 + 5374 + for (i = 0; i < msr_filter->count; i++) 5375 + kfree(msr_filter->ranges[i].bitmap); 5376 + 5377 + kfree(msr_filter); 5378 + } 5379 + 5380 + static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 5381 + struct kvm_msr_filter_range *user_range) 5382 + { 5385 5383 struct msr_bitmap_range range; 5386 5384 unsigned long *bitmap = NULL; 5387 5385 size_t bitmap_size; ··· 5424 5404 goto err; 5425 5405 } 5426 5406 5427 - /* Everything ok, add this range identifier to our global pool */ 5428 - ranges[kvm->arch.msr_filter.count] = range; 5429 - /* Make sure we filled the array before we tell anyone to walk it */ 5430 - smp_wmb(); 5431 - kvm->arch.msr_filter.count++; 5407 + /* Everything ok, add this range identifier. 
*/ 5408 + msr_filter->ranges[msr_filter->count] = range; 5409 + msr_filter->count++; 5432 5410 5433 5411 return 0; 5434 5412 err: ··· 5437 5419 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) 5438 5420 { 5439 5421 struct kvm_msr_filter __user *user_msr_filter = argp; 5422 + struct kvm_x86_msr_filter *new_filter, *old_filter; 5440 5423 struct kvm_msr_filter filter; 5441 5424 bool default_allow; 5442 - int r = 0; 5443 5425 bool empty = true; 5426 + int r = 0; 5444 5427 u32 i; 5445 5428 5446 5429 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) ··· 5454 5435 if (empty && !default_allow) 5455 5436 return -EINVAL; 5456 5437 5457 - kvm_clear_msr_filter(kvm); 5438 + new_filter = kvm_alloc_msr_filter(default_allow); 5439 + if (!new_filter) 5440 + return -ENOMEM; 5458 5441 5459 - kvm->arch.msr_filter.default_allow = default_allow; 5460 - 5461 - /* 5462 - * Protect from concurrent calls to this function that could trigger 5463 - * a TOCTOU violation on kvm->arch.msr_filter.count. 5464 - */ 5465 - mutex_lock(&kvm->lock); 5466 5442 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 5467 - r = kvm_add_msr_filter(kvm, &filter.ranges[i]); 5468 - if (r) 5469 - break; 5443 + r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); 5444 + if (r) { 5445 + kvm_free_msr_filter(new_filter); 5446 + return r; 5447 + } 5470 5448 } 5449 + 5450 + mutex_lock(&kvm->lock); 5451 + 5452 + /* The per-VM filter is protected by kvm->lock... 
*/ 5453 + old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); 5454 + 5455 + rcu_assign_pointer(kvm->arch.msr_filter, new_filter); 5456 + synchronize_srcu(&kvm->srcu); 5457 + 5458 + kvm_free_msr_filter(old_filter); 5471 5459 5472 5460 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 5473 5461 mutex_unlock(&kvm->lock); 5474 5462 5475 - return r; 5463 + return 0; 5476 5464 } 5477 5465 5478 5466 long kvm_arch_vm_ioctl(struct file *filp, ··· 6629 6603 int cpu = get_cpu(); 6630 6604 6631 6605 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 6632 - smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, 6606 + on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 6633 6607 wbinvd_ipi, NULL, 1); 6634 6608 put_cpu(); 6635 6609 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); ··· 10660 10634 10661 10635 void kvm_arch_destroy_vm(struct kvm *kvm) 10662 10636 { 10663 - u32 i; 10664 - 10665 10637 if (current->mm == kvm->mm) { 10666 10638 /* 10667 10639 * Free memory regions allocated on behalf of userspace, ··· 10675 10651 mutex_unlock(&kvm->slots_lock); 10676 10652 } 10677 10653 static_call_cond(kvm_x86_vm_destroy)(kvm); 10678 - for (i = 0; i < kvm->arch.msr_filter.count; i++) 10679 - kfree(kvm->arch.msr_filter.ranges[i].bitmap); 10654 + kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); 10680 10655 kvm_pic_destroy(kvm); 10681 10656 kvm_ioapic_destroy(kvm); 10682 10657 kvm_free_vcpus(kvm);
-1
arch/x86/platform/iris/iris.c
··· 27 27 MODULE_LICENSE("GPL"); 28 28 MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>"); 29 29 MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille"); 30 - MODULE_SUPPORTED_DEVICE("Eurobraille/Iris"); 31 30 32 31 static bool force; 33 32
-2
drivers/atm/fore200e.c
··· 100 100 101 101 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); 102 102 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); 103 - MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); 104 - 105 103 106 104 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 107 105 { BUFFER_S1_NBR, BUFFER_L1_NBR },
+25 -37
drivers/base/power/runtime.c
··· 325 325 static int __rpm_callback(int (*cb)(struct device *), struct device *dev) 326 326 __releases(&dev->power.lock) __acquires(&dev->power.lock) 327 327 { 328 - bool use_links = dev->power.links_count > 0; 329 - bool get = false; 330 328 int retval, idx; 331 - bool put; 329 + bool use_links = dev->power.links_count > 0; 332 330 333 331 if (dev->power.irq_safe) { 334 332 spin_unlock(&dev->power.lock); 335 - } else if (!use_links) { 336 - spin_unlock_irq(&dev->power.lock); 337 333 } else { 338 - get = dev->power.runtime_status == RPM_RESUMING; 339 - 340 334 spin_unlock_irq(&dev->power.lock); 341 335 342 - /* Resume suppliers if necessary. */ 343 - if (get) { 336 + /* 337 + * Resume suppliers if necessary. 338 + * 339 + * The device's runtime PM status cannot change until this 340 + * routine returns, so it is safe to read the status outside of 341 + * the lock. 342 + */ 343 + if (use_links && dev->power.runtime_status == RPM_RESUMING) { 344 344 idx = device_links_read_lock(); 345 345 346 346 retval = rpm_get_suppliers(dev); ··· 355 355 356 356 if (dev->power.irq_safe) { 357 357 spin_lock(&dev->power.lock); 358 - return retval; 359 - } 358 + } else { 359 + /* 360 + * If the device is suspending and the callback has returned 361 + * success, drop the usage counters of the suppliers that have 362 + * been reference counted on its resume. 363 + * 364 + * Do that if resume fails too. 365 + */ 366 + if (use_links 367 + && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) 368 + || (dev->power.runtime_status == RPM_RESUMING && retval))) { 369 + idx = device_links_read_lock(); 360 370 361 - spin_lock_irq(&dev->power.lock); 371 + fail: 372 + rpm_put_suppliers(dev); 362 373 363 - if (!use_links) 364 - return retval; 365 - 366 - /* 367 - * If the device is suspending and the callback has returned success, 368 - * drop the usage counters of the suppliers that have been reference 369 - * counted on its resume. 370 - * 371 - * Do that if the resume fails too. 
372 - */ 373 - put = dev->power.runtime_status == RPM_SUSPENDING && !retval; 374 - if (put) 375 - __update_runtime_status(dev, RPM_SUSPENDED); 376 - else 377 - put = get && retval; 378 - 379 - if (put) { 380 - spin_unlock_irq(&dev->power.lock); 381 - 382 - idx = device_links_read_lock(); 383 - 384 - fail: 385 - rpm_put_suppliers(dev); 386 - 387 - device_links_read_unlock(idx); 374 + device_links_read_unlock(idx); 375 + } 388 376 389 377 spin_lock_irq(&dev->power.lock); 390 378 }
-1
drivers/block/floppy.c
··· 5091 5091 module_param(FLOPPY_IRQ, int, 0); 5092 5092 module_param(FLOPPY_DMA, int, 0); 5093 5093 MODULE_AUTHOR("Alain L. Knaff"); 5094 - MODULE_SUPPORTED_DEVICE("fd"); 5095 5094 MODULE_LICENSE("GPL"); 5096 5095 5097 5096 /* This doesn't actually get used other than for module information */
-1
drivers/bluetooth/btrsi.c
··· 194 194 module_exit(rsi_91x_bt_module_exit); 195 195 MODULE_AUTHOR("Redpine Signals Inc"); 196 196 MODULE_DESCRIPTION("RSI BT driver"); 197 - MODULE_SUPPORTED_DEVICE("RSI-BT"); 198 197 MODULE_LICENSE("Dual BSD/GPL");
-3
drivers/char/applicom.c
··· 81 81 MODULE_LICENSE("GPL"); 82 82 MODULE_ALIAS_MISCDEV(AC_MINOR); 83 83 84 - MODULE_SUPPORTED_DEVICE("ac"); 85 - 86 - 87 84 static struct applicom_board { 88 85 unsigned long PhysIO; 89 86 void __iomem *RamIO;
-1
drivers/char/toshiba.c
··· 64 64 MODULE_LICENSE("GPL"); 65 65 MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>"); 66 66 MODULE_DESCRIPTION("Toshiba laptop SMM driver"); 67 - MODULE_SUPPORTED_DEVICE("toshiba"); 68 67 69 68 static DEFINE_MUTEX(tosh_mutex); 70 69 static int tosh_fn;
+33 -22
drivers/counter/stm32-timer-cnt.c
··· 31 31 struct counter_device counter; 32 32 struct regmap *regmap; 33 33 struct clk *clk; 34 - u32 ceiling; 34 + u32 max_arr; 35 35 bool enabled; 36 36 struct stm32_timer_regs bak; 37 37 }; ··· 44 44 * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges 45 45 */ 46 46 enum stm32_count_function { 47 - STM32_COUNT_SLAVE_MODE_DISABLED = -1, 47 + STM32_COUNT_SLAVE_MODE_DISABLED, 48 48 STM32_COUNT_ENCODER_MODE_1, 49 49 STM32_COUNT_ENCODER_MODE_2, 50 50 STM32_COUNT_ENCODER_MODE_3, 51 51 }; 52 52 53 53 static enum counter_count_function stm32_count_functions[] = { 54 + [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE, 54 55 [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, 55 56 [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, 56 57 [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4, ··· 74 73 const unsigned long val) 75 74 { 76 75 struct stm32_timer_cnt *const priv = counter->priv; 76 + u32 ceiling; 77 77 78 - if (val > priv->ceiling) 78 + regmap_read(priv->regmap, TIM_ARR, &ceiling); 79 + if (val > ceiling) 79 80 return -EINVAL; 80 81 81 82 return regmap_write(priv->regmap, TIM_CNT, val); ··· 93 90 regmap_read(priv->regmap, TIM_SMCR, &smcr); 94 91 95 92 switch (smcr & TIM_SMCR_SMS) { 93 + case 0: 94 + *function = STM32_COUNT_SLAVE_MODE_DISABLED; 95 + return 0; 96 96 case 1: 97 97 *function = STM32_COUNT_ENCODER_MODE_1; 98 98 return 0; ··· 105 99 case 3: 106 100 *function = STM32_COUNT_ENCODER_MODE_3; 107 101 return 0; 102 + default: 103 + return -EINVAL; 108 104 } 109 - 110 - return -EINVAL; 111 105 } 112 106 113 107 static int stm32_count_function_set(struct counter_device *counter, ··· 118 112 u32 cr1, sms; 119 113 120 114 switch (function) { 115 + case STM32_COUNT_SLAVE_MODE_DISABLED: 116 + sms = 0; 117 + break; 121 118 case STM32_COUNT_ENCODER_MODE_1: 122 119 sms = 1; 123 120 break; ··· 131 122 sms = 3; 132 123 break; 133 124 default: 134 - sms = 0; 135 - break; 125 + 
return -EINVAL; 136 126 } 137 127 138 128 /* Store enable status */ 139 129 regmap_read(priv->regmap, TIM_CR1, &cr1); 140 130 141 131 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); 142 - 143 - /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ 144 - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); 145 - regmap_write(priv->regmap, TIM_ARR, priv->ceiling); 146 132 147 133 regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); 148 134 ··· 189 185 if (ret) 190 186 return ret; 191 187 188 + if (ceiling > priv->max_arr) 189 + return -ERANGE; 190 + 192 191 /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ 193 192 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); 194 193 regmap_write(priv->regmap, TIM_ARR, ceiling); 195 194 196 - priv->ceiling = ceiling; 197 195 return len; 198 196 } 199 197 ··· 280 274 size_t function; 281 275 int err; 282 276 283 - /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */ 284 - *action = STM32_SYNAPSE_ACTION_NONE; 285 - 286 277 err = stm32_count_function_get(counter, count, &function); 287 278 if (err) 288 - return 0; 279 + return err; 289 280 290 281 switch (function) { 282 + case STM32_COUNT_SLAVE_MODE_DISABLED: 283 + /* counts on internal clock when CEN=1 */ 284 + *action = STM32_SYNAPSE_ACTION_NONE; 285 + return 0; 291 286 case STM32_COUNT_ENCODER_MODE_1: 292 287 /* counts up/down on TI1FP1 edge depending on TI2FP2 level */ 293 288 if (synapse->signal->id == count->synapses[0].signal->id) 294 289 *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; 295 - break; 290 + else 291 + *action = STM32_SYNAPSE_ACTION_NONE; 292 + return 0; 296 293 case STM32_COUNT_ENCODER_MODE_2: 297 294 /* counts up/down on TI2FP2 edge depending on TI1FP1 level */ 298 295 if (synapse->signal->id == count->synapses[1].signal->id) 299 296 *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; 300 - break; 297 + else 298 + *action = STM32_SYNAPSE_ACTION_NONE; 299 + return 0; 301 300 case STM32_COUNT_ENCODER_MODE_3: 302 301 /* 
counts up/down on both TI1FP1 and TI2FP2 edges */ 303 302 *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; 304 - break; 303 + return 0; 304 + default: 305 + return -EINVAL; 305 306 } 306 - 307 - return 0; 308 307 } 309 308 310 309 static const struct counter_ops stm32_timer_cnt_ops = { ··· 370 359 371 360 priv->regmap = ddata->regmap; 372 361 priv->clk = ddata->clk; 373 - priv->ceiling = ddata->max_arr; 362 + priv->max_arr = ddata->max_arr; 374 363 375 364 priv->counter.name = dev_name(dev); 376 365 priv->counter.parent = dev;
+2 -1
drivers/firmware/efi/efi.c
··· 927 927 } 928 928 929 929 /* first try to find a slot in an existing linked list entry */ 930 - for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { 930 + for (prsv = efi_memreserve_root->next; prsv; ) { 931 931 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); 932 932 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); 933 933 if (index < rsv->size) { ··· 937 937 memunmap(rsv); 938 938 return efi_mem_reserve_iomem(addr, size); 939 939 } 940 + prsv = rsv->next; 940 941 memunmap(rsv); 941 942 } 942 943
+4
drivers/firmware/efi/vars.c
··· 485 485 } 486 486 487 487 break; 488 + case EFI_UNSUPPORTED: 489 + err = -EOPNOTSUPP; 490 + status = EFI_NOT_FOUND; 491 + break; 488 492 case EFI_NOT_FOUND: 489 493 break; 490 494 default:
+9 -1
drivers/gpio/gpiolib.c
··· 571 571 struct lock_class_key *lock_key, 572 572 struct lock_class_key *request_key) 573 573 { 574 + struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL; 574 575 unsigned long flags; 575 576 int ret = 0; 576 577 unsigned i; ··· 594 593 } 595 594 596 595 of_gpio_dev_init(gc, gdev); 596 + 597 + /* 598 + * Assign fwnode depending on the result of the previous calls, 599 + * if none of them succeed, assign it to the parent's one. 600 + */ 601 + gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode; 597 602 598 603 gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL); 599 604 if (gdev->id < 0) { ··· 4263 4256 return ret; 4264 4257 } 4265 4258 4266 - if (driver_register(&gpio_stub_drv) < 0) { 4259 + ret = driver_register(&gpio_stub_drv); 4260 + if (ret < 0) { 4267 4261 pr_err("gpiolib: could not register GPIO stub driver\n"); 4268 4262 bus_unregister(&gpio_bus_type); 4269 4263 return ret;
+2 -32
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 1507 1507 if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed 1508 1508 || pipe_ctx->stream->update_flags.bits.gamut_remap 1509 1509 || pipe_ctx->stream->update_flags.bits.out_csc) { 1510 - struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 1511 - 1512 - if (mpc->funcs->set_gamut_remap) { 1513 - int i; 1514 - int mpcc_id = hubp->inst; 1515 - struct mpc_grph_gamut_adjustment adjust; 1516 - bool enable_remap_dpp = false; 1517 - 1518 - memset(&adjust, 0, sizeof(adjust)); 1519 - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 1520 - 1521 - /* save the enablement of gamut remap for dpp */ 1522 - enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap; 1523 - 1524 - /* force bypass gamut remap for dpp/cm */ 1525 - pipe_ctx->stream->gamut_remap_matrix.enable_remap = false; 1526 - dc->hwss.program_gamut_remap(pipe_ctx); 1527 - 1528 - /* restore gamut remap flag and use this remap into mpc */ 1529 - pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp; 1530 - 1531 - /* build remap matrix for top plane if enabled */ 1532 - if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) { 1533 - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 1534 - for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) 1535 - adjust.temperature_matrix[i] = 1536 - pipe_ctx->stream->gamut_remap_matrix.matrix[i]; 1537 - } 1538 - mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust); 1539 - } else 1540 - /* dpp/cm gamut remap*/ 1541 - dc->hwss.program_gamut_remap(pipe_ctx); 1510 + /* dpp/cm gamut remap*/ 1511 + dc->hwss.program_gamut_remap(pipe_ctx); 1542 1512 1543 1513 /*call the dcn2 method which uses mpc csc*/ 1544 1514 dc->hwss.program_output_csc(dc,
+5
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
··· 1595 1595 dcn2_1_soc.num_chans = bw_params->num_channels; 1596 1596 1597 1597 ASSERT(clk_table->num_entries); 1598 + /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */ 1599 + for (i = 0; i < dcn2_1_soc.num_states + 1; i++) { 1600 + clock_limits[i] = dcn2_1_soc.clock_limits[i]; 1601 + } 1602 + 1598 1603 for (i = 0; i < clk_table->num_entries; i++) { 1599 1604 /* loop backwards*/ 1600 1605 for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
+18 -8
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
··· 113 113 struct pwl_result_data *rgb_resulted; 114 114 struct pwl_result_data *rgb; 115 115 struct pwl_result_data *rgb_plus_1; 116 + struct pwl_result_data *rgb_minus_1; 116 117 struct fixed31_32 end_value; 117 118 118 119 int32_t region_start, region_end; ··· 141 140 region_start = -MAX_LOW_POINT; 142 141 region_end = NUMBER_REGIONS - MAX_LOW_POINT; 143 142 } else { 144 - /* 10 segments 143 + /* 11 segments 145 144 * segment is from 2^-10 to 2^0 146 145 * There are less than 256 points, for optimization 147 146 */ ··· 155 154 seg_distr[7] = 4; 156 155 seg_distr[8] = 4; 157 156 seg_distr[9] = 4; 157 + seg_distr[10] = 1; 158 158 159 159 region_start = -10; 160 - region_end = 0; 160 + region_end = 1; 161 161 } 162 162 163 163 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) ··· 190 188 rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index]; 191 189 rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; 192 190 rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; 191 + 192 + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; 193 + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; 194 + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; 193 195 194 196 // All 3 color channels have same x 195 197 corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), ··· 265 259 266 260 rgb = rgb_resulted; 267 261 rgb_plus_1 = rgb_resulted + 1; 262 + rgb_minus_1 = rgb; 268 263 269 264 i = 1; 270 265 while (i != hw_points + 1) { 271 - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) 272 - rgb_plus_1->red = rgb->red; 273 - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) 274 - rgb_plus_1->green = rgb->green; 275 - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) 276 - rgb_plus_1->blue = rgb->blue; 266 + if (i >= hw_points - 1) { 267 + if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) 268 + rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red); 269 + if 
(dc_fixpt_lt(rgb_plus_1->green, rgb->green)) 270 + rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green); 271 + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) 272 + rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue); 273 + } 277 274 278 275 rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); 279 276 rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); ··· 292 283 } 293 284 294 285 ++rgb_plus_1; 286 + rgb_minus_1 = rgb; 295 287 ++rgb; 296 288 ++i; 297 289 }
+5 -8
drivers/gpu/drm/i915/i915_perf.c
··· 603 603 { 604 604 int report_size = stream->oa_buffer.format_size; 605 605 struct drm_i915_perf_record_header header; 606 - u32 sample_flags = stream->sample_flags; 607 606 608 607 header.type = DRM_I915_PERF_RECORD_SAMPLE; 609 608 header.pad = 0; ··· 616 617 return -EFAULT; 617 618 buf += sizeof(header); 618 619 619 - if (sample_flags & SAMPLE_OA_REPORT) { 620 - if (copy_to_user(buf, report, report_size)) 621 - return -EFAULT; 622 - } 620 + if (copy_to_user(buf, report, report_size)) 621 + return -EFAULT; 623 622 624 623 (*offset) += header.size; 625 624 ··· 2679 2682 2680 2683 stream->perf->ops.oa_enable(stream); 2681 2684 2682 - if (stream->periodic) 2685 + if (stream->sample_flags & SAMPLE_OA_REPORT) 2683 2686 hrtimer_start(&stream->poll_check_timer, 2684 2687 ns_to_ktime(stream->poll_oa_period), 2685 2688 HRTIMER_MODE_REL_PINNED); ··· 2742 2745 { 2743 2746 stream->perf->ops.oa_disable(stream); 2744 2747 2745 - if (stream->periodic) 2748 + if (stream->sample_flags & SAMPLE_OA_REPORT) 2746 2749 hrtimer_cancel(&stream->poll_check_timer); 2747 2750 } 2748 2751 ··· 3025 3028 * disabled stream as an error. In particular it might otherwise lead 3026 3029 * to a deadlock for blocking file descriptors... 3027 3030 */ 3028 - if (!stream->enabled) 3031 + if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) 3029 3032 return -EIO; 3030 3033 3031 3034 if (!(file->f_flags & O_NONBLOCK)) {
+22 -1
drivers/gpu/drm/i915/i915_reg.h
··· 3316 3316 3317 3317 #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) 3318 3318 #define ILK_FBCQ_DIS (1 << 22) 3319 - #define ILK_PABSTRETCH_DIS (1 << 21) 3319 + #define ILK_PABSTRETCH_DIS REG_BIT(21) 3320 + #define ILK_SABSTRETCH_DIS REG_BIT(20) 3321 + #define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20) 3322 + #define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0) 3323 + #define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1) 3324 + #define IVB_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 2) 3325 + #define IVB_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 3) 3326 + #define IVB_SPR_STRETCH_MAX_MASK REG_GENMASK(19, 18) 3327 + #define IVB_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 0) 3328 + #define IVB_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 1) 3329 + #define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2) 3330 + #define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3) 3320 3331 3321 3332 3322 3333 /* ··· 8050 8039 8051 8040 #define _CHICKEN_PIPESL_1_A 0x420b0 8052 8041 #define _CHICKEN_PIPESL_1_B 0x420b4 8042 + #define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27) 8043 + #define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0) 8044 + #define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1) 8045 + #define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2) 8046 + #define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3) 8047 + #define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25) 8048 + #define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0) 8049 + #define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1) 8050 + #define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2) 8051 + #define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3) 8053 8052 #define HSW_FBCQ_DIS (1 << 22) 8054 8053 #define 
BDW_DPRS_MASK_VBLANK_SRD (1 << 0) 8055 8054 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+15 -1
drivers/gpu/drm/i915/intel_pm.c
··· 7245 7245 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, 7246 7246 intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 7247 7247 7248 - /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 7249 7248 for_each_pipe(dev_priv, pipe) { 7249 + /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 7250 7250 intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), 7251 7251 intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) | 7252 7252 BDW_DPRS_MASK_VBLANK_SRD); 7253 + 7254 + /* Undocumented but fixes async flip + VT-d corruption */ 7255 + if (intel_vtd_active()) 7256 + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), 7257 + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); 7253 7258 } 7254 7259 7255 7260 /* WaVSRefCountFullforceMissDisable:bdw */ ··· 7290 7285 7291 7286 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) 7292 7287 { 7288 + enum pipe pipe; 7289 + 7293 7290 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 7294 7291 intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), 7295 7292 intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | 7296 7293 HSW_FBCQ_DIS); 7294 + 7295 + for_each_pipe(dev_priv, pipe) { 7296 + /* Undocumented but fixes async flip + VT-d corruption */ 7297 + if (intel_vtd_active()) 7298 + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), 7299 + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); 7300 + } 7297 7301 7298 7302 /* This is required by WaCatErrorRejectionIssue:hsw */ 7299 7303 intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+8
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 551 551 552 552 if (!ttm_dma) 553 553 return; 554 + if (!ttm_dma->pages) { 555 + NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); 556 + return; 557 + } 554 558 555 559 /* Don't waste time looping if the object is coherent */ 556 560 if (nvbo->force_coherent) ··· 587 583 588 584 if (!ttm_dma) 589 585 return; 586 + if (!ttm_dma->pages) { 587 + NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); 588 + return; 589 + } 590 590 591 591 /* Don't waste time looping if the object is coherent */ 592 592 if (nvbo->force_coherent)
+4 -3
drivers/gpu/drm/omapdrm/dss/dsi.c
··· 2149 2149 const struct mipi_dsi_msg *msg) 2150 2150 { 2151 2151 struct mipi_dsi_packet pkt; 2152 + int ret; 2152 2153 u32 r; 2153 2154 2154 - r = mipi_dsi_create_packet(&pkt, msg); 2155 - if (r < 0) 2156 - return r; 2155 + ret = mipi_dsi_create_packet(&pkt, msg); 2156 + if (ret < 0) 2157 + return ret; 2157 2158 2158 2159 WARN_ON(!dsi_bus_is_locked(dsi)); 2159 2160
+3
drivers/iio/adc/Kconfig
··· 266 266 select IIO_BUFFER 267 267 select IIO_BUFFER_HW_CONSUMER 268 268 select IIO_BUFFER_DMAENGINE 269 + depends on HAS_IOMEM 270 + depends on OF 269 271 help 270 272 Say yes here to build support for Analog Devices Generic 271 273 AXI ADC IP core. The IP core is used for interfacing with ··· 925 923 depends on ARCH_STM32 || COMPILE_TEST 926 924 depends on OF 927 925 depends on REGULATOR 926 + depends on HAS_IOMEM 928 927 select IIO_BUFFER 929 928 select MFD_STM32_TIMERS 930 929 select IIO_STM32_TIMER_TRIGGER
+1 -1
drivers/iio/adc/ab8500-gpadc.c
··· 918 918 return processed; 919 919 920 920 /* Return millivolt or milliamps or millicentigrades */ 921 - *val = processed * 1000; 921 + *val = processed; 922 922 return IIO_VAL_INT; 923 923 } 924 924
+1 -1
drivers/iio/adc/ad7949.c
··· 91 91 int ret; 92 92 int i; 93 93 int bits_per_word = ad7949_adc->resolution; 94 - int mask = GENMASK(ad7949_adc->resolution, 0); 94 + int mask = GENMASK(ad7949_adc->resolution - 1, 0); 95 95 struct spi_message msg; 96 96 struct spi_transfer tx[] = { 97 97 {
+1 -1
drivers/iio/adc/qcom-spmi-vadc.c
··· 597 597 VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1) 598 598 599 599 VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0) 600 - VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0) 600 + VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT) 601 601 VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0) 602 602 VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0) 603 603 VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0)
+2
drivers/iio/gyro/mpu3050-core.c
··· 551 551 MPU3050_FIFO_R, 552 552 &fifo_values[offset], 553 553 toread); 554 + if (ret) 555 + goto out_trigger_unlock; 554 556 555 557 dev_dbg(mpu3050->dev, 556 558 "%04x %04x %04x %04x %04x\n",
+7 -5
drivers/iio/humidity/hid-sensor-humidity.c
··· 15 15 struct hid_humidity_state { 16 16 struct hid_sensor_common common_attributes; 17 17 struct hid_sensor_hub_attribute_info humidity_attr; 18 - s32 humidity_data; 18 + struct { 19 + s32 humidity_data; 20 + u64 timestamp __aligned(8); 21 + } scan; 19 22 int scale_pre_decml; 20 23 int scale_post_decml; 21 24 int scale_precision; ··· 128 125 struct hid_humidity_state *humid_st = iio_priv(indio_dev); 129 126 130 127 if (atomic_read(&humid_st->common_attributes.data_ready)) 131 - iio_push_to_buffers_with_timestamp(indio_dev, 132 - &humid_st->humidity_data, 133 - iio_get_time_ns(indio_dev)); 128 + iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan, 129 + iio_get_time_ns(indio_dev)); 134 130 135 131 return 0; 136 132 } ··· 144 142 145 143 switch (usage_id) { 146 144 case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY: 147 - humid_st->humidity_data = *(s32 *)raw_data; 145 + humid_st->scan.humidity_data = *(s32 *)raw_data; 148 146 149 147 return 0; 150 148 default:
+1 -2
drivers/iio/imu/adis16400.c
··· 462 462 if (ret) 463 463 goto err_ret; 464 464 465 - ret = sscanf(indio_dev->name, "adis%u\n", &device_id); 466 - if (ret != 1) { 465 + if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) { 467 466 ret = -EINVAL; 468 467 goto err_ret; 469 468 }
+11 -2
drivers/iio/light/hid-sensor-prox.c
··· 23 23 struct hid_sensor_common common_attributes; 24 24 struct hid_sensor_hub_attribute_info prox_attr; 25 25 u32 human_presence; 26 + int scale_pre_decml; 27 + int scale_post_decml; 28 + int scale_precision; 26 29 }; 27 30 28 31 /* Channel definitions */ ··· 96 93 ret_type = IIO_VAL_INT; 97 94 break; 98 95 case IIO_CHAN_INFO_SCALE: 99 - *val = prox_state->prox_attr.units; 100 - ret_type = IIO_VAL_INT; 96 + *val = prox_state->scale_pre_decml; 97 + *val2 = prox_state->scale_post_decml; 98 + ret_type = prox_state->scale_precision; 101 99 break; 102 100 case IIO_CHAN_INFO_OFFSET: 103 101 *val = hid_sensor_convert_exponent( ··· 237 233 HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | 238 234 HID_USAGE_SENSOR_HUMAN_PRESENCE, 239 235 &st->common_attributes.sensitivity); 236 + 237 + st->scale_precision = hid_sensor_format_scale( 238 + hsdev->usage, 239 + &st->prox_attr, 240 + &st->scale_pre_decml, &st->scale_post_decml); 240 241 241 242 return ret; 242 243 }
+8 -6
drivers/iio/temperature/hid-sensor-temperature.c
··· 15 15 struct temperature_state { 16 16 struct hid_sensor_common common_attributes; 17 17 struct hid_sensor_hub_attribute_info temperature_attr; 18 - s32 temperature_data; 18 + struct { 19 + s32 temperature_data; 20 + u64 timestamp __aligned(8); 21 + } scan; 19 22 int scale_pre_decml; 20 23 int scale_post_decml; 21 24 int scale_precision; ··· 35 32 BIT(IIO_CHAN_INFO_SAMP_FREQ) | 36 33 BIT(IIO_CHAN_INFO_HYSTERESIS), 37 34 }, 38 - IIO_CHAN_SOFT_TIMESTAMP(3), 35 + IIO_CHAN_SOFT_TIMESTAMP(1), 39 36 }; 40 37 41 38 /* Adjust channel real bits based on report descriptor */ ··· 126 123 struct temperature_state *temp_st = iio_priv(indio_dev); 127 124 128 125 if (atomic_read(&temp_st->common_attributes.data_ready)) 129 - iio_push_to_buffers_with_timestamp(indio_dev, 130 - &temp_st->temperature_data, 131 - iio_get_time_ns(indio_dev)); 126 + iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan, 127 + iio_get_time_ns(indio_dev)); 132 128 133 129 return 0; 134 130 } ··· 142 140 143 141 switch (usage_id) { 144 142 case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE: 145 - temp_st->temperature_data = *(s32 *)raw_data; 143 + temp_st->scan.temperature_data = *(s32 *)raw_data; 146 144 return 0; 147 145 default: 148 146 return -EINVAL;
-1
drivers/input/joydev.c
··· 26 26 27 27 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 28 28 MODULE_DESCRIPTION("Joystick device interfaces"); 29 - MODULE_SUPPORTED_DEVICE("input/js"); 30 29 MODULE_LICENSE("GPL"); 31 30 32 31 #define JOYDEV_MINOR_BASE 0
+20 -16
drivers/iommu/amd/init.c
··· 2714 2714 struct acpi_table_header *ivrs_base; 2715 2715 int i, remap_cache_sz, ret; 2716 2716 acpi_status status; 2717 - u32 pci_id; 2718 2717 2719 2718 if (!amd_iommu_detected) 2720 2719 return -ENODEV; ··· 2803 2804 if (ret) 2804 2805 goto out; 2805 2806 2806 - /* Disable IOMMU if there's Stoney Ridge graphics */ 2807 - for (i = 0; i < 32; i++) { 2808 - pci_id = read_pci_config(0, i, 0, 0); 2809 - if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { 2810 - pr_info("Disable IOMMU on Stoney Ridge\n"); 2811 - amd_iommu_disabled = true; 2812 - break; 2813 - } 2814 - } 2815 - 2816 2807 /* Disable any previously enabled IOMMUs */ 2817 2808 if (!is_kdump_kernel() || amd_iommu_disabled) 2818 2809 disable_iommus(); ··· 2869 2880 { 2870 2881 struct acpi_table_header *ivrs_base; 2871 2882 acpi_status status; 2883 + int i; 2872 2884 2873 2885 status = acpi_get_table("IVRS", 0, &ivrs_base); 2874 2886 if (status == AE_NOT_FOUND) ··· 2881 2891 } 2882 2892 2883 2893 acpi_put_table(ivrs_base); 2894 + 2895 + /* Don't use IOMMU if there is Stoney Ridge graphics */ 2896 + for (i = 0; i < 32; i++) { 2897 + u32 pci_id; 2898 + 2899 + pci_id = read_pci_config(0, i, 0, 0); 2900 + if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { 2901 + pr_info("Disable IOMMU on Stoney Ridge\n"); 2902 + return false; 2903 + } 2904 + } 2884 2905 2885 2906 /* Make sure ACS will be enabled during PCI probe */ 2886 2907 pci_request_acs(); ··· 2919 2918 } 2920 2919 break; 2921 2920 case IOMMU_IVRS_DETECTED: 2922 - ret = early_amd_iommu_init(); 2923 - init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 2924 - if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { 2925 - pr_info("AMD IOMMU disabled\n"); 2921 + if (amd_iommu_disabled) { 2926 2922 init_state = IOMMU_CMDLINE_DISABLED; 2927 2923 ret = -EINVAL; 2924 + } else { 2925 + ret = early_amd_iommu_init(); 2926 + init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 2928 2927 } 2929 2928 break; 2930 2929 case IOMMU_ACPI_FINISHED: ··· 3002 3001 amd_iommu_irq_remap = true; 3003 3002 3004 3003 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); 3005 - if (ret) 3004 + if (ret) { 3005 + amd_iommu_irq_remap = false; 3006 3006 return ret; 3007 + } 3008 + 3007 3009 return amd_iommu_irq_remap ? 0 : -ENODEV; 3008 3010 } 3009 3011
+3 -4
drivers/iommu/tegra-smmu.c
··· 849 849 smmu = tegra_smmu_find(args.np); 850 850 if (smmu) { 851 851 err = tegra_smmu_configure(smmu, dev, &args); 852 - of_node_put(args.np); 853 852 854 - if (err < 0) 853 + if (err < 0) { 854 + of_node_put(args.np); 855 855 return ERR_PTR(err); 856 - 857 - break; 856 + } 858 857 } 859 858 860 859 of_node_put(args.np);
-1
drivers/media/firewire/firedtv-fw.c
··· 430 430 MODULE_AUTHOR("Ben Backx <ben@bbackx.com>"); 431 431 MODULE_DESCRIPTION("FireDTV DVB Driver"); 432 432 MODULE_LICENSE("GPL"); 433 - MODULE_SUPPORTED_DEVICE("FireDTV DVB");
-1
drivers/media/pci/cx18/cx18-alsa-main.c
··· 41 41 42 42 MODULE_AUTHOR("Andy Walls"); 43 43 MODULE_DESCRIPTION("CX23418 ALSA Interface"); 44 - MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder"); 45 44 MODULE_LICENSE("GPL"); 46 45 47 46 MODULE_VERSION(CX18_VERSION);
-1
drivers/media/pci/cx18/cx18-driver.c
··· 232 232 233 233 MODULE_AUTHOR("Hans Verkuil"); 234 234 MODULE_DESCRIPTION("CX23418 driver"); 235 - MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder"); 236 235 MODULE_LICENSE("GPL"); 237 236 238 237 MODULE_VERSION(CX18_VERSION);
-1
drivers/media/pci/cx25821/cx25821-alsa.c
··· 104 104 MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards"); 105 105 MODULE_AUTHOR("Hiep Huynh"); 106 106 MODULE_LICENSE("GPL"); 107 - MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); /* "{{Conexant,23881}," */ 108 107 109 108 static unsigned int debug; 110 109 module_param(debug, int, 0644);
-1
drivers/media/pci/cx88/cx88-alsa.c
··· 98 98 MODULE_LICENSE("GPL v2"); 99 99 MODULE_VERSION(CX88_VERSION); 100 100 101 - MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}"); 102 101 static unsigned int debug; 103 102 module_param(debug, int, 0644); 104 103 MODULE_PARM_DESC(debug, "enable debug messages");
-1
drivers/media/pci/ivtv/ivtv-alsa-main.c
··· 38 38 39 39 MODULE_AUTHOR("Andy Walls"); 40 40 MODULE_DESCRIPTION("CX23415/CX23416 ALSA Interface"); 41 - MODULE_SUPPORTED_DEVICE("CX23415/CX23416 MPEG2 encoder"); 42 41 MODULE_LICENSE("GPL"); 43 42 44 43 MODULE_VERSION(IVTV_VERSION);
-3
drivers/media/pci/ivtv/ivtv-driver.c
··· 275 275 276 276 MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil"); 277 277 MODULE_DESCRIPTION("CX23415/CX23416 driver"); 278 - MODULE_SUPPORTED_DEVICE 279 - ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n" 280 - "\t\t\tYuan MPG series and similar)"); 281 278 MODULE_LICENSE("GPL"); 282 279 283 280 MODULE_VERSION(IVTV_VERSION);
-1
drivers/media/pci/sta2x11/sta2x11_vip.c
··· 1269 1269 MODULE_DESCRIPTION("STA2X11 Video Input Port driver"); 1270 1270 MODULE_AUTHOR("Wind River"); 1271 1271 MODULE_LICENSE("GPL v2"); 1272 - MODULE_SUPPORTED_DEVICE("sta2x11 video input"); 1273 1272 MODULE_VERSION(DRV_VERSION); 1274 1273 MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
-1
drivers/media/platform/atmel/atmel-isi.c
··· 1363 1363 MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>"); 1364 1364 MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux"); 1365 1365 MODULE_LICENSE("GPL"); 1366 - MODULE_SUPPORTED_DEVICE("video");
-1
drivers/media/platform/atmel/atmel-sama5d2-isc.c
··· 330 330 MODULE_AUTHOR("Songjun Wu"); 331 331 MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC"); 332 332 MODULE_LICENSE("GPL v2"); 333 - MODULE_SUPPORTED_DEVICE("video");
-4
drivers/media/platform/marvell-ccic/cafe-driver.c
··· 44 44 MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>"); 45 45 MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver"); 46 46 MODULE_LICENSE("GPL"); 47 - MODULE_SUPPORTED_DEVICE("Video"); 48 - 49 - 50 - 51 47 52 48 struct cafe_camera { 53 49 int registered; /* Fully initialized? */
-1
drivers/media/platform/stm32/stm32-dcmi.c
··· 2149 2149 MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>"); 2150 2150 MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver"); 2151 2151 MODULE_LICENSE("GPL"); 2152 - MODULE_SUPPORTED_DEVICE("video");
-1
drivers/media/usb/cpia2/cpia2_v4l.c
··· 56 56 57 57 MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>"); 58 58 MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras"); 59 - MODULE_SUPPORTED_DEVICE("video"); 60 59 MODULE_LICENSE("GPL"); 61 60 MODULE_VERSION(CPIA_VERSION); 62 61
-1
drivers/media/usb/tm6000/tm6000-alsa.c
··· 51 51 MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards"); 52 52 MODULE_AUTHOR("Mauro Carvalho Chehab"); 53 53 MODULE_LICENSE("GPL v2"); 54 - MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}"); 55 54 static unsigned int debug; 56 55 module_param(debug, int, 0644); 57 56 MODULE_PARM_DESC(debug, "enable debug messages");
-2
drivers/media/usb/tm6000/tm6000-dvb.c
··· 23 23 MODULE_AUTHOR("Mauro Carvalho Chehab"); 24 24 MODULE_LICENSE("GPL"); 25 25 26 - MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}"); 27 - 28 26 static int debug; 29 27 30 28 module_param(debug, int, 0644);
-1
drivers/mtd/maps/sun_uflash.c
··· 32 32 33 33 MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); 34 34 MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets"); 35 - MODULE_SUPPORTED_DEVICE(DRIVER_NAME); 36 35 MODULE_LICENSE("GPL"); 37 36 MODULE_VERSION("2.1"); 38 37
-1
drivers/net/can/peak_canfd/peak_pciefd_main.c
··· 21 21 22 22 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); 23 23 MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards"); 24 - MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards"); 25 24 MODULE_LICENSE("GPL v2"); 26 25 27 26 #define PCIEFD_DRV_NAME "peak_pciefd"
-1
drivers/net/can/sja1000/ems_pci.c
··· 21 21 22 22 MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>"); 23 23 MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards"); 24 - MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card"); 25 24 MODULE_LICENSE("GPL v2"); 26 25 27 26 #define EMS_PCI_V1_MAX_CHAN 2
-1
drivers/net/can/sja1000/ems_pcmcia.c
··· 21 21 22 22 MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>"); 23 23 MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards"); 24 - MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card"); 25 24 MODULE_LICENSE("GPL v2"); 26 25 27 26 #define EMS_PCMCIA_MAX_CHAN 2
-1
drivers/net/can/sja1000/kvaser_pci.c
··· 33 33 34 34 MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>"); 35 35 MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards"); 36 - MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card"); 37 36 MODULE_LICENSE("GPL v2"); 38 37 39 38 #define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */
-2
drivers/net/can/sja1000/peak_pci.c
··· 24 24 25 25 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); 26 26 MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); 27 - MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards"); 28 - MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards"); 29 27 MODULE_LICENSE("GPL v2"); 30 28 31 29 #define DRV_NAME "peak_pci"
-1
drivers/net/can/sja1000/peak_pcmcia.c
··· 22 22 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); 23 23 MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards"); 24 24 MODULE_LICENSE("GPL v2"); 25 - MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card"); 26 25 27 26 /* PEAK-System PCMCIA driver name */ 28 27 #define PCC_NAME "peak_pcmcia"
-12
drivers/net/can/sja1000/plx_pci.c
··· 25 25 MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>"); 26 26 MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with " 27 27 "the SJA1000 chips"); 28 - MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " 29 - "Adlink PCI-7841/cPCI-7841 SE, " 30 - "Marathon CAN-bus-PCI, " 31 - "Marathon CAN-bus-PCIe, " 32 - "TEWS TECHNOLOGIES TPMC810, " 33 - "esd CAN-PCI/CPCI/PCI104/200, " 34 - "esd CAN-PCI/PMC/266, " 35 - "esd CAN-PCIe/2000, " 36 - "Connect Tech Inc. CANpro/104-Plus Opto (CRG001), " 37 - "IXXAT PC-I 04/PCI, " 38 - "ELCUS CAN-200-PCI, " 39 - "ASEM DUAL CAN-RAW") 40 28 MODULE_LICENSE("GPL v2"); 41 29 42 30 #define PLX_PCI_MAX_CHAN 2
-2
drivers/net/can/usb/peak_usb/pcan_usb.c
··· 18 18 19 19 #include "pcan_usb_core.h" 20 20 21 - MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); 22 - 23 21 /* PCAN-USB Endpoints */ 24 22 #define PCAN_USB_EP_CMDOUT 1 25 23 #define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN)
-3
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
··· 16 16 #include "pcan_usb_core.h" 17 17 #include "pcan_usb_pro.h" 18 18 19 - MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter"); 20 - MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter"); 21 - 22 19 #define PCAN_USBPROFD_CHANNEL_COUNT 2 23 20 #define PCAN_USBFD_CHANNEL_COUNT 1 24 21
-2
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
··· 17 17 #include "pcan_usb_core.h" 18 18 #include "pcan_usb_pro.h" 19 19 20 - MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); 21 - 22 20 #define PCAN_USBPRO_CHANNEL_COUNT 2 23 21 24 22 /* PCAN-USB Pro adapter internal clock (MHz) */
-1
drivers/net/hamradio/scc.c
··· 2167 2167 2168 2168 MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>"); 2169 2169 MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards"); 2170 - MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio"); 2171 2170 MODULE_LICENSE("GPL"); 2172 2171 module_init(scc_init_driver); 2173 2172 module_exit(scc_cleanup_driver);
-1
drivers/net/wireless/admtek/adm8211.c
··· 28 28 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 29 29 MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); 30 30 MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211"); 31 - MODULE_SUPPORTED_DEVICE("ADM8211"); 32 31 MODULE_LICENSE("GPL"); 33 32 34 33 static unsigned int tx_ring_size __read_mostly = 16;
-1
drivers/net/wireless/ath/ath5k/base.c
··· 90 90 MODULE_AUTHOR("Jiri Slaby"); 91 91 MODULE_AUTHOR("Nick Kossifidis"); 92 92 MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); 93 - MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); 94 93 MODULE_LICENSE("Dual BSD/GPL"); 95 94 96 95 static int ath5k_init(struct ieee80211_hw *hw);
-1
drivers/net/wireless/ath/ath9k/hw.c
··· 34 34 35 35 MODULE_AUTHOR("Atheros Communications"); 36 36 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); 37 - MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); 38 37 MODULE_LICENSE("Dual BSD/GPL"); 39 38 40 39 static void ath9k_hw_set_clockrate(struct ath_hw *ah)
-1
drivers/net/wireless/ath/ath9k/init.c
··· 37 37 38 38 MODULE_AUTHOR("Atheros Communications"); 39 39 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); 40 - MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); 41 40 MODULE_LICENSE("Dual BSD/GPL"); 42 41 43 42 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-1
drivers/net/wireless/atmel/atmel.c
··· 75 75 MODULE_AUTHOR("Simon Kelley"); 76 76 MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); 77 77 MODULE_LICENSE("GPL"); 78 - MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards"); 79 78 80 79 /* The name of the firmware file to be loaded 81 80 over-rides any automatic selection */
-1
drivers/net/wireless/atmel/atmel_cs.c
··· 57 57 MODULE_AUTHOR("Simon Kelley"); 58 58 MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); 59 59 MODULE_LICENSE("GPL"); 60 - MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards"); 61 60 62 61 /*====================================================================*/ 63 62
-1
drivers/net/wireless/atmel/atmel_pci.c
··· 16 16 MODULE_AUTHOR("Simon Kelley"); 17 17 MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); 18 18 MODULE_LICENSE("GPL"); 19 - MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); 20 19 21 20 static const struct pci_device_id card_ids[] = { 22 21 { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
-1
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
··· 87 87 88 88 MODULE_AUTHOR("Broadcom Corporation"); 89 89 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); 90 - MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); 91 90 MODULE_LICENSE("Dual BSD/GPL"); 92 91 /* This needs to be adjusted when brcms_firmwares changes */ 93 92 MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
-1
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
··· 12 12 13 13 MODULE_AUTHOR("Broadcom Corporation"); 14 14 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities."); 15 - MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); 16 15 MODULE_LICENSE("Dual BSD/GPL"); 17 16 18 17 struct sk_buff *brcmu_pkt_buf_get_skb(uint len)
-1
drivers/net/wireless/cisco/airo.c
··· 251 251 MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. " 252 252 "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs."); 253 253 MODULE_LICENSE("Dual BSD/GPL"); 254 - MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); 255 254 module_param_hw_array(io, int, ioport, NULL, 0); 256 255 module_param_hw_array(irq, int, irq, NULL, 0); 257 256 module_param_array(rates, int, NULL, 0);
-1
drivers/net/wireless/cisco/airo_cs.c
··· 47 47 "cards. This is the module that links the PCMCIA card " 48 48 "with the airo module."); 49 49 MODULE_LICENSE("Dual BSD/GPL"); 50 - MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards"); 51 50 52 51 /*====================================================================*/ 53 52
-1
drivers/net/wireless/intersil/hostap/hostap_cs.c
··· 26 26 MODULE_AUTHOR("Jouni Malinen"); 27 27 MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " 28 28 "cards (PC Card)."); 29 - MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)"); 30 29 MODULE_LICENSE("GPL"); 31 30 32 31
-1
drivers/net/wireless/intersil/hostap/hostap_pci.c
··· 27 27 MODULE_AUTHOR("Jouni Malinen"); 28 28 MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN " 29 29 "PCI cards."); 30 - MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards"); 31 30 MODULE_LICENSE("GPL"); 32 31 33 32
-1
drivers/net/wireless/intersil/hostap/hostap_plx.c
··· 30 30 MODULE_AUTHOR("Jouni Malinen"); 31 31 MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " 32 32 "cards (PLX)."); 33 - MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)"); 34 33 MODULE_LICENSE("GPL"); 35 34 36 35
-1
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
··· 1821 1821 MODULE_AUTHOR(DRV_PROJECT); 1822 1822 MODULE_VERSION(DRV_VERSION); 1823 1823 MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); 1824 - MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards"); 1825 1824 MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); 1826 1825 MODULE_LICENSE("GPL"); 1827 1826
-1
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
··· 2119 2119 MODULE_AUTHOR(DRV_PROJECT); 2120 2120 MODULE_VERSION(DRV_VERSION); 2121 2121 MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver."); 2122 - MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards"); 2123 2122 MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); 2124 2123 MODULE_LICENSE("GPL"); 2125 2124
-1
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
··· 1956 1956 MODULE_AUTHOR(DRV_PROJECT); 1957 1957 MODULE_VERSION(DRV_VERSION); 1958 1958 MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); 1959 - MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards"); 1960 1959 MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); 1961 1960 MODULE_LICENSE("GPL"); 1962 1961
-1
drivers/net/wireless/ralink/rt2x00/rt2800pci.c
··· 439 439 MODULE_AUTHOR(DRV_PROJECT); 440 440 MODULE_VERSION(DRV_VERSION); 441 441 MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); 442 - MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards"); 443 442 MODULE_FIRMWARE(FIRMWARE_RT2860); 444 443 MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); 445 444 MODULE_LICENSE("GPL");
-1
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
··· 1248 1248 MODULE_AUTHOR(DRV_PROJECT); 1249 1249 MODULE_VERSION(DRV_VERSION); 1250 1250 MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver."); 1251 - MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards"); 1252 1251 MODULE_DEVICE_TABLE(usb, rt2800usb_device_table); 1253 1252 MODULE_FIRMWARE(FIRMWARE_RT2870); 1254 1253 MODULE_LICENSE("GPL");
-2
drivers/net/wireless/ralink/rt2x00/rt61pci.c
··· 2993 2993 MODULE_AUTHOR(DRV_PROJECT); 2994 2994 MODULE_VERSION(DRV_VERSION); 2995 2995 MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver."); 2996 - MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 " 2997 - "PCI & PCMCIA chipset based cards"); 2998 2996 MODULE_DEVICE_TABLE(pci, rt61pci_device_table); 2999 2997 MODULE_FIRMWARE(FIRMWARE_RT2561); 3000 2998 MODULE_FIRMWARE(FIRMWARE_RT2561s);
-1
drivers/net/wireless/ralink/rt2x00/rt73usb.c
··· 2513 2513 MODULE_AUTHOR(DRV_PROJECT); 2514 2514 MODULE_VERSION(DRV_VERSION); 2515 2515 MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); 2516 - MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards"); 2517 2516 MODULE_DEVICE_TABLE(usb, rt73usb_device_table); 2518 2517 MODULE_FIRMWARE(FIRMWARE_RT2571); 2519 2518 MODULE_LICENSE("GPL");
-1
drivers/net/wireless/rsi/rsi_91x_main.c
··· 441 441 module_exit(rsi_91x_hal_module_exit); 442 442 MODULE_AUTHOR("Redpine Signals Inc"); 443 443 MODULE_DESCRIPTION("Station driver for RSI 91x devices"); 444 - MODULE_SUPPORTED_DEVICE("RSI-91x"); 445 444 MODULE_VERSION("0.1"); 446 445 MODULE_LICENSE("Dual BSD/GPL");
-1
drivers/net/wireless/rsi/rsi_91x_sdio.c
··· 1571 1571 1572 1572 MODULE_AUTHOR("Redpine Signals Inc"); 1573 1573 MODULE_DESCRIPTION("Common SDIO layer for RSI drivers"); 1574 - MODULE_SUPPORTED_DEVICE("RSI-91x"); 1575 1574 MODULE_DEVICE_TABLE(sdio, rsi_dev_table); 1576 1575 MODULE_FIRMWARE(FIRMWARE_RSI9113); 1577 1576 MODULE_VERSION("0.1");
-1
drivers/net/wireless/rsi/rsi_91x_usb.c
··· 928 928 929 929 MODULE_AUTHOR("Redpine Signals Inc"); 930 930 MODULE_DESCRIPTION("Common USB layer for RSI drivers"); 931 - MODULE_SUPPORTED_DEVICE("RSI-91x"); 932 931 MODULE_DEVICE_TABLE(usb, rsi_dev_table); 933 932 MODULE_FIRMWARE(FIRMWARE_RSI9113); 934 933 MODULE_VERSION("0.1");
+21 -43
drivers/nvme/host/core.c
··· 1226 1226 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); 1227 1227 } 1228 1228 1229 - static int nvme_keep_alive(struct nvme_ctrl *ctrl) 1230 - { 1231 - struct request *rq; 1232 - 1233 - rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, 1234 - BLK_MQ_REQ_RESERVED); 1235 - if (IS_ERR(rq)) 1236 - return PTR_ERR(rq); 1237 - 1238 - rq->timeout = ctrl->kato * HZ; 1239 - rq->end_io_data = ctrl; 1240 - 1241 - blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); 1242 - 1243 - return 0; 1244 - } 1245 - 1246 1229 static void nvme_keep_alive_work(struct work_struct *work) 1247 1230 { 1248 1231 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 1249 1232 struct nvme_ctrl, ka_work); 1250 1233 bool comp_seen = ctrl->comp_seen; 1234 + struct request *rq; 1251 1235 1252 1236 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { 1253 1237 dev_dbg(ctrl->device, ··· 1241 1257 return; 1242 1258 } 1243 1259 1244 - if (nvme_keep_alive(ctrl)) { 1260 + rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, 1261 + BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); 1262 + if (IS_ERR(rq)) { 1245 1263 /* allocation failure, reset the controller */ 1246 - dev_err(ctrl->device, "keep-alive failed\n"); 1264 + dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); 1247 1265 nvme_reset_ctrl(ctrl); 1248 1266 return; 1249 1267 } 1268 + 1269 + rq->timeout = ctrl->kato * HZ; 1270 + rq->end_io_data = ctrl; 1271 + blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); 1250 1272 } 1251 1273 1252 1274 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) ··· 1954 1964 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1955 1965 } 1956 1966 1957 - static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) 1967 + /* 1968 + * Even though NVMe spec explicitly states that MDTS is not applicable to the 1969 + * write-zeroes, we are cautious and limit the size to the controllers 1970 + * max_hw_sectors value, which is based on the MDTS 
field and possibly other 1971 + * limiting factors. 1972 + */ 1973 + static void nvme_config_write_zeroes(struct request_queue *q, 1974 + struct nvme_ctrl *ctrl) 1958 1975 { 1959 - u64 max_blocks; 1960 - 1961 - if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || 1962 - (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 1963 - return; 1964 - /* 1965 - * Even though NVMe spec explicitly states that MDTS is not 1966 - * applicable to the write-zeroes:- "The restriction does not apply to 1967 - * commands that do not transfer data between the host and the 1968 - * controller (e.g., Write Uncorrectable ro Write Zeroes command).". 1969 - * In order to be more cautious use controller's max_hw_sectors value 1970 - * to configure the maximum sectors for the write-zeroes which is 1971 - * configured based on the controller's MDTS field in the 1972 - * nvme_init_identify() if available. 1973 - */ 1974 - if (ns->ctrl->max_hw_sectors == UINT_MAX) 1975 - max_blocks = (u64)USHRT_MAX + 1; 1976 - else 1977 - max_blocks = ns->ctrl->max_hw_sectors + 1; 1978 - 1979 - blk_queue_max_write_zeroes_sectors(disk->queue, 1980 - nvme_lba_to_sect(ns, max_blocks)); 1976 + if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && 1977 + !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 1978 + blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors); 1981 1979 } 1982 1980 1983 1981 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) ··· 2137 2159 set_capacity_and_notify(disk, capacity); 2138 2160 2139 2161 nvme_config_discard(disk, ns); 2140 - nvme_config_write_zeroes(disk, ns); 2162 + nvme_config_write_zeroes(disk->queue, ns->ctrl); 2141 2163 2142 2164 set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || 2143 2165 test_bit(NVME_NS_FORCE_RO, &ns->flags));
+7
drivers/nvme/host/fabrics.h
··· 19 19 #define NVMF_DEF_FAIL_FAST_TMO -1 20 20 21 21 /* 22 + * Reserved one command for internal usage. This command is used for sending 23 + * the connect command, as well as for the keep alive command on the admin 24 + * queue once live. 25 + */ 26 + #define NVMF_RESERVED_TAGS 1 27 + 28 + /* 22 29 * Define a host as seen by the target. We allocate one at boot, but also 23 30 * allow the override it when creating controllers. This is both to provide 24 31 * persistence of the Host NQN over multiple boots, and to allow using
+2 -2
drivers/nvme/host/fc.c
··· 2863 2863 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 2864 2864 ctrl->tag_set.ops = &nvme_fc_mq_ops; 2865 2865 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 2866 - ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 2866 + ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS; 2867 2867 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; 2868 2868 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 2869 2869 ctrl->tag_set.cmd_size = ··· 3485 3485 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); 3486 3486 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; 3487 3487 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 3488 - ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ 3488 + ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS; 3489 3489 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; 3490 3490 ctrl->admin_tag_set.cmd_size = 3491 3491 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+7 -4
drivers/nvme/host/rdma.c
··· 736 736 return ret; 737 737 738 738 ctrl->ctrl.queue_count = nr_io_queues + 1; 739 - if (ctrl->ctrl.queue_count < 2) 740 - return 0; 739 + if (ctrl->ctrl.queue_count < 2) { 740 + dev_err(ctrl->ctrl.device, 741 + "unable to set any I/O queues\n"); 742 + return -ENOMEM; 743 + } 741 744 742 745 dev_info(ctrl->ctrl.device, 743 746 "creating %d I/O queues.\n", nr_io_queues); ··· 801 798 memset(set, 0, sizeof(*set)); 802 799 set->ops = &nvme_rdma_admin_mq_ops; 803 800 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 804 - set->reserved_tags = 2; /* connect + keep-alive */ 801 + set->reserved_tags = NVMF_RESERVED_TAGS; 805 802 set->numa_node = nctrl->numa_node; 806 803 set->cmd_size = sizeof(struct nvme_rdma_request) + 807 804 NVME_RDMA_DATA_SGL_SIZE; ··· 814 811 memset(set, 0, sizeof(*set)); 815 812 set->ops = &nvme_rdma_mq_ops; 816 813 set->queue_depth = nctrl->sqsize + 1; 817 - set->reserved_tags = 1; /* fabric connect */ 814 + set->reserved_tags = NVMF_RESERVED_TAGS; 818 815 set->numa_node = nctrl->numa_node; 819 816 set->flags = BLK_MQ_F_SHOULD_MERGE; 820 817 set->cmd_size = sizeof(struct nvme_rdma_request) +
+15 -5
drivers/nvme/host/tcp.c
··· 287 287 * directly, otherwise queue io_work. Also, only do that if we 288 288 * are on the same cpu, so we don't introduce contention. 289 289 */ 290 - if (queue->io_cpu == __smp_processor_id() && 290 + if (queue->io_cpu == raw_smp_processor_id() && 291 291 sync && empty && mutex_trylock(&queue->send_mutex)) { 292 292 queue->more_requests = !last; 293 293 nvme_tcp_send_all(queue); ··· 567 567 568 568 req->pdu_len = le32_to_cpu(pdu->r2t_length); 569 569 req->pdu_sent = 0; 570 + 571 + if (unlikely(!req->pdu_len)) { 572 + dev_err(queue->ctrl->ctrl.device, 573 + "req %d r2t len is %u, probably a bug...\n", 574 + rq->tag, req->pdu_len); 575 + return -EPROTO; 576 + } 570 577 571 578 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { 572 579 dev_err(queue->ctrl->ctrl.device, ··· 1582 1575 memset(set, 0, sizeof(*set)); 1583 1576 set->ops = &nvme_tcp_admin_mq_ops; 1584 1577 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1585 - set->reserved_tags = 2; /* connect + keep-alive */ 1578 + set->reserved_tags = NVMF_RESERVED_TAGS; 1586 1579 set->numa_node = nctrl->numa_node; 1587 1580 set->flags = BLK_MQ_F_BLOCKING; 1588 1581 set->cmd_size = sizeof(struct nvme_tcp_request); ··· 1594 1587 memset(set, 0, sizeof(*set)); 1595 1588 set->ops = &nvme_tcp_mq_ops; 1596 1589 set->queue_depth = nctrl->sqsize + 1; 1597 - set->reserved_tags = 1; /* fabric connect */ 1590 + set->reserved_tags = NVMF_RESERVED_TAGS; 1598 1591 set->numa_node = nctrl->numa_node; 1599 1592 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; 1600 1593 set->cmd_size = sizeof(struct nvme_tcp_request); ··· 1752 1745 return ret; 1753 1746 1754 1747 ctrl->queue_count = nr_io_queues + 1; 1755 - if (ctrl->queue_count < 2) 1756 - return 0; 1748 + if (ctrl->queue_count < 2) { 1749 + dev_err(ctrl->device, 1750 + "unable to set any I/O queues\n"); 1751 + return -ENOMEM; 1752 + } 1757 1753 1758 1754 dev_info(ctrl->device, 1759 1755 "creating %d I/O queues.\n", nr_io_queues);
+14 -3
drivers/nvme/target/core.c
··· 1118 1118 { 1119 1119 lockdep_assert_held(&ctrl->lock); 1120 1120 1121 - if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || 1122 - nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || 1123 - nvmet_cc_mps(ctrl->cc) != 0 || 1121 + /* 1122 + * Only I/O controllers should verify iosqes,iocqes. 1123 + * Strictly speaking, the spec says a discovery controller 1124 + * should verify iosqes,iocqes are zeroed, however that 1125 + * would break backwards compatibility, so don't enforce it. 1126 + */ 1127 + if (ctrl->subsys->type != NVME_NQN_DISC && 1128 + (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || 1129 + nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { 1130 + ctrl->csts = NVME_CSTS_CFS; 1131 + return; 1132 + } 1133 + 1134 + if (nvmet_cc_mps(ctrl->cc) != 0 || 1124 1135 nvmet_cc_ams(ctrl->cc) != 0 || 1125 1136 nvmet_cc_css(ctrl->cc) != 0) { 1126 1137 ctrl->csts = NVME_CSTS_CFS;
+2 -2
drivers/nvme/target/loop.c
··· 349 349 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); 350 350 ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops; 351 351 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 352 - ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ 352 + ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS; 353 353 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; 354 354 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + 355 355 NVME_INLINE_SG_CNT * sizeof(struct scatterlist); ··· 520 520 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 521 521 ctrl->tag_set.ops = &nvme_loop_mq_ops; 522 522 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 523 - ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 523 + ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS; 524 524 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; 525 525 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 526 526 ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
+1 -1
drivers/nvme/target/tcp.c
··· 1098 1098 cmd->rbytes_done += ret; 1099 1099 } 1100 1100 1101 + nvmet_tcp_unmap_pdu_iovec(cmd); 1101 1102 if (queue->data_digest) { 1102 1103 nvmet_tcp_prep_recv_ddgst(cmd); 1103 1104 return 0; 1104 1105 } 1105 - nvmet_tcp_unmap_pdu_iovec(cmd); 1106 1106 1107 1107 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && 1108 1108 cmd->rbytes_done == cmd->req.transfer_len) {
-1
drivers/parport/parport_amiga.c
··· 241 241 242 242 MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); 243 243 MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); 244 - MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); 245 244 MODULE_LICENSE("GPL"); 246 245 MODULE_ALIAS("platform:amiga-parallel");
-1
drivers/parport/parport_atari.c
··· 218 218 219 219 MODULE_AUTHOR("Andreas Schwab"); 220 220 MODULE_DESCRIPTION("Parport Driver for Atari builtin Port"); 221 - MODULE_SUPPORTED_DEVICE("Atari builtin Parallel Port"); 222 221 MODULE_LICENSE("GPL"); 223 222 224 223 module_init(parport_atari_init)
-1
drivers/parport/parport_gsc.c
··· 41 41 42 42 MODULE_AUTHOR("Helge Deller <deller@gmx.de>"); 43 43 MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver"); 44 - MODULE_SUPPORTED_DEVICE("integrated PC-style parallel port"); 45 44 MODULE_LICENSE("GPL"); 46 45 47 46
-1
drivers/parport/parport_mfc3.c
··· 359 359 360 360 MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); 361 361 MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port"); 362 - MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port"); 363 362 MODULE_LICENSE("GPL"); 364 363 365 364 module_init(parport_mfc3_init)
-1
drivers/parport/parport_sunbpp.c
··· 377 377 378 378 MODULE_AUTHOR("Derrick J Brashear"); 379 379 MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port"); 380 - MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port"); 381 380 MODULE_VERSION("2.0"); 382 381 MODULE_LICENSE("GPL");
+6 -8
drivers/pci/hotplug/rpadlpar_sysfs.c
··· 34 34 if (nbytes >= MAX_DRC_NAME_LEN) 35 35 return 0; 36 36 37 - memcpy(drc_name, buf, nbytes); 37 + strscpy(drc_name, buf, nbytes + 1); 38 38 39 39 end = strchr(drc_name, '\n'); 40 - if (!end) 41 - end = &drc_name[nbytes]; 42 - *end = '\0'; 40 + if (end) 41 + *end = '\0'; 43 42 44 43 rc = dlpar_add_slot(drc_name); 45 44 if (rc) ··· 64 65 if (nbytes >= MAX_DRC_NAME_LEN) 65 66 return 0; 66 67 67 - memcpy(drc_name, buf, nbytes); 68 + strscpy(drc_name, buf, nbytes + 1); 68 69 69 70 end = strchr(drc_name, '\n'); 70 - if (!end) 71 - end = &drc_name[nbytes]; 72 - *end = '\0'; 71 + if (end) 72 + *end = '\0'; 73 73 74 74 rc = dlpar_remove_slot(drc_name); 75 75 if (rc)
+2 -1
drivers/pci/hotplug/s390_pci_hpc.c
··· 93 93 pci_dev_put(pdev); 94 94 return -EBUSY; 95 95 } 96 + pci_dev_put(pdev); 96 97 97 - zpci_remove_device(zdev); 98 + zpci_remove_device(zdev, false); 98 99 99 100 rc = zpci_disable_device(zdev); 100 101 if (rc)
-1
drivers/s390/block/dasd.c
··· 63 63 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 64 64 MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 65 65 " Copyright IBM Corp. 2000"); 66 - MODULE_SUPPORTED_DEVICE("dasd"); 67 66 MODULE_LICENSE("GPL"); 68 67 69 68 /*
-1
drivers/sbus/char/display7seg.c
··· 50 50 MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); 51 51 MODULE_DESCRIPTION("7-Segment Display driver for Sun Microsystems CP1400/1500"); 52 52 MODULE_LICENSE("GPL"); 53 - MODULE_SUPPORTED_DEVICE("d7s"); 54 53 55 54 struct d7s { 56 55 void __iomem *regs;
-1
drivers/scsi/hpsa.c
··· 80 80 MODULE_AUTHOR("Hewlett-Packard Company"); 81 81 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ 82 82 HPSA_DRIVER_VERSION); 83 - MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); 84 83 MODULE_VERSION(HPSA_DRIVER_VERSION); 85 84 MODULE_LICENSE("GPL"); 86 85 MODULE_ALIAS("cciss");
+2
drivers/scsi/ibmvscsi/ibmvfc.c
··· 5784 5784 vhost->disc_buf_dma); 5785 5785 dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf), 5786 5786 vhost->login_buf, vhost->login_buf_dma); 5787 + dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf), 5788 + vhost->channel_setup_buf, vhost->channel_setup_dma); 5787 5789 dma_pool_destroy(vhost->sg_pool); 5788 5790 ibmvfc_free_queue(vhost, async_q); 5789 5791 LEAVE;
+2 -2
drivers/scsi/lpfc/lpfc_debugfs.c
··· 2421 2421 memset(dstbuf, 0, 33); 2422 2422 size = (nbytes < 32) ? nbytes : 32; 2423 2423 if (copy_from_user(dstbuf, buf, size)) 2424 - return 0; 2424 + return -EFAULT; 2425 2425 2426 2426 if (dent == phba->debug_InjErrLBA) { 2427 2427 if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && ··· 2430 2430 } 2431 2431 2432 2432 if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) 2433 - return 0; 2433 + return -EINVAL; 2434 2434 2435 2435 if (dent == phba->debug_writeGuard) 2436 2436 phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
+1 -1
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 413 413 * And add this object to port_table_list. 414 414 */ 415 415 if (!ioc->multipath_on_hba) { 416 - port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); 416 + port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC); 417 417 if (!port) 418 418 return NULL; 419 419
+1 -1
drivers/scsi/myrs.c
··· 2273 2273 if (cs->mmio_base) { 2274 2274 cs->disable_intr(cs); 2275 2275 iounmap(cs->mmio_base); 2276 + cs->mmio_base = NULL; 2276 2277 } 2277 2278 if (cs->irq) 2278 2279 free_irq(cs->irq, cs); 2279 2280 if (cs->io_addr) 2280 2281 release_region(cs->io_addr, 0x80); 2281 - iounmap(cs->mmio_base); 2282 2282 pci_set_drvdata(pdev, NULL); 2283 2283 pci_disable_device(pdev); 2284 2284 scsi_host_put(cs->host);
-1
drivers/scsi/pcmcia/nsp_cs.c
··· 55 55 56 56 MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>"); 57 57 MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); 58 - MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); 59 58 MODULE_LICENSE("GPL"); 60 59 61 60 #include "nsp_io.h"
+1 -1
drivers/scsi/qla2xxx/qla_target.h
··· 116 116 (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ 117 117 QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) 118 118 #endif 119 - #endif 120 119 121 120 #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ 122 121 ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ ··· 243 244 #ifndef CTIO_RET_TYPE 244 245 #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ 245 246 #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ 247 + #endif 246 248 247 249 struct fcp_hdr { 248 250 uint8_t r_ctl;
+11 -8
drivers/scsi/sd_zbc.c
··· 280 280 static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) 281 281 { 282 282 struct scsi_disk *sdkp; 283 + unsigned long flags; 283 284 unsigned int zno; 284 285 int ret; 285 286 286 287 sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); 287 288 288 - spin_lock_bh(&sdkp->zones_wp_offset_lock); 289 + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); 289 290 for (zno = 0; zno < sdkp->nr_zones; zno++) { 290 291 if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) 291 292 continue; 292 293 293 - spin_unlock_bh(&sdkp->zones_wp_offset_lock); 294 + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); 294 295 ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf, 295 296 SD_BUF_SIZE, 296 297 zno * sdkp->zone_blocks, true); 297 - spin_lock_bh(&sdkp->zones_wp_offset_lock); 298 + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); 298 299 if (!ret) 299 300 sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64, 300 301 zno, sd_zbc_update_wp_offset_cb, 301 302 sdkp); 302 303 } 303 - spin_unlock_bh(&sdkp->zones_wp_offset_lock); 304 + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); 304 305 305 306 scsi_device_put(sdkp->device); 306 307 } ··· 325 324 struct request *rq = cmd->request; 326 325 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 327 326 unsigned int wp_offset, zno = blk_rq_zone_no(rq); 327 + unsigned long flags; 328 328 blk_status_t ret; 329 329 330 330 ret = sd_zbc_cmnd_checks(cmd); ··· 339 337 if (!blk_req_zone_write_trylock(rq)) 340 338 return BLK_STS_ZONE_RESOURCE; 341 339 342 - spin_lock_bh(&sdkp->zones_wp_offset_lock); 340 + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); 343 341 wp_offset = sdkp->zones_wp_offset[zno]; 344 342 switch (wp_offset) { 345 343 case SD_ZBC_INVALID_WP_OFST: ··· 368 366 369 367 *lba += wp_offset; 370 368 } 371 - spin_unlock_bh(&sdkp->zones_wp_offset_lock); 369 + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); 372 370 if (ret) 373 371 
blk_req_zone_write_unlock(rq); 374 372 return ret; ··· 447 445 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 448 446 unsigned int zno = blk_rq_zone_no(rq); 449 447 enum req_opf op = req_op(rq); 448 + unsigned long flags; 450 449 451 450 /* 452 451 * If we got an error for a command that needs updating the write ··· 455 452 * invalid to force an update from disk the next time a zone append 456 453 * command is issued. 457 454 */ 458 - spin_lock_bh(&sdkp->zones_wp_offset_lock); 455 + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); 459 456 460 457 if (result && op != REQ_OP_ZONE_RESET_ALL) { 461 458 if (op == REQ_OP_ZONE_APPEND) { ··· 499 496 } 500 497 501 498 unlock_wp_offset: 502 - spin_unlock_bh(&sdkp->zones_wp_offset_lock); 499 + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); 503 500 504 501 return good_bytes; 505 502 }
-1
drivers/scsi/smartpqi/smartpqi_init.c
··· 48 48 MODULE_AUTHOR("Microsemi"); 49 49 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " 50 50 DRIVER_VERSION); 51 - MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); 52 51 MODULE_VERSION(DRIVER_VERSION); 53 52 MODULE_LICENSE("GPL"); 54 53
+1 -1
drivers/scsi/st.c
··· 1269 1269 spin_lock(&st_use_lock); 1270 1270 if (STp->in_use) { 1271 1271 spin_unlock(&st_use_lock); 1272 - scsi_tape_put(STp); 1273 1272 DEBC_printk(STp, "Device already in use.\n"); 1273 + scsi_tape_put(STp); 1274 1274 return (-EBUSY); 1275 1275 } 1276 1276
+1 -1
drivers/scsi/ufs/ufs-mediatek.c
··· 911 911 if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc) 912 912 return; 913 913 914 - if (lpm & !hba->vreg_info.vcc->enabled) 914 + if (lpm && !hba->vreg_info.vcc->enabled) 915 915 regulator_set_mode(hba->vreg_info.vccq2->reg, 916 916 REGULATOR_MODE_IDLE); 917 917 else if (!lpm)
-1
drivers/sh/maple/maple.c
··· 30 30 MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); 31 31 MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); 32 32 MODULE_LICENSE("GPL v2"); 33 - MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); 34 33 35 34 static void maple_dma_handler(struct work_struct *work); 36 35 static void maple_vblank_handler(struct work_struct *work);
+1
drivers/spi/spi-cadence-quadspi.c
··· 1433 1433 cqspi = spi_master_get_devdata(master); 1434 1434 1435 1435 cqspi->pdev = pdev; 1436 + platform_set_drvdata(pdev, cqspi); 1436 1437 1437 1438 /* Obtain configuration from OF. */ 1438 1439 ret = cqspi_of_get_pdata(cqspi);
+1 -1
drivers/staging/comedi/drivers/cb_pcidas.c
··· 1281 1281 devpriv->amcc + AMCC_OP_REG_INTCSR); 1282 1282 1283 1283 ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED, 1284 - dev->board_name, dev); 1284 + "cb_pcidas", dev); 1285 1285 if (ret) { 1286 1286 dev_dbg(dev->class_dev, "unable to allocate irq %d\n", 1287 1287 pcidev->irq);
+1 -1
drivers/staging/comedi/drivers/cb_pcidas64.c
··· 4035 4035 init_stc_registers(dev); 4036 4036 4037 4037 retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED, 4038 - dev->board_name, dev); 4038 + "cb_pcidas64", dev); 4039 4039 if (retval) { 4040 4040 dev_dbg(dev->class_dev, "unable to allocate irq %u\n", 4041 4041 pcidev->irq);
-1
drivers/staging/comedi/drivers/vmk80xx.c
··· 877 877 878 878 MODULE_AUTHOR("Manuel Gebele <forensixs@gmx.de>"); 879 879 MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver"); 880 - MODULE_SUPPORTED_DEVICE("K8055/K8061 aka VM110/VM140"); 881 880 MODULE_LICENSE("GPL");
+2 -2
drivers/staging/vt6655/rxtx.h
··· 150 150 u16 reserved; 151 151 struct ieee80211_cts data; 152 152 u16 reserved2; 153 - } __packed; 153 + } __packed __aligned(2); 154 154 155 155 struct vnt_cts_fb { 156 156 struct vnt_phy_field b; ··· 160 160 __le16 cts_duration_ba_f1; 161 161 struct ieee80211_cts data; 162 162 u16 reserved2; 163 - } __packed; 163 + } __packed __aligned(2); 164 164 165 165 struct vnt_tx_fifo_head { 166 166 u8 tx_key[WLAN_KEY_LEN_CCMP];
-1
drivers/tee/optee/core.c
··· 747 747 748 748 MODULE_AUTHOR("Linaro"); 749 749 MODULE_DESCRIPTION("OP-TEE driver"); 750 - MODULE_SUPPORTED_DEVICE(""); 751 750 MODULE_VERSION("1.0"); 752 751 MODULE_LICENSE("GPL v2"); 753 752 MODULE_ALIAS("platform:optee");
+3
drivers/thermal/thermal_sysfs.c
··· 674 674 { 675 675 struct cooling_dev_stats *stats = cdev->stats; 676 676 677 + if (!stats) 678 + return; 679 + 677 680 spin_lock(&stats->lock); 678 681 679 682 if (stats->state == new_state)
+8 -10
drivers/thunderbolt/switch.c
··· 768 768 769 769 tb_dump_port(port->sw->tb, &port->config); 770 770 771 - /* Control port does not need HopID allocation */ 772 - if (port->port) { 773 - ida_init(&port->in_hopids); 774 - ida_init(&port->out_hopids); 775 - } 776 - 777 771 INIT_LIST_HEAD(&port->list); 778 772 return 0; 779 773 ··· 1836 1842 dma_port_free(sw->dma_port); 1837 1843 1838 1844 tb_switch_for_each_port(sw, port) { 1839 - if (!port->disabled) { 1840 - ida_destroy(&port->in_hopids); 1841 - ida_destroy(&port->out_hopids); 1842 - } 1845 + ida_destroy(&port->in_hopids); 1846 + ida_destroy(&port->out_hopids); 1843 1847 } 1844 1848 1845 1849 kfree(sw->uuid); ··· 2017 2025 /* minimum setup for tb_find_cap and tb_drom_read to work */ 2018 2026 sw->ports[i].sw = sw; 2019 2027 sw->ports[i].port = i; 2028 + 2029 + /* Control port does not need HopID allocation */ 2030 + if (i) { 2031 + ida_init(&sw->ports[i].in_hopids); 2032 + ida_init(&sw->ports[i].out_hopids); 2033 + } 2020 2034 } 2021 2035 2022 2036 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+4
drivers/thunderbolt/tb.c
··· 138 138 parent->boot = true; 139 139 parent = tb_switch_parent(parent); 140 140 } 141 + } else if (tb_tunnel_is_dp(tunnel)) { 142 + /* Keep the domain from powering down */ 143 + pm_runtime_get_sync(&tunnel->src_port->sw->dev); 144 + pm_runtime_get_sync(&tunnel->dst_port->sw->dev); 141 145 } 142 146 143 147 list_add_tail(&tunnel->list, &tcm->tunnel_list);
-2
drivers/tty/serial/icom.c
··· 1639 1639 1640 1640 MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>"); 1641 1641 MODULE_DESCRIPTION("IBM iSeries Serial IOA driver"); 1642 - MODULE_SUPPORTED_DEVICE 1643 - ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters"); 1644 1642 MODULE_LICENSE("GPL"); 1645 1643 MODULE_FIRMWARE("icom_call_setup.bin"); 1646 1644 MODULE_FIRMWARE("icom_res_dce.bin");
-1
drivers/tty/serial/jsm/jsm_driver.c
··· 19 19 MODULE_AUTHOR("Digi International, https://www.digi.com"); 20 20 MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line"); 21 21 MODULE_LICENSE("GPL"); 22 - MODULE_SUPPORTED_DEVICE("jsm"); 23 22 24 23 #define JSM_DRIVER_NAME "jsm" 25 24 #define NR_PORTS 32
+4 -1
drivers/usb/cdns3/cdnsp-ring.c
··· 2197 2197 * inverted in the first TDs isoc TRB. 2198 2198 */ 2199 2199 field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) | 2200 - start_cycle ? 0 : 1 | TRB_SIA | TRB_TBC(burst_count); 2200 + TRB_SIA | TRB_TBC(burst_count); 2201 + 2202 + if (!start_cycle) 2203 + field |= TRB_CYCLE; 2201 2204 2202 2205 /* Fill the rest of the TRB fields, and remaining normal TRBs. */ 2203 2206 for (i = 0; i < trbs_per_td; i++) {
+5 -6
drivers/usb/dwc3/gadget.c
··· 783 783 784 784 trace_dwc3_gadget_ep_disable(dep); 785 785 786 - dwc3_remove_requests(dwc, dep); 787 - 788 786 /* make sure HW endpoint isn't stalled */ 789 787 if (dep->flags & DWC3_EP_STALL) 790 788 __dwc3_gadget_ep_set_halt(dep, 0, false); ··· 800 802 dep->endpoint.comp_desc = NULL; 801 803 dep->endpoint.desc = NULL; 802 804 } 805 + 806 + dwc3_remove_requests(dwc, dep); 803 807 804 808 return 0; 805 809 } ··· 1617 1617 { 1618 1618 struct dwc3 *dwc = dep->dwc; 1619 1619 1620 - if (!dep->endpoint.desc || !dwc->pullups_connected) { 1620 + if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) { 1621 1621 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", 1622 1622 dep->name); 1623 1623 return -ESHUTDOWN; ··· 2247 2247 if (!is_on) { 2248 2248 u32 count; 2249 2249 2250 + dwc->connected = false; 2250 2251 /* 2251 2252 * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a 2252 2253 * Section 4.1.8 Table 4-7, it states that for a device-initiated ··· 2272 2271 dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % 2273 2272 dwc->ev_buf->length; 2274 2273 } 2275 - dwc->connected = false; 2276 2274 } else { 2277 2275 __dwc3_gadget_start(dwc); 2278 2276 } ··· 3329 3329 { 3330 3330 u32 reg; 3331 3331 3332 - dwc->connected = true; 3333 - 3334 3332 /* 3335 3333 * WORKAROUND: DWC3 revisions <1.88a have an issue which 3336 3334 * would cause a missing Disconnect Event if there's a ··· 3368 3370 * transfers." 3369 3371 */ 3370 3372 dwc3_stop_active_transfers(dwc); 3373 + dwc->connected = true; 3371 3374 3372 3375 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 3373 3376 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
+10 -4
drivers/usb/gadget/configfs.c
··· 97 97 struct list_head list; 98 98 }; 99 99 100 + #define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1) 101 + 100 102 static int usb_string_copy(const char *s, char **s_copy) 101 103 { 102 104 int ret; ··· 108 106 if (ret > USB_MAX_STRING_LEN) 109 107 return -EOVERFLOW; 110 108 111 - str = kstrdup(s, GFP_KERNEL); 112 - if (!str) 113 - return -ENOMEM; 109 + if (copy) { 110 + str = copy; 111 + } else { 112 + str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL); 113 + if (!str) 114 + return -ENOMEM; 115 + } 116 + strcpy(str, s); 114 117 if (str[ret - 1] == '\n') 115 118 str[ret - 1] = '\0'; 116 - kfree(copy); 117 119 *s_copy = str; 118 120 return 0; 119 121 }
-1
drivers/usb/misc/ldusb.c
··· 117 117 MODULE_AUTHOR("Michael Hund <mhund@ld-didactic.de>"); 118 118 MODULE_DESCRIPTION("LD USB Driver"); 119 119 MODULE_LICENSE("GPL"); 120 - MODULE_SUPPORTED_DEVICE("LD USB Devices"); 121 120 122 121 /* All interrupt in transfers are collected in a ring buffer to 123 122 * avoid racing conditions and get better performance of the driver.
+7
drivers/usb/storage/transport.c
··· 656 656 need_auto_sense = 1; 657 657 } 658 658 659 + /* Some devices (Kindle) require another command after SYNC CACHE */ 660 + if ((us->fflags & US_FL_SENSE_AFTER_SYNC) && 661 + srb->cmnd[0] == SYNCHRONIZE_CACHE) { 662 + usb_stor_dbg(us, "-- sense after SYNC CACHE\n"); 663 + need_auto_sense = 1; 664 + } 665 + 659 666 /* 660 667 * If we have a failure, we're going to do a REQUEST_SENSE 661 668 * automatically. Note that we differentiate between a command
+12
drivers/usb/storage/unusual_devs.h
··· 2212 2212 US_FL_NO_READ_DISC_INFO ), 2213 2213 2214 2214 /* 2215 + * Reported by Matthias Schwarzott <zzam@gentoo.org> 2216 + * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that 2217 + * the host may be finished with it, and automatically ejects its 2218 + * emulated media unless it receives another command within one second. 2219 + */ 2220 + UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999, 2221 + "Amazon", 2222 + "Kindle", 2223 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2224 + US_FL_SENSE_AFTER_SYNC ), 2225 + 2226 + /* 2215 2227 * Reported by Oliver Neukum <oneukum@suse.com> 2216 2228 * This device morphes spontaneously into another device if the access 2217 2229 * pattern of Windows isn't followed. Thus writable media would be dirty
+9 -2
drivers/usb/typec/tcpm/tcpm.c
··· 945 945 946 946 port->supply_voltage = mv; 947 947 port->current_limit = max_ma; 948 + power_supply_changed(port->psy); 948 949 949 950 if (port->tcpc->set_current_limit) 950 951 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); ··· 2932 2931 2933 2932 port->pps_data.supported = false; 2934 2933 port->usb_type = POWER_SUPPLY_USB_TYPE_PD; 2934 + power_supply_changed(port->psy); 2935 2935 2936 2936 /* 2937 2937 * Select the source PDO providing the most power which has a ··· 2957 2955 port->pps_data.supported = true; 2958 2956 port->usb_type = 2959 2957 POWER_SUPPLY_USB_TYPE_PD_PPS; 2958 + power_supply_changed(port->psy); 2960 2959 } 2961 2960 continue; 2962 2961 default: ··· 3115 3112 port->pps_data.out_volt)); 3116 3113 port->pps_data.op_curr = min(port->pps_data.max_curr, 3117 3114 port->pps_data.op_curr); 3115 + power_supply_changed(port->psy); 3118 3116 } 3119 3117 3120 3118 return src_pdo; ··· 3351 3347 return ret; 3352 3348 } 3353 3349 port->vbus_charge = charge; 3350 + power_supply_changed(port->psy); 3354 3351 return 0; 3355 3352 } 3356 3353 ··· 3535 3530 port->try_src_count = 0; 3536 3531 port->try_snk_count = 0; 3537 3532 port->usb_type = POWER_SUPPLY_USB_TYPE_C; 3533 + power_supply_changed(port->psy); 3538 3534 port->nr_sink_caps = 0; 3539 3535 port->sink_cap_done = false; 3540 3536 if (port->tcpc->enable_frs) ··· 5225 5219 goto unlock; 5226 5220 5227 5221 /* Send when the state machine is idle */ 5228 - if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover) 5222 + if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover) 5229 5223 goto resched; 5230 5224 5231 5225 port->upcoming_state = GET_SINK_CAP; ··· 5963 5957 ret = -EINVAL; 5964 5958 break; 5965 5959 } 5966 - 5960 + power_supply_changed(port->psy); 5967 5961 return ret; 5968 5962 } 5969 5963 ··· 6116 6110 err = devm_tcpm_psy_register(port); 6117 6111 if (err) 6118 6112 goto out_role_sw_put; 6113 + power_supply_changed(port->psy); 
6119 6114 6120 6115 port->typec_port = typec_register_port(port->dev, &port->typec_caps); 6121 6116 if (IS_ERR(port->typec_port)) {
-1
drivers/usb/typec/tipd/core.c
··· 53 53 struct tps6598x_rx_identity_reg { 54 54 u8 status; 55 55 struct usb_pd_identity identity; 56 - u32 vdo[3]; 57 56 } __packed; 58 57 59 58 /* Standard Task return codes */
+1 -1
drivers/usb/usbip/vudc_sysfs.c
··· 174 174 175 175 udc->ud.tcp_socket = socket; 176 176 udc->ud.tcp_rx = tcp_rx; 177 - udc->ud.tcp_rx = tcp_tx; 177 + udc->ud.tcp_tx = tcp_tx; 178 178 udc->ud.status = SDEV_ST_USED; 179 179 180 180 spin_unlock_irq(&udc->ud.lock);
+2 -3
drivers/vdpa/ifcvf/ifcvf_main.c
··· 431 431 } 432 432 433 433 adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, 434 - dev, &ifc_vdpa_ops, 435 - IFCVF_MAX_QUEUE_PAIRS * 2, NULL); 434 + dev, &ifc_vdpa_ops, NULL); 436 435 if (adapter == NULL) { 437 436 IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); 438 437 return -ENOMEM; ··· 455 456 for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) 456 457 vf->vring[i].irq = -EINVAL; 457 458 458 - ret = vdpa_register_device(&adapter->vdpa); 459 + ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2); 459 460 if (ret) { 460 461 IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); 461 462 goto err;
+2 -2
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 1982 1982 max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); 1983 1983 1984 1984 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, 1985 - 2 * mlx5_vdpa_max_qps(max_vqs), NULL); 1985 + NULL); 1986 1986 if (IS_ERR(ndev)) 1987 1987 return PTR_ERR(ndev); 1988 1988 ··· 2009 2009 if (err) 2010 2010 goto err_res; 2011 2011 2012 - err = vdpa_register_device(&mvdev->vdev); 2012 + err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs)); 2013 2013 if (err) 2014 2014 goto err_reg; 2015 2015
+10 -8
drivers/vdpa/vdpa.c
··· 69 69 * initialized but before registered. 70 70 * @parent: the parent device 71 71 * @config: the bus operations that is supported by this device 72 - * @nvqs: number of virtqueues supported by this device 73 72 * @size: size of the parent structure that contains private data 74 73 * @name: name of the vdpa device; optional. 75 74 * ··· 80 81 */ 81 82 struct vdpa_device *__vdpa_alloc_device(struct device *parent, 82 83 const struct vdpa_config_ops *config, 83 - int nvqs, size_t size, const char *name) 84 + size_t size, const char *name) 84 85 { 85 86 struct vdpa_device *vdev; 86 87 int err = -EINVAL; ··· 106 107 vdev->index = err; 107 108 vdev->config = config; 108 109 vdev->features_valid = false; 109 - vdev->nvqs = nvqs; 110 110 111 111 if (name) 112 112 err = dev_set_name(&vdev->dev, "%s", name); ··· 134 136 return (strcmp(dev_name(&vdev->dev), data) == 0); 135 137 } 136 138 137 - static int __vdpa_register_device(struct vdpa_device *vdev) 139 + static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs) 138 140 { 139 141 struct device *dev; 142 + 143 + vdev->nvqs = nvqs; 140 144 141 145 lockdep_assert_held(&vdpa_dev_mutex); 142 146 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match); ··· 155 155 * Caller must invoke this routine in the management device dev_add() 156 156 * callback after setting up valid mgmtdev for this vdpa device. 
157 157 * @vdev: the vdpa device to be registered to vDPA bus 158 + * @nvqs: number of virtqueues supported by this device 158 159 * 159 160 * Returns an error when fail to add device to vDPA bus 160 161 */ 161 - int _vdpa_register_device(struct vdpa_device *vdev) 162 + int _vdpa_register_device(struct vdpa_device *vdev, int nvqs) 162 163 { 163 164 if (!vdev->mdev) 164 165 return -EINVAL; 165 166 166 - return __vdpa_register_device(vdev); 167 + return __vdpa_register_device(vdev, nvqs); 167 168 } 168 169 EXPORT_SYMBOL_GPL(_vdpa_register_device); 169 170 ··· 172 171 * vdpa_register_device - register a vDPA device 173 172 * Callers must have a succeed call of vdpa_alloc_device() before. 174 173 * @vdev: the vdpa device to be registered to vDPA bus 174 + * @nvqs: number of virtqueues supported by this device 175 175 * 176 176 * Returns an error when fail to add to vDPA bus 177 177 */ 178 - int vdpa_register_device(struct vdpa_device *vdev) 178 + int vdpa_register_device(struct vdpa_device *vdev, int nvqs) 179 179 { 180 180 int err; 181 181 182 182 mutex_lock(&vdpa_dev_mutex); 183 - err = __vdpa_register_device(vdev); 183 + err = __vdpa_register_device(vdev, nvqs); 184 184 mutex_unlock(&vdpa_dev_mutex); 185 185 return err; 186 186 }
+1 -1
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 235 235 ops = &vdpasim_config_ops; 236 236 237 237 vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 238 - dev_attr->nvqs, dev_attr->name); 238 + dev_attr->name); 239 239 if (!vdpasim) 240 240 goto err_alloc; 241 241
+2 -3
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
··· 110 110 111 111 static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config) 112 112 { 113 - struct virtio_net_config *net_config = 114 - (struct virtio_net_config *)config; 113 + struct virtio_net_config *net_config = config; 115 114 116 115 net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500); 117 116 net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP); ··· 146 147 if (IS_ERR(simdev)) 147 148 return PTR_ERR(simdev); 148 149 149 - ret = _vdpa_register_device(&simdev->vdpa); 150 + ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM); 150 151 if (ret) 151 152 goto reg_err; 152 153
+2 -2
drivers/vfio/Kconfig
··· 21 21 22 22 menuconfig VFIO 23 23 tristate "VFIO Non-Privileged userspace driver framework" 24 - depends on IOMMU_API 25 - select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64) 24 + select IOMMU_API 25 + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) 26 26 help 27 27 VFIO provides a framework for secure userspace device drivers. 28 28 See Documentation/driver-api/vfio.rst for more details.
+2 -2
drivers/vfio/platform/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config VFIO_PLATFORM 3 3 tristate "VFIO support for platform devices" 4 - depends on VFIO && EVENTFD && (ARM || ARM64) 4 + depends on VFIO && EVENTFD && (ARM || ARM64 || COMPILE_TEST) 5 5 select VFIO_VIRQFD 6 6 help 7 7 Support for platform devices with VFIO. This is required to make ··· 12 12 13 13 config VFIO_AMBA 14 14 tristate "VFIO support for AMBA devices" 15 - depends on VFIO_PLATFORM && ARM_AMBA 15 + depends on VFIO_PLATFORM && (ARM_AMBA || COMPILE_TEST) 16 16 help 17 17 Support for ARM AMBA devices with VFIO. This is required to make 18 18 use of ARM AMBA devices present on the system using the VFIO
+12 -8
drivers/vfio/vfio_iommu_type1.c
··· 189 189 } 190 190 191 191 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, 192 - dma_addr_t start, size_t size) 192 + dma_addr_t start, u64 size) 193 193 { 194 194 struct rb_node *res = NULL; 195 195 struct rb_node *node = iommu->dma_list.rb_node; ··· 785 785 return -ENODEV; 786 786 787 787 ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); 788 - if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { 788 + if (ret != 1) 789 + goto out; 790 + 791 + ret = 0; 792 + 793 + if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { 789 794 ret = vfio_lock_acct(dma, 1, true); 790 795 if (ret) { 791 796 put_pfn(*pfn_base, dma->prot); ··· 802 797 } 803 798 } 804 799 800 + out: 805 801 mmput(mm); 806 802 return ret; 807 803 } ··· 1294 1288 int ret = -EINVAL, retries = 0; 1295 1289 unsigned long pgshift; 1296 1290 dma_addr_t iova = unmap->iova; 1297 - unsigned long size = unmap->size; 1291 + u64 size = unmap->size; 1298 1292 bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; 1299 1293 bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; 1300 1294 struct rb_node *n, *first_n; ··· 1310 1304 if (unmap_all) { 1311 1305 if (iova || size) 1312 1306 goto unlock; 1313 - size = SIZE_MAX; 1314 - } else if (!size || size & (pgsize - 1)) { 1307 + size = U64_MAX; 1308 + } else if (!size || size & (pgsize - 1) || 1309 + iova + size - 1 < iova || size > SIZE_MAX) { 1315 1310 goto unlock; 1316 1311 } 1317 - 1318 - if (iova + size - 1 < iova || size > SIZE_MAX) 1319 - goto unlock; 1320 1312 1321 1313 /* When dirty tracking is enabled, allow only min supported pgsize */ 1322 1314 if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
+11 -9
drivers/vhost/vdpa.c
··· 308 308 309 309 static void vhost_vdpa_config_put(struct vhost_vdpa *v) 310 310 { 311 - if (v->config_ctx) 311 + if (v->config_ctx) { 312 312 eventfd_ctx_put(v->config_ctx); 313 + v->config_ctx = NULL; 314 + } 313 315 } 314 316 315 317 static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) ··· 331 329 if (!IS_ERR_OR_NULL(ctx)) 332 330 eventfd_ctx_put(ctx); 333 331 334 - if (IS_ERR(v->config_ctx)) 335 - return PTR_ERR(v->config_ctx); 332 + if (IS_ERR(v->config_ctx)) { 333 + long ret = PTR_ERR(v->config_ctx); 334 + 335 + v->config_ctx = NULL; 336 + return ret; 337 + } 336 338 337 339 v->vdpa->config->set_config_cb(v->vdpa, &cb); 338 340 ··· 906 900 907 901 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v) 908 902 { 909 - struct vhost_virtqueue *vq; 910 903 int i; 911 904 912 - for (i = 0; i < v->nvqs; i++) { 913 - vq = &v->vqs[i]; 914 - if (vq->call_ctx.producer.irq) 915 - irq_bypass_unregister_producer(&vq->call_ctx.producer); 916 - } 905 + for (i = 0; i < v->nvqs; i++) 906 + vhost_vdpa_unsetup_vq_irq(v, i); 917 907 } 918 908 919 909 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+1 -1
drivers/vhost/vhost.c
··· 332 332 vq->error_ctx = NULL; 333 333 vq->kick = NULL; 334 334 vq->log_ctx = NULL; 335 - vhost_reset_is_le(vq); 336 335 vhost_disable_cross_endian(vq); 336 + vhost_reset_is_le(vq); 337 337 vq->busyloop_timeout = 0; 338 338 vq->umem = NULL; 339 339 vq->iotlb = NULL;
+2 -4
drivers/virtio/virtio.c
··· 141 141 } 142 142 EXPORT_SYMBOL_GPL(virtio_config_changed); 143 143 144 - void virtio_config_disable(struct virtio_device *dev) 144 + static void virtio_config_disable(struct virtio_device *dev) 145 145 { 146 146 spin_lock_irq(&dev->config_lock); 147 147 dev->config_enabled = false; 148 148 spin_unlock_irq(&dev->config_lock); 149 149 } 150 - EXPORT_SYMBOL_GPL(virtio_config_disable); 151 150 152 - void virtio_config_enable(struct virtio_device *dev) 151 + static void virtio_config_enable(struct virtio_device *dev) 153 152 { 154 153 spin_lock_irq(&dev->config_lock); 155 154 dev->config_enabled = true; ··· 157 158 dev->config_change_pending = false; 158 159 spin_unlock_irq(&dev->config_lock); 159 160 } 160 - EXPORT_SYMBOL_GPL(virtio_config_enable); 161 161 162 162 void virtio_add_status(struct virtio_device *dev, unsigned int status) 163 163 {
+1 -2
drivers/virtio/virtio_mmio.c
··· 548 548 { 549 549 struct virtio_device *vdev = 550 550 container_of(_d, struct virtio_device, dev); 551 - struct virtio_mmio_device *vm_dev = 552 - container_of(vdev, struct virtio_mmio_device, vdev); 551 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 553 552 struct platform_device *pdev = vm_dev->pdev; 554 553 555 554 devm_kfree(&pdev->dev, vm_dev);
-1
drivers/watchdog/cpu5wdt.c
··· 273 273 274 274 MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>"); 275 275 MODULE_DESCRIPTION("sma cpu5 watchdog driver"); 276 - MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog"); 277 276 MODULE_LICENSE("GPL"); 278 277 279 278 module_param_hw(port, int, ioport, 0);
-1
drivers/watchdog/cpwd.c
··· 172 172 MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); 173 173 MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500"); 174 174 MODULE_LICENSE("GPL"); 175 - MODULE_SUPPORTED_DEVICE("watchdog"); 176 175 177 176 static void cpwd_writew(u16 val, void __iomem *addr) 178 177 {
-1
drivers/watchdog/riowd.c
··· 46 46 47 47 MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); 48 48 MODULE_DESCRIPTION("Hardware watchdog driver for Sun RIO"); 49 - MODULE_SUPPORTED_DEVICE("watchdog"); 50 49 MODULE_LICENSE("GPL"); 51 50 52 51 #define DRIVER_NAME "riowd"
-1
fs/afs/dir.c
··· 70 70 .permission = afs_permission, 71 71 .getattr = afs_getattr, 72 72 .setattr = afs_setattr, 73 - .listxattr = afs_listxattr, 74 73 }; 75 74 76 75 const struct address_space_operations afs_dir_aops = {
-1
fs/afs/file.c
··· 43 43 .getattr = afs_getattr, 44 44 .setattr = afs_setattr, 45 45 .permission = afs_permission, 46 - .listxattr = afs_listxattr, 47 46 }; 48 47 49 48 const struct address_space_operations afs_fs_aops = {
+5 -2
fs/afs/fs_operation.c
··· 181 181 if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) && 182 182 op->ops->issue_yfs_rpc) 183 183 op->ops->issue_yfs_rpc(op); 184 - else 184 + else if (op->ops->issue_afs_rpc) 185 185 op->ops->issue_afs_rpc(op); 186 + else 187 + op->ac.error = -ENOTSUPP; 186 188 187 - op->error = afs_wait_for_call_to_complete(op->call, &op->ac); 189 + if (op->call) 190 + op->error = afs_wait_for_call_to_complete(op->call, &op->ac); 188 191 } 189 192 190 193 switch (op->error) {
-1
fs/afs/inode.c
··· 27 27 28 28 static const struct inode_operations afs_symlink_inode_operations = { 29 29 .get_link = page_get_link, 30 - .listxattr = afs_listxattr, 31 30 }; 32 31 33 32 static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
-1
fs/afs/internal.h
··· 1509 1509 * xattr.c 1510 1510 */ 1511 1511 extern const struct xattr_handler *afs_xattr_handlers[]; 1512 - extern ssize_t afs_listxattr(struct dentry *, char *, size_t); 1513 1512 1514 1513 /* 1515 1514 * yfsclient.c
-1
fs/afs/mntpt.c
··· 32 32 .lookup = afs_mntpt_lookup, 33 33 .readlink = page_readlink, 34 34 .getattr = afs_getattr, 35 - .listxattr = afs_listxattr, 36 35 }; 37 36 38 37 const struct inode_operations afs_autocell_inode_operations = {
+7 -24
fs/afs/xattr.c
··· 11 11 #include <linux/xattr.h> 12 12 #include "internal.h" 13 13 14 - static const char afs_xattr_list[] = 15 - "afs.acl\0" 16 - "afs.cell\0" 17 - "afs.fid\0" 18 - "afs.volume\0" 19 - "afs.yfs.acl\0" 20 - "afs.yfs.acl_inherited\0" 21 - "afs.yfs.acl_num_cleaned\0" 22 - "afs.yfs.vol_acl"; 23 - 24 - /* 25 - * Retrieve a list of the supported xattrs. 26 - */ 27 - ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size) 28 - { 29 - if (size == 0) 30 - return sizeof(afs_xattr_list); 31 - if (size < sizeof(afs_xattr_list)) 32 - return -ERANGE; 33 - memcpy(buffer, afs_xattr_list, sizeof(afs_xattr_list)); 34 - return sizeof(afs_xattr_list); 35 - } 36 - 37 14 /* 38 15 * Deal with the result of a successful fetch ACL operation. 39 16 */ ··· 208 231 else 209 232 ret = -ERANGE; 210 233 } 234 + } else if (ret == -ENOTSUPP) { 235 + ret = -ENODATA; 211 236 } 212 237 213 238 error_yacl: ··· 235 256 { 236 257 struct afs_operation *op; 237 258 struct afs_vnode *vnode = AFS_FS_I(inode); 259 + int ret; 238 260 239 261 if (flags == XATTR_CREATE || 240 262 strcmp(name, "acl") != 0) ··· 250 270 return afs_put_operation(op); 251 271 252 272 op->ops = &yfs_store_opaque_acl2_operation; 253 - return afs_do_sync_operation(op); 273 + ret = afs_do_sync_operation(op); 274 + if (ret == -ENOTSUPP) 275 + ret = -ENODATA; 276 + return ret; 254 277 } 255 278 256 279 static const struct xattr_handler afs_xattr_yfs_handler = {
+2
fs/btrfs/ctree.c
··· 1365 1365 "failed to read tree block %llu from get_old_root", 1366 1366 logical); 1367 1367 } else { 1368 + btrfs_tree_read_lock(old); 1368 1369 eb = btrfs_clone_extent_buffer(old); 1370 + btrfs_tree_read_unlock(old); 1369 1371 free_extent_buffer(old); 1370 1372 } 1371 1373 } else if (old_root) {
+22 -1
fs/btrfs/extent-tree.c
··· 3323 3323 3324 3324 if (last_ref && btrfs_header_generation(buf) == trans->transid) { 3325 3325 struct btrfs_block_group *cache; 3326 + bool must_pin = false; 3326 3327 3327 3328 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 3328 3329 ret = check_ref_cleanup(trans, buf->start); ··· 3341 3340 goto out; 3342 3341 } 3343 3342 3344 - if (btrfs_is_zoned(fs_info)) { 3343 + /* 3344 + * If this is a leaf and there are tree mod log users, we may 3345 + * have recorded mod log operations that point to this leaf. 3346 + * So we must make sure no one reuses this leaf's extent before 3347 + * mod log operations are applied to a node, otherwise after 3348 + * rewinding a node using the mod log operations we get an 3349 + * inconsistent btree, as the leaf's extent may now be used as 3350 + * a node or leaf for another different btree. 3351 + * We are safe from races here because at this point no other 3352 + * node or root points to this extent buffer, so if after this 3353 + * check a new tree mod log user joins, it will not be able to 3354 + * find a node pointing to this leaf and record operations that 3355 + * point to this leaf. 3356 + */ 3357 + if (btrfs_header_level(buf) == 0) { 3358 + read_lock(&fs_info->tree_mod_log_lock); 3359 + must_pin = !list_empty(&fs_info->tree_mod_seq_list); 3360 + read_unlock(&fs_info->tree_mod_log_lock); 3361 + } 3362 + 3363 + if (must_pin || btrfs_is_zoned(fs_info)) { 3345 3364 btrfs_redirty_list_add(trans->transaction, buf); 3346 3365 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3347 3366 btrfs_put_block_group(cache);
+31 -2
fs/btrfs/extent_io.c
··· 2886 2886 } 2887 2887 2888 2888 /* 2889 + * Find extent buffer for a givne bytenr. 2890 + * 2891 + * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking 2892 + * in endio context. 2893 + */ 2894 + static struct extent_buffer *find_extent_buffer_readpage( 2895 + struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr) 2896 + { 2897 + struct extent_buffer *eb; 2898 + 2899 + /* 2900 + * For regular sectorsize, we can use page->private to grab extent 2901 + * buffer 2902 + */ 2903 + if (fs_info->sectorsize == PAGE_SIZE) { 2904 + ASSERT(PagePrivate(page) && page->private); 2905 + return (struct extent_buffer *)page->private; 2906 + } 2907 + 2908 + /* For subpage case, we need to lookup buffer radix tree */ 2909 + rcu_read_lock(); 2910 + eb = radix_tree_lookup(&fs_info->buffer_radix, 2911 + bytenr >> fs_info->sectorsize_bits); 2912 + rcu_read_unlock(); 2913 + ASSERT(eb); 2914 + return eb; 2915 + } 2916 + 2917 + /* 2889 2918 * after a readpage IO is done, we need to: 2890 2919 * clear the uptodate bits on error 2891 2920 * set the uptodate bits if things worked ··· 3025 2996 } else { 3026 2997 struct extent_buffer *eb; 3027 2998 3028 - eb = (struct extent_buffer *)page->private; 2999 + eb = find_extent_buffer_readpage(fs_info, page, start); 3029 3000 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); 3030 3001 eb->read_mirror = mirror; 3031 3002 atomic_dec(&eb->io_pages); ··· 3049 3020 */ 3050 3021 if (page->index == end_index && i_size <= end) { 3051 3022 u32 zero_start = max(offset_in_page(i_size), 3052 - offset_in_page(end)); 3023 + offset_in_page(start)); 3053 3024 3054 3025 zero_user_segment(page, zero_start, 3055 3026 offset_in_page(end) + 1);
+26 -11
fs/btrfs/inode.c
··· 9008 9008 9009 9009 btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap", 9010 9010 PAGE_SIZE, PAGE_SIZE, 9011 - SLAB_RED_ZONE, NULL); 9011 + SLAB_MEM_SPREAD, NULL); 9012 9012 if (!btrfs_free_space_bitmap_cachep) 9013 9013 goto fail; 9014 9014 ··· 9877 9877 struct btrfs_path *path; 9878 9878 u64 start = ins->objectid; 9879 9879 u64 len = ins->offset; 9880 + int qgroup_released; 9880 9881 int ret; 9881 9882 9882 9883 memset(&stack_fi, 0, sizeof(stack_fi)); ··· 9890 9889 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9891 9890 /* Encryption and other encoding is reserved and all 0 */ 9892 9891 9893 - ret = btrfs_qgroup_release_data(inode, file_offset, len); 9894 - if (ret < 0) 9895 - return ERR_PTR(ret); 9892 + qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len); 9893 + if (qgroup_released < 0) 9894 + return ERR_PTR(qgroup_released); 9896 9895 9897 9896 if (trans) { 9898 9897 ret = insert_reserved_file_extent(trans, inode, 9899 9898 file_offset, &stack_fi, 9900 - true, ret); 9899 + true, qgroup_released); 9901 9900 if (ret) 9902 - return ERR_PTR(ret); 9901 + goto free_qgroup; 9903 9902 return trans; 9904 9903 } 9905 9904 ··· 9910 9909 extent_info.file_offset = file_offset; 9911 9910 extent_info.extent_buf = (char *)&stack_fi; 9912 9911 extent_info.is_new_extent = true; 9913 - extent_info.qgroup_reserved = ret; 9912 + extent_info.qgroup_reserved = qgroup_released; 9914 9913 extent_info.insertions = 0; 9915 9914 9916 9915 path = btrfs_alloc_path(); 9917 - if (!path) 9918 - return ERR_PTR(-ENOMEM); 9916 + if (!path) { 9917 + ret = -ENOMEM; 9918 + goto free_qgroup; 9919 + } 9919 9920 9920 9921 ret = btrfs_replace_file_extents(&inode->vfs_inode, path, file_offset, 9921 9922 file_offset + len - 1, &extent_info, 9922 9923 &trans); 9923 9924 btrfs_free_path(path); 9924 9925 if (ret) 9925 - return ERR_PTR(ret); 9926 - 9926 + goto free_qgroup; 9927 9927 return trans; 9928 + 9929 + free_qgroup: 9930 + /* 
9931 + * We have released qgroup data range at the beginning of the function, 9932 + * and normally qgroup_released bytes will be freed when committing 9933 + * transaction. 9934 + * But if we error out early, we have to free what we have released 9935 + * or we leak qgroup data reservation. 9936 + */ 9937 + btrfs_qgroup_free_refroot(inode->root->fs_info, 9938 + inode->root->root_key.objectid, qgroup_released, 9939 + BTRFS_QGROUP_RSV_DATA); 9940 + return ERR_PTR(ret); 9928 9941 } 9929 9942 9930 9943 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+18 -17
fs/btrfs/reada.c
··· 209 209 /* find extent */ 210 210 spin_lock(&fs_info->reada_lock); 211 211 re = radix_tree_lookup(&fs_info->reada_tree, 212 - eb->start >> PAGE_SHIFT); 212 + eb->start >> fs_info->sectorsize_bits); 213 213 if (re) 214 214 re->refcnt++; 215 215 spin_unlock(&fs_info->reada_lock); ··· 240 240 zone = NULL; 241 241 spin_lock(&fs_info->reada_lock); 242 242 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 243 - logical >> PAGE_SHIFT, 1); 243 + logical >> fs_info->sectorsize_bits, 1); 244 244 if (ret == 1 && logical >= zone->start && logical <= zone->end) { 245 245 kref_get(&zone->refcnt); 246 246 spin_unlock(&fs_info->reada_lock); ··· 283 283 284 284 spin_lock(&fs_info->reada_lock); 285 285 ret = radix_tree_insert(&dev->reada_zones, 286 - (unsigned long)(zone->end >> PAGE_SHIFT), 287 - zone); 286 + (unsigned long)(zone->end >> fs_info->sectorsize_bits), 287 + zone); 288 288 289 289 if (ret == -EEXIST) { 290 290 kfree(zone); 291 291 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 292 - logical >> PAGE_SHIFT, 1); 292 + logical >> fs_info->sectorsize_bits, 1); 293 293 if (ret == 1 && logical >= zone->start && logical <= zone->end) 294 294 kref_get(&zone->refcnt); 295 295 else ··· 315 315 u64 length; 316 316 int real_stripes; 317 317 int nzones = 0; 318 - unsigned long index = logical >> PAGE_SHIFT; 318 + unsigned long index = logical >> fs_info->sectorsize_bits; 319 319 int dev_replace_is_ongoing; 320 320 int have_zone = 0; 321 321 ··· 497 497 struct reada_extent *re) 498 498 { 499 499 int i; 500 - unsigned long index = re->logical >> PAGE_SHIFT; 500 + unsigned long index = re->logical >> fs_info->sectorsize_bits; 501 501 502 502 spin_lock(&fs_info->reada_lock); 503 503 if (--re->refcnt) { ··· 538 538 static void reada_zone_release(struct kref *kref) 539 539 { 540 540 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt); 541 + struct btrfs_fs_info *fs_info = zone->device->fs_info; 541 542 542 - 
lockdep_assert_held(&zone->device->fs_info->reada_lock); 543 + lockdep_assert_held(&fs_info->reada_lock); 543 544 544 545 radix_tree_delete(&zone->device->reada_zones, 545 - zone->end >> PAGE_SHIFT); 546 + zone->end >> fs_info->sectorsize_bits); 546 547 547 548 kfree(zone); 548 549 } ··· 594 593 static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock) 595 594 { 596 595 int i; 597 - unsigned long index = zone->end >> PAGE_SHIFT; 596 + unsigned long index = zone->end >> zone->device->fs_info->sectorsize_bits; 598 597 599 598 for (i = 0; i < zone->ndevs; ++i) { 600 599 struct reada_zone *peer; ··· 629 628 (void **)&zone, index, 1); 630 629 if (ret == 0) 631 630 break; 632 - index = (zone->end >> PAGE_SHIFT) + 1; 631 + index = (zone->end >> dev->fs_info->sectorsize_bits) + 1; 633 632 if (zone->locked) { 634 633 if (zone->elems > top_locked_elems) { 635 634 top_locked_elems = zone->elems; ··· 710 709 * plugging to speed things up 711 710 */ 712 711 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 713 - dev->reada_next >> PAGE_SHIFT, 1); 712 + dev->reada_next >> fs_info->sectorsize_bits, 1); 714 713 if (ret == 0 || re->logical > dev->reada_curr_zone->end) { 715 714 ret = reada_pick_zone(dev); 716 715 if (!ret) { ··· 719 718 } 720 719 re = NULL; 721 720 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 722 - dev->reada_next >> PAGE_SHIFT, 1); 721 + dev->reada_next >> fs_info->sectorsize_bits, 1); 723 722 } 724 723 if (ret == 0) { 725 724 spin_unlock(&fs_info->reada_lock); ··· 886 885 pr_cont(" curr off %llu", 887 886 device->reada_next - zone->start); 888 887 pr_cont("\n"); 889 - index = (zone->end >> PAGE_SHIFT) + 1; 888 + index = (zone->end >> fs_info->sectorsize_bits) + 1; 890 889 } 891 890 cnt = 0; 892 891 index = 0; ··· 911 910 } 912 911 } 913 912 pr_cont("\n"); 914 - index = (re->logical >> PAGE_SHIFT) + 1; 913 + index = (re->logical >> fs_info->sectorsize_bits) + 1; 915 914 if (++cnt > 15) 916 915 break; 917 916 } ··· 
927 926 if (ret == 0) 928 927 break; 929 928 if (!re->scheduled) { 930 - index = (re->logical >> PAGE_SHIFT) + 1; 929 + index = (re->logical >> fs_info->sectorsize_bits) + 1; 931 930 continue; 932 931 } 933 932 pr_debug("re: logical %llu size %u list empty %d scheduled %d", ··· 943 942 } 944 943 } 945 944 pr_cont("\n"); 946 - index = (re->logical >> PAGE_SHIFT) + 1; 945 + index = (re->logical >> fs_info->sectorsize_bits) + 1; 947 946 } 948 947 spin_unlock(&fs_info->reada_lock); 949 948 }
+4 -4
fs/btrfs/tree-log.c
··· 3169 3169 3170 3170 mutex_lock(&log_root_tree->log_mutex); 3171 3171 3172 - index2 = log_root_tree->log_transid % 2; 3173 - list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3174 - root_log_ctx.log_transid = log_root_tree->log_transid; 3175 - 3176 3172 if (btrfs_is_zoned(fs_info)) { 3177 3173 if (!log_root_tree->node) { 3178 3174 ret = btrfs_alloc_log_tree_node(trans, log_root_tree); ··· 3178 3182 } 3179 3183 } 3180 3184 } 3185 + 3186 + index2 = log_root_tree->log_transid % 2; 3187 + list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3188 + root_log_ctx.log_transid = log_root_tree->log_transid; 3181 3189 3182 3190 /* 3183 3191 * Now we are safe to update the log_root_tree because we're under the
+1 -1
fs/cifs/cifs_swn.c
··· 248 248 249 249 /* 250 250 * Try to find a matching registration for the tcon's server name and share name. 251 - * Calls to this funciton must be protected by cifs_swnreg_idr_mutex. 251 + * Calls to this function must be protected by cifs_swnreg_idr_mutex. 252 252 * TODO Try to avoid memory allocations 253 253 */ 254 254 static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
+6 -3
fs/cifs/cifsacl.c
··· 1118 1118 /* Retain old ACEs which we can retain */ 1119 1119 for (i = 0; i < src_num_aces; ++i) { 1120 1120 pntace = (struct cifs_ace *) (acl_base + size); 1121 - pnntace = (struct cifs_ace *) (nacl_base + nsize); 1122 1121 1123 1122 if (!new_aces_set && (pntace->flags & INHERITED_ACE)) { 1124 1123 /* Place the new ACEs in between existing explicit and inherited */ ··· 1130 1131 } 1131 1132 1132 1133 /* If it's any one of the ACE we're replacing, skip! */ 1133 - if ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) || 1134 + if (!mode_from_sid && 1135 + ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) || 1134 1136 (compare_sids(&pntace->sid, pownersid) == 0) || 1135 1137 (compare_sids(&pntace->sid, pgrpsid) == 0) || 1136 1138 (compare_sids(&pntace->sid, &sid_everyone) == 0) || 1137 - (compare_sids(&pntace->sid, &sid_authusers) == 0)) { 1139 + (compare_sids(&pntace->sid, &sid_authusers) == 0))) { 1138 1140 goto next_ace; 1139 1141 } 1142 + 1143 + /* update the pointer to the next ACE to populate*/ 1144 + pnntace = (struct cifs_ace *) (nacl_base + nsize); 1140 1145 1141 1146 nsize += cifs_copy_ace(pnntace, pntace, NULL); 1142 1147 num_aces++;
+4 -2
fs/cifs/fs_context.c
··· 1196 1196 pr_warn_once("Witness protocol support is experimental\n"); 1197 1197 break; 1198 1198 case Opt_rootfs: 1199 - #ifdef CONFIG_CIFS_ROOT 1200 - ctx->rootfs = true; 1199 + #ifndef CONFIG_CIFS_ROOT 1200 + cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n"); 1201 + goto cifs_parse_mount_err; 1201 1202 #endif 1203 + ctx->rootfs = true; 1202 1204 break; 1203 1205 case Opt_posixpaths: 1204 1206 if (result.negated)
+9 -1
fs/cifs/inode.c
··· 2395 2395 * We need to be sure that all dirty pages are written and the server 2396 2396 * has actual ctime, mtime and file length. 2397 2397 */ 2398 - if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE)) && 2398 + if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) && 2399 2399 !CIFS_CACHE_READ(CIFS_I(inode)) && 2400 2400 inode->i_mapping && inode->i_mapping->nrpages != 0) { 2401 2401 rc = filemap_fdatawait(inode->i_mapping); ··· 2585 2585 if (rc == 0) { 2586 2586 cifsInode->server_eof = attrs->ia_size; 2587 2587 cifs_setsize(inode, attrs->ia_size); 2588 + /* 2589 + * i_blocks is not related to (i_size / i_blksize), but instead 2590 + * 512 byte (2**9) size is required for calculating num blocks. 2591 + * Until we can query the server for actual allocation size, 2592 + * this is best estimate we have for blocks allocated for a file 2593 + * Number of blocks must be rounded up so size 1 is not 0 blocks 2594 + */ 2595 + inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9; 2588 2596 2589 2597 /* 2590 2598 * The man page of truncate says if the size changed,
+6 -1
fs/cifs/transport.c
··· 1196 1196 /* 1197 1197 * Compounding is never used during session establish. 1198 1198 */ 1199 - if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) 1199 + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { 1200 + mutex_lock(&server->srv_mutex); 1200 1201 smb311_update_preauth_hash(ses, rqst[0].rq_iov, 1201 1202 rqst[0].rq_nvec); 1203 + mutex_unlock(&server->srv_mutex); 1204 + } 1202 1205 1203 1206 for (i = 0; i < num_rqst; i++) { 1204 1207 rc = wait_for_response(server, midQ[i]); ··· 1269 1266 .iov_base = resp_iov[0].iov_base, 1270 1267 .iov_len = resp_iov[0].iov_len 1271 1268 }; 1269 + mutex_lock(&server->srv_mutex); 1272 1270 smb311_update_preauth_hash(ses, &iov, 1); 1271 + mutex_unlock(&server->srv_mutex); 1273 1272 } 1274 1273 1275 1274 out:
+26 -12
fs/ext4/balloc.c
··· 626 626 627 627 /** 628 628 * ext4_should_retry_alloc() - check if a block allocation should be retried 629 - * @sb: super block 630 - * @retries: number of attemps has been made 629 + * @sb: superblock 630 + * @retries: number of retry attempts made so far 631 631 * 632 - * ext4_should_retry_alloc() is called when ENOSPC is returned, and if 633 - * it is profitable to retry the operation, this function will wait 634 - * for the current or committing transaction to complete, and then 635 - * return TRUE. We will only retry once. 632 + * ext4_should_retry_alloc() is called when ENOSPC is returned while 633 + * attempting to allocate blocks. If there's an indication that a pending 634 + * journal transaction might free some space and allow another attempt to 635 + * succeed, this function will wait for the current or committing transaction 636 + * to complete and then return TRUE. 636 637 */ 637 638 int ext4_should_retry_alloc(struct super_block *sb, int *retries) 638 639 { 639 - if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) || 640 - (*retries)++ > 1 || 641 - !EXT4_SB(sb)->s_journal) 640 + struct ext4_sb_info *sbi = EXT4_SB(sb); 641 + 642 + if (!sbi->s_journal) 642 643 return 0; 643 644 645 + if (++(*retries) > 3) { 646 + percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit); 647 + return 0; 648 + } 649 + 650 + /* 651 + * if there's no indication that blocks are about to be freed it's 652 + * possible we just missed a transaction commit that did so 653 + */ 644 654 smp_mb(); 645 - if (EXT4_SB(sb)->s_mb_free_pending == 0) 646 - return 0; 655 + if (sbi->s_mb_free_pending == 0) 656 + return ext4_has_free_clusters(sbi, 1, 0); 647 657 658 + /* 659 + * it's possible we've just missed a transaction commit here, 660 + * so ignore the returned status 661 + */ 648 662 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); 649 - jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); 663 + (void) jbd2_journal_force_commit_nested(sbi->s_journal); 650 664 return 
1; 651 665 } 652 666
+3
fs/ext4/ext4.h
··· 1484 1484 struct percpu_counter s_freeinodes_counter; 1485 1485 struct percpu_counter s_dirs_counter; 1486 1486 struct percpu_counter s_dirtyclusters_counter; 1487 + struct percpu_counter s_sra_exceeded_retry_limit; 1487 1488 struct blockgroup_lock *s_blockgroup_lock; 1488 1489 struct proc_dir_entry *s_proc; 1489 1490 struct kobject s_kobj; ··· 2794 2793 struct dentry *dentry); 2795 2794 void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry); 2796 2795 void ext4_fc_track_link(handle_t *handle, struct dentry *dentry); 2796 + void __ext4_fc_track_create(handle_t *handle, struct inode *inode, 2797 + struct dentry *dentry); 2797 2798 void ext4_fc_track_create(handle_t *handle, struct dentry *dentry); 2798 2799 void ext4_fc_track_inode(handle_t *handle, struct inode *inode); 2799 2800 void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
+1 -1
fs/ext4/extents.c
··· 4382 4382 { 4383 4383 struct inode *inode = file_inode(file); 4384 4384 handle_t *handle; 4385 - int ret, ret2 = 0, ret3 = 0; 4385 + int ret = 0, ret2 = 0, ret3 = 0; 4386 4386 int retries = 0; 4387 4387 int depth = 0; 4388 4388 struct ext4_map_blocks map;
+7 -2
fs/ext4/fast_commit.c
··· 513 513 __ext4_fc_track_link(handle, d_inode(dentry), dentry); 514 514 } 515 515 516 - void ext4_fc_track_create(handle_t *handle, struct dentry *dentry) 516 + void __ext4_fc_track_create(handle_t *handle, struct inode *inode, 517 + struct dentry *dentry) 517 518 { 518 519 struct __track_dentry_update_args args; 519 - struct inode *inode = d_inode(dentry); 520 520 int ret; 521 521 522 522 args.dentry = dentry; ··· 525 525 ret = ext4_fc_track_template(handle, inode, __track_dentry_update, 526 526 (void *)&args, 0); 527 527 trace_ext4_fc_track_create(inode, dentry, ret); 528 + } 529 + 530 + void ext4_fc_track_create(handle_t *handle, struct dentry *dentry) 531 + { 532 + __ext4_fc_track_create(handle, d_inode(dentry), dentry); 528 533 } 529 534 530 535 /* __track_fn for inode tracking */
+10 -8
fs/ext4/inode.c
··· 1938 1938 if (!ret) 1939 1939 ret = err; 1940 1940 1941 - if (!ext4_has_inline_data(inode)) 1942 - ext4_walk_page_buffers(NULL, page_bufs, 0, len, 1943 - NULL, bput_one); 1944 1941 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1945 1942 out: 1946 1943 unlock_page(page); 1947 1944 out_no_pagelock: 1945 + if (!inline_data && page_bufs) 1946 + ext4_walk_page_buffers(NULL, page_bufs, 0, len, 1947 + NULL, bput_one); 1948 1948 brelse(inode_bh); 1949 1949 return ret; 1950 1950 } ··· 5026 5026 struct ext4_inode_info *ei = EXT4_I(inode); 5027 5027 struct buffer_head *bh = iloc->bh; 5028 5028 struct super_block *sb = inode->i_sb; 5029 - int err = 0, rc, block; 5029 + int err = 0, block; 5030 5030 int need_datasync = 0, set_large_file = 0; 5031 5031 uid_t i_uid; 5032 5032 gid_t i_gid; ··· 5138 5138 bh->b_data); 5139 5139 5140 5140 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5141 - rc = ext4_handle_dirty_metadata(handle, NULL, bh); 5142 - if (!err) 5143 - err = rc; 5141 + err = ext4_handle_dirty_metadata(handle, NULL, bh); 5142 + if (err) 5143 + goto out_brelse; 5144 5144 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5145 5145 if (set_large_file) { 5146 5146 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); ··· 5387 5387 inode->i_gid = attr->ia_gid; 5388 5388 error = ext4_mark_inode_dirty(handle, inode); 5389 5389 ext4_journal_stop(handle); 5390 - if (unlikely(error)) 5390 + if (unlikely(error)) { 5391 + ext4_fc_stop_update(inode); 5391 5392 return error; 5393 + } 5392 5394 } 5393 5395 5394 5396 if (attr->ia_valid & ATTR_SIZE) {
+9 -2
fs/ext4/mballoc.c
··· 2709 2709 } 2710 2710 2711 2711 if (ext4_has_feature_flex_bg(sb)) { 2712 - /* a single flex group is supposed to be read by a single IO */ 2713 - sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex, 2712 + /* a single flex group is supposed to be read by a single IO. 2713 + * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 2714 + * unsigned integer, so the maximum shift is 32. 2715 + */ 2716 + if (sbi->s_es->s_log_groups_per_flex >= 32) { 2717 + ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 2718 + goto err_freesgi; 2719 + } 2720 + sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 2714 2721 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 2715 2722 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 2716 2723 } else {
+39 -11
fs/ext4/namei.c
··· 3613 3613 return retval; 3614 3614 } 3615 3615 3616 + static void ext4_resetent(handle_t *handle, struct ext4_renament *ent, 3617 + unsigned ino, unsigned file_type) 3618 + { 3619 + struct ext4_renament old = *ent; 3620 + int retval = 0; 3621 + 3622 + /* 3623 + * old->de could have moved from under us during make indexed dir, 3624 + * so the old->de may no longer valid and need to find it again 3625 + * before reset old inode info. 3626 + */ 3627 + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); 3628 + if (IS_ERR(old.bh)) 3629 + retval = PTR_ERR(old.bh); 3630 + if (!old.bh) 3631 + retval = -ENOENT; 3632 + if (retval) { 3633 + ext4_std_error(old.dir->i_sb, retval); 3634 + return; 3635 + } 3636 + 3637 + ext4_setent(handle, &old, ino, file_type); 3638 + brelse(old.bh); 3639 + } 3640 + 3616 3641 static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, 3617 3642 const struct qstr *d_name) 3618 3643 { ··· 3799 3774 */ 3800 3775 retval = -ENOENT; 3801 3776 if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) 3802 - goto end_rename; 3777 + goto release_bh; 3803 3778 3804 3779 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, 3805 3780 &new.de, &new.inlined); 3806 3781 if (IS_ERR(new.bh)) { 3807 3782 retval = PTR_ERR(new.bh); 3808 3783 new.bh = NULL; 3809 - goto end_rename; 3784 + goto release_bh; 3810 3785 } 3811 3786 if (new.bh) { 3812 3787 if (!new.inode) { ··· 3823 3798 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); 3824 3799 if (IS_ERR(handle)) { 3825 3800 retval = PTR_ERR(handle); 3826 - handle = NULL; 3827 - goto end_rename; 3801 + goto release_bh; 3828 3802 } 3829 3803 } else { 3830 3804 whiteout = ext4_whiteout_for_rename(mnt_userns, &old, credits, &handle); 3831 3805 if (IS_ERR(whiteout)) { 3832 3806 retval = PTR_ERR(whiteout); 3833 - whiteout = NULL; 3834 - goto end_rename; 3807 + goto release_bh; 3835 3808 } 3836 3809 } 3837 3810 ··· 3873 3850 retval = ext4_mark_inode_dirty(handle, whiteout); 
3874 3851 if (unlikely(retval)) 3875 3852 goto end_rename; 3853 + 3876 3854 } 3877 3855 if (!new.bh) { 3878 3856 retval = ext4_add_entry(handle, new.dentry, old.inode); ··· 3947 3923 ext4_fc_track_unlink(handle, new.dentry); 3948 3924 __ext4_fc_track_link(handle, old.inode, new.dentry); 3949 3925 __ext4_fc_track_unlink(handle, old.inode, old.dentry); 3926 + if (whiteout) 3927 + __ext4_fc_track_create(handle, whiteout, old.dentry); 3950 3928 } 3951 3929 3952 3930 if (new.inode) { ··· 3963 3937 end_rename: 3964 3938 if (whiteout) { 3965 3939 if (retval) { 3966 - ext4_setent(handle, &old, 3967 - old.inode->i_ino, old_file_type); 3940 + ext4_resetent(handle, &old, 3941 + old.inode->i_ino, old_file_type); 3968 3942 drop_nlink(whiteout); 3943 + ext4_orphan_add(handle, whiteout); 3969 3944 } 3970 3945 unlock_new_inode(whiteout); 3946 + ext4_journal_stop(handle); 3971 3947 iput(whiteout); 3972 - 3948 + } else { 3949 + ext4_journal_stop(handle); 3973 3950 } 3951 + release_bh: 3974 3952 brelse(old.dir_bh); 3975 3953 brelse(old.bh); 3976 3954 brelse(new.bh); 3977 - if (handle) 3978 - ext4_journal_stop(handle); 3979 3955 return retval; 3980 3956 } 3981 3957
+6 -1
fs/ext4/super.c
··· 1210 1210 percpu_counter_destroy(&sbi->s_freeinodes_counter); 1211 1211 percpu_counter_destroy(&sbi->s_dirs_counter); 1212 1212 percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 1213 + percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); 1213 1214 percpu_free_rwsem(&sbi->s_writepages_rwsem); 1214 1215 #ifdef CONFIG_QUOTA 1215 1216 for (i = 0; i < EXT4_MAXQUOTAS; i++) ··· 5013 5012 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, 5014 5013 GFP_KERNEL); 5015 5014 if (!err) 5015 + err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0, 5016 + GFP_KERNEL); 5017 + if (!err) 5016 5018 err = percpu_init_rwsem(&sbi->s_writepages_rwsem); 5017 5019 5018 5020 if (err) { ··· 5128 5124 percpu_counter_destroy(&sbi->s_freeinodes_counter); 5129 5125 percpu_counter_destroy(&sbi->s_dirs_counter); 5130 5126 percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 5127 + percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); 5131 5128 percpu_free_rwsem(&sbi->s_writepages_rwsem); 5132 5129 failed_mount5: 5133 5130 ext4_ext_release(sb); ··· 5154 5149 failed_mount3a: 5155 5150 ext4_es_unregister_shrinker(sbi); 5156 5151 failed_mount3: 5157 - del_timer_sync(&sbi->s_err_report); 5158 5152 flush_work(&sbi->s_error_work); 5153 + del_timer_sync(&sbi->s_err_report); 5159 5154 if (sbi->s_mmp_tsk) 5160 5155 kthread_stop(sbi->s_mmp_tsk); 5161 5156 failed_mount2:
+7
fs/ext4/sysfs.c
··· 24 24 attr_session_write_kbytes, 25 25 attr_lifetime_write_kbytes, 26 26 attr_reserved_clusters, 27 + attr_sra_exceeded_retry_limit, 27 28 attr_inode_readahead, 28 29 attr_trigger_test_error, 29 30 attr_first_error_time, ··· 203 202 EXT4_ATTR_FUNC(session_write_kbytes, 0444); 204 203 EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444); 205 204 EXT4_ATTR_FUNC(reserved_clusters, 0644); 205 + EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444); 206 206 207 207 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead, 208 208 ext4_sb_info, s_inode_readahead_blks); ··· 253 251 ATTR_LIST(session_write_kbytes), 254 252 ATTR_LIST(lifetime_write_kbytes), 255 253 ATTR_LIST(reserved_clusters), 254 + ATTR_LIST(sra_exceeded_retry_limit), 256 255 ATTR_LIST(inode_readahead_blks), 257 256 ATTR_LIST(inode_goal), 258 257 ATTR_LIST(mb_stats), ··· 377 374 return snprintf(buf, PAGE_SIZE, "%llu\n", 378 375 (unsigned long long) 379 376 atomic64_read(&sbi->s_resv_clusters)); 377 + case attr_sra_exceeded_retry_limit: 378 + return snprintf(buf, PAGE_SIZE, "%llu\n", 379 + (unsigned long long) 380 + percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit)); 380 381 case attr_inode_readahead: 381 382 case attr_pointer_ui: 382 383 if (!ptr)
+57 -36
fs/ext4/verity.c
··· 201 201 struct inode *inode = file_inode(filp); 202 202 const int credits = 2; /* superblock and inode for ext4_orphan_del() */ 203 203 handle_t *handle; 204 + struct ext4_iloc iloc; 204 205 int err = 0; 205 - int err2; 206 - 207 - if (desc != NULL) { 208 - /* Succeeded; write the verity descriptor. */ 209 - err = ext4_write_verity_descriptor(inode, desc, desc_size, 210 - merkle_tree_size); 211 - 212 - /* Write all pages before clearing VERITY_IN_PROGRESS. */ 213 - if (!err) 214 - err = filemap_write_and_wait(inode->i_mapping); 215 - } 216 - 217 - /* If we failed, truncate anything we wrote past i_size. */ 218 - if (desc == NULL || err) 219 - ext4_truncate(inode); 220 206 221 207 /* 222 - * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and 223 - * deleting the inode from the orphan list, even if something failed. 224 - * If everything succeeded, we'll also set the verity bit in the same 225 - * transaction. 208 + * If an error already occurred (which fs/verity/ signals by passing 209 + * desc == NULL), then only clean-up is needed. 226 210 */ 211 + if (desc == NULL) 212 + goto cleanup; 227 213 228 - ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS); 214 + /* Append the verity descriptor. */ 215 + err = ext4_write_verity_descriptor(inode, desc, desc_size, 216 + merkle_tree_size); 217 + if (err) 218 + goto cleanup; 219 + 220 + /* 221 + * Write all pages (both data and verity metadata). Note that this must 222 + * happen before clearing EXT4_STATE_VERITY_IN_PROGRESS; otherwise pages 223 + * beyond i_size won't be written properly. For crash consistency, this 224 + * also must happen before the verity inode flag gets persisted. 225 + */ 226 + err = filemap_write_and_wait(inode->i_mapping); 227 + if (err) 228 + goto cleanup; 229 + 230 + /* 231 + * Finally, set the verity inode flag and remove the inode from the 232 + * orphan list (in a single transaction). 
233 + */ 229 234 230 235 handle = ext4_journal_start(inode, EXT4_HT_INODE, credits); 231 236 if (IS_ERR(handle)) { 232 - ext4_orphan_del(NULL, inode); 233 - return PTR_ERR(handle); 237 + err = PTR_ERR(handle); 238 + goto cleanup; 234 239 } 235 240 236 - err2 = ext4_orphan_del(handle, inode); 237 - if (err2) 238 - goto out_stop; 241 + err = ext4_orphan_del(handle, inode); 242 + if (err) 243 + goto stop_and_cleanup; 239 244 240 - if (desc != NULL && !err) { 241 - struct ext4_iloc iloc; 245 + err = ext4_reserve_inode_write(handle, inode, &iloc); 246 + if (err) 247 + goto stop_and_cleanup; 242 248 243 - err = ext4_reserve_inode_write(handle, inode, &iloc); 244 - if (err) 245 - goto out_stop; 246 - ext4_set_inode_flag(inode, EXT4_INODE_VERITY); 247 - ext4_set_inode_flags(inode, false); 248 - err = ext4_mark_iloc_dirty(handle, inode, &iloc); 249 - } 250 - out_stop: 249 + ext4_set_inode_flag(inode, EXT4_INODE_VERITY); 250 + ext4_set_inode_flags(inode, false); 251 + err = ext4_mark_iloc_dirty(handle, inode, &iloc); 252 + if (err) 253 + goto stop_and_cleanup; 254 + 251 255 ext4_journal_stop(handle); 252 - return err ?: err2; 256 + 257 + ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS); 258 + return 0; 259 + 260 + stop_and_cleanup: 261 + ext4_journal_stop(handle); 262 + cleanup: 263 + /* 264 + * Verity failed to be enabled, so clean up by truncating any verity 265 + * metadata that was written beyond i_size (both from cache and from 266 + * disk), removing the inode from the orphan list (if it wasn't done 267 + * already), and clearing EXT4_STATE_VERITY_IN_PROGRESS. 268 + */ 269 + truncate_inode_pages(inode->i_mapping, inode->i_size); 270 + ext4_truncate(inode); 271 + ext4_orphan_del(NULL, inode); 272 + ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS); 273 + return err; 253 274 } 254 275 255 276 static int ext4_get_verity_descriptor_location(struct inode *inode,
+5 -1
fs/ext4/xattr.c
··· 1462 1462 if (!ce) 1463 1463 return NULL; 1464 1464 1465 + WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) && 1466 + !(current->flags & PF_MEMALLOC_NOFS)); 1467 + 1465 1468 ea_data = kvmalloc(value_len, GFP_KERNEL); 1466 1469 if (!ea_data) { 1467 1470 mb_cache_entry_put(ea_inode_cache, ce); ··· 2330 2327 error = -ENOSPC; 2331 2328 goto cleanup; 2332 2329 } 2330 + WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS)); 2333 2331 } 2334 2332 2335 2333 error = ext4_reserve_inode_write(handle, inode, &is.iloc); ··· 2404 2400 * external inode if possible. 2405 2401 */ 2406 2402 if (ext4_has_feature_ea_inode(inode->i_sb) && 2407 - !i.in_inode) { 2403 + i.value_len && !i.in_inode) { 2408 2404 i.in_inode = 1; 2409 2405 goto retry_inode; 2410 2406 }
+16 -10
fs/fuse/dev.c
··· 2229 2229 static long fuse_dev_ioctl(struct file *file, unsigned int cmd, 2230 2230 unsigned long arg) 2231 2231 { 2232 - int err = -ENOTTY; 2232 + int res; 2233 + int oldfd; 2234 + struct fuse_dev *fud = NULL; 2233 2235 2234 - if (cmd == FUSE_DEV_IOC_CLONE) { 2235 - int oldfd; 2236 + if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC) 2237 + return -ENOTTY; 2236 2238 2237 - err = -EFAULT; 2238 - if (!get_user(oldfd, (__u32 __user *) arg)) { 2239 + switch (_IOC_NR(cmd)) { 2240 + case _IOC_NR(FUSE_DEV_IOC_CLONE): 2241 + res = -EFAULT; 2242 + if (!get_user(oldfd, (__u32 __user *)arg)) { 2239 2243 struct file *old = fget(oldfd); 2240 2244 2241 - err = -EINVAL; 2245 + res = -EINVAL; 2242 2246 if (old) { 2243 - struct fuse_dev *fud = NULL; 2244 - 2245 2247 /* 2246 2248 * Check against file->f_op because CUSE 2247 2249 * uses the same ioctl handler. ··· 2254 2252 2255 2253 if (fud) { 2256 2254 mutex_lock(&fuse_mutex); 2257 - err = fuse_device_clone(fud->fc, file); 2255 + res = fuse_device_clone(fud->fc, file); 2258 2256 mutex_unlock(&fuse_mutex); 2259 2257 } 2260 2258 fput(old); 2261 2259 } 2262 2260 } 2261 + break; 2262 + default: 2263 + res = -ENOTTY; 2264 + break; 2263 2265 } 2264 - return err; 2266 + return res; 2265 2267 } 2266 2268 2267 2269 const struct file_operations fuse_dev_operations = {
+1
fs/fuse/fuse_i.h
··· 863 863 864 864 static inline void fuse_make_bad(struct inode *inode) 865 865 { 866 + remove_inode_hash(inode); 866 867 set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); 867 868 } 868 869
+8 -1
fs/fuse/virtio_fs.c
··· 1324 1324 1325 1325 /* virtiofs allocates and installs its own fuse devices */ 1326 1326 ctx->fudptr = NULL; 1327 - if (ctx->dax) 1327 + if (ctx->dax) { 1328 + if (!fs->dax_dev) { 1329 + err = -EINVAL; 1330 + pr_err("virtio-fs: dax can't be enabled as filesystem" 1331 + " device does not support it.\n"); 1332 + goto err_free_fuse_devs; 1333 + } 1328 1334 ctx->dax_dev = fs->dax_dev; 1335 + } 1329 1336 err = fuse_fill_super_common(sb, ctx); 1330 1337 if (err < 0) 1331 1338 goto err_free_fuse_devs;
+11 -3
fs/io-wq.c
··· 386 386 return NULL; 387 387 } 388 388 389 - static void io_flush_signals(void) 389 + static bool io_flush_signals(void) 390 390 { 391 391 if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) { 392 + __set_current_state(TASK_RUNNING); 392 393 if (current->task_works) 393 394 task_work_run(); 394 395 clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL); 396 + return true; 395 397 } 398 + return false; 396 399 } 397 400 398 401 static void io_assign_current_work(struct io_worker *worker, ··· 491 488 set_task_comm(current, buf); 492 489 493 490 while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { 491 + long ret; 492 + 494 493 set_current_state(TASK_INTERRUPTIBLE); 495 494 loop: 496 495 raw_spin_lock_irq(&wqe->lock); ··· 502 497 } 503 498 __io_worker_idle(wqe, worker); 504 499 raw_spin_unlock_irq(&wqe->lock); 505 - io_flush_signals(); 506 - if (schedule_timeout(WORKER_IDLE_TIMEOUT)) 500 + if (io_flush_signals()) 501 + continue; 502 + ret = schedule_timeout(WORKER_IDLE_TIMEOUT); 503 + if (try_to_freeze() || ret) 507 504 continue; 508 505 if (fatal_signal_pending(current)) 509 506 break; ··· 716 709 set_current_state(TASK_INTERRUPTIBLE); 717 710 io_wq_check_workers(wq); 718 711 schedule_timeout(HZ); 712 + try_to_freeze(); 719 713 if (fatal_signal_pending(current)) 720 714 set_bit(IO_WQ_BIT_EXIT, &wq->state); 721 715 } while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
+9 -1
fs/io-wq.h
··· 2 2 #define INTERNAL_IO_WQ_H 3 3 4 4 #include <linux/refcount.h> 5 - #include <linux/io_uring.h> 6 5 7 6 struct io_wq; 8 7 ··· 18 19 IO_WQ_CANCEL_OK, /* cancelled before started */ 19 20 IO_WQ_CANCEL_RUNNING, /* found, running, and attempted cancelled */ 20 21 IO_WQ_CANCEL_NOTFOUND, /* work not found */ 22 + }; 23 + 24 + struct io_wq_work_node { 25 + struct io_wq_work_node *next; 26 + }; 27 + 28 + struct io_wq_work_list { 29 + struct io_wq_work_node *first; 30 + struct io_wq_work_node *last; 21 31 }; 22 32 23 33 static inline void wq_list_add_after(struct io_wq_work_node *node,
+147 -105
fs/io_uring.c
··· 258 258 259 259 struct io_sq_data { 260 260 refcount_t refs; 261 - struct rw_semaphore rw_lock; 261 + atomic_t park_pending; 262 + struct mutex lock; 262 263 263 264 /* ctx's that are using this sqd */ 264 265 struct list_head ctx_list; ··· 274 273 275 274 unsigned long state; 276 275 struct completion exited; 276 + struct callback_head *park_task_work; 277 277 }; 278 278 279 279 #define IO_IOPOLL_BATCH 8 ··· 404 402 struct socket *ring_sock; 405 403 #endif 406 404 407 - struct idr io_buffer_idr; 405 + struct xarray io_buffers; 408 406 409 407 struct xarray personalities; 410 408 u32 pers_next; ··· 454 452 /* Keep this last, we don't need it for the fast path */ 455 453 struct work_struct exit_work; 456 454 struct list_head tctx_list; 455 + }; 456 + 457 + struct io_uring_task { 458 + /* submission side */ 459 + struct xarray xa; 460 + struct wait_queue_head wait; 461 + const struct io_ring_ctx *last; 462 + struct io_wq *io_wq; 463 + struct percpu_counter inflight; 464 + atomic_t in_idle; 465 + bool sqpoll; 466 + 467 + spinlock_t task_lock; 468 + struct io_wq_work_list task_list; 469 + unsigned long task_state; 470 + struct callback_head task_work; 457 471 }; 458 472 459 473 /* ··· 1153 1135 init_waitqueue_head(&ctx->cq_wait); 1154 1136 INIT_LIST_HEAD(&ctx->cq_overflow_list); 1155 1137 init_completion(&ctx->ref_comp); 1156 - idr_init(&ctx->io_buffer_idr); 1138 + xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1); 1157 1139 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); 1158 1140 mutex_init(&ctx->uring_lock); 1159 1141 init_waitqueue_head(&ctx->wait); ··· 1568 1550 io_put_task(req->task, 1); 1569 1551 list_add(&req->compl.list, &cs->locked_free_list); 1570 1552 cs->locked_free_nr++; 1571 - } else 1572 - req = NULL; 1553 + } else { 1554 + if (!percpu_ref_tryget(&ctx->refs)) 1555 + req = NULL; 1556 + } 1573 1557 io_commit_cqring(ctx); 1574 1558 spin_unlock_irqrestore(&ctx->completion_lock, flags); 1575 - io_cqring_ev_posted(ctx); 1576 1559 1577 - if (req) 1560 
+ if (req) { 1561 + io_cqring_ev_posted(ctx); 1578 1562 percpu_ref_put(&ctx->refs); 1563 + } 1579 1564 } 1580 1565 1581 1566 static void io_req_complete_state(struct io_kiocb *req, long res, ··· 1946 1925 return ret; 1947 1926 } 1948 1927 1928 + static bool io_run_task_work_head(struct callback_head **work_head) 1929 + { 1930 + struct callback_head *work, *next; 1931 + bool executed = false; 1932 + 1933 + do { 1934 + work = xchg(work_head, NULL); 1935 + if (!work) 1936 + break; 1937 + 1938 + do { 1939 + next = work->next; 1940 + work->func(work); 1941 + work = next; 1942 + cond_resched(); 1943 + } while (work); 1944 + executed = true; 1945 + } while (1); 1946 + 1947 + return executed; 1948 + } 1949 + 1950 + static void io_task_work_add_head(struct callback_head **work_head, 1951 + struct callback_head *task_work) 1952 + { 1953 + struct callback_head *head; 1954 + 1955 + do { 1956 + head = READ_ONCE(*work_head); 1957 + task_work->next = head; 1958 + } while (cmpxchg(work_head, head, task_work) != head); 1959 + } 1960 + 1949 1961 static void io_req_task_work_add_fallback(struct io_kiocb *req, 1950 1962 task_work_func_t cb) 1951 1963 { 1952 - struct io_ring_ctx *ctx = req->ctx; 1953 - struct callback_head *head; 1954 - 1955 1964 init_task_work(&req->task_work, cb); 1956 - do { 1957 - head = READ_ONCE(ctx->exit_task_work); 1958 - req->task_work.next = head; 1959 - } while (cmpxchg(&ctx->exit_task_work, head, &req->task_work) != head); 1965 + io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work); 1960 1966 } 1961 1967 1962 1968 static void __io_req_task_cancel(struct io_kiocb *req, int error) ··· 2891 2843 2892 2844 lockdep_assert_held(&req->ctx->uring_lock); 2893 2845 2894 - head = idr_find(&req->ctx->io_buffer_idr, bgid); 2846 + head = xa_load(&req->ctx->io_buffers, bgid); 2895 2847 if (head) { 2896 2848 if (!list_empty(&head->list)) { 2897 2849 kbuf = list_last_entry(&head->list, struct io_buffer, ··· 2899 2851 list_del(&kbuf->list); 2900 2852 } else { 
2901 2853 kbuf = head; 2902 - idr_remove(&req->ctx->io_buffer_idr, bgid); 2854 + xa_erase(&req->ctx->io_buffers, bgid); 2903 2855 } 2904 2856 if (*len > kbuf->len) 2905 2857 *len = kbuf->len; ··· 3940 3892 } 3941 3893 i++; 3942 3894 kfree(buf); 3943 - idr_remove(&ctx->io_buffer_idr, bgid); 3895 + xa_erase(&ctx->io_buffers, bgid); 3944 3896 3945 3897 return i; 3946 3898 } ··· 3958 3910 lockdep_assert_held(&ctx->uring_lock); 3959 3911 3960 3912 ret = -ENOENT; 3961 - head = idr_find(&ctx->io_buffer_idr, p->bgid); 3913 + head = xa_load(&ctx->io_buffers, p->bgid); 3962 3914 if (head) 3963 3915 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs); 3964 3916 if (ret < 0) ··· 4041 3993 4042 3994 lockdep_assert_held(&ctx->uring_lock); 4043 3995 4044 - list = head = idr_find(&ctx->io_buffer_idr, p->bgid); 3996 + list = head = xa_load(&ctx->io_buffers, p->bgid); 4045 3997 4046 3998 ret = io_add_buffers(p, &head); 4047 - if (ret < 0) 4048 - goto out; 4049 - 4050 - if (!list) { 4051 - ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1, 4052 - GFP_KERNEL); 4053 - if (ret < 0) { 3999 + if (ret >= 0 && !list) { 4000 + ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL); 4001 + if (ret < 0) 4054 4002 __io_remove_buffers(ctx, head, p->bgid, -1U); 4055 - goto out; 4056 - } 4057 4003 } 4058 - out: 4059 4004 if (ret < 0) 4060 4005 req_set_fail_links(req); 4061 4006 ··· 4386 4345 struct io_async_msghdr iomsg, *kmsg; 4387 4346 struct socket *sock; 4388 4347 unsigned flags; 4348 + int min_ret = 0; 4389 4349 int ret; 4390 4350 4391 4351 sock = sock_from_file(req->file); ··· 4401 4359 kmsg = &iomsg; 4402 4360 } 4403 4361 4404 - flags = req->sr_msg.msg_flags; 4362 + flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; 4405 4363 if (flags & MSG_DONTWAIT) 4406 4364 req->flags |= REQ_F_NOWAIT; 4407 4365 else if (issue_flags & IO_URING_F_NONBLOCK) 4408 4366 flags |= MSG_DONTWAIT; 4367 + 4368 + if (flags & MSG_WAITALL) 4369 + min_ret = iov_iter_count(&kmsg->msg.msg_iter); 4409 
4370 4410 4371 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); 4411 4372 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN) ··· 4420 4375 if (kmsg->free_iov) 4421 4376 kfree(kmsg->free_iov); 4422 4377 req->flags &= ~REQ_F_NEED_CLEANUP; 4423 - if (ret < 0) 4378 + if (ret < min_ret) 4424 4379 req_set_fail_links(req); 4425 4380 __io_req_complete(req, issue_flags, ret, 0); 4426 4381 return 0; ··· 4433 4388 struct iovec iov; 4434 4389 struct socket *sock; 4435 4390 unsigned flags; 4391 + int min_ret = 0; 4436 4392 int ret; 4437 4393 4438 4394 sock = sock_from_file(req->file); ··· 4449 4403 msg.msg_controllen = 0; 4450 4404 msg.msg_namelen = 0; 4451 4405 4452 - flags = req->sr_msg.msg_flags; 4406 + flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; 4453 4407 if (flags & MSG_DONTWAIT) 4454 4408 req->flags |= REQ_F_NOWAIT; 4455 4409 else if (issue_flags & IO_URING_F_NONBLOCK) 4456 4410 flags |= MSG_DONTWAIT; 4411 + 4412 + if (flags & MSG_WAITALL) 4413 + min_ret = iov_iter_count(&msg.msg_iter); 4457 4414 4458 4415 msg.msg_flags = flags; 4459 4416 ret = sock_sendmsg(sock, &msg); ··· 4465 4416 if (ret == -ERESTARTSYS) 4466 4417 ret = -EINTR; 4467 4418 4468 - if (ret < 0) 4419 + if (ret < min_ret) 4469 4420 req_set_fail_links(req); 4470 4421 __io_req_complete(req, issue_flags, ret, 0); 4471 4422 return 0; ··· 4617 4568 struct socket *sock; 4618 4569 struct io_buffer *kbuf; 4619 4570 unsigned flags; 4571 + int min_ret = 0; 4620 4572 int ret, cflags = 0; 4621 4573 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; 4622 4574 ··· 4643 4593 1, req->sr_msg.len); 4644 4594 } 4645 4595 4646 - flags = req->sr_msg.msg_flags; 4596 + flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; 4647 4597 if (flags & MSG_DONTWAIT) 4648 4598 req->flags |= REQ_F_NOWAIT; 4649 4599 else if (force_nonblock) 4650 4600 flags |= MSG_DONTWAIT; 4601 + 4602 + if (flags & MSG_WAITALL) 4603 + min_ret = iov_iter_count(&kmsg->msg.msg_iter); 4651 4604 4652 4605 ret = __sys_recvmsg_sock(sock, &kmsg->msg, 
req->sr_msg.umsg, 4653 4606 kmsg->uaddr, flags); ··· 4665 4612 if (kmsg->free_iov) 4666 4613 kfree(kmsg->free_iov); 4667 4614 req->flags &= ~REQ_F_NEED_CLEANUP; 4668 - if (ret < 0) 4615 + if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)))) 4669 4616 req_set_fail_links(req); 4670 4617 __io_req_complete(req, issue_flags, ret, cflags); 4671 4618 return 0; ··· 4680 4627 struct socket *sock; 4681 4628 struct iovec iov; 4682 4629 unsigned flags; 4630 + int min_ret = 0; 4683 4631 int ret, cflags = 0; 4684 4632 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; 4685 4633 ··· 4706 4652 msg.msg_iocb = NULL; 4707 4653 msg.msg_flags = 0; 4708 4654 4709 - flags = req->sr_msg.msg_flags; 4655 + flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; 4710 4656 if (flags & MSG_DONTWAIT) 4711 4657 req->flags |= REQ_F_NOWAIT; 4712 4658 else if (force_nonblock) 4713 4659 flags |= MSG_DONTWAIT; 4660 + 4661 + if (flags & MSG_WAITALL) 4662 + min_ret = iov_iter_count(&msg.msg_iter); 4714 4663 4715 4664 ret = sock_recvmsg(sock, &msg, flags); 4716 4665 if (force_nonblock && ret == -EAGAIN) ··· 4723 4666 out_free: 4724 4667 if (req->flags & REQ_F_BUFFER_SELECTED) 4725 4668 cflags = io_put_recv_kbuf(req); 4726 - if (ret < 0) 4669 + if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)))) 4727 4670 req_set_fail_links(req); 4728 4671 __io_req_complete(req, issue_flags, ret, cflags); 4729 4672 return 0; ··· 6261 6204 spin_unlock_irqrestore(&ctx->completion_lock, flags); 6262 6205 6263 6206 if (prev) { 6264 - req_set_fail_links(prev); 6265 6207 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); 6266 6208 io_put_req_deferred(prev, 1); 6267 6209 } else { ··· 6750 6694 set_cpus_allowed_ptr(current, cpu_online_mask); 6751 6695 current->flags |= PF_NO_SETAFFINITY; 6752 6696 6753 - down_read(&sqd->rw_lock); 6754 - 6697 + mutex_lock(&sqd->lock); 6755 6698 while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) { 6756 
6699 int ret; 6757 6700 bool cap_entries, sqt_spin, needs_sched; 6758 6701 6759 6702 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) { 6760 - up_read(&sqd->rw_lock); 6703 + mutex_unlock(&sqd->lock); 6761 6704 cond_resched(); 6762 - down_read(&sqd->rw_lock); 6705 + mutex_lock(&sqd->lock); 6763 6706 io_run_task_work(); 6707 + io_run_task_work_head(&sqd->park_task_work); 6764 6708 timeout = jiffies + sqd->sq_thread_idle; 6765 6709 continue; 6766 6710 } ··· 6806 6750 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 6807 6751 io_ring_set_wakeup_flag(ctx); 6808 6752 6809 - up_read(&sqd->rw_lock); 6753 + mutex_unlock(&sqd->lock); 6810 6754 schedule(); 6811 - down_read(&sqd->rw_lock); 6755 + try_to_freeze(); 6756 + mutex_lock(&sqd->lock); 6812 6757 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 6813 6758 io_ring_clear_wakeup_flag(ctx); 6814 6759 } 6815 6760 6816 6761 finish_wait(&sqd->wait, &wait); 6762 + io_run_task_work_head(&sqd->park_task_work); 6817 6763 timeout = jiffies + sqd->sq_thread_idle; 6818 6764 } 6819 - up_read(&sqd->rw_lock); 6820 - down_write(&sqd->rw_lock); 6821 - /* 6822 - * someone may have parked and added a cancellation task_work, run 6823 - * it first because we don't want it in io_uring_cancel_sqpoll() 6824 - */ 6825 - io_run_task_work(); 6826 6765 6827 6766 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 6828 6767 io_uring_cancel_sqpoll(ctx); 6829 6768 sqd->thread = NULL; 6830 6769 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 6831 6770 io_ring_set_wakeup_flag(ctx); 6832 - up_write(&sqd->rw_lock); 6771 + mutex_unlock(&sqd->lock); 6833 6772 6834 6773 io_run_task_work(); 6774 + io_run_task_work_head(&sqd->park_task_work); 6835 6775 complete(&sqd->exited); 6836 6776 do_exit(0); 6837 6777 } ··· 7127 7075 } 7128 7076 7129 7077 static void io_sq_thread_unpark(struct io_sq_data *sqd) 7130 - __releases(&sqd->rw_lock) 7078 + __releases(&sqd->lock) 7131 7079 { 7132 7080 WARN_ON_ONCE(sqd->thread == current); 7133 7081 7082 + /* 7083 + * Do 
the dance but not conditional clear_bit() because it'd race with 7084 + * other threads incrementing park_pending and setting the bit. 7085 + */ 7134 7086 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); 7135 - up_write(&sqd->rw_lock); 7087 + if (atomic_dec_return(&sqd->park_pending)) 7088 + set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); 7089 + mutex_unlock(&sqd->lock); 7136 7090 } 7137 7091 7138 7092 static void io_sq_thread_park(struct io_sq_data *sqd) 7139 - __acquires(&sqd->rw_lock) 7093 + __acquires(&sqd->lock) 7140 7094 { 7141 7095 WARN_ON_ONCE(sqd->thread == current); 7142 7096 7097 + atomic_inc(&sqd->park_pending); 7143 7098 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); 7144 - down_write(&sqd->rw_lock); 7145 - /* set again for consistency, in case concurrent parks are happening */ 7146 - set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); 7099 + mutex_lock(&sqd->lock); 7147 7100 if (sqd->thread) 7148 7101 wake_up_process(sqd->thread); 7149 7102 } ··· 7157 7100 { 7158 7101 WARN_ON_ONCE(sqd->thread == current); 7159 7102 7160 - down_write(&sqd->rw_lock); 7103 + mutex_lock(&sqd->lock); 7161 7104 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); 7162 7105 if (sqd->thread) 7163 7106 wake_up_process(sqd->thread); 7164 - up_write(&sqd->rw_lock); 7107 + mutex_unlock(&sqd->lock); 7165 7108 wait_for_completion(&sqd->exited); 7166 7109 } 7167 7110 7168 7111 static void io_put_sq_data(struct io_sq_data *sqd) 7169 7112 { 7170 7113 if (refcount_dec_and_test(&sqd->refs)) { 7114 + WARN_ON_ONCE(atomic_read(&sqd->park_pending)); 7115 + 7171 7116 io_sq_thread_stop(sqd); 7172 7117 kfree(sqd); 7173 7118 } ··· 7243 7184 if (!sqd) 7244 7185 return ERR_PTR(-ENOMEM); 7245 7186 7187 + atomic_set(&sqd->park_pending, 0); 7246 7188 refcount_set(&sqd->refs, 1); 7247 7189 INIT_LIST_HEAD(&sqd->ctx_list); 7248 - init_rwsem(&sqd->rw_lock); 7190 + mutex_init(&sqd->lock); 7249 7191 init_waitqueue_head(&sqd->wait); 7250 7192 init_completion(&sqd->exited); 7251 7193 return sqd; ··· 7926 7866 7927 
7867 ret = 0; 7928 7868 io_sq_thread_park(sqd); 7869 + list_add(&ctx->sqd_list, &sqd->ctx_list); 7870 + io_sqd_update_thread_idle(sqd); 7929 7871 /* don't attach to a dying SQPOLL thread, would be racy */ 7930 - if (attached && !sqd->thread) { 7872 + if (attached && !sqd->thread) 7931 7873 ret = -ENXIO; 7932 - } else { 7933 - list_add(&ctx->sqd_list, &sqd->ctx_list); 7934 - io_sqd_update_thread_idle(sqd); 7935 - } 7936 7874 io_sq_thread_unpark(sqd); 7937 7875 7938 - if (ret < 0) { 7939 - io_put_sq_data(sqd); 7940 - ctx->sq_data = NULL; 7941 - return ret; 7942 - } else if (attached) { 7876 + if (ret < 0) 7877 + goto err; 7878 + if (attached) 7943 7879 return 0; 7944 - } 7945 7880 7946 7881 if (p->flags & IORING_SETUP_SQ_AFF) { 7947 7882 int cpu = p->sq_thread_cpu; ··· 8387 8332 return -ENXIO; 8388 8333 } 8389 8334 8390 - static int __io_destroy_buffers(int id, void *p, void *data) 8391 - { 8392 - struct io_ring_ctx *ctx = data; 8393 - struct io_buffer *buf = p; 8394 - 8395 - __io_remove_buffers(ctx, buf, id, -1U); 8396 - return 0; 8397 - } 8398 - 8399 8335 static void io_destroy_buffers(struct io_ring_ctx *ctx) 8400 8336 { 8401 - idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx); 8402 - idr_destroy(&ctx->io_buffer_idr); 8337 + struct io_buffer *buf; 8338 + unsigned long index; 8339 + 8340 + xa_for_each(&ctx->io_buffers, index, buf) 8341 + __io_remove_buffers(ctx, buf, index, -1U); 8403 8342 } 8404 8343 8405 8344 static void io_req_cache_free(struct list_head *list, struct task_struct *tsk) ··· 8435 8386 { 8436 8387 /* 8437 8388 * Some may use context even when all refs and requests have been put, 8438 - * and they are free to do so while still holding uring_lock, see 8439 - * __io_req_task_submit(). Wait for them to finish. 8389 + * and they are free to do so while still holding uring_lock or 8390 + * completion_lock, see __io_req_task_submit(). Wait for them to finish. 
8440 8391 */ 8441 8392 mutex_lock(&ctx->uring_lock); 8442 8393 mutex_unlock(&ctx->uring_lock); 8394 + spin_lock_irq(&ctx->completion_lock); 8395 + spin_unlock_irq(&ctx->completion_lock); 8443 8396 8444 8397 io_sq_thread_finish(ctx); 8445 8398 io_sqe_buffers_unregister(ctx); ··· 8529 8478 return -EINVAL; 8530 8479 } 8531 8480 8532 - static bool io_run_ctx_fallback(struct io_ring_ctx *ctx) 8481 + static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx) 8533 8482 { 8534 - struct callback_head *work, *next; 8535 - bool executed = false; 8536 - 8537 - do { 8538 - work = xchg(&ctx->exit_task_work, NULL); 8539 - if (!work) 8540 - break; 8541 - 8542 - do { 8543 - next = work->next; 8544 - work->func(work); 8545 - work = next; 8546 - cond_resched(); 8547 - } while (work); 8548 - executed = true; 8549 - } while (1); 8550 - 8551 - return executed; 8483 + return io_run_task_work_head(&ctx->exit_task_work); 8552 8484 } 8553 8485 8554 8486 struct io_tctx_exit { ··· 8613 8579 xa_for_each(&ctx->personalities, index, creds) 8614 8580 io_unregister_personality(ctx, index); 8615 8581 mutex_unlock(&ctx->uring_lock); 8582 + 8583 + /* prevent SQPOLL from submitting new requests */ 8584 + if (ctx->sq_data) { 8585 + io_sq_thread_park(ctx->sq_data); 8586 + list_del_init(&ctx->sqd_list); 8587 + io_sqd_update_thread_idle(ctx->sq_data); 8588 + io_sq_thread_unpark(ctx->sq_data); 8589 + } 8616 8590 8617 8591 io_kill_timeouts(ctx, NULL, NULL); 8618 8592 io_poll_remove_all(ctx, NULL, NULL); ··· 8921 8879 if (task) { 8922 8880 init_completion(&work.completion); 8923 8881 init_task_work(&work.task_work, io_sqpoll_cancel_cb); 8924 - WARN_ON_ONCE(task_work_add(task, &work.task_work, TWA_SIGNAL)); 8882 + io_task_work_add_head(&sqd->park_task_work, &work.task_work); 8925 8883 wake_up_process(task); 8926 8884 } 8927 8885 io_sq_thread_unpark(sqd);
+10
fs/iomap/swapfile.c
··· 170 170 return ret; 171 171 } 172 172 173 + /* 174 + * If this swapfile doesn't contain even a single page-aligned 175 + * contiguous range of blocks, reject this useless swapfile to 176 + * prevent confusion later on. 177 + */ 178 + if (isi.nr_pages == 0) { 179 + pr_warn("swapon: Cannot find a single usable page in file.\n"); 180 + return -EINVAL; 181 + } 182 + 173 183 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; 174 184 sis->max = isi.nr_pages; 175 185 sis->pages = isi.nr_pages - 1;
-3
fs/locks.c
··· 1808 1808 1809 1809 if (flags & FL_LAYOUT) 1810 1810 return 0; 1811 - if (flags & FL_DELEG) 1812 - /* We leave these checks to the caller. */ 1813 - return 0; 1814 1811 1815 1812 if (arg == F_RDLCK) 1816 1813 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
+1
fs/nfsd/Kconfig
··· 73 73 select NFSD_V3 74 74 select FS_POSIX_ACL 75 75 select SUNRPC_GSS 76 + select CRYPTO 76 77 select CRYPTO_MD5 77 78 select CRYPTO_SHA256 78 79 select GRACE_PERIOD
+2
fs/nfsd/filecache.c
··· 898 898 continue; 899 899 if (!nfsd_match_cred(nf->nf_cred, current_cred())) 900 900 continue; 901 + if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) 902 + continue; 901 903 if (nfsd_file_get(nf) != NULL) 902 904 return nf; 903 905 }
+1
fs/nfsd/nfs4callback.c
··· 1189 1189 switch (task->tk_status) { 1190 1190 case -EIO: 1191 1191 case -ETIMEDOUT: 1192 + case -EACCES: 1192 1193 nfsd4_mark_cb_down(clp, task->tk_status); 1193 1194 } 1194 1195 break;
+1 -1
fs/nfsd/nfs4proc.c
··· 1302 1302 struct nfsd_file *dst) 1303 1303 { 1304 1304 nfs42_ssc_close(src->nf_file); 1305 - /* 'src' is freed by nfsd4_do_async_copy */ 1305 + fput(src->nf_file); 1306 1306 nfsd_file_put(dst); 1307 1307 mntput(ss_mnt); 1308 1308 }
+15 -40
fs/nfsd/nfs4state.c
··· 4940 4940 return fl; 4941 4941 } 4942 4942 4943 - static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, 4944 - struct nfs4_file *fp) 4945 - { 4946 - struct nfs4_clnt_odstate *co; 4947 - struct file *f = fp->fi_deleg_file->nf_file; 4948 - struct inode *ino = locks_inode(f); 4949 - int writes = atomic_read(&ino->i_writecount); 4950 - 4951 - if (fp->fi_fds[O_WRONLY]) 4952 - writes--; 4953 - if (fp->fi_fds[O_RDWR]) 4954 - writes--; 4955 - if (writes > 0) 4956 - return -EAGAIN; 4957 - spin_lock(&fp->fi_lock); 4958 - list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { 4959 - if (co->co_client != clp) { 4960 - spin_unlock(&fp->fi_lock); 4961 - return -EAGAIN; 4962 - } 4963 - } 4964 - spin_unlock(&fp->fi_lock); 4965 - return 0; 4966 - } 4967 - 4968 4943 static struct nfs4_delegation * 4969 4944 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, 4970 4945 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) ··· 4959 4984 4960 4985 nf = find_readable_file(fp); 4961 4986 if (!nf) { 4962 - /* 4963 - * We probably could attempt another open and get a read 4964 - * delegation, but for now, don't bother until the 4965 - * client actually sends us one. 
4966 - */ 4967 - return ERR_PTR(-EAGAIN); 4987 + /* We should always have a readable file here */ 4988 + WARN_ON_ONCE(1); 4989 + return ERR_PTR(-EBADF); 4968 4990 } 4969 4991 spin_lock(&state_lock); 4970 4992 spin_lock(&fp->fi_lock); ··· 4991 5019 if (!fl) 4992 5020 goto out_clnt_odstate; 4993 5021 4994 - status = nfsd4_check_conflicting_opens(clp, fp); 4995 - if (status) { 4996 - locks_free_lock(fl); 4997 - goto out_clnt_odstate; 4998 - } 4999 5022 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); 5000 5023 if (fl) 5001 5024 locks_free_lock(fl); 5002 - if (status) 5003 - goto out_clnt_odstate; 5004 - status = nfsd4_check_conflicting_opens(clp, fp); 5005 5025 if (status) 5006 5026 goto out_clnt_odstate; 5007 5027 ··· 5076 5112 if (locks_in_grace(clp->net)) 5077 5113 goto out_no_deleg; 5078 5114 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 5115 + goto out_no_deleg; 5116 + /* 5117 + * Also, if the file was opened for write or 5118 + * create, there's a good chance the client's 5119 + * about to write to it, resulting in an 5120 + * immediate recall (since we don't support 5121 + * write delegations): 5122 + */ 5123 + if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 5124 + goto out_no_deleg; 5125 + if (open->op_create == NFS4_OPEN_CREATE) 5079 5126 goto out_no_deleg; 5080 5127 break; 5081 5128 default: ··· 5364 5389 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { 5365 5390 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); 5366 5391 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID && 5367 - cps->cpntf_time > cutoff) 5392 + cps->cpntf_time < cutoff) 5368 5393 _free_cpntf_state_locked(nn, cps); 5369 5394 } 5370 5395 spin_unlock(&nn->s2s_cp_lock);
+4 -6
fs/select.c
··· 1055 1055 1056 1056 ret = do_sys_poll(ufds, nfds, to); 1057 1057 1058 - if (ret == -ERESTARTNOHAND) { 1059 - restart_block->fn = do_restart_poll; 1060 - ret = -ERESTART_RESTARTBLOCK; 1061 - } 1058 + if (ret == -ERESTARTNOHAND) 1059 + ret = set_restart_fn(restart_block, do_restart_poll); 1060 + 1062 1061 return ret; 1063 1062 } 1064 1063 ··· 1079 1080 struct restart_block *restart_block; 1080 1081 1081 1082 restart_block = &current->restart_block; 1082 - restart_block->fn = do_restart_poll; 1083 1083 restart_block->poll.ufds = ufds; 1084 1084 restart_block->poll.nfds = nfds; 1085 1085 ··· 1089 1091 } else 1090 1092 restart_block->poll.has_timeout = 0; 1091 1093 1092 - ret = -ERESTART_RESTARTBLOCK; 1094 + ret = set_restart_fn(restart_block, do_restart_poll); 1093 1095 } 1094 1096 return ret; 1095 1097 }
+8 -6
fs/xfs/xfs_inode.c
··· 1007 1007 /* 1008 1008 * Make sure that we have allocated dquot(s) on disk. 1009 1009 */ 1010 - error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, 1011 - XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 1012 - &udqp, &gdqp, &pdqp); 1010 + error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns), 1011 + fsgid_into_mnt(mnt_userns), prid, 1012 + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 1013 + &udqp, &gdqp, &pdqp); 1013 1014 if (error) 1014 1015 return error; 1015 1016 ··· 1158 1157 /* 1159 1158 * Make sure that we have allocated dquot(s) on disk. 1160 1159 */ 1161 - error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, 1162 - XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 1163 - &udqp, &gdqp, &pdqp); 1160 + error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns), 1161 + fsgid_into_mnt(mnt_userns), prid, 1162 + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 1163 + &udqp, &gdqp, &pdqp); 1164 1164 if (error) 1165 1165 return error; 1166 1166
+6
fs/xfs/xfs_itable.c
··· 168 168 }; 169 169 int error; 170 170 171 + if (breq->mnt_userns != &init_user_ns) { 172 + xfs_warn_ratelimited(breq->mp, 173 + "bulkstat not supported inside of idmapped mounts."); 174 + return -EINVAL; 175 + } 176 + 171 177 ASSERT(breq->icount == 1); 172 178 173 179 bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
+44 -46
fs/xfs/xfs_mount.c
··· 635 635 } 636 636 637 637 /* 638 + * Flush and reclaim dirty inodes in preparation for unmount. Inodes and 639 + * internal inode structures can be sitting in the CIL and AIL at this point, 640 + * so we need to unpin them, write them back and/or reclaim them before unmount 641 + * can proceed. 642 + * 643 + * An inode cluster that has been freed can have its buffer still pinned in 644 + * memory because the transaction is still sitting in a iclog. The stale inodes 645 + * on that buffer will be pinned to the buffer until the transaction hits the 646 + * disk and the callbacks run. Pushing the AIL will skip the stale inodes and 647 + * may never see the pinned buffer, so nothing will push out the iclog and 648 + * unpin the buffer. 649 + * 650 + * Hence we need to force the log to unpin everything first. However, log 651 + * forces don't wait for the discards they issue to complete, so we have to 652 + * explicitly wait for them to complete here as well. 653 + * 654 + * Then we can tell the world we are unmounting so that error handling knows 655 + * that the filesystem is going away and we should error out anything that we 656 + * have been retrying in the background. This will prevent never-ending 657 + * retries in AIL pushing from hanging the unmount. 658 + * 659 + * Finally, we can push the AIL to clean all the remaining dirty objects, then 660 + * reclaim the remaining inodes that are still in memory at this point in time. 
661 + */ 662 + static void 663 + xfs_unmount_flush_inodes( 664 + struct xfs_mount *mp) 665 + { 666 + xfs_log_force(mp, XFS_LOG_SYNC); 667 + xfs_extent_busy_wait_all(mp); 668 + flush_workqueue(xfs_discard_wq); 669 + 670 + mp->m_flags |= XFS_MOUNT_UNMOUNTING; 671 + 672 + xfs_ail_push_all_sync(mp->m_ail); 673 + cancel_delayed_work_sync(&mp->m_reclaim_work); 674 + xfs_reclaim_inodes(mp); 675 + xfs_health_unmount(mp); 676 + } 677 + 678 + /* 638 679 * This function does the following on an initial mount of a file system: 639 680 * - reads the superblock from disk and init the mount struct 640 681 * - if we're a 32-bit kernel, do a size check on the superblock ··· 1049 1008 /* Clean out dquots that might be in memory after quotacheck. */ 1050 1009 xfs_qm_unmount(mp); 1051 1010 /* 1052 - * Cancel all delayed reclaim work and reclaim the inodes directly. 1011 + * Flush all inode reclamation work and flush the log. 1053 1012 * We have to do this /after/ rtunmount and qm_unmount because those 1054 1013 * two will have scheduled delayed reclaim for the rt/quota inodes. 1055 1014 * ··· 1059 1018 * qm_unmount_quotas and therefore rely on qm_unmount to release the 1060 1019 * quota inodes. 1061 1020 */ 1062 - cancel_delayed_work_sync(&mp->m_reclaim_work); 1063 - xfs_reclaim_inodes(mp); 1064 - xfs_health_unmount(mp); 1021 + xfs_unmount_flush_inodes(mp); 1065 1022 out_log_dealloc: 1066 - mp->m_flags |= XFS_MOUNT_UNMOUNTING; 1067 1023 xfs_log_mount_cancel(mp); 1068 1024 out_fail_wait: 1069 1025 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) ··· 1101 1063 xfs_rtunmount_inodes(mp); 1102 1064 xfs_irele(mp->m_rootip); 1103 1065 1104 - /* 1105 - * We can potentially deadlock here if we have an inode cluster 1106 - * that has been freed has its buffer still pinned in memory because 1107 - * the transaction is still sitting in a iclog. The stale inodes 1108 - * on that buffer will be pinned to the buffer until the 1109 - * transaction hits the disk and the callbacks run. 
Pushing the AIL will 1110 - * skip the stale inodes and may never see the pinned buffer, so 1111 - * nothing will push out the iclog and unpin the buffer. Hence we 1112 - * need to force the log here to ensure all items are flushed into the 1113 - * AIL before we go any further. 1114 - */ 1115 - xfs_log_force(mp, XFS_LOG_SYNC); 1116 - 1117 - /* 1118 - * Wait for all busy extents to be freed, including completion of 1119 - * any discard operation. 1120 - */ 1121 - xfs_extent_busy_wait_all(mp); 1122 - flush_workqueue(xfs_discard_wq); 1123 - 1124 - /* 1125 - * We now need to tell the world we are unmounting. This will allow 1126 - * us to detect that the filesystem is going away and we should error 1127 - * out anything that we have been retrying in the background. This will 1128 - * prevent neverending retries in AIL pushing from hanging the unmount. 1129 - */ 1130 - mp->m_flags |= XFS_MOUNT_UNMOUNTING; 1131 - 1132 - /* 1133 - * Flush all pending changes from the AIL. 1134 - */ 1135 - xfs_ail_push_all_sync(mp->m_ail); 1136 - 1137 - /* 1138 - * Reclaim all inodes. At this point there should be no dirty inodes and 1139 - * none should be pinned or locked. Stop background inode reclaim here 1140 - * if it is still running. 1141 - */ 1142 - cancel_delayed_work_sync(&mp->m_reclaim_work); 1143 - xfs_reclaim_inodes(mp); 1144 - xfs_health_unmount(mp); 1066 + xfs_unmount_flush_inodes(mp); 1145 1067 1146 1068 xfs_qm_unmount(mp); 1147 1069
+2 -1
fs/xfs/xfs_symlink.c
··· 182 182 /* 183 183 * Make sure that we have allocated dquot(s) on disk. 184 184 */ 185 - error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, 185 + error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns), 186 + fsgid_into_mnt(mnt_userns), prid, 186 187 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 187 188 &udqp, &gdqp, &pdqp); 188 189 if (error)
+87 -14
fs/zonefs/super.c
··· 165 165 return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops); 166 166 } 167 167 168 + static int zonefs_swap_activate(struct swap_info_struct *sis, 169 + struct file *swap_file, sector_t *span) 170 + { 171 + struct inode *inode = file_inode(swap_file); 172 + struct zonefs_inode_info *zi = ZONEFS_I(inode); 173 + 174 + if (zi->i_ztype != ZONEFS_ZTYPE_CNV) { 175 + zonefs_err(inode->i_sb, 176 + "swap file: not a conventional zone file\n"); 177 + return -EINVAL; 178 + } 179 + 180 + return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops); 181 + } 182 + 168 183 static const struct address_space_operations zonefs_file_aops = { 169 184 .readpage = zonefs_readpage, 170 185 .readahead = zonefs_readahead, ··· 192 177 .is_partially_uptodate = iomap_is_partially_uptodate, 193 178 .error_remove_page = generic_error_remove_page, 194 179 .direct_IO = noop_direct_IO, 180 + .swap_activate = zonefs_swap_activate, 195 181 }; 196 182 197 183 static void zonefs_update_stats(struct inode *inode, loff_t new_isize) ··· 744 728 } 745 729 746 730 /* 731 + * Do not exceed the LFS limits nor the file zone size. If pos is under the 732 + * limit it becomes a short access. If it exceeds the limit, return -EFBIG. 
733 + */ 734 + static loff_t zonefs_write_check_limits(struct file *file, loff_t pos, 735 + loff_t count) 736 + { 737 + struct inode *inode = file_inode(file); 738 + struct zonefs_inode_info *zi = ZONEFS_I(inode); 739 + loff_t limit = rlimit(RLIMIT_FSIZE); 740 + loff_t max_size = zi->i_max_size; 741 + 742 + if (limit != RLIM_INFINITY) { 743 + if (pos >= limit) { 744 + send_sig(SIGXFSZ, current, 0); 745 + return -EFBIG; 746 + } 747 + count = min(count, limit - pos); 748 + } 749 + 750 + if (!(file->f_flags & O_LARGEFILE)) 751 + max_size = min_t(loff_t, MAX_NON_LFS, max_size); 752 + 753 + if (unlikely(pos >= max_size)) 754 + return -EFBIG; 755 + 756 + return min(count, max_size - pos); 757 + } 758 + 759 + static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from) 760 + { 761 + struct file *file = iocb->ki_filp; 762 + struct inode *inode = file_inode(file); 763 + struct zonefs_inode_info *zi = ZONEFS_I(inode); 764 + loff_t count; 765 + 766 + if (IS_SWAPFILE(inode)) 767 + return -ETXTBSY; 768 + 769 + if (!iov_iter_count(from)) 770 + return 0; 771 + 772 + if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) 773 + return -EINVAL; 774 + 775 + if (iocb->ki_flags & IOCB_APPEND) { 776 + if (zi->i_ztype != ZONEFS_ZTYPE_SEQ) 777 + return -EINVAL; 778 + mutex_lock(&zi->i_truncate_mutex); 779 + iocb->ki_pos = zi->i_wpoffset; 780 + mutex_unlock(&zi->i_truncate_mutex); 781 + } 782 + 783 + count = zonefs_write_check_limits(file, iocb->ki_pos, 784 + iov_iter_count(from)); 785 + if (count < 0) 786 + return count; 787 + 788 + iov_iter_truncate(from, count); 789 + return iov_iter_count(from); 790 + } 791 + 792 + /* 747 793 * Handle direct writes. For sequential zone files, this is the only possible 748 794 * write path. For these files, check that the user is issuing writes 749 795 * sequentially from the end of the file. 
This code assumes that the block layer ··· 822 744 struct super_block *sb = inode->i_sb; 823 745 bool sync = is_sync_kiocb(iocb); 824 746 bool append = false; 825 - size_t count; 826 - ssize_t ret; 747 + ssize_t ret, count; 827 748 828 749 /* 829 750 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT ··· 840 763 inode_lock(inode); 841 764 } 842 765 843 - ret = generic_write_checks(iocb, from); 844 - if (ret <= 0) 766 + count = zonefs_write_checks(iocb, from); 767 + if (count <= 0) { 768 + ret = count; 845 769 goto inode_unlock; 846 - 847 - iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos); 848 - count = iov_iter_count(from); 770 + } 849 771 850 772 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) { 851 773 ret = -EINVAL; ··· 904 828 inode_lock(inode); 905 829 } 906 830 907 - ret = generic_write_checks(iocb, from); 831 + ret = zonefs_write_checks(iocb, from); 908 832 if (ret <= 0) 909 833 goto inode_unlock; 910 - 911 - iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos); 912 834 913 835 ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops); 914 836 if (ret > 0) ··· 1040 966 1041 967 mutex_lock(&zi->i_truncate_mutex); 1042 968 1043 - zi->i_wr_refcnt++; 1044 - if (zi->i_wr_refcnt == 1) { 1045 - 969 + if (!zi->i_wr_refcnt) { 1046 970 if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) { 1047 971 atomic_dec(&sbi->s_open_zones); 1048 972 ret = -EBUSY; ··· 1050 978 if (i_size_read(inode) < zi->i_max_size) { 1051 979 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN); 1052 980 if (ret) { 1053 - zi->i_wr_refcnt--; 1054 981 atomic_dec(&sbi->s_open_zones); 1055 982 goto unlock; 1056 983 } 1057 984 zi->i_flags |= ZONEFS_ZONE_OPEN; 1058 985 } 1059 986 } 987 + 988 + zi->i_wr_refcnt++; 1060 989 1061 990 unlock: 1062 991 mutex_unlock(&zi->i_truncate_mutex);
+4 -2
include/drm/ttm/ttm_bo_api.h
··· 612 612 static inline void ttm_bo_unpin(struct ttm_buffer_object *bo) 613 613 { 614 614 dma_resv_assert_held(bo->base.resv); 615 - WARN_ON_ONCE(!bo->pin_count); 616 615 WARN_ON_ONCE(!kref_read(&bo->kref)); 617 - --bo->pin_count; 616 + if (bo->pin_count) 617 + --bo->pin_count; 618 + else 619 + WARN_ON_ONCE(true); 618 620 } 619 621 620 622 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+11
include/linux/amba/bus.h
··· 105 105 #define amba_get_drvdata(d) dev_get_drvdata(&d->dev) 106 106 #define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) 107 107 108 + #ifdef CONFIG_ARM_AMBA 108 109 int amba_driver_register(struct amba_driver *); 109 110 void amba_driver_unregister(struct amba_driver *); 111 + #else 112 + static inline int amba_driver_register(struct amba_driver *drv) 113 + { 114 + return -EINVAL; 115 + } 116 + static inline void amba_driver_unregister(struct amba_driver *drv) 117 + { 118 + } 119 + #endif 120 + 110 121 struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); 111 122 void amba_device_put(struct amba_device *); 112 123 int amba_device_add(struct amba_device *, struct resource *);
+4 -2
include/linux/efi.h
··· 72 72 */ 73 73 typedef guid_t efi_guid_t __aligned(__alignof__(u32)); 74 74 75 - #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ 76 - GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) 75 + #define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \ 76 + (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ 77 + (b) & 0xff, ((b) >> 8) & 0xff, \ 78 + (c) & 0xff, ((c) >> 8) & 0xff, d } } 77 79 78 80 /* 79 81 * Generic EFI table header
-25
include/linux/io_uring.h
··· 5 5 #include <linux/sched.h> 6 6 #include <linux/xarray.h> 7 7 8 - struct io_wq_work_node { 9 - struct io_wq_work_node *next; 10 - }; 11 - 12 - struct io_wq_work_list { 13 - struct io_wq_work_node *first; 14 - struct io_wq_work_node *last; 15 - }; 16 - 17 - struct io_uring_task { 18 - /* submission side */ 19 - struct xarray xa; 20 - struct wait_queue_head wait; 21 - void *last; 22 - void *io_wq; 23 - struct percpu_counter inflight; 24 - atomic_t in_idle; 25 - bool sqpoll; 26 - 27 - spinlock_t task_lock; 28 - struct io_wq_work_list task_list; 29 - unsigned long task_state; 30 - struct callback_head task_work; 31 - }; 32 - 33 8 #if defined(CONFIG_IO_URING) 34 9 struct sock *io_uring_get_socket(struct file *file); 35 10 void __io_uring_task_cancel(void);
-3
include/linux/module.h
··· 30 30 #include <linux/percpu.h> 31 31 #include <asm/module.h> 32 32 33 - /* Not Yet Implemented */ 34 - #define MODULE_SUPPORTED_DEVICE(name) 35 - 36 33 #define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN 37 34 38 35 struct modversion_info {
+1
include/linux/restart_block.h
··· 23 23 * System call restart block. 24 24 */ 25 25 struct restart_block { 26 + unsigned long arch_data; 26 27 long (*fn)(struct restart_block *); 27 28 union { 28 29 /* For futex_wait and futex_wait_requeue_pi */
-1
include/linux/sunrpc/svc_rdma.h
··· 104 104 105 105 wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ 106 106 unsigned long sc_flags; 107 - u32 sc_pending_recvs; 108 107 struct list_head sc_read_complete_q; 109 108 struct work_struct sc_work; 110 109
+13
include/linux/thread_info.h
··· 11 11 #include <linux/types.h> 12 12 #include <linux/bug.h> 13 13 #include <linux/restart_block.h> 14 + #include <linux/errno.h> 14 15 15 16 #ifdef CONFIG_THREAD_INFO_IN_TASK 16 17 /* ··· 59 58 #include <asm/thread_info.h> 60 59 61 60 #ifdef __KERNEL__ 61 + 62 + #ifndef arch_set_restart_data 63 + #define arch_set_restart_data(restart) do { } while (0) 64 + #endif 65 + 66 + static inline long set_restart_fn(struct restart_block *restart, 67 + long (*fn)(struct restart_block *)) 68 + { 69 + restart->fn = fn; 70 + arch_set_restart_data(restart); 71 + return -ERESTART_RESTARTBLOCK; 72 + } 62 73 63 74 #ifndef THREAD_ALIGN 64 75 #define THREAD_ALIGN THREAD_SIZE
+2
include/linux/usb_usual.h
··· 86 86 /* lies about caching, so always sync */ \ 87 87 US_FLAG(NO_SAME, 0x40000000) \ 88 88 /* Cannot handle WRITE_SAME */ \ 89 + US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \ 90 + /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \ 89 91 90 92 #define US_FLAG(name, value) US_FL_##name = value , 91 93 enum { US_DO_ALL_FLAGS };
+5 -5
include/linux/vdpa.h
··· 250 250 251 251 struct vdpa_device *__vdpa_alloc_device(struct device *parent, 252 252 const struct vdpa_config_ops *config, 253 - int nvqs, size_t size, const char *name); 253 + size_t size, const char *name); 254 254 255 - #define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \ 255 + #define vdpa_alloc_device(dev_struct, member, parent, config, name) \ 256 256 container_of(__vdpa_alloc_device( \ 257 - parent, config, nvqs, \ 257 + parent, config, \ 258 258 sizeof(dev_struct) + \ 259 259 BUILD_BUG_ON_ZERO(offsetof( \ 260 260 dev_struct, member)), name), \ 261 261 dev_struct, member) 262 262 263 - int vdpa_register_device(struct vdpa_device *vdev); 263 + int vdpa_register_device(struct vdpa_device *vdev, int nvqs); 264 264 void vdpa_unregister_device(struct vdpa_device *vdev); 265 265 266 - int _vdpa_register_device(struct vdpa_device *vdev); 266 + int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); 267 267 void _vdpa_unregister_device(struct vdpa_device *vdev); 268 268 269 269 /**
-2
include/linux/virtio.h
··· 132 132 void virtio_break_device(struct virtio_device *dev); 133 133 134 134 void virtio_config_changed(struct virtio_device *dev); 135 - void virtio_config_disable(struct virtio_device *dev); 136 - void virtio_config_enable(struct virtio_device *dev); 137 135 int virtio_finalize_features(struct virtio_device *dev); 138 136 #ifdef CONFIG_PM_SLEEP 139 137 int virtio_device_freeze(struct virtio_device *dev);
+3 -2
include/linux/ww_mutex.h
··· 173 173 */ 174 174 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) 175 175 { 176 - #ifdef CONFIG_DEBUG_MUTEXES 176 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 177 177 mutex_release(&ctx->dep_map, _THIS_IP_); 178 - 178 + #endif 179 + #ifdef CONFIG_DEBUG_MUTEXES 179 180 DEBUG_LOCKS_WARN_ON(ctx->acquired); 180 181 if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) 181 182 /*
+3 -3
include/trace/events/workqueue.h
··· 30 30 TP_STRUCT__entry( 31 31 __field( void *, work ) 32 32 __field( void *, function) 33 - __field( const char *, workqueue) 33 + __string( workqueue, pwq->wq->name) 34 34 __field( unsigned int, req_cpu ) 35 35 __field( unsigned int, cpu ) 36 36 ), ··· 38 38 TP_fast_assign( 39 39 __entry->work = work; 40 40 __entry->function = work->func; 41 - __entry->workqueue = pwq->wq->name; 41 + __assign_str(workqueue, pwq->wq->name); 42 42 __entry->req_cpu = req_cpu; 43 43 __entry->cpu = pwq->pool->cpu; 44 44 ), 45 45 46 46 TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u", 47 - __entry->work, __entry->function, __entry->workqueue, 47 + __entry->work, __entry->function, __get_str(workqueue), 48 48 __entry->req_cpu, __entry->cpu) 49 49 ); 50 50
+2 -1
include/uapi/linux/fuse.h
··· 903 903 }; 904 904 905 905 /* Device ioctls: */ 906 - #define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t) 906 + #define FUSE_DEV_IOC_MAGIC 229 907 + #define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t) 907 908 908 909 struct fuse_lseek_in { 909 910 uint64_t fh;
-1
kernel/fork.c
··· 2444 2444 if (!IS_ERR(tsk)) { 2445 2445 sigfillset(&tsk->blocked); 2446 2446 sigdelsetmask(&tsk->blocked, sigmask(SIGKILL)); 2447 - tsk->flags |= PF_NOFREEZE; 2448 2447 } 2449 2448 return tsk; 2450 2449 }
+1 -1
kernel/freezer.c
··· 134 134 return false; 135 135 } 136 136 137 - if (!(p->flags & PF_KTHREAD)) 137 + if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) 138 138 fake_signal_wake_up(p); 139 139 else 140 140 wake_up_state(p, TASK_INTERRUPTIBLE);
+1 -2
kernel/futex.c
··· 2728 2728 goto out; 2729 2729 2730 2730 restart = &current->restart_block; 2731 - restart->fn = futex_wait_restart; 2732 2731 restart->futex.uaddr = uaddr; 2733 2732 restart->futex.val = val; 2734 2733 restart->futex.time = *abs_time; 2735 2734 restart->futex.bitset = bitset; 2736 2735 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; 2737 2736 2738 - ret = -ERESTART_RESTARTBLOCK; 2737 + ret = set_restart_fn(restart, futex_wait_restart); 2739 2738 2740 2739 out: 2741 2740 if (to) {
+2 -2
kernel/irq/irq_sim.c
··· 159 159 * irq_domain_create_sim - Create a new interrupt simulator irq_domain and 160 160 * allocate a range of dummy interrupts. 161 161 * 162 - * @fnode: struct fwnode_handle to be associated with this domain. 162 + * @fwnode: struct fwnode_handle to be associated with this domain. 163 163 * @num_irqs: Number of interrupts to allocate. 164 164 * 165 165 * On success: return a new irq_domain object. ··· 228 228 * a managed device. 229 229 * 230 230 * @dev: Device to initialize the simulator object for. 231 - * @fnode: struct fwnode_handle to be associated with this domain. 231 + * @fwnode: struct fwnode_handle to be associated with this domain. 232 232 * @num_irqs: Number of interrupts to allocate 233 233 * 234 234 * On success: return a new irq_domain object.
+4
kernel/irq/manage.c
··· 1142 1142 irqreturn_t ret; 1143 1143 1144 1144 local_bh_disable(); 1145 + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 1146 + local_irq_disable(); 1145 1147 ret = action->thread_fn(action->irq, action->dev_id); 1146 1148 if (ret == IRQ_HANDLED) 1147 1149 atomic_inc(&desc->threads_handled); 1148 1150 1149 1151 irq_finalize_oneshot(desc, action); 1152 + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 1153 + local_irq_enable(); 1150 1154 local_bh_enable(); 1151 1155 return ret; 1152 1156 }
+8
kernel/jump_label.c
··· 407 407 return false; 408 408 409 409 if (!kernel_text_address(jump_entry_code(entry))) { 410 + /* 411 + * This skips patching built-in __exit, which 412 + * is part of init_section_contains() but is 413 + * not part of kernel_text_address(). 414 + * 415 + * Skipping built-in __exit is fine since it 416 + * will never be executed. 417 + */ 410 418 WARN_ONCE(!jump_entry_is_init(entry), 411 419 "can't patch jump_label at %pS", 412 420 (void *)jump_entry_code(entry));
+14 -11
kernel/locking/mutex.c
··· 626 626 */ 627 627 static __always_inline bool 628 628 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 629 - const bool use_ww_ctx, struct mutex_waiter *waiter) 629 + struct mutex_waiter *waiter) 630 630 { 631 631 if (!waiter) { 632 632 /* ··· 702 702 #else 703 703 static __always_inline bool 704 704 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 705 - const bool use_ww_ctx, struct mutex_waiter *waiter) 705 + struct mutex_waiter *waiter) 706 706 { 707 707 return false; 708 708 } ··· 922 922 struct ww_mutex *ww; 923 923 int ret; 924 924 925 + if (!use_ww_ctx) 926 + ww_ctx = NULL; 927 + 925 928 might_sleep(); 926 929 927 930 #ifdef CONFIG_DEBUG_MUTEXES ··· 932 929 #endif 933 930 934 931 ww = container_of(lock, struct ww_mutex, base); 935 - if (use_ww_ctx && ww_ctx) { 932 + if (ww_ctx) { 936 933 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) 937 934 return -EALREADY; 938 935 ··· 949 946 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); 950 947 951 948 if (__mutex_trylock(lock) || 952 - mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) { 949 + mutex_optimistic_spin(lock, ww_ctx, NULL)) { 953 950 /* got the lock, yay! */ 954 951 lock_acquired(&lock->dep_map, ip); 955 - if (use_ww_ctx && ww_ctx) 952 + if (ww_ctx) 956 953 ww_mutex_set_context_fastpath(ww, ww_ctx); 957 954 preempt_enable(); 958 955 return 0; ··· 963 960 * After waiting to acquire the wait_lock, try again. 964 961 */ 965 962 if (__mutex_trylock(lock)) { 966 - if (use_ww_ctx && ww_ctx) 963 + if (ww_ctx) 967 964 __ww_mutex_check_waiters(lock, ww_ctx); 968 965 969 966 goto skip_wait; ··· 1016 1013 goto err; 1017 1014 } 1018 1015 1019 - if (use_ww_ctx && ww_ctx) { 1016 + if (ww_ctx) { 1020 1017 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); 1021 1018 if (ret) 1022 1019 goto err; ··· 1029 1026 * ww_mutex needs to always recheck its position since its waiter 1030 1027 * list is not FIFO ordered. 
1031 1028 */ 1032 - if ((use_ww_ctx && ww_ctx) || !first) { 1029 + if (ww_ctx || !first) { 1033 1030 first = __mutex_waiter_is_first(lock, &waiter); 1034 1031 if (first) 1035 1032 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); ··· 1042 1039 * or we must see its unlock and acquire. 1043 1040 */ 1044 1041 if (__mutex_trylock(lock) || 1045 - (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter))) 1042 + (first && mutex_optimistic_spin(lock, ww_ctx, &waiter))) 1046 1043 break; 1047 1044 1048 1045 spin_lock(&lock->wait_lock); ··· 1051 1048 acquired: 1052 1049 __set_current_state(TASK_RUNNING); 1053 1050 1054 - if (use_ww_ctx && ww_ctx) { 1051 + if (ww_ctx) { 1055 1052 /* 1056 1053 * Wound-Wait; we stole the lock (!first_waiter), check the 1057 1054 * waiters as anyone might want to wound us. ··· 1071 1068 /* got the lock - cleanup and rejoice! */ 1072 1069 lock_acquired(&lock->dep_map, ip); 1073 1070 1074 - if (use_ww_ctx && ww_ctx) 1071 + if (ww_ctx) 1075 1072 ww_mutex_lock_acquired(ww, ww_ctx); 1076 1073 1077 1074 spin_unlock(&lock->wait_lock);
-2
kernel/reboot.c
··· 244 244 void kernel_restart(char *cmd) 245 245 { 246 246 kernel_restart_prepare(cmd); 247 - if (pm_power_off_prepare) 248 - pm_power_off_prepare(); 249 247 migrate_to_reboot_cpu(); 250 248 syscore_shutdown(); 251 249 if (!cmd)
+5 -1
kernel/signal.c
··· 288 288 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); 289 289 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); 290 290 291 - if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) 291 + if (unlikely(fatal_signal_pending(task) || 292 + (task->flags & (PF_EXITING | PF_IO_WORKER)))) 292 293 return false; 293 294 294 295 if (mask & JOBCTL_STOP_SIGMASK) ··· 834 833 835 834 if (!valid_signal(sig)) 836 835 return -EINVAL; 836 + /* PF_IO_WORKER threads don't take any signals */ 837 + if (t->flags & PF_IO_WORKER) 838 + return -ESRCH; 837 839 838 840 if (!si_fromuser(info)) 839 841 return 0;
+24 -18
kernel/static_call.c
··· 35 35 return (void *)((long)site->addr + (long)&site->addr); 36 36 } 37 37 38 + static inline unsigned long __static_call_key(const struct static_call_site *site) 39 + { 40 + return (long)site->key + (long)&site->key; 41 + } 38 42 39 43 static inline struct static_call_key *static_call_key(const struct static_call_site *site) 40 44 { 41 - return (struct static_call_key *) 42 - (((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS); 45 + return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS); 43 46 } 44 47 45 48 /* These assume the key is word-aligned. */ 46 49 static inline bool static_call_is_init(struct static_call_site *site) 47 50 { 48 - return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT; 51 + return __static_call_key(site) & STATIC_CALL_SITE_INIT; 49 52 } 50 53 51 54 static inline bool static_call_is_tail(struct static_call_site *site) 52 55 { 53 - return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL; 56 + return __static_call_key(site) & STATIC_CALL_SITE_TAIL; 54 57 } 55 58 56 59 static inline void static_call_set_init(struct static_call_site *site) 57 60 { 58 - site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) - 61 + site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) - 59 62 (long)&site->key; 60 63 } 61 64 ··· 149 146 }; 150 147 151 148 for (site_mod = &first; site_mod; site_mod = site_mod->next) { 149 + bool init = system_state < SYSTEM_RUNNING; 152 150 struct module *mod = site_mod->mod; 153 151 154 152 if (!site_mod->sites) { ··· 169 165 if (mod) { 170 166 stop = mod->static_call_sites + 171 167 mod->num_static_call_sites; 168 + init = mod->state == MODULE_STATE_COMING; 172 169 } 173 170 #endif 174 171 ··· 177 172 site < stop && static_call_key(site) == key; site++) { 178 173 void *site_addr = static_call_addr(site); 179 174 180 - if (static_call_is_init(site)) { 181 - /* 182 - * Don't write to call sites which were in 183 - * initmem and have since been freed. 
184 - */ 185 - if (!mod && system_state >= SYSTEM_RUNNING) 186 - continue; 187 - if (mod && !within_module_init((unsigned long)site_addr, mod)) 188 - continue; 189 - } 175 + if (!init && static_call_is_init(site)) 176 + continue; 190 177 191 178 if (!kernel_text_address((unsigned long)site_addr)) { 192 - WARN_ONCE(1, "can't patch static call site at %pS", 179 + /* 180 + * This skips patching built-in __exit, which 181 + * is part of init_section_contains() but is 182 + * not part of kernel_text_address(). 183 + * 184 + * Skipping built-in __exit is fine since it 185 + * will never be executed. 186 + */ 187 + WARN_ONCE(!static_call_is_init(site), 188 + "can't patch static call site at %pS", 193 189 site_addr); 194 190 continue; 195 191 } 196 192 197 193 arch_static_call_transform(site_addr, NULL, func, 198 - static_call_is_tail(site)); 194 + static_call_is_tail(site)); 199 195 } 200 196 } 201 197 ··· 355 349 struct static_call_site *site; 356 350 357 351 for (site = start; site != stop; site++) { 358 - unsigned long s_key = (long)site->key + (long)&site->key; 352 + unsigned long s_key = __static_call_key(site); 359 353 unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS; 360 354 unsigned long key; 361 355
+1 -1
kernel/time/alarmtimer.c
··· 854 854 if (flags == TIMER_ABSTIME) 855 855 return -ERESTARTNOHAND; 856 856 857 - restart->fn = alarm_timer_nsleep_restart; 858 857 restart->nanosleep.clockid = type; 859 858 restart->nanosleep.expires = exp; 859 + set_restart_fn(restart, alarm_timer_nsleep_restart); 860 860 return ret; 861 861 } 862 862
+1 -1
kernel/time/hrtimer.c
··· 1957 1957 } 1958 1958 1959 1959 restart = &current->restart_block; 1960 - restart->fn = hrtimer_nanosleep_restart; 1961 1960 restart->nanosleep.clockid = t.timer.base->clockid; 1962 1961 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); 1962 + set_restart_fn(restart, hrtimer_nanosleep_restart); 1963 1963 out: 1964 1964 destroy_hrtimer_on_stack(&t.timer); 1965 1965 return ret;
+1 -1
kernel/time/posix-cpu-timers.c
··· 1480 1480 if (flags & TIMER_ABSTIME) 1481 1481 return -ERESTARTNOHAND; 1482 1482 1483 - restart_block->fn = posix_cpu_nsleep_restart; 1484 1483 restart_block->nanosleep.clockid = which_clock; 1484 + set_restart_fn(restart_block, posix_cpu_nsleep_restart); 1485 1485 } 1486 1486 return error; 1487 1487 }
-1
net/batman-adv/main.c
··· 702 702 703 703 MODULE_AUTHOR(BATADV_DRIVER_AUTHOR); 704 704 MODULE_DESCRIPTION(BATADV_DRIVER_DESC); 705 - MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE); 706 705 MODULE_VERSION(BATADV_SOURCE_VERSION); 707 706 MODULE_ALIAS_RTNL_LINK("batadv"); 708 707 MODULE_ALIAS_GENL_FAMILY(BATADV_NL_NAME);
+7 -4
net/sunrpc/auth_gss/svcauth_gss.c
··· 1825 1825 svcauth_gss_release(struct svc_rqst *rqstp) 1826 1826 { 1827 1827 struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data; 1828 - struct rpc_gss_wire_cred *gc = &gsd->clcred; 1828 + struct rpc_gss_wire_cred *gc; 1829 1829 struct xdr_buf *resbuf = &rqstp->rq_res; 1830 1830 int stat = -EINVAL; 1831 1831 struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); 1832 1832 1833 + if (!gsd) 1834 + goto out; 1835 + gc = &gsd->clcred; 1833 1836 if (gc->gc_proc != RPC_GSS_PROC_DATA) 1834 1837 goto out; 1835 1838 /* Release can be called twice, but we only wrap once. */ ··· 1873 1870 if (rqstp->rq_cred.cr_group_info) 1874 1871 put_group_info(rqstp->rq_cred.cr_group_info); 1875 1872 rqstp->rq_cred.cr_group_info = NULL; 1876 - if (gsd->rsci) 1873 + if (gsd && gsd->rsci) { 1877 1874 cache_put(&gsd->rsci->h, sn->rsc_cache); 1878 - gsd->rsci = NULL; 1879 - 1875 + gsd->rsci = NULL; 1876 + } 1880 1877 return stat; 1881 1878 } 1882 1879
+4 -2
net/sunrpc/svc.c
··· 1413 1413 1414 1414 sendit: 1415 1415 if (svc_authorise(rqstp)) 1416 - goto close; 1416 + goto close_xprt; 1417 1417 return 1; /* Caller can now send it */ 1418 1418 1419 1419 release_dropit: ··· 1425 1425 return 0; 1426 1426 1427 1427 close: 1428 + svc_authorise(rqstp); 1429 + close_xprt: 1428 1430 if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) 1429 1431 svc_close_xprt(rqstp->rq_xprt); 1430 1432 dprintk("svc: svc_process close\n"); ··· 1435 1433 err_short_len: 1436 1434 svc_printk(rqstp, "short len %zd, dropping request\n", 1437 1435 argv->iov_len); 1438 - goto close; 1436 + goto close_xprt; 1439 1437 1440 1438 err_bad_rpc: 1441 1439 serv->sv_stats->rpcbadfmt++;
+2 -2
net/sunrpc/svc_xprt.c
··· 1060 1060 struct svc_xprt *xprt; 1061 1061 int ret = 0; 1062 1062 1063 - spin_lock(&serv->sv_lock); 1063 + spin_lock_bh(&serv->sv_lock); 1064 1064 list_for_each_entry(xprt, xprt_list, xpt_list) { 1065 1065 if (xprt->xpt_net != net) 1066 1066 continue; ··· 1068 1068 set_bit(XPT_CLOSE, &xprt->xpt_flags); 1069 1069 svc_xprt_enqueue(xprt); 1070 1070 } 1071 - spin_unlock(&serv->sv_lock); 1071 + spin_unlock_bh(&serv->sv_lock); 1072 1072 return ret; 1073 1073 } 1074 1074
+3 -3
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
··· 252 252 xprt->timeout = &xprt_rdma_bc_timeout; 253 253 xprt_set_bound(xprt); 254 254 xprt_set_connected(xprt); 255 - xprt->bind_timeout = RPCRDMA_BIND_TO; 256 - xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; 257 - xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO; 255 + xprt->bind_timeout = 0; 256 + xprt->reestablish_timeout = 0; 257 + xprt->idle_timeout = 0; 258 258 259 259 xprt->prot = XPRT_TRANSPORT_BC_RDMA; 260 260 xprt->ops = &xprt_rdma_bc_procs;
+39 -43
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
··· 266 266 svc_rdma_recv_ctxt_put(rdma, ctxt); 267 267 } 268 268 269 - static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma, 270 - unsigned int wanted, bool temp) 269 + static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, 270 + struct svc_rdma_recv_ctxt *ctxt) 271 271 { 272 - const struct ib_recv_wr *bad_wr = NULL; 273 - struct svc_rdma_recv_ctxt *ctxt; 274 - struct ib_recv_wr *recv_chain; 275 272 int ret; 276 273 277 - recv_chain = NULL; 278 - while (wanted--) { 279 - ctxt = svc_rdma_recv_ctxt_get(rdma); 280 - if (!ctxt) 281 - break; 282 - 283 - trace_svcrdma_post_recv(ctxt); 284 - ctxt->rc_temp = temp; 285 - ctxt->rc_recv_wr.next = recv_chain; 286 - recv_chain = &ctxt->rc_recv_wr; 287 - rdma->sc_pending_recvs++; 288 - } 289 - if (!recv_chain) 290 - return false; 291 - 292 - ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr); 274 + trace_svcrdma_post_recv(ctxt); 275 + ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL); 293 276 if (ret) 294 277 goto err_post; 295 - return true; 278 + return 0; 296 279 297 280 err_post: 298 - while (bad_wr) { 299 - ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt, 300 - rc_recv_wr); 301 - bad_wr = bad_wr->next; 302 - svc_rdma_recv_ctxt_put(rdma, ctxt); 303 - } 304 - 305 281 trace_svcrdma_rq_post_err(rdma, ret); 306 - /* Since we're destroying the xprt, no need to reset 307 - * sc_pending_recvs. 
*/ 308 - return false; 282 + svc_rdma_recv_ctxt_put(rdma, ctxt); 283 + return ret; 284 + } 285 + 286 + static int svc_rdma_post_recv(struct svcxprt_rdma *rdma) 287 + { 288 + struct svc_rdma_recv_ctxt *ctxt; 289 + 290 + if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) 291 + return 0; 292 + ctxt = svc_rdma_recv_ctxt_get(rdma); 293 + if (!ctxt) 294 + return -ENOMEM; 295 + return __svc_rdma_post_recv(rdma, ctxt); 309 296 } 310 297 311 298 /** ··· 303 316 */ 304 317 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma) 305 318 { 306 - return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true); 319 + struct svc_rdma_recv_ctxt *ctxt; 320 + unsigned int i; 321 + int ret; 322 + 323 + for (i = 0; i < rdma->sc_max_requests; i++) { 324 + ctxt = svc_rdma_recv_ctxt_get(rdma); 325 + if (!ctxt) 326 + return false; 327 + ctxt->rc_temp = true; 328 + ret = __svc_rdma_post_recv(rdma, ctxt); 329 + if (ret) 330 + return false; 331 + } 332 + return true; 307 333 } 308 334 309 335 /** ··· 324 324 * @cq: Completion Queue context 325 325 * @wc: Work Completion object 326 326 * 327 + * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that 328 + * the Receive completion handler could be running. 
327 329 */ 328 330 static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) 329 331 { ··· 333 331 struct ib_cqe *cqe = wc->wr_cqe; 334 332 struct svc_rdma_recv_ctxt *ctxt; 335 333 336 - rdma->sc_pending_recvs--; 337 - 338 334 /* WARNING: Only wc->wr_cqe and wc->status are reliable */ 339 335 ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe); 340 336 341 337 trace_svcrdma_wc_receive(wc, &ctxt->rc_cid); 342 338 if (wc->status != IB_WC_SUCCESS) 343 339 goto flushed; 340 + 341 + if (svc_rdma_post_recv(rdma)) 342 + goto post_err; 344 343 345 344 /* All wc fields are now known to be valid */ 346 345 ctxt->rc_byte_len = wc->byte_len; ··· 353 350 spin_unlock(&rdma->sc_rq_dto_lock); 354 351 if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags)) 355 352 svc_xprt_enqueue(&rdma->sc_xprt); 356 - 357 - if (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags) && 358 - rdma->sc_pending_recvs < rdma->sc_max_requests) 359 - if (!svc_rdma_refresh_recvs(rdma, RPCRDMA_MAX_RECV_BATCH, 360 - false)) 361 - goto post_err; 362 - 363 353 return; 364 354 365 355 flushed: 366 - svc_rdma_recv_ctxt_put(rdma, ctxt); 367 356 post_err: 357 + svc_rdma_recv_ctxt_put(rdma, ctxt); 368 358 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 369 359 svc_xprt_enqueue(&rdma->sc_xprt); 370 360 }
-1
sound/drivers/aloop.c
··· 33 33 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 34 34 MODULE_DESCRIPTION("A loopback soundcard"); 35 35 MODULE_LICENSE("GPL"); 36 - MODULE_SUPPORTED_DEVICE("{{ALSA,Loopback soundcard}}"); 37 36 38 37 #define MAX_PCM_SUBSTREAMS 8 39 38
-1
sound/drivers/dummy.c
··· 25 25 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 26 26 MODULE_DESCRIPTION("Dummy soundcard (/dev/null)"); 27 27 MODULE_LICENSE("GPL"); 28 - MODULE_SUPPORTED_DEVICE("{{ALSA,Dummy soundcard}}"); 29 28 30 29 #define MAX_PCM_DEVICES 4 31 30 #define MAX_PCM_SUBSTREAMS 128
-1
sound/drivers/mtpav.c
··· 53 53 MODULE_AUTHOR("Michael T. Mayers"); 54 54 MODULE_DESCRIPTION("MOTU MidiTimePiece AV multiport MIDI"); 55 55 MODULE_LICENSE("GPL"); 56 - MODULE_SUPPORTED_DEVICE("{{MOTU,MidiTimePiece AV multiport MIDI}}"); 57 56 58 57 // io resources 59 58 #define MTPAV_IOBASE 0x378
-1
sound/drivers/mts64.c
··· 37 37 MODULE_AUTHOR("Matthias Koenig <mk@phasorlab.de>"); 38 38 MODULE_DESCRIPTION("ESI Miditerminal 4140"); 39 39 MODULE_LICENSE("GPL"); 40 - MODULE_SUPPORTED_DEVICE("{{ESI,Miditerminal 4140}}"); 41 40 42 41 /********************************************************************* 43 42 * Chip specific
-1
sound/drivers/pcsp/pcsp.c
··· 22 22 MODULE_AUTHOR("Stas Sergeev <stsp@users.sourceforge.net>"); 23 23 MODULE_DESCRIPTION("PC-Speaker driver"); 24 24 MODULE_LICENSE("GPL"); 25 - MODULE_SUPPORTED_DEVICE("{{PC-Speaker, pcsp}}"); 26 25 MODULE_ALIAS("platform:pcspkr"); 27 26 28 27 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
-1
sound/drivers/portman2x4.c
··· 57 57 MODULE_AUTHOR("Levent Guendogdu, Tobias Gehrig, Matthias Koenig"); 58 58 MODULE_DESCRIPTION("Midiman Portman2x4"); 59 59 MODULE_LICENSE("GPL"); 60 - MODULE_SUPPORTED_DEVICE("{{Midiman,Portman2x4}}"); 61 60 62 61 /********************************************************************* 63 62 * Chip specific
-1
sound/drivers/serial-u16550.c
··· 34 34 35 35 MODULE_DESCRIPTION("MIDI serial u16550"); 36 36 MODULE_LICENSE("GPL"); 37 - MODULE_SUPPORTED_DEVICE("{{ALSA, MIDI serial u16550}}"); 38 37 39 38 #define SNDRV_SERIAL_SOUNDCANVAS 0 /* Roland Soundcanvas; F5 NN selects part */ 40 39 #define SNDRV_SERIAL_MS124T 1 /* Midiator MS-124T */
-1
sound/drivers/virmidi.c
··· 43 43 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 44 44 MODULE_DESCRIPTION("Dummy soundcard for virtual rawmidi devices"); 45 45 MODULE_LICENSE("GPL"); 46 - MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual rawmidi device}}"); 47 46 48 47 #define MAX_MIDI_DEVICES 4 49 48
+2 -3
sound/firewire/dice/dice-stream.c
··· 493 493 struct reg_params tx_params, rx_params; 494 494 495 495 if (dice->substreams_counter == 0) { 496 - if (get_register_params(dice, &tx_params, &rx_params) >= 0) { 497 - amdtp_domain_stop(&dice->domain); 496 + if (get_register_params(dice, &tx_params, &rx_params) >= 0) 498 497 finish_session(dice, &tx_params, &rx_params); 499 - } 500 498 499 + amdtp_domain_stop(&dice->domain); 501 500 release_resources(dice); 502 501 } 503 502 }
-7
sound/isa/ad1816a/ad1816a.c
··· 22 22 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); 23 23 MODULE_DESCRIPTION("AD1816A, AD1815"); 24 24 MODULE_LICENSE("GPL"); 25 - MODULE_SUPPORTED_DEVICE("{{Highscreen,Sound-Boostar 16 3D}," 26 - "{Analog Devices,AD1815}," 27 - "{Analog Devices,AD1816A}," 28 - "{TerraTec,Base 64}," 29 - "{TerraTec,AudioSystem EWS64S}," 30 - "{Aztech/Newcom SC-16 3D}," 31 - "{Shark Predator ISA}}"); 32 25 33 26 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 1-MAX */ 34 27 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/isa/ad1848/ad1848.c
··· 22 22 MODULE_DESCRIPTION(CRD_NAME); 23 23 MODULE_AUTHOR("Tugrul Galatali <galatalt@stuy.edu>, Jaroslav Kysela <perex@perex.cz>"); 24 24 MODULE_LICENSE("GPL"); 25 - MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1848}," 26 - "{Analog Devices,AD1847}," 27 - "{Crystal Semiconductors,CS4248}}"); 28 25 29 26 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 30 27 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-11
sound/isa/als100.c
··· 26 26 #define PFX "als100: " 27 27 28 28 MODULE_DESCRIPTION("Avance Logic ALS007/ALS1X0"); 29 - MODULE_SUPPORTED_DEVICE("{{Diamond Technologies DT-019X}," 30 - "{Avance Logic ALS-007}}" 31 - "{{Avance Logic,ALS100 - PRO16PNP}," 32 - "{Avance Logic,ALS110}," 33 - "{Avance Logic,ALS120}," 34 - "{Avance Logic,ALS200}," 35 - "{3D Melody,MF1000}," 36 - "{Digimate,3D Sound}," 37 - "{Avance Logic,ALS120}," 38 - "{RTL,RTL3000}}"); 39 - 40 29 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); 41 30 MODULE_LICENSE("GPL"); 42 31
-5
sound/isa/azt2320.c
··· 35 35 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); 36 36 MODULE_DESCRIPTION("Aztech Systems AZT2320"); 37 37 MODULE_LICENSE("GPL"); 38 - MODULE_SUPPORTED_DEVICE("{{Aztech Systems,PRO16V}," 39 - "{Aztech Systems,AZT2320}," 40 - "{Aztech Systems,AZT3300}," 41 - "{Aztech Systems,AZT2320}," 42 - "{Aztech Systems,AZT3000}}"); 43 38 44 39 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 45 40 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/cmi8330.c
··· 51 51 MODULE_AUTHOR("George Talusan <gstalusan@uwaterloo.ca>"); 52 52 MODULE_DESCRIPTION("C-Media CMI8330/CMI8329"); 53 53 MODULE_LICENSE("GPL"); 54 - MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8330,isapnp:{CMI0001,@@@0001,@X@0001}}}"); 55 54 56 55 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 57 56 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-1
sound/isa/cs423x/cs4231.c
··· 23 23 MODULE_DESCRIPTION(CRD_NAME); 24 24 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 25 25 MODULE_LICENSE("GPL"); 26 - MODULE_SUPPORTED_DEVICE("{{Crystal Semiconductors,CS4231}}"); 27 26 28 27 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 29 28 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-34
sound/isa/cs423x/cs4236.c
··· 18 18 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 19 19 MODULE_LICENSE("GPL"); 20 20 MODULE_DESCRIPTION("Cirrus Logic CS4232-9"); 21 - MODULE_SUPPORTED_DEVICE("{{Turtle Beach,TBS-2000}," 22 - "{Turtle Beach,Tropez Plus}," 23 - "{SIC CrystalWave 32}," 24 - "{Hewlett Packard,Omnibook 5500}," 25 - "{TerraTec,Maestro 32/96}," 26 - "{Philips,PCA70PS}}," 27 - "{{Crystal Semiconductors,CS4235}," 28 - "{Crystal Semiconductors,CS4236}," 29 - "{Crystal Semiconductors,CS4237}," 30 - "{Crystal Semiconductors,CS4238}," 31 - "{Crystal Semiconductors,CS4239}," 32 - "{Acer,AW37}," 33 - "{Acer,AW35/Pro}," 34 - "{Crystal,3D}," 35 - "{Crystal Computer,TidalWave128}," 36 - "{Dell,Optiplex GX1}," 37 - "{Dell,Workstation 400 sound}," 38 - "{EliteGroup,P5TX-LA sound}," 39 - "{Gallant,SC-70P}," 40 - "{Gateway,E1000 Onboard CS4236B}," 41 - "{Genius,Sound Maker 3DJ}," 42 - "{Hewlett Packard,HP6330 sound}," 43 - "{IBM,PC 300PL sound}," 44 - "{IBM,Aptiva 2137 E24}," 45 - "{IBM,IntelliStation M Pro}," 46 - "{Intel,Marlin Spike Mobo CS4235}," 47 - "{Intel PR440FX Onboard}," 48 - "{Guillemot,MaxiSound 16 PnP}," 49 - "{NewClear,3D}," 50 - "{TerraTec,AudioSystem EWS64L/XL}," 51 - "{Typhoon Soundsystem,CS4236B}," 52 - "{Turtle Beach,Malibu}," 53 - "{Unknown,Digital PC 5000 Onboard}}"); 54 - 55 21 MODULE_ALIAS("snd_cs4232"); 56 22 57 23 #define IDENT "CS4232+"
-5
sound/isa/es1688/es1688.c
··· 26 26 MODULE_DESCRIPTION(CRD_NAME); 27 27 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 28 28 MODULE_LICENSE("GPL"); 29 - MODULE_SUPPORTED_DEVICE("{{ESS,ES688 PnP AudioDrive,pnp:ESS0100}," 30 - "{ESS,ES1688 PnP AudioDrive,pnp:ESS0102}," 31 - "{ESS,ES688 AudioDrive,pnp:ESS6881}," 32 - "{ESS,ES1688 AudioDrive,pnp:ESS1681}}"); 33 - 34 29 MODULE_ALIAS("snd_es968"); 35 30 36 31 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+1 -9
sound/isa/es18xx.c
··· 1929 1929 1930 1930 /* Card level */ 1931 1931 1932 - MODULE_AUTHOR("Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de>, Abramo Bagnara <abramo@alsa-project.org>"); 1932 + MODULE_AUTHOR("Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de>, Abramo Bagnara <abramo@alsa-project.org>"); 1933 1933 MODULE_DESCRIPTION("ESS ES18xx AudioDrive"); 1934 1934 MODULE_LICENSE("GPL"); 1935 - MODULE_SUPPORTED_DEVICE("{{ESS,ES1868 PnP AudioDrive}," 1936 - "{ESS,ES1869 PnP AudioDrive}," 1937 - "{ESS,ES1878 PnP AudioDrive}," 1938 - "{ESS,ES1879 PnP AudioDrive}," 1939 - "{ESS,ES1887 PnP AudioDrive}," 1940 - "{ESS,ES1888 PnP AudioDrive}," 1941 - "{ESS,ES1887 AudioDrive}," 1942 - "{ESS,ES1888 AudioDrive}}"); 1943 1935 1944 1936 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 1945 1937 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/gus/gusclassic.c
··· 23 23 MODULE_DESCRIPTION(CRD_NAME); 24 24 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 25 25 MODULE_LICENSE("GPL"); 26 - MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Classic}}"); 27 26 28 27 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 29 28 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/gus/gusextreme.c
··· 27 27 MODULE_DESCRIPTION(CRD_NAME); 28 28 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 29 29 MODULE_LICENSE("GPL"); 30 - MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Extreme}}"); 31 30 32 31 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 33 32 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/gus/gusmax.c
··· 21 21 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 22 22 MODULE_DESCRIPTION("Gravis UltraSound MAX"); 23 23 MODULE_LICENSE("GPL"); 24 - MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound MAX}}"); 25 24 26 25 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 27 26 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-6
sound/isa/gus/interwave.c
··· 28 28 MODULE_LICENSE("GPL"); 29 29 #ifndef SNDRV_STB 30 30 MODULE_DESCRIPTION("AMD InterWave"); 31 - MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Plug & Play}," 32 - "{STB,SoundRage32}," 33 - "{MED,MED3210}," 34 - "{Dynasonix,Dynasonix Pro}," 35 - "{Panasonic,PCA761AW}}"); 36 31 #else 37 32 MODULE_DESCRIPTION("AMD InterWave STB with TEA6330T"); 38 - MODULE_SUPPORTED_DEVICE("{{AMD,InterWave STB with TEA6330T}}"); 39 33 #endif 40 34 41 35 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-5
sound/isa/opl3sa2.c
··· 22 22 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 23 23 MODULE_DESCRIPTION("Yamaha OPL3SA2+"); 24 24 MODULE_LICENSE("GPL"); 25 - MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF719E-S}," 26 - "{Genius,Sound Maker 3DX}," 27 - "{Yamaha,OPL3SA3}," 28 - "{Intel,AL440LX sound}," 29 - "{NeoMagic,MagicWave 3DX}}"); 30 25 31 26 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 32 27 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/isa/opti9xx/miro.c
··· 33 33 MODULE_AUTHOR("Martin Langer <martin-langer@gmx.de>"); 34 34 MODULE_LICENSE("GPL"); 35 35 MODULE_DESCRIPTION("Miro miroSOUND PCM1 pro, PCM12, PCM20 Radio"); 36 - MODULE_SUPPORTED_DEVICE("{{Miro,miroSOUND PCM1 pro}, " 37 - "{Miro,miroSOUND PCM12}, " 38 - "{Miro,miroSOUND PCM20 Radio}}"); 39 36 40 37 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ 41 38 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-6
sound/isa/opti9xx/opti92x-ad1848.c
··· 36 36 MODULE_LICENSE("GPL"); 37 37 #ifdef OPTi93X 38 38 MODULE_DESCRIPTION("OPTi93X"); 39 - MODULE_SUPPORTED_DEVICE("{{OPTi,82C931/3}}"); 40 39 #else /* OPTi93X */ 41 40 #ifdef CS4231 42 41 MODULE_DESCRIPTION("OPTi92X - CS4231"); 43 - MODULE_SUPPORTED_DEVICE("{{OPTi,82C924 (CS4231)}," 44 - "{OPTi,82C925 (CS4231)}}"); 45 42 #else /* CS4231 */ 46 43 MODULE_DESCRIPTION("OPTi92X - AD1848"); 47 - MODULE_SUPPORTED_DEVICE("{{OPTi,82C924 (AD1848)}," 48 - "{OPTi,82C925 (AD1848)}," 49 - "{OAK,Mozart}}"); 50 44 #endif /* CS4231 */ 51 45 #endif /* OPTi93X */ 52 46
-3
sound/isa/sb/jazz16.c
··· 28 28 #define PFX "jazz16: " 29 29 30 30 MODULE_DESCRIPTION("Media Vision Jazz16"); 31 - MODULE_SUPPORTED_DEVICE("{{Media Vision ??? }," 32 - "{RTL,RTL3000}}"); 33 - 34 31 MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>"); 35 32 MODULE_LICENSE("GPL"); 36 33
-8
sound/isa/sb/sb16.c
··· 31 31 MODULE_LICENSE("GPL"); 32 32 #ifndef SNDRV_SBAWE 33 33 MODULE_DESCRIPTION("Sound Blaster 16"); 34 - MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB 16}," 35 - "{Creative Labs,SB Vibra16S}," 36 - "{Creative Labs,SB Vibra16C}," 37 - "{Creative Labs,SB Vibra16CL}," 38 - "{Creative Labs,SB Vibra16X}}"); 39 34 #else 40 35 MODULE_DESCRIPTION("Sound Blaster AWE"); 41 - MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB AWE 32}," 42 - "{Creative Labs,SB AWE 64}," 43 - "{Creative Labs,SB AWE 64 Gold}}"); 44 36 #endif 45 37 46 38 #if 0
-1
sound/isa/sb/sb8.c
··· 17 17 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 18 18 MODULE_DESCRIPTION("Sound Blaster 1.0/2.0/Pro"); 19 19 MODULE_LICENSE("GPL"); 20 - MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB 1.0/SB 2.0/SB Pro}}"); 21 20 22 21 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 23 22 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/isa/sc6000.c
··· 29 29 MODULE_AUTHOR("Krzysztof Helt"); 30 30 MODULE_DESCRIPTION("Gallant SC-6000"); 31 31 MODULE_LICENSE("GPL"); 32 - MODULE_SUPPORTED_DEVICE("{{Gallant, SC-6000}," 33 - "{AudioExcel, Audio Excel DSP 16}," 34 - "{Zoltrix, AV302}}"); 35 32 36 33 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 37 34 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/wavefront/wavefront.c
··· 21 21 MODULE_AUTHOR("Paul Barton-Davis <pbd@op.net>"); 22 22 MODULE_DESCRIPTION("Turtle Beach Wavefront"); 23 23 MODULE_LICENSE("GPL"); 24 - MODULE_SUPPORTED_DEVICE("{{Turtle Beach,Maui/Tropez/Tropez+}}"); 25 24 26 25 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 27 26 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/mips/sgio2audio.c
··· 32 32 MODULE_AUTHOR("Vivien Chappelier <vivien.chappelier@linux-mips.org>"); 33 33 MODULE_DESCRIPTION("SGI O2 Audio"); 34 34 MODULE_LICENSE("GPL"); 35 - MODULE_SUPPORTED_DEVICE("{{Silicon Graphics, O2 Audio}}"); 36 35 37 36 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ 38 37 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/ad1889.c
··· 43 43 MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>, Thibaut Varene <t-bone@parisc-linux.org>"); 44 44 MODULE_DESCRIPTION("Analog Devices AD1889 ALSA sound driver"); 45 45 MODULE_LICENSE("GPL"); 46 - MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1889}}"); 47 46 48 47 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 49 48 module_param_array(index, int, NULL, 0444);
-1
sound/pci/ali5451/ali5451.c
··· 29 29 MODULE_AUTHOR("Matt Wu <Matt_Wu@acersoftech.com.cn>"); 30 30 MODULE_DESCRIPTION("ALI M5451"); 31 31 MODULE_LICENSE("GPL"); 32 - MODULE_SUPPORTED_DEVICE("{{ALI,M5451,pci},{ALI,M5451}}"); 33 32 34 33 static int index = SNDRV_DEFAULT_IDX1; /* Index */ 35 34 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/als300.c
··· 86 86 MODULE_AUTHOR("Ash Willis <ashwillis@programmer.net>"); 87 87 MODULE_DESCRIPTION("Avance Logic ALS300"); 88 88 MODULE_LICENSE("GPL"); 89 - MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS300},{Avance Logic,ALS300+}}"); 90 89 91 90 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 92 91 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-1
sound/pci/als4000.c
··· 68 68 MODULE_AUTHOR("Bart Hartgers <bart@etpmod.phys.tue.nl>, Andreas Mohr"); 69 69 MODULE_DESCRIPTION("Avance Logic ALS4000"); 70 70 MODULE_LICENSE("GPL"); 71 - MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS4000}}"); 72 71 73 72 #if IS_REACHABLE(CONFIG_GAMEPORT) 74 73 #define SUPPORT_JOYSTICK 1
-1
sound/pci/atiixp.c
··· 23 23 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 24 24 MODULE_DESCRIPTION("ATI IXP AC97 controller"); 25 25 MODULE_LICENSE("GPL"); 26 - MODULE_SUPPORTED_DEVICE("{{ATI,IXP150/200/250/300/400/600}}"); 27 26 28 27 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ 29 28 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/atiixp_modem.c
··· 23 23 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 24 24 MODULE_DESCRIPTION("ATI IXP MC97 controller"); 25 25 MODULE_LICENSE("GPL"); 26 - MODULE_SUPPORTED_DEVICE("{{ATI,IXP150/200/250}}"); 27 26 28 27 static int index = -2; /* Exclude the first card */ 29 28 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-2
sound/pci/au88x0/au88x0.c
··· 41 41 42 42 MODULE_DESCRIPTION("Aureal vortex"); 43 43 MODULE_LICENSE("GPL"); 44 - MODULE_SUPPORTED_DEVICE("{{Aureal Semiconductor Inc., Aureal Vortex Sound Processor}}"); 45 - 46 44 MODULE_DEVICE_TABLE(pci, snd_vortex_ids); 47 45 48 46 static void vortex_fix_latency(struct pci_dev *vortex)
-1
sound/pci/azt3328.c
··· 196 196 MODULE_AUTHOR("Andreas Mohr <andi AT lisas.de>"); 197 197 MODULE_DESCRIPTION("Aztech AZF3328 (PCI168)"); 198 198 MODULE_LICENSE("GPL"); 199 - MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}"); 200 199 201 200 #if IS_REACHABLE(CONFIG_GAMEPORT) 202 201 #define SUPPORT_GAMEPORT 1
-2
sound/pci/bt87x.c
··· 23 23 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 24 24 MODULE_DESCRIPTION("Brooktree Bt87x audio driver"); 25 25 MODULE_LICENSE("GPL"); 26 - MODULE_SUPPORTED_DEVICE("{{Brooktree,Bt878}," 27 - "{Brooktree,Bt879}}"); 28 26 29 27 static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -2}; /* Exclude the first card */ 30 28 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/pci/ca0106/ca0106_main.c
··· 137 137 MODULE_AUTHOR("James Courtier-Dutton <James@superbug.demon.co.uk>"); 138 138 MODULE_DESCRIPTION("CA0106"); 139 139 MODULE_LICENSE("GPL"); 140 - MODULE_SUPPORTED_DEVICE("{{Creative,SB CA0106 chip}}"); 141 140 142 141 // module parameters (see "Module Parameters") 143 142 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-4
sound/pci/cmipci.c
··· 30 30 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 31 31 MODULE_DESCRIPTION("C-Media CMI8x38 PCI"); 32 32 MODULE_LICENSE("GPL"); 33 - MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8738}," 34 - "{C-Media,CMI8738B}," 35 - "{C-Media,CMI8338A}," 36 - "{C-Media,CMI8338B}}"); 37 33 38 34 #if IS_REACHABLE(CONFIG_GAMEPORT) 39 35 #define SUPPORT_JOYSTICK 1
-1
sound/pci/cs4281.c
··· 25 25 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 26 26 MODULE_DESCRIPTION("Cirrus Logic CS4281"); 27 27 MODULE_LICENSE("GPL"); 28 - MODULE_SUPPORTED_DEVICE("{{Cirrus Logic,CS4281}}"); 29 28 30 29 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 31 30 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-7
sound/pci/cs46xx/cs46xx.c
··· 21 21 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 22 22 MODULE_DESCRIPTION("Cirrus Logic Sound Fusion CS46XX"); 23 23 MODULE_LICENSE("GPL"); 24 - MODULE_SUPPORTED_DEVICE("{{Cirrus Logic,Sound Fusion (CS4280)}," 25 - "{Cirrus Logic,Sound Fusion (CS4610)}," 26 - "{Cirrus Logic,Sound Fusion (CS4612)}," 27 - "{Cirrus Logic,Sound Fusion (CS4615)}," 28 - "{Cirrus Logic,Sound Fusion (CS4622)}," 29 - "{Cirrus Logic,Sound Fusion (CS4624)}," 30 - "{Cirrus Logic,Sound Fusion (CS4630)}}"); 31 24 32 25 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 33 26 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/pci/cs5535audio/cs5535audio.c
··· 393 393 MODULE_AUTHOR("Jaya Kumar"); 394 394 MODULE_LICENSE("GPL"); 395 395 MODULE_DESCRIPTION("CS5535 Audio"); 396 - MODULE_SUPPORTED_DEVICE("CS5535 Audio");
-1
sound/pci/ctxfi/xfi.c
··· 18 18 MODULE_AUTHOR("Creative Technology Ltd"); 19 19 MODULE_DESCRIPTION("X-Fi driver version 1.03"); 20 20 MODULE_LICENSE("GPL v2"); 21 - MODULE_SUPPORTED_DEVICE("{{Creative Labs, Sound Blaster X-Fi}"); 22 21 23 22 static unsigned int reference_rate = 48000; 24 23 static unsigned int multiple = 2;
-1
sound/pci/echoaudio/echoaudio.c
··· 10 10 MODULE_AUTHOR("Giuliano Pochini <pochini@shiny.it>"); 11 11 MODULE_LICENSE("GPL v2"); 12 12 MODULE_DESCRIPTION("Echoaudio " ECHOCARD_NAME " soundcards driver"); 13 - MODULE_SUPPORTED_DEVICE("{{Echoaudio," ECHOCARD_NAME "}}"); 14 13 MODULE_DEVICE_TABLE(pci, snd_echo_ids); 15 14 16 15 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-2
sound/pci/emu10k1/emu10k1.c
··· 18 18 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 19 19 MODULE_DESCRIPTION("EMU10K1"); 20 20 MODULE_LICENSE("GPL"); 21 - MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB Live!/PCI512/E-mu APS}," 22 - "{Creative Labs,SB Audigy}}"); 23 21 24 22 #if IS_ENABLED(CONFIG_SND_SEQUENCER) 25 23 #define ENABLE_SYNTH
-1
sound/pci/emu10k1/emu10k1x.c
··· 31 31 MODULE_AUTHOR("Francisco Moraes <fmoraes@nc.rr.com>"); 32 32 MODULE_DESCRIPTION("EMU10K1X"); 33 33 MODULE_LICENSE("GPL"); 34 - MODULE_SUPPORTED_DEVICE("{{Dell Creative Labs,SB Live!}"); 35 34 36 35 // module parameters (see "Module Parameters") 37 36 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-8
sound/pci/ens1370.c
··· 52 52 MODULE_LICENSE("GPL"); 53 53 #ifdef CHIP1370 54 54 MODULE_DESCRIPTION("Ensoniq AudioPCI ES1370"); 55 - MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI-97 ES1370}," 56 - "{Creative Labs,SB PCI64/128 (ES1370)}}"); 57 55 #endif 58 56 #ifdef CHIP1371 59 57 MODULE_DESCRIPTION("Ensoniq/Creative AudioPCI ES1371+"); 60 - MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI ES1371/73}," 61 - "{Ensoniq,AudioPCI ES1373}," 62 - "{Creative Labs,Ectiva EV1938}," 63 - "{Creative Labs,SB PCI64/128 (ES1371/73)}," 64 - "{Creative Labs,Vibra PCI128}," 65 - "{Ectiva,EV1938}}"); 66 58 #endif 67 59 68 60 #if IS_REACHABLE(CONFIG_GAMEPORT)
-4
sound/pci/es1938.c
··· 52 52 MODULE_AUTHOR("Jaromir Koutek <miri@punknet.cz>"); 53 53 MODULE_DESCRIPTION("ESS Solo-1"); 54 54 MODULE_LICENSE("GPL"); 55 - MODULE_SUPPORTED_DEVICE("{{ESS,ES1938}," 56 - "{ESS,ES1946}," 57 - "{ESS,ES1969}," 58 - "{TerraTec,128i PCI}}"); 59 55 60 56 #if IS_REACHABLE(CONFIG_GAMEPORT) 61 57 #define SUPPORT_JOYSTICK 1
-4
sound/pci/es1968.c
··· 107 107 108 108 MODULE_DESCRIPTION("ESS Maestro"); 109 109 MODULE_LICENSE("GPL"); 110 - MODULE_SUPPORTED_DEVICE("{{ESS,Maestro 2e}," 111 - "{ESS,Maestro 2}," 112 - "{ESS,Maestro 1}," 113 - "{TerraTec,DMX}}"); 114 110 115 111 #if IS_REACHABLE(CONFIG_GAMEPORT) 116 112 #define SUPPORT_JOYSTICK 1
-2
sound/pci/fm801.c
··· 26 26 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 27 27 MODULE_DESCRIPTION("ForteMedia FM801"); 28 28 MODULE_LICENSE("GPL"); 29 - MODULE_SUPPORTED_DEVICE("{{ForteMedia,FM801}," 30 - "{Genius,SoundMaker Live 5.1}}"); 31 29 32 30 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 33 31 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+1 -1
sound/pci/hda/hda_generic.c
··· 4065 4065 4066 4066 spec->micmute_led.led_mode = MICMUTE_LED_FOLLOW_MUTE; 4067 4067 spec->micmute_led.capture = 0; 4068 - spec->micmute_led.led_value = 0; 4068 + spec->micmute_led.led_value = -1; 4069 4069 spec->micmute_led.old_hook = spec->cap_sync_hook; 4070 4070 spec->cap_sync_hook = update_micmute_led; 4071 4071 if (!snd_hda_gen_add_kctl(spec, NULL, &micmute_led_mode_ctl))
-34
sound/pci/hda/hda_intel.c
··· 208 208 209 209 210 210 MODULE_LICENSE("GPL"); 211 - MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," 212 - "{Intel, ICH6M}," 213 - "{Intel, ICH7}," 214 - "{Intel, ESB2}," 215 - "{Intel, ICH8}," 216 - "{Intel, ICH9}," 217 - "{Intel, ICH10}," 218 - "{Intel, PCH}," 219 - "{Intel, CPT}," 220 - "{Intel, PPT}," 221 - "{Intel, LPT}," 222 - "{Intel, LPT_LP}," 223 - "{Intel, WPT_LP}," 224 - "{Intel, SPT}," 225 - "{Intel, SPT_LP}," 226 - "{Intel, HPT}," 227 - "{Intel, PBG}," 228 - "{Intel, SCH}," 229 - "{ATI, SB450}," 230 - "{ATI, SB600}," 231 - "{ATI, RS600}," 232 - "{ATI, RS690}," 233 - "{ATI, RS780}," 234 - "{ATI, R600}," 235 - "{ATI, RV630}," 236 - "{ATI, RV610}," 237 - "{ATI, RV670}," 238 - "{ATI, RV635}," 239 - "{ATI, RV620}," 240 - "{ATI, RV770}," 241 - "{VIA, VT8251}," 242 - "{VIA, VT8237A}," 243 - "{SiS, SIS966}," 244 - "{ULI, M5461}}"); 245 211 MODULE_DESCRIPTION("Intel HDA driver"); 246 212 247 213 #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
+16
sound/pci/hda/patch_realtek.c
··· 4225 4225 } 4226 4226 } 4227 4227 4228 + static void alc236_fixup_hp_gpio_led(struct hda_codec *codec, 4229 + const struct hda_fixup *fix, int action) 4230 + { 4231 + alc_fixup_hp_gpio_led(codec, action, 0x02, 0x01); 4232 + } 4233 + 4228 4234 static void alc269_fixup_hp_gpio_led(struct hda_codec *codec, 4229 4235 const struct hda_fixup *fix, int action) 4230 4236 { ··· 6387 6381 ALC294_FIXUP_ASUS_GX502_VERBS, 6388 6382 ALC285_FIXUP_HP_GPIO_LED, 6389 6383 ALC285_FIXUP_HP_MUTE_LED, 6384 + ALC236_FIXUP_HP_GPIO_LED, 6390 6385 ALC236_FIXUP_HP_MUTE_LED, 6391 6386 ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, 6392 6387 ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, ··· 7623 7616 .type = HDA_FIXUP_FUNC, 7624 7617 .v.func = alc285_fixup_hp_mute_led, 7625 7618 }, 7619 + [ALC236_FIXUP_HP_GPIO_LED] = { 7620 + .type = HDA_FIXUP_FUNC, 7621 + .v.func = alc236_fixup_hp_gpio_led, 7622 + }, 7626 7623 [ALC236_FIXUP_HP_MUTE_LED] = { 7627 7624 .type = HDA_FIXUP_FUNC, 7628 7625 .v.func = alc236_fixup_hp_mute_led, ··· 8056 8045 SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation", 8057 8046 ALC285_FIXUP_HP_GPIO_AMP_INIT), 8058 8047 SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), 8048 + SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), 8059 8049 SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED), 8060 8050 SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED), 8061 8051 SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), 8052 + SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), 8053 + SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), 8062 8054 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 8063 8055 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 8064 8056 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ··· 8256 8242 
SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), 8257 8243 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ 8258 8244 SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), 8245 + SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), 8259 8246 SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), 8247 + SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), 8260 8248 SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), 8261 8249 SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), 8262 8250 SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
-6
sound/pci/ice1712/ice1712.c
··· 60 60 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 61 61 MODULE_DESCRIPTION("ICEnsemble ICE1712 (Envy24)"); 62 62 MODULE_LICENSE("GPL"); 63 - MODULE_SUPPORTED_DEVICE("{" 64 - HOONTECH_DEVICE_DESC 65 - DELTA_DEVICE_DESC 66 - EWS_DEVICE_DESC 67 - "{ICEnsemble,Generic ICE1712}," 68 - "{ICEnsemble,Generic Envy24}}"); 69 63 70 64 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 71 65 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-19
sound/pci/ice1712/ice1724.c
··· 44 44 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 45 45 MODULE_DESCRIPTION("VIA ICEnsemble ICE1724/1720 (Envy24HT/PT)"); 46 46 MODULE_LICENSE("GPL"); 47 - MODULE_SUPPORTED_DEVICE("{" 48 - REVO_DEVICE_DESC 49 - AMP_AUDIO2000_DEVICE_DESC 50 - AUREON_DEVICE_DESC 51 - VT1720_MOBO_DEVICE_DESC 52 - PONTIS_DEVICE_DESC 53 - PRODIGY192_DEVICE_DESC 54 - PRODIGY_HIFI_DEVICE_DESC 55 - JULI_DEVICE_DESC 56 - MAYA44_DEVICE_DESC 57 - PHASE_DEVICE_DESC 58 - WTM_DEVICE_DESC 59 - SE_DEVICE_DESC 60 - QTET_DEVICE_DESC 61 - "{VIA,VT1720}," 62 - "{VIA,VT1724}," 63 - "{ICEnsemble,Generic ICE1724}," 64 - "{ICEnsemble,Generic Envy24HT}" 65 - "{ICEnsemble,Generic Envy24PT}}"); 66 47 67 48 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 68 49 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-23
sound/pci/intel8x0.c
··· 27 27 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 28 28 MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455"); 29 29 MODULE_LICENSE("GPL"); 30 - MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH}," 31 - "{Intel,82901AB-ICH0}," 32 - "{Intel,82801BA-ICH2}," 33 - "{Intel,82801CA-ICH3}," 34 - "{Intel,82801DB-ICH4}," 35 - "{Intel,ICH5}," 36 - "{Intel,ICH6}," 37 - "{Intel,ICH7}," 38 - "{Intel,6300ESB}," 39 - "{Intel,ESB2}," 40 - "{Intel,MX440}," 41 - "{SiS,SI7012}," 42 - "{NVidia,nForce Audio}," 43 - "{NVidia,nForce2 Audio}," 44 - "{NVidia,nForce3 Audio}," 45 - "{NVidia,MCP04}," 46 - "{NVidia,MCP501}," 47 - "{NVidia,CK804}," 48 - "{NVidia,CK8}," 49 - "{NVidia,CK8S}," 50 - "{AMD,AMD768}," 51 - "{AMD,AMD8111}," 52 - "{ALI,M5455}}"); 53 30 54 31 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ 55 32 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-15
sound/pci/intel8x0m.c
··· 25 25 MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; " 26 26 "SiS 7013; NVidia MCP/2/2S/3 modems"); 27 27 MODULE_LICENSE("GPL"); 28 - MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH}," 29 - "{Intel,82901AB-ICH0}," 30 - "{Intel,82801BA-ICH2}," 31 - "{Intel,82801CA-ICH3}," 32 - "{Intel,82801DB-ICH4}," 33 - "{Intel,ICH5}," 34 - "{Intel,ICH6}," 35 - "{Intel,ICH7}," 36 - "{Intel,MX440}," 37 - "{SiS,7013}," 38 - "{NVidia,NForce Modem}," 39 - "{NVidia,NForce2 Modem}," 40 - "{NVidia,NForce2s Modem}," 41 - "{NVidia,NForce3 Modem}," 42 - "{AMD,AMD768}}"); 43 28 44 29 static int index = -2; /* Exclude the first card */ 45 30 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/korg1212/korg1212.c
··· 388 388 389 389 MODULE_DESCRIPTION("korg1212"); 390 390 MODULE_LICENSE("GPL"); 391 - MODULE_SUPPORTED_DEVICE("{{KORG,korg1212}}"); 392 391 MODULE_FIRMWARE("korg/k1212.dsp"); 393 392 394 393 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-1
sound/pci/lola/lola.c
··· 54 54 */ 55 55 56 56 MODULE_LICENSE("GPL"); 57 - MODULE_SUPPORTED_DEVICE("{{Digigram, Lola}}"); 58 57 MODULE_DESCRIPTION("Digigram Lola driver"); 59 58 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 60 59
-2
sound/pci/lx6464es/lx6464es.c
··· 21 21 MODULE_AUTHOR("Tim Blechmann"); 22 22 MODULE_LICENSE("GPL"); 23 23 MODULE_DESCRIPTION("digigram lx6464es"); 24 - MODULE_SUPPORTED_DEVICE("{digigram lx6464es{}}"); 25 - 26 24 27 25 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 28 26 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-5
sound/pci/maestro3.c
··· 39 39 MODULE_AUTHOR("Zach Brown <zab@zabbo.net>, Takashi Iwai <tiwai@suse.de>"); 40 40 MODULE_DESCRIPTION("ESS Maestro3 PCI"); 41 41 MODULE_LICENSE("GPL"); 42 - MODULE_SUPPORTED_DEVICE("{{ESS,Maestro3 PCI}," 43 - "{ESS,ES1988}," 44 - "{ESS,Allegro PCI}," 45 - "{ESS,Allegro-1 PCI}," 46 - "{ESS,Canyon3D-2/LE PCI}}"); 47 42 MODULE_FIRMWARE("ess/maestro3_assp_kernel.fw"); 48 43 MODULE_FIRMWARE("ess/maestro3_assp_minisrc.fw"); 49 44
-1
sound/pci/mixart/mixart.c
··· 32 32 MODULE_AUTHOR("Digigram <alsa@digigram.com>"); 33 33 MODULE_DESCRIPTION("Digigram " CARD_NAME); 34 34 MODULE_LICENSE("GPL"); 35 - MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}"); 36 35 37 36 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 38 37 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-2
sound/pci/nm256/nm256.c
··· 32 32 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 33 33 MODULE_DESCRIPTION("NeoMagic NM256AV/ZX"); 34 34 MODULE_LICENSE("GPL"); 35 - MODULE_SUPPORTED_DEVICE("{{NeoMagic,NM256AV}," 36 - "{NeoMagic,NM256ZX}}"); 37 35 38 36 /* 39 37 * some compile conditions.
-3
sound/pci/oxygen/oxygen.c
··· 56 56 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 57 57 MODULE_DESCRIPTION("C-Media CMI8788 driver"); 58 58 MODULE_LICENSE("GPL v2"); 59 - MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8786}" 60 - ",{C-Media,CMI8787}" 61 - ",{C-Media,CMI8788}}"); 62 59 63 60 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 64 61 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-1
sound/pci/oxygen/se6x.c
··· 29 29 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 30 30 MODULE_DESCRIPTION("Studio Evolution SE6X driver"); 31 31 MODULE_LICENSE("GPL v2"); 32 - MODULE_SUPPORTED_DEVICE("{{Studio Evolution,SE6X}}"); 33 32 34 33 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 35 34 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-1
sound/pci/oxygen/virtuoso.c
··· 16 16 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 17 17 MODULE_DESCRIPTION("Asus Virtuoso driver"); 18 18 MODULE_LICENSE("GPL v2"); 19 - MODULE_SUPPORTED_DEVICE("{{Asus,AV66},{Asus,AV100},{Asus,AV200}}"); 20 19 21 20 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 22 21 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-1
sound/pci/pcxhr/pcxhr.c
··· 35 35 "Marc Titinger <titinger@digigram.com>"); 36 36 MODULE_DESCRIPTION("Digigram " DRIVER_NAME " " PCXHR_DRIVER_VERSION_STRING); 37 37 MODULE_LICENSE("GPL"); 38 - MODULE_SUPPORTED_DEVICE("{{Digigram," DRIVER_NAME "}}"); 39 38 40 39 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 41 40 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/pci/riptide/riptide.c
··· 103 103 MODULE_AUTHOR("Peter Gruber <nokos@gmx.net>"); 104 104 MODULE_DESCRIPTION("riptide"); 105 105 MODULE_LICENSE("GPL"); 106 - MODULE_SUPPORTED_DEVICE("{{Conexant,Riptide}}"); 107 106 MODULE_FIRMWARE("riptide.hex"); 108 107 109 108 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-1
sound/pci/rme32.c
··· 88 88 MODULE_AUTHOR("Martin Langer <martin-langer@gmx.de>, Pilo Chambert <pilo.c@wanadoo.fr>"); 89 89 MODULE_DESCRIPTION("RME Digi32, Digi32/8, Digi32 PRO"); 90 90 MODULE_LICENSE("GPL"); 91 - MODULE_SUPPORTED_DEVICE("{{RME,Digi32}," "{RME,Digi32/8}," "{RME,Digi32 PRO}}"); 92 91 93 92 /* Defines for RME Digi32 series */ 94 93 #define RME32_SPDIF_NCHANNELS 2
-5
sound/pci/rme96.c
··· 31 31 MODULE_DESCRIPTION("RME Digi96, Digi96/8, Digi96/8 PRO, Digi96/8 PST, " 32 32 "Digi96/8 PAD"); 33 33 MODULE_LICENSE("GPL"); 34 - MODULE_SUPPORTED_DEVICE("{{RME,Digi96}," 35 - "{RME,Digi96/8}," 36 - "{RME,Digi96/8 PRO}," 37 - "{RME,Digi96/8 PST}," 38 - "{RME,Digi96/8 PAD}}"); 39 34 40 35 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 41 36 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/pci/rme9652/hdsp.c
··· 44 44 MODULE_AUTHOR("Paul Davis <paul@linuxaudiosystems.com>, Marcus Andersson, Thomas Charbonnel <thomas@undata.org>"); 45 45 MODULE_DESCRIPTION("RME Hammerfall DSP"); 46 46 MODULE_LICENSE("GPL"); 47 - MODULE_SUPPORTED_DEVICE("{{RME Hammerfall-DSP}," 48 - "{RME HDSP-9652}," 49 - "{RME HDSP-9632}}"); 50 47 MODULE_FIRMWARE("rpm_firmware.bin"); 51 48 MODULE_FIRMWARE("multiface_firmware.bin"); 52 49 MODULE_FIRMWARE("multiface_firmware_rev11.bin");
-1
sound/pci/rme9652/hdspm.c
··· 165 165 ); 166 166 MODULE_DESCRIPTION("RME HDSPM"); 167 167 MODULE_LICENSE("GPL"); 168 - MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}"); 169 168 170 169 /* --- Write registers. --- 171 170 These are defined as byte-offsets from the iobase value. */
-2
sound/pci/rme9652/rme9652.c
··· 39 39 MODULE_AUTHOR("Paul Davis <pbd@op.net>, Winfried Ritsch"); 40 40 MODULE_DESCRIPTION("RME Digi9652/Digi9636"); 41 41 MODULE_LICENSE("GPL"); 42 - MODULE_SUPPORTED_DEVICE("{{RME,Hammerfall}," 43 - "{RME,Hammerfall-Light}}"); 44 42 45 43 /* The Hammerfall has two sets of 24 ADAT + 2 S/PDIF channels, one for 46 44 capture, one for playback. Both the ADAT and S/PDIF channels appear
-1
sound/pci/sis7019.c
··· 24 24 MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); 25 25 MODULE_DESCRIPTION("SiS7019"); 26 26 MODULE_LICENSE("GPL"); 27 - MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}"); 28 27 29 28 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ 30 29 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/sonicvibes.c
··· 29 29 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 30 30 MODULE_DESCRIPTION("S3 SonicVibes PCI"); 31 31 MODULE_LICENSE("GPL"); 32 - MODULE_SUPPORTED_DEVICE("{{S3,SonicVibes PCI}}"); 33 32 34 33 #if IS_REACHABLE(CONFIG_GAMEPORT) 35 34 #define SUPPORT_JOYSTICK 1
-12
sound/pci/trident/trident.c
··· 17 17 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, <audio@tridentmicro.com>"); 18 18 MODULE_DESCRIPTION("Trident 4D-WaveDX/NX & SiS SI7018"); 19 19 MODULE_LICENSE("GPL"); 20 - MODULE_SUPPORTED_DEVICE("{{Trident,4DWave DX}," 21 - "{Trident,4DWave NX}," 22 - "{SiS,SI7018 PCI Audio}," 23 - "{Best Union,Miss Melody 4DWave PCI}," 24 - "{HIS,4DWave PCI}," 25 - "{Warpspeed,ONSpeed 4DWave PCI}," 26 - "{Aztech Systems,PCI 64-Q3D}," 27 - "{Addonics,SV 750}," 28 - "{CHIC,True Sound 4Dwave}," 29 - "{Shark,Predator4D-PCI}," 30 - "{Jaton,SonicWave 4D}," 31 - "{Hoontech,SoundTrack Digital 4DWave NX}}"); 32 20 33 21 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 34 22 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/pci/via82xx.c
··· 56 56 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 57 57 MODULE_DESCRIPTION("VIA VT82xx audio"); 58 58 MODULE_LICENSE("GPL"); 59 - MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C,pci},{VIA,VT8233A/C,8235}}"); 60 59 61 60 #if IS_REACHABLE(CONFIG_GAMEPORT) 62 61 #define SUPPORT_JOYSTICK 1
-1
sound/pci/via82xx_modem.c
··· 38 38 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 39 39 MODULE_DESCRIPTION("VIA VT82xx modem"); 40 40 MODULE_LICENSE("GPL"); 41 - MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C modem,pci}}"); 42 41 43 42 static int index = -2; /* Exclude the first card */ 44 43 static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/vx222/vx222.c
··· 20 20 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 21 21 MODULE_DESCRIPTION("Digigram VX222 V2/Mic"); 22 22 MODULE_LICENSE("GPL"); 23 - MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}"); 24 23 25 24 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 26 25 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-6
sound/pci/ymfpci/ymfpci.c
··· 17 17 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 18 18 MODULE_DESCRIPTION("Yamaha DS-1 PCI"); 19 19 MODULE_LICENSE("GPL"); 20 - MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF724}," 21 - "{Yamaha,YMF724F}," 22 - "{Yamaha,YMF740}," 23 - "{Yamaha,YMF740C}," 24 - "{Yamaha,YMF744}," 25 - "{Yamaha,YMF754}}"); 26 20 27 21 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 28 22 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/pcmcia/pdaudiocf/pdaudiocf.c
··· 22 22 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 23 23 MODULE_DESCRIPTION("Sound Core " CARD_NAME); 24 24 MODULE_LICENSE("GPL"); 25 - MODULE_SUPPORTED_DEVICE("{{Sound Core," CARD_NAME "}}"); 26 25 27 26 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 28 27 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-4
sound/pcmcia/vx/vxpocket.c
··· 17 17 #include <sound/initval.h> 18 18 #include <sound/tlv.h> 19 19 20 - /* 21 - */ 22 - 23 20 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 24 21 MODULE_DESCRIPTION("Digigram VXPocket"); 25 22 MODULE_LICENSE("GPL"); 26 - MODULE_SUPPORTED_DEVICE("{{Digigram,VXPocket},{Digigram,VXPocket440}}"); 27 23 28 24 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 29 25 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/ppc/powermac.c
··· 18 18 #define CHIP_NAME "PMac" 19 19 20 20 MODULE_DESCRIPTION("PowerMac"); 21 - MODULE_SUPPORTED_DEVICE("{{Apple,PowerMac}}"); 22 21 MODULE_LICENSE("GPL"); 23 22 24 23 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
-1
sound/sh/aica.c
··· 32 32 MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); 33 33 MODULE_DESCRIPTION("Dreamcast AICA sound (pcm) driver"); 34 34 MODULE_LICENSE("GPL"); 35 - MODULE_SUPPORTED_DEVICE("{{Yamaha/SEGA, AICA}}"); 36 35 MODULE_FIRMWARE("aica_firmware.bin"); 37 36 38 37 /* module parameters */
-1
sound/sh/sh_dac_audio.c
··· 25 25 MODULE_AUTHOR("Rafael Ignacio Zurita <rizurita@yahoo.com>"); 26 26 MODULE_DESCRIPTION("SuperH DAC audio driver"); 27 27 MODULE_LICENSE("GPL"); 28 - MODULE_SUPPORTED_DEVICE("{{SuperH DAC audio support}}"); 29 28 30 29 /* Module Parameters */ 31 30 static int index = SNDRV_DEFAULT_IDX1;
-5
sound/soc/codecs/Kconfig
··· 186 186 imply SND_SOC_SI476X 187 187 imply SND_SOC_SIMPLE_AMPLIFIER 188 188 imply SND_SOC_SIMPLE_MUX 189 - imply SND_SOC_SIRF_AUDIO_CODEC 190 189 imply SND_SOC_SPDIF 191 190 imply SND_SOC_SSM2305 192 191 imply SND_SOC_SSM2518 ··· 1277 1278 config SND_SOC_SIMPLE_MUX 1278 1279 tristate "Simple Audio Mux" 1279 1280 select GPIOLIB 1280 - 1281 - config SND_SOC_SIRF_AUDIO_CODEC 1282 - tristate "SiRF SoC internal audio codec" 1283 - select REGMAP_MMIO 1284 1281 1285 1282 config SND_SOC_SPDIF 1286 1283 tristate "S/PDIF CODEC"
+1
sound/soc/codecs/ak4458.c
··· 812 812 { .compatible = "asahi-kasei,ak4497", .data = &ak4497_drvdata}, 813 813 { }, 814 814 }; 815 + MODULE_DEVICE_TABLE(of, ak4458_of_match); 815 816 816 817 static struct i2c_driver ak4458_i2c_driver = { 817 818 .driver = {
+1
sound/soc/codecs/ak5558.c
··· 419 419 { .compatible = "asahi-kasei,ak5558"}, 420 420 { } 421 421 }; 422 + MODULE_DEVICE_TABLE(of, ak5558_i2c_dt_ids); 422 423 423 424 static struct i2c_driver ak5558_i2c_driver = { 424 425 .driver = {
+34 -78
sound/soc/codecs/cs42l42.c
··· 401 401 }; 402 402 403 403 static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false); 404 - static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false); 404 + static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true); 405 405 406 406 static const char * const cs42l42_hpf_freq_text[] = { 407 407 "1.86Hz", "120Hz", "235Hz", "466Hz" ··· 458 458 CS42L42_DAC_HPF_EN_SHIFT, true, false), 459 459 SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL, 460 460 CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT, 461 - 0x3e, 1, mixer_tlv) 461 + 0x3f, 1, mixer_tlv) 462 462 }; 463 463 464 464 static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w, ··· 511 511 {"HP", NULL, "HPDRV"} 512 512 }; 513 513 514 - static int cs42l42_set_bias_level(struct snd_soc_component *component, 515 - enum snd_soc_bias_level level) 516 - { 517 - struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component); 518 - int ret; 519 - 520 - switch (level) { 521 - case SND_SOC_BIAS_ON: 522 - break; 523 - case SND_SOC_BIAS_PREPARE: 524 - break; 525 - case SND_SOC_BIAS_STANDBY: 526 - if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) { 527 - regcache_cache_only(cs42l42->regmap, false); 528 - regcache_sync(cs42l42->regmap); 529 - ret = regulator_bulk_enable( 530 - ARRAY_SIZE(cs42l42->supplies), 531 - cs42l42->supplies); 532 - if (ret != 0) { 533 - dev_err(component->dev, 534 - "Failed to enable regulators: %d\n", 535 - ret); 536 - return ret; 537 - } 538 - } 539 - break; 540 - case SND_SOC_BIAS_OFF: 541 - 542 - regcache_cache_only(cs42l42->regmap, true); 543 - regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies), 544 - cs42l42->supplies); 545 - break; 546 - } 547 - 548 - return 0; 549 - } 550 - 551 514 static int cs42l42_component_probe(struct snd_soc_component *component) 552 515 { 553 516 struct cs42l42_private *cs42l42 = ··· 523 560 524 561 static const struct snd_soc_component_driver soc_component_dev_cs42l42 = { 525 562 .probe = cs42l42_component_probe, 526 - 
.set_bias_level = cs42l42_set_bias_level, 527 563 .dapm_widgets = cs42l42_dapm_widgets, 528 564 .num_dapm_widgets = ARRAY_SIZE(cs42l42_dapm_widgets), 529 565 .dapm_routes = cs42l42_audio_map, ··· 653 691 CS42L42_CLK_OASRC_SEL_MASK, 654 692 CS42L42_CLK_OASRC_SEL_12 << 655 693 CS42L42_CLK_OASRC_SEL_SHIFT); 656 - /* channel 1 on low LRCLK, 32 bit */ 657 - snd_soc_component_update_bits(component, 658 - CS42L42_ASP_RX_DAI0_CH1_AP_RES, 659 - CS42L42_ASP_RX_CH_AP_MASK | 660 - CS42L42_ASP_RX_CH_RES_MASK, 661 - (CS42L42_ASP_RX_CH_AP_LOW << 662 - CS42L42_ASP_RX_CH_AP_SHIFT) | 663 - (CS42L42_ASP_RX_CH_RES_32 << 664 - CS42L42_ASP_RX_CH_RES_SHIFT)); 665 - /* Channel 2 on high LRCLK, 32 bit */ 666 - snd_soc_component_update_bits(component, 667 - CS42L42_ASP_RX_DAI0_CH2_AP_RES, 668 - CS42L42_ASP_RX_CH_AP_MASK | 669 - CS42L42_ASP_RX_CH_RES_MASK, 670 - (CS42L42_ASP_RX_CH_AP_HI << 671 - CS42L42_ASP_RX_CH_AP_SHIFT) | 672 - (CS42L42_ASP_RX_CH_RES_32 << 673 - CS42L42_ASP_RX_CH_RES_SHIFT)); 674 694 if (pll_ratio_table[i].mclk_src_sel == 0) { 675 695 /* Pass the clock straight through */ 676 696 snd_soc_component_update_bits(component, ··· 741 797 /* Bitclock/frame inversion */ 742 798 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { 743 799 case SND_SOC_DAIFMT_NB_NF: 800 + asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT; 744 801 break; 745 802 case SND_SOC_DAIFMT_NB_IF: 746 - asp_cfg_val |= CS42L42_ASP_POL_INV << 747 - CS42L42_ASP_LCPOL_IN_SHIFT; 803 + asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT; 804 + asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT; 748 805 break; 749 806 case SND_SOC_DAIFMT_IB_NF: 750 - asp_cfg_val |= CS42L42_ASP_POL_INV << 751 - CS42L42_ASP_SCPOL_IN_DAC_SHIFT; 752 807 break; 753 808 case SND_SOC_DAIFMT_IB_IF: 754 - asp_cfg_val |= CS42L42_ASP_POL_INV << 755 - CS42L42_ASP_LCPOL_IN_SHIFT; 756 - asp_cfg_val |= CS42L42_ASP_POL_INV << 757 - CS42L42_ASP_SCPOL_IN_DAC_SHIFT; 809 + asp_cfg_val |= CS42L42_ASP_LCPOL_INV << 
CS42L42_ASP_LCPOL_SHIFT; 758 810 break; 759 811 } 760 812 761 - snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, 762 - CS42L42_ASP_MODE_MASK | 763 - CS42L42_ASP_SCPOL_IN_DAC_MASK | 764 - CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val); 813 + snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK | 814 + CS42L42_ASP_SCPOL_MASK | 815 + CS42L42_ASP_LCPOL_MASK, 816 + asp_cfg_val); 765 817 766 818 return 0; 767 819 } ··· 768 828 { 769 829 struct snd_soc_component *component = dai->component; 770 830 struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component); 771 - int retval; 831 + unsigned int width = (params_width(params) / 8) - 1; 832 + unsigned int val = 0; 772 833 773 834 cs42l42->srate = params_rate(params); 774 - cs42l42->swidth = params_width(params); 775 835 776 - retval = cs42l42_pll_config(component); 836 + switch(substream->stream) { 837 + case SNDRV_PCM_STREAM_PLAYBACK: 838 + val |= width << CS42L42_ASP_RX_CH_RES_SHIFT; 839 + /* channel 1 on low LRCLK */ 840 + snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES, 841 + CS42L42_ASP_RX_CH_AP_MASK | 842 + CS42L42_ASP_RX_CH_RES_MASK, val); 843 + /* Channel 2 on high LRCLK */ 844 + val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT; 845 + snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES, 846 + CS42L42_ASP_RX_CH_AP_MASK | 847 + CS42L42_ASP_RX_CH_RES_MASK, val); 848 + break; 849 + default: 850 + break; 851 + } 777 852 778 - return retval; 853 + return cs42l42_pll_config(component); 779 854 } 780 855 781 856 static int cs42l42_set_sysclk(struct snd_soc_dai *dai, ··· 855 900 return 0; 856 901 } 857 902 858 - #define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \ 859 - SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \ 860 - SNDRV_PCM_FMTBIT_S32_LE) 903 + #define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ 904 + SNDRV_PCM_FMTBIT_S24_LE |\ 905 + SNDRV_PCM_FMTBIT_S32_LE ) 861 906 862 907 
863 908 static const struct snd_soc_dai_ops cs42l42_ops = { ··· 1756 1801 dev_dbg(&i2c_client->dev, "Found reset GPIO\n"); 1757 1802 gpiod_set_value_cansleep(cs42l42->reset_gpio, 1); 1758 1803 } 1759 - mdelay(3); 1804 + usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2); 1760 1805 1761 1806 /* Request IRQ */ 1762 1807 ret = devm_request_threaded_irq(&i2c_client->dev, ··· 1881 1926 } 1882 1927 1883 1928 gpiod_set_value_cansleep(cs42l42->reset_gpio, 1); 1929 + usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2); 1884 1930 1885 1931 regcache_cache_only(cs42l42->regmap, false); 1886 1932 regcache_sync(cs42l42->regmap);
+7 -6
sound/soc/codecs/cs42l42.h
··· 258 258 #define CS42L42_ASP_SLAVE_MODE 0x00 259 259 #define CS42L42_ASP_MODE_SHIFT 4 260 260 #define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT) 261 - #define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2 262 - #define CS42L42_ASP_SCPOL_IN_DAC_MASK (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT) 263 - #define CS42L42_ASP_LCPOL_IN_SHIFT 0 264 - #define CS42L42_ASP_LCPOL_IN_MASK (1 << CS42L42_ASP_LCPOL_IN_SHIFT) 265 - #define CS42L42_ASP_POL_INV 1 261 + #define CS42L42_ASP_SCPOL_SHIFT 2 262 + #define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT) 263 + #define CS42L42_ASP_SCPOL_NOR 3 264 + #define CS42L42_ASP_LCPOL_SHIFT 0 265 + #define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT) 266 + #define CS42L42_ASP_LCPOL_INV 3 266 267 267 268 #define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08) 268 269 #define CS42L42_ASP_STP_SHIFT 4 ··· 740 739 #define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16) 741 740 742 741 #define CS42L42_NUM_SUPPLIES 5 742 + #define CS42L42_BOOT_TIME_US 3000 743 743 744 744 static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = { 745 745 "VA", ··· 758 756 struct completion pdn_done; 759 757 u32 sclk; 760 758 u32 srate; 761 - u32 swidth; 762 759 u8 plug_state; 763 760 u8 hs_type; 764 761 u8 ts_inv;
+2 -7
sound/soc/codecs/es8316.c
··· 63 63 1, 1, TLV_DB_SCALE_ITEM(0, 0, 0), 64 64 2, 2, TLV_DB_SCALE_ITEM(250, 0, 0), 65 65 3, 3, TLV_DB_SCALE_ITEM(450, 0, 0), 66 - 4, 4, TLV_DB_SCALE_ITEM(700, 0, 0), 67 - 5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0), 68 - 6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0), 69 - 7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0), 70 - 8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0), 71 - 9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0), 72 - 10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0), 66 + 4, 7, TLV_DB_SCALE_ITEM(700, 300, 0), 67 + 8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0), 73 68 ); 74 69 75 70 static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
+1 -1
sound/soc/codecs/lpass-rx-macro.c
··· 2895 2895 { 2896 2896 struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); 2897 2897 u16 val, ec_hq_reg; 2898 - int ec_tx; 2898 + int ec_tx = -1; 2899 2899 2900 2900 val = snd_soc_component_read(component, 2901 2901 CDC_RX_INP_MUX_RX_MIX_CFG4);
+13 -15
sound/soc/codecs/lpass-va-macro.c
··· 189 189 struct device *dev; 190 190 unsigned long active_ch_mask[VA_MACRO_MAX_DAIS]; 191 191 unsigned long active_ch_cnt[VA_MACRO_MAX_DAIS]; 192 - unsigned long active_decimator[VA_MACRO_MAX_DAIS]; 193 192 u16 dmic_clk_div; 194 193 195 194 int dec_mode[VA_MACRO_NUM_DECIMATORS]; ··· 548 549 if (enable) { 549 550 set_bit(dec_id, &va->active_ch_mask[dai_id]); 550 551 va->active_ch_cnt[dai_id]++; 551 - va->active_decimator[dai_id] = dec_id; 552 552 } else { 553 553 clear_bit(dec_id, &va->active_ch_mask[dai_id]); 554 554 va->active_ch_cnt[dai_id]--; 555 - va->active_decimator[dai_id] = -1; 556 555 } 557 556 558 557 snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update); ··· 877 880 struct va_macro *va = snd_soc_component_get_drvdata(component); 878 881 u16 tx_vol_ctl_reg, decimator; 879 882 880 - decimator = va->active_decimator[dai->id]; 881 - 882 - tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL + 883 - VA_MACRO_TX_PATH_OFFSET * decimator; 884 - if (mute) 885 - snd_soc_component_update_bits(component, tx_vol_ctl_reg, 886 - CDC_VA_TX_PATH_PGA_MUTE_EN_MASK, 887 - CDC_VA_TX_PATH_PGA_MUTE_EN); 888 - else 889 - snd_soc_component_update_bits(component, tx_vol_ctl_reg, 890 - CDC_VA_TX_PATH_PGA_MUTE_EN_MASK, 891 - CDC_VA_TX_PATH_PGA_MUTE_DISABLE); 883 + for_each_set_bit(decimator, &va->active_ch_mask[dai->id], 884 + VA_MACRO_DEC_MAX) { 885 + tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL + 886 + VA_MACRO_TX_PATH_OFFSET * decimator; 887 + if (mute) 888 + snd_soc_component_update_bits(component, tx_vol_ctl_reg, 889 + CDC_VA_TX_PATH_PGA_MUTE_EN_MASK, 890 + CDC_VA_TX_PATH_PGA_MUTE_EN); 891 + else 892 + snd_soc_component_update_bits(component, tx_vol_ctl_reg, 893 + CDC_VA_TX_PATH_PGA_MUTE_EN_MASK, 894 + CDC_VA_TX_PATH_PGA_MUTE_DISABLE); 895 + } 892 896 893 897 return 0; 894 898 }
+11 -9
sound/soc/codecs/lpass-wsa-macro.c
··· 1211 1211 struct snd_kcontrol *kcontrol, int event) 1212 1212 { 1213 1213 struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); 1214 - u16 gain_reg; 1214 + u16 path_reg, gain_reg; 1215 1215 int val; 1216 1216 1217 - switch (w->reg) { 1218 - case CDC_WSA_RX0_RX_PATH_MIX_CTL: 1217 + switch (w->shift) { 1218 + case WSA_MACRO_RX_MIX0: 1219 + path_reg = CDC_WSA_RX0_RX_PATH_MIX_CTL; 1219 1220 gain_reg = CDC_WSA_RX0_RX_VOL_MIX_CTL; 1220 1221 break; 1221 - case CDC_WSA_RX1_RX_PATH_MIX_CTL: 1222 + case WSA_MACRO_RX_MIX1: 1223 + path_reg = CDC_WSA_RX1_RX_PATH_MIX_CTL; 1222 1224 gain_reg = CDC_WSA_RX1_RX_VOL_MIX_CTL; 1223 1225 break; 1224 1226 default: ··· 1233 1231 snd_soc_component_write(component, gain_reg, val); 1234 1232 break; 1235 1233 case SND_SOC_DAPM_POST_PMD: 1236 - snd_soc_component_update_bits(component, w->reg, 1234 + snd_soc_component_update_bits(component, path_reg, 1237 1235 CDC_WSA_RX_PATH_MIX_CLK_EN_MASK, 1238 1236 CDC_WSA_RX_PATH_MIX_CLK_DISABLE); 1239 1237 break; ··· 2070 2068 SND_SOC_DAPM_MUX("WSA_RX0 INP0", SND_SOC_NOPM, 0, 0, &rx0_prim_inp0_mux), 2071 2069 SND_SOC_DAPM_MUX("WSA_RX0 INP1", SND_SOC_NOPM, 0, 0, &rx0_prim_inp1_mux), 2072 2070 SND_SOC_DAPM_MUX("WSA_RX0 INP2", SND_SOC_NOPM, 0, 0, &rx0_prim_inp2_mux), 2073 - SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", CDC_WSA_RX0_RX_PATH_MIX_CTL, 2074 - 0, 0, &rx0_mix_mux, wsa_macro_enable_mix_path, 2071 + SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX0, 2072 + 0, &rx0_mix_mux, wsa_macro_enable_mix_path, 2075 2073 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), 2076 2074 SND_SOC_DAPM_MUX("WSA_RX1 INP0", SND_SOC_NOPM, 0, 0, &rx1_prim_inp0_mux), 2077 2075 SND_SOC_DAPM_MUX("WSA_RX1 INP1", SND_SOC_NOPM, 0, 0, &rx1_prim_inp1_mux), 2078 2076 SND_SOC_DAPM_MUX("WSA_RX1 INP2", SND_SOC_NOPM, 0, 0, &rx1_prim_inp2_mux), 2079 - SND_SOC_DAPM_MUX_E("WSA_RX1 MIX INP", CDC_WSA_RX1_RX_PATH_MIX_CTL, 2080 - 0, 0, &rx1_mix_mux, wsa_macro_enable_mix_path, 2077 + SND_SOC_DAPM_MUX_E("WSA_RX1 
MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX1, 2078 + 0, &rx1_mix_mux, wsa_macro_enable_mix_path, 2081 2079 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), 2082 2080 2083 2081 SND_SOC_DAPM_MIXER_E("WSA_RX INT0 MIX", SND_SOC_NOPM, 0, 0, NULL, 0,
+2
sound/soc/codecs/rt1015.c
··· 209 209 case RT1015_VENDOR_ID: 210 210 case RT1015_DEVICE_ID: 211 211 case RT1015_PRO_ALT: 212 + case RT1015_MAN_I2C: 212 213 case RT1015_DAC3: 213 214 case RT1015_VBAT_TEST_OUT1: 214 215 case RT1015_VBAT_TEST_OUT2: ··· 514 513 msleep(300); 515 514 regmap_write(regmap, RT1015_PWR_STATE_CTRL, 0x0008); 516 515 regmap_write(regmap, RT1015_SYS_RST1, 0x05F5); 516 + regmap_write(regmap, RT1015_CLK_DET, 0x8000); 517 517 518 518 regcache_cache_bypass(regmap, false); 519 519 regcache_mark_dirty(regmap);
+2 -2
sound/soc/codecs/rt5640.c
··· 339 339 } 340 340 341 341 static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); 342 - static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); 342 + static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); 343 343 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); 344 - static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); 344 + static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); 345 345 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); 346 346 347 347 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+2 -2
sound/soc/codecs/rt5651.c
··· 285 285 } 286 286 287 287 static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); 288 - static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); 288 + static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); 289 289 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); 290 - static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); 290 + static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); 291 291 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); 292 292 293 293 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+5
sound/soc/codecs/rt5659.c
··· 3426 3426 { 3427 3427 struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component); 3428 3428 unsigned int reg_val = 0; 3429 + int ret; 3429 3430 3430 3431 if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src) 3431 3432 return 0; 3432 3433 3433 3434 switch (clk_id) { 3434 3435 case RT5659_SCLK_S_MCLK: 3436 + ret = clk_set_rate(rt5659->mclk, freq); 3437 + if (ret) 3438 + return ret; 3439 + 3435 3440 reg_val |= RT5659_SCLK_SRC_MCLK; 3436 3441 break; 3437 3442 case RT5659_SCLK_S_PLL1:
+96 -14
sound/soc/codecs/rt5670.c
··· 629 629 static SOC_ENUM_SINGLE_DECL(rt5670_if2_adc_enum, RT5670_DIG_INF1_DATA, 630 630 RT5670_IF2_ADC_SEL_SFT, rt5670_data_select); 631 631 632 + /* 633 + * For reliable output-mute LED control we need a "DAC1 Playback Switch" control. 634 + * We emulate this by only clearing the RT5670_M_DAC1_L/_R AD_DA_MIXER register 635 + * bits when both our emulated DAC1 Playback Switch control and the DAC1 MIXL/R 636 + * DAPM-mixer DAC1 input are enabled. 637 + */ 638 + static void rt5670_update_ad_da_mixer_dac1_m_bits(struct rt5670_priv *rt5670) 639 + { 640 + int val = RT5670_M_DAC1_L | RT5670_M_DAC1_R; 641 + 642 + if (rt5670->dac1_mixl_dac1_switch && rt5670->dac1_playback_switch_l) 643 + val &= ~RT5670_M_DAC1_L; 644 + 645 + if (rt5670->dac1_mixr_dac1_switch && rt5670->dac1_playback_switch_r) 646 + val &= ~RT5670_M_DAC1_R; 647 + 648 + regmap_update_bits(rt5670->regmap, RT5670_AD_DA_MIXER, 649 + RT5670_M_DAC1_L | RT5670_M_DAC1_R, val); 650 + } 651 + 652 + static int rt5670_dac1_playback_switch_get(struct snd_kcontrol *kcontrol, 653 + struct snd_ctl_elem_value *ucontrol) 654 + { 655 + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); 656 + struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component); 657 + 658 + ucontrol->value.integer.value[0] = rt5670->dac1_playback_switch_l; 659 + ucontrol->value.integer.value[1] = rt5670->dac1_playback_switch_r; 660 + 661 + return 0; 662 + } 663 + 664 + static int rt5670_dac1_playback_switch_put(struct snd_kcontrol *kcontrol, 665 + struct snd_ctl_elem_value *ucontrol) 666 + { 667 + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); 668 + struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component); 669 + 670 + if (rt5670->dac1_playback_switch_l == ucontrol->value.integer.value[0] && 671 + rt5670->dac1_playback_switch_r == ucontrol->value.integer.value[1]) 672 + return 0; 673 + 674 + rt5670->dac1_playback_switch_l = ucontrol->value.integer.value[0]; 675 + 
rt5670->dac1_playback_switch_r = ucontrol->value.integer.value[1]; 676 + 677 + rt5670_update_ad_da_mixer_dac1_m_bits(rt5670); 678 + 679 + return 1; 680 + } 681 + 632 682 static const struct snd_kcontrol_new rt5670_snd_controls[] = { 633 683 /* Headphone Output Volume */ 634 - SOC_DOUBLE("HP Playback Switch", RT5670_HP_VOL, 635 - RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1), 636 684 SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL, 637 685 RT5670_L_VOL_SFT, RT5670_R_VOL_SFT, 638 686 39, 1, out_vol_tlv), 639 687 /* OUTPUT Control */ 640 - SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1, 641 - RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1), 642 688 SOC_DOUBLE_TLV("OUT Playback Volume", RT5670_LOUT1, 643 689 RT5670_L_VOL_SFT, RT5670_R_VOL_SFT, 39, 1, out_vol_tlv), 644 690 /* DAC Digital Volume */ 645 691 SOC_DOUBLE("DAC2 Playback Switch", RT5670_DAC_CTRL, 646 692 RT5670_M_DAC_L2_VOL_SFT, RT5670_M_DAC_R2_VOL_SFT, 1, 1), 693 + SOC_DOUBLE_EXT("DAC1 Playback Switch", SND_SOC_NOPM, 0, 1, 1, 0, 694 + rt5670_dac1_playback_switch_get, rt5670_dac1_playback_switch_put), 647 695 SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5670_DAC1_DIG_VOL, 648 696 RT5670_L_VOL_SFT, RT5670_R_VOL_SFT, 649 697 175, 0, dac_vol_tlv), ··· 961 913 RT5670_M_MONO_ADC_R2_SFT, 1, 1), 962 914 }; 963 915 916 + /* See comment above rt5670_update_ad_da_mixer_dac1_m_bits() */ 917 + static int rt5670_put_dac1_mix_dac1_switch(struct snd_kcontrol *kcontrol, 918 + struct snd_ctl_elem_value *ucontrol) 919 + { 920 + struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; 921 + struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol); 922 + struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component); 923 + int ret; 924 + 925 + if (mc->shift == 0) 926 + rt5670->dac1_mixl_dac1_switch = ucontrol->value.integer.value[0]; 927 + else 928 + rt5670->dac1_mixr_dac1_switch = ucontrol->value.integer.value[0]; 929 + 930 + /* Apply the update (if any) */ 931 + ret = 
snd_soc_dapm_put_volsw(kcontrol, ucontrol); 932 + if (ret == 0) 933 + return 0; 934 + 935 + rt5670_update_ad_da_mixer_dac1_m_bits(rt5670); 936 + 937 + return 1; 938 + } 939 + 940 + #define SOC_DAPM_SINGLE_RT5670_DAC1_SW(name, shift) \ 941 + SOC_SINGLE_EXT(name, SND_SOC_NOPM, shift, 1, 0, \ 942 + snd_soc_dapm_get_volsw, rt5670_put_dac1_mix_dac1_switch) 943 + 964 944 static const struct snd_kcontrol_new rt5670_dac_l_mix[] = { 965 945 SOC_DAPM_SINGLE("Stereo ADC Switch", RT5670_AD_DA_MIXER, 966 946 RT5670_M_ADCMIX_L_SFT, 1, 1), 967 - SOC_DAPM_SINGLE("DAC1 Switch", RT5670_AD_DA_MIXER, 968 - RT5670_M_DAC1_L_SFT, 1, 1), 947 + SOC_DAPM_SINGLE_RT5670_DAC1_SW("DAC1 Switch", 0), 969 948 }; 970 949 971 950 static const struct snd_kcontrol_new rt5670_dac_r_mix[] = { 972 951 SOC_DAPM_SINGLE("Stereo ADC Switch", RT5670_AD_DA_MIXER, 973 952 RT5670_M_ADCMIX_R_SFT, 1, 1), 974 - SOC_DAPM_SINGLE("DAC1 Switch", RT5670_AD_DA_MIXER, 975 - RT5670_M_DAC1_R_SFT, 1, 1), 953 + SOC_DAPM_SINGLE_RT5670_DAC1_SW("DAC1 Switch", 1), 976 954 }; 977 955 978 956 static const struct snd_kcontrol_new rt5670_sto_dac_l_mix[] = { ··· 1730 1656 RT5670_PWR_ADC_S1F_BIT, 0, NULL, 0), 1731 1657 SND_SOC_DAPM_SUPPLY("ADC Stereo2 Filter", RT5670_PWR_DIG2, 1732 1658 RT5670_PWR_ADC_S2F_BIT, 0, NULL, 0), 1733 - SND_SOC_DAPM_MIXER("Sto1 ADC MIXL", RT5670_STO1_ADC_DIG_VOL, 1734 - RT5670_L_MUTE_SFT, 1, rt5670_sto1_adc_l_mix, 1735 - ARRAY_SIZE(rt5670_sto1_adc_l_mix)), 1736 - SND_SOC_DAPM_MIXER("Sto1 ADC MIXR", RT5670_STO1_ADC_DIG_VOL, 1737 - RT5670_R_MUTE_SFT, 1, rt5670_sto1_adc_r_mix, 1738 - ARRAY_SIZE(rt5670_sto1_adc_r_mix)), 1659 + SND_SOC_DAPM_MIXER("Sto1 ADC MIXL", SND_SOC_NOPM, 0, 0, 1660 + rt5670_sto1_adc_l_mix, ARRAY_SIZE(rt5670_sto1_adc_l_mix)), 1661 + SND_SOC_DAPM_MIXER("Sto1 ADC MIXR", SND_SOC_NOPM, 0, 0, 1662 + rt5670_sto1_adc_r_mix, ARRAY_SIZE(rt5670_sto1_adc_r_mix)), 1739 1663 SND_SOC_DAPM_MIXER("Sto2 ADC MIXL", SND_SOC_NOPM, 0, 0, 1740 1664 rt5670_sto2_adc_l_mix, 1741 1665 
ARRAY_SIZE(rt5670_sto2_adc_l_mix)), ··· 3070 2998 rt5670->jd_mode = 3; 3071 2999 dev_info(&i2c->dev, "quirk JD mode 3\n"); 3072 3000 } 3001 + 3002 + /* 3003 + * Enable the emulated "DAC1 Playback Switch" by default to avoid 3004 + * muting the output with older UCM profiles. 3005 + */ 3006 + rt5670->dac1_playback_switch_l = true; 3007 + rt5670->dac1_playback_switch_r = true; 3008 + /* The Power-On-Reset values for the DAC1 mixer have the DAC1 input enabled. */ 3009 + rt5670->dac1_mixl_dac1_switch = true; 3010 + rt5670->dac1_mixr_dac1_switch = true; 3073 3011 3074 3012 rt5670->regmap = devm_regmap_init_i2c(i2c, &rt5670_regmap); 3075 3013 if (IS_ERR(rt5670->regmap)) {
+5 -4
sound/soc/codecs/rt5670.h
··· 212 212 /* global definition */ 213 213 #define RT5670_L_MUTE (0x1 << 15) 214 214 #define RT5670_L_MUTE_SFT 15 215 - #define RT5670_VOL_L_MUTE (0x1 << 14) 216 - #define RT5670_VOL_L_SFT 14 217 215 #define RT5670_R_MUTE (0x1 << 7) 218 216 #define RT5670_R_MUTE_SFT 7 219 - #define RT5670_VOL_R_MUTE (0x1 << 6) 220 - #define RT5670_VOL_R_SFT 6 221 217 #define RT5670_L_VOL_MASK (0x3f << 8) 222 218 #define RT5670_L_VOL_SFT 8 223 219 #define RT5670_R_VOL_MASK (0x3f) ··· 2013 2017 int dsp_rate; 2014 2018 int jack_type; 2015 2019 int jack_type_saved; 2020 + 2021 + bool dac1_mixl_dac1_switch; 2022 + bool dac1_mixr_dac1_switch; 2023 + bool dac1_playback_switch_l; 2024 + bool dac1_playback_switch_r; 2016 2025 }; 2017 2026 2018 2027 void rt5670_jack_suspend(struct snd_soc_component *component);
+8
sound/soc/codecs/rt711.c
··· 895 895 return 0; 896 896 } 897 897 898 + static void rt711_remove(struct snd_soc_component *component) 899 + { 900 + struct rt711_priv *rt711 = snd_soc_component_get_drvdata(component); 901 + 902 + regcache_cache_only(rt711->regmap, true); 903 + } 904 + 898 905 static const struct snd_soc_component_driver soc_codec_dev_rt711 = { 899 906 .probe = rt711_probe, 900 907 .set_bias_level = rt711_set_bias_level, ··· 912 905 .dapm_routes = rt711_audio_map, 913 906 .num_dapm_routes = ARRAY_SIZE(rt711_audio_map), 914 907 .set_jack = rt711_set_jack_detect, 908 + .remove = rt711_remove, 915 909 }; 916 910 917 911 static int rt711_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
+1 -1
sound/soc/codecs/sgtl5000.c
··· 71 71 { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f }, 72 72 { SGTL5000_DAP_MAIN_CHAN, 0x8000 }, 73 73 { SGTL5000_DAP_MIX_CHAN, 0x0000 }, 74 - { SGTL5000_DAP_AVC_CTRL, 0x0510 }, 74 + { SGTL5000_DAP_AVC_CTRL, 0x5100 }, 75 75 { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 }, 76 76 { SGTL5000_DAP_AVC_ATTACK, 0x0028 }, 77 77 { SGTL5000_DAP_AVC_DECAY, 0x0050 },
-124
sound/soc/codecs/sirf-audio-codec.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 - /* 3 - * SiRF inner codec controllers define 4 - * 5 - * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 6 - */ 7 - 8 - #ifndef _SIRF_AUDIO_CODEC_H 9 - #define _SIRF_AUDIO_CODEC_H 10 - 11 - 12 - #define AUDIO_IC_CODEC_PWR (0x00E0) 13 - #define AUDIO_IC_CODEC_CTRL0 (0x00E4) 14 - #define AUDIO_IC_CODEC_CTRL1 (0x00E8) 15 - #define AUDIO_IC_CODEC_CTRL2 (0x00EC) 16 - #define AUDIO_IC_CODEC_CTRL3 (0x00F0) 17 - 18 - #define MICBIASEN (1 << 3) 19 - 20 - #define IC_RDACEN (1 << 0) 21 - #define IC_LDACEN (1 << 1) 22 - #define IC_HSREN (1 << 2) 23 - #define IC_HSLEN (1 << 3) 24 - #define IC_SPEN (1 << 4) 25 - #define IC_CPEN (1 << 5) 26 - 27 - #define IC_HPRSELR (1 << 6) 28 - #define IC_HPLSELR (1 << 7) 29 - #define IC_HPRSELL (1 << 8) 30 - #define IC_HPLSELL (1 << 9) 31 - #define IC_SPSELR (1 << 10) 32 - #define IC_SPSELL (1 << 11) 33 - 34 - #define IC_MONOR (1 << 12) 35 - #define IC_MONOL (1 << 13) 36 - 37 - #define IC_RXOSRSEL (1 << 28) 38 - #define IC_CPFREQ (1 << 29) 39 - #define IC_HSINVEN (1 << 30) 40 - 41 - #define IC_MICINREN (1 << 0) 42 - #define IC_MICINLEN (1 << 1) 43 - #define IC_MICIN1SEL (1 << 2) 44 - #define IC_MICIN2SEL (1 << 3) 45 - #define IC_MICDIFSEL (1 << 4) 46 - #define IC_LINEIN1SEL (1 << 5) 47 - #define IC_LINEIN2SEL (1 << 6) 48 - #define IC_RADCEN (1 << 7) 49 - #define IC_LADCEN (1 << 8) 50 - #define IC_ALM (1 << 9) 51 - 52 - #define IC_DIGMICEN (1 << 22) 53 - #define IC_DIGMICFREQ (1 << 23) 54 - #define IC_ADC14B_12 (1 << 24) 55 - #define IC_FIRDAC_HSL_EN (1 << 25) 56 - #define IC_FIRDAC_HSR_EN (1 << 26) 57 - #define IC_FIRDAC_LOUT_EN (1 << 27) 58 - #define IC_POR (1 << 28) 59 - #define IC_CODEC_CLK_EN (1 << 29) 60 - #define IC_HP_3DB_BOOST (1 << 30) 61 - 62 - #define IC_ADC_LEFT_GAIN_SHIFT 16 63 - #define IC_ADC_RIGHT_GAIN_SHIFT 10 64 - #define IC_ADC_GAIN_MASK 0x3F 65 - #define IC_MIC_MAX_GAIN 0x39 66 - 67 - #define IC_RXPGAR_MASK 0x3F 68 - #define 
IC_RXPGAR_SHIFT 14 69 - #define IC_RXPGAL_MASK 0x3F 70 - #define IC_RXPGAL_SHIFT 21 71 - #define IC_RXPGAR 0x7B 72 - #define IC_RXPGAL 0x7B 73 - 74 - #define AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK 0x3F 75 - #define AUDIO_PORT_TX_FIFO_SC_OFFSET 0 76 - #define AUDIO_PORT_TX_FIFO_LC_OFFSET 10 77 - #define AUDIO_PORT_TX_FIFO_HC_OFFSET 20 78 - 79 - #define TX_FIFO_SC(x) (((x) & AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK) \ 80 - << AUDIO_PORT_TX_FIFO_SC_OFFSET) 81 - #define TX_FIFO_LC(x) (((x) & AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK) \ 82 - << AUDIO_PORT_TX_FIFO_LC_OFFSET) 83 - #define TX_FIFO_HC(x) (((x) & AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK) \ 84 - << AUDIO_PORT_TX_FIFO_HC_OFFSET) 85 - 86 - #define AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK 0x0F 87 - #define AUDIO_PORT_RX_FIFO_SC_OFFSET 0 88 - #define AUDIO_PORT_RX_FIFO_LC_OFFSET 10 89 - #define AUDIO_PORT_RX_FIFO_HC_OFFSET 20 90 - 91 - #define RX_FIFO_SC(x) (((x) & AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK) \ 92 - << AUDIO_PORT_RX_FIFO_SC_OFFSET) 93 - #define RX_FIFO_LC(x) (((x) & AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK) \ 94 - << AUDIO_PORT_RX_FIFO_LC_OFFSET) 95 - #define RX_FIFO_HC(x) (((x) & AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK) \ 96 - << AUDIO_PORT_RX_FIFO_HC_OFFSET) 97 - #define AUDIO_PORT_IC_CODEC_TX_CTRL (0x00F4) 98 - #define AUDIO_PORT_IC_CODEC_RX_CTRL (0x00F8) 99 - 100 - #define AUDIO_PORT_IC_TXFIFO_OP (0x00FC) 101 - #define AUDIO_PORT_IC_TXFIFO_LEV_CHK (0x0100) 102 - #define AUDIO_PORT_IC_TXFIFO_STS (0x0104) 103 - #define AUDIO_PORT_IC_TXFIFO_INT (0x0108) 104 - #define AUDIO_PORT_IC_TXFIFO_INT_MSK (0x010C) 105 - 106 - #define AUDIO_PORT_IC_RXFIFO_OP (0x0110) 107 - #define AUDIO_PORT_IC_RXFIFO_LEV_CHK (0x0114) 108 - #define AUDIO_PORT_IC_RXFIFO_STS (0x0118) 109 - #define AUDIO_PORT_IC_RXFIFO_INT (0x011C) 110 - #define AUDIO_PORT_IC_RXFIFO_INT_MSK (0x0120) 111 - 112 - #define AUDIO_FIFO_START (1 << 0) 113 - #define AUDIO_FIFO_RESET (1 << 1) 114 - 115 - #define AUDIO_FIFO_FULL (1 << 0) 116 - #define AUDIO_FIFO_EMPTY (1 << 1) 117 - #define 
AUDIO_FIFO_OFLOW (1 << 2) 118 - #define AUDIO_FIFO_UFLOW (1 << 3) 119 - 120 - #define IC_TX_ENABLE (0x03) 121 - #define IC_RX_ENABLE_MONO (0x01) 122 - #define IC_RX_ENABLE_STEREO (0x03) 123 - 124 - #endif /*__SIRF_AUDIO_CODEC_H*/
+6
sound/soc/codecs/wcd934x.c
··· 1873 1873 1874 1874 wcd = snd_soc_component_get_drvdata(dai->component); 1875 1875 1876 + if (tx_num > WCD934X_TX_MAX || rx_num > WCD934X_RX_MAX) { 1877 + dev_err(wcd->dev, "Invalid tx %d or rx %d channel count\n", 1878 + tx_num, rx_num); 1879 + return -EINVAL; 1880 + } 1881 + 1876 1882 if (!tx_slot || !rx_slot) { 1877 1883 dev_err(wcd->dev, "Invalid tx_slot=%p, rx_slot=%p\n", 1878 1884 tx_slot, rx_slot);
+4 -2
sound/soc/fsl/fsl_ssi.c
··· 878 878 static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt) 879 879 { 880 880 u32 strcr = 0, scr = 0, stcr, srcr, mask; 881 + unsigned int slots; 881 882 882 883 ssi->dai_fmt = fmt; 883 884 ··· 910 909 return -EINVAL; 911 910 } 912 911 912 + slots = ssi->slots ? : 2; 913 913 regmap_update_bits(ssi->regs, REG_SSI_STCCR, 914 - SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2)); 914 + SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots)); 915 915 regmap_update_bits(ssi->regs, REG_SSI_SRCCR, 916 - SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2)); 916 + SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots)); 917 917 918 918 /* Data on rising edge of bclk, frame low, 1clk before data */ 919 919 strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP | SSI_STCR_TEFS;
+7 -6
sound/soc/generic/simple-card-utils.c
··· 172 172 * or device's module clock. 173 173 */ 174 174 clk = devm_get_clk_from_child(dev, node, NULL); 175 - if (IS_ERR(clk)) 176 - clk = devm_get_clk_from_child(dev, dlc->of_node, NULL); 177 - 178 175 if (!IS_ERR(clk)) { 179 - simple_dai->clk = clk; 180 176 simple_dai->sysclk = clk_get_rate(clk); 181 - } else if (!of_property_read_u32(node, "system-clock-frequency", 182 - &val)) { 177 + 178 + simple_dai->clk = clk; 179 + } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) { 183 180 simple_dai->sysclk = val; 181 + } else { 182 + clk = devm_get_clk_from_child(dev, dlc->of_node, NULL); 183 + if (!IS_ERR(clk)) 184 + simple_dai->sysclk = clk_get_rate(clk); 184 185 } 185 186 186 187 if (of_property_read_bool(node, "system-clock-direction-out"))
+1 -1
sound/soc/intel/boards/bytcr_rt5640.c
··· 581 581 }, 582 582 .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | 583 583 BYT_RT5640_JD_SRC_JD1_IN4P | 584 - BYT_RT5640_OVCD_TH_1500UA | 584 + BYT_RT5640_OVCD_TH_2000UA | 585 585 BYT_RT5640_OVCD_SF_0P75 | 586 586 BYT_RT5640_MCLK_EN), 587 587 },
+3 -1
sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
··· 555 555 556 556 /* set tdm */ 557 557 if (tdm_priv->bck_invert) 558 - tdm_con |= 1 << BCK_INVERSE_SFT; 558 + regmap_update_bits(afe->regmap, AUDIO_TOP_CON3, 559 + BCK_INVERSE_MASK_SFT, 560 + 0x1 << BCK_INVERSE_SFT); 559 561 560 562 if (tdm_priv->lck_invert) 561 563 tdm_con |= 1 << LRCK_INVERSE_SFT;
+5 -3
sound/soc/mediatek/mt8192/mt8192-reg.h
··· 21 21 /***************************************************************************** 22 22 * R E G I S T E R D E F I N I T I O N 23 23 *****************************************************************************/ 24 + /* AUDIO_TOP_CON3 */ 25 + #define BCK_INVERSE_SFT 3 26 + #define BCK_INVERSE_MASK 0x1 27 + #define BCK_INVERSE_MASK_SFT (0x1 << 3) 28 + 24 29 /* AFE_DAC_CON0 */ 25 30 #define VUL12_ON_SFT 31 26 31 #define VUL12_ON_MASK 0x1 ··· 2084 2079 #define TDM_EN_SFT 0 2085 2080 #define TDM_EN_MASK 0x1 2086 2081 #define TDM_EN_MASK_SFT (0x1 << 0) 2087 - #define BCK_INVERSE_SFT 1 2088 - #define BCK_INVERSE_MASK 0x1 2089 - #define BCK_INVERSE_MASK_SFT (0x1 << 1) 2090 2082 #define LRCK_INVERSE_SFT 2 2091 2083 #define LRCK_INVERSE_MASK 0x1 2092 2084 #define LRCK_INVERSE_MASK_SFT (0x1 << 2)
+1 -1
sound/soc/qcom/lpass-cpu.c
··· 739 739 740 740 for_each_child_of_node(dev->of_node, node) { 741 741 ret = of_property_read_u32(node, "reg", &id); 742 - if (ret || id < 0 || id >= data->variant->num_dai) { 742 + if (ret || id < 0) { 743 743 dev_err(dev, "valid dai id not found: %d\n", ret); 744 744 continue; 745 745 }
+3 -3
sound/soc/qcom/sdm845.c
··· 27 27 #define SPK_TDM_RX_MASK 0x03 28 28 #define NUM_TDM_SLOTS 8 29 29 #define SLIM_MAX_TX_PORTS 16 30 - #define SLIM_MAX_RX_PORTS 16 30 + #define SLIM_MAX_RX_PORTS 13 31 31 #define WCD934X_DEFAULT_MCLK_RATE 9600000 32 32 33 33 struct sdm845_snd_data { 34 34 struct snd_soc_jack jack; 35 35 bool jack_setup; 36 - bool stream_prepared[SLIM_MAX_RX_PORTS]; 36 + bool stream_prepared[AFE_PORT_MAX]; 37 37 struct snd_soc_card *card; 38 38 uint32_t pri_mi2s_clk_count; 39 39 uint32_t sec_mi2s_clk_count; 40 40 uint32_t quat_tdm_clk_count; 41 - struct sdw_stream_runtime *sruntime[SLIM_MAX_RX_PORTS]; 41 + struct sdw_stream_runtime *sruntime[AFE_PORT_MAX]; 42 42 }; 43 43 44 44 static unsigned int tdm_slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+4
sound/soc/soc-core.c
··· 31 31 #include <linux/of.h> 32 32 #include <linux/of_graph.h> 33 33 #include <linux/dmi.h> 34 + #include <linux/acpi.h> 34 35 #include <sound/core.h> 35 36 #include <sound/pcm.h> 36 37 #include <sound/pcm_params.h> ··· 1573 1572 1574 1573 if (card->long_name) 1575 1574 return 0; /* long name already set by driver or from DMI */ 1575 + 1576 + if (!is_acpi_device_node(card->dev->fwnode)) 1577 + return 0; 1576 1578 1577 1579 /* make up dmi long name as: vendor-product-version-board */ 1578 1580 vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+1 -1
sound/soc/sof/intel/hda-dsp.c
··· 207 207 208 208 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 209 209 HDA_DSP_REG_ADSPCS, adspcs, 210 - !(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)), 210 + !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)), 211 211 HDA_DSP_REG_POLL_INTERVAL_US, 212 212 HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC); 213 213 if (ret < 0)
+1
sound/soc/sof/intel/hda.c
··· 897 897 /* dsp_unmap: not currently used */ 898 898 iounmap(sdev->bar[HDA_DSP_BAR]); 899 899 hdac_bus_unmap: 900 + platform_device_unregister(hdev->dmic_dev); 900 901 iounmap(bus->remap_addr); 901 902 hda_codec_i915_exit(sdev); 902 903 err:
-1
sound/sparc/amd7930.c
··· 62 62 MODULE_AUTHOR("Thomas K. Dyas and David S. Miller"); 63 63 MODULE_DESCRIPTION("Sun AMD7930"); 64 64 MODULE_LICENSE("GPL"); 65 - MODULE_SUPPORTED_DEVICE("{{Sun,AMD7930}}"); 66 65 67 66 /* Device register layout. */ 68 67
-1
sound/sparc/cs4231.c
··· 52 52 MODULE_AUTHOR("Jaroslav Kysela, Derrick J. Brashear and David S. Miller"); 53 53 MODULE_DESCRIPTION("Sun CS4231"); 54 54 MODULE_LICENSE("GPL"); 55 - MODULE_SUPPORTED_DEVICE("{{Sun,CS4231}}"); 56 55 57 56 #ifdef SBUS_SUPPORT 58 57 struct sbus_dma_info {
-1
sound/sparc/dbri.c
··· 76 76 MODULE_AUTHOR("Rudolf Koenig, Brent Baccala and Martin Habets"); 77 77 MODULE_DESCRIPTION("Sun DBRI"); 78 78 MODULE_LICENSE("GPL"); 79 - MODULE_SUPPORTED_DEVICE("{{Sun,DBRI}}"); 80 79 81 80 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 82 81 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/usb/6fire/chip.c
··· 26 26 MODULE_AUTHOR("Torsten Schenk <torsten.schenk@zoho.com>"); 27 27 MODULE_DESCRIPTION("TerraTec DMX 6Fire USB audio driver"); 28 28 MODULE_LICENSE("GPL v2"); 29 - MODULE_SUPPORTED_DEVICE("{{TerraTec,DMX 6Fire USB}}"); 30 29 31 30 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ 32 31 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
-14
sound/usb/caiaq/device.c
··· 26 26 MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); 27 27 MODULE_DESCRIPTION("caiaq USB audio"); 28 28 MODULE_LICENSE("GPL"); 29 - MODULE_SUPPORTED_DEVICE("{{Native Instruments,RigKontrol2}," 30 - "{Native Instruments,RigKontrol3}," 31 - "{Native Instruments,Kore Controller}," 32 - "{Native Instruments,Kore Controller 2}," 33 - "{Native Instruments,Audio Kontrol 1}," 34 - "{Native Instruments,Audio 2 DJ}," 35 - "{Native Instruments,Audio 4 DJ}," 36 - "{Native Instruments,Audio 8 DJ}," 37 - "{Native Instruments,Traktor Audio 2}," 38 - "{Native Instruments,Session I/O}," 39 - "{Native Instruments,GuitarRig mobile}," 40 - "{Native Instruments,Traktor Kontrol X1}," 41 - "{Native Instruments,Traktor Kontrol S4}," 42 - "{Native Instruments,Maschine Controller}}"); 43 29 44 30 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ 45 31 static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
-2
sound/usb/card.c
··· 58 58 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 59 59 MODULE_DESCRIPTION("USB Audio"); 60 60 MODULE_LICENSE("GPL"); 61 - MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}"); 62 - 63 61 64 62 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 65 63 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-17
sound/usb/hiface/chip.c
··· 21 21 MODULE_AUTHOR("Antonio Ospite <ao2@amarulasolutions.com>"); 22 22 MODULE_DESCRIPTION("M2Tech hiFace USB-SPDIF audio driver"); 23 23 MODULE_LICENSE("GPL v2"); 24 - MODULE_SUPPORTED_DEVICE("{{M2Tech,Young}," 25 - "{M2Tech,hiFace}," 26 - "{M2Tech,North Star}," 27 - "{M2Tech,W4S Young}," 28 - "{M2Tech,Corrson}," 29 - "{M2Tech,AUDIA}," 30 - "{M2Tech,SL Audio}," 31 - "{M2Tech,Empirical}," 32 - "{M2Tech,Rockna}," 33 - "{M2Tech,Pathos}," 34 - "{M2Tech,Metronome}," 35 - "{M2Tech,CAD}," 36 - "{M2Tech,Audio Esclusive}," 37 - "{M2Tech,Rotel}," 38 - "{M2Tech,Eeaudio}," 39 - "{The Chord Company,CHORD}," 40 - "{AVA Group A/S,Vitus}}"); 41 24 42 25 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ 43 26 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
-1
sound/usb/misc/ua101.c
··· 19 19 MODULE_DESCRIPTION("Edirol UA-101/1000 driver"); 20 20 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 21 21 MODULE_LICENSE("GPL v2"); 22 - MODULE_SUPPORTED_DEVICE("{{Edirol,UA-101},{Edirol,UA-1000}}"); 23 22 24 23 /* 25 24 * Should not be lower than the minimum scheduling delay of the host
+2 -2
sound/usb/mixer_quirks.c
··· 2883 2883 u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT; 2884 2884 u16 value = elem->value.enumerated.item[0]; 2885 2885 2886 - kctl->private_value = ((device << SND_DJM_DEVICE_SHIFT) | 2886 + kctl->private_value = (((unsigned long)device << SND_DJM_DEVICE_SHIFT) | 2887 2887 (group << SND_DJM_GROUP_SHIFT) | 2888 2888 value); 2889 2889 ··· 2921 2921 value = device->controls[i].default_value; 2922 2922 knew.name = device->controls[i].name; 2923 2923 knew.private_value = ( 2924 - (device_idx << SND_DJM_DEVICE_SHIFT) | 2924 + ((unsigned long)device_idx << SND_DJM_DEVICE_SHIFT) | 2925 2925 (i << SND_DJM_GROUP_SHIFT) | 2926 2926 value); 2927 2927 err = snd_djm_controls_update(mixer, device_idx, i, value);
-1
sound/usb/usx2y/usbusx2y.c
··· 137 137 MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); 138 138 MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2"); 139 139 MODULE_LICENSE("GPL"); 140 - MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604),"NAME_ALLCAPS"(0x8001)(0x8005)(0x8007)}}"); 141 140 142 141 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ 143 142 static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
-1
sound/x86/intel_hdmi_audio.c
··· 1887 1887 MODULE_AUTHOR("Jerome Anand <jerome.anand@intel.com>"); 1888 1888 MODULE_DESCRIPTION("Intel HDMI Audio driver"); 1889 1889 MODULE_LICENSE("GPL v2"); 1890 - MODULE_SUPPORTED_DEVICE("{Intel,Intel_HAD}");
-1
sound/xen/xen_snd_front.c
··· 391 391 MODULE_DESCRIPTION("Xen virtual sound device frontend"); 392 392 MODULE_LICENSE("GPL"); 393 393 MODULE_ALIAS("xen:" XENSND_DRIVER_NAME); 394 - MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual soundcard}}");
+3
tools/testing/selftests/kvm/.gitignore
··· 8 8 /x86_64/debug_regs 9 9 /x86_64/evmcs_test 10 10 /x86_64/get_cpuid_test 11 + /x86_64/get_msr_index_features 11 12 /x86_64/kvm_pv_test 13 + /x86_64/hyperv_clock 12 14 /x86_64/hyperv_cpuid 13 15 /x86_64/mmio_warning_test 14 16 /x86_64/platform_info_test 17 + /x86_64/set_boot_cpu_id 15 18 /x86_64/set_sregs_test 16 19 /x86_64/smm_test 17 20 /x86_64/state_test
+3
tools/testing/selftests/kvm/Makefile
··· 39 39 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c 40 40 41 41 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test 42 + TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features 42 43 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test 43 44 TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test 45 + TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock 44 46 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid 45 47 TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test 46 48 TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test 47 49 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test 50 + TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id 48 51 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test 49 52 TEST_GEN_PROGS_x86_64 += x86_64/smm_test 50 53 TEST_GEN_PROGS_x86_64 += x86_64/state_test
+2
tools/testing/selftests/kvm/include/kvm_util.h
··· 16 16 17 17 #include "sparsebit.h" 18 18 19 + #define KVM_DEV_PATH "/dev/kvm" 19 20 #define KVM_MAX_VCPUS 512 20 21 21 22 /* ··· 134 133 int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, 135 134 void *arg); 136 135 void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); 136 + int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg); 137 137 void kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); 138 138 int _kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); 139 139 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
+6 -1
tools/testing/selftests/kvm/lib/kvm_util.c
··· 1697 1697 { 1698 1698 int ret; 1699 1699 1700 - ret = ioctl(vm->fd, cmd, arg); 1700 + ret = _vm_ioctl(vm, cmd, arg); 1701 1701 TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)", 1702 1702 cmd, ret, errno, strerror(errno)); 1703 + } 1704 + 1705 + int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) 1706 + { 1707 + return ioctl(vm->fd, cmd, arg); 1703 1708 } 1704 1709 1705 1710 /*
-2
tools/testing/selftests/kvm/lib/kvm_util_internal.h
··· 10 10 11 11 #include "sparsebit.h" 12 12 13 - #define KVM_DEV_PATH "/dev/kvm" 14 - 15 13 struct userspace_mem_region { 16 14 struct kvm_userspace_memory_region region; 17 15 struct sparsebit *unused_phy_pages;
+134
tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Test that KVM_GET_MSR_INDEX_LIST and 4 + * KVM_GET_MSR_FEATURE_INDEX_LIST work as intended 5 + * 6 + * Copyright (C) 2020, Red Hat, Inc. 7 + */ 8 + #include <fcntl.h> 9 + #include <stdio.h> 10 + #include <stdlib.h> 11 + #include <string.h> 12 + #include <sys/ioctl.h> 13 + 14 + #include "test_util.h" 15 + #include "kvm_util.h" 16 + #include "processor.h" 17 + 18 + static int kvm_num_index_msrs(int kvm_fd, int nmsrs) 19 + { 20 + struct kvm_msr_list *list; 21 + int r; 22 + 23 + list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); 24 + list->nmsrs = nmsrs; 25 + r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list); 26 + TEST_ASSERT(r == -1 && errno == E2BIG, 27 + "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i", 28 + r); 29 + 30 + r = list->nmsrs; 31 + free(list); 32 + return r; 33 + } 34 + 35 + static void test_get_msr_index(void) 36 + { 37 + int old_res, res, kvm_fd, r; 38 + struct kvm_msr_list *list; 39 + 40 + kvm_fd = open(KVM_DEV_PATH, O_RDONLY); 41 + if (kvm_fd < 0) 42 + exit(KSFT_SKIP); 43 + 44 + old_res = kvm_num_index_msrs(kvm_fd, 0); 45 + TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0"); 46 + 47 + if (old_res != 1) { 48 + res = kvm_num_index_msrs(kvm_fd, 1); 49 + TEST_ASSERT(res > 1, "Expecting nmsrs to be > 1"); 50 + TEST_ASSERT(res == old_res, "Expecting nmsrs to be identical"); 51 + } 52 + 53 + list = malloc(sizeof(*list) + old_res * sizeof(list->indices[0])); 54 + list->nmsrs = old_res; 55 + r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list); 56 + 57 + TEST_ASSERT(r == 0, 58 + "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST, r: %i", 59 + r); 60 + TEST_ASSERT(list->nmsrs == old_res, "Expecting nmsrs to be identical"); 61 + free(list); 62 + 63 + close(kvm_fd); 64 + } 65 + 66 + static int kvm_num_feature_msrs(int kvm_fd, int nmsrs) 67 + { 68 + struct kvm_msr_list *list; 69 + int r; 70 + 71 + list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); 72 + list->nmsrs = 
nmsrs; 73 + r = ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list); 74 + TEST_ASSERT(r == -1 && errno == E2BIG, 75 + "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST probe, r: %i", 76 + r); 77 + 78 + r = list->nmsrs; 79 + free(list); 80 + return r; 81 + } 82 + 83 + struct kvm_msr_list *kvm_get_msr_feature_list(int kvm_fd, int nmsrs) 84 + { 85 + struct kvm_msr_list *list; 86 + int r; 87 + 88 + list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); 89 + list->nmsrs = nmsrs; 90 + r = ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list); 91 + 92 + TEST_ASSERT(r == 0, 93 + "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST, r: %i", 94 + r); 95 + 96 + return list; 97 + } 98 + 99 + static void test_get_msr_feature(void) 100 + { 101 + int res, old_res, i, kvm_fd; 102 + struct kvm_msr_list *feature_list; 103 + 104 + kvm_fd = open(KVM_DEV_PATH, O_RDONLY); 105 + if (kvm_fd < 0) 106 + exit(KSFT_SKIP); 107 + 108 + old_res = kvm_num_feature_msrs(kvm_fd, 0); 109 + TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0"); 110 + 111 + if (old_res != 1) { 112 + res = kvm_num_feature_msrs(kvm_fd, 1); 113 + TEST_ASSERT(res > 1, "Expecting nmsrs to be > 1"); 114 + TEST_ASSERT(res == old_res, "Expecting nmsrs to be identical"); 115 + } 116 + 117 + feature_list = kvm_get_msr_feature_list(kvm_fd, old_res); 118 + TEST_ASSERT(old_res == feature_list->nmsrs, 119 + "Unmatching number of msr indexes"); 120 + 121 + for (i = 0; i < feature_list->nmsrs; i++) 122 + kvm_get_feature_msr(feature_list->indices[i]); 123 + 124 + free(feature_list); 125 + close(kvm_fd); 126 + } 127 + 128 + int main(int argc, char *argv[]) 129 + { 130 + if (kvm_check_cap(KVM_CAP_GET_MSR_FEATURES)) 131 + test_get_msr_feature(); 132 + 133 + test_get_msr_index(); 134 + }
+260
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2021, Red Hat, Inc. 4 + * 5 + * Tests for Hyper-V clocksources 6 + */ 7 + #include "test_util.h" 8 + #include "kvm_util.h" 9 + #include "processor.h" 10 + 11 + struct ms_hyperv_tsc_page { 12 + volatile u32 tsc_sequence; 13 + u32 reserved1; 14 + volatile u64 tsc_scale; 15 + volatile s64 tsc_offset; 16 + } __packed; 17 + 18 + #define HV_X64_MSR_GUEST_OS_ID 0x40000000 19 + #define HV_X64_MSR_TIME_REF_COUNT 0x40000020 20 + #define HV_X64_MSR_REFERENCE_TSC 0x40000021 21 + #define HV_X64_MSR_TSC_FREQUENCY 0x40000022 22 + #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 23 + #define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 24 + 25 + /* Simplified mul_u64_u64_shr() */ 26 + static inline u64 mul_u64_u64_shr64(u64 a, u64 b) 27 + { 28 + union { 29 + u64 ll; 30 + struct { 31 + u32 low, high; 32 + } l; 33 + } rm, rn, rh, a0, b0; 34 + u64 c; 35 + 36 + a0.ll = a; 37 + b0.ll = b; 38 + 39 + rm.ll = (u64)a0.l.low * b0.l.high; 40 + rn.ll = (u64)a0.l.high * b0.l.low; 41 + rh.ll = (u64)a0.l.high * b0.l.high; 42 + 43 + rh.l.low = c = rm.l.high + rn.l.high + rh.l.low; 44 + rh.l.high = (c >> 32) + rh.l.high; 45 + 46 + return rh.ll; 47 + } 48 + 49 + static inline void nop_loop(void) 50 + { 51 + int i; 52 + 53 + for (i = 0; i < 1000000; i++) 54 + asm volatile("nop"); 55 + } 56 + 57 + static inline void check_tsc_msr_rdtsc(void) 58 + { 59 + u64 tsc_freq, r1, r2, t1, t2; 60 + s64 delta_ns; 61 + 62 + tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY); 63 + GUEST_ASSERT(tsc_freq > 0); 64 + 65 + /* First, check MSR-based clocksource */ 66 + r1 = rdtsc(); 67 + t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); 68 + nop_loop(); 69 + r2 = rdtsc(); 70 + t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); 71 + 72 + GUEST_ASSERT(r2 > r1 && t2 > t1); 73 + 74 + /* HV_X64_MSR_TIME_REF_COUNT is in 100ns */ 75 + delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq); 76 + if (delta_ns < 0) 77 + delta_ns = -delta_ns; 78 + 79 + /* 1% tolerance */ 
80 + GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100); 81 + } 82 + 83 + static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page) 84 + { 85 + u64 r1, r2, t1, t2; 86 + 87 + /* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */ 88 + t1 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset; 89 + r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); 90 + 91 + /* 10 ms tolerance */ 92 + GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000); 93 + nop_loop(); 94 + 95 + t2 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset; 96 + r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); 97 + GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000); 98 + } 99 + 100 + static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa) 101 + { 102 + u64 tsc_scale, tsc_offset; 103 + 104 + /* Set Guest OS id to enable Hyper-V emulation */ 105 + GUEST_SYNC(1); 106 + wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48); 107 + GUEST_SYNC(2); 108 + 109 + check_tsc_msr_rdtsc(); 110 + 111 + GUEST_SYNC(3); 112 + 113 + /* Set up TSC page is disabled state, check that it's clean */ 114 + wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa); 115 + GUEST_ASSERT(tsc_page->tsc_sequence == 0); 116 + GUEST_ASSERT(tsc_page->tsc_scale == 0); 117 + GUEST_ASSERT(tsc_page->tsc_offset == 0); 118 + 119 + GUEST_SYNC(4); 120 + 121 + /* Set up TSC page is enabled state */ 122 + wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa | 0x1); 123 + GUEST_ASSERT(tsc_page->tsc_sequence != 0); 124 + 125 + GUEST_SYNC(5); 126 + 127 + check_tsc_msr_tsc_page(tsc_page); 128 + 129 + GUEST_SYNC(6); 130 + 131 + tsc_offset = tsc_page->tsc_offset; 132 + /* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */ 133 + GUEST_SYNC(7); 134 + GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset); 135 + 136 + nop_loop(); 137 + 138 + /* 139 + * Enable Re-enlightenment and check that TSC page stays constant across 140 + * KVM_SET_CLOCK. 
141 + */ 142 + wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0x1 << 16 | 0xff); 143 + wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0x1); 144 + tsc_offset = tsc_page->tsc_offset; 145 + tsc_scale = tsc_page->tsc_scale; 146 + GUEST_SYNC(8); 147 + GUEST_ASSERT(tsc_page->tsc_offset == tsc_offset); 148 + GUEST_ASSERT(tsc_page->tsc_scale == tsc_scale); 149 + 150 + GUEST_SYNC(9); 151 + 152 + check_tsc_msr_tsc_page(tsc_page); 153 + 154 + /* 155 + * Disable re-enlightenment and TSC page, check that KVM doesn't update 156 + * it anymore. 157 + */ 158 + wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0); 159 + wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0); 160 + wrmsr(HV_X64_MSR_REFERENCE_TSC, 0); 161 + memset(tsc_page, 0, sizeof(*tsc_page)); 162 + 163 + GUEST_SYNC(10); 164 + GUEST_ASSERT(tsc_page->tsc_sequence == 0); 165 + GUEST_ASSERT(tsc_page->tsc_offset == 0); 166 + GUEST_ASSERT(tsc_page->tsc_scale == 0); 167 + 168 + GUEST_DONE(); 169 + } 170 + 171 + #define VCPU_ID 0 172 + 173 + static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm) 174 + { 175 + u64 tsc_freq, r1, r2, t1, t2; 176 + s64 delta_ns; 177 + 178 + tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY); 179 + TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero"); 180 + 181 + /* First, check MSR-based clocksource */ 182 + r1 = rdtsc(); 183 + t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT); 184 + nop_loop(); 185 + r2 = rdtsc(); 186 + t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT); 187 + 188 + TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2); 189 + 190 + /* HV_X64_MSR_TIME_REF_COUNT is in 100ns */ 191 + delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq); 192 + if (delta_ns < 0) 193 + delta_ns = -delta_ns; 194 + 195 + /* 1% tolerance */ 196 + TEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100, 197 + "Elapsed time does not match (MSR=%ld, TSC=%ld)", 198 + (t2 - t1) * 100, (r2 - r1) * 1000000000 / tsc_freq); 199 + } 200 + 201 + int main(void) 202 + { 203 + 
struct kvm_vm *vm; 204 + struct kvm_run *run; 205 + struct ucall uc; 206 + vm_vaddr_t tsc_page_gva; 207 + int stage; 208 + 209 + vm = vm_create_default(VCPU_ID, 0, guest_main); 210 + run = vcpu_state(vm, VCPU_ID); 211 + 212 + vcpu_set_hv_cpuid(vm, VCPU_ID); 213 + 214 + tsc_page_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); 215 + memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize()); 216 + TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, 217 + "TSC page has to be page aligned\n"); 218 + vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva)); 219 + 220 + host_check_tsc_msr_rdtsc(vm); 221 + 222 + for (stage = 1;; stage++) { 223 + _vcpu_run(vm, VCPU_ID); 224 + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 225 + "Stage %d: unexpected exit reason: %u (%s),\n", 226 + stage, run->exit_reason, 227 + exit_reason_str(run->exit_reason)); 228 + 229 + switch (get_ucall(vm, VCPU_ID, &uc)) { 230 + case UCALL_ABORT: 231 + TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 232 + __FILE__, uc.args[1]); 233 + /* NOT REACHED */ 234 + case UCALL_SYNC: 235 + break; 236 + case UCALL_DONE: 237 + /* Keep in sync with guest_main() */ 238 + TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d\n", 239 + stage); 240 + goto out; 241 + default: 242 + TEST_FAIL("Unknown ucall %lu", uc.cmd); 243 + } 244 + 245 + TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && 246 + uc.args[1] == stage, 247 + "Stage %d: Unexpected register values vmexit, got %lx", 248 + stage, (ulong)uc.args[1]); 249 + 250 + /* Reset kvmclock triggering TSC page update */ 251 + if (stage == 7 || stage == 8 || stage == 10) { 252 + struct kvm_clock_data clock = {0}; 253 + 254 + vm_ioctl(vm, KVM_SET_CLOCK, &clock); 255 + } 256 + } 257 + 258 + out: 259 + kvm_vm_free(vm); 260 + }
+166
tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Test that KVM_SET_BOOT_CPU_ID works as intended 4 + * 5 + * Copyright (C) 2020, Red Hat, Inc. 6 + */ 7 + #define _GNU_SOURCE /* for program_invocation_name */ 8 + #include <fcntl.h> 9 + #include <stdio.h> 10 + #include <stdlib.h> 11 + #include <string.h> 12 + #include <sys/ioctl.h> 13 + 14 + #include "test_util.h" 15 + #include "kvm_util.h" 16 + #include "processor.h" 17 + 18 + #define N_VCPU 2 19 + #define VCPU_ID0 0 20 + #define VCPU_ID1 1 21 + 22 + static uint32_t get_bsp_flag(void) 23 + { 24 + return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP; 25 + } 26 + 27 + static void guest_bsp_vcpu(void *arg) 28 + { 29 + GUEST_SYNC(1); 30 + 31 + GUEST_ASSERT(get_bsp_flag() != 0); 32 + 33 + GUEST_DONE(); 34 + } 35 + 36 + static void guest_not_bsp_vcpu(void *arg) 37 + { 38 + GUEST_SYNC(1); 39 + 40 + GUEST_ASSERT(get_bsp_flag() == 0); 41 + 42 + GUEST_DONE(); 43 + } 44 + 45 + static void test_set_boot_busy(struct kvm_vm *vm) 46 + { 47 + int res; 48 + 49 + res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID0); 50 + TEST_ASSERT(res == -1 && errno == EBUSY, 51 + "KVM_SET_BOOT_CPU_ID set while running vm"); 52 + } 53 + 54 + static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid) 55 + { 56 + struct ucall uc; 57 + int stage; 58 + 59 + for (stage = 0; stage < 2; stage++) { 60 + 61 + vcpu_run(vm, vcpuid); 62 + 63 + switch (get_ucall(vm, vcpuid, &uc)) { 64 + case UCALL_SYNC: 65 + TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && 66 + uc.args[1] == stage + 1, 67 + "Stage %d: Unexpected register values vmexit, got %lx", 68 + stage + 1, (ulong)uc.args[1]); 69 + test_set_boot_busy(vm); 70 + break; 71 + case UCALL_DONE: 72 + TEST_ASSERT(stage == 1, 73 + "Expected GUEST_DONE in stage 2, got stage %d", 74 + stage); 75 + break; 76 + case UCALL_ABORT: 77 + TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx", 78 + (const char *)uc.args[0], __FILE__, 79 + uc.args[1], uc.args[2], uc.args[3]); 80 + default: 81 + 
TEST_ASSERT(false, "Unexpected exit: %s", 82 + exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason)); 83 + } 84 + } 85 + } 86 + 87 + static struct kvm_vm *create_vm(void) 88 + { 89 + struct kvm_vm *vm; 90 + uint64_t vcpu_pages = (DEFAULT_STACK_PGS) * 2; 91 + uint64_t extra_pg_pages = vcpu_pages / PTES_PER_MIN_PAGE * N_VCPU; 92 + uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages; 93 + 94 + pages = vm_adjust_num_guest_pages(VM_MODE_DEFAULT, pages); 95 + vm = vm_create(VM_MODE_DEFAULT, pages, O_RDWR); 96 + 97 + kvm_vm_elf_load(vm, program_invocation_name, 0, 0); 98 + vm_create_irqchip(vm); 99 + 100 + return vm; 101 + } 102 + 103 + static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code) 104 + { 105 + if (bsp_code) 106 + vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu); 107 + else 108 + vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu); 109 + 110 + vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); 111 + } 112 + 113 + static void run_vm_bsp(uint32_t bsp_vcpu) 114 + { 115 + struct kvm_vm *vm; 116 + bool is_bsp_vcpu1 = bsp_vcpu == VCPU_ID1; 117 + 118 + vm = create_vm(); 119 + 120 + if (is_bsp_vcpu1) 121 + vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1); 122 + 123 + add_x86_vcpu(vm, VCPU_ID0, !is_bsp_vcpu1); 124 + add_x86_vcpu(vm, VCPU_ID1, is_bsp_vcpu1); 125 + 126 + run_vcpu(vm, VCPU_ID0); 127 + run_vcpu(vm, VCPU_ID1); 128 + 129 + kvm_vm_free(vm); 130 + } 131 + 132 + static void check_set_bsp_busy(void) 133 + { 134 + struct kvm_vm *vm; 135 + int res; 136 + 137 + vm = create_vm(); 138 + 139 + add_x86_vcpu(vm, VCPU_ID0, true); 140 + add_x86_vcpu(vm, VCPU_ID1, false); 141 + 142 + res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1); 143 + TEST_ASSERT(res == -1 && errno == EBUSY, "KVM_SET_BOOT_CPU_ID set after adding vcpu"); 144 + 145 + run_vcpu(vm, VCPU_ID0); 146 + run_vcpu(vm, VCPU_ID1); 147 + 148 + res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1); 149 + TEST_ASSERT(res == -1 && errno == EBUSY, 
"KVM_SET_BOOT_CPU_ID set to a terminated vcpu"); 150 + 151 + kvm_vm_free(vm); 152 + } 153 + 154 + int main(int argc, char *argv[]) 155 + { 156 + if (!kvm_check_cap(KVM_CAP_SET_BOOT_CPU_ID)) { 157 + print_skip("set_boot_cpu_id not available"); 158 + return 0; 159 + } 160 + 161 + run_vm_bsp(VCPU_ID0); 162 + run_vm_bsp(VCPU_ID1); 163 + run_vm_bsp(VCPU_ID0); 164 + 165 + check_set_bsp_busy(); 166 + }