···3333Description:3434 The current state of the log write grant head. It3535 represents the total log reservation of all currently3636- oustanding transactions, including regrants due to3636+ outstanding transactions, including regrants due to3737 rolling transactions. The grant head is exported in3838 "cycle:bytes" format.3939Users: xfstests
···1495149514961496Define which vcpu is the Bootstrap Processor (BSP). Values are the same14971497as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default14981498-is vcpu 0.14981498+is vcpu 0. This ioctl has to be called before vcpu creation,14991499+otherwise it will return EBUSY error.1499150015001501150115024.42 KVM_GET_XSAVE···48074806allows user space to deflect and potentially handle various MSR accesses48084807into user space.4809480848104810-If a vCPU is in running state while this ioctl is invoked, the vCPU may48114811-experience inconsistent filtering behavior on MSR accesses.48094809+Note, invoking this ioctl with a vCPU is running is inherently racy. However,48104810+KVM does guarantee that vCPUs will see either the previous filter or the new48114811+filter, e.g. MSRs with identical settings in both the old and new filter will48124812+have deterministic behavior.48124813481348144.127 KVM_XEN_HVM_SET_ATTR48144815--------------------------
···99 return 0;1010}11111212-/* Ftrace callback handler for kprobes -- called under preepmt disabed */1212+/* Ftrace callback handler for kprobes -- called under preepmt disabled */1313void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,1414 struct ftrace_ops *ops, struct ftrace_regs *fregs)1515{
···77#include <linux/bug.h>88#include <asm/cputable.h>991010-static inline bool early_cpu_has_feature(unsigned long feature)1010+static __always_inline bool early_cpu_has_feature(unsigned long feature)1111{1212 return !!((CPU_FTRS_ALWAYS & feature) ||1313 (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));···4646 return static_branch_likely(&cpu_feature_keys[i]);4747}4848#else4949-static inline bool cpu_has_feature(unsigned long feature)4949+static __always_inline bool cpu_has_feature(unsigned long feature)5050{5151 return early_cpu_has_feature(feature);5252}
+11
arch/powerpc/kernel/vdso32/gettimeofday.S
···6565V_FUNCTION_BEGIN(__kernel_time)6666 cvdso_call_time __c_kernel_time6767V_FUNCTION_END(__kernel_time)6868+6969+/* Routines for restoring integer registers, called by the compiler. */7070+/* Called with r11 pointing to the stack header word of the caller of the */7171+/* function, just beyond the end of the integer restore area. */7272+_GLOBAL(_restgpr_31_x)7373+_GLOBAL(_rest32gpr_31_x)7474+ lwz r0,4(r11)7575+ lwz r31,-4(r11)7676+ mtlr r07777+ mr r1,r117878+ blr
+2-2
arch/riscv/Kconfig
···9393 select PCI_MSI if PCI9494 select RISCV_INTC9595 select RISCV_TIMER if RISCV_SBI9696- select SPARSEMEM_STATIC if 32BIT9796 select SPARSE_IRQ9897 select SYSCTL_EXCEPTION_TRACE9998 select THREAD_INFO_IN_TASK···153154config ARCH_SPARSEMEM_ENABLE154155 def_bool y155156 depends on MMU156156- select SPARSEMEM_VMEMMAP_ENABLE157157+ select SPARSEMEM_STATIC if 32BIT && SPARSMEM158158+ select SPARSEMEM_VMEMMAP_ENABLE if 64BIT157159158160config ARCH_SELECT_MEMORY_MODEL159161 def_bool ARCH_SPARSEMEM_ENABLE
+2
arch/riscv/Kconfig.socs
···3131 select SIFIVE_PLIC3232 select ARCH_HAS_RESET_CONTROLLER3333 select PINCTRL3434+ select COMMON_CLK3535+ select COMMON_CLK_K2103436 help3537 This enables support for Canaan Kendryte K210 SoC platform hardware.3638
+16
arch/riscv/include/asm/asm-prototypes.h
···99long long __ashrti3(long long a, int b);1010long long __ashlti3(long long a, int b);11111212+1313+#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs)1414+1515+DECLARE_DO_ERROR_INFO(do_trap_unknown);1616+DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);1717+DECLARE_DO_ERROR_INFO(do_trap_insn_fault);1818+DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);1919+DECLARE_DO_ERROR_INFO(do_trap_load_fault);2020+DECLARE_DO_ERROR_INFO(do_trap_load_misaligned);2121+DECLARE_DO_ERROR_INFO(do_trap_store_misaligned);2222+DECLARE_DO_ERROR_INFO(do_trap_store_fault);2323+DECLARE_DO_ERROR_INFO(do_trap_ecall_u);2424+DECLARE_DO_ERROR_INFO(do_trap_ecall_s);2525+DECLARE_DO_ERROR_INFO(do_trap_ecall_m);2626+DECLARE_DO_ERROR_INFO(do_trap_break);2727+1228#endif /* _ASM_RISCV_PROTOTYPES_H */
···119119extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,120120 unsigned int n);121121122122+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,123123+ unsigned long frame_pointer);124124+int do_syscall_trace_enter(struct pt_regs *regs);125125+void do_syscall_trace_exit(struct pt_regs *regs);126126+122127/**123128 * regs_get_register() - get register value from its offset124129 * @regs: pt_regs from which register value is gotten
···116116EXPORT_SYMBOL(sbi_clear_ipi);117117118118/**119119- * sbi_set_timer_v01() - Program the timer for next timer event.119119+ * __sbi_set_timer_v01() - Program the timer for next timer event.120120 * @stime_value: The value after which next timer event should fire.121121 *122122 * Return: None
···682682}683683EXPORT_SYMBOL_GPL(zpci_disable_device);684684685685-void zpci_remove_device(struct zpci_dev *zdev)685685+/* zpci_remove_device - Removes the given zdev from the PCI core686686+ * @zdev: the zdev to be removed from the PCI core687687+ * @set_error: if true the device's error state is set to permanent failure688688+ *689689+ * Sets a zPCI device to a configured but offline state; the zPCI690690+ * device is still accessible through its hotplug slot and the zPCI691691+ * API but is removed from the common code PCI bus, making it692692+ * no longer available to drivers.693693+ */694694+void zpci_remove_device(struct zpci_dev *zdev, bool set_error)686695{687696 struct zpci_bus *zbus = zdev->zbus;688697 struct pci_dev *pdev;689698699699+ if (!zdev->zbus->bus)700700+ return;701701+690702 pdev = pci_get_slot(zbus->bus, zdev->devfn);691703 if (pdev) {692692- if (pdev->is_virtfn)693693- return zpci_iov_remove_virtfn(pdev, zdev->vfn);704704+ if (set_error)705705+ pdev->error_state = pci_channel_io_perm_failure;706706+ if (pdev->is_virtfn) {707707+ zpci_iov_remove_virtfn(pdev, zdev->vfn);708708+ /* balance pci_get_slot */709709+ pci_dev_put(pdev);710710+ return;711711+ }694712 pci_stop_and_remove_bus_device_locked(pdev);713713+ /* balance pci_get_slot */714714+ pci_dev_put(pdev);695715 }696716}697717···785765 struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);786766787767 if (zdev->zbus->bus)788788- zpci_remove_device(zdev);768768+ zpci_remove_device(zdev, false);789769790770 switch (zdev->state) {791771 case ZPCI_FN_STATE_ONLINE:
+6-12
arch/s390/pci/pci_event.c
···7676static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)7777{7878 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);7979- struct pci_dev *pdev = NULL;8079 enum zpci_state state;8080+ struct pci_dev *pdev;8181 int ret;8282-8383- if (zdev && zdev->zbus->bus)8484- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);85828683 zpci_err("avail CCDF:\n");8784 zpci_err_hex(ccdf, sizeof(*ccdf));···121124 case 0x0303: /* Deconfiguration requested */122125 if (!zdev)123126 break;124124- if (pdev)125125- zpci_remove_device(zdev);127127+ zpci_remove_device(zdev, false);126128127129 ret = zpci_disable_device(zdev);128130 if (ret)···136140 case 0x0304: /* Configured -> Standby|Reserved */137141 if (!zdev)138142 break;139139- if (pdev) {140140- /* Give the driver a hint that the function is141141- * already unusable. */142142- pdev->error_state = pci_channel_io_perm_failure;143143- zpci_remove_device(zdev);144144- }143143+ /* Give the driver a hint that the function is144144+ * already unusable.145145+ */146146+ zpci_remove_device(zdev, true);145147146148 zdev->fh = ccdf->fh;147149 zpci_disable_device(zdev);
+3
arch/x86/events/intel/core.c
···36593659 return ret;3660366036613661 if (event->attr.precise_ip) {36623662+ if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)36633663+ return -EINVAL;36643664+36623665 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {36633666 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;36643667 if (!(event->attr.sample_type &
+1-1
arch/x86/events/intel/ds.c
···20102010 */20112011 if (!pebs_status && cpuc->pebs_enabled &&20122012 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))20132013- pebs_status = cpuc->pebs_enabled;20132013+ pebs_status = p->status = cpuc->pebs_enabled;2014201420152015 bit = find_first_bit((unsigned long *)&pebs_status,20162016 x86_pmu.max_pebs_events);
+26-8
arch/x86/include/asm/kvm_host.h
···884884 u64 options;885885};886886887887+/* Current state of Hyper-V TSC page clocksource */888888+enum hv_tsc_page_status {889889+ /* TSC page was not set up or disabled */890890+ HV_TSC_PAGE_UNSET = 0,891891+ /* TSC page MSR was written by the guest, update pending */892892+ HV_TSC_PAGE_GUEST_CHANGED,893893+ /* TSC page MSR was written by KVM userspace, update pending */894894+ HV_TSC_PAGE_HOST_CHANGED,895895+ /* TSC page was properly set up and is currently active */896896+ HV_TSC_PAGE_SET,897897+ /* TSC page is currently being updated and therefore is inactive */898898+ HV_TSC_PAGE_UPDATING,899899+ /* TSC page was set up with an inaccessible GPA */900900+ HV_TSC_PAGE_BROKEN,901901+};902902+887903/* Hyper-V emulation context */888904struct kvm_hv {889905 struct mutex hv_lock;890906 u64 hv_guest_os_id;891907 u64 hv_hypercall;892908 u64 hv_tsc_page;909909+ enum hv_tsc_page_status hv_tsc_page_status;893910894911 /* Hyper-v based guest crash (NT kernel bugcheck) parameters */895912 u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];···946929 KVM_IRQCHIP_NONE,947930 KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */948931 KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */932932+};933933+934934+struct kvm_x86_msr_filter {935935+ u8 count;936936+ bool default_allow:1;937937+ struct msr_bitmap_range ranges[16];949938};950939951940#define APICV_INHIBIT_REASON_DISABLE 0···10481025 bool guest_can_read_msr_platform_info;10491026 bool exception_payload_enabled;1050102710281028+ bool bus_lock_detection_enabled;10291029+10511030 /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */10521031 u32 user_space_msr_mask;10531053-10541054- struct {10551055- u8 count;10561056- bool default_allow:1;10571057- struct msr_bitmap_range ranges[16];10581058- } msr_filter;10591059-10601060- bool bus_lock_detection_enabled;10321032+ struct kvm_x86_msr_filter __rcu *msr_filter;1061103310621034 struct kvm_pmu_event_filter __rcu *pmu_event_filter;10631035 struct 
task_struct *nx_lpage_recovery_thread;
-9
arch/x86/include/asm/processor.h
···551551 *size = fpu_kernel_xstate_size;552552}553553554554-/*555555- * Thread-synchronous status.556556- *557557- * This is different from the flags in that nobody else558558- * ever touches our thread-synchronous status, so we don't559559- * have to worry about atomic accesses.560560- */561561-#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/562562-563554static inline void564555native_load_sp0(unsigned long sp0)565556{
+14-1
arch/x86/include/asm/thread_info.h
···205205206206#endif207207208208+/*209209+ * Thread-synchronous status.210210+ *211211+ * This is different from the flags in that nobody else212212+ * ever touches our thread-synchronous status, so we don't213213+ * have to worry about atomic accesses.214214+ */215215+#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/216216+217217+#ifndef __ASSEMBLY__208218#ifdef CONFIG_COMPAT209219#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */220220+221221+#define arch_set_restart_data(restart) \222222+ do { restart->arch_data = current_thread_info()->status; } while (0)223223+210224#endif211211-#ifndef __ASSEMBLY__212225213226#ifdef CONFIG_X86_32214227#define in_ia32_syscall() true
+5
arch/x86/kernel/apic/apic.c
···23422342 [0 ... NR_CPUS - 1] = -1,23432343};2344234423452345+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)23462346+{23472347+ return phys_id == cpuid_to_apicid[cpu];23482348+}23492349+23452350#ifdef CONFIG_SMP23462351/**23472352 * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
+10
arch/x86/kernel/apic/io_apic.c
···10321032 if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {10331033 irq = mp_irqs[idx].srcbusirq;10341034 legacy = mp_is_legacy_irq(irq);10351035+ /*10361036+ * IRQ2 is unusable for historical reasons on systems which10371037+ * have a legacy PIC. See the comment vs. IRQ2 further down.10381038+ *10391039+ * If this gets removed at some point then the related code10401040+ * in lapic_assign_system_vectors() needs to be adjusted as10411041+ * well.10421042+ */10431043+ if (legacy && irq == PIC_CASCADE_IR)10441044+ return -EINVAL;10351045 }1036104610371047 mutex_lock(&ioapic_mutex);
+1-1
arch/x86/kernel/kprobes/ftrace.c
···12121313#include "common.h"14141515-/* Ftrace callback handler for kprobes -- called under preepmt disabed */1515+/* Ftrace callback handler for kprobes -- called under preepmt disabled */1616void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,1717 struct ftrace_ops *ops, struct ftrace_regs *fregs)1818{
+10-13
arch/x86/kernel/kvm.c
···836836837837static void kvm_wait(u8 *ptr, u8 val)838838{839839- unsigned long flags;840840-841839 if (in_nmi())842840 return;843843-844844- local_irq_save(flags);845845-846846- if (READ_ONCE(*ptr) != val)847847- goto out;848841849842 /*850843 * halt until it's our turn and kicked. Note that we do safe halt851844 * for irq enabled case to avoid hang when lock info is overwritten852845 * in irq spinlock slowpath and no spurious interrupt occur to save us.853846 */854854- if (arch_irqs_disabled_flags(flags))855855- halt();856856- else857857- safe_halt();847847+ if (irqs_disabled()) {848848+ if (READ_ONCE(*ptr) == val)849849+ halt();850850+ } else {851851+ local_irq_disable();858852859859-out:860860- local_irq_restore(flags);853853+ if (READ_ONCE(*ptr) == val)854854+ safe_halt();855855+856856+ local_irq_enable();857857+ }861858}862859863860#ifdef CONFIG_X86_32
+1-23
arch/x86/kernel/signal.c
···766766767767static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)768768{769769- /*770770- * This function is fundamentally broken as currently771771- * implemented.772772- *773773- * The idea is that we want to trigger a call to the774774- * restart_block() syscall and that we want in_ia32_syscall(),775775- * in_x32_syscall(), etc. to match whatever they were in the776776- * syscall being restarted. We assume that the syscall777777- * instruction at (regs->ip - 2) matches whatever syscall778778- * instruction we used to enter in the first place.779779- *780780- * The problem is that we can get here when ptrace pokes781781- * syscall-like values into regs even if we're not in a syscall782782- * at all.783783- *784784- * For now, we maintain historical behavior and guess based on785785- * stored state. We could do better by saving the actual786786- * syscall arch in restart_block or (with caveats on x32) by787787- * checking if regs->ip points to 'int $0x80'. The current788788- * behavior is incorrect if a tracer has a different bitness789789- * than the tracee.790790- */791769#ifdef CONFIG_IA32_EMULATION792792- if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))770770+ if (current->restart_block.arch_data & TS_COMPAT)793771 return __NR_ia32_restart_syscall;794772#endif795773#ifdef CONFIG_X86_X32_ABI
+81-10
arch/x86/kvm/hyperv.c
···520520 u64 tsc;521521522522 /*523523- * The guest has not set up the TSC page or the clock isn't524524- * stable, fall back to get_kvmclock_ns.523523+ * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,524524+ * is broken, disabled or being updated.525525 */526526- if (!hv->tsc_ref.tsc_sequence)526526+ if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)527527 return div_u64(get_kvmclock_ns(kvm), 100);528528529529 vcpu = kvm_get_vcpu(kvm, 0);···10771077 return true;10781078}1079107910801080+/*10811081+ * Don't touch TSC page values if the guest has opted for TSC emulation after10821082+ * migration. KVM doesn't fully support reenlightenment notifications and TSC10831083+ * access emulation and Hyper-V is known to expect the values in TSC page to10841084+ * stay constant before TSC access emulation is disabled from guest side10851085+ * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC10861086+ * frequency and guest visible TSC value across migration (and prevent it when10871087+ * TSC scaling is unsupported).10881088+ */10891089+static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)10901090+{10911091+ return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&10921092+ hv->hv_tsc_emulation_control;10931093+}10941094+10801095void kvm_hv_setup_tsc_page(struct kvm *kvm,10811096 struct pvclock_vcpu_time_info *hv_clock)10821097{···11021087 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));11031088 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);1104108911051105- if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))10901090+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||10911091+ hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)11061092 return;1107109311081094 mutex_lock(&hv->hv_lock);···11171101 */11181102 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),11191103 &tsc_seq, sizeof(tsc_seq))))11041104+ goto out_err;11051105+11061106+ if (tsc_seq && tsc_page_update_unsafe(hv)) 
{11071107+ if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))11081108+ goto out_err;11091109+11101110+ hv->hv_tsc_page_status = HV_TSC_PAGE_SET;11201111 goto out_unlock;11121112+ }1121111311221114 /*11231115 * While we're computing and writing the parameters, force the···11341110 hv->tsc_ref.tsc_sequence = 0;11351111 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),11361112 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))11371137- goto out_unlock;11131113+ goto out_err;1138111411391115 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))11401140- goto out_unlock;11161116+ goto out_err;1141111711421118 /* Ensure sequence is zero before writing the rest of the struct. */11431119 smp_wmb();11441120 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))11451145- goto out_unlock;11211121+ goto out_err;1146112211471123 /*11481124 * Now switch to the TSC page mechanism by writing the sequence.···11551131 smp_wmb();1156113211571133 hv->tsc_ref.tsc_sequence = tsc_seq;11581158- kvm_write_guest(kvm, gfn_to_gpa(gfn),11591159- &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));11341134+ if (kvm_write_guest(kvm, gfn_to_gpa(gfn),11351135+ &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))11361136+ goto out_err;11371137+11381138+ hv->hv_tsc_page_status = HV_TSC_PAGE_SET;11391139+ goto out_unlock;11401140+11411141+out_err:11421142+ hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;11431143+out_unlock:11441144+ mutex_unlock(&hv->hv_lock);11451145+}11461146+11471147+void kvm_hv_invalidate_tsc_page(struct kvm *kvm)11481148+{11491149+ struct kvm_hv *hv = to_kvm_hv(kvm);11501150+ u64 gfn;11511151+11521152+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||11531153+ hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||11541154+ tsc_page_update_unsafe(hv))11551155+ return;11561156+11571157+ mutex_lock(&hv->hv_lock);11581158+11591159+ if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))11601160+ goto out_unlock;11611161+11621162+ /* Preserve 
HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */11631163+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)11641164+ hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;11651165+11661166+ gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;11671167+11681168+ hv->tsc_ref.tsc_sequence = 0;11691169+ if (kvm_write_guest(kvm, gfn_to_gpa(gfn),11701170+ &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))11711171+ hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;11721172+11601173out_unlock:11611174 mutex_unlock(&hv->hv_lock);11621175}···12541193 }12551194 case HV_X64_MSR_REFERENCE_TSC:12561195 hv->hv_tsc_page = data;12571257- if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)11961196+ if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {11971197+ if (!host)11981198+ hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;11991199+ else12001200+ hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;12581201 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);12021202+ } else {12031203+ hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;12041204+ }12591205 break;12601206 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:12611207 return kvm_hv_msr_set_crash_data(kvm,···12971229 hv->hv_tsc_emulation_control = data;12981230 break;12991231 case HV_X64_MSR_TSC_EMULATION_STATUS:12321232+ if (data && !host)12331233+ return 1;12341234+13001235 hv->hv_tsc_emulation_status = data;13011236 break;13021237 case HV_X64_MSR_TIME_REF_COUNT:
···2121}22222323/*2424+ * Return the TDP iterator to the root PT and allow it to continue its2525+ * traversal over the paging structure from there.2626+ */2727+void tdp_iter_restart(struct tdp_iter *iter)2828+{2929+ iter->yielded_gfn = iter->next_last_level_gfn;3030+ iter->level = iter->root_level;3131+3232+ iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);3333+ tdp_iter_refresh_sptep(iter);3434+3535+ iter->valid = true;3636+}3737+3838+/*2439 * Sets a TDP iterator to walk a pre-order traversal of the paging structure2540 * rooted at root_pt, starting with the walk to translate next_last_level_gfn.2641 */···4631 WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);47324833 iter->next_last_level_gfn = next_last_level_gfn;4949- iter->yielded_gfn = iter->next_last_level_gfn;5034 iter->root_level = root_level;5135 iter->min_level = min_level;5252- iter->level = root_level;5353- iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;3636+ iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt;3737+ iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt));54385555- iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);5656- tdp_iter_refresh_sptep(iter);5757-5858- iter->valid = true;3939+ tdp_iter_restart(iter);5940}60416142/*···168157 return;169158 } while (try_step_up(iter));170159 iter->valid = false;171171-}172172-173173-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter)174174-{175175- return iter->pt_path[iter->root_level - 1];176160}177161
+3-1
arch/x86/kvm/mmu/tdp_iter.h
···3636 int min_level;3737 /* The iterator's current level within the paging structure */3838 int level;3939+ /* The address space ID, i.e. SMM vs. regular. */4040+ int as_id;3941 /* A snapshot of the value at sptep */4042 u64 old_spte;4143 /*···6462void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,6563 int min_level, gfn_t next_last_level_gfn);6664void tdp_iter_next(struct tdp_iter *iter);6767-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter);6565+void tdp_iter_restart(struct tdp_iter *iter);68666967#endif /* __KVM_X86_MMU_TDP_ITER_H */
+15-25
arch/x86/kvm/mmu/tdp_mmu.c
···203203 u64 old_spte, u64 new_spte, int level,204204 bool shared);205205206206-static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)207207-{208208- return sp->role.smm ? 1 : 0;209209-}210210-211206static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)212207{213208 bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);···296301 *297302 * Given a page table that has been removed from the TDP paging structure,298303 * iterates through the page table to clear SPTEs and free child page tables.304304+ *305305+ * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU306306+ * protection. Since this thread removed it from the paging structure,307307+ * this thread will be responsible for ensuring the page is freed. Hence the308308+ * early rcu_dereferences in the function.299309 */300300-static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,310310+static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,301311 bool shared)302312{303303- struct kvm_mmu_page *sp = sptep_to_sp(pt);313313+ struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));304314 int level = sp->role.level;305315 gfn_t base_gfn = sp->gfn;306316 u64 old_child_spte;···318318 tdp_mmu_unlink_page(kvm, sp, shared);319319320320 for (i = 0; i < PT64_ENT_PER_PAGE; i++) {321321- sptep = pt + i;321321+ sptep = rcu_dereference(pt) + i;322322 gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));323323324324 if (shared) {···492492 struct tdp_iter *iter,493493 u64 new_spte)494494{495495- u64 *root_pt = tdp_iter_root_pt(iter);496496- struct kvm_mmu_page *root = sptep_to_sp(root_pt);497497- int as_id = kvm_mmu_page_as_id(root);498498-499495 lockdep_assert_held_read(&kvm->mmu_lock);500496501497 /*···505509 new_spte) != iter->old_spte)506510 return false;507511508508- handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,509509- iter->level, true);512512+ handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,513513+ 
new_spte, iter->level, true);510514511515 return true;512516}···534538 * here since the SPTE is going from non-present535539 * to non-present.536540 */537537- WRITE_ONCE(*iter->sptep, 0);541541+ WRITE_ONCE(*rcu_dereference(iter->sptep), 0);538542539543 return true;540544}···560564 u64 new_spte, bool record_acc_track,561565 bool record_dirty_log)562566{563563- tdp_ptep_t root_pt = tdp_iter_root_pt(iter);564564- struct kvm_mmu_page *root = sptep_to_sp(root_pt);565565- int as_id = kvm_mmu_page_as_id(root);566566-567567 lockdep_assert_held_write(&kvm->mmu_lock);568568569569 /*···573581574582 WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);575583576576- __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,577577- iter->level, false);584584+ __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,585585+ new_spte, iter->level, false);578586 if (record_acc_track)579587 handle_changed_spte_acc_track(iter->old_spte, new_spte,580588 iter->level);581589 if (record_dirty_log)582582- handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,590590+ handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,583591 iter->old_spte, new_spte,584592 iter->level);585593}···651659652660 WARN_ON(iter->gfn > iter->next_last_level_gfn);653661654654- tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],655655- iter->root_level, iter->min_level,656656- iter->next_last_level_gfn);662662+ tdp_iter_restart(iter);657663658664 return true;659665 }
+69-46
arch/x86/kvm/x86.c
···1526152615271527bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)15281528{15291529+ struct kvm_x86_msr_filter *msr_filter;15301530+ struct msr_bitmap_range *ranges;15291531 struct kvm *kvm = vcpu->kvm;15301530- struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;15311531- u32 count = kvm->arch.msr_filter.count;15321532- u32 i;15331533- bool r = kvm->arch.msr_filter.default_allow;15321532+ bool allowed;15341533 int idx;15341534+ u32 i;1535153515361536- /* MSR filtering not set up or x2APIC enabled, allow everything */15371537- if (!count || (index >= 0x800 && index <= 0x8ff))15361536+ /* x2APIC MSRs do not support filtering. */15371537+ if (index >= 0x800 && index <= 0x8ff)15381538 return true;1539153915401540- /* Prevent collision with set_msr_filter */15411540 idx = srcu_read_lock(&kvm->srcu);1542154115431543- for (i = 0; i < count; i++) {15421542+ msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);15431543+ if (!msr_filter) {15441544+ allowed = true;15451545+ goto out;15461546+ }15471547+15481548+ allowed = msr_filter->default_allow;15491549+ ranges = msr_filter->ranges;15501550+15511551+ for (i = 0; i < msr_filter->count; i++) {15441552 u32 start = ranges[i].base;15451553 u32 end = start + ranges[i].nmsrs;15461554 u32 flags = ranges[i].flags;15471555 unsigned long *bitmap = ranges[i].bitmap;1548155615491557 if ((index >= start) && (index < end) && (flags & type)) {15501550- r = !!test_bit(index - start, bitmap);15581558+ allowed = !!test_bit(index - start, bitmap);15511559 break;15521560 }15531561 }1554156215631563+out:15551564 srcu_read_unlock(&kvm->srcu, idx);1556156515571557- return r;15661566+ return allowed;15581567}15591568EXPORT_SYMBOL_GPL(kvm_msr_allowed);15601569···25592550 int i;25602551 struct kvm_vcpu *vcpu;25612552 struct kvm_arch *ka = &kvm->arch;25532553+25542554+ kvm_hv_invalidate_tsc_page(kvm);2562255525632556 spin_lock(&ka->pvclock_gtod_sync_lock);25642557 
kvm_make_mclock_inprogress_request(kvm);···53635352 return r;53645353}5365535453665366-static void kvm_clear_msr_filter(struct kvm *kvm)53555355+static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)53675356{53685368- u32 i;53695369- u32 count = kvm->arch.msr_filter.count;53705370- struct msr_bitmap_range ranges[16];53575357+ struct kvm_x86_msr_filter *msr_filter;5371535853725372- mutex_lock(&kvm->lock);53735373- kvm->arch.msr_filter.count = 0;53745374- memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));53755375- mutex_unlock(&kvm->lock);53765376- synchronize_srcu(&kvm->srcu);53595359+ msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);53605360+ if (!msr_filter)53615361+ return NULL;5377536253785378- for (i = 0; i < count; i++)53795379- kfree(ranges[i].bitmap);53635363+ msr_filter->default_allow = default_allow;53645364+ return msr_filter;53805365}5381536653825382-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)53675367+static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)53835368{53845384- struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;53695369+ u32 i;53705370+53715371+ if (!msr_filter)53725372+ return;53735373+53745374+ for (i = 0; i < msr_filter->count; i++)53755375+ kfree(msr_filter->ranges[i].bitmap);53765376+53775377+ kfree(msr_filter);53785378+}53795379+53805380+static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,53815381+ struct kvm_msr_filter_range *user_range)53825382+{53855383 struct msr_bitmap_range range;53865384 unsigned long *bitmap = NULL;53875385 size_t bitmap_size;···54245404 goto err;54255405 }5426540654275427- /* Everything ok, add this range identifier to our global pool */54285428- ranges[kvm->arch.msr_filter.count] = range;54295429- /* Make sure we filled the array before we tell anyone to walk it */54305430- smp_wmb();54315431- kvm->arch.msr_filter.count++;54075407+ /* Everything ok, add this range 
identifier. */54085408+ msr_filter->ranges[msr_filter->count] = range;54095409+ msr_filter->count++;5432541054335411 return 0;54345412err:···54375419static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)54385420{54395421 struct kvm_msr_filter __user *user_msr_filter = argp;54225422+ struct kvm_x86_msr_filter *new_filter, *old_filter;54405423 struct kvm_msr_filter filter;54415424 bool default_allow;54425442- int r = 0;54435425 bool empty = true;54265426+ int r = 0;54445427 u32 i;5445542854465429 if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))···54545435 if (empty && !default_allow)54555436 return -EINVAL;5456543754575457- kvm_clear_msr_filter(kvm);54385438+ new_filter = kvm_alloc_msr_filter(default_allow);54395439+ if (!new_filter)54405440+ return -ENOMEM;5458544154595459- kvm->arch.msr_filter.default_allow = default_allow;54605460-54615461- /*54625462- * Protect from concurrent calls to this function that could trigger54635463- * a TOCTOU violation on kvm->arch.msr_filter.count.54645464- */54655465- mutex_lock(&kvm->lock);54665442 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {54675467- r = kvm_add_msr_filter(kvm, &filter.ranges[i]);54685468- if (r)54695469- break;54435443+ r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);54445444+ if (r) {54455445+ kvm_free_msr_filter(new_filter);54465446+ return r;54475447+ }54705448 }54495449+54505450+ mutex_lock(&kvm->lock);54515451+54525452+ /* The per-VM filter is protected by kvm->lock... 
*/54535453+ old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);54545454+54555455+ rcu_assign_pointer(kvm->arch.msr_filter, new_filter);54565456+ synchronize_srcu(&kvm->srcu);54575457+54585458+ kvm_free_msr_filter(old_filter);5471545954725460 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);54735461 mutex_unlock(&kvm->lock);5474546254755475- return r;54635463+ return 0;54765464}5477546554785466long kvm_arch_vm_ioctl(struct file *filp,···66296603 int cpu = get_cpu();6630660466316605 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);66326632- smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,66066606+ on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,66336607 wbinvd_ipi, NULL, 1);66346608 put_cpu();66356609 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);···10660106341066110635void kvm_arch_destroy_vm(struct kvm *kvm)1066210636{1066310663- u32 i;1066410664-1066510637 if (current->mm == kvm->mm) {1066610638 /*1066710639 * Free memory regions allocated on behalf of userspace,···1067510651 mutex_unlock(&kvm->slots_lock);1067610652 }1067710653 static_call_cond(kvm_x86_vm_destroy)(kvm);1067810678- for (i = 0; i < kvm->arch.msr_filter.count; i++)1067910679- kfree(kvm->arch.msr_filter.ranges[i].bitmap);1065410654+ kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));1068010655 kvm_pic_destroy(kvm);1068110656 kvm_ioapic_destroy(kvm);1068210657 kvm_free_vcpus(kvm);
-1
arch/x86/platform/iris/iris.c
···2727MODULE_LICENSE("GPL");2828MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>");2929MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille");3030-MODULE_SUPPORTED_DEVICE("Eurobraille/Iris");31303231static bool force;3332
-2
drivers/atm/fore200e.c
···100100101101MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");102102MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);103103-MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");104104-105103106104static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {107105 { BUFFER_S1_NBR, BUFFER_L1_NBR },
+25-37
drivers/base/power/runtime.c
···325325static int __rpm_callback(int (*cb)(struct device *), struct device *dev)326326 __releases(&dev->power.lock) __acquires(&dev->power.lock)327327{328328- bool use_links = dev->power.links_count > 0;329329- bool get = false;330328 int retval, idx;331331- bool put;329329+ bool use_links = dev->power.links_count > 0;332330333331 if (dev->power.irq_safe) {334332 spin_unlock(&dev->power.lock);335335- } else if (!use_links) {336336- spin_unlock_irq(&dev->power.lock);337333 } else {338338- get = dev->power.runtime_status == RPM_RESUMING;339339-340334 spin_unlock_irq(&dev->power.lock);341335342342- /* Resume suppliers if necessary. */343343- if (get) {336336+ /*337337+ * Resume suppliers if necessary.338338+ *339339+ * The device's runtime PM status cannot change until this340340+ * routine returns, so it is safe to read the status outside of341341+ * the lock.342342+ */343343+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {344344 idx = device_links_read_lock();345345346346 retval = rpm_get_suppliers(dev);···355355356356 if (dev->power.irq_safe) {357357 spin_lock(&dev->power.lock);358358- return retval;359359- }358358+ } else {359359+ /*360360+ * If the device is suspending and the callback has returned361361+ * success, drop the usage counters of the suppliers that have362362+ * been reference counted on its resume.363363+ *364364+ * Do that if resume fails too.365365+ */366366+ if (use_links367367+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)368368+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {369369+ idx = device_links_read_lock();360370361361- spin_lock_irq(&dev->power.lock);371371+ fail:372372+ rpm_put_suppliers(dev);362373363363- if (!use_links)364364- return retval;365365-366366- /*367367- * If the device is suspending and the callback has returned success,368368- * drop the usage counters of the suppliers that have been reference369369- * counted on its resume.370370- *371371- * Do that if the resume fails 
too.372372- */373373- put = dev->power.runtime_status == RPM_SUSPENDING && !retval;374374- if (put)375375- __update_runtime_status(dev, RPM_SUSPENDED);376376- else377377- put = get && retval;378378-379379- if (put) {380380- spin_unlock_irq(&dev->power.lock);381381-382382- idx = device_links_read_lock();383383-384384-fail:385385- rpm_put_suppliers(dev);386386-387387- device_links_read_unlock(idx);374374+ device_links_read_unlock(idx);375375+ }388376389377 spin_lock_irq(&dev->power.lock);390378 }
-1
drivers/block/floppy.c
···50915091module_param(FLOPPY_IRQ, int, 0);50925092module_param(FLOPPY_DMA, int, 0);50935093MODULE_AUTHOR("Alain L. Knaff");50945094-MODULE_SUPPORTED_DEVICE("fd");50955094MODULE_LICENSE("GPL");5096509550975096/* This doesn't actually get used other than for module information */
···571571 struct lock_class_key *lock_key,572572 struct lock_class_key *request_key)573573{574574+ struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL;574575 unsigned long flags;575576 int ret = 0;576577 unsigned i;···594593 }595594596595 of_gpio_dev_init(gc, gdev);596596+597597+ /*598598+ * Assign fwnode depending on the result of the previous calls,599599+ * if none of them succeed, assign it to the parent's one.600600+ */601601+ gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;597602598603 gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);599604 if (gdev->id < 0) {···42634256 return ret;42644257 }4265425842664266- if (driver_register(&gpio_stub_drv) < 0) {42594259+ ret = driver_register(&gpio_stub_drv);42604260+ if (ret < 0) {42674261 pr_err("gpiolib: could not register GPIO stub driver\n");42684262 bus_unregister(&gpio_bus_type);42694263 return ret;
···15951595 dcn2_1_soc.num_chans = bw_params->num_channels;1596159615971597 ASSERT(clk_table->num_entries);15981598+ /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */15991599+ for (i = 0; i < dcn2_1_soc.num_states + 1; i++) {16001600+ clock_limits[i] = dcn2_1_soc.clock_limits[i];16011601+ }16021602+15981603 for (i = 0; i < clk_table->num_entries; i++) {15991604 /* loop backwards*/16001605 for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
···551551552552 if (!ttm_dma)553553 return;554554+ if (!ttm_dma->pages) {555555+ NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);556556+ return;557557+ }554558555559 /* Don't waste time looping if the object is coherent */556560 if (nvbo->force_coherent)···587583588584 if (!ttm_dma)589585 return;586586+ if (!ttm_dma->pages) {587587+ NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);588588+ return;589589+ }590590591591 /* Don't waste time looping if the object is coherent */592592 if (nvbo->force_coherent)
···266266 select IIO_BUFFER267267 select IIO_BUFFER_HW_CONSUMER268268 select IIO_BUFFER_DMAENGINE269269+ depends on HAS_IOMEM270270+ depends on OF269271 help270272 Say yes here to build support for Analog Devices Generic271273 AXI ADC IP core. The IP core is used for interfacing with···925923 depends on ARCH_STM32 || COMPILE_TEST926924 depends on OF927925 depends on REGULATOR926926+ depends on HAS_IOMEM928927 select IIO_BUFFER929928 select MFD_STM32_TIMERS930929 select IIO_STM32_TIMER_TRIGGER
···275275276276MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil");277277MODULE_DESCRIPTION("CX23415/CX23416 driver");278278-MODULE_SUPPORTED_DEVICE279279- ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n"280280- "\t\t\tYuan MPG series and similar)");281278MODULE_LICENSE("GPL");282279283280MODULE_VERSION(IVTV_VERSION);
-1
drivers/media/pci/sta2x11/sta2x11_vip.c
···12691269MODULE_DESCRIPTION("STA2X11 Video Input Port driver");12701270MODULE_AUTHOR("Wind River");12711271MODULE_LICENSE("GPL v2");12721272-MODULE_SUPPORTED_DEVICE("sta2x11 video input");12731272MODULE_VERSION(DRV_VERSION);12741273MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
-1
drivers/media/platform/atmel/atmel-isi.c
···13631363MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");13641364MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");13651365MODULE_LICENSE("GPL");13661366-MODULE_SUPPORTED_DEVICE("video");
-1
drivers/media/platform/atmel/atmel-sama5d2-isc.c
···330330MODULE_AUTHOR("Songjun Wu");331331MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");332332MODULE_LICENSE("GPL v2");333333-MODULE_SUPPORTED_DEVICE("video");
-4
drivers/media/platform/marvell-ccic/cafe-driver.c
···4444MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");4545MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");4646MODULE_LICENSE("GPL");4747-MODULE_SUPPORTED_DEVICE("Video");4848-4949-5050-51475248struct cafe_camera {5349 int registered; /* Fully initialized? */
-1
drivers/media/platform/stm32/stm32-dcmi.c
···21492149MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");21502150MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");21512151MODULE_LICENSE("GPL");21522152-MODULE_SUPPORTED_DEVICE("video");
-1
drivers/media/usb/cpia2/cpia2_v4l.c
···56565757MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");5858MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");5959-MODULE_SUPPORTED_DEVICE("video");6059MODULE_LICENSE("GPL");6160MODULE_VERSION(CPIA_VERSION);6261
-1
drivers/media/usb/tm6000/tm6000-alsa.c
···5151MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");5252MODULE_AUTHOR("Mauro Carvalho Chehab");5353MODULE_LICENSE("GPL v2");5454-MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}");5554static unsigned int debug;5655module_param(debug, int, 0644);5756MODULE_PARM_DESC(debug, "enable debug messages");
-2
drivers/media/usb/tm6000/tm6000-dvb.c
···2323MODULE_AUTHOR("Mauro Carvalho Chehab");2424MODULE_LICENSE("GPL");25252626-MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}");2727-2826static int debug;29273028module_param(debug, int, 0644);
-1
drivers/mtd/maps/sun_uflash.c
···32323333MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");3434MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");3535-MODULE_SUPPORTED_DEVICE(DRIVER_NAME);3635MODULE_LICENSE("GPL");3736MODULE_VERSION("2.1");3837
-1
drivers/net/can/peak_canfd/peak_pciefd_main.c
···21212222MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");2323MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");2424-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");2524MODULE_LICENSE("GPL v2");26252726#define PCIEFD_DRV_NAME "peak_pciefd"
-1
drivers/net/can/sja1000/ems_pci.c
···21212222MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");2323MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");2424-MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card");2524MODULE_LICENSE("GPL v2");26252726#define EMS_PCI_V1_MAX_CHAN 2
-1
drivers/net/can/sja1000/ems_pcmcia.c
···21212222MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>");2323MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards");2424-MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card");2524MODULE_LICENSE("GPL v2");26252726#define EMS_PCMCIA_MAX_CHAN 2
-1
drivers/net/can/sja1000/kvaser_pci.c
···33333434MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>");3535MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards");3636-MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card");3736MODULE_LICENSE("GPL v2");38373938#define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */
-2
drivers/net/can/sja1000/peak_pci.c
···24242525MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");2626MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");2727-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards");2828-MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards");2927MODULE_LICENSE("GPL v2");30283129#define DRV_NAME "peak_pci"
-1
drivers/net/can/sja1000/peak_pcmcia.c
···2222MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");2323MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");2424MODULE_LICENSE("GPL v2");2525-MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card");26252726/* PEAK-System PCMCIA driver name */2827#define PCC_NAME "peak_pcmcia"
···1717#include "pcan_usb_core.h"1818#include "pcan_usb_pro.h"19192020-MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter");2121-2220#define PCAN_USBPRO_CHANNEL_COUNT 223212422/* PCAN-USB Pro adapter internal clock (MHz) */
-1
drivers/net/hamradio/scc.c
···2167216721682168MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");21692169MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards");21702170-MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio");21712170MODULE_LICENSE("GPL");21722171module_init(scc_init_driver);21732172module_exit(scc_cleanup_driver);
-1
drivers/net/wireless/admtek/adm8211.c
···2828MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");2929MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");3030MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211");3131-MODULE_SUPPORTED_DEVICE("ADM8211");3231MODULE_LICENSE("GPL");33323433static unsigned int tx_ring_size __read_mostly = 16;
-1
drivers/net/wireless/ath/ath5k/base.c
···9090MODULE_AUTHOR("Jiri Slaby");9191MODULE_AUTHOR("Nick Kossifidis");9292MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");9393-MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");9493MODULE_LICENSE("Dual BSD/GPL");95949695static int ath5k_init(struct ieee80211_hw *hw);
-1
drivers/net/wireless/ath/ath9k/hw.c
···34343535MODULE_AUTHOR("Atheros Communications");3636MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");3737-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");3837MODULE_LICENSE("Dual BSD/GPL");39384039static void ath9k_hw_set_clockrate(struct ath_hw *ah)
-1
drivers/net/wireless/ath/ath9k/init.c
···37373838MODULE_AUTHOR("Atheros Communications");3939MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");4040-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");4140MODULE_LICENSE("Dual BSD/GPL");42414342static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-1
drivers/net/wireless/atmel/atmel.c
···7575MODULE_AUTHOR("Simon Kelley");7676MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");7777MODULE_LICENSE("GPL");7878-MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards");79788079/* The name of the firmware file to be loaded8180 over-rides any automatic selection */
···87878888MODULE_AUTHOR("Broadcom Corporation");8989MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");9090-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");9190MODULE_LICENSE("Dual BSD/GPL");9291/* This needs to be adjusted when brcms_firmwares changes */9392MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
···251251MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. "252252 "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs.");253253MODULE_LICENSE("Dual BSD/GPL");254254-MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350");255254module_param_hw_array(io, int, ioport, NULL, 0);256255module_param_hw_array(irq, int, irq, NULL, 0);257256module_param_array(rates, int, NULL, 0);
-1
drivers/net/wireless/cisco/airo_cs.c
···4747 "cards. This is the module that links the PCMCIA card "4848 "with the airo module.");4949MODULE_LICENSE("Dual BSD/GPL");5050-MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");51505251/*====================================================================*/5352
-1
drivers/net/wireless/intersil/hostap/hostap_cs.c
···2626MODULE_AUTHOR("Jouni Malinen");2727MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "2828 "cards (PC Card).");2929-MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)");3029MODULE_LICENSE("GPL");31303231
-1
drivers/net/wireless/intersil/hostap/hostap_pci.c
···2727MODULE_AUTHOR("Jouni Malinen");2828MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "2929 "PCI cards.");3030-MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");3130MODULE_LICENSE("GPL");32313332
-1
drivers/net/wireless/intersil/hostap/hostap_plx.c
···3030MODULE_AUTHOR("Jouni Malinen");3131MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "3232 "cards (PLX).");3333-MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)");3433MODULE_LICENSE("GPL");35343635
-1
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
···18211821MODULE_AUTHOR(DRV_PROJECT);18221822MODULE_VERSION(DRV_VERSION);18231823MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver.");18241824-MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards");18251824MODULE_DEVICE_TABLE(pci, rt2400pci_device_table);18261825MODULE_LICENSE("GPL");18271826
-1
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
···21192119MODULE_AUTHOR(DRV_PROJECT);21202120MODULE_VERSION(DRV_VERSION);21212121MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver.");21222122-MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards");21232122MODULE_DEVICE_TABLE(pci, rt2500pci_device_table);21242123MODULE_LICENSE("GPL");21252124
-1
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
···19561956MODULE_AUTHOR(DRV_PROJECT);19571957MODULE_VERSION(DRV_VERSION);19581958MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver.");19591959-MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards");19601959MODULE_DEVICE_TABLE(usb, rt2500usb_device_table);19611960MODULE_LICENSE("GPL");19621961
-1
drivers/net/wireless/ralink/rt2x00/rt2800pci.c
···439439MODULE_AUTHOR(DRV_PROJECT);440440MODULE_VERSION(DRV_VERSION);441441MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");442442-MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");443442MODULE_FIRMWARE(FIRMWARE_RT2860);444443MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);445444MODULE_LICENSE("GPL");
-1
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
···12481248MODULE_AUTHOR(DRV_PROJECT);12491249MODULE_VERSION(DRV_VERSION);12501250MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver.");12511251-MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards");12521251MODULE_DEVICE_TABLE(usb, rt2800usb_device_table);12531252MODULE_FIRMWARE(FIRMWARE_RT2870);12541253MODULE_LICENSE("GPL");
-2
drivers/net/wireless/ralink/rt2x00/rt61pci.c
···29932993MODULE_AUTHOR(DRV_PROJECT);29942994MODULE_VERSION(DRV_VERSION);29952995MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver.");29962996-MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 "29972997- "PCI & PCMCIA chipset based cards");29982996MODULE_DEVICE_TABLE(pci, rt61pci_device_table);29992997MODULE_FIRMWARE(FIRMWARE_RT2561);30002998MODULE_FIRMWARE(FIRMWARE_RT2561s);
-1
drivers/net/wireless/ralink/rt2x00/rt73usb.c
···25132513MODULE_AUTHOR(DRV_PROJECT);25142514MODULE_VERSION(DRV_VERSION);25152515MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver.");25162516-MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards");25172516MODULE_DEVICE_TABLE(usb, rt73usb_device_table);25182517MODULE_FIRMWARE(FIRMWARE_RT2571);25192518MODULE_LICENSE("GPL");
-1
drivers/net/wireless/rsi/rsi_91x_main.c
···441441module_exit(rsi_91x_hal_module_exit);442442MODULE_AUTHOR("Redpine Signals Inc");443443MODULE_DESCRIPTION("Station driver for RSI 91x devices");444444-MODULE_SUPPORTED_DEVICE("RSI-91x");445444MODULE_VERSION("0.1");446445MODULE_LICENSE("Dual BSD/GPL");
-1
drivers/net/wireless/rsi/rsi_91x_sdio.c
···1571157115721572MODULE_AUTHOR("Redpine Signals Inc");15731573MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");15741574-MODULE_SUPPORTED_DEVICE("RSI-91x");15751574MODULE_DEVICE_TABLE(sdio, rsi_dev_table);15761575MODULE_FIRMWARE(FIRMWARE_RSI9113);15771576MODULE_VERSION("0.1");
-1
drivers/net/wireless/rsi/rsi_91x_usb.c
···928928929929MODULE_AUTHOR("Redpine Signals Inc");930930MODULE_DESCRIPTION("Common USB layer for RSI drivers");931931-MODULE_SUPPORTED_DEVICE("RSI-91x");932931MODULE_DEVICE_TABLE(usb, rsi_dev_table);933932MODULE_FIRMWARE(FIRMWARE_RSI9113);934933MODULE_VERSION("0.1");
+21-43
drivers/nvme/host/core.c
···12261226 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);12271227}1228122812291229-static int nvme_keep_alive(struct nvme_ctrl *ctrl)12301230-{12311231- struct request *rq;12321232-12331233- rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,12341234- BLK_MQ_REQ_RESERVED);12351235- if (IS_ERR(rq))12361236- return PTR_ERR(rq);12371237-12381238- rq->timeout = ctrl->kato * HZ;12391239- rq->end_io_data = ctrl;12401240-12411241- blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);12421242-12431243- return 0;12441244-}12451245-12461229static void nvme_keep_alive_work(struct work_struct *work)12471230{12481231 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),12491232 struct nvme_ctrl, ka_work);12501233 bool comp_seen = ctrl->comp_seen;12341234+ struct request *rq;1251123512521236 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {12531237 dev_dbg(ctrl->device,···12411257 return;12421258 }1243125912441244- if (nvme_keep_alive(ctrl)) {12601260+ rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,12611261+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);12621262+ if (IS_ERR(rq)) {12451263 /* allocation failure, reset the controller */12461246- dev_err(ctrl->device, "keep-alive failed\n");12641264+ dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));12471265 nvme_reset_ctrl(ctrl);12481266 return;12491267 }12681268+12691269+ rq->timeout = ctrl->kato * HZ;12701270+ rq->end_io_data = ctrl;12711271+ blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);12501272}1251127312521274static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)···19541964 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);19551965}1956196619571957-static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)19671967+/*19681968+ * Even though NVMe spec explicitly states that MDTS is not applicable to the19691969+ * write-zeroes, we are cautious and limit the size to the controllers19701970+ * max_hw_sectors value, which is based on the 
MDTS field and possibly other19711971+ * limiting factors.19721972+ */19731973+static void nvme_config_write_zeroes(struct request_queue *q,19741974+ struct nvme_ctrl *ctrl)19581975{19591959- u64 max_blocks;19601960-19611961- if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||19621962- (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))19631963- return;19641964- /*19651965- * Even though NVMe spec explicitly states that MDTS is not19661966- * applicable to the write-zeroes:- "The restriction does not apply to19671967- * commands that do not transfer data between the host and the19681968- * controller (e.g., Write Uncorrectable ro Write Zeroes command).".19691969- * In order to be more cautious use controller's max_hw_sectors value19701970- * to configure the maximum sectors for the write-zeroes which is19711971- * configured based on the controller's MDTS field in the19721972- * nvme_init_identify() if available.19731973- */19741974- if (ns->ctrl->max_hw_sectors == UINT_MAX)19751975- max_blocks = (u64)USHRT_MAX + 1;19761976- else19771977- max_blocks = ns->ctrl->max_hw_sectors + 1;19781978-19791979- blk_queue_max_write_zeroes_sectors(disk->queue,19801980- nvme_lba_to_sect(ns, max_blocks));19761976+ if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&19771977+ !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))19781978+ blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);19811979}1982198019831981static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)···21372159 set_capacity_and_notify(disk, capacity);2138216021392161 nvme_config_discard(disk, ns);21402140- nvme_config_write_zeroes(disk, ns);21622162+ nvme_config_write_zeroes(disk->queue, ns->ctrl);2141216321422164 set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||21432165 test_bit(NVME_NS_FORCE_RO, &ns->flags));
+7
drivers/nvme/host/fabrics.h
···1919#define NVMF_DEF_FAIL_FAST_TMO -120202121/*2222+ * Reserved one command for internal usage. This command is used for sending2323+ * the connect command, as well as for the keep alive command on the admin2424+ * queue once live.2525+ */2626+#define NVMF_RESERVED_TAGS 12727+2828+/*2229 * Define a host as seen by the target. We allocate one at boot, but also2330 * allow the override it when creating controllers. This is both to provide2431 * persistence of the Host NQN over multiple boots, and to allow using
···6363MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");6464MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"6565 " Copyright IBM Corp. 2000");6666-MODULE_SUPPORTED_DEVICE("dasd");6766MODULE_LICENSE("GPL");68676968/*
-1
drivers/sbus/char/display7seg.c
···5050MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");5151MODULE_DESCRIPTION("7-Segment Display driver for Sun Microsystems CP1400/1500");5252MODULE_LICENSE("GPL");5353-MODULE_SUPPORTED_DEVICE("d7s");54535554struct d7s {5655 void __iomem *regs;
-1
drivers/scsi/hpsa.c
···8080MODULE_AUTHOR("Hewlett-Packard Company");8181MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \8282 HPSA_DRIVER_VERSION);8383-MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");8483MODULE_VERSION(HPSA_DRIVER_VERSION);8584MODULE_LICENSE("GPL");8685MODULE_ALIAS("cciss");
···413413 * And add this object to port_table_list.414414 */415415 if (!ioc->multipath_on_hba) {416416- port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);416416+ port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);417417 if (!port)418418 return NULL;419419
+1-1
drivers/scsi/myrs.c
···22732273 if (cs->mmio_base) {22742274 cs->disable_intr(cs);22752275 iounmap(cs->mmio_base);22762276+ cs->mmio_base = NULL;22762277 }22772278 if (cs->irq)22782279 free_irq(cs->irq, cs);22792280 if (cs->io_addr)22802281 release_region(cs->io_addr, 0x80);22812281- iounmap(cs->mmio_base);22822282 pci_set_drvdata(pdev, NULL);22832283 pci_disable_device(pdev);22842284 scsi_host_put(cs->host);
···280280static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)281281{282282 struct scsi_disk *sdkp;283283+ unsigned long flags;283284 unsigned int zno;284285 int ret;285286286287 sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);287288288288- spin_lock_bh(&sdkp->zones_wp_offset_lock);289289+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);289290 for (zno = 0; zno < sdkp->nr_zones; zno++) {290291 if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)291292 continue;292293293293- spin_unlock_bh(&sdkp->zones_wp_offset_lock);294294+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);294295 ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,295296 SD_BUF_SIZE,296297 zno * sdkp->zone_blocks, true);297297- spin_lock_bh(&sdkp->zones_wp_offset_lock);298298+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);298299 if (!ret)299300 sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,300301 zno, sd_zbc_update_wp_offset_cb,301302 sdkp);302303 }303303- spin_unlock_bh(&sdkp->zones_wp_offset_lock);304304+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);304305305306 scsi_device_put(sdkp->device);306307}···325324 struct request *rq = cmd->request;326325 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);327326 unsigned int wp_offset, zno = blk_rq_zone_no(rq);327327+ unsigned long flags;328328 blk_status_t ret;329329330330 ret = sd_zbc_cmnd_checks(cmd);···339337 if (!blk_req_zone_write_trylock(rq))340338 return BLK_STS_ZONE_RESOURCE;341339342342- spin_lock_bh(&sdkp->zones_wp_offset_lock);340340+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);343341 wp_offset = sdkp->zones_wp_offset[zno];344342 switch (wp_offset) {345343 case SD_ZBC_INVALID_WP_OFST:···368366369367 *lba += wp_offset;370368 }371371- spin_unlock_bh(&sdkp->zones_wp_offset_lock);369369+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);372370 if (ret)373371 blk_req_zone_write_unlock(rq);374372 return ret;···447445 struct scsi_disk *sdkp 
= scsi_disk(rq->rq_disk);448446 unsigned int zno = blk_rq_zone_no(rq);449447 enum req_opf op = req_op(rq);448448+ unsigned long flags;450449451450 /*452451 * If we got an error for a command that needs updating the write···455452 * invalid to force an update from disk the next time a zone append456453 * command is issued.457454 */458458- spin_lock_bh(&sdkp->zones_wp_offset_lock);455455+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);459456460457 if (result && op != REQ_OP_ZONE_RESET_ALL) {461458 if (op == REQ_OP_ZONE_APPEND) {···499496 }500497501498unlock_wp_offset:502502- spin_unlock_bh(&sdkp->zones_wp_offset_lock);499499+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);503500504501 return good_bytes;505502}
-1
drivers/scsi/smartpqi/smartpqi_init.c
···4848MODULE_AUTHOR("Microsemi");4949MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "5050 DRIVER_VERSION);5151-MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");5251MODULE_VERSION(DRIVER_VERSION);5352MODULE_LICENSE("GPL");5453
···911911 if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)912912 return;913913914914- if (lpm & !hba->vreg_info.vcc->enabled)914914+ if (lpm && !hba->vreg_info.vcc->enabled)915915 regulator_set_mode(hba->vreg_info.vccq2->reg,916916 REGULATOR_MODE_IDLE);917917 else if (!lpm)
-1
drivers/sh/maple/maple.c
···3030MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");3131MODULE_DESCRIPTION("Maple bus driver for Dreamcast");3232MODULE_LICENSE("GPL v2");3333-MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");34333534static void maple_dma_handler(struct work_struct *work);3635static void maple_vblank_handler(struct work_struct *work);
+1
drivers/spi/spi-cadence-quadspi.c
···14331433 cqspi = spi_master_get_devdata(master);1434143414351435 cqspi->pdev = pdev;14361436+ platform_set_drvdata(pdev, cqspi);1436143714371438 /* Obtain configuration from OF. */14381439 ret = cqspi_of_get_pdata(cqspi);
+1-1
drivers/staging/comedi/drivers/cb_pcidas.c
···12811281 devpriv->amcc + AMCC_OP_REG_INTCSR);1282128212831283 ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,12841284- dev->board_name, dev);12841284+ "cb_pcidas", dev);12851285 if (ret) {12861286 dev_dbg(dev->class_dev, "unable to allocate irq %d\n",12871287 pcidev->irq);
···674674{675675 struct cooling_dev_stats *stats = cdev->stats;676676677677+ if (!stats)678678+ return;679679+677680 spin_lock(&stats->lock);678681679682 if (stats->state == new_state)
+8-10
drivers/thunderbolt/switch.c
···768768769769 tb_dump_port(port->sw->tb, &port->config);770770771771- /* Control port does not need HopID allocation */772772- if (port->port) {773773- ida_init(&port->in_hopids);774774- ida_init(&port->out_hopids);775775- }776776-777771 INIT_LIST_HEAD(&port->list);778772 return 0;779773···18361842 dma_port_free(sw->dma_port);1837184318381844 tb_switch_for_each_port(sw, port) {18391839- if (!port->disabled) {18401840- ida_destroy(&port->in_hopids);18411841- ida_destroy(&port->out_hopids);18421842- }18451845+ ida_destroy(&port->in_hopids);18461846+ ida_destroy(&port->out_hopids);18431847 }1844184818451849 kfree(sw->uuid);···20172025 /* minimum setup for tb_find_cap and tb_drom_read to work */20182026 sw->ports[i].sw = sw;20192027 sw->ports[i].port = i;20282028+20292029+ /* Control port does not need HopID allocation */20302030+ if (i) {20312031+ ida_init(&sw->ports[i].in_hopids);20322032+ ida_init(&sw->ports[i].out_hopids);20332033+ }20202034 }2021203520222036 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+4
drivers/thunderbolt/tb.c
···138138 parent->boot = true;139139 parent = tb_switch_parent(parent);140140 }141141+ } else if (tb_tunnel_is_dp(tunnel)) {142142+ /* Keep the domain from powering down */143143+ pm_runtime_get_sync(&tunnel->src_port->sw->dev);144144+ pm_runtime_get_sync(&tunnel->dst_port->sw->dev);141145 }142146143147 list_add_tail(&tunnel->list, &tcm->tunnel_list);
-2
drivers/tty/serial/icom.c
···1639163916401640MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>");16411641MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");16421642-MODULE_SUPPORTED_DEVICE16431643- ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");16441642MODULE_LICENSE("GPL");16451643MODULE_FIRMWARE("icom_call_setup.bin");16461644MODULE_FIRMWARE("icom_res_dce.bin");
-1
drivers/tty/serial/jsm/jsm_driver.c
···1919MODULE_AUTHOR("Digi International, https://www.digi.com");2020MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");2121MODULE_LICENSE("GPL");2222-MODULE_SUPPORTED_DEVICE("jsm");23222423#define JSM_DRIVER_NAME "jsm"2524#define NR_PORTS 32
+4-1
drivers/usb/cdns3/cdnsp-ring.c
···21972197 * inverted in the first TDs isoc TRB.21982198 */21992199 field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |22002200- start_cycle ? 0 : 1 | TRB_SIA | TRB_TBC(burst_count);22002200+ TRB_SIA | TRB_TBC(burst_count);22012201+22022202+ if (!start_cycle)22032203+ field |= TRB_CYCLE;2201220422022205 /* Fill the rest of the TRB fields, and remaining normal TRBs. */22032206 for (i = 0; i < trbs_per_td; i++) {
+5-6
drivers/usb/dwc3/gadget.c
···783783784784 trace_dwc3_gadget_ep_disable(dep);785785786786- dwc3_remove_requests(dwc, dep);787787-788786 /* make sure HW endpoint isn't stalled */789787 if (dep->flags & DWC3_EP_STALL)790788 __dwc3_gadget_ep_set_halt(dep, 0, false);···800802 dep->endpoint.comp_desc = NULL;801803 dep->endpoint.desc = NULL;802804 }805805+806806+ dwc3_remove_requests(dwc, dep);803807804808 return 0;805809}···16171617{16181618 struct dwc3 *dwc = dep->dwc;1619161916201620- if (!dep->endpoint.desc || !dwc->pullups_connected) {16201620+ if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {16211621 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",16221622 dep->name);16231623 return -ESHUTDOWN;···22472247 if (!is_on) {22482248 u32 count;2249224922502250+ dwc->connected = false;22502251 /*22512252 * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a22522253 * Section 4.1.8 Table 4-7, it states that for a device-initiated···22722271 dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %22732272 dwc->ev_buf->length;22742273 }22752275- dwc->connected = false;22762274 } else {22772275 __dwc3_gadget_start(dwc);22782276 }···33293329{33303330 u32 reg;3331333133323332- dwc->connected = true;33333333-33343332 /*33353333 * WORKAROUND: DWC3 revisions <1.88a have an issue which33363334 * would cause a missing Disconnect Event if there's a···33683370 * transfers."33693371 */33703372 dwc3_stop_active_transfers(dwc);33733373+ dwc->connected = true;3371337433723375 reg = dwc3_readl(dwc->regs, DWC3_DCTL);33733376 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
+10-4
drivers/usb/gadget/configfs.c
···9797 struct list_head list;9898};9999100100+#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1)101101+100102static int usb_string_copy(const char *s, char **s_copy)101103{102104 int ret;···108106 if (ret > USB_MAX_STRING_LEN)109107 return -EOVERFLOW;110108111111- str = kstrdup(s, GFP_KERNEL);112112- if (!str)113113- return -ENOMEM;109109+ if (copy) {110110+ str = copy;111111+ } else {112112+ str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL);113113+ if (!str)114114+ return -ENOMEM;115115+ }116116+ strcpy(str, s);114117 if (str[ret - 1] == '\n')115118 str[ret - 1] = '\0';116116- kfree(copy);117119 *s_copy = str;118120 return 0;119121}
-1
drivers/usb/misc/ldusb.c
···117117MODULE_AUTHOR("Michael Hund <mhund@ld-didactic.de>");118118MODULE_DESCRIPTION("LD USB Driver");119119MODULE_LICENSE("GPL");120120-MODULE_SUPPORTED_DEVICE("LD USB Devices");121120122121/* All interrupt in transfers are collected in a ring buffer to123122 * avoid racing conditions and get better performance of the driver.
+7
drivers/usb/storage/transport.c
···656656 need_auto_sense = 1;657657 }658658659659+ /* Some devices (Kindle) require another command after SYNC CACHE */660660+ if ((us->fflags & US_FL_SENSE_AFTER_SYNC) &&661661+ srb->cmnd[0] == SYNCHRONIZE_CACHE) {662662+ usb_stor_dbg(us, "-- sense after SYNC CACHE\n");663663+ need_auto_sense = 1;664664+ }665665+659666 /*660667 * If we have a failure, we're going to do a REQUEST_SENSE 661668 * automatically. Note that we differentiate between a command
+12
drivers/usb/storage/unusual_devs.h
···22122212 US_FL_NO_READ_DISC_INFO ),2213221322142214/*22152215+ * Reported by Matthias Schwarzott <zzam@gentoo.org>22162216+ * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that22172217+ * the host may be finished with it, and automatically ejects its22182218+ * emulated media unless it receives another command within one second.22192219+ */22202220+UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999,22212221+ "Amazon",22222222+ "Kindle",22232223+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,22242224+ US_FL_SENSE_AFTER_SYNC ),22252225+22262226+/*22152227 * Reported by Oliver Neukum <oneukum@suse.com>22162228 * This device morphes spontaneously into another device if the access22172229 * pattern of Windows isn't followed. Thus writable media would be dirty
+9-2
drivers/usb/typec/tcpm/tcpm.c
···945945946946 port->supply_voltage = mv;947947 port->current_limit = max_ma;948948+ power_supply_changed(port->psy);948949949950 if (port->tcpc->set_current_limit)950951 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);···2932293129332932 port->pps_data.supported = false;29342933 port->usb_type = POWER_SUPPLY_USB_TYPE_PD;29342934+ power_supply_changed(port->psy);2935293529362936 /*29372937 * Select the source PDO providing the most power which has a···29572955 port->pps_data.supported = true;29582956 port->usb_type =29592957 POWER_SUPPLY_USB_TYPE_PD_PPS;29582958+ power_supply_changed(port->psy);29602959 }29612960 continue;29622961 default:···31153112 port->pps_data.out_volt));31163113 port->pps_data.op_curr = min(port->pps_data.max_curr,31173114 port->pps_data.op_curr);31153115+ power_supply_changed(port->psy);31183116 }3119311731203118 return src_pdo;···33513347 return ret;33523348 }33533349 port->vbus_charge = charge;33503350+ power_supply_changed(port->psy);33543351 return 0;33553352}33563353···35353530 port->try_src_count = 0;35363531 port->try_snk_count = 0;35373532 port->usb_type = POWER_SUPPLY_USB_TYPE_C;35333533+ power_supply_changed(port->psy);35383534 port->nr_sink_caps = 0;35393535 port->sink_cap_done = false;35403536 if (port->tcpc->enable_frs)···52255219 goto unlock;5226522052275221 /* Send when the state machine is idle */52285228- if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover)52225222+ if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)52295223 goto resched;5230522452315225 port->upcoming_state = GET_SINK_CAP;···59635957 ret = -EINVAL;59645958 break;59655959 }59665966-59605960+ power_supply_changed(port->psy);59675961 return ret;59685962}59695963···61166110 err = devm_tcpm_psy_register(port);61176111 if (err)61186112 goto out_role_sw_put;61136113+ power_supply_changed(port->psy);6119611461206115 port->typec_port = typec_register_port(port->dev, 
&port->typec_caps);61216116 if (IS_ERR(port->typec_port)) {
···6969 * initialized but before registered.7070 * @parent: the parent device7171 * @config: the bus operations that is supported by this device7272- * @nvqs: number of virtqueues supported by this device7372 * @size: size of the parent structure that contains private data7473 * @name: name of the vdpa device; optional.7574 *···8081 */8182struct vdpa_device *__vdpa_alloc_device(struct device *parent,8283 const struct vdpa_config_ops *config,8383- int nvqs, size_t size, const char *name)8484+ size_t size, const char *name)8485{8586 struct vdpa_device *vdev;8687 int err = -EINVAL;···106107 vdev->index = err;107108 vdev->config = config;108109 vdev->features_valid = false;109109- vdev->nvqs = nvqs;110110111111 if (name)112112 err = dev_set_name(&vdev->dev, "%s", name);···134136 return (strcmp(dev_name(&vdev->dev), data) == 0);135137}136138137137-static int __vdpa_register_device(struct vdpa_device *vdev)139139+static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)138140{139141 struct device *dev;142142+143143+ vdev->nvqs = nvqs;140144141145 lockdep_assert_held(&vdpa_dev_mutex);142146 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);···155155 * Caller must invoke this routine in the management device dev_add()156156 * callback after setting up valid mgmtdev for this vdpa device.157157 * @vdev: the vdpa device to be registered to vDPA bus158158+ * @nvqs: number of virtqueues supported by this device158159 *159160 * Returns an error when fail to add device to vDPA bus160161 */161161-int _vdpa_register_device(struct vdpa_device *vdev)162162+int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)162163{163164 if (!vdev->mdev)164165 return -EINVAL;165166166166- return __vdpa_register_device(vdev);167167+ return __vdpa_register_device(vdev, nvqs);167168}168169EXPORT_SYMBOL_GPL(_vdpa_register_device);169170···172171 * vdpa_register_device - register a vDPA device173172 * Callers must have a succeed call of vdpa_alloc_device() 
before.174173 * @vdev: the vdpa device to be registered to vDPA bus174174+ * @nvqs: number of virtqueues supported by this device175175 *176176 * Returns an error when fail to add to vDPA bus177177 */178178-int vdpa_register_device(struct vdpa_device *vdev)178178+int vdpa_register_device(struct vdpa_device *vdev, int nvqs)179179{180180 int err;181181182182 mutex_lock(&vdpa_dev_mutex);183183- err = __vdpa_register_device(vdev);183183+ err = __vdpa_register_device(vdev, nvqs);184184 mutex_unlock(&vdpa_dev_mutex);185185 return err;186186}
···21212222menuconfig VFIO2323 tristate "VFIO Non-Privileged userspace driver framework"2424- depends on IOMMU_API2525- select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64)2424+ select IOMMU_API2525+ select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)2626 help2727 VFIO provides a framework for secure userspace device drivers.2828 See Documentation/driver-api/vfio.rst for more details.
+2-2
drivers/vfio/platform/Kconfig
···11# SPDX-License-Identifier: GPL-2.0-only22config VFIO_PLATFORM33 tristate "VFIO support for platform devices"44- depends on VFIO && EVENTFD && (ARM || ARM64)44+ depends on VFIO && EVENTFD && (ARM || ARM64 || COMPILE_TEST)55 select VFIO_VIRQFD66 help77 Support for platform devices with VFIO. This is required to make···12121313config VFIO_AMBA1414 tristate "VFIO support for AMBA devices"1515- depends on VFIO_PLATFORM && ARM_AMBA1515+ depends on VFIO_PLATFORM && (ARM_AMBA || COMPILE_TEST)1616 help1717 Support for ARM AMBA devices with VFIO. This is required to make1818 use of ARM AMBA devices present on the system using the VFIO
+12-8
drivers/vfio/vfio_iommu_type1.c
···189189}190190191191static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,192192- dma_addr_t start, size_t size)192192+ dma_addr_t start, u64 size)193193{194194 struct rb_node *res = NULL;195195 struct rb_node *node = iommu->dma_list.rb_node;···785785 return -ENODEV;786786787787 ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);788788- if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {788788+ if (ret != 1)789789+ goto out;790790+791791+ ret = 0;792792+793793+ if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {789794 ret = vfio_lock_acct(dma, 1, true);790795 if (ret) {791796 put_pfn(*pfn_base, dma->prot);···802797 }803798 }804799800800+out:805801 mmput(mm);806802 return ret;807803}···12941288 int ret = -EINVAL, retries = 0;12951289 unsigned long pgshift;12961290 dma_addr_t iova = unmap->iova;12971297- unsigned long size = unmap->size;12911291+ u64 size = unmap->size;12981292 bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;12991293 bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;13001294 struct rb_node *n, *first_n;···13101304 if (unmap_all) {13111305 if (iova || size)13121306 goto unlock;13131313- size = SIZE_MAX;13141314- } else if (!size || size & (pgsize - 1)) {13071307+ size = U64_MAX;13081308+ } else if (!size || size & (pgsize - 1) ||13091309+ iova + size - 1 < iova || size > SIZE_MAX) {13151310 goto unlock;13161311 }13171317-13181318- if (iova + size - 1 < iova || size > SIZE_MAX)13191319- goto unlock;1320131213211313 /* When dirty tracking is enabled, allow only min supported pgsize */13221314 if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
+11-9
drivers/vhost/vdpa.c
···308308309309static void vhost_vdpa_config_put(struct vhost_vdpa *v)310310{311311- if (v->config_ctx)311311+ if (v->config_ctx) {312312 eventfd_ctx_put(v->config_ctx);313313+ v->config_ctx = NULL;314314+ }313315}314316315317static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)···331329 if (!IS_ERR_OR_NULL(ctx))332330 eventfd_ctx_put(ctx);333331334334- if (IS_ERR(v->config_ctx))335335- return PTR_ERR(v->config_ctx);332332+ if (IS_ERR(v->config_ctx)) {333333+ long ret = PTR_ERR(v->config_ctx);334334+335335+ v->config_ctx = NULL;336336+ return ret;337337+ }336338337339 v->vdpa->config->set_config_cb(v->vdpa, &cb);338340···906900907901static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)908902{909909- struct vhost_virtqueue *vq;910903 int i;911904912912- for (i = 0; i < v->nvqs; i++) {913913- vq = &v->vqs[i];914914- if (vq->call_ctx.producer.irq)915915- irq_bypass_unregister_producer(&vq->call_ctx.producer);916916- }905905+ for (i = 0; i < v->nvqs; i++)906906+ vhost_vdpa_unsetup_vq_irq(v, i);917907}918908919909static int vhost_vdpa_release(struct inode *inode, struct file *filep)
···172172MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");173173MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500");174174MODULE_LICENSE("GPL");175175-MODULE_SUPPORTED_DEVICE("watchdog");176175177176static void cpwd_writew(u16 val, void __iomem *addr)178177{
-1
drivers/watchdog/riowd.c
···46464747MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");4848MODULE_DESCRIPTION("Hardware watchdog driver for Sun RIO");4949-MODULE_SUPPORTED_DEVICE("watchdog");5049MODULE_LICENSE("GPL");51505251#define DRIVER_NAME "riowd"
···1111#include <linux/xattr.h>1212#include "internal.h"13131414-static const char afs_xattr_list[] =1515- "afs.acl\0"1616- "afs.cell\0"1717- "afs.fid\0"1818- "afs.volume\0"1919- "afs.yfs.acl\0"2020- "afs.yfs.acl_inherited\0"2121- "afs.yfs.acl_num_cleaned\0"2222- "afs.yfs.vol_acl";2323-2424-/*2525- * Retrieve a list of the supported xattrs.2626- */2727-ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size)2828-{2929- if (size == 0)3030- return sizeof(afs_xattr_list);3131- if (size < sizeof(afs_xattr_list))3232- return -ERANGE;3333- memcpy(buffer, afs_xattr_list, sizeof(afs_xattr_list));3434- return sizeof(afs_xattr_list);3535-}3636-3714/*3815 * Deal with the result of a successful fetch ACL operation.3916 */···208231 else209232 ret = -ERANGE;210233 }234234+ } else if (ret == -ENOTSUPP) {235235+ ret = -ENODATA;211236 }212237213238error_yacl:···235256{236257 struct afs_operation *op;237258 struct afs_vnode *vnode = AFS_FS_I(inode);259259+ int ret;238260239261 if (flags == XATTR_CREATE ||240262 strcmp(name, "acl") != 0)···250270 return afs_put_operation(op);251271252272 op->ops = &yfs_store_opaque_acl2_operation;253253- return afs_do_sync_operation(op);273273+ ret = afs_do_sync_operation(op);274274+ if (ret == -ENOTSUPP)275275+ ret = -ENODATA;276276+ return ret;254277}255278256279static const struct xattr_handler afs_xattr_yfs_handler = {
+2
fs/btrfs/ctree.c
···13651365 "failed to read tree block %llu from get_old_root",13661366 logical);13671367 } else {13681368+ btrfs_tree_read_lock(old);13681369 eb = btrfs_clone_extent_buffer(old);13701370+ btrfs_tree_read_unlock(old);13691371 free_extent_buffer(old);13701372 }13711373 } else if (old_root) {
+22-1
fs/btrfs/extent-tree.c
···3323332333243324 if (last_ref && btrfs_header_generation(buf) == trans->transid) {33253325 struct btrfs_block_group *cache;33263326+ bool must_pin = false;3326332733273328 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {33283329 ret = check_ref_cleanup(trans, buf->start);···33413340 goto out;33423341 }3343334233443344- if (btrfs_is_zoned(fs_info)) {33433343+ /*33443344+ * If this is a leaf and there are tree mod log users, we may33453345+ * have recorded mod log operations that point to this leaf.33463346+ * So we must make sure no one reuses this leaf's extent before33473347+ * mod log operations are applied to a node, otherwise after33483348+ * rewinding a node using the mod log operations we get an33493349+ * inconsistent btree, as the leaf's extent may now be used as33503350+ * a node or leaf for another different btree.33513351+ * We are safe from races here because at this point no other33523352+ * node or root points to this extent buffer, so if after this33533353+ * check a new tree mod log user joins, it will not be able to33543354+ * find a node pointing to this leaf and record operations that33553355+ * point to this leaf.33563356+ */33573357+ if (btrfs_header_level(buf) == 0) {33583358+ read_lock(&fs_info->tree_mod_log_lock);33593359+ must_pin = !list_empty(&fs_info->tree_mod_seq_list);33603360+ read_unlock(&fs_info->tree_mod_log_lock);33613361+ }33623362+33633363+ if (must_pin || btrfs_is_zoned(fs_info)) {33453364 btrfs_redirty_list_add(trans->transaction, buf);33463365 pin_down_extent(trans, cache, buf->start, buf->len, 1);33473366 btrfs_put_block_group(cache);
+31-2
fs/btrfs/extent_io.c
···28862886}2887288728882888/*28892889+ * Find extent buffer for a given bytenr.28902890+ *28912891+ * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking28922892+ * in endio context.28932893+ */28942894+static struct extent_buffer *find_extent_buffer_readpage(28952895+ struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)28962896+{28972897+ struct extent_buffer *eb;28982898+28992899+ /*29002900+ * For regular sectorsize, we can use page->private to grab extent29012901+ * buffer29022902+ */29032903+ if (fs_info->sectorsize == PAGE_SIZE) {29042904+ ASSERT(PagePrivate(page) && page->private);29052905+ return (struct extent_buffer *)page->private;29062906+ }29072907+29082908+ /* For subpage case, we need to lookup buffer radix tree */29092909+ rcu_read_lock();29102910+ eb = radix_tree_lookup(&fs_info->buffer_radix,29112911+ bytenr >> fs_info->sectorsize_bits);29122912+ rcu_read_unlock();29132913+ ASSERT(eb);29142914+ return eb;29152915+}29162916+29172917+/*28892918 * after a readpage IO is done, we need to:28902919 * clear the uptodate bits on error28912920 * set the uptodate bits if things worked···30252996 } else {30262997 struct extent_buffer *eb;3027299830282998- eb = (struct extent_buffer *)page->private;29992999+ eb = find_extent_buffer_readpage(fs_info, page, start);30293000 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);30303001 eb->read_mirror = mirror;30313002 atomic_dec(&eb->io_pages);···30493020 */30503021 if (page->index == end_index && i_size <= end) {30513022 u32 zero_start = max(offset_in_page(i_size),30523052- offset_in_page(end));30233023+ offset_in_page(start));3053302430543025 zero_user_segment(page, zero_start,30553026 offset_in_page(end) + 1);
+26-11
fs/btrfs/inode.c
···9008900890099009 btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",90109010 PAGE_SIZE, PAGE_SIZE,90119011- SLAB_RED_ZONE, NULL);90119011+ SLAB_MEM_SPREAD, NULL);90129012 if (!btrfs_free_space_bitmap_cachep)90139013 goto fail;90149014···98779877 struct btrfs_path *path;98789878 u64 start = ins->objectid;98799879 u64 len = ins->offset;98809880+ int qgroup_released;98809881 int ret;9881988298829883 memset(&stack_fi, 0, sizeof(stack_fi));···98909889 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);98919890 /* Encryption and other encoding is reserved and all 0 */9892989198939893- ret = btrfs_qgroup_release_data(inode, file_offset, len);98949894- if (ret < 0)98959895- return ERR_PTR(ret);98929892+ qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);98939893+ if (qgroup_released < 0)98949894+ return ERR_PTR(qgroup_released);9896989598979896 if (trans) {98989897 ret = insert_reserved_file_extent(trans, inode,98999898 file_offset, &stack_fi,99009900- true, ret);98999899+ true, qgroup_released);99019900 if (ret)99029902- return ERR_PTR(ret);99019901+ goto free_qgroup;99039902 return trans;99049903 }99059904···99109909 extent_info.file_offset = file_offset;99119910 extent_info.extent_buf = (char *)&stack_fi;99129911 extent_info.is_new_extent = true;99139913- extent_info.qgroup_reserved = ret;99129912+ extent_info.qgroup_reserved = qgroup_released;99149913 extent_info.insertions = 0;9915991499169915 path = btrfs_alloc_path();99179917- if (!path)99189918- return ERR_PTR(-ENOMEM);99169916+ if (!path) {99179917+ ret = -ENOMEM;99189918+ goto free_qgroup;99199919+ }9919992099209921 ret = btrfs_replace_file_extents(&inode->vfs_inode, path, file_offset,99219922 file_offset + len - 1, &extent_info,99229923 &trans);99239924 btrfs_free_path(path);99249925 if (ret)99259925- return ERR_PTR(ret);99269926-99269926+ goto free_qgroup;99279927 return trans;99289928+99299929+free_qgroup:99309930+ /*99319931+ * We have 
released qgroup data range at the beginning of the function,99329932+ * and normally qgroup_released bytes will be freed when committing99339933+ * transaction.99349934+ * But if we error out early, we have to free what we have released99359935+ * or we leak qgroup data reservation.99369936+ */99379937+ btrfs_qgroup_free_refroot(inode->root->fs_info,99389938+ inode->root->root_key.objectid, qgroup_released,99399939+ BTRFS_QGROUP_RSV_DATA);99409940+ return ERR_PTR(ret);99289941}9929994299309943static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+18-17
fs/btrfs/reada.c
···209209 /* find extent */210210 spin_lock(&fs_info->reada_lock);211211 re = radix_tree_lookup(&fs_info->reada_tree,212212- eb->start >> PAGE_SHIFT);212212+ eb->start >> fs_info->sectorsize_bits);213213 if (re)214214 re->refcnt++;215215 spin_unlock(&fs_info->reada_lock);···240240 zone = NULL;241241 spin_lock(&fs_info->reada_lock);242242 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,243243- logical >> PAGE_SHIFT, 1);243243+ logical >> fs_info->sectorsize_bits, 1);244244 if (ret == 1 && logical >= zone->start && logical <= zone->end) {245245 kref_get(&zone->refcnt);246246 spin_unlock(&fs_info->reada_lock);···283283284284 spin_lock(&fs_info->reada_lock);285285 ret = radix_tree_insert(&dev->reada_zones,286286- (unsigned long)(zone->end >> PAGE_SHIFT),287287- zone);286286+ (unsigned long)(zone->end >> fs_info->sectorsize_bits),287287+ zone);288288289289 if (ret == -EEXIST) {290290 kfree(zone);291291 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,292292- logical >> PAGE_SHIFT, 1);292292+ logical >> fs_info->sectorsize_bits, 1);293293 if (ret == 1 && logical >= zone->start && logical <= zone->end)294294 kref_get(&zone->refcnt);295295 else···315315 u64 length;316316 int real_stripes;317317 int nzones = 0;318318- unsigned long index = logical >> PAGE_SHIFT;318318+ unsigned long index = logical >> fs_info->sectorsize_bits;319319 int dev_replace_is_ongoing;320320 int have_zone = 0;321321···497497 struct reada_extent *re)498498{499499 int i;500500- unsigned long index = re->logical >> PAGE_SHIFT;500500+ unsigned long index = re->logical >> fs_info->sectorsize_bits;501501502502 spin_lock(&fs_info->reada_lock);503503 if (--re->refcnt) {···538538static void reada_zone_release(struct kref *kref)539539{540540 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);541541+ struct btrfs_fs_info *fs_info = zone->device->fs_info;541542542542- lockdep_assert_held(&zone->device->fs_info->reada_lock);543543+ 
lockdep_assert_held(&fs_info->reada_lock);543544544545 radix_tree_delete(&zone->device->reada_zones,545545- zone->end >> PAGE_SHIFT);546546+ zone->end >> fs_info->sectorsize_bits);546547547548 kfree(zone);548549}···594593static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)595594{596595 int i;597597- unsigned long index = zone->end >> PAGE_SHIFT;596596+ unsigned long index = zone->end >> zone->device->fs_info->sectorsize_bits;598597599598 for (i = 0; i < zone->ndevs; ++i) {600599 struct reada_zone *peer;···629628 (void **)&zone, index, 1);630629 if (ret == 0)631630 break;632632- index = (zone->end >> PAGE_SHIFT) + 1;631631+ index = (zone->end >> dev->fs_info->sectorsize_bits) + 1;633632 if (zone->locked) {634633 if (zone->elems > top_locked_elems) {635634 top_locked_elems = zone->elems;···710709 * plugging to speed things up711710 */712711 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,713713- dev->reada_next >> PAGE_SHIFT, 1);712712+ dev->reada_next >> fs_info->sectorsize_bits, 1);714713 if (ret == 0 || re->logical > dev->reada_curr_zone->end) {715714 ret = reada_pick_zone(dev);716715 if (!ret) {···719718 }720719 re = NULL;721720 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,722722- dev->reada_next >> PAGE_SHIFT, 1);721721+ dev->reada_next >> fs_info->sectorsize_bits, 1);723722 }724723 if (ret == 0) {725724 spin_unlock(&fs_info->reada_lock);···886885 pr_cont(" curr off %llu",887886 device->reada_next - zone->start);888887 pr_cont("\n");889889- index = (zone->end >> PAGE_SHIFT) + 1;888888+ index = (zone->end >> fs_info->sectorsize_bits) + 1;890889 }891890 cnt = 0;892891 index = 0;···911910 }912911 }913912 pr_cont("\n");914914- index = (re->logical >> PAGE_SHIFT) + 1;913913+ index = (re->logical >> fs_info->sectorsize_bits) + 1;915914 if (++cnt > 15)916915 break;917916 }···927926 if (ret == 0)928927 break;929928 if (!re->scheduled) {930930- index = (re->logical >> PAGE_SHIFT) + 1;929929+ index = (re->logical >> 
fs_info->sectorsize_bits) + 1;931930 continue;932931 }933932 pr_debug("re: logical %llu size %u list empty %d scheduled %d",···943942 }944943 }945944 pr_cont("\n");946946- index = (re->logical >> PAGE_SHIFT) + 1;945945+ index = (re->logical >> fs_info->sectorsize_bits) + 1;947946 }948947 spin_unlock(&fs_info->reada_lock);949948}
+4-4
fs/btrfs/tree-log.c
···3169316931703170 mutex_lock(&log_root_tree->log_mutex);3171317131723172- index2 = log_root_tree->log_transid % 2;31733173- list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);31743174- root_log_ctx.log_transid = log_root_tree->log_transid;31753175-31763172 if (btrfs_is_zoned(fs_info)) {31773173 if (!log_root_tree->node) {31783174 ret = btrfs_alloc_log_tree_node(trans, log_root_tree);···31783182 }31793183 }31803184 }31853185+31863186+ index2 = log_root_tree->log_transid % 2;31873187+ list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);31883188+ root_log_ctx.log_transid = log_root_tree->log_transid;3181318931823190 /*31833191 * Now we are safe to update the log_root_tree because we're under the
+1-1
fs/cifs/cifs_swn.c
···248248249249/*250250 * Try to find a matching registration for the tcon's server name and share name.251251- * Calls to this funciton must be protected by cifs_swnreg_idr_mutex.251251+ * Calls to this function must be protected by cifs_swnreg_idr_mutex.252252 * TODO Try to avoid memory allocations253253 */254254static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
+6-3
fs/cifs/cifsacl.c
···11181118 /* Retain old ACEs which we can retain */11191119 for (i = 0; i < src_num_aces; ++i) {11201120 pntace = (struct cifs_ace *) (acl_base + size);11211121- pnntace = (struct cifs_ace *) (nacl_base + nsize);1122112111231122 if (!new_aces_set && (pntace->flags & INHERITED_ACE)) {11241123 /* Place the new ACEs in between existing explicit and inherited */···11301131 }1131113211321133 /* If it's any one of the ACE we're replacing, skip! */11331133- if ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||11341134+ if (!mode_from_sid &&11351135+ ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||11341136 (compare_sids(&pntace->sid, pownersid) == 0) ||11351137 (compare_sids(&pntace->sid, pgrpsid) == 0) ||11361138 (compare_sids(&pntace->sid, &sid_everyone) == 0) ||11371137- (compare_sids(&pntace->sid, &sid_authusers) == 0)) {11391139+ (compare_sids(&pntace->sid, &sid_authusers) == 0))) {11381140 goto next_ace;11391141 }11421142+11431143+ /* update the pointer to the next ACE to populate */11441144+ pnntace = (struct cifs_ace *) (nacl_base + nsize);1140114511411146 nsize += cifs_copy_ace(pnntace, pntace, NULL);11421147 num_aces++;
+4-2
fs/cifs/fs_context.c
···11961196 pr_warn_once("Witness protocol support is experimental\n");11971197 break;11981198 case Opt_rootfs:11991199-#ifdef CONFIG_CIFS_ROOT12001200- ctx->rootfs = true;11991199+#ifndef CONFIG_CIFS_ROOT12001200+ cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");12011201+ goto cifs_parse_mount_err;12011202#endif12031203+ ctx->rootfs = true;12021204 break;12031205 case Opt_posixpaths:12041206 if (result.negated)
+9-1
fs/cifs/inode.c
···23952395 * We need to be sure that all dirty pages are written and the server23962396 * has actual ctime, mtime and file length.23972397 */23982398- if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE)) &&23982398+ if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&23992399 !CIFS_CACHE_READ(CIFS_I(inode)) &&24002400 inode->i_mapping && inode->i_mapping->nrpages != 0) {24012401 rc = filemap_fdatawait(inode->i_mapping);···25852585 if (rc == 0) {25862586 cifsInode->server_eof = attrs->ia_size;25872587 cifs_setsize(inode, attrs->ia_size);25882588+ /*25892589+ * i_blocks is not related to (i_size / i_blksize), but instead25902590+ * 512 byte (2**9) size is required for calculating num blocks.25912591+ * Until we can query the server for actual allocation size,25922592+ * this is best estimate we have for blocks allocated for a file25932593+ * Number of blocks must be rounded up so size 1 is not 0 blocks25942594+ */25952595+ inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;2588259625892597 /*25902598 * The man page of truncate says if the size changed,
+6-1
fs/cifs/transport.c
···11961196 /*11971197 * Compounding is never used during session establish.11981198 */11991199- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP))11991199+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {12001200+ mutex_lock(&server->srv_mutex);12001201 smb311_update_preauth_hash(ses, rqst[0].rq_iov,12011202 rqst[0].rq_nvec);12031203+ mutex_unlock(&server->srv_mutex);12041204+ }1202120512031206 for (i = 0; i < num_rqst; i++) {12041207 rc = wait_for_response(server, midQ[i]);···12691266 .iov_base = resp_iov[0].iov_base,12701267 .iov_len = resp_iov[0].iov_len12711268 };12691269+ mutex_lock(&server->srv_mutex);12721270 smb311_update_preauth_hash(ses, &iov, 1);12711271+ mutex_unlock(&server->srv_mutex);12731272 }1274127312751274out:
+26-12
fs/ext4/balloc.c
···626626627627/**628628 * ext4_should_retry_alloc() - check if a block allocation should be retried629629- * @sb: super block630630- * @retries: number of attemps has been made629629+ * @sb: superblock630630+ * @retries: number of retry attempts made so far631631 *632632- * ext4_should_retry_alloc() is called when ENOSPC is returned, and if633633- * it is profitable to retry the operation, this function will wait634634- * for the current or committing transaction to complete, and then635635- * return TRUE. We will only retry once.632632+ * ext4_should_retry_alloc() is called when ENOSPC is returned while633633+ * attempting to allocate blocks. If there's an indication that a pending634634+ * journal transaction might free some space and allow another attempt to635635+ * succeed, this function will wait for the current or committing transaction636636+ * to complete and then return TRUE.636637 */637638int ext4_should_retry_alloc(struct super_block *sb, int *retries)638639{639639- if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||640640- (*retries)++ > 1 ||641641- !EXT4_SB(sb)->s_journal)640640+ struct ext4_sb_info *sbi = EXT4_SB(sb);641641+642642+ if (!sbi->s_journal)642643 return 0;643644645645+ if (++(*retries) > 3) {646646+ percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);647647+ return 0;648648+ }649649+650650+ /*651651+ * if there's no indication that blocks are about to be freed it's652652+ * possible we just missed a transaction commit that did so653653+ */644654 smp_mb();645645- if (EXT4_SB(sb)->s_mb_free_pending == 0)646646- return 0;655655+ if (sbi->s_mb_free_pending == 0)656656+ return ext4_has_free_clusters(sbi, 1, 0);647657658658+ /*659659+ * it's possible we've just missed a transaction commit here,660660+ * so ignore the returned status661661+ */648662 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);649649- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);663663+ (void) jbd2_journal_force_commit_nested(sbi->s_journal);650664 
return 1;651665}652666
···19381938 if (!ret)19391939 ret = err;1940194019411941- if (!ext4_has_inline_data(inode))19421942- ext4_walk_page_buffers(NULL, page_bufs, 0, len,19431943- NULL, bput_one);19441941 ext4_set_inode_state(inode, EXT4_STATE_JDATA);19451942out:19461943 unlock_page(page);19471944out_no_pagelock:19451945+ if (!inline_data && page_bufs)19461946+ ext4_walk_page_buffers(NULL, page_bufs, 0, len,19471947+ NULL, bput_one);19481948 brelse(inode_bh);19491949 return ret;19501950}···50265026 struct ext4_inode_info *ei = EXT4_I(inode);50275027 struct buffer_head *bh = iloc->bh;50285028 struct super_block *sb = inode->i_sb;50295029- int err = 0, rc, block;50295029+ int err = 0, block;50305030 int need_datasync = 0, set_large_file = 0;50315031 uid_t i_uid;50325032 gid_t i_gid;···51385138 bh->b_data);5139513951405140 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");51415141- rc = ext4_handle_dirty_metadata(handle, NULL, bh);51425142- if (!err)51435143- err = rc;51415141+ err = ext4_handle_dirty_metadata(handle, NULL, bh);51425142+ if (err)51435143+ goto out_brelse;51445144 ext4_clear_inode_state(inode, EXT4_STATE_NEW);51455145 if (set_large_file) {51465146 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");···53875387 inode->i_gid = attr->ia_gid;53885388 error = ext4_mark_inode_dirty(handle, inode);53895389 ext4_journal_stop(handle);53905390- if (unlikely(error))53905390+ if (unlikely(error)) {53915391+ ext4_fc_stop_update(inode);53915392 return error;53935393+ }53925394 }5393539553945396 if (attr->ia_valid & ATTR_SIZE) {
+9-2
fs/ext4/mballoc.c
···27092709 }2710271027112711 if (ext4_has_feature_flex_bg(sb)) {27122712- /* a single flex group is supposed to be read by a single IO */27132713- sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,27122712+ /* a single flex group is supposed to be read by a single IO.27132713+ * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is27142714+ * unsigned integer, so the maximum shift is 32.27152715+ */27162716+ if (sbi->s_es->s_log_groups_per_flex >= 32) {27172717+ ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");27182718+ goto err_freesgi;27192719+ }27202720+ sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,27142721 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));27152722 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */27162723 } else {
+39-11
fs/ext4/namei.c
···36133613 return retval;36143614}3615361536163616+static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,36173617+ unsigned ino, unsigned file_type)36183618+{36193619+ struct ext4_renament old = *ent;36203620+ int retval = 0;36213621+36223622+ /*36233623+ * old->de could have moved from under us during make indexed dir,36243624+ * so the old->de may no longer valid and need to find it again36253625+ * before reset old inode info.36263626+ */36273627+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);36283628+ if (IS_ERR(old.bh))36293629+ retval = PTR_ERR(old.bh);36303630+ if (!old.bh)36313631+ retval = -ENOENT;36323632+ if (retval) {36333633+ ext4_std_error(old.dir->i_sb, retval);36343634+ return;36353635+ }36363636+36373637+ ext4_setent(handle, &old, ino, file_type);36383638+ brelse(old.bh);36393639+}36403640+36163641static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,36173642 const struct qstr *d_name)36183643{···37993774 */38003775 retval = -ENOENT;38013776 if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)38023802- goto end_rename;37773777+ goto release_bh;3803377838043779 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,38053780 &new.de, &new.inlined);38063781 if (IS_ERR(new.bh)) {38073782 retval = PTR_ERR(new.bh);38083783 new.bh = NULL;38093809- goto end_rename;37843784+ goto release_bh;38103785 }38113786 if (new.bh) {38123787 if (!new.inode) {···38233798 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);38243799 if (IS_ERR(handle)) {38253800 retval = PTR_ERR(handle);38263826- handle = NULL;38273827- goto end_rename;38013801+ goto release_bh;38283802 }38293803 } else {38303804 whiteout = ext4_whiteout_for_rename(mnt_userns, &old, credits, &handle);38313805 if (IS_ERR(whiteout)) {38323806 retval = PTR_ERR(whiteout);38333833- whiteout = NULL;38343834- goto end_rename;38073807+ goto release_bh;38353808 }38363809 }38373810···38733850 retval = ext4_mark_inode_dirty(handle, 
whiteout);38743851 if (unlikely(retval))38753852 goto end_rename;38533853+38763854 }38773855 if (!new.bh) {38783856 retval = ext4_add_entry(handle, new.dentry, old.inode);···39473923 ext4_fc_track_unlink(handle, new.dentry);39483924 __ext4_fc_track_link(handle, old.inode, new.dentry);39493925 __ext4_fc_track_unlink(handle, old.inode, old.dentry);39263926+ if (whiteout)39273927+ __ext4_fc_track_create(handle, whiteout, old.dentry);39503928 }3951392939523930 if (new.inode) {···39633937end_rename:39643938 if (whiteout) {39653939 if (retval) {39663966- ext4_setent(handle, &old,39673967- old.inode->i_ino, old_file_type);39403940+ ext4_resetent(handle, &old,39413941+ old.inode->i_ino, old_file_type);39683942 drop_nlink(whiteout);39433943+ ext4_orphan_add(handle, whiteout);39693944 }39703945 unlock_new_inode(whiteout);39463946+ ext4_journal_stop(handle);39713947 iput(whiteout);39723972-39483948+ } else {39493949+ ext4_journal_stop(handle);39733950 }39513951+release_bh:39743952 brelse(old.dir_bh);39753953 brelse(old.bh);39763954 brelse(new.bh);39773977- if (handle)39783978- ext4_journal_stop(handle);39793955 return retval;39803956}39813957
+6-1
fs/ext4/super.c
···12101210 percpu_counter_destroy(&sbi->s_freeinodes_counter);12111211 percpu_counter_destroy(&sbi->s_dirs_counter);12121212 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);12131213+ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);12131214 percpu_free_rwsem(&sbi->s_writepages_rwsem);12141215#ifdef CONFIG_QUOTA12151216 for (i = 0; i < EXT4_MAXQUOTAS; i++)···50135012 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,50145013 GFP_KERNEL);50155014 if (!err)50155015+ err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,50165016+ GFP_KERNEL);50175017+ if (!err)50165018 err = percpu_init_rwsem(&sbi->s_writepages_rwsem);5017501950185020 if (err) {···51285124 percpu_counter_destroy(&sbi->s_freeinodes_counter);51295125 percpu_counter_destroy(&sbi->s_dirs_counter);51305126 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);51275127+ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);51315128 percpu_free_rwsem(&sbi->s_writepages_rwsem);51325129failed_mount5:51335130 ext4_ext_release(sb);···51545149failed_mount3a:51555150 ext4_es_unregister_shrinker(sbi);51565151failed_mount3:51575157- del_timer_sync(&sbi->s_err_report);51585152 flush_work(&sbi->s_error_work);51535153+ del_timer_sync(&sbi->s_err_report);51595154 if (sbi->s_mmp_tsk)51605155 kthread_stop(sbi->s_mmp_tsk);51615156failed_mount2:
+7
fs/ext4/sysfs.c
···2424 attr_session_write_kbytes,2525 attr_lifetime_write_kbytes,2626 attr_reserved_clusters,2727+ attr_sra_exceeded_retry_limit,2728 attr_inode_readahead,2829 attr_trigger_test_error,2930 attr_first_error_time,···203202EXT4_ATTR_FUNC(session_write_kbytes, 0444);204203EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);205204EXT4_ATTR_FUNC(reserved_clusters, 0644);205205+EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);206206207207EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,208208 ext4_sb_info, s_inode_readahead_blks);···253251 ATTR_LIST(session_write_kbytes),254252 ATTR_LIST(lifetime_write_kbytes),255253 ATTR_LIST(reserved_clusters),254254+ ATTR_LIST(sra_exceeded_retry_limit),256255 ATTR_LIST(inode_readahead_blks),257256 ATTR_LIST(inode_goal),258257 ATTR_LIST(mb_stats),···377374 return snprintf(buf, PAGE_SIZE, "%llu\n",378375 (unsigned long long)379376 atomic64_read(&sbi->s_resv_clusters));377377+ case attr_sra_exceeded_retry_limit:378378+ return snprintf(buf, PAGE_SIZE, "%llu\n",379379+ (unsigned long long)380380+ percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));380381 case attr_inode_readahead:381382 case attr_pointer_ui:382383 if (!ptr)
+57-36
fs/ext4/verity.c
···201201 struct inode *inode = file_inode(filp);202202 const int credits = 2; /* superblock and inode for ext4_orphan_del() */203203 handle_t *handle;204204+ struct ext4_iloc iloc;204205 int err = 0;205205- int err2;206206-207207- if (desc != NULL) {208208- /* Succeeded; write the verity descriptor. */209209- err = ext4_write_verity_descriptor(inode, desc, desc_size,210210- merkle_tree_size);211211-212212- /* Write all pages before clearing VERITY_IN_PROGRESS. */213213- if (!err)214214- err = filemap_write_and_wait(inode->i_mapping);215215- }216216-217217- /* If we failed, truncate anything we wrote past i_size. */218218- if (desc == NULL || err)219219- ext4_truncate(inode);220206221207 /*222222- * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and223223- * deleting the inode from the orphan list, even if something failed.224224- * If everything succeeded, we'll also set the verity bit in the same225225- * transaction.208208+ * If an error already occurred (which fs/verity/ signals by passing209209+ * desc == NULL), then only clean-up is needed.226210 */211211+ if (desc == NULL)212212+ goto cleanup;227213228228- ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);214214+ /* Append the verity descriptor. */215215+ err = ext4_write_verity_descriptor(inode, desc, desc_size,216216+ merkle_tree_size);217217+ if (err)218218+ goto cleanup;219219+220220+ /*221221+ * Write all pages (both data and verity metadata). Note that this must222222+ * happen before clearing EXT4_STATE_VERITY_IN_PROGRESS; otherwise pages223223+ * beyond i_size won't be written properly. 
For crash consistency, this224224+ * also must happen before the verity inode flag gets persisted.225225+ */226226+ err = filemap_write_and_wait(inode->i_mapping);227227+ if (err)228228+ goto cleanup;229229+230230+ /*231231+ * Finally, set the verity inode flag and remove the inode from the232232+ * orphan list (in a single transaction).233233+ */229234230235 handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);231236 if (IS_ERR(handle)) {232232- ext4_orphan_del(NULL, inode);233233- return PTR_ERR(handle);237237+ err = PTR_ERR(handle);238238+ goto cleanup;234239 }235240236236- err2 = ext4_orphan_del(handle, inode);237237- if (err2)238238- goto out_stop;241241+ err = ext4_orphan_del(handle, inode);242242+ if (err)243243+ goto stop_and_cleanup;239244240240- if (desc != NULL && !err) {241241- struct ext4_iloc iloc;245245+ err = ext4_reserve_inode_write(handle, inode, &iloc);246246+ if (err)247247+ goto stop_and_cleanup;242248243243- err = ext4_reserve_inode_write(handle, inode, &iloc);244244- if (err)245245- goto out_stop;246246- ext4_set_inode_flag(inode, EXT4_INODE_VERITY);247247- ext4_set_inode_flags(inode, false);248248- err = ext4_mark_iloc_dirty(handle, inode, &iloc);249249- }250250-out_stop:249249+ ext4_set_inode_flag(inode, EXT4_INODE_VERITY);250250+ ext4_set_inode_flags(inode, false);251251+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);252252+ if (err)253253+ goto stop_and_cleanup;254254+251255 ext4_journal_stop(handle);252252- return err ?: err2;256256+257257+ ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);258258+ return 0;259259+260260+stop_and_cleanup:261261+ ext4_journal_stop(handle);262262+cleanup:263263+ /*264264+ * Verity failed to be enabled, so clean up by truncating any verity265265+ * metadata that was written beyond i_size (both from cache and from266266+ * disk), removing the inode from the orphan list (if it wasn't done267267+ * already), and clearing EXT4_STATE_VERITY_IN_PROGRESS.268268+ */269269+ 
truncate_inode_pages(inode->i_mapping, inode->i_size);270270+ ext4_truncate(inode);271271+ ext4_orphan_del(NULL, inode);272272+ ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);273273+ return err;253274}254275255276static int ext4_get_verity_descriptor_location(struct inode *inode,
···1324132413251325 /* virtiofs allocates and installs its own fuse devices */13261326 ctx->fudptr = NULL;13271327- if (ctx->dax)13271327+ if (ctx->dax) {13281328+ if (!fs->dax_dev) {13291329+ err = -EINVAL;13301330+ pr_err("virtio-fs: dax can't be enabled as filesystem"13311331+ " device does not support it.\n");13321332+ goto err_free_fuse_devs;13331333+ }13281334 ctx->dax_dev = fs->dax_dev;13351335+ }13291336 err = fuse_fill_super_common(sb, ctx);13301337 if (err < 0)13311338 goto err_free_fuse_devs;
+11-3
fs/io-wq.c
···386386 return NULL;387387}388388389389-static void io_flush_signals(void)389389+static bool io_flush_signals(void)390390{391391 if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {392392+ __set_current_state(TASK_RUNNING);392393 if (current->task_works)393394 task_work_run();394395 clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);396396+ return true;395397 }398398+ return false;396399}397400398401static void io_assign_current_work(struct io_worker *worker,···491488 set_task_comm(current, buf);492489493490 while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {491491+ long ret;492492+494493 set_current_state(TASK_INTERRUPTIBLE);495494loop:496495 raw_spin_lock_irq(&wqe->lock);···502497 }503498 __io_worker_idle(wqe, worker);504499 raw_spin_unlock_irq(&wqe->lock);505505- io_flush_signals();506506- if (schedule_timeout(WORKER_IDLE_TIMEOUT))500500+ if (io_flush_signals())501501+ continue;502502+ ret = schedule_timeout(WORKER_IDLE_TIMEOUT);503503+ if (try_to_freeze() || ret)507504 continue;508505 if (fatal_signal_pending(current))509506 break;···716709 set_current_state(TASK_INTERRUPTIBLE);717710 io_wq_check_workers(wq);718711 schedule_timeout(HZ);712712+ try_to_freeze();719713 if (fatal_signal_pending(current))720714 set_bit(IO_WQ_BIT_EXIT, &wq->state);721715 } while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
+9-1
fs/io-wq.h
···22#define INTERNAL_IO_WQ_H3344#include <linux/refcount.h>55-#include <linux/io_uring.h>6576struct io_wq;87···1819 IO_WQ_CANCEL_OK, /* cancelled before started */1920 IO_WQ_CANCEL_RUNNING, /* found, running, and attempted cancelled */2021 IO_WQ_CANCEL_NOTFOUND, /* work not found */2222+};2323+2424+struct io_wq_work_node {2525+ struct io_wq_work_node *next;2626+};2727+2828+struct io_wq_work_list {2929+ struct io_wq_work_node *first;3030+ struct io_wq_work_node *last;2131};22322333static inline void wq_list_add_after(struct io_wq_work_node *node,
+147-105
fs/io_uring.c
···258258259259struct io_sq_data {260260 refcount_t refs;261261- struct rw_semaphore rw_lock;261261+ atomic_t park_pending;262262+ struct mutex lock;262263263264 /* ctx's that are using this sqd */264265 struct list_head ctx_list;···274273275274 unsigned long state;276275 struct completion exited;276276+ struct callback_head *park_task_work;277277};278278279279#define IO_IOPOLL_BATCH 8···404402 struct socket *ring_sock;405403#endif406404407407- struct idr io_buffer_idr;405405+ struct xarray io_buffers;408406409407 struct xarray personalities;410408 u32 pers_next;···454452 /* Keep this last, we don't need it for the fast path */455453 struct work_struct exit_work;456454 struct list_head tctx_list;455455+};456456+457457+struct io_uring_task {458458+ /* submission side */459459+ struct xarray xa;460460+ struct wait_queue_head wait;461461+ const struct io_ring_ctx *last;462462+ struct io_wq *io_wq;463463+ struct percpu_counter inflight;464464+ atomic_t in_idle;465465+ bool sqpoll;466466+467467+ spinlock_t task_lock;468468+ struct io_wq_work_list task_list;469469+ unsigned long task_state;470470+ struct callback_head task_work;457471};458472459473/*···11531135 init_waitqueue_head(&ctx->cq_wait);11541136 INIT_LIST_HEAD(&ctx->cq_overflow_list);11551137 init_completion(&ctx->ref_comp);11561156- idr_init(&ctx->io_buffer_idr);11381138+ xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);11571139 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);11581140 mutex_init(&ctx->uring_lock);11591141 init_waitqueue_head(&ctx->wait);···15681550 io_put_task(req->task, 1);15691551 list_add(&req->compl.list, &cs->locked_free_list);15701552 cs->locked_free_nr++;15711571- } else15721572- req = NULL;15531553+ } else {15541554+ if (!percpu_ref_tryget(&ctx->refs))15551555+ req = NULL;15561556+ }15731557 io_commit_cqring(ctx);15741558 spin_unlock_irqrestore(&ctx->completion_lock, flags);15751575- io_cqring_ev_posted(ctx);1576155915771577- if (req)15601560+ if (req) {15611561+ 
io_cqring_ev_posted(ctx);15781562 percpu_ref_put(&ctx->refs);15631563+ }15791564}1580156515811566static void io_req_complete_state(struct io_kiocb *req, long res,···19461925 return ret;19471926}1948192719281928+static bool io_run_task_work_head(struct callback_head **work_head)19291929+{19301930+ struct callback_head *work, *next;19311931+ bool executed = false;19321932+19331933+ do {19341934+ work = xchg(work_head, NULL);19351935+ if (!work)19361936+ break;19371937+19381938+ do {19391939+ next = work->next;19401940+ work->func(work);19411941+ work = next;19421942+ cond_resched();19431943+ } while (work);19441944+ executed = true;19451945+ } while (1);19461946+19471947+ return executed;19481948+}19491949+19501950+static void io_task_work_add_head(struct callback_head **work_head,19511951+ struct callback_head *task_work)19521952+{19531953+ struct callback_head *head;19541954+19551955+ do {19561956+ head = READ_ONCE(*work_head);19571957+ task_work->next = head;19581958+ } while (cmpxchg(work_head, head, task_work) != head);19591959+}19601960+19491961static void io_req_task_work_add_fallback(struct io_kiocb *req,19501962 task_work_func_t cb)19511963{19521952- struct io_ring_ctx *ctx = req->ctx;19531953- struct callback_head *head;19541954-19551964 init_task_work(&req->task_work, cb);19561956- do {19571957- head = READ_ONCE(ctx->exit_task_work);19581958- req->task_work.next = head;19591959- } while (cmpxchg(&ctx->exit_task_work, head, &req->task_work) != head);19651965+ io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);19601966}1961196719621968static void __io_req_task_cancel(struct io_kiocb *req, int error)···2891284328922844 lockdep_assert_held(&req->ctx->uring_lock);2893284528942894- head = idr_find(&req->ctx->io_buffer_idr, bgid);28462846+ head = xa_load(&req->ctx->io_buffers, bgid);28952847 if (head) {28962848 if (!list_empty(&head->list)) {28972849 kbuf = list_last_entry(&head->list, struct io_buffer,···28992851 list_del(&kbuf->list);29002852 } 
else {29012853 kbuf = head;29022902- idr_remove(&req->ctx->io_buffer_idr, bgid);28542854+ xa_erase(&req->ctx->io_buffers, bgid);29032855 }29042856 if (*len > kbuf->len)29052857 *len = kbuf->len;···39403892 }39413893 i++;39423894 kfree(buf);39433943- idr_remove(&ctx->io_buffer_idr, bgid);38953895+ xa_erase(&ctx->io_buffers, bgid);3944389639453897 return i;39463898}···39583910 lockdep_assert_held(&ctx->uring_lock);3959391139603912 ret = -ENOENT;39613961- head = idr_find(&ctx->io_buffer_idr, p->bgid);39133913+ head = xa_load(&ctx->io_buffers, p->bgid);39623914 if (head)39633915 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);39643916 if (ret < 0)···4041399340423994 lockdep_assert_held(&ctx->uring_lock);4043399540444044- list = head = idr_find(&ctx->io_buffer_idr, p->bgid);39963996+ list = head = xa_load(&ctx->io_buffers, p->bgid);4045399740463998 ret = io_add_buffers(p, &head);40474047- if (ret < 0)40484048- goto out;40494049-40504050- if (!list) {40514051- ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,40524052- GFP_KERNEL);40534053- if (ret < 0) {39993999+ if (ret >= 0 && !list) {40004000+ ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);40014001+ if (ret < 0)40544002 __io_remove_buffers(ctx, head, p->bgid, -1U);40554055- goto out;40564056- }40574003 }40584058-out:40594004 if (ret < 0)40604005 req_set_fail_links(req);40614006···43864345 struct io_async_msghdr iomsg, *kmsg;43874346 struct socket *sock;43884347 unsigned flags;43484348+ int min_ret = 0;43894349 int ret;4390435043914351 sock = sock_from_file(req->file);···44014359 kmsg = &iomsg;44024360 }4403436144044404- flags = req->sr_msg.msg_flags;43624362+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;44054363 if (flags & MSG_DONTWAIT)44064364 req->flags |= REQ_F_NOWAIT;44074365 else if (issue_flags & IO_URING_F_NONBLOCK)44084366 flags |= MSG_DONTWAIT;43674367+43684368+ if (flags & MSG_WAITALL)43694369+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);4409437044104371 ret = 
__sys_sendmsg_sock(sock, &kmsg->msg, flags);44114372 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)···44204375 if (kmsg->free_iov)44214376 kfree(kmsg->free_iov);44224377 req->flags &= ~REQ_F_NEED_CLEANUP;44234423- if (ret < 0)43784378+ if (ret < min_ret)44244379 req_set_fail_links(req);44254380 __io_req_complete(req, issue_flags, ret, 0);44264381 return 0;···44334388 struct iovec iov;44344389 struct socket *sock;44354390 unsigned flags;43914391+ int min_ret = 0;44364392 int ret;4437439344384394 sock = sock_from_file(req->file);···44494403 msg.msg_controllen = 0;44504404 msg.msg_namelen = 0;4451440544524452- flags = req->sr_msg.msg_flags;44064406+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;44534407 if (flags & MSG_DONTWAIT)44544408 req->flags |= REQ_F_NOWAIT;44554409 else if (issue_flags & IO_URING_F_NONBLOCK)44564410 flags |= MSG_DONTWAIT;44114411+44124412+ if (flags & MSG_WAITALL)44134413+ min_ret = iov_iter_count(&msg.msg_iter);4457441444584415 msg.msg_flags = flags;44594416 ret = sock_sendmsg(sock, &msg);···44654416 if (ret == -ERESTARTSYS)44664417 ret = -EINTR;4467441844684468- if (ret < 0)44194419+ if (ret < min_ret)44694420 req_set_fail_links(req);44704421 __io_req_complete(req, issue_flags, ret, 0);44714422 return 0;···46174568 struct socket *sock;46184569 struct io_buffer *kbuf;46194570 unsigned flags;45714571+ int min_ret = 0;46204572 int ret, cflags = 0;46214573 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;46224574···46434593 1, req->sr_msg.len);46444594 }4645459546464646- flags = req->sr_msg.msg_flags;45964596+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;46474597 if (flags & MSG_DONTWAIT)46484598 req->flags |= REQ_F_NOWAIT;46494599 else if (force_nonblock)46504600 flags |= MSG_DONTWAIT;46014601+46024602+ if (flags & MSG_WAITALL)46034603+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);4651460446524605 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,46534606 kmsg->uaddr, flags);···46654612 if (kmsg->free_iov)46664613 
kfree(kmsg->free_iov);46674614 req->flags &= ~REQ_F_NEED_CLEANUP;46684668- if (ret < 0)46154615+ if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))46694616 req_set_fail_links(req);46704617 __io_req_complete(req, issue_flags, ret, cflags);46714618 return 0;···46804627 struct socket *sock;46814628 struct iovec iov;46824629 unsigned flags;46304630+ int min_ret = 0;46834631 int ret, cflags = 0;46844632 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;46854633···47064652 msg.msg_iocb = NULL;47074653 msg.msg_flags = 0;4708465447094709- flags = req->sr_msg.msg_flags;46554655+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;47104656 if (flags & MSG_DONTWAIT)47114657 req->flags |= REQ_F_NOWAIT;47124658 else if (force_nonblock)47134659 flags |= MSG_DONTWAIT;46604660+46614661+ if (flags & MSG_WAITALL)46624662+ min_ret = iov_iter_count(&msg.msg_iter);4714466347154664 ret = sock_recvmsg(sock, &msg, flags);47164665 if (force_nonblock && ret == -EAGAIN)···47234666out_free:47244667 if (req->flags & REQ_F_BUFFER_SELECTED)47254668 cflags = io_put_recv_kbuf(req);47264726- if (ret < 0)46694669+ if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))47274670 req_set_fail_links(req);47284671 __io_req_complete(req, issue_flags, ret, cflags);47294672 return 0;···62616204 spin_unlock_irqrestore(&ctx->completion_lock, flags);6262620562636206 if (prev) {62646264- req_set_fail_links(prev);62656207 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);62666208 io_put_req_deferred(prev, 1);62676209 } else {···67506694 set_cpus_allowed_ptr(current, cpu_online_mask);67516695 current->flags |= PF_NO_SETAFFINITY;6752669667536753- down_read(&sqd->rw_lock);67546754-66976697+ mutex_lock(&sqd->lock);67556698 while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {67566699 int ret;67576700 bool cap_entries, sqt_spin, needs_sched;6758670167596702 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {67606760- 
up_read(&sqd->rw_lock);67036703+ mutex_unlock(&sqd->lock);67616704 cond_resched();67626762- down_read(&sqd->rw_lock);67056705+ mutex_lock(&sqd->lock);67636706 io_run_task_work();67076707+ io_run_task_work_head(&sqd->park_task_work);67646708 timeout = jiffies + sqd->sq_thread_idle;67656709 continue;67666710 }···68066750 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)68076751 io_ring_set_wakeup_flag(ctx);6808675268096809- up_read(&sqd->rw_lock);67536753+ mutex_unlock(&sqd->lock);68106754 schedule();68116811- down_read(&sqd->rw_lock);67556755+ try_to_freeze();67566756+ mutex_lock(&sqd->lock);68126757 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)68136758 io_ring_clear_wakeup_flag(ctx);68146759 }6815676068166761 finish_wait(&sqd->wait, &wait);67626762+ io_run_task_work_head(&sqd->park_task_work);68176763 timeout = jiffies + sqd->sq_thread_idle;68186764 }68196819- up_read(&sqd->rw_lock);68206820- down_write(&sqd->rw_lock);68216821- /*68226822- * someone may have parked and added a cancellation task_work, run68236823- * it first because we don't want it in io_uring_cancel_sqpoll()68246824- */68256825- io_run_task_work();6826676568276766 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)68286767 io_uring_cancel_sqpoll(ctx);68296768 sqd->thread = NULL;68306769 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)68316770 io_ring_set_wakeup_flag(ctx);68326832- up_write(&sqd->rw_lock);67716771+ mutex_unlock(&sqd->lock);6833677268346773 io_run_task_work();67746774+ io_run_task_work_head(&sqd->park_task_work);68356775 complete(&sqd->exited);68366776 do_exit(0);68376777}···71277075}7128707671297077static void io_sq_thread_unpark(struct io_sq_data *sqd)71307130- __releases(&sqd->rw_lock)70787078+ __releases(&sqd->lock)71317079{71327080 WARN_ON_ONCE(sqd->thread == current);7133708170827082+ /*70837083+ * Do the dance but not conditional clear_bit() because it'd race with70847084+ * other threads incrementing park_pending and setting the bit.70857085+ */71347086 
clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);71357135- up_write(&sqd->rw_lock);70877087+ if (atomic_dec_return(&sqd->park_pending))70887088+ set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);70897089+ mutex_unlock(&sqd->lock);71367090}7137709171387092static void io_sq_thread_park(struct io_sq_data *sqd)71397139- __acquires(&sqd->rw_lock)70937093+ __acquires(&sqd->lock)71407094{71417095 WARN_ON_ONCE(sqd->thread == current);7142709670977097+ atomic_inc(&sqd->park_pending);71437098 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);71447144- down_write(&sqd->rw_lock);71457145- /* set again for consistency, in case concurrent parks are happening */71467146- set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);70997099+ mutex_lock(&sqd->lock);71477100 if (sqd->thread)71487101 wake_up_process(sqd->thread);71497102}···71577100{71587101 WARN_ON_ONCE(sqd->thread == current);7159710271607160- down_write(&sqd->rw_lock);71037103+ mutex_lock(&sqd->lock);71617104 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);71627105 if (sqd->thread)71637106 wake_up_process(sqd->thread);71647164- up_write(&sqd->rw_lock);71077107+ mutex_unlock(&sqd->lock);71657108 wait_for_completion(&sqd->exited);71667109}7167711071687111static void io_put_sq_data(struct io_sq_data *sqd)71697112{71707113 if (refcount_dec_and_test(&sqd->refs)) {71147114+ WARN_ON_ONCE(atomic_read(&sqd->park_pending));71157115+71717116 io_sq_thread_stop(sqd);71727117 kfree(sqd);71737118 }···72437184 if (!sqd)72447185 return ERR_PTR(-ENOMEM);7245718671877187+ atomic_set(&sqd->park_pending, 0);72467188 refcount_set(&sqd->refs, 1);72477189 INIT_LIST_HEAD(&sqd->ctx_list);72487248- init_rwsem(&sqd->rw_lock);71907190+ mutex_init(&sqd->lock);72497191 init_waitqueue_head(&sqd->wait);72507192 init_completion(&sqd->exited);72517193 return sqd;···7926786679277867 ret = 0;79287868 io_sq_thread_park(sqd);78697869+ list_add(&ctx->sqd_list, &sqd->ctx_list);78707870+ io_sqd_update_thread_idle(sqd);79297871 /* don't attach to a dying SQPOLL thread, would 
be racy */79307930- if (attached && !sqd->thread) {78727872+ if (attached && !sqd->thread)79317873 ret = -ENXIO;79327932- } else {79337933- list_add(&ctx->sqd_list, &sqd->ctx_list);79347934- io_sqd_update_thread_idle(sqd);79357935- }79367874 io_sq_thread_unpark(sqd);7937787579387938- if (ret < 0) {79397939- io_put_sq_data(sqd);79407940- ctx->sq_data = NULL;79417941- return ret;79427942- } else if (attached) {78767876+ if (ret < 0)78777877+ goto err;78787878+ if (attached)79437879 return 0;79447944- }7945788079467881 if (p->flags & IORING_SETUP_SQ_AFF) {79477882 int cpu = p->sq_thread_cpu;···83878332 return -ENXIO;83888333}8389833483908390-static int __io_destroy_buffers(int id, void *p, void *data)83918391-{83928392- struct io_ring_ctx *ctx = data;83938393- struct io_buffer *buf = p;83948394-83958395- __io_remove_buffers(ctx, buf, id, -1U);83968396- return 0;83978397-}83988398-83998335static void io_destroy_buffers(struct io_ring_ctx *ctx)84008336{84018401- idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);84028402- idr_destroy(&ctx->io_buffer_idr);83378337+ struct io_buffer *buf;83388338+ unsigned long index;83398339+83408340+ xa_for_each(&ctx->io_buffers, index, buf)83418341+ __io_remove_buffers(ctx, buf, index, -1U);84038342}8404834384058344static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)···84358386{84368387 /*84378388 * Some may use context even when all refs and requests have been put,84388438- * and they are free to do so while still holding uring_lock, see84398439- * __io_req_task_submit(). Wait for them to finish.83898389+ * and they are free to do so while still holding uring_lock or83908390+ * completion_lock, see __io_req_task_submit(). 
Wait for them to finish.84408391 */84418392 mutex_lock(&ctx->uring_lock);84428393 mutex_unlock(&ctx->uring_lock);83948394+ spin_lock_irq(&ctx->completion_lock);83958395+ spin_unlock_irq(&ctx->completion_lock);8443839684448397 io_sq_thread_finish(ctx);84458398 io_sqe_buffers_unregister(ctx);···85298478 return -EINVAL;85308479}8531848085328532-static bool io_run_ctx_fallback(struct io_ring_ctx *ctx)84818481+static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)85338482{85348534- struct callback_head *work, *next;85358535- bool executed = false;85368536-85378537- do {85388538- work = xchg(&ctx->exit_task_work, NULL);85398539- if (!work)85408540- break;85418541-85428542- do {85438543- next = work->next;85448544- work->func(work);85458545- work = next;85468546- cond_resched();85478547- } while (work);85488548- executed = true;85498549- } while (1);85508550-85518551- return executed;84838483+ return io_run_task_work_head(&ctx->exit_task_work);85528484}8553848585548486struct io_tctx_exit {···86138579 xa_for_each(&ctx->personalities, index, creds)86148580 io_unregister_personality(ctx, index);86158581 mutex_unlock(&ctx->uring_lock);85828582+85838583+ /* prevent SQPOLL from submitting new requests */85848584+ if (ctx->sq_data) {85858585+ io_sq_thread_park(ctx->sq_data);85868586+ list_del_init(&ctx->sqd_list);85878587+ io_sqd_update_thread_idle(ctx->sq_data);85888588+ io_sq_thread_unpark(ctx->sq_data);85898589+ }8616859086178591 io_kill_timeouts(ctx, NULL, NULL);86188592 io_poll_remove_all(ctx, NULL, NULL);···89218879 if (task) {89228880 init_completion(&work.completion);89238881 init_task_work(&work.task_work, io_sqpoll_cancel_cb);89248924- WARN_ON_ONCE(task_work_add(task, &work.task_work, TWA_SIGNAL));88828882+ io_task_work_add_head(&sqd->park_task_work, &work.task_work);89258883 wake_up_process(task);89268884 }89278885 io_sq_thread_unpark(sqd);
+10
fs/iomap/swapfile.c
···170170 return ret;171171 }172172173173+ /*174174+ * If this swapfile doesn't contain even a single page-aligned175175+ * contiguous range of blocks, reject this useless swapfile to176176+ * prevent confusion later on.177177+ */178178+ if (isi.nr_pages == 0) {179179+ pr_warn("swapon: Cannot find a single usable page in file.\n");180180+ return -EINVAL;181181+ }182182+173183 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;174184 sis->max = isi.nr_pages;175185 sis->pages = isi.nr_pages - 1;
-3
fs/locks.c
···1808180818091809 if (flags & FL_LAYOUT)18101810 return 0;18111811- if (flags & FL_DELEG)18121812- /* We leave these checks to the caller. */18131813- return 0;1814181118151812 if (arg == F_RDLCK)18161813 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
···13021302 struct nfsd_file *dst)13031303{13041304 nfs42_ssc_close(src->nf_file);13051305- /* 'src' is freed by nfsd4_do_async_copy */13051305+ fput(src->nf_file);13061306 nfsd_file_put(dst);13071307 mntput(ss_mnt);13081308}
+15-40
fs/nfsd/nfs4state.c
···49404940 return fl;49414941}4942494249434943-static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,49444944- struct nfs4_file *fp)49454945-{49464946- struct nfs4_clnt_odstate *co;49474947- struct file *f = fp->fi_deleg_file->nf_file;49484948- struct inode *ino = locks_inode(f);49494949- int writes = atomic_read(&ino->i_writecount);49504950-49514951- if (fp->fi_fds[O_WRONLY])49524952- writes--;49534953- if (fp->fi_fds[O_RDWR])49544954- writes--;49554955- if (writes > 0)49564956- return -EAGAIN;49574957- spin_lock(&fp->fi_lock);49584958- list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {49594959- if (co->co_client != clp) {49604960- spin_unlock(&fp->fi_lock);49614961- return -EAGAIN;49624962- }49634963- }49644964- spin_unlock(&fp->fi_lock);49654965- return 0;49664966-}49674967-49684943static struct nfs4_delegation *49694944nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,49704945 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)···4959498449604985 nf = find_readable_file(fp);49614986 if (!nf) {49624962- /*49634963- * We probably could attempt another open and get a read49644964- * delegation, but for now, don't bother until the49654965- * client actually sends us one.49664966- */49674967- return ERR_PTR(-EAGAIN);49874987+ /* We should always have a readable file here */49884988+ WARN_ON_ONCE(1);49894989+ return ERR_PTR(-EBADF);49684990 }49694991 spin_lock(&state_lock);49704992 spin_lock(&fp->fi_lock);···49915019 if (!fl)49925020 goto out_clnt_odstate;4993502149944994- status = nfsd4_check_conflicting_opens(clp, fp);49954995- if (status) {49964996- locks_free_lock(fl);49974997- goto out_clnt_odstate;49984998- }49995022 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);50005023 if (fl)50015024 locks_free_lock(fl);50025002- if (status)50035003- goto out_clnt_odstate;50045004- status = nfsd4_check_conflicting_opens(clp, fp);50055025 if (status)50065026 goto out_clnt_odstate;50075027···50765112 if 
(locks_in_grace(clp->net))50775113 goto out_no_deleg;50785114 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))51155115+ goto out_no_deleg;51165116+ /*51175117+ * Also, if the file was opened for write or51185118+ * create, there's a good chance the client's51195119+ * about to write to it, resulting in an51205120+ * immediate recall (since we don't support51215121+ * write delegations):51225122+ */51235123+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)51245124+ goto out_no_deleg;51255125+ if (open->op_create == NFS4_OPEN_CREATE)50795126 goto out_no_deleg;50805127 break;50815128 default:···53645389 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {53655390 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);53665391 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&53675367- cps->cpntf_time > cutoff)53925392+ cps->cpntf_time < cutoff)53685393 _free_cpntf_state_locked(nn, cps);53695394 }53705395 spin_unlock(&nn->s2s_cp_lock);
+4-6
fs/select.c
···1055105510561056 ret = do_sys_poll(ufds, nfds, to);1057105710581058- if (ret == -ERESTARTNOHAND) {10591059- restart_block->fn = do_restart_poll;10601060- ret = -ERESTART_RESTARTBLOCK;10611061- }10581058+ if (ret == -ERESTARTNOHAND)10591059+ ret = set_restart_fn(restart_block, do_restart_poll);10601060+10621061 return ret;10631062}10641063···10791080 struct restart_block *restart_block;1080108110811082 restart_block = ¤t->restart_block;10821082- restart_block->fn = do_restart_poll;10831083 restart_block->poll.ufds = ufds;10841084 restart_block->poll.nfds = nfds;10851085···10891091 } else10901092 restart_block->poll.has_timeout = 0;1091109310921092- ret = -ERESTART_RESTARTBLOCK;10941094+ ret = set_restart_fn(restart_block, do_restart_poll);10931095 }10941096 return ret;10951097}
+8-6
fs/xfs/xfs_inode.c
···10071007 /*10081008 * Make sure that we have allocated dquot(s) on disk.10091009 */10101010- error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,10111011- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,10121012- &udqp, &gdqp, &pdqp);10101010+ error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),10111011+ fsgid_into_mnt(mnt_userns), prid,10121012+ XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,10131013+ &udqp, &gdqp, &pdqp);10131014 if (error)10141015 return error;10151016···11581157 /*11591158 * Make sure that we have allocated dquot(s) on disk.11601159 */11611161- error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,11621162- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,11631163- &udqp, &gdqp, &pdqp);11601160+ error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),11611161+ fsgid_into_mnt(mnt_userns), prid,11621162+ XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,11631163+ &udqp, &gdqp, &pdqp);11641164 if (error)11651165 return error;11661166
+6
fs/xfs/xfs_itable.c
···168168 };169169 int error;170170171171+ if (breq->mnt_userns != &init_user_ns) {172172+ xfs_warn_ratelimited(breq->mp,173173+ "bulkstat not supported inside of idmapped mounts.");174174+ return -EINVAL;175175+ }176176+171177 ASSERT(breq->icount == 1);172178173179 bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
+44-46
fs/xfs/xfs_mount.c
···635635}636636637637/*638638+ * Flush and reclaim dirty inodes in preparation for unmount. Inodes and639639+ * internal inode structures can be sitting in the CIL and AIL at this point,640640+ * so we need to unpin them, write them back and/or reclaim them before unmount641641+ * can proceed.642642+ *643643+ * An inode cluster that has been freed can have its buffer still pinned in644644+ * memory because the transaction is still sitting in a iclog. The stale inodes645645+ * on that buffer will be pinned to the buffer until the transaction hits the646646+ * disk and the callbacks run. Pushing the AIL will skip the stale inodes and647647+ * may never see the pinned buffer, so nothing will push out the iclog and648648+ * unpin the buffer.649649+ *650650+ * Hence we need to force the log to unpin everything first. However, log651651+ * forces don't wait for the discards they issue to complete, so we have to652652+ * explicitly wait for them to complete here as well.653653+ *654654+ * Then we can tell the world we are unmounting so that error handling knows655655+ * that the filesystem is going away and we should error out anything that we656656+ * have been retrying in the background. 
This will prevent never-ending657657+ * retries in AIL pushing from hanging the unmount.658658+ *659659+ * Finally, we can push the AIL to clean all the remaining dirty objects, then660660+ * reclaim the remaining inodes that are still in memory at this point in time.661661+ */662662+static void663663+xfs_unmount_flush_inodes(664664+ struct xfs_mount *mp)665665+{666666+ xfs_log_force(mp, XFS_LOG_SYNC);667667+ xfs_extent_busy_wait_all(mp);668668+ flush_workqueue(xfs_discard_wq);669669+670670+ mp->m_flags |= XFS_MOUNT_UNMOUNTING;671671+672672+ xfs_ail_push_all_sync(mp->m_ail);673673+ cancel_delayed_work_sync(&mp->m_reclaim_work);674674+ xfs_reclaim_inodes(mp);675675+ xfs_health_unmount(mp);676676+}677677+678678+/*638679 * This function does the following on an initial mount of a file system:639680 * - reads the superblock from disk and init the mount struct640681 * - if we're a 32-bit kernel, do a size check on the superblock···10491008 /* Clean out dquots that might be in memory after quotacheck. 
*/10501009 xfs_qm_unmount(mp);10511010 /*10521052- * Cancel all delayed reclaim work and reclaim the inodes directly.10111011+ * Flush all inode reclamation work and flush the log.10531012 * We have to do this /after/ rtunmount and qm_unmount because those10541013 * two will have scheduled delayed reclaim for the rt/quota inodes.10551014 *···10591018 * qm_unmount_quotas and therefore rely on qm_unmount to release the10601019 * quota inodes.10611020 */10621062- cancel_delayed_work_sync(&mp->m_reclaim_work);10631063- xfs_reclaim_inodes(mp);10641064- xfs_health_unmount(mp);10211021+ xfs_unmount_flush_inodes(mp);10651022 out_log_dealloc:10661066- mp->m_flags |= XFS_MOUNT_UNMOUNTING;10671023 xfs_log_mount_cancel(mp);10681024 out_fail_wait:10691025 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)···11011063 xfs_rtunmount_inodes(mp);11021064 xfs_irele(mp->m_rootip);1103106511041104- /*11051105- * We can potentially deadlock here if we have an inode cluster11061106- * that has been freed has its buffer still pinned in memory because11071107- * the transaction is still sitting in a iclog. The stale inodes11081108- * on that buffer will be pinned to the buffer until the11091109- * transaction hits the disk and the callbacks run. Pushing the AIL will11101110- * skip the stale inodes and may never see the pinned buffer, so11111111- * nothing will push out the iclog and unpin the buffer. Hence we11121112- * need to force the log here to ensure all items are flushed into the11131113- * AIL before we go any further.11141114- */11151115- xfs_log_force(mp, XFS_LOG_SYNC);11161116-11171117- /*11181118- * Wait for all busy extents to be freed, including completion of11191119- * any discard operation.11201120- */11211121- xfs_extent_busy_wait_all(mp);11221122- flush_workqueue(xfs_discard_wq);11231123-11241124- /*11251125- * We now need to tell the world we are unmounting. 
This will allow11261126- * us to detect that the filesystem is going away and we should error11271127- * out anything that we have been retrying in the background. This will11281128- * prevent neverending retries in AIL pushing from hanging the unmount.11291129- */11301130- mp->m_flags |= XFS_MOUNT_UNMOUNTING;11311131-11321132- /*11331133- * Flush all pending changes from the AIL.11341134- */11351135- xfs_ail_push_all_sync(mp->m_ail);11361136-11371137- /*11381138- * Reclaim all inodes. At this point there should be no dirty inodes and11391139- * none should be pinned or locked. Stop background inode reclaim here11401140- * if it is still running.11411141- */11421142- cancel_delayed_work_sync(&mp->m_reclaim_work);11431143- xfs_reclaim_inodes(mp);11441144- xfs_health_unmount(mp);10661066+ xfs_unmount_flush_inodes(mp);1145106711461068 xfs_qm_unmount(mp);11471069
+2-1
fs/xfs/xfs_symlink.c
···182182 /*183183 * Make sure that we have allocated dquot(s) on disk.184184 */185185- error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,185185+ error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),186186+ fsgid_into_mnt(mnt_userns), prid,186187 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,187188 &udqp, &gdqp, &pdqp);188189 if (error)
+87-14
fs/zonefs/super.c
···165165 return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);166166}167167168168+static int zonefs_swap_activate(struct swap_info_struct *sis,169169+ struct file *swap_file, sector_t *span)170170+{171171+ struct inode *inode = file_inode(swap_file);172172+ struct zonefs_inode_info *zi = ZONEFS_I(inode);173173+174174+ if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {175175+ zonefs_err(inode->i_sb,176176+ "swap file: not a conventional zone file\n");177177+ return -EINVAL;178178+ }179179+180180+ return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);181181+}182182+168183static const struct address_space_operations zonefs_file_aops = {169184 .readpage = zonefs_readpage,170185 .readahead = zonefs_readahead,···192177 .is_partially_uptodate = iomap_is_partially_uptodate,193178 .error_remove_page = generic_error_remove_page,194179 .direct_IO = noop_direct_IO,180180+ .swap_activate = zonefs_swap_activate,195181};196182197183static void zonefs_update_stats(struct inode *inode, loff_t new_isize)···744728}745729746730/*731731+ * Do not exceed the LFS limits nor the file zone size. If pos is under the732732+ * limit it becomes a short access. 
If it exceeds the limit, return -EFBIG.733733+ */734734+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,735735+ loff_t count)736736+{737737+ struct inode *inode = file_inode(file);738738+ struct zonefs_inode_info *zi = ZONEFS_I(inode);739739+ loff_t limit = rlimit(RLIMIT_FSIZE);740740+ loff_t max_size = zi->i_max_size;741741+742742+ if (limit != RLIM_INFINITY) {743743+ if (pos >= limit) {744744+ send_sig(SIGXFSZ, current, 0);745745+ return -EFBIG;746746+ }747747+ count = min(count, limit - pos);748748+ }749749+750750+ if (!(file->f_flags & O_LARGEFILE))751751+ max_size = min_t(loff_t, MAX_NON_LFS, max_size);752752+753753+ if (unlikely(pos >= max_size))754754+ return -EFBIG;755755+756756+ return min(count, max_size - pos);757757+}758758+759759+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)760760+{761761+ struct file *file = iocb->ki_filp;762762+ struct inode *inode = file_inode(file);763763+ struct zonefs_inode_info *zi = ZONEFS_I(inode);764764+ loff_t count;765765+766766+ if (IS_SWAPFILE(inode))767767+ return -ETXTBSY;768768+769769+ if (!iov_iter_count(from))770770+ return 0;771771+772772+ if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))773773+ return -EINVAL;774774+775775+ if (iocb->ki_flags & IOCB_APPEND) {776776+ if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)777777+ return -EINVAL;778778+ mutex_lock(&zi->i_truncate_mutex);779779+ iocb->ki_pos = zi->i_wpoffset;780780+ mutex_unlock(&zi->i_truncate_mutex);781781+ }782782+783783+ count = zonefs_write_check_limits(file, iocb->ki_pos,784784+ iov_iter_count(from));785785+ if (count < 0)786786+ return count;787787+788788+ iov_iter_truncate(from, count);789789+ return iov_iter_count(from);790790+}791791+792792+/*747793 * Handle direct writes. For sequential zone files, this is the only possible748794 * write path. For these files, check that the user is issuing writes749795 * sequentially from the end of the file. 
This code assumes that the block layer···822744 struct super_block *sb = inode->i_sb;823745 bool sync = is_sync_kiocb(iocb);824746 bool append = false;825825- size_t count;826826- ssize_t ret;747747+ ssize_t ret, count;827748828749 /*829750 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT···840763 inode_lock(inode);841764 }842765843843- ret = generic_write_checks(iocb, from);844844- if (ret <= 0)766766+ count = zonefs_write_checks(iocb, from);767767+ if (count <= 0) {768768+ ret = count;845769 goto inode_unlock;846846-847847- iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);848848- count = iov_iter_count(from);770770+ }849771850772 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {851773 ret = -EINVAL;···904828 inode_lock(inode);905829 }906830907907- ret = generic_write_checks(iocb, from);831831+ ret = zonefs_write_checks(iocb, from);908832 if (ret <= 0)909833 goto inode_unlock;910910-911911- iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);912834913835 ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);914836 if (ret > 0)···10409661041967 mutex_lock(&zi->i_truncate_mutex);104296810431043- zi->i_wr_refcnt++;10441044- if (zi->i_wr_refcnt == 1) {10451045-969969+ if (!zi->i_wr_refcnt) {1046970 if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {1047971 atomic_dec(&sbi->s_open_zones);1048972 ret = -EBUSY;···1050978 if (i_size_read(inode) < zi->i_max_size) {1051979 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);1052980 if (ret) {10531053- zi->i_wr_refcnt--;1054981 atomic_dec(&sbi->s_open_zones);1055982 goto unlock;1056983 }1057984 zi->i_flags |= ZONEFS_ZONE_OPEN;1058985 }1059986 }987987+988988+ zi->i_wr_refcnt++;10609891061990unlock:1062991 mutex_unlock(&zi->i_truncate_mutex);
···2323 * System call restart block.2424 */2525struct restart_block {2626+ unsigned long arch_data;2627 long (*fn)(struct restart_block *);2728 union {2829 /* For futex_wait and futex_wait_requeue_pi */
···159159 * irq_domain_create_sim - Create a new interrupt simulator irq_domain and160160 * allocate a range of dummy interrupts.161161 *162162- * @fnode: struct fwnode_handle to be associated with this domain.162162+ * @fwnode: struct fwnode_handle to be associated with this domain.163163 * @num_irqs: Number of interrupts to allocate.164164 *165165 * On success: return a new irq_domain object.···228228 * a managed device.229229 *230230 * @dev: Device to initialize the simulator object for.231231- * @fnode: struct fwnode_handle to be associated with this domain.231231+ * @fwnode: struct fwnode_handle to be associated with this domain.232232 * @num_irqs: Number of interrupts to allocate233233 *234234 * On success: return a new irq_domain object.
+4
kernel/irq/manage.c
···11421142 irqreturn_t ret;1143114311441144 local_bh_disable();11451145+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))11461146+ local_irq_disable();11451147 ret = action->thread_fn(action->irq, action->dev_id);11461148 if (ret == IRQ_HANDLED)11471149 atomic_inc(&desc->threads_handled);1148115011491151 irq_finalize_oneshot(desc, action);11521152+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))11531153+ local_irq_enable();11501154 local_bh_enable();11511155 return ret;11521156}
+8
kernel/jump_label.c
···407407 return false;408408409409 if (!kernel_text_address(jump_entry_code(entry))) {410410+ /*411411+ * This skips patching built-in __exit, which412412+ * is part of init_section_contains() but is413413+ * not part of kernel_text_address().414414+ *415415+ * Skipping built-in __exit is fine since it416416+ * will never be executed.417417+ */410418 WARN_ONCE(!jump_entry_is_init(entry),411419 "can't patch jump_label at %pS",412420 (void *)jump_entry_code(entry));
+14-11
kernel/locking/mutex.c
···626626 */627627static __always_inline bool628628mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,629629- const bool use_ww_ctx, struct mutex_waiter *waiter)629629+ struct mutex_waiter *waiter)630630{631631 if (!waiter) {632632 /*···702702#else703703static __always_inline bool704704mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,705705- const bool use_ww_ctx, struct mutex_waiter *waiter)705705+ struct mutex_waiter *waiter)706706{707707 return false;708708}···922922 struct ww_mutex *ww;923923 int ret;924924925925+ if (!use_ww_ctx)926926+ ww_ctx = NULL;927927+925928 might_sleep();926929927930#ifdef CONFIG_DEBUG_MUTEXES···932929#endif933930934931 ww = container_of(lock, struct ww_mutex, base);935935- if (use_ww_ctx && ww_ctx) {932932+ if (ww_ctx) {936933 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))937934 return -EALREADY;938935···949946 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);950947951948 if (__mutex_trylock(lock) ||952952- mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {949949+ mutex_optimistic_spin(lock, ww_ctx, NULL)) {953950 /* got the lock, yay! 
*/954951 lock_acquired(&lock->dep_map, ip);955955- if (use_ww_ctx && ww_ctx)952952+ if (ww_ctx)956953 ww_mutex_set_context_fastpath(ww, ww_ctx);957954 preempt_enable();958955 return 0;···963960 * After waiting to acquire the wait_lock, try again.964961 */965962 if (__mutex_trylock(lock)) {966966- if (use_ww_ctx && ww_ctx)963963+ if (ww_ctx)967964 __ww_mutex_check_waiters(lock, ww_ctx);968965969966 goto skip_wait;···10161013 goto err;10171014 }1018101510191019- if (use_ww_ctx && ww_ctx) {10161016+ if (ww_ctx) {10201017 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);10211018 if (ret)10221019 goto err;···10291026 * ww_mutex needs to always recheck its position since its waiter10301027 * list is not FIFO ordered.10311028 */10321032- if ((use_ww_ctx && ww_ctx) || !first) {10291029+ if (ww_ctx || !first) {10331030 first = __mutex_waiter_is_first(lock, &waiter);10341031 if (first)10351032 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);···10421039 * or we must see its unlock and acquire.10431040 */10441041 if (__mutex_trylock(lock) ||10451045- (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))10421042+ (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))10461043 break;1047104410481045 spin_lock(&lock->wait_lock);···10511048acquired:10521049 __set_current_state(TASK_RUNNING);1053105010541054- if (use_ww_ctx && ww_ctx) {10511051+ if (ww_ctx) {10551052 /*10561053 * Wound-Wait; we stole the lock (!first_waiter), check the10571054 * waiters as anyone might want to wound us.···10711068 /* got the lock - cleanup and rejoice! */10721069 lock_acquired(&lock->dep_map, ip);1073107010741074- if (use_ww_ctx && ww_ctx)10711071+ if (ww_ctx)10751072 ww_mutex_lock_acquired(ww, ww_ctx);1076107310771074 spin_unlock(&lock->wait_lock);
-2
kernel/reboot.c
···244244void kernel_restart(char *cmd)245245{246246 kernel_restart_prepare(cmd);247247- if (pm_power_off_prepare)248248- pm_power_off_prepare();249247 migrate_to_reboot_cpu();250248 syscore_shutdown();251249 if (!cmd)
+5-1
kernel/signal.c
···288288 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));289289 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));290290291291- if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))291291+ if (unlikely(fatal_signal_pending(task) ||292292+ (task->flags & (PF_EXITING | PF_IO_WORKER))))292293 return false;293294294295 if (mask & JOBCTL_STOP_SIGMASK)···834833835834 if (!valid_signal(sig))836835 return -EINVAL;836836+ /* PF_IO_WORKER threads don't take any signals */837837+ if (t->flags & PF_IO_WORKER)838838+ return -ESRCH;837839838840 if (!si_fromuser(info))839841 return 0;
+24-18
kernel/static_call.c
···3535 return (void *)((long)site->addr + (long)&site->addr);3636}37373838+static inline unsigned long __static_call_key(const struct static_call_site *site)3939+{4040+ return (long)site->key + (long)&site->key;4141+}38423943static inline struct static_call_key *static_call_key(const struct static_call_site *site)4044{4141- return (struct static_call_key *)4242- (((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);4545+ return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);4346}44474548/* These assume the key is word-aligned. */4649static inline bool static_call_is_init(struct static_call_site *site)4750{4848- return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;5151+ return __static_call_key(site) & STATIC_CALL_SITE_INIT;4952}50535154static inline bool static_call_is_tail(struct static_call_site *site)5255{5353- return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;5656+ return __static_call_key(site) & STATIC_CALL_SITE_TAIL;5457}55585659static inline void static_call_set_init(struct static_call_site *site)5760{5858- site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -6161+ site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -5962 (long)&site->key;6063}6164···149146 };150147151148 for (site_mod = &first; site_mod; site_mod = site_mod->next) {149149+ bool init = system_state < SYSTEM_RUNNING;152150 struct module *mod = site_mod->mod;153151154152 if (!site_mod->sites) {···169165 if (mod) {170166 stop = mod->static_call_sites +171167 mod->num_static_call_sites;168168+ init = mod->state == MODULE_STATE_COMING;172169 }173170#endif174171···177172 site < stop && static_call_key(site) == key; site++) {178173 void *site_addr = static_call_addr(site);179174180180- if (static_call_is_init(site)) {181181- /*182182- * Don't write to call sites which were in183183- * initmem and have since been freed.184184- */185185- if (!mod && system_state >= SYSTEM_RUNNING)186186- continue;187187- if (mod && 
!within_module_init((unsigned long)site_addr, mod))188188- continue;189189- }175175+ if (!init && static_call_is_init(site))176176+ continue;190177191178 if (!kernel_text_address((unsigned long)site_addr)) {192192- WARN_ONCE(1, "can't patch static call site at %pS",179179+ /*180180+ * This skips patching built-in __exit, which181181+ * is part of init_section_contains() but is182182+ * not part of kernel_text_address().183183+ *184184+ * Skipping built-in __exit is fine since it185185+ * will never be executed.186186+ */187187+ WARN_ONCE(!static_call_is_init(site),188188+ "can't patch static call site at %pS",193189 site_addr);194190 continue;195191 }196192197193 arch_static_call_transform(site_addr, NULL, func,198198- static_call_is_tail(site));194194+ static_call_is_tail(site));199195 }200196 }201197···355349 struct static_call_site *site;356350357351 for (site = start; site != stop; site++) {358358- unsigned long s_key = (long)site->key + (long)&site->key;352352+ unsigned long s_key = __static_call_key(site);359353 unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;360354 unsigned long key;361355
···5353MODULE_AUTHOR("Michael T. Mayers");5454MODULE_DESCRIPTION("MOTU MidiTimePiece AV multiport MIDI");5555MODULE_LICENSE("GPL");5656-MODULE_SUPPORTED_DEVICE("{{MOTU,MidiTimePiece AV multiport MIDI}}");57565857// io resources5958#define MTPAV_IOBASE 0x378
-1
sound/drivers/mts64.c
···3737MODULE_AUTHOR("Matthias Koenig <mk@phasorlab.de>");3838MODULE_DESCRIPTION("ESI Miditerminal 4140");3939MODULE_LICENSE("GPL");4040-MODULE_SUPPORTED_DEVICE("{{ESI,Miditerminal 4140}}");41404241/*********************************************************************4342 * Chip specific
-1
sound/drivers/pcsp/pcsp.c
···2222MODULE_AUTHOR("Stas Sergeev <stsp@users.sourceforge.net>");2323MODULE_DESCRIPTION("PC-Speaker driver");2424MODULE_LICENSE("GPL");2525-MODULE_SUPPORTED_DEVICE("{{PC-Speaker, pcsp}}");2625MODULE_ALIAS("platform:pcspkr");27262827static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
-1
sound/drivers/portman2x4.c
···5757MODULE_AUTHOR("Levent Guendogdu, Tobias Gehrig, Matthias Koenig");5858MODULE_DESCRIPTION("Midiman Portman2x4");5959MODULE_LICENSE("GPL");6060-MODULE_SUPPORTED_DEVICE("{{Midiman,Portman2x4}}");61606261/*********************************************************************6362 * Chip specific
-1
sound/drivers/serial-u16550.c
···34343535MODULE_DESCRIPTION("MIDI serial u16550");3636MODULE_LICENSE("GPL");3737-MODULE_SUPPORTED_DEVICE("{{ALSA, MIDI serial u16550}}");38373938#define SNDRV_SERIAL_SOUNDCANVAS 0 /* Roland Soundcanvas; F5 NN selects part */4039#define SNDRV_SERIAL_MS124T 1 /* Midiator MS-124T */
···2222MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");2323MODULE_DESCRIPTION("AD1816A, AD1815");2424MODULE_LICENSE("GPL");2525-MODULE_SUPPORTED_DEVICE("{{Highscreen,Sound-Boostar 16 3D},"2626- "{Analog Devices,AD1815},"2727- "{Analog Devices,AD1816A},"2828- "{TerraTec,Base 64},"2929- "{TerraTec,AudioSystem EWS64S},"3030- "{Aztech/Newcom SC-16 3D},"3131- "{Shark Predator ISA}}");32253326static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 1-MAX */3427static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/isa/ad1848/ad1848.c
···2222MODULE_DESCRIPTION(CRD_NAME);2323MODULE_AUTHOR("Tugrul Galatali <galatalt@stuy.edu>, Jaroslav Kysela <perex@perex.cz>");2424MODULE_LICENSE("GPL");2525-MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1848},"2626- "{Analog Devices,AD1847},"2727- "{Crystal Semiconductors,CS4248}}");28252926static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */3027static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
···2323MODULE_DESCRIPTION(CRD_NAME);2424MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2525MODULE_LICENSE("GPL");2626-MODULE_SUPPORTED_DEVICE("{{Crystal Semiconductors,CS4231}}");27262827static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2928static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
···2323MODULE_DESCRIPTION(CRD_NAME);2424MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2525MODULE_LICENSE("GPL");2626-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Classic}}");27262827static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2928static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/gus/gusextreme.c
···2727MODULE_DESCRIPTION(CRD_NAME);2828MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2929MODULE_LICENSE("GPL");3030-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Extreme}}");31303231static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */3332static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/gus/gusmax.c
···2121MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2222MODULE_DESCRIPTION("Gravis UltraSound MAX");2323MODULE_LICENSE("GPL");2424-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound MAX}}");25242625static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2726static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-6
sound/isa/gus/interwave.c
···2828MODULE_LICENSE("GPL");2929#ifndef SNDRV_STB3030MODULE_DESCRIPTION("AMD InterWave");3131-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Plug & Play},"3232- "{STB,SoundRage32},"3333- "{MED,MED3210},"3434- "{Dynasonix,Dynasonix Pro},"3535- "{Panasonic,PCA761AW}}");3631#else3732MODULE_DESCRIPTION("AMD InterWave STB with TEA6330T");3838-MODULE_SUPPORTED_DEVICE("{{AMD,InterWave STB with TEA6330T}}");3933#endif40344135static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-5
sound/isa/opl3sa2.c
···2222MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2323MODULE_DESCRIPTION("Yamaha OPL3SA2+");2424MODULE_LICENSE("GPL");2525-MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF719E-S},"2626- "{Genius,Sound Maker 3DX},"2727- "{Yamaha,OPL3SA3},"2828- "{Intel,AL440LX sound},"2929- "{NeoMagic,MagicWave 3DX}}");30253126static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */3227static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/isa/opti9xx/miro.c
···3333MODULE_AUTHOR("Martin Langer <martin-langer@gmx.de>");3434MODULE_LICENSE("GPL");3535MODULE_DESCRIPTION("Miro miroSOUND PCM1 pro, PCM12, PCM20 Radio");3636-MODULE_SUPPORTED_DEVICE("{{Miro,miroSOUND PCM1 pro}, "3737- "{Miro,miroSOUND PCM12}, "3838- "{Miro,miroSOUND PCM20 Radio}}");39364037static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */4138static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
···1717MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");1818MODULE_DESCRIPTION("Sound Blaster 1.0/2.0/Pro");1919MODULE_LICENSE("GPL");2020-MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB 1.0/SB 2.0/SB Pro}}");21202221static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2322static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/isa/sc6000.c
···2929MODULE_AUTHOR("Krzysztof Helt");3030MODULE_DESCRIPTION("Gallant SC-6000");3131MODULE_LICENSE("GPL");3232-MODULE_SUPPORTED_DEVICE("{{Gallant, SC-6000},"3333- "{AudioExcel, Audio Excel DSP 16},"3434- "{Zoltrix, AV302}}");35323633static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */3734static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/isa/wavefront/wavefront.c
···2121MODULE_AUTHOR("Paul Barton-Davis <pbd@op.net>");2222MODULE_DESCRIPTION("Turtle Beach Wavefront");2323MODULE_LICENSE("GPL");2424-MODULE_SUPPORTED_DEVICE("{{Turtle Beach,Maui/Tropez/Tropez+}}");25242625static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2726static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/mips/sgio2audio.c
···3232MODULE_AUTHOR("Vivien Chappelier <vivien.chappelier@linux-mips.org>");3333MODULE_DESCRIPTION("SGI O2 Audio");3434MODULE_LICENSE("GPL");3535-MODULE_SUPPORTED_DEVICE("{{Silicon Graphics, O2 Audio}}");36353736static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */3837static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
···2929MODULE_AUTHOR("Matt Wu <Matt_Wu@acersoftech.com.cn>");3030MODULE_DESCRIPTION("ALI M5451");3131MODULE_LICENSE("GPL");3232-MODULE_SUPPORTED_DEVICE("{{ALI,M5451,pci},{ALI,M5451}}");33323433static int index = SNDRV_DEFAULT_IDX1; /* Index */3534static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
···2323MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");2424MODULE_DESCRIPTION("ATI IXP AC97 controller");2525MODULE_LICENSE("GPL");2626-MODULE_SUPPORTED_DEVICE("{{ATI,IXP150/200/250/300/400/600}}");27262827static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */2928static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/atiixp_modem.c
···2323MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");2424MODULE_DESCRIPTION("ATI IXP MC97 controller");2525MODULE_LICENSE("GPL");2626-MODULE_SUPPORTED_DEVICE("{{ATI,IXP150/200/250}}");27262827static int index = -2; /* Exclude the first card */2928static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
···2525MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2626MODULE_DESCRIPTION("Cirrus Logic CS4281");2727MODULE_LICENSE("GPL");2828-MODULE_SUPPORTED_DEVICE("{{Cirrus Logic,CS4281}}");29283029static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */3130static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
···2626MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2727MODULE_DESCRIPTION("ForteMedia FM801");2828MODULE_LICENSE("GPL");2929-MODULE_SUPPORTED_DEVICE("{{ForteMedia,FM801},"3030- "{Genius,SoundMaker Live 5.1}}");31293230static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */3331static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
···6060MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");6161MODULE_DESCRIPTION("ICEnsemble ICE1712 (Envy24)");6262MODULE_LICENSE("GPL");6363-MODULE_SUPPORTED_DEVICE("{"6464- HOONTECH_DEVICE_DESC6565- DELTA_DEVICE_DESC6666- EWS_DEVICE_DESC6767- "{ICEnsemble,Generic ICE1712},"6868- "{ICEnsemble,Generic Envy24}}");69637064static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */7165static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-19
sound/pci/ice1712/ice1724.c
···4444MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");4545MODULE_DESCRIPTION("VIA ICEnsemble ICE1724/1720 (Envy24HT/PT)");4646MODULE_LICENSE("GPL");4747-MODULE_SUPPORTED_DEVICE("{"4848- REVO_DEVICE_DESC4949- AMP_AUDIO2000_DEVICE_DESC5050- AUREON_DEVICE_DESC5151- VT1720_MOBO_DEVICE_DESC5252- PONTIS_DEVICE_DESC5353- PRODIGY192_DEVICE_DESC5454- PRODIGY_HIFI_DEVICE_DESC5555- JULI_DEVICE_DESC5656- MAYA44_DEVICE_DESC5757- PHASE_DEVICE_DESC5858- WTM_DEVICE_DESC5959- SE_DEVICE_DESC6060- QTET_DEVICE_DESC6161- "{VIA,VT1720},"6262- "{VIA,VT1724},"6363- "{ICEnsemble,Generic ICE1724},"6464- "{ICEnsemble,Generic Envy24HT}"6565- "{ICEnsemble,Generic Envy24PT}}");66476748static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */6849static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-23
sound/pci/intel8x0.c
···2727MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2828MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");2929MODULE_LICENSE("GPL");3030-MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH},"3131- "{Intel,82901AB-ICH0},"3232- "{Intel,82801BA-ICH2},"3333- "{Intel,82801CA-ICH3},"3434- "{Intel,82801DB-ICH4},"3535- "{Intel,ICH5},"3636- "{Intel,ICH6},"3737- "{Intel,ICH7},"3838- "{Intel,6300ESB},"3939- "{Intel,ESB2},"4040- "{Intel,MX440},"4141- "{SiS,SI7012},"4242- "{NVidia,nForce Audio},"4343- "{NVidia,nForce2 Audio},"4444- "{NVidia,nForce3 Audio},"4545- "{NVidia,MCP04},"4646- "{NVidia,MCP501},"4747- "{NVidia,CK804},"4848- "{NVidia,CK8},"4949- "{NVidia,CK8S},"5050- "{AMD,AMD768},"5151- "{AMD,AMD8111},"5252- "{ALI,M5455}}");53305431static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */5532static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-15
sound/pci/intel8x0m.c
···2525MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; "2626 "SiS 7013; NVidia MCP/2/2S/3 modems");2727MODULE_LICENSE("GPL");2828-MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH},"2929- "{Intel,82901AB-ICH0},"3030- "{Intel,82801BA-ICH2},"3131- "{Intel,82801CA-ICH3},"3232- "{Intel,82801DB-ICH4},"3333- "{Intel,ICH5},"3434- "{Intel,ICH6},"3535- "{Intel,ICH7},"3636- "{Intel,MX440},"3737- "{SiS,7013},"3838- "{NVidia,NForce Modem},"3939- "{NVidia,NForce2 Modem},"4040- "{NVidia,NForce2s Modem},"4141- "{NVidia,NForce3 Modem},"4242- "{AMD,AMD768}}");43284429static int index = -2; /* Exclude the first card */4530static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/korg1212/korg1212.c
···388388389389MODULE_DESCRIPTION("korg1212");390390MODULE_LICENSE("GPL");391391-MODULE_SUPPORTED_DEVICE("{{KORG,korg1212}}");392391MODULE_FIRMWARE("korg/k1212.dsp");393392394393static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
···3232MODULE_AUTHOR("Digigram <alsa@digigram.com>");3333MODULE_DESCRIPTION("Digigram " CARD_NAME);3434MODULE_LICENSE("GPL");3535-MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}");36353736static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */3837static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-2
sound/pci/nm256/nm256.c
···3232MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");3333MODULE_DESCRIPTION("NeoMagic NM256AV/ZX");3434MODULE_LICENSE("GPL");3535-MODULE_SUPPORTED_DEVICE("{{NeoMagic,NM256AV},"3636- "{NeoMagic,NM256ZX}}");37353836/*3937 * some compile conditions.
···3131MODULE_DESCRIPTION("RME Digi96, Digi96/8, Digi96/8 PRO, Digi96/8 PST, "3232 "Digi96/8 PAD");3333MODULE_LICENSE("GPL");3434-MODULE_SUPPORTED_DEVICE("{{RME,Digi96},"3535- "{RME,Digi96/8},"3636- "{RME,Digi96/8 PRO},"3737- "{RME,Digi96/8 PST},"3838- "{RME,Digi96/8 PAD}}");39344035static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */4136static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-3
sound/pci/rme9652/hdsp.c
···4444MODULE_AUTHOR("Paul Davis <paul@linuxaudiosystems.com>, Marcus Andersson, Thomas Charbonnel <thomas@undata.org>");4545MODULE_DESCRIPTION("RME Hammerfall DSP");4646MODULE_LICENSE("GPL");4747-MODULE_SUPPORTED_DEVICE("{{RME Hammerfall-DSP},"4848- "{RME HDSP-9652},"4949- "{RME HDSP-9632}}");5047MODULE_FIRMWARE("rpm_firmware.bin");5148MODULE_FIRMWARE("multiface_firmware.bin");5249MODULE_FIRMWARE("multiface_firmware_rev11.bin");
-1
sound/pci/rme9652/hdspm.c
···165165);166166MODULE_DESCRIPTION("RME HDSPM");167167MODULE_LICENSE("GPL");168168-MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");169168170169/* --- Write registers. ---171170 These are defined as byte-offsets from the iobase value. */
-2
sound/pci/rme9652/rme9652.c
···3939MODULE_AUTHOR("Paul Davis <pbd@op.net>, Winfried Ritsch");4040MODULE_DESCRIPTION("RME Digi9652/Digi9636");4141MODULE_LICENSE("GPL");4242-MODULE_SUPPORTED_DEVICE("{{RME,Hammerfall},"4343- "{RME,Hammerfall-Light}}");44424543/* The Hammerfall has two sets of 24 ADAT + 2 S/PDIF channels, one for4644 capture, one for playback. Both the ADAT and S/PDIF channels appear
-1
sound/pci/sis7019.c
···2424MODULE_AUTHOR("David Dillow <dave@thedillows.org>");2525MODULE_DESCRIPTION("SiS7019");2626MODULE_LICENSE("GPL");2727-MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");28272928static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */3029static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
···3838MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");3939MODULE_DESCRIPTION("VIA VT82xx modem");4040MODULE_LICENSE("GPL");4141-MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C modem,pci}}");42414342static int index = -2; /* Exclude the first card */4443static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
-1
sound/pci/vx222/vx222.c
···2020MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");2121MODULE_DESCRIPTION("Digigram VX222 V2/Mic");2222MODULE_LICENSE("GPL");2323-MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}");24232524static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2625static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-6
sound/pci/ymfpci/ymfpci.c
···1717MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");1818MODULE_DESCRIPTION("Yamaha DS-1 PCI");1919MODULE_LICENSE("GPL");2020-MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF724},"2121- "{Yamaha,YMF724F},"2222- "{Yamaha,YMF740},"2323- "{Yamaha,YMF740C},"2424- "{Yamaha,YMF744},"2525- "{Yamaha,YMF754}}");26202721static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2822static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/pcmcia/pdaudiocf/pdaudiocf.c
···2222MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");2323MODULE_DESCRIPTION("Sound Core " CARD_NAME);2424MODULE_LICENSE("GPL");2525-MODULE_SUPPORTED_DEVICE("{{Sound Core," CARD_NAME "}}");26252726static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2827static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-4
sound/pcmcia/vx/vxpocket.c
···1717#include <sound/initval.h>1818#include <sound/tlv.h>19192020-/*2121- */2222-2320MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");2421MODULE_DESCRIPTION("Digigram VXPocket");2522MODULE_LICENSE("GPL");2626-MODULE_SUPPORTED_DEVICE("{{Digigram,VXPocket},{Digigram,VXPocket440}}");27232824static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */2925static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/ppc/powermac.c
···1818#define CHIP_NAME "PMac"19192020MODULE_DESCRIPTION("PowerMac");2121-MODULE_SUPPORTED_DEVICE("{{Apple,PowerMac}}");2221MODULE_LICENSE("GPL");23222423static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
···209209 case RT1015_VENDOR_ID:210210 case RT1015_DEVICE_ID:211211 case RT1015_PRO_ALT:212212+ case RT1015_MAN_I2C:212213 case RT1015_DAC3:213214 case RT1015_VBAT_TEST_OUT1:214215 case RT1015_VBAT_TEST_OUT2:···514513 msleep(300);515514 regmap_write(regmap, RT1015_PWR_STATE_CTRL, 0x0008);516515 regmap_write(regmap, RT1015_SYS_RST1, 0x05F5);516516+ regmap_write(regmap, RT1015_CLK_DET, 0x8000);517517518518 regcache_cache_bypass(regmap, false);519519 regcache_mark_dirty(regmap);
···555555556556 /* set tdm */557557 if (tdm_priv->bck_invert)558558- tdm_con |= 1 << BCK_INVERSE_SFT;558558+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON3,559559+ BCK_INVERSE_MASK_SFT,560560+ 0x1 << BCK_INVERSE_SFT);559561560562 if (tdm_priv->lck_invert)561563 tdm_con |= 1 << LRCK_INVERSE_SFT;
+5-3
sound/soc/mediatek/mt8192/mt8192-reg.h
···2121/*****************************************************************************2222 * R E G I S T E R D E F I N I T I O N2323 *****************************************************************************/2424+/* AUDIO_TOP_CON3 */2525+#define BCK_INVERSE_SFT 32626+#define BCK_INVERSE_MASK 0x12727+#define BCK_INVERSE_MASK_SFT (0x1 << 3)2828+2429/* AFE_DAC_CON0 */2530#define VUL12_ON_SFT 312631#define VUL12_ON_MASK 0x1···20842079#define TDM_EN_SFT 020852080#define TDM_EN_MASK 0x120862081#define TDM_EN_MASK_SFT (0x1 << 0)20872087-#define BCK_INVERSE_SFT 120882088-#define BCK_INVERSE_MASK 0x120892089-#define BCK_INVERSE_MASK_SFT (0x1 << 1)20902082#define LRCK_INVERSE_SFT 220912083#define LRCK_INVERSE_MASK 0x120922084#define LRCK_INVERSE_MASK_SFT (0x1 << 2)
+1-1
sound/soc/qcom/lpass-cpu.c
···739739740740 for_each_child_of_node(dev->of_node, node) {741741 ret = of_property_read_u32(node, "reg", &id);742742- if (ret || id < 0 || id >= data->variant->num_dai) {742742+ if (ret || id < 0) {743743 dev_err(dev, "valid dai id not found: %d\n", ret);744744 continue;745745 }
···3131#include <linux/of.h>3232#include <linux/of_graph.h>3333#include <linux/dmi.h>3434+#include <linux/acpi.h>3435#include <sound/core.h>3536#include <sound/pcm.h>3637#include <sound/pcm_params.h>···1573157215741573 if (card->long_name)15751574 return 0; /* long name already set by driver or from DMI */15751575+15761576+ if (!is_acpi_device_node(card->dev->fwnode))15771577+ return 0;1576157815771579 /* make up dmi long name as: vendor-product-version-board */15781580 vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
···897897/* dsp_unmap: not currently used */898898 iounmap(sdev->bar[HDA_DSP_BAR]);899899hdac_bus_unmap:900900+ platform_device_unregister(hdev->dmic_dev);900901 iounmap(bus->remap_addr);901902 hda_codec_i915_exit(sdev);902903err:
-1
sound/sparc/amd7930.c
···6262MODULE_AUTHOR("Thomas K. Dyas and David S. Miller");6363MODULE_DESCRIPTION("Sun AMD7930");6464MODULE_LICENSE("GPL");6565-MODULE_SUPPORTED_DEVICE("{{Sun,AMD7930}}");66656766/* Device register layout. */6867
-1
sound/sparc/cs4231.c
···5252MODULE_AUTHOR("Jaroslav Kysela, Derrick J. Brashear and David S. Miller");5353MODULE_DESCRIPTION("Sun CS4231");5454MODULE_LICENSE("GPL");5555-MODULE_SUPPORTED_DEVICE("{{Sun,CS4231}}");56555756#ifdef SBUS_SUPPORT5857struct sbus_dma_info {
-1
sound/sparc/dbri.c
···7676MODULE_AUTHOR("Rudolf Koenig, Brent Baccala and Martin Habets");7777MODULE_DESCRIPTION("Sun DBRI");7878MODULE_LICENSE("GPL");7979-MODULE_SUPPORTED_DEVICE("{{Sun,DBRI}}");80798180static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */8281static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-1
sound/usb/6fire/chip.c
···2626MODULE_AUTHOR("Torsten Schenk <torsten.schenk@zoho.com>");2727MODULE_DESCRIPTION("TerraTec DMX 6Fire USB audio driver");2828MODULE_LICENSE("GPL v2");2929-MODULE_SUPPORTED_DEVICE("{{TerraTec,DMX 6Fire USB}}");30293130static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */3231static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
-14
sound/usb/caiaq/device.c
···2626MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");2727MODULE_DESCRIPTION("caiaq USB audio");2828MODULE_LICENSE("GPL");2929-MODULE_SUPPORTED_DEVICE("{{Native Instruments,RigKontrol2},"3030- "{Native Instruments,RigKontrol3},"3131- "{Native Instruments,Kore Controller},"3232- "{Native Instruments,Kore Controller 2},"3333- "{Native Instruments,Audio Kontrol 1},"3434- "{Native Instruments,Audio 2 DJ},"3535- "{Native Instruments,Audio 4 DJ},"3636- "{Native Instruments,Audio 8 DJ},"3737- "{Native Instruments,Traktor Audio 2},"3838- "{Native Instruments,Session I/O},"3939- "{Native Instruments,GuitarRig mobile},"4040- "{Native Instruments,Traktor Kontrol X1},"4141- "{Native Instruments,Traktor Kontrol S4},"4242- "{Native Instruments,Maschine Controller}}");43294430static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */4531static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
-2
sound/usb/card.c
···5858MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");5959MODULE_DESCRIPTION("USB Audio");6060MODULE_LICENSE("GPL");6161-MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}");6262-63616462static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */6563static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-17
sound/usb/hiface/chip.c
···2121MODULE_AUTHOR("Antonio Ospite <ao2@amarulasolutions.com>");2222MODULE_DESCRIPTION("M2Tech hiFace USB-SPDIF audio driver");2323MODULE_LICENSE("GPL v2");2424-MODULE_SUPPORTED_DEVICE("{{M2Tech,Young},"2525- "{M2Tech,hiFace},"2626- "{M2Tech,North Star},"2727- "{M2Tech,W4S Young},"2828- "{M2Tech,Corrson},"2929- "{M2Tech,AUDIA},"3030- "{M2Tech,SL Audio},"3131- "{M2Tech,Empirical},"3232- "{M2Tech,Rockna},"3333- "{M2Tech,Pathos},"3434- "{M2Tech,Metronome},"3535- "{M2Tech,CAD},"3636- "{M2Tech,Audio Esclusive},"3737- "{M2Tech,Rotel},"3838- "{M2Tech,Eeaudio},"3939- "{The Chord Company,CHORD},"4040- "{AVA Group A/S,Vitus}}");41244225static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */4326static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
-1
sound/usb/misc/ua101.c
···1919MODULE_DESCRIPTION("Edirol UA-101/1000 driver");2020MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");2121MODULE_LICENSE("GPL v2");2222-MODULE_SUPPORTED_DEVICE("{{Edirol,UA-101},{Edirol,UA-1000}}");23222423/*2524 * Should not be lower than the minimum scheduling delay of the host
···137137MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>");138138MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2");139139MODULE_LICENSE("GPL");140140-MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604),"NAME_ALLCAPS"(0x8001)(0x8005)(0x8007)}}");141140142141static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */143142static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */