Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/ptp/Kconfig:
55c8fca1dae1 ("ptp_pch: Restore dependency on PCI")
e5f31552674e ("ethernet: fix PTP_1588_CLOCK dependencies")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1574 -1132
-41
Documentation/devicetree/bindings/iio/st,st-sensors.yaml
··· 152 152 maxItems: 1 153 153 st,drdy-int-pin: false 154 154 155 - - if: 156 - properties: 157 - compatible: 158 - enum: 159 - # Two intertial interrupts i.e. accelerometer/gyro interrupts 160 - - st,h3lis331dl-accel 161 - - st,l3g4200d-gyro 162 - - st,l3g4is-gyro 163 - - st,l3gd20-gyro 164 - - st,l3gd20h-gyro 165 - - st,lis2de12 166 - - st,lis2dw12 167 - - st,lis2hh12 168 - - st,lis2dh12-accel 169 - - st,lis331dl-accel 170 - - st,lis331dlh-accel 171 - - st,lis3de 172 - - st,lis3dh-accel 173 - - st,lis3dhh 174 - - st,lis3mdl-magn 175 - - st,lng2dm-accel 176 - - st,lps331ap-press 177 - - st,lsm303agr-accel 178 - - st,lsm303dlh-accel 179 - - st,lsm303dlhc-accel 180 - - st,lsm303dlm-accel 181 - - st,lsm330-accel 182 - - st,lsm330-gyro 183 - - st,lsm330d-accel 184 - - st,lsm330d-gyro 185 - - st,lsm330dl-accel 186 - - st,lsm330dl-gyro 187 - - st,lsm330dlc-accel 188 - - st,lsm330dlc-gyro 189 - - st,lsm9ds0-gyro 190 - - st,lsm9ds1-magn 191 - then: 192 - properties: 193 - interrupts: 194 - maxItems: 2 195 - 196 155 required: 197 156 - compatible 198 157 - reg
+1
Documentation/i2c/index.rst
··· 17 17 busses/index 18 18 i2c-topology 19 19 muxes/i2c-mux-gpio 20 + i2c-sysfs 20 21 21 22 Writing device drivers 22 23 ======================
+4 -4
Documentation/virt/kvm/locking.rst
··· 25 25 26 26 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock 27 27 28 - - kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock is 29 - taken inside kvm->arch.mmu_lock, and cannot be taken without already 30 - holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise 31 - there's no need to take kvm->arch.tdp_mmu_pages_lock at all). 28 + - kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and 29 + kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and 30 + cannot be taken without already holding kvm->arch.mmu_lock (typically with 31 + ``read_lock`` for the TDP MMU, thus the need for additional spinlocks). 32 32 33 33 Everything else is a leaf: no other lock is taken inside the critical 34 34 sections.
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 14 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc5 5 + EXTRAVERSION = -rc6 6 6 NAME = Opossums on Parade 7 7 8 8 # *DOCUMENTATION*
+8 -4
arch/arm64/kvm/arm.c
··· 94 94 kvm->arch.return_nisv_io_abort_to_user = true; 95 95 break; 96 96 case KVM_CAP_ARM_MTE: 97 - if (!system_supports_mte() || kvm->created_vcpus) 98 - return -EINVAL; 99 - r = 0; 100 - kvm->arch.mte_enabled = true; 97 + mutex_lock(&kvm->lock); 98 + if (!system_supports_mte() || kvm->created_vcpus) { 99 + r = -EINVAL; 100 + } else { 101 + r = 0; 102 + kvm->arch.mte_enabled = true; 103 + } 104 + mutex_unlock(&kvm->lock); 101 105 break; 102 106 default: 103 107 r = -EINVAL;
+1 -1
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 193 193 { 194 194 struct kvm_mem_range r1, r2; 195 195 196 - if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2)) 196 + if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2)) 197 197 return false; 198 198 if (r1.start != r2.start) 199 199 return false;
+3
arch/powerpc/include/asm/interrupt.h
··· 583 583 584 584 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException); 585 585 586 + /* irq.c */ 587 + DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ); 588 + 586 589 void __noreturn unrecoverable_exception(struct pt_regs *regs); 587 590 588 591 void replay_system_reset(void);
+1 -1
arch/powerpc/include/asm/irq.h
··· 52 52 extern void *hardirq_ctx[NR_CPUS]; 53 53 extern void *softirq_ctx[NR_CPUS]; 54 54 55 - extern void do_IRQ(struct pt_regs *regs); 55 + void __do_IRQ(struct pt_regs *regs); 56 56 extern void __init init_IRQ(void); 57 57 extern void __do_irq(struct pt_regs *regs); 58 58
+16
arch/powerpc/include/asm/ptrace.h
··· 70 70 unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */ 71 71 }; 72 72 #endif 73 + #if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE) 74 + struct { /* Must be a multiple of 16 bytes */ 75 + unsigned long mas0; 76 + unsigned long mas1; 77 + unsigned long mas2; 78 + unsigned long mas3; 79 + unsigned long mas6; 80 + unsigned long mas7; 81 + unsigned long srr0; 82 + unsigned long srr1; 83 + unsigned long csrr0; 84 + unsigned long csrr1; 85 + unsigned long dsrr0; 86 + unsigned long dsrr1; 87 + }; 88 + #endif 73 89 }; 74 90 #endif 75 91
+14 -17
arch/powerpc/kernel/asm-offsets.c
··· 309 309 STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr); 310 310 #endif 311 311 312 - #if defined(CONFIG_PPC32) 313 - #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 314 - DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); 315 - DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); 312 + #if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE) 313 + STACK_PT_REGS_OFFSET(MAS0, mas0); 316 314 /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */ 317 - DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); 318 - DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1)); 319 - DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2)); 320 - DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3)); 321 - DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6)); 322 - DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7)); 323 - DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0)); 324 - DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1)); 325 - DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0)); 326 - DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1)); 327 - DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0)); 328 - DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); 329 - #endif 315 + STACK_PT_REGS_OFFSET(MMUCR, mas0); 316 + STACK_PT_REGS_OFFSET(MAS1, mas1); 317 + STACK_PT_REGS_OFFSET(MAS2, mas2); 318 + STACK_PT_REGS_OFFSET(MAS3, mas3); 319 + STACK_PT_REGS_OFFSET(MAS6, mas6); 320 + STACK_PT_REGS_OFFSET(MAS7, mas7); 321 + STACK_PT_REGS_OFFSET(_SRR0, srr0); 322 + STACK_PT_REGS_OFFSET(_SRR1, srr1); 323 + STACK_PT_REGS_OFFSET(_CSRR0, csrr0); 324 + STACK_PT_REGS_OFFSET(_CSRR1, csrr1); 325 + STACK_PT_REGS_OFFSET(_DSRR0, dsrr0); 326 + STACK_PT_REGS_OFFSET(_DSRR1, dsrr1); 330 327 #endif 331 328 332 329 /* 
About the CPU features table */
+1 -1
arch/powerpc/kernel/head_book3s_32.S
··· 300 300 EXCEPTION_PROLOG_1 301 301 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1 302 302 prepare_transfer_to_handler 303 - lwz r5, _DSISR(r11) 303 + lwz r5, _DSISR(r1) 304 304 andis. r0, r5, DSISR_DABRMATCH@h 305 305 bne- 1f 306 306 bl do_page_fault
+3 -24
arch/powerpc/kernel/head_booke.h
··· 168 168 /* only on e500mc */ 169 169 #define DBG_STACK_BASE dbgirq_ctx 170 170 171 - #define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE) 172 - 173 171 #ifdef CONFIG_SMP 174 172 #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ 175 173 mfspr r8,SPRN_PIR; \ 176 174 slwi r8,r8,2; \ 177 175 addis r8,r8,level##_STACK_BASE@ha; \ 178 176 lwz r8,level##_STACK_BASE@l(r8); \ 179 - addi r8,r8,EXC_LVL_FRAME_OVERHEAD; 177 + addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE; 180 178 #else 181 179 #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ 182 180 lis r8,level##_STACK_BASE@ha; \ 183 181 lwz r8,level##_STACK_BASE@l(r8); \ 184 - addi r8,r8,EXC_LVL_FRAME_OVERHEAD; 182 + addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE; 185 183 #endif 186 184 187 185 /* ··· 206 208 mtmsr r11; \ 207 209 mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ 208 210 lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\ 209 - addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ 211 + addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\ 210 212 beq 1f; \ 211 213 /* COMING FROM USER MODE */ \ 212 214 stw r9,_CCR(r11); /* save CR */\ ··· 513 515 1: prepare_transfer_to_handler; \ 514 516 bl kernel_fp_unavailable_exception; \ 515 517 b interrupt_return 516 - 517 - #else /* __ASSEMBLY__ */ 518 - struct exception_regs { 519 - unsigned long mas0; 520 - unsigned long mas1; 521 - unsigned long mas2; 522 - unsigned long mas3; 523 - unsigned long mas6; 524 - unsigned long mas7; 525 - unsigned long srr0; 526 - unsigned long srr1; 527 - unsigned long csrr0; 528 - unsigned long csrr1; 529 - unsigned long dsrr0; 530 - unsigned long dsrr1; 531 - }; 532 - 533 - /* ensure this structure is always sized to a multiple of the stack alignment */ 534 - #define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16) 535 518 536 519 #endif /* __ASSEMBLY__ */ 537 520 #endif /* __HEAD_BOOKE_H__ */
+6 -1
arch/powerpc/kernel/irq.c
··· 750 750 trace_irq_exit(regs); 751 751 } 752 752 753 - DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) 753 + void __do_IRQ(struct pt_regs *regs) 754 754 { 755 755 struct pt_regs *old_regs = set_irq_regs(regs); 756 756 void *cursp, *irqsp, *sirqsp; ··· 772 772 call_do_irq(regs, irqsp); 773 773 774 774 set_irq_regs(old_regs); 775 + } 776 + 777 + DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) 778 + { 779 + __do_IRQ(regs); 775 780 } 776 781 777 782 static void *__init alloc_vm_stack(void)
+2 -1
arch/powerpc/kernel/kprobes.c
··· 292 292 if (user_mode(regs)) 293 293 return 0; 294 294 295 - if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)) 295 + if (!IS_ENABLED(CONFIG_BOOKE) && 296 + (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))) 296 297 return 0; 297 298 298 299 /*
+1 -1
arch/powerpc/kernel/sysfs.c
··· 1167 1167 * CPU. For instance, the boot cpu might never be valid 1168 1168 * for hotplugging. 1169 1169 */ 1170 - if (smp_ops->cpu_offline_self) 1170 + if (smp_ops && smp_ops->cpu_offline_self) 1171 1171 c->hotpluggable = 1; 1172 1172 #endif 1173 1173
+1 -1
arch/powerpc/kernel/time.c
··· 586 586 587 587 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) 588 588 if (atomic_read(&ppc_n_lost_interrupts) != 0) 589 - do_IRQ(regs); 589 + __do_IRQ(regs); 590 590 #endif 591 591 592 592 old_regs = set_irq_regs(regs);
+7 -2
arch/powerpc/kernel/traps.c
··· 1104 1104 _exception(SIGTRAP, regs, TRAP_UNK, 0); 1105 1105 } 1106 1106 1107 - DEFINE_INTERRUPT_HANDLER(single_step_exception) 1107 + static void __single_step_exception(struct pt_regs *regs) 1108 1108 { 1109 1109 clear_single_step(regs); 1110 1110 clear_br_trace(regs); ··· 1121 1121 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1122 1122 } 1123 1123 1124 + DEFINE_INTERRUPT_HANDLER(single_step_exception) 1125 + { 1126 + __single_step_exception(regs); 1127 + } 1128 + 1124 1129 /* 1125 1130 * After we have successfully emulated an instruction, we have to 1126 1131 * check if the instruction was being single-stepped, and if so, ··· 1135 1130 static void emulate_single_step(struct pt_regs *regs) 1136 1131 { 1137 1132 if (single_stepping(regs)) 1138 - single_step_exception(regs); 1133 + __single_step_exception(regs); 1139 1134 } 1140 1135 1141 1136 static inline int __parse_fpscr(unsigned long fpscr)
+3 -2
arch/powerpc/platforms/pseries/setup.c
··· 539 539 * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if 540 540 * H_CPU_BEHAV_FAVOUR_SECURITY is. 541 541 */ 542 - if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) 542 + if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) { 543 543 security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); 544 - else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H) 544 + pseries_security_flavor = 0; 545 + } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H) 545 546 pseries_security_flavor = 1; 546 547 else 547 548 pseries_security_flavor = 2;
+24 -11
arch/powerpc/sysdev/xive/common.c
··· 67 67 static struct xive_ipi_desc { 68 68 unsigned int irq; 69 69 char name[16]; 70 + atomic_t started; 70 71 } *xive_ipis; 71 72 72 73 /* ··· 1121 1120 .alloc = xive_ipi_irq_domain_alloc, 1122 1121 }; 1123 1122 1124 - static int __init xive_request_ipi(void) 1123 + static int __init xive_init_ipis(void) 1125 1124 { 1126 1125 struct fwnode_handle *fwnode; 1127 1126 struct irq_domain *ipi_domain; ··· 1145 1144 struct xive_ipi_desc *xid = &xive_ipis[node]; 1146 1145 struct xive_ipi_alloc_info info = { node }; 1147 1146 1148 - /* Skip nodes without CPUs */ 1149 - if (cpumask_empty(cpumask_of_node(node))) 1150 - continue; 1151 - 1152 1147 /* 1153 1148 * Map one IPI interrupt per node for all cpus of that node. 1154 1149 * Since the HW interrupt number doesn't have any meaning, ··· 1156 1159 xid->irq = ret; 1157 1160 1158 1161 snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); 1159 - 1160 - ret = request_irq(xid->irq, xive_muxed_ipi_action, 1161 - IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL); 1162 - 1163 - WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); 1164 1162 } 1165 1163 1166 1164 return ret; ··· 1167 1175 out_free_fwnode: 1168 1176 irq_domain_free_fwnode(fwnode); 1169 1177 out: 1178 + return ret; 1179 + } 1180 + 1181 + static int __init xive_request_ipi(unsigned int cpu) 1182 + { 1183 + struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)]; 1184 + int ret; 1185 + 1186 + if (atomic_inc_return(&xid->started) > 1) 1187 + return 0; 1188 + 1189 + ret = request_irq(xid->irq, xive_muxed_ipi_action, 1190 + IRQF_PERCPU | IRQF_NO_THREAD, 1191 + xid->name, NULL); 1192 + 1193 + WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); 1170 1194 return ret; 1171 1195 } 1172 1196 ··· 1199 1191 /* Check if we are already setup */ 1200 1192 if (xc->hw_ipi != XIVE_BAD_IRQ) 1201 1193 return 0; 1194 + 1195 + /* Register the IPI */ 1196 + xive_request_ipi(cpu); 1202 1197 1203 1198 /* Grab an IPI from the backend, this will populate xc->hw_ipi 
*/ 1204 1199 if (xive_ops->get_ipi(cpu, xc)) ··· 1242 1231 if (xc->hw_ipi == XIVE_BAD_IRQ) 1243 1232 return; 1244 1233 1234 + /* TODO: clear IPI mapping */ 1235 + 1245 1236 /* Mask the IPI */ 1246 1237 xive_do_source_set_mask(&xc->ipi_data, true); 1247 1238 ··· 1266 1253 smp_ops->cause_ipi = xive_cause_ipi; 1267 1254 1268 1255 /* Register the IPI */ 1269 - xive_request_ipi(); 1256 + xive_init_ipis(); 1270 1257 1271 1258 /* Allocate and setup IPI for the boot CPU */ 1272 1259 xive_setup_cpu_ipi(smp_processor_id());
+1 -1
arch/riscv/kernel/Makefile
··· 11 11 CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) 12 12 13 13 ifdef CONFIG_KEXEC 14 - AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax 14 + AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax) 15 15 endif 16 16 17 17 extra-y += head.o
+1 -1
arch/riscv/mm/init.c
··· 197 197 * if end of dram is equal to maximum addressable memory. For 64-bit 198 198 * kernel, this problem can't happen here as the end of the virtual 199 199 * address space is occupied by the kernel mapping then this check must 200 - * be done in create_kernel_page_table. 200 + * be done as soon as the kernel mapping base address is determined. 201 201 */ 202 202 max_mapped_addr = __pa(~(ulong)0); 203 203 if (max_mapped_addr == (phys_ram_end - 1))
+7
arch/x86/include/asm/kvm_host.h
··· 1038 1038 struct list_head lpage_disallowed_mmu_pages; 1039 1039 struct kvm_page_track_notifier_node mmu_sp_tracker; 1040 1040 struct kvm_page_track_notifier_head track_notifier_head; 1041 + /* 1042 + * Protects marking pages unsync during page faults, as TDP MMU page 1043 + * faults only take mmu_lock for read. For simplicity, the unsync 1044 + * pages lock is always taken when marking pages unsync regardless of 1045 + * whether mmu_lock is held for read or write. 1046 + */ 1047 + spinlock_t mmu_unsync_pages_lock; 1041 1048 1042 1049 struct list_head assigned_dev_head; 1043 1050 struct iommu_domain *iommu_domain;
+2
arch/x86/include/asm/svm.h
··· 184 184 #define V_IGN_TPR_SHIFT 20 185 185 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) 186 186 187 + #define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK) 188 + 187 189 #define V_INTR_MASKING_SHIFT 24 188 190 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) 189 191
+4 -2
arch/x86/kernel/apic/io_apic.c
··· 1986 1986 .irq_set_affinity = ioapic_set_affinity, 1987 1987 .irq_retrigger = irq_chip_retrigger_hierarchy, 1988 1988 .irq_get_irqchip_state = ioapic_irq_get_chip_state, 1989 - .flags = IRQCHIP_SKIP_SET_WAKE, 1989 + .flags = IRQCHIP_SKIP_SET_WAKE | 1990 + IRQCHIP_AFFINITY_PRE_STARTUP, 1990 1991 }; 1991 1992 1992 1993 static struct irq_chip ioapic_ir_chip __read_mostly = { ··· 2000 1999 .irq_set_affinity = ioapic_set_affinity, 2001 2000 .irq_retrigger = irq_chip_retrigger_hierarchy, 2002 2001 .irq_get_irqchip_state = ioapic_irq_get_chip_state, 2003 - .flags = IRQCHIP_SKIP_SET_WAKE, 2002 + .flags = IRQCHIP_SKIP_SET_WAKE | 2003 + IRQCHIP_AFFINITY_PRE_STARTUP, 2004 2004 }; 2005 2005 2006 2006 static inline void init_IO_APIC_traps(void)
+8 -3
arch/x86/kernel/apic/msi.c
··· 58 58 * The quirk bit is not set in this case. 59 59 * - The new vector is the same as the old vector 60 60 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up) 61 + * - The interrupt is not yet started up 61 62 * - The new destination CPU is the same as the old destination CPU 62 63 */ 63 64 if (!irqd_msi_nomask_quirk(irqd) || 64 65 cfg->vector == old_cfg.vector || 65 66 old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR || 67 + !irqd_is_started(irqd) || 66 68 cfg->dest_apicid == old_cfg.dest_apicid) { 67 69 irq_msi_update_msg(irqd, cfg); 68 70 return ret; ··· 152 150 .irq_ack = irq_chip_ack_parent, 153 151 .irq_retrigger = irq_chip_retrigger_hierarchy, 154 152 .irq_set_affinity = msi_set_affinity, 155 - .flags = IRQCHIP_SKIP_SET_WAKE, 153 + .flags = IRQCHIP_SKIP_SET_WAKE | 154 + IRQCHIP_AFFINITY_PRE_STARTUP, 156 155 }; 157 156 158 157 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, ··· 222 219 .irq_mask = pci_msi_mask_irq, 223 220 .irq_ack = irq_chip_ack_parent, 224 221 .irq_retrigger = irq_chip_retrigger_hierarchy, 225 - .flags = IRQCHIP_SKIP_SET_WAKE, 222 + .flags = IRQCHIP_SKIP_SET_WAKE | 223 + IRQCHIP_AFFINITY_PRE_STARTUP, 226 224 }; 227 225 228 226 static struct msi_domain_info pci_msi_ir_domain_info = { ··· 277 273 .irq_retrigger = irq_chip_retrigger_hierarchy, 278 274 .irq_compose_msi_msg = dmar_msi_compose_msg, 279 275 .irq_write_msi_msg = dmar_msi_write_msg, 280 - .flags = IRQCHIP_SKIP_SET_WAKE, 276 + .flags = IRQCHIP_SKIP_SET_WAKE | 277 + IRQCHIP_AFFINITY_PRE_STARTUP, 281 278 }; 282 279 283 280 static int dmar_msi_init(struct irq_domain *domain,
+13 -14
arch/x86/kernel/cpu/resctrl/monitor.c
··· 285 285 return chunks >>= shift; 286 286 } 287 287 288 - static int __mon_event_count(u32 rmid, struct rmid_read *rr) 288 + static u64 __mon_event_count(u32 rmid, struct rmid_read *rr) 289 289 { 290 290 struct mbm_state *m; 291 291 u64 chunks, tval; 292 292 293 293 tval = __rmid_read(rmid, rr->evtid); 294 294 if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) { 295 - rr->val = tval; 296 - return -EINVAL; 295 + return tval; 297 296 } 298 297 switch (rr->evtid) { 299 298 case QOS_L3_OCCUP_EVENT_ID: ··· 304 305 case QOS_L3_MBM_LOCAL_EVENT_ID: 305 306 m = &rr->d->mbm_local[rmid]; 306 307 break; 307 - default: 308 - /* 309 - * Code would never reach here because 310 - * an invalid event id would fail the __rmid_read. 311 - */ 312 - return -EINVAL; 313 308 } 314 309 315 310 if (rr->first) { ··· 354 361 struct rdtgroup *rdtgrp, *entry; 355 362 struct rmid_read *rr = info; 356 363 struct list_head *head; 364 + u64 ret_val; 357 365 358 366 rdtgrp = rr->rgrp; 359 367 360 - if (__mon_event_count(rdtgrp->mon.rmid, rr)) 361 - return; 368 + ret_val = __mon_event_count(rdtgrp->mon.rmid, rr); 362 369 363 370 /* 364 - * For Ctrl groups read data from child monitor groups. 371 + * For Ctrl groups read data from child monitor groups and 372 + * add them together. Count events which are read successfully. 373 + * Discard the rmid_read's reporting errors. 365 374 */ 366 375 head = &rdtgrp->mon.crdtgrp_list; 367 376 368 377 if (rdtgrp->type == RDTCTRL_GROUP) { 369 378 list_for_each_entry(entry, head, mon.crdtgrp_list) { 370 - if (__mon_event_count(entry->mon.rmid, rr)) 371 - return; 379 + if (__mon_event_count(entry->mon.rmid, rr) == 0) 380 + ret_val = 0; 372 381 } 373 382 } 383 + 384 + /* Report error if none of rmid_reads are successful */ 385 + if (ret_val) 386 + rr->val = ret_val; 374 387 } 375 388 376 389 /*
+1 -1
arch/x86/kernel/hpet.c
··· 508 508 .irq_set_affinity = msi_domain_set_affinity, 509 509 .irq_retrigger = irq_chip_retrigger_hierarchy, 510 510 .irq_write_msi_msg = hpet_msi_write_msg, 511 - .flags = IRQCHIP_SKIP_SET_WAKE, 511 + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, 512 512 }; 513 513 514 514 static int hpet_msi_init(struct irq_domain *domain,
+1 -27
arch/x86/kvm/cpuid.c
··· 208 208 kvm_mmu_after_set_cpuid(vcpu); 209 209 } 210 210 211 - static int is_efer_nx(void) 212 - { 213 - return host_efer & EFER_NX; 214 - } 215 - 216 - static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) 217 - { 218 - int i; 219 - struct kvm_cpuid_entry2 *e, *entry; 220 - 221 - entry = NULL; 222 - for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { 223 - e = &vcpu->arch.cpuid_entries[i]; 224 - if (e->function == 0x80000001) { 225 - entry = e; 226 - break; 227 - } 228 - } 229 - if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) { 230 - cpuid_entry_clear(entry, X86_FEATURE_NX); 231 - printk(KERN_INFO "kvm: guest NX capability removed\n"); 232 - } 233 - } 234 - 235 211 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) 236 212 { 237 213 struct kvm_cpuid_entry2 *best; ··· 278 302 vcpu->arch.cpuid_entries = e2; 279 303 vcpu->arch.cpuid_nent = cpuid->nent; 280 304 281 - cpuid_fix_nx_cap(vcpu); 282 305 kvm_update_cpuid_runtime(vcpu); 283 306 kvm_vcpu_after_set_cpuid(vcpu); 284 307 ··· 376 401 377 402 void kvm_set_cpu_caps(void) 378 403 { 379 - unsigned int f_nx = is_efer_nx() ? F(NX) : 0; 380 404 #ifdef CONFIG_X86_64 381 405 unsigned int f_gbpages = F(GBPAGES); 382 406 unsigned int f_lm = F(LM); ··· 489 515 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | 490 516 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | 491 517 F(PAT) | F(PSE36) | 0 /* Reserved */ | 492 - f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | 518 + F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | 493 519 F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) | 494 520 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW) 495 521 );
+1 -1
arch/x86/kvm/hyperv.c
··· 1933 1933 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu) 1934 1934 { 1935 1935 struct kvm_cpuid_entry2 *entry; 1936 - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 1936 + struct kvm_vcpu_hv *hv_vcpu; 1937 1937 1938 1938 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0); 1939 1939 if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
+28
arch/x86/kvm/mmu/mmu.c
··· 2535 2535 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) 2536 2536 { 2537 2537 struct kvm_mmu_page *sp; 2538 + bool locked = false; 2538 2539 2539 2540 /* 2540 2541 * Force write-protection if the page is being tracked. Note, the page ··· 2558 2557 if (sp->unsync) 2559 2558 continue; 2560 2559 2560 + /* 2561 + * TDP MMU page faults require an additional spinlock as they 2562 + * run with mmu_lock held for read, not write, and the unsync 2563 + * logic is not thread safe. Take the spinklock regardless of 2564 + * the MMU type to avoid extra conditionals/parameters, there's 2565 + * no meaningful penalty if mmu_lock is held for write. 2566 + */ 2567 + if (!locked) { 2568 + locked = true; 2569 + spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock); 2570 + 2571 + /* 2572 + * Recheck after taking the spinlock, a different vCPU 2573 + * may have since marked the page unsync. A false 2574 + * positive on the unprotected check above is not 2575 + * possible as clearing sp->unsync _must_ hold mmu_lock 2576 + * for write, i.e. unsync cannot transition from 0->1 2577 + * while this CPU holds mmu_lock for read (or write). 2578 + */ 2579 + if (READ_ONCE(sp->unsync)) 2580 + continue; 2581 + } 2582 + 2561 2583 WARN_ON(sp->role.level != PG_LEVEL_4K); 2562 2584 kvm_unsync_page(vcpu, sp); 2563 2585 } 2586 + if (locked) 2587 + spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock); 2564 2588 2565 2589 /* 2566 2590 * We need to ensure that the marking of unsync pages is visible ··· 5562 5536 void kvm_mmu_init_vm(struct kvm *kvm) 5563 5537 { 5564 5538 struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; 5539 + 5540 + spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); 5565 5541 5566 5542 if (!kvm_mmu_init_tdp_mmu(kvm)) 5567 5543 /*
+24 -11
arch/x86/kvm/mmu/tdp_mmu.c
··· 43 43 if (!kvm->arch.tdp_mmu_enabled) 44 44 return; 45 45 46 + WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages)); 46 47 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); 47 48 48 49 /* ··· 82 81 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, 83 82 bool shared) 84 83 { 85 - gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); 86 - 87 84 kvm_lockdep_assert_mmu_lock_held(kvm, shared); 88 85 89 86 if (!refcount_dec_and_test(&root->tdp_mmu_root_count)) ··· 93 94 list_del_rcu(&root->link); 94 95 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); 95 96 96 - zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared); 97 + zap_gfn_range(kvm, root, 0, -1ull, false, false, shared); 97 98 98 99 call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback); 99 100 } ··· 723 724 gfn_t start, gfn_t end, bool can_yield, bool flush, 724 725 bool shared) 725 726 { 727 + gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT); 728 + bool zap_all = (start == 0 && end >= max_gfn_host); 726 729 struct tdp_iter iter; 730 + 731 + /* 732 + * No need to try to step down in the iterator when zapping all SPTEs, 733 + * zapping the top-level non-leaf SPTEs will recurse on their children. 734 + */ 735 + int min_level = zap_all ? root->role.level : PG_LEVEL_4K; 736 + 737 + /* 738 + * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will 739 + * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF, 740 + * and so KVM will never install a SPTE for such addresses. 
741 + */ 742 + end = min(end, max_gfn_host); 727 743 728 744 kvm_lockdep_assert_mmu_lock_held(kvm, shared); 729 745 730 746 rcu_read_lock(); 731 747 732 - tdp_root_for_each_pte(iter, root, start, end) { 748 + for_each_tdp_pte_min_level(iter, root->spt, root->role.level, 749 + min_level, start, end) { 733 750 retry: 734 751 if (can_yield && 735 752 tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) { ··· 759 744 /* 760 745 * If this is a non-last-level SPTE that covers a larger range 761 746 * than should be zapped, continue, and zap the mappings at a 762 - * lower level. 747 + * lower level, except when zapping all SPTEs. 763 748 */ 764 - if ((iter.gfn < start || 749 + if (!zap_all && 750 + (iter.gfn < start || 765 751 iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) && 766 752 !is_last_spte(iter.old_spte, iter.level)) 767 753 continue; ··· 810 794 811 795 void kvm_tdp_mmu_zap_all(struct kvm *kvm) 812 796 { 813 - gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); 814 797 bool flush = false; 815 798 int i; 816 799 817 800 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 818 - flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, 801 + flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, 819 802 flush, false); 820 803 821 804 if (flush) ··· 853 838 */ 854 839 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) 855 840 { 856 - gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); 857 841 struct kvm_mmu_page *next_root; 858 842 struct kvm_mmu_page *root; 859 843 bool flush = false; ··· 868 854 869 855 rcu_read_unlock(); 870 856 871 - flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush, 872 - true); 857 + flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true); 873 858 874 859 /* 875 860 * Put the reference acquired in
+10 -3
arch/x86/kvm/svm/nested.c
··· 158 158 /* If SMI is not intercepted, ignore guest SMI intercept as well */ 159 159 if (!intercept_smi) 160 160 vmcb_clr_intercept(c, INTERCEPT_SMI); 161 + 162 + vmcb_set_intercept(c, INTERCEPT_VMLOAD); 163 + vmcb_set_intercept(c, INTERCEPT_VMSAVE); 161 164 } 162 165 163 166 static void copy_vmcb_control_area(struct vmcb_control_area *dst, ··· 506 503 507 504 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) 508 505 { 509 - const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK; 506 + const u32 int_ctl_vmcb01_bits = 507 + V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK; 508 + 509 + const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK; 510 + 510 511 struct kvm_vcpu *vcpu = &svm->vcpu; 511 512 512 513 /* ··· 542 535 vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset; 543 536 544 537 svm->vmcb->control.int_ctl = 545 - (svm->nested.ctl.int_ctl & ~mask) | 546 - (svm->vmcb01.ptr->control.int_ctl & mask); 538 + (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | 539 + (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits); 547 540 548 541 svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext; 549 542 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
+5 -4
arch/x86/kvm/svm/svm.c
··· 1589 1589 1590 1590 static void svm_clear_vintr(struct vcpu_svm *svm) 1591 1591 { 1592 - const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK; 1593 1592 svm_clr_intercept(svm, INTERCEPT_VINTR); 1594 1593 1595 1594 /* Drop int_ctl fields related to VINTR injection. */ 1596 - svm->vmcb->control.int_ctl &= mask; 1595 + svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; 1597 1596 if (is_guest_mode(&svm->vcpu)) { 1598 - svm->vmcb01.ptr->control.int_ctl &= mask; 1597 + svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; 1599 1598 1600 1599 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != 1601 1600 (svm->nested.ctl.int_ctl & V_TPR_MASK)); 1602 - svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask; 1601 + 1602 + svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & 1603 + V_IRQ_INJECTION_BITS_MASK; 1603 1604 } 1604 1605 1605 1606 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
+43 -13
arch/x86/kvm/vmx/nested.c
··· 330 330 vcpu_put(vcpu); 331 331 } 332 332 333 + #define EPTP_PA_MASK GENMASK_ULL(51, 12) 334 + 335 + static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) 336 + { 337 + return VALID_PAGE(root_hpa) && 338 + ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK)); 339 + } 340 + 341 + static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp, 342 + gpa_t addr) 343 + { 344 + uint i; 345 + struct kvm_mmu_root_info *cached_root; 346 + 347 + WARN_ON_ONCE(!mmu_is_nested(vcpu)); 348 + 349 + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 350 + cached_root = &vcpu->arch.mmu->prev_roots[i]; 351 + 352 + if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd, 353 + eptp)) 354 + vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa); 355 + } 356 + } 357 + 333 358 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, 334 359 struct x86_exception *fault) 335 360 { ··· 367 342 vm_exit_reason = EXIT_REASON_PML_FULL; 368 343 vmx->nested.pml_full = false; 369 344 exit_qualification &= INTR_INFO_UNBLOCK_NMI; 370 - } else if (fault->error_code & PFERR_RSVD_MASK) 371 - vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; 372 - else 373 - vm_exit_reason = EXIT_REASON_EPT_VIOLATION; 345 + } else { 346 + if (fault->error_code & PFERR_RSVD_MASK) 347 + vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; 348 + else 349 + vm_exit_reason = EXIT_REASON_EPT_VIOLATION; 350 + 351 + /* 352 + * Although the caller (kvm_inject_emulated_page_fault) would 353 + * have already synced the faulting address in the shadow EPT 354 + * tables for the current EPTP12, we also need to sync it for 355 + * any other cached EPTP02s based on the same EP4TA, since the 356 + * TLB associates mappings to the EP4TA rather than the full EPTP. 
357 + */ 358 + nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer, 359 + fault->address); 360 + } 374 361 375 362 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); 376 363 vmcs12->guest_physical_address = fault->address; ··· 5362 5325 return nested_vmx_succeed(vcpu); 5363 5326 } 5364 5327 5365 - #define EPTP_PA_MASK GENMASK_ULL(51, 12) 5366 - 5367 - static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) 5368 - { 5369 - return VALID_PAGE(root_hpa) && 5370 - ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK)); 5371 - } 5372 - 5373 5328 /* Emulate the INVEPT instruction */ 5374 5329 static int handle_invept(struct kvm_vcpu *vcpu) 5375 5330 { ··· 5855 5826 if (is_nmi(intr_info)) 5856 5827 return true; 5857 5828 else if (is_page_fault(intr_info)) 5858 - return vcpu->arch.apf.host_apf_flags || !enable_ept; 5829 + return vcpu->arch.apf.host_apf_flags || 5830 + vmx_need_pf_intercept(vcpu); 5859 5831 else if (is_debug(intr_info) && 5860 5832 vcpu->guest_debug & 5861 5833 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+1 -1
arch/x86/kvm/vmx/vmx.h
··· 522 522 523 523 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) 524 524 { 525 - return vmx->secondary_exec_control & 525 + return secondary_exec_controls_get(vmx) & 526 526 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; 527 527 } 528 528
+1
arch/x86/tools/chkobjdump.awk
··· 10 10 11 11 /^GNU objdump/ { 12 12 verstr = "" 13 + gsub(/\(.*\)/, ""); 13 14 for (i = 3; i <= NF; i++) 14 15 if (match($(i), "^[0-9]")) { 15 16 verstr = $(i);
-6
block/Kconfig.iosched
··· 9 9 help 10 10 MQ version of the deadline IO scheduler. 11 11 12 - config MQ_IOSCHED_DEADLINE_CGROUP 13 - tristate 14 - default y 15 - depends on MQ_IOSCHED_DEADLINE 16 - depends on BLK_CGROUP 17 - 18 12 config MQ_IOSCHED_KYBER 19 13 tristate "Kyber I/O scheduler" 20 14 default y
-2
block/Makefile
··· 22 22 obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o 23 23 obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o 24 24 obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o 25 - mq-deadline-y += mq-deadline-main.o 26 - mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o 27 25 obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o 28 26 bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o 29 27 obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
+4 -4
block/blk-iocost.c
··· 3061 3061 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX) 3062 3062 return -EINVAL; 3063 3063 3064 - spin_lock(&blkcg->lock); 3064 + spin_lock_irq(&blkcg->lock); 3065 3065 iocc->dfl_weight = v * WEIGHT_ONE; 3066 3066 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { 3067 3067 struct ioc_gq *iocg = blkg_to_iocg(blkg); 3068 3068 3069 3069 if (iocg) { 3070 - spin_lock_irq(&iocg->ioc->lock); 3070 + spin_lock(&iocg->ioc->lock); 3071 3071 ioc_now(iocg->ioc, &now); 3072 3072 weight_updated(iocg, &now); 3073 - spin_unlock_irq(&iocg->ioc->lock); 3073 + spin_unlock(&iocg->ioc->lock); 3074 3074 } 3075 3075 } 3076 - spin_unlock(&blkcg->lock); 3076 + spin_unlock_irq(&blkcg->lock); 3077 3077 3078 3078 return nbytes; 3079 3079 }
+4 -2
block/blk-mq.c
··· 2994 2994 int i; 2995 2995 2996 2996 queue_for_each_hw_ctx(q, hctx, i) { 2997 - if (shared) 2997 + if (shared) { 2998 2998 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 2999 - else 2999 + } else { 3000 + blk_mq_tag_idle(hctx); 3000 3001 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3002 + } 3001 3003 } 3002 3004 } 3003 3005
-126
block/mq-deadline-cgroup.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - 3 - #include <linux/blk-cgroup.h> 4 - #include <linux/ioprio.h> 5 - 6 - #include "mq-deadline-cgroup.h" 7 - 8 - static struct blkcg_policy dd_blkcg_policy; 9 - 10 - static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp) 11 - { 12 - struct dd_blkcg *pd; 13 - 14 - pd = kzalloc(sizeof(*pd), gfp); 15 - if (!pd) 16 - return NULL; 17 - pd->stats = alloc_percpu_gfp(typeof(*pd->stats), 18 - GFP_KERNEL | __GFP_ZERO); 19 - if (!pd->stats) { 20 - kfree(pd); 21 - return NULL; 22 - } 23 - return &pd->cpd; 24 - } 25 - 26 - static void dd_cpd_free(struct blkcg_policy_data *cpd) 27 - { 28 - struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd); 29 - 30 - free_percpu(dd_blkcg->stats); 31 - kfree(dd_blkcg); 32 - } 33 - 34 - static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd) 35 - { 36 - return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy), 37 - struct dd_blkcg, cpd); 38 - } 39 - 40 - /* 41 - * Convert an association between a block cgroup and a request queue into a 42 - * pointer to the mq-deadline information associated with a (blkcg, queue) pair. 
43 - */ 44 - struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio) 45 - { 46 - struct blkg_policy_data *pd; 47 - 48 - pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy); 49 - if (!pd) 50 - return NULL; 51 - 52 - return dd_blkcg_from_pd(pd); 53 - } 54 - 55 - static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size) 56 - { 57 - static const char *const prio_class_name[] = { 58 - [IOPRIO_CLASS_NONE] = "NONE", 59 - [IOPRIO_CLASS_RT] = "RT", 60 - [IOPRIO_CLASS_BE] = "BE", 61 - [IOPRIO_CLASS_IDLE] = "IDLE", 62 - }; 63 - struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd); 64 - int res = 0; 65 - u8 prio; 66 - 67 - for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++) 68 - res += scnprintf(buf + res, size - res, 69 - " [%s] dispatched=%u inserted=%u merged=%u", 70 - prio_class_name[prio], 71 - ddcg_sum(blkcg, dispatched, prio) + 72 - ddcg_sum(blkcg, merged, prio) - 73 - ddcg_sum(blkcg, completed, prio), 74 - ddcg_sum(blkcg, inserted, prio) - 75 - ddcg_sum(blkcg, completed, prio), 76 - ddcg_sum(blkcg, merged, prio)); 77 - 78 - return res; 79 - } 80 - 81 - static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q, 82 - struct blkcg *blkcg) 83 - { 84 - struct dd_blkg *pd; 85 - 86 - pd = kzalloc(sizeof(*pd), gfp); 87 - if (!pd) 88 - return NULL; 89 - return &pd->pd; 90 - } 91 - 92 - static void dd_pd_free(struct blkg_policy_data *pd) 93 - { 94 - struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd); 95 - 96 - kfree(dd_blkg); 97 - } 98 - 99 - static struct blkcg_policy dd_blkcg_policy = { 100 - .cpd_alloc_fn = dd_cpd_alloc, 101 - .cpd_free_fn = dd_cpd_free, 102 - 103 - .pd_alloc_fn = dd_pd_alloc, 104 - .pd_free_fn = dd_pd_free, 105 - .pd_stat_fn = dd_pd_stat, 106 - }; 107 - 108 - int dd_activate_policy(struct request_queue *q) 109 - { 110 - return blkcg_activate_policy(q, &dd_blkcg_policy); 111 - } 112 - 113 - void dd_deactivate_policy(struct request_queue *q) 114 - { 115 - blkcg_deactivate_policy(q, &dd_blkcg_policy); 116 - } 
117 - 118 - int __init dd_blkcg_init(void) 119 - { 120 - return blkcg_policy_register(&dd_blkcg_policy); 121 - } 122 - 123 - void __exit dd_blkcg_exit(void) 124 - { 125 - blkcg_policy_unregister(&dd_blkcg_policy); 126 - }
-114
block/mq-deadline-cgroup.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - 3 - #if !defined(_MQ_DEADLINE_CGROUP_H_) 4 - #define _MQ_DEADLINE_CGROUP_H_ 5 - 6 - #include <linux/blk-cgroup.h> 7 - 8 - struct request_queue; 9 - 10 - /** 11 - * struct io_stats_per_prio - I/O statistics per I/O priority class. 12 - * @inserted: Number of inserted requests. 13 - * @merged: Number of merged requests. 14 - * @dispatched: Number of dispatched requests. 15 - * @completed: Number of I/O completions. 16 - */ 17 - struct io_stats_per_prio { 18 - local_t inserted; 19 - local_t merged; 20 - local_t dispatched; 21 - local_t completed; 22 - }; 23 - 24 - /* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */ 25 - struct blkcg_io_stats { 26 - struct io_stats_per_prio stats[4]; 27 - }; 28 - 29 - /** 30 - * struct dd_blkcg - Per cgroup data. 31 - * @cpd: blkcg_policy_data structure. 32 - * @stats: I/O statistics. 33 - */ 34 - struct dd_blkcg { 35 - struct blkcg_policy_data cpd; /* must be the first member */ 36 - struct blkcg_io_stats __percpu *stats; 37 - }; 38 - 39 - /* 40 - * Count one event of type 'event_type' and with I/O priority class 41 - * 'prio_class'. 42 - */ 43 - #define ddcg_count(ddcg, event_type, prio_class) do { \ 44 - if (ddcg) { \ 45 - struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \ 46 - \ 47 - BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \ 48 - BUILD_BUG_ON(!__same_type((prio_class), u8)); \ 49 - local_inc(&io_stats->stats[(prio_class)].event_type); \ 50 - put_cpu_ptr(io_stats); \ 51 - } \ 52 - } while (0) 53 - 54 - /* 55 - * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls 56 - * across all CPUs. No locking or barriers since it is fine if the returned 57 - * sum is slightly outdated. 
58 - */ 59 - #define ddcg_sum(ddcg, event_type, prio) ({ \ 60 - unsigned int cpu; \ 61 - u32 sum = 0; \ 62 - \ 63 - BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \ 64 - BUILD_BUG_ON(!__same_type((prio), u8)); \ 65 - for_each_present_cpu(cpu) \ 66 - sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)-> \ 67 - stats[(prio)].event_type); \ 68 - sum; \ 69 - }) 70 - 71 - #ifdef CONFIG_BLK_CGROUP 72 - 73 - /** 74 - * struct dd_blkg - Per (cgroup, request queue) data. 75 - * @pd: blkg_policy_data structure. 76 - */ 77 - struct dd_blkg { 78 - struct blkg_policy_data pd; /* must be the first member */ 79 - }; 80 - 81 - struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio); 82 - int dd_activate_policy(struct request_queue *q); 83 - void dd_deactivate_policy(struct request_queue *q); 84 - int __init dd_blkcg_init(void); 85 - void __exit dd_blkcg_exit(void); 86 - 87 - #else /* CONFIG_BLK_CGROUP */ 88 - 89 - static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio) 90 - { 91 - return NULL; 92 - } 93 - 94 - static inline int dd_activate_policy(struct request_queue *q) 95 - { 96 - return 0; 97 - } 98 - 99 - static inline void dd_deactivate_policy(struct request_queue *q) 100 - { 101 - } 102 - 103 - static inline int dd_blkcg_init(void) 104 - { 105 - return 0; 106 - } 107 - 108 - static inline void dd_blkcg_exit(void) 109 - { 110 - } 111 - 112 - #endif /* CONFIG_BLK_CGROUP */ 113 - 114 - #endif /* _MQ_DEADLINE_CGROUP_H_ */
+14 -59
block/mq-deadline-main.c block/mq-deadline.c
··· 25 25 #include "blk-mq-debugfs.h" 26 26 #include "blk-mq-tag.h" 27 27 #include "blk-mq-sched.h" 28 - #include "mq-deadline-cgroup.h" 29 28 30 29 /* 31 30 * See Documentation/block/deadline-iosched.rst ··· 56 57 57 58 enum { DD_PRIO_COUNT = 3 }; 58 59 60 + /* I/O statistics per I/O priority. */ 61 + struct io_stats_per_prio { 62 + local_t inserted; 63 + local_t merged; 64 + local_t dispatched; 65 + local_t completed; 66 + }; 67 + 59 68 /* I/O statistics for all I/O priorities (enum dd_prio). */ 60 69 struct io_stats { 61 70 struct io_stats_per_prio stats[DD_PRIO_COUNT]; ··· 85 78 /* 86 79 * run time data 87 80 */ 88 - 89 - /* Request queue that owns this data structure. */ 90 - struct request_queue *queue; 91 81 92 82 struct dd_per_prio per_prio[DD_PRIO_COUNT]; 93 83 ··· 238 234 struct deadline_data *dd = q->elevator->elevator_data; 239 235 const u8 ioprio_class = dd_rq_ioclass(next); 240 236 const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; 241 - struct dd_blkcg *blkcg = next->elv.priv[0]; 242 237 243 238 dd_count(dd, merged, prio); 244 - ddcg_count(blkcg, merged, ioprio_class); 245 239 246 240 /* 247 241 * if next expires before rq, assign its expire time to rq ··· 377 375 { 378 376 struct request *rq, *next_rq; 379 377 enum dd_data_dir data_dir; 380 - struct dd_blkcg *blkcg; 381 378 enum dd_prio prio; 382 379 u8 ioprio_class; 383 380 ··· 475 474 ioprio_class = dd_rq_ioclass(rq); 476 475 prio = ioprio_class_to_prio[ioprio_class]; 477 476 dd_count(dd, dispatched, prio); 478 - blkcg = rq->elv.priv[0]; 479 - ddcg_count(blkcg, dispatched, ioprio_class); 480 477 /* 481 478 * If the request needs its target zone locked, do it. 
482 479 */ ··· 568 569 struct deadline_data *dd = e->elevator_data; 569 570 enum dd_prio prio; 570 571 571 - dd_deactivate_policy(dd->queue); 572 - 573 572 for (prio = 0; prio <= DD_PRIO_MAX; prio++) { 574 573 struct dd_per_prio *per_prio = &dd->per_prio[prio]; 575 574 ··· 581 584 } 582 585 583 586 /* 584 - * Initialize elevator private data (deadline_data) and associate with blkcg. 587 + * initialize elevator private data (deadline_data). 585 588 */ 586 589 static int dd_init_sched(struct request_queue *q, struct elevator_type *e) 587 590 { ··· 589 592 struct elevator_queue *eq; 590 593 enum dd_prio prio; 591 594 int ret = -ENOMEM; 592 - 593 - /* 594 - * Initialization would be very tricky if the queue is not frozen, 595 - * hence the warning statement below. 596 - */ 597 - WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter)); 598 595 599 596 eq = elevator_alloc(q, e); 600 597 if (!eq) ··· 604 613 GFP_KERNEL | __GFP_ZERO); 605 614 if (!dd->stats) 606 615 goto free_dd; 607 - 608 - dd->queue = q; 609 616 610 617 for (prio = 0; prio <= DD_PRIO_MAX; prio++) { 611 618 struct dd_per_prio *per_prio = &dd->per_prio[prio]; ··· 624 635 spin_lock_init(&dd->lock); 625 636 spin_lock_init(&dd->zone_lock); 626 637 627 - ret = dd_activate_policy(q); 628 - if (ret) 629 - goto free_stats; 630 - 631 - ret = 0; 632 638 q->elevator = eq; 633 639 return 0; 634 - 635 - free_stats: 636 - free_percpu(dd->stats); 637 640 638 641 free_dd: 639 642 kfree(dd); ··· 699 718 u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); 700 719 struct dd_per_prio *per_prio; 701 720 enum dd_prio prio; 702 - struct dd_blkcg *blkcg; 703 721 LIST_HEAD(free); 704 722 705 723 lockdep_assert_held(&dd->lock); ··· 709 729 */ 710 730 blk_req_zone_write_unlock(rq); 711 731 712 - /* 713 - * If a block cgroup has been associated with the submitter and if an 714 - * I/O priority has been set in the associated block cgroup, use the 715 - * lowest of the cgroup priority and the request priority for the 716 - * request. 
If no priority has been set in the request, use the cgroup 717 - * priority. 718 - */ 719 732 prio = ioprio_class_to_prio[ioprio_class]; 720 733 dd_count(dd, inserted, prio); 721 - blkcg = dd_blkcg_from_bio(rq->bio); 722 - ddcg_count(blkcg, inserted, ioprio_class); 723 - rq->elv.priv[0] = blkcg; 724 734 725 735 if (blk_mq_sched_try_insert_merge(q, rq, &free)) { 726 736 blk_mq_free_requests(&free); ··· 759 789 spin_unlock(&dd->lock); 760 790 } 761 791 762 - /* Callback from inside blk_mq_rq_ctx_init(). */ 792 + /* 793 + * Nothing to do here. This is defined only to ensure that .finish_request 794 + * method is called upon request completion. 795 + */ 763 796 static void dd_prepare_request(struct request *rq) 764 797 { 765 - rq->elv.priv[0] = NULL; 766 798 } 767 799 768 800 /* ··· 787 815 { 788 816 struct request_queue *q = rq->q; 789 817 struct deadline_data *dd = q->elevator->elevator_data; 790 - struct dd_blkcg *blkcg = rq->elv.priv[0]; 791 818 const u8 ioprio_class = dd_rq_ioclass(rq); 792 819 const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; 793 820 struct dd_per_prio *per_prio = &dd->per_prio[prio]; 794 821 795 822 dd_count(dd, completed, prio); 796 - ddcg_count(blkcg, completed, ioprio_class); 797 823 798 824 if (blk_queue_is_zoned(q)) { 799 825 unsigned long flags; ··· 1114 1144 1115 1145 static int __init deadline_init(void) 1116 1146 { 1117 - int ret; 1118 - 1119 - ret = elv_register(&mq_deadline); 1120 - if (ret) 1121 - goto out; 1122 - ret = dd_blkcg_init(); 1123 - if (ret) 1124 - goto unreg; 1125 - 1126 - out: 1127 - return ret; 1128 - 1129 - unreg: 1130 - elv_unregister(&mq_deadline); 1131 - goto out; 1147 + return elv_register(&mq_deadline); 1132 1148 } 1133 1149 1134 1150 static void __exit deadline_exit(void) 1135 1151 { 1136 - dd_blkcg_exit(); 1137 1152 elv_unregister(&mq_deadline); 1138 1153 } 1139 1154
+1 -1
crypto/Kconfig
··· 1768 1768 bool 1769 1769 default y 1770 1770 select CRYPTO_HMAC 1771 - select CRYPTO_SHA256 1771 + select CRYPTO_SHA512 1772 1772 1773 1773 config CRYPTO_DRBG_HASH 1774 1774 bool "Enable Hash DRBG"
+3
drivers/acpi/nfit/core.c
··· 3021 3021 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 3022 3022 struct nd_mapping_desc *mapping; 3023 3023 3024 + /* range index 0 == unmapped in SPA or invalid-SPA */ 3025 + if (memdev->range_index == 0 || spa->range_index == 0) 3026 + continue; 3024 3027 if (memdev->range_index != spa->range_index) 3025 3028 continue; 3026 3029 if (count >= ND_MAX_MAPPINGS) {
+1
drivers/base/core.c
··· 2837 2837 device_pm_init(dev); 2838 2838 set_dev_node(dev, -1); 2839 2839 #ifdef CONFIG_GENERIC_MSI_IRQ 2840 + raw_spin_lock_init(&dev->msi_lock); 2840 2841 INIT_LIST_HEAD(&dev->msi_list); 2841 2842 #endif 2842 2843 INIT_LIST_HEAD(&dev->links.consumers);
+11 -3
drivers/block/nbd.c
··· 818 818 { 819 819 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 820 820 821 + /* don't abort one completed request */ 822 + if (blk_mq_request_completed(req)) 823 + return true; 824 + 821 825 mutex_lock(&cmd->lock); 822 826 cmd->status = BLK_STS_IOERR; 823 827 mutex_unlock(&cmd->lock); ··· 2008 2004 { 2009 2005 mutex_lock(&nbd->config_lock); 2010 2006 nbd_disconnect(nbd); 2011 - nbd_clear_sock(nbd); 2012 - mutex_unlock(&nbd->config_lock); 2007 + sock_shutdown(nbd); 2013 2008 /* 2014 2009 * Make sure recv thread has finished, so it does not drop the last 2015 2010 * config ref and try to destroy the workqueue from inside the work 2016 - * queue. 2011 + * queue. And this also ensure that we can safely call nbd_clear_que() 2012 + * to cancel the inflight I/Os. 2017 2013 */ 2018 2014 if (nbd->recv_workq) 2019 2015 flush_workqueue(nbd->recv_workq); 2016 + nbd_clear_que(nbd); 2017 + nbd->task_setup = NULL; 2018 + mutex_unlock(&nbd->config_lock); 2019 + 2020 2020 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, 2021 2021 &nbd->config->runtime_flags)) 2022 2022 nbd_config_put(nbd);
+33 -6
drivers/block/virtio_blk.c
··· 692 692 static unsigned int virtblk_queue_depth; 693 693 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); 694 694 695 + static int virtblk_validate(struct virtio_device *vdev) 696 + { 697 + u32 blk_size; 698 + 699 + if (!vdev->config->get) { 700 + dev_err(&vdev->dev, "%s failure: config access disabled\n", 701 + __func__); 702 + return -EINVAL; 703 + } 704 + 705 + if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE)) 706 + return 0; 707 + 708 + blk_size = virtio_cread32(vdev, 709 + offsetof(struct virtio_blk_config, blk_size)); 710 + 711 + if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) 712 + __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE); 713 + 714 + return 0; 715 + } 716 + 695 717 static int virtblk_probe(struct virtio_device *vdev) 696 718 { 697 719 struct virtio_blk *vblk; ··· 724 702 u16 min_io_size; 725 703 u8 physical_block_exp, alignment_offset; 726 704 unsigned int queue_depth; 727 - 728 - if (!vdev->config->get) { 729 - dev_err(&vdev->dev, "%s failure: config access disabled\n", 730 - __func__); 731 - return -EINVAL; 732 - } 733 705 734 706 err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), 735 707 GFP_KERNEL); ··· 839 823 else 840 824 blk_size = queue_logical_block_size(q); 841 825 826 + if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) { 827 + dev_err(&vdev->dev, 828 + "block size is changed unexpectedly, now is %u\n", 829 + blk_size); 830 + err = -EINVAL; 831 + goto err_cleanup_disk; 832 + } 833 + 842 834 /* Use topology information if available */ 843 835 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 844 836 struct virtio_blk_config, physical_block_exp, ··· 905 881 device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); 906 882 return 0; 907 883 884 + err_cleanup_disk: 885 + blk_cleanup_disk(vblk->disk); 908 886 out_free_tags: 909 887 blk_mq_free_tag_set(&vblk->tag_set); 910 888 out_free_vq: ··· 1009 983 .driver.name = KBUILD_MODNAME, 1010 984 .driver.owner = THIS_MODULE, 1011 
985 .id_table = id_table, 986 + .validate = virtblk_validate, 1012 987 .probe = virtblk_probe, 1013 988 .remove = virtblk_remove, 1014 989 .config_changed = virtblk_config_changed,
+1 -1
drivers/dax/super.c
··· 313 313 return -ENXIO; 314 314 315 315 if (nr_pages < 0) 316 - return nr_pages; 316 + return -EINVAL; 317 317 318 318 avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages, 319 319 kaddr, pfn);
+61 -10
drivers/firmware/efi/libstub/arm64-stub.c
··· 35 35 } 36 36 37 37 /* 38 - * Although relocatable kernels can fix up the misalignment with respect to 39 - * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of 40 - * sync with those recorded in the vmlinux when kaslr is disabled but the 41 - * image required relocation anyway. Therefore retain 2M alignment unless 42 - * KASLR is in use. 38 + * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail 39 + * to provide space, and fail to zero it). Check for this condition by double 40 + * checking that the first and the last byte of the image are covered by the 41 + * same EFI memory map entry. 43 42 */ 44 - static u64 min_kimg_align(void) 43 + static bool check_image_region(u64 base, u64 size) 45 44 { 46 - return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; 45 + unsigned long map_size, desc_size, buff_size; 46 + efi_memory_desc_t *memory_map; 47 + struct efi_boot_memmap map; 48 + efi_status_t status; 49 + bool ret = false; 50 + int map_offset; 51 + 52 + map.map = &memory_map; 53 + map.map_size = &map_size; 54 + map.desc_size = &desc_size; 55 + map.desc_ver = NULL; 56 + map.key_ptr = NULL; 57 + map.buff_size = &buff_size; 58 + 59 + status = efi_get_memory_map(&map); 60 + if (status != EFI_SUCCESS) 61 + return false; 62 + 63 + for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { 64 + efi_memory_desc_t *md = (void *)memory_map + map_offset; 65 + u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE; 66 + 67 + /* 68 + * Find the region that covers base, and return whether 69 + * it covers base+size bytes. 
70 + */ 71 + if (base >= md->phys_addr && base < end) { 72 + ret = (base + size) <= end; 73 + break; 74 + } 75 + } 76 + 77 + efi_bs_call(free_pool, memory_map); 78 + 79 + return ret; 47 80 } 48 81 49 82 efi_status_t handle_kernel_image(unsigned long *image_addr, ··· 88 55 efi_status_t status; 89 56 unsigned long kernel_size, kernel_memsize = 0; 90 57 u32 phys_seed = 0; 58 + 59 + /* 60 + * Although relocatable kernels can fix up the misalignment with 61 + * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are 62 + * subtly out of sync with those recorded in the vmlinux when kaslr is 63 + * disabled but the image required relocation anyway. Therefore retain 64 + * 2M alignment if KASLR was explicitly disabled, even if it was not 65 + * going to be activated to begin with. 66 + */ 67 + u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; 91 68 92 69 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { 93 70 if (!efi_nokaslr) { ··· 119 76 if (image->image_base != _text) 120 77 efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); 121 78 79 + if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN)) 80 + efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n", 81 + EFI_KIMG_ALIGN >> 10); 82 + 122 83 kernel_size = _edata - _text; 123 84 kernel_memsize = kernel_size + (_end - _edata); 124 85 *reserve_size = kernel_memsize; ··· 132 85 * If KASLR is enabled, and we have some randomness available, 133 86 * locate the kernel at a randomized offset in physical memory. 
134 87 */ 135 - status = efi_random_alloc(*reserve_size, min_kimg_align(), 88 + status = efi_random_alloc(*reserve_size, min_kimg_align, 136 89 reserve_addr, phys_seed); 90 + if (status != EFI_SUCCESS) 91 + efi_warn("efi_random_alloc() failed: 0x%lx\n", status); 137 92 } else { 138 93 status = EFI_OUT_OF_RESOURCES; 139 94 } 140 95 141 96 if (status != EFI_SUCCESS) { 142 - if (IS_ALIGNED((u64)_text, min_kimg_align())) { 97 + if (!check_image_region((u64)_text, kernel_memsize)) { 98 + efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n"); 99 + } else if (IS_ALIGNED((u64)_text, min_kimg_align)) { 143 100 /* 144 101 * Just execute from wherever we were loaded by the 145 102 * UEFI PE/COFF loader if the alignment is suitable. ··· 154 103 } 155 104 156 105 status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, 157 - ULONG_MAX, min_kimg_align()); 106 + ULONG_MAX, min_kimg_align); 158 107 159 108 if (status != EFI_SUCCESS) { 160 109 efi_err("Failed to relocate kernel\n");
+2
drivers/firmware/efi/libstub/randomalloc.c
··· 30 30 31 31 region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, 32 32 (u64)ULONG_MAX); 33 + if (region_end < size) 34 + return 0; 33 35 34 36 first_slot = round_up(md->phys_addr, align); 35 37 last_slot = round_down(region_end - size + 1, align);
+2 -2
drivers/i2c/busses/i2c-bcm-iproc.c
··· 1224 1224 1225 1225 disable_irq(iproc_i2c->irq); 1226 1226 1227 + tasklet_kill(&iproc_i2c->slave_rx_tasklet); 1228 + 1227 1229 /* disable all slave interrupts */ 1228 1230 tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); 1229 1231 tmp &= ~(IE_S_ALL_INTERRUPT_MASK << 1230 1232 IE_S_ALL_INTERRUPT_SHIFT); 1231 1233 iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp); 1232 - 1233 - tasklet_kill(&iproc_i2c->slave_rx_tasklet); 1234 1234 1235 1235 /* Erase the slave address programmed */ 1236 1236 tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+3 -2
drivers/i2c/i2c-dev.c
··· 141 141 if (count > 8192) 142 142 count = 8192; 143 143 144 - tmp = kmalloc(count, GFP_KERNEL); 144 + tmp = kzalloc(count, GFP_KERNEL); 145 145 if (tmp == NULL) 146 146 return -ENOMEM; 147 147 ··· 150 150 151 151 ret = i2c_master_recv(client, tmp, count); 152 152 if (ret >= 0) 153 - ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret; 153 + if (copy_to_user(buf, tmp, ret)) 154 + ret = -EFAULT; 154 155 kfree(tmp); 155 156 return ret; 156 157 }
+2
drivers/iio/accel/Kconfig
··· 231 231 232 232 config FXLS8962AF 233 233 tristate 234 + depends on I2C || !I2C # cannot be built-in for modular I2C 234 235 235 236 config FXLS8962AF_I2C 236 237 tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver" ··· 248 247 config FXLS8962AF_SPI 249 248 tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver" 250 249 depends on SPI 250 + depends on I2C || !I2C 251 251 select FXLS8962AF 252 252 select REGMAP_SPI 253 253 help
+1 -1
drivers/iio/accel/fxls8962af-core.c
··· 637 637 return ret; 638 638 } 639 639 640 - return ret; 640 + return 0; 641 641 } 642 642 643 643 static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
+2 -2
drivers/iio/adc/palmas_gpadc.c
··· 664 664 665 665 adc_period = adc->auto_conversion_period; 666 666 for (i = 0; i < 16; ++i) { 667 - if (((1000 * (1 << i)) / 32) < adc_period) 668 - continue; 667 + if (((1000 * (1 << i)) / 32) >= adc_period) 668 + break; 669 669 } 670 670 if (i > 0) 671 671 i--;
-1
drivers/iio/adc/ti-ads7950.c
··· 568 568 st->ring_xfer.tx_buf = &st->tx_buf[0]; 569 569 st->ring_xfer.rx_buf = &st->rx_buf[0]; 570 570 /* len will be set later */ 571 - st->ring_xfer.cs_change = true; 572 571 573 572 spi_message_add_tail(&st->ring_xfer, &st->ring_msg); 574 573
+4 -2
drivers/iio/humidity/hdc100x.c
··· 25 25 #include <linux/iio/trigger_consumer.h> 26 26 #include <linux/iio/triggered_buffer.h> 27 27 28 + #include <linux/time.h> 29 + 28 30 #define HDC100X_REG_TEMP 0x00 29 31 #define HDC100X_REG_HUMIDITY 0x01 30 32 ··· 168 166 struct iio_chan_spec const *chan) 169 167 { 170 168 struct i2c_client *client = data->client; 171 - int delay = data->adc_int_us[chan->address]; 169 + int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC; 172 170 int ret; 173 171 __be16 val; 174 172 ··· 318 316 struct iio_dev *indio_dev = pf->indio_dev; 319 317 struct hdc100x_data *data = iio_priv(indio_dev); 320 318 struct i2c_client *client = data->client; 321 - int delay = data->adc_int_us[0] + data->adc_int_us[1]; 319 + int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC; 322 320 int ret; 323 321 324 322 /* dual read starts at temp register */
+1 -2
drivers/iio/imu/adis.c
··· 411 411 int ret; 412 412 413 413 /* check if the device has rst pin low */ 414 - gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS); 414 + gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH); 415 415 if (IS_ERR(gpio)) 416 416 return PTR_ERR(gpio); 417 417 418 418 if (gpio) { 419 - gpiod_set_value_cansleep(gpio, 1); 420 419 msleep(10); 421 420 /* bring device out of reset */ 422 421 gpiod_set_value_cansleep(gpio, 0);
+1 -1
drivers/mtd/chips/cfi_cmdset_0002.c
··· 119 119 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 120 120 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ; 121 121 122 - return extp->MinorVersion >= '5' && 122 + return extp && extp->MinorVersion >= '5' && 123 123 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; 124 124 } 125 125
+3 -2
drivers/mtd/devices/mchp48l640.c
··· 229 229 woff += ws; 230 230 } 231 231 232 - return ret; 232 + return 0; 233 233 } 234 234 235 235 static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len, ··· 255 255 if (!ret) 256 256 *retlen += len; 257 257 258 + kfree(cmd); 258 259 return ret; 259 260 260 261 fail: ··· 287 286 woff += ws; 288 287 } 289 288 290 - return ret; 289 + return 0; 291 290 }; 292 291 293 292 static const struct mchp48_caps mchp48l640_caps = {
+4 -7
drivers/mtd/mtd_blkdevs.c
··· 419 419 if (tr->discard) { 420 420 blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq); 421 421 blk_queue_max_discard_sectors(new->rq, UINT_MAX); 422 + new->rq->limits.discard_granularity = tr->blksize; 422 423 } 423 424 424 425 gd->queue = new->rq; ··· 526 525 if (!blktrans_notifier.list.next) 527 526 register_mtd_user(&blktrans_notifier); 528 527 529 - 530 - mutex_lock(&mtd_table_mutex); 531 - 532 528 ret = register_blkdev(tr->major, tr->name); 533 529 if (ret < 0) { 534 530 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", 535 531 tr->name, tr->major, ret); 536 - mutex_unlock(&mtd_table_mutex); 537 532 return ret; 538 533 } 539 534 ··· 539 542 tr->blkshift = ffs(tr->blksize) - 1; 540 543 541 544 INIT_LIST_HEAD(&tr->devs); 542 - list_add(&tr->list, &blktrans_majors); 543 545 546 + mutex_lock(&mtd_table_mutex); 547 + list_add(&tr->list, &blktrans_majors); 544 548 mtd_for_each_device(mtd) 545 549 if (mtd->type != MTD_ABSENT) 546 550 tr->add_mtd(tr, mtd); 547 - 548 551 mutex_unlock(&mtd_table_mutex); 549 552 return 0; 550 553 } ··· 561 564 list_for_each_entry_safe(dev, next, &tr->devs, list) 562 565 tr->remove_dev(dev); 563 566 564 - unregister_blkdev(tr->major, tr->name); 565 567 mutex_unlock(&mtd_table_mutex); 568 + unregister_blkdev(tr->major, tr->name); 566 569 567 570 BUG_ON(!list_empty(&tr->devs)); 568 571 return 0;
+3 -1
drivers/mtd/mtdcore.c
··· 806 806 807 807 err: 808 808 kfree(info); 809 - return ret; 809 + 810 + /* ENODATA means there is no OTP region. */ 811 + return ret == -ENODATA ? 0 : ret; 810 812 } 811 813 812 814 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+8 -2
drivers/mtd/nand/raw/nand_base.c
··· 5228 5228 static int of_get_nand_secure_regions(struct nand_chip *chip) 5229 5229 { 5230 5230 struct device_node *dn = nand_get_flash_node(chip); 5231 + struct property *prop; 5231 5232 int nr_elem, i, j; 5232 5233 5233 - nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64)); 5234 - if (!nr_elem) 5234 + /* Only proceed if the "secure-regions" property is present in DT */ 5235 + prop = of_find_property(dn, "secure-regions", NULL); 5236 + if (!prop) 5235 5237 return 0; 5238 + 5239 + nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64)); 5240 + if (nr_elem <= 0) 5241 + return nr_elem; 5236 5242 5237 5243 chip->nr_secure_regions = nr_elem / 2; 5238 5244 chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
+2 -4
drivers/net/dsa/sja1105/sja1105_mdio.c
··· 284 284 struct mii_bus *bus; 285 285 int rc = 0; 286 286 287 - np = of_find_compatible_node(mdio_node, NULL, 288 - "nxp,sja1110-base-tx-mdio"); 287 + np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio"); 289 288 if (!np) 290 289 return 0; 291 290 ··· 338 339 struct mii_bus *bus; 339 340 int rc = 0; 340 341 341 - np = of_find_compatible_node(mdio_node, NULL, 342 - "nxp,sja1110-base-t1-mdio"); 342 + np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio"); 343 343 if (!np) 344 344 return 0; 345 345
+76 -37
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 72 72 #include "bnxt_debugfs.h" 73 73 74 74 #define BNXT_TX_TIMEOUT (5 * HZ) 75 - #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW) 75 + #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ 76 + NETIF_MSG_TX_ERR) 76 77 77 78 MODULE_LICENSE("GPL"); 78 79 MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); ··· 368 367 return md_dst->u.port_info.port_id; 369 368 } 370 369 370 + static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 371 + u16 prod) 372 + { 373 + bnxt_db_write(bp, &txr->tx_db, prod); 374 + txr->kick_pending = 0; 375 + } 376 + 377 + static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp, 378 + struct bnxt_tx_ring_info *txr, 379 + struct netdev_queue *txq) 380 + { 381 + netif_tx_stop_queue(txq); 382 + 383 + /* netif_tx_stop_queue() must be done before checking 384 + * tx index in bnxt_tx_avail() below, because in 385 + * bnxt_tx_int(), we update tx index before checking for 386 + * netif_tx_queue_stopped(). 387 + */ 388 + smp_mb(); 389 + if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) { 390 + netif_tx_wake_queue(txq); 391 + return false; 392 + } 393 + 394 + return true; 395 + } 396 + 371 397 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 372 398 { 373 399 struct bnxt *bp = netdev_priv(dev); ··· 414 386 i = skb_get_queue_mapping(skb); 415 387 if (unlikely(i >= bp->tx_nr_rings)) { 416 388 dev_kfree_skb_any(skb); 389 + atomic_long_inc(&dev->tx_dropped); 417 390 return NETDEV_TX_OK; 418 391 } 419 392 ··· 424 395 425 396 free_size = bnxt_tx_avail(bp, txr); 426 397 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 427 - netif_tx_stop_queue(txq); 428 - return NETDEV_TX_BUSY; 398 + /* We must have raced with NAPI cleanup */ 399 + if (net_ratelimit() && txr->kick_pending) 400 + netif_warn(bp, tx_err, dev, 401 + "bnxt: ring busy w/ flush pending!\n"); 402 + if (bnxt_txr_netif_try_stop_queue(bp, txr, txq)) 403 + return NETDEV_TX_BUSY; 429 404 } 430 405 431 406 length = 
skb->len; ··· 552 519 normal_tx: 553 520 if (length < BNXT_MIN_PKT_SIZE) { 554 521 pad = BNXT_MIN_PKT_SIZE - length; 555 - if (skb_pad(skb, pad)) { 522 + if (skb_pad(skb, pad)) 556 523 /* SKB already freed. */ 557 - tx_buf->skb = NULL; 558 - return NETDEV_TX_OK; 559 - } 524 + goto tx_kick_pending; 560 525 length = BNXT_MIN_PKT_SIZE; 561 526 } 562 527 563 528 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 564 529 565 - if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 566 - dev_kfree_skb_any(skb); 567 - tx_buf->skb = NULL; 568 - return NETDEV_TX_OK; 569 - } 530 + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 531 + goto tx_free; 570 532 571 533 dma_unmap_addr_set(tx_buf, mapping, mapping); 572 534 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | ··· 648 620 txr->tx_prod = prod; 649 621 650 622 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 651 - bnxt_db_write(bp, &txr->tx_db, prod); 623 + bnxt_txr_db_kick(bp, txr, prod); 624 + else 625 + txr->kick_pending = 1; 652 626 653 627 tx_done: 654 628 655 629 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 656 630 if (netdev_xmit_more() && !tx_buf->is_push) 657 - bnxt_db_write(bp, &txr->tx_db, prod); 631 + bnxt_txr_db_kick(bp, txr, prod); 658 632 659 - netif_tx_stop_queue(txq); 660 - 661 - /* netif_tx_stop_queue() must be done before checking 662 - * tx index in bnxt_tx_avail() below, because in 663 - * bnxt_tx_int(), we update tx index before checking for 664 - * netif_tx_queue_stopped(). 
665 - */ 666 - smp_mb(); 667 - if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 668 - netif_tx_wake_queue(txq); 633 + bnxt_txr_netif_try_stop_queue(bp, txr, txq); 669 634 } 670 635 return NETDEV_TX_OK; 671 636 ··· 671 650 /* start back at beginning and unmap skb */ 672 651 prod = txr->tx_prod; 673 652 tx_buf = &txr->tx_buf_ring[prod]; 674 - tx_buf->skb = NULL; 675 653 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 676 654 skb_headlen(skb), PCI_DMA_TODEVICE); 677 655 prod = NEXT_TX(prod); ··· 684 664 PCI_DMA_TODEVICE); 685 665 } 686 666 667 + tx_free: 687 668 dev_kfree_skb_any(skb); 669 + tx_kick_pending: 670 + if (txr->kick_pending) 671 + bnxt_txr_db_kick(bp, txr, txr->tx_prod); 672 + txr->tx_buf_ring[txr->tx_prod].skb = NULL; 673 + atomic_long_inc(&dev->tx_dropped); 688 674 return NETDEV_TX_OK; 689 675 } 690 676 ··· 760 734 smp_mb(); 761 735 762 736 if (unlikely(netif_tx_queue_stopped(txq)) && 763 - (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 764 - __netif_tx_lock(txq, smp_processor_id()); 765 - if (netif_tx_queue_stopped(txq) && 766 - bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 767 - txr->dev_state != BNXT_DEV_STATE_CLOSING) 768 - netif_tx_wake_queue(txq); 769 - __netif_tx_unlock(txq); 770 - } 737 + bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 738 + READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING) 739 + netif_tx_wake_queue(txq); 771 740 } 772 741 773 742 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, ··· 1790 1769 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1791 1770 return -EBUSY; 1792 1771 1772 + /* The valid test of the entry must be done first before 1773 + * reading any further. 1774 + */ 1775 + dma_rmb(); 1793 1776 prod = rxr->rx_prod; 1794 1777 1795 1778 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { ··· 2016 1991 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2017 1992 return -EBUSY; 2018 1993 1994 + /* The valid test of the entry must be done first before 1995 + * reading any further. 
1996 + */ 1997 + dma_rmb(); 2019 1998 cmp_type = RX_CMP_TYPE(rxcmp); 2020 1999 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 2021 2000 rxcmp1->rx_cmp_cfa_code_errors_v2 |= ··· 2506 2477 if (!TX_CMP_VALID(txcmp, raw_cons)) 2507 2478 break; 2508 2479 2480 + /* The valid test of the entry must be done first before 2481 + * reading any further. 2482 + */ 2483 + dma_rmb(); 2509 2484 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2510 2485 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2511 2486 cp_cons = RING_CMP(tmp_raw_cons); ··· 9263 9230 for (i = 0; i < bp->cp_nr_rings; i++) { 9264 9231 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 9265 9232 9233 + napi_disable(&bp->bnapi[i]->napi); 9266 9234 if (bp->bnapi[i]->rx_ring) 9267 9235 cancel_work_sync(&cpr->dim.work); 9268 - 9269 - napi_disable(&bp->bnapi[i]->napi); 9270 9236 } 9271 9237 } 9272 9238 ··· 9299 9267 if (bp->tx_ring) { 9300 9268 for (i = 0; i < bp->tx_nr_rings; i++) { 9301 9269 txr = &bp->tx_ring[i]; 9302 - txr->dev_state = BNXT_DEV_STATE_CLOSING; 9270 + WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 9303 9271 } 9304 9272 } 9273 + /* Make sure napi polls see @dev_state change */ 9274 + synchronize_net(); 9305 9275 /* Drop carrier first to prevent TX timeout */ 9306 9276 netif_carrier_off(bp->dev); 9307 9277 /* Stop all TX queues */ ··· 9317 9283 9318 9284 for (i = 0; i < bp->tx_nr_rings; i++) { 9319 9285 txr = &bp->tx_ring[i]; 9320 - txr->dev_state = 0; 9286 + WRITE_ONCE(txr->dev_state, 0); 9321 9287 } 9288 + /* Make sure napi polls see @dev_state change */ 9289 + synchronize_net(); 9322 9290 netif_tx_wake_all_queues(bp->dev); 9323 9291 if (bp->link_info.link_up) 9324 9292 netif_carrier_on(bp->dev); ··· 10899 10863 return true; 10900 10864 return false; 10901 10865 } 10866 + /* 212 firmware is broken for aRFS */ 10867 + if (BNXT_FW_MAJ(bp) == 212) 10868 + return false; 10902 10869 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 10903 10870 return true; 10904 10871 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 799 799 u16 tx_prod; 800 800 u16 tx_cons; 801 801 u16 txq_index; 802 + u8 kick_pending; 802 803 struct bnxt_db_info tx_db; 803 804 804 805 struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+19 -19
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 3160 3160 return err; 3161 3161 } 3162 3162 3163 - static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) 3164 - { 3165 - struct device *dev = &sw_dev->dev; 3166 - struct ethsw_core *ethsw = dev_get_drvdata(dev); 3167 - int err; 3168 - 3169 - err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3170 - if (err) 3171 - dev_warn(dev, "dpsw_close err %d\n", err); 3172 - } 3173 - 3174 3163 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) 3175 3164 { 3176 3165 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); ··· 3167 3178 dpaa2_switch_destroy_rings(ethsw); 3168 3179 dpaa2_switch_drain_bp(ethsw); 3169 3180 dpaa2_switch_free_dpbp(ethsw); 3181 + } 3182 + 3183 + static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev) 3184 + { 3185 + struct device *dev = &sw_dev->dev; 3186 + struct ethsw_core *ethsw = dev_get_drvdata(dev); 3187 + int err; 3188 + 3189 + dpaa2_switch_ctrl_if_teardown(ethsw); 3190 + 3191 + destroy_workqueue(ethsw->workqueue); 3192 + 3193 + err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3194 + if (err) 3195 + dev_warn(dev, "dpsw_close err %d\n", err); 3170 3196 } 3171 3197 3172 3198 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) ··· 3193 3189 3194 3190 dev = &sw_dev->dev; 3195 3191 ethsw = dev_get_drvdata(dev); 3196 - 3197 - dpaa2_switch_ctrl_if_teardown(ethsw); 3198 3192 3199 3193 dpaa2_switch_teardown_irqs(sw_dev); 3200 3194 ··· 3209 3207 kfree(ethsw->filter_blocks); 3210 3208 kfree(ethsw->ports); 3211 3209 3212 - dpaa2_switch_takedown(sw_dev); 3213 - 3214 - destroy_workqueue(ethsw->workqueue); 3210 + dpaa2_switch_teardown(sw_dev); 3215 3211 3216 3212 fsl_mc_portal_free(ethsw->mc_io); 3217 3213 ··· 3326 3326 GFP_KERNEL); 3327 3327 if (!(ethsw->ports)) { 3328 3328 err = -ENOMEM; 3329 - goto err_takedown; 3329 + goto err_teardown; 3330 3330 } 3331 3331 3332 3332 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), ··· 3397 3397 err_free_ports: 3398 3398 kfree(ethsw->ports); 
3399 3399 3400 - err_takedown: 3401 - dpaa2_switch_takedown(sw_dev); 3400 + err_teardown: 3401 + dpaa2_switch_teardown(sw_dev); 3402 3402 3403 3403 err_free_cmdport: 3404 3404 fsl_mc_portal_free(ethsw->mc_io);
+1 -2
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 3663 3663 3664 3664 /* is DCB enabled at all? */ 3665 3665 if (vsi->tc_config.numtc == 1) 3666 - return i40e_swdcb_skb_tx_hash(netdev, skb, 3667 - netdev->real_num_tx_queues); 3666 + return netdev_pick_tx(netdev, skb, sb_dev); 3668 3667 3669 3668 prio = skb->priority; 3670 3669 hw = &vsi->back->hw;
+1
drivers/net/ethernet/intel/iavf/iavf.h
··· 136 136 struct iavf_mac_filter { 137 137 struct list_head list; 138 138 u8 macaddr[ETH_ALEN]; 139 + bool is_new_mac; /* filter is new, wait for PF decision */ 139 140 bool remove; /* filter needs to be removed */ 140 141 bool add; /* filter needs to be added */ 141 142 };
+1
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 772 772 773 773 list_add_tail(&f->list, &adapter->mac_filter_list); 774 774 f->add = true; 775 + f->is_new_mac = true; 775 776 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 776 777 } else { 777 778 f->remove = false;
+45 -2
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
··· 541 541 } 542 542 543 543 /** 544 + * iavf_mac_add_ok 545 + * @adapter: adapter structure 546 + * 547 + * Submit list of filters based on PF response. 548 + **/ 549 + static void iavf_mac_add_ok(struct iavf_adapter *adapter) 550 + { 551 + struct iavf_mac_filter *f, *ftmp; 552 + 553 + spin_lock_bh(&adapter->mac_vlan_list_lock); 554 + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 555 + f->is_new_mac = false; 556 + } 557 + spin_unlock_bh(&adapter->mac_vlan_list_lock); 558 + } 559 + 560 + /** 561 + * iavf_mac_add_reject 562 + * @adapter: adapter structure 563 + * 564 + * Remove filters from list based on PF response. 565 + **/ 566 + static void iavf_mac_add_reject(struct iavf_adapter *adapter) 567 + { 568 + struct net_device *netdev = adapter->netdev; 569 + struct iavf_mac_filter *f, *ftmp; 570 + 571 + spin_lock_bh(&adapter->mac_vlan_list_lock); 572 + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 573 + if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) 574 + f->remove = false; 575 + 576 + if (f->is_new_mac) { 577 + list_del(&f->list); 578 + kfree(f); 579 + } 580 + } 581 + spin_unlock_bh(&adapter->mac_vlan_list_lock); 582 + } 583 + 584 + /** 544 585 * iavf_add_vlans 545 586 * @adapter: adapter structure 546 587 * ··· 1533 1492 case VIRTCHNL_OP_ADD_ETH_ADDR: 1534 1493 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 1535 1494 iavf_stat_str(&adapter->hw, v_retval)); 1495 + iavf_mac_add_reject(adapter); 1536 1496 /* restore administratively set MAC address */ 1537 1497 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1538 1498 break; ··· 1681 1639 } 1682 1640 } 1683 1641 switch (v_opcode) { 1684 - case VIRTCHNL_OP_ADD_ETH_ADDR: { 1642 + case VIRTCHNL_OP_ADD_ETH_ADDR: 1643 + if (!v_retval) 1644 + iavf_mac_add_ok(adapter); 1685 1645 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 1686 1646 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 1687 - } 1688 1647 
break; 1689 1648 case VIRTCHNL_OP_GET_STATS: { 1690 1649 struct iavf_eth_stats *stats =
+1 -1
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 656 656 * maintaining phase 657 657 */ 658 658 if (start_time < current_time) 659 - start_time = div64_u64(current_time + NSEC_PER_MSEC - 1, 659 + start_time = div64_u64(current_time + NSEC_PER_SEC - 1, 660 660 NSEC_PER_SEC) * NSEC_PER_SEC + phase; 661 661 662 662 start_time -= E810_OUT_PROP_DELAY_NS;
+4 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
··· 52 52 53 53 /* Kick start the NAPI context so that receiving will start */ 54 54 err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); 55 - if (err) 55 + if (err) { 56 + clear_bit(qid, adapter->af_xdp_zc_qps); 57 + xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR); 56 58 return err; 59 + } 57 60 } 58 61 59 62 return 0;
+1
drivers/net/ethernet/mscc/ocelot.c
··· 1358 1358 struct net_device *bond = ocelot_port->bond; 1359 1359 1360 1360 mask = ocelot_get_bridge_fwd_mask(ocelot, bridge); 1361 + mask |= cpu_fwd_mask; 1361 1362 mask &= ~BIT(port); 1362 1363 if (bond) { 1363 1364 mask &= ~ocelot_get_bond_mask(ocelot, bond,
+20
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 327 327 unsigned long flags; 328 328 int rc = -EINVAL; 329 329 330 + if (!p_ll2_conn) 331 + return rc; 332 + 330 333 spin_lock_irqsave(&p_tx->lock, flags); 331 334 if (p_tx->b_completing_packet) { 332 335 rc = -EBUSY; ··· 503 500 unsigned long flags = 0; 504 501 int rc = 0; 505 502 503 + if (!p_ll2_conn) 504 + return rc; 505 + 506 506 spin_lock_irqsave(&p_rx->lock, flags); 507 + 508 + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) { 509 + spin_unlock_irqrestore(&p_rx->lock, flags); 510 + return 0; 511 + } 512 + 507 513 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); 508 514 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 509 515 ··· 833 821 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; 834 822 int rc; 835 823 824 + if (!p_ll2_conn) 825 + return 0; 826 + 836 827 if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) 837 828 return 0; 838 829 ··· 858 843 bool b_dont_submit_rx = false; 859 844 u16 new_idx = 0, num_bds = 0; 860 845 int rc; 846 + 847 + if (!p_ll2_conn) 848 + return 0; 861 849 862 850 if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) 863 851 return 0; ··· 1746 1728 if (!p_ll2_conn) 1747 1729 return -EINVAL; 1748 1730 p_rx = &p_ll2_conn->rx_queue; 1731 + if (!p_rx->set_prod_addr) 1732 + return -EIO; 1749 1733 1750 1734 spin_lock_irqsave(&p_rx->lock, flags); 1751 1735 if (!list_empty(&p_rx->free_descq))
+1 -2
drivers/net/ethernet/qlogic/qed/qed_rdma.c
··· 1285 1285 1286 1286 if (!rdma_cxt || !in_params || !out_params || 1287 1287 !p_hwfn->p_rdma_info->active) { 1288 - DP_ERR(p_hwfn->cdev, 1289 - "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", 1288 + pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", 1290 1289 rdma_cxt, in_params, out_params); 1291 1290 return NULL; 1292 1291 }
+3 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 3156 3156 3157 3157 indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); 3158 3158 ret = QLCRD32(adapter, indirect_addr, &err); 3159 - if (err == -EIO) 3159 + if (err == -EIO) { 3160 + qlcnic_83xx_unlock_flash(adapter); 3160 3161 return err; 3162 + } 3161 3163 3162 3164 word = ret; 3163 3165 *(u32 *)p_data = word;
+6
drivers/net/hamradio/6pack.c
··· 827 827 return; 828 828 } 829 829 830 + if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { 831 + pr_err("6pack: cooked buffer overrun, data loss\n"); 832 + sp->rx_count = 0; 833 + return; 834 + } 835 + 830 836 buf = sp->raw_buf; 831 837 sp->cooked_buf[sp->rx_count_cooked++] = 832 838 buf[0] | ((buf[1] << 2) & 0xc0);
+24 -13
drivers/net/mdio/mdio-mux.c
··· 82 82 83 83 static int parent_count; 84 84 85 + static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb) 86 + { 87 + struct mdio_mux_child_bus *cb = pb->children; 88 + 89 + while (cb) { 90 + mdiobus_unregister(cb->mii_bus); 91 + mdiobus_free(cb->mii_bus); 92 + cb = cb->next; 93 + } 94 + } 95 + 85 96 int mdio_mux_init(struct device *dev, 86 97 struct device_node *mux_node, 87 98 int (*switch_fn)(int cur, int desired, void *data), ··· 155 144 cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); 156 145 if (!cb) { 157 146 ret_val = -ENOMEM; 158 - continue; 147 + goto err_loop; 159 148 } 160 149 cb->bus_number = v; 161 150 cb->parent = pb; ··· 163 152 cb->mii_bus = mdiobus_alloc(); 164 153 if (!cb->mii_bus) { 165 154 ret_val = -ENOMEM; 166 - devm_kfree(dev, cb); 167 - continue; 155 + goto err_loop; 168 156 } 169 157 cb->mii_bus->priv = cb; 170 158 ··· 175 165 cb->mii_bus->write = mdio_mux_write; 176 166 r = of_mdiobus_register(cb->mii_bus, child_bus_node); 177 167 if (r) { 168 + mdiobus_free(cb->mii_bus); 169 + if (r == -EPROBE_DEFER) { 170 + ret_val = r; 171 + goto err_loop; 172 + } 173 + devm_kfree(dev, cb); 178 174 dev_err(dev, 179 175 "Error: Failed to register MDIO bus for child %pOF\n", 180 176 child_bus_node); 181 - mdiobus_free(cb->mii_bus); 182 - devm_kfree(dev, cb); 183 177 } else { 184 178 cb->next = pb->children; 185 179 pb->children = cb; ··· 195 181 } 196 182 197 183 dev_err(dev, "Error: No acceptable child buses found\n"); 198 - devm_kfree(dev, pb); 184 + 185 + err_loop: 186 + mdio_mux_uninit_children(pb); 187 + of_node_put(child_bus_node); 199 188 err_pb_kz: 200 189 put_device(&parent_bus->dev); 201 190 err_parent_bus: ··· 210 193 void mdio_mux_uninit(void *mux_handle) 211 194 { 212 195 struct mdio_mux_parent_bus *pb = mux_handle; 213 - struct mdio_mux_child_bus *cb = pb->children; 214 196 215 - while (cb) { 216 - mdiobus_unregister(cb->mii_bus); 217 - mdiobus_free(cb->mii_bus); 218 - cb = cb->next; 219 - } 220 - 197 + 
mdio_mux_uninit_children(pb); 221 198 put_device(&pb->mii_bus->dev); 222 199 } 223 200 EXPORT_SYMBOL_GPL(mdio_mux_uninit);
+30 -40
drivers/net/usb/asix_common.c
··· 63 63 value, index, data, size); 64 64 } 65 65 66 + static int asix_check_host_enable(struct usbnet *dev, int in_pm) 67 + { 68 + int i, ret; 69 + u8 smsr; 70 + 71 + for (i = 0; i < 30; ++i) { 72 + ret = asix_set_sw_mii(dev, in_pm); 73 + if (ret == -ENODEV || ret == -ETIMEDOUT) 74 + break; 75 + usleep_range(1000, 1100); 76 + ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 77 + 0, 0, 1, &smsr, in_pm); 78 + if (ret == -ENODEV) 79 + break; 80 + else if (ret < 0) 81 + continue; 82 + else if (smsr & AX_HOST_EN) 83 + break; 84 + } 85 + 86 + return ret; 87 + } 88 + 66 89 static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) 67 90 { 68 91 /* Reset the variables that have a lifetime outside of ··· 490 467 { 491 468 struct usbnet *dev = netdev_priv(netdev); 492 469 __le16 res; 493 - u8 smsr; 494 - int i = 0; 495 470 int ret; 496 471 497 472 mutex_lock(&dev->phy_mutex); 498 - do { 499 - ret = asix_set_sw_mii(dev, 0); 500 - if (ret == -ENODEV || ret == -ETIMEDOUT) 501 - break; 502 - usleep_range(1000, 1100); 503 - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 504 - 0, 0, 1, &smsr, 0); 505 - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); 473 + 474 + ret = asix_check_host_enable(dev, 0); 506 475 if (ret == -ENODEV || ret == -ETIMEDOUT) { 507 476 mutex_unlock(&dev->phy_mutex); 508 477 return ret; ··· 520 505 { 521 506 struct usbnet *dev = netdev_priv(netdev); 522 507 __le16 res = cpu_to_le16(val); 523 - u8 smsr; 524 - int i = 0; 525 508 int ret; 526 509 527 510 netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", 528 511 phy_id, loc, val); 529 512 530 513 mutex_lock(&dev->phy_mutex); 531 - do { 532 - ret = asix_set_sw_mii(dev, 0); 533 - if (ret == -ENODEV) 534 - break; 535 - usleep_range(1000, 1100); 536 - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 537 - 0, 0, 1, &smsr, 0); 538 - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); 539 514 515 + ret = asix_check_host_enable(dev, 0); 540 516 if 
(ret == -ENODEV) 541 517 goto out; 542 518 ··· 567 561 { 568 562 struct usbnet *dev = netdev_priv(netdev); 569 563 __le16 res; 570 - u8 smsr; 571 - int i = 0; 572 564 int ret; 573 565 574 566 mutex_lock(&dev->phy_mutex); 575 - do { 576 - ret = asix_set_sw_mii(dev, 1); 577 - if (ret == -ENODEV || ret == -ETIMEDOUT) 578 - break; 579 - usleep_range(1000, 1100); 580 - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 581 - 0, 0, 1, &smsr, 1); 582 - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); 567 + 568 + ret = asix_check_host_enable(dev, 1); 583 569 if (ret == -ENODEV || ret == -ETIMEDOUT) { 584 570 mutex_unlock(&dev->phy_mutex); 585 571 return ret; ··· 593 595 { 594 596 struct usbnet *dev = netdev_priv(netdev); 595 597 __le16 res = cpu_to_le16(val); 596 - u8 smsr; 597 - int i = 0; 598 598 int ret; 599 599 600 600 netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", 601 601 phy_id, loc, val); 602 602 603 603 mutex_lock(&dev->phy_mutex); 604 - do { 605 - ret = asix_set_sw_mii(dev, 1); 606 - if (ret == -ENODEV) 607 - break; 608 - usleep_range(1000, 1100); 609 - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 610 - 0, 0, 1, &smsr, 1); 611 - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); 604 + 605 + ret = asix_check_host_enable(dev, 1); 612 606 if (ret == -ENODEV) { 613 607 mutex_unlock(&dev->phy_mutex); 614 608 return;
+20 -3
drivers/net/usb/r8152.c
··· 3955 3955 case RTL_VER_06: 3956 3956 ocp_write_byte(tp, type, PLA_BP_EN, 0); 3957 3957 break; 3958 + case RTL_VER_14: 3959 + ocp_write_word(tp, type, USB_BP2_EN, 0); 3960 + 3961 + ocp_write_word(tp, type, USB_BP_8, 0); 3962 + ocp_write_word(tp, type, USB_BP_9, 0); 3963 + ocp_write_word(tp, type, USB_BP_10, 0); 3964 + ocp_write_word(tp, type, USB_BP_11, 0); 3965 + ocp_write_word(tp, type, USB_BP_12, 0); 3966 + ocp_write_word(tp, type, USB_BP_13, 0); 3967 + ocp_write_word(tp, type, USB_BP_14, 0); 3968 + ocp_write_word(tp, type, USB_BP_15, 0); 3969 + break; 3958 3970 case RTL_VER_08: 3959 3971 case RTL_VER_09: 3960 3972 case RTL_VER_10: 3961 3973 case RTL_VER_11: 3962 3974 case RTL_VER_12: 3963 3975 case RTL_VER_13: 3964 - case RTL_VER_14: 3965 3976 case RTL_VER_15: 3966 3977 default: 3967 3978 if (type == MCU_TYPE_USB) { 3968 - ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0); 3979 + ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0); 3969 3980 3970 3981 ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0); 3971 3982 ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0); ··· 4342 4331 case RTL_VER_11: 4343 4332 case RTL_VER_12: 4344 4333 case RTL_VER_13: 4345 - case RTL_VER_14: 4346 4334 case RTL_VER_15: 4347 4335 fw_reg = 0xf800; 4348 4336 bp_ba_addr = PLA_BP_BA; 4349 4337 bp_en_addr = PLA_BP_EN; 4350 4338 bp_start = PLA_BP_0; 4351 4339 max_bp = 8; 4340 + break; 4341 + case RTL_VER_14: 4342 + fw_reg = 0xf800; 4343 + bp_ba_addr = PLA_BP_BA; 4344 + bp_en_addr = USB_BP2_EN; 4345 + bp_start = PLA_BP_0; 4346 + max_bp = 16; 4352 4347 break; 4353 4348 default: 4354 4349 goto out;
+7 -7
drivers/net/virtio_net.c
··· 63 63 VIRTIO_NET_F_GUEST_CSUM 64 64 }; 65 65 66 - #define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ 66 + #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ 67 67 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ 68 68 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ 69 69 (1ULL << VIRTIO_NET_F_GUEST_UFO)) ··· 2504 2504 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 2505 2505 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 2506 2506 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { 2507 - NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); 2507 + NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); 2508 2508 return -EOPNOTSUPP; 2509 2509 } 2510 2510 ··· 2635 2635 u64 offloads; 2636 2636 int err; 2637 2637 2638 - if ((dev->features ^ features) & NETIF_F_LRO) { 2638 + if ((dev->features ^ features) & NETIF_F_GRO_HW) { 2639 2639 if (vi->xdp_enabled) 2640 2640 return -EBUSY; 2641 2641 2642 - if (features & NETIF_F_LRO) 2642 + if (features & NETIF_F_GRO_HW) 2643 2643 offloads = vi->guest_offloads_capable; 2644 2644 else 2645 2645 offloads = vi->guest_offloads_capable & 2646 - ~GUEST_OFFLOAD_LRO_MASK; 2646 + ~GUEST_OFFLOAD_GRO_HW_MASK; 2647 2647 2648 2648 err = virtnet_set_guest_offloads(vi, offloads); 2649 2649 if (err) ··· 3123 3123 dev->features |= NETIF_F_RXCSUM; 3124 3124 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 3125 3125 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) 3126 - dev->features |= NETIF_F_LRO; 3126 + dev->features |= NETIF_F_GRO_HW; 3127 3127 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) 3128 - dev->hw_features |= NETIF_F_LRO; 3128 + dev->hw_features |= NETIF_F_GRO_HW; 3129 3129 3130 3130 dev->vlan_features = dev->features; 3131 3131
+4
drivers/net/vrf.c
··· 1360 1360 bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); 1361 1361 bool is_ndisc = ipv6_ndisc_frame(skb); 1362 1362 1363 + nf_reset_ct(skb); 1364 + 1363 1365 /* loopback, multicast & non-ND link-local traffic; do not push through 1364 1366 * packet taps again. Reset pkt_type for upper layers to process skb. 1365 1367 * For strict packets with a source LLA, determine the dst using the ··· 1423 1421 skb->dev = vrf_dev; 1424 1422 skb->skb_iif = vrf_dev->ifindex; 1425 1423 IPCB(skb)->flags |= IPSKB_L3SLAVE; 1424 + 1425 + nf_reset_ct(skb); 1426 1426 1427 1427 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) 1428 1428 goto out;
+16 -9
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
··· 37 37 u32 sha1 = 0; 38 38 u16 mac_type = 0, rf_id = 0; 39 39 u8 *pnvm_data = NULL, *tmp; 40 + bool hw_match = false; 40 41 u32 size = 0; 41 42 int ret; 42 43 ··· 84 83 break; 85 84 } 86 85 86 + if (hw_match) 87 + break; 88 + 87 89 mac_type = le16_to_cpup((__le16 *)data); 88 90 rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16))); 89 91 ··· 94 90 "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n", 95 91 mac_type, rf_id); 96 92 97 - if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) || 98 - rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) { 99 - IWL_DEBUG_FW(trans, 100 - "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n", 101 - CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id); 102 - ret = -ENOENT; 103 - goto out; 104 - } 105 - 93 + if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) && 94 + rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id)) 95 + hw_match = true; 106 96 break; 107 97 case IWL_UCODE_TLV_SEC_RT: { 108 98 struct iwl_pnvm_section *section = (void *)data; ··· 147 149 } 148 150 149 151 done: 152 + if (!hw_match) { 153 + IWL_DEBUG_FW(trans, 154 + "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n", 155 + CSR_HW_REV_TYPE(trans->hw_rev), 156 + CSR_HW_RFID_TYPE(trans->hw_rf_id)); 157 + ret = -ENOENT; 158 + goto out; 159 + } 160 + 150 161 if (!size) { 151 162 IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n"); 152 163 ret = -ENOENT;
+69 -1
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 1110 1110 IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1111 1111 iwl_cfg_bz_a0_mr_a0, iwl_ax211_name), 1112 1112 1113 + /* SoF with JF2 */ 1114 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1115 + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1116 + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, 1117 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1118 + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), 1119 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1120 + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1121 + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, 1122 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1123 + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), 1124 + 1125 + /* SoF with JF */ 1126 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1127 + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1128 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, 1129 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1130 + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), 1131 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1132 + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1133 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, 1134 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1135 + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), 1136 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1137 + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1138 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, 1139 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1140 + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), 1141 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1142 + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1143 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, 1144 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1145 + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), 1146 + 1113 1147 /* So with GF */ 1114 1148 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1115 1149 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1116 1150 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, 1117 1151 IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1118 - iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name) 1152 + iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), 1153 + 1154 + /* So with JF2 */ 1155 + 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1156 + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1157 + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, 1158 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1159 + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), 1160 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1161 + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1162 + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, 1163 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1164 + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), 1165 + 1166 + /* So with JF */ 1167 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1168 + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1169 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, 1170 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1171 + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), 1172 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1173 + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1174 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, 1175 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1176 + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), 1177 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1178 + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1179 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, 1180 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1181 + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), 1182 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1183 + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1184 + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, 1185 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1186 + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name) 1119 1187 1120 1188 #endif /* CONFIG_IWLMVM */ 1121 1189 };
+1 -1
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
··· 111 111 case WLAN_CIPHER_SUITE_SMS4: 112 112 return MCU_CIPHER_WAPI; 113 113 default: 114 - return MT_CIPHER_NONE; 114 + return MCU_CIPHER_NONE; 115 115 } 116 116 } 117 117
+2 -1
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
··· 1073 1073 }; 1074 1074 1075 1075 enum mcu_cipher_type { 1076 - MCU_CIPHER_WEP40 = 1, 1076 + MCU_CIPHER_NONE = 0, 1077 + MCU_CIPHER_WEP40, 1077 1078 MCU_CIPHER_WEP104, 1078 1079 MCU_CIPHER_WEP128, 1079 1080 MCU_CIPHER_TKIP,
+1 -1
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
··· 111 111 case WLAN_CIPHER_SUITE_SMS4: 112 112 return MCU_CIPHER_WAPI; 113 113 default: 114 - return MT_CIPHER_NONE; 114 + return MCU_CIPHER_NONE; 115 115 } 116 116 } 117 117
+2 -1
drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
··· 199 199 } __packed; 200 200 201 201 enum mcu_cipher_type { 202 - MCU_CIPHER_WEP40 = 1, 202 + MCU_CIPHER_NONE = 0, 203 + MCU_CIPHER_WEP40, 203 204 MCU_CIPHER_WEP104, 204 205 MCU_CIPHER_WEP128, 205 206 MCU_CIPHER_TKIP,
+3 -4
drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
··· 64 64 65 65 int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index) 66 66 { 67 - int array_size = ARRAY_SIZE(modem_cfg); 68 - 69 - if (index >= array_size) { 70 - pr_err("index: %d and array_size %d", index, array_size); 67 + if (index >= ARRAY_SIZE(modem_cfg)) { 68 + pr_err("index: %d and array size %zu", index, 69 + ARRAY_SIZE(modem_cfg)); 71 70 return -ECHRNG; 72 71 } 73 72
+11 -6
drivers/nvdimm/namespace_devs.c
··· 2527 2527 2528 2528 static int init_active_labels(struct nd_region *nd_region) 2529 2529 { 2530 - int i; 2530 + int i, rc = 0; 2531 2531 2532 2532 for (i = 0; i < nd_region->ndr_mappings; i++) { 2533 2533 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; ··· 2546 2546 else if (test_bit(NDD_LABELING, &nvdimm->flags)) 2547 2547 /* fail, labels needed to disambiguate dpa */; 2548 2548 else 2549 - return 0; 2549 + continue; 2550 2550 2551 2551 dev_err(&nd_region->dev, "%s: is %s, failing probe\n", 2552 2552 dev_name(&nd_mapping->nvdimm->dev), 2553 2553 test_bit(NDD_LOCKED, &nvdimm->flags) 2554 2554 ? "locked" : "disabled"); 2555 - return -ENXIO; 2555 + rc = -ENXIO; 2556 + goto out; 2556 2557 } 2557 2558 nd_mapping->ndd = ndd; 2558 2559 atomic_inc(&nvdimm->busy); ··· 2587 2586 break; 2588 2587 } 2589 2588 2590 - if (i < nd_region->ndr_mappings) { 2589 + if (i < nd_region->ndr_mappings) 2590 + rc = -ENOMEM; 2591 + 2592 + out: 2593 + if (rc) { 2591 2594 deactivate_labels(nd_region); 2592 - return -ENOMEM; 2595 + return rc; 2593 2596 } 2594 2597 2595 2598 return devm_add_action_or_reset(&nd_region->dev, deactivate_labels, 2596 - nd_region); 2599 + nd_region); 2597 2600 } 2598 2601 2599 2602 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
+78 -49
drivers/pci/msi.c
··· 143 143 * reliably as devices without an INTx disable bit will then generate a 144 144 * level IRQ which will never be cleared. 145 145 */ 146 - u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 146 + void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 147 147 { 148 - u32 mask_bits = desc->masked; 148 + raw_spinlock_t *lock = &desc->dev->msi_lock; 149 + unsigned long flags; 149 150 150 151 if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) 151 - return 0; 152 + return; 152 153 153 - mask_bits &= ~mask; 154 - mask_bits |= flag; 154 + raw_spin_lock_irqsave(lock, flags); 155 + desc->masked &= ~mask; 156 + desc->masked |= flag; 155 157 pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, 156 - mask_bits); 157 - 158 - return mask_bits; 158 + desc->masked); 159 + raw_spin_unlock_irqrestore(lock, flags); 159 160 } 160 161 161 162 static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 162 163 { 163 - desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); 164 + __pci_msi_desc_mask_irq(desc, mask, flag); 164 165 } 165 166 166 167 static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) ··· 290 289 /* Don't touch the hardware now */ 291 290 } else if (entry->msi_attrib.is_msix) { 292 291 void __iomem *base = pci_msix_desc_addr(entry); 292 + bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); 293 293 294 294 if (!base) 295 295 goto skip; 296 296 297 + /* 298 + * The specification mandates that the entry is masked 299 + * when the message is modified: 300 + * 301 + * "If software changes the Address or Data value of an 302 + * entry while the entry is unmasked, the result is 303 + * undefined." 
304 + */ 305 + if (unmasked) 306 + __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT); 307 + 297 308 writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); 298 309 writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); 299 310 writel(msg->data, base + PCI_MSIX_ENTRY_DATA); 311 + 312 + if (unmasked) 313 + __pci_msix_desc_mask_irq(entry, 0); 314 + 315 + /* Ensure that the writes are visible in the device */ 316 + readl(base + PCI_MSIX_ENTRY_DATA); 300 317 } else { 301 318 int pos = dev->msi_cap; 302 319 u16 msgctl; ··· 335 316 pci_write_config_word(dev, pos + PCI_MSI_DATA_32, 336 317 msg->data); 337 318 } 319 + /* Ensure that the writes are visible in the device */ 320 + pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); 338 321 } 339 322 340 323 skip: ··· 657 636 /* Configure MSI capability structure */ 658 637 ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); 659 638 if (ret) { 660 - msi_mask_irq(entry, mask, ~mask); 639 + msi_mask_irq(entry, mask, 0); 661 640 free_msi_irqs(dev); 662 641 return ret; 663 642 } 664 643 665 644 ret = msi_verify_entries(dev); 666 645 if (ret) { 667 - msi_mask_irq(entry, mask, ~mask); 646 + msi_mask_irq(entry, mask, 0); 668 647 free_msi_irqs(dev); 669 648 return ret; 670 649 } 671 650 672 651 ret = populate_msi_sysfs(dev); 673 652 if (ret) { 674 - msi_mask_irq(entry, mask, ~mask); 653 + msi_mask_irq(entry, mask, 0); 675 654 free_msi_irqs(dev); 676 655 return ret; 677 656 } ··· 712 691 { 713 692 struct irq_affinity_desc *curmsk, *masks = NULL; 714 693 struct msi_desc *entry; 694 + void __iomem *addr; 715 695 int ret, i; 716 696 int vec_count = pci_msix_vec_count(dev); 717 697 ··· 733 711 734 712 entry->msi_attrib.is_msix = 1; 735 713 entry->msi_attrib.is_64 = 1; 714 + 736 715 if (entries) 737 716 entry->msi_attrib.entry_nr = entries[i].entry; 738 717 else ··· 745 722 entry->msi_attrib.default_irq = dev->irq; 746 723 entry->mask_base = base; 747 724 725 + addr = pci_msix_desc_addr(entry); 726 + if (addr) 727 
+ entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); 728 + 748 729 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); 749 730 if (masks) 750 731 curmsk++; ··· 759 732 return ret; 760 733 } 761 734 762 - static void msix_program_entries(struct pci_dev *dev, 763 - struct msix_entry *entries) 735 + static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) 764 736 { 765 737 struct msi_desc *entry; 766 - int i = 0; 767 - void __iomem *desc_addr; 768 738 769 739 for_each_pci_msi_entry(entry, dev) { 770 - if (entries) 771 - entries[i++].vector = entry->irq; 772 - 773 - desc_addr = pci_msix_desc_addr(entry); 774 - if (desc_addr) 775 - entry->masked = readl(desc_addr + 776 - PCI_MSIX_ENTRY_VECTOR_CTRL); 777 - else 778 - entry->masked = 0; 779 - 780 - msix_mask_irq(entry, 1); 740 + if (entries) { 741 + entries->vector = entry->irq; 742 + entries++; 743 + } 781 744 } 745 + } 746 + 747 + static void msix_mask_all(void __iomem *base, int tsize) 748 + { 749 + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; 750 + int i; 751 + 752 + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) 753 + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); 782 754 } 783 755 784 756 /** ··· 794 768 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, 795 769 int nvec, struct irq_affinity *affd) 796 770 { 797 - int ret; 798 - u16 control; 799 771 void __iomem *base; 772 + int ret, tsize; 773 + u16 control; 800 774 801 - /* Ensure MSI-X is disabled while it is set up */ 802 - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 775 + /* 776 + * Some devices require MSI-X to be enabled before the MSI-X 777 + * registers can be accessed. Mask all the vectors to prevent 778 + * interrupts coming in before they're fully set up. 
779 + */ 780 + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | 781 + PCI_MSIX_FLAGS_ENABLE); 803 782 804 783 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); 805 784 /* Request & Map MSI-X table region */ 806 - base = msix_map_region(dev, msix_table_size(control)); 807 - if (!base) 808 - return -ENOMEM; 785 + tsize = msix_table_size(control); 786 + base = msix_map_region(dev, tsize); 787 + if (!base) { 788 + ret = -ENOMEM; 789 + goto out_disable; 790 + } 791 + 792 + /* Ensure that all table entries are masked. */ 793 + msix_mask_all(base, tsize); 809 794 810 795 ret = msix_setup_entries(dev, base, entries, nvec, affd); 811 796 if (ret) 812 - return ret; 797 + goto out_disable; 813 798 814 799 ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 815 800 if (ret) ··· 831 794 if (ret) 832 795 goto out_free; 833 796 834 - /* 835 - * Some devices require MSI-X to be enabled before we can touch the 836 - * MSI-X registers. We need to mask all the vectors to prevent 837 - * interrupts coming in before they're fully set up. 
838 - */ 839 - pci_msix_clear_and_set_ctrl(dev, 0, 840 - PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); 841 - 842 - msix_program_entries(dev, entries); 797 + msix_update_entries(dev, entries); 843 798 844 799 ret = populate_msi_sysfs(dev); 845 800 if (ret) ··· 864 835 865 836 out_free: 866 837 free_msi_irqs(dev); 838 + 839 + out_disable: 840 + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 867 841 868 842 return ret; 869 843 } ··· 962 930 963 931 /* Return the device with MSI unmasked as initial states */ 964 932 mask = msi_mask(desc->msi_attrib.multi_cap); 965 - /* Keep cached state to be restored */ 966 - __pci_msi_desc_mask_irq(desc, mask, ~mask); 933 + msi_mask_irq(desc, mask, 0); 967 934 968 935 /* Restore dev->irq to its default pin-assertion IRQ */ 969 936 dev->irq = desc->msi_attrib.default_irq; ··· 1047 1016 } 1048 1017 1049 1018 /* Return the device with MSI-X masked as initial states */ 1050 - for_each_pci_msi_entry(entry, dev) { 1051 - /* Keep cached states to be restored */ 1019 + for_each_pci_msi_entry(entry, dev) 1052 1020 __pci_msix_desc_mask_irq(entry, 1); 1053 - } 1054 1021 1055 1022 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 1056 1023 pci_intx_for_msi(dev, 1);
+13 -13
drivers/pinctrl/intel/pinctrl-tigerlake.c
··· 701 701 702 702 static const struct intel_padgroup tglh_community0_gpps[] = { 703 703 TGL_GPP(0, 0, 24, 0), /* GPP_A */ 704 - TGL_GPP(1, 25, 44, 128), /* GPP_R */ 705 - TGL_GPP(2, 45, 70, 32), /* GPP_B */ 706 - TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP), /* vGPIO_0 */ 704 + TGL_GPP(1, 25, 44, 32), /* GPP_R */ 705 + TGL_GPP(2, 45, 70, 64), /* GPP_B */ 706 + TGL_GPP(3, 71, 78, 96), /* vGPIO_0 */ 707 707 }; 708 708 709 709 static const struct intel_padgroup tglh_community1_gpps[] = { 710 - TGL_GPP(0, 79, 104, 96), /* GPP_D */ 711 - TGL_GPP(1, 105, 128, 64), /* GPP_C */ 712 - TGL_GPP(2, 129, 136, 160), /* GPP_S */ 713 - TGL_GPP(3, 137, 153, 192), /* GPP_G */ 714 - TGL_GPP(4, 154, 180, 224), /* vGPIO */ 710 + TGL_GPP(0, 79, 104, 128), /* GPP_D */ 711 + TGL_GPP(1, 105, 128, 160), /* GPP_C */ 712 + TGL_GPP(2, 129, 136, 192), /* GPP_S */ 713 + TGL_GPP(3, 137, 153, 224), /* GPP_G */ 714 + TGL_GPP(4, 154, 180, 256), /* vGPIO */ 715 715 }; 716 716 717 717 static const struct intel_padgroup tglh_community3_gpps[] = { 718 - TGL_GPP(0, 181, 193, 256), /* GPP_E */ 719 - TGL_GPP(1, 194, 217, 288), /* GPP_F */ 718 + TGL_GPP(0, 181, 193, 288), /* GPP_E */ 719 + TGL_GPP(1, 194, 217, 320), /* GPP_F */ 720 720 }; 721 721 722 722 static const struct intel_padgroup tglh_community4_gpps[] = { 723 - TGL_GPP(0, 218, 241, 320), /* GPP_H */ 723 + TGL_GPP(0, 218, 241, 352), /* GPP_H */ 724 724 TGL_GPP(1, 242, 251, 384), /* GPP_J */ 725 - TGL_GPP(2, 252, 266, 352), /* GPP_K */ 725 + TGL_GPP(2, 252, 266, 416), /* GPP_K */ 726 726 }; 727 727 728 728 static const struct intel_padgroup tglh_community5_gpps[] = { 729 - TGL_GPP(0, 267, 281, 416), /* GPP_I */ 729 + TGL_GPP(0, 267, 281, 448), /* GPP_I */ 730 730 TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP), /* JTAG */ 731 731 }; 732 732
+3 -5
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
··· 925 925 err = hw->soc->bias_set(hw, desc, pullup); 926 926 if (err) 927 927 return err; 928 - } else if (hw->soc->bias_set_combo) { 929 - err = hw->soc->bias_set_combo(hw, desc, pullup, arg); 930 - if (err) 931 - return err; 932 928 } else { 933 - return -ENOTSUPP; 929 + err = mtk_pinconf_bias_set_rev1(hw, desc, pullup); 930 + if (err) 931 + err = mtk_pinconf_bias_set(hw, desc, pullup); 934 932 } 935 933 } 936 934
+1 -2
drivers/pinctrl/pinctrl-amd.c
··· 444 444 unsigned long flags; 445 445 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 446 446 struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 447 - u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) | 448 - BIT(WAKE_CNTRL_OFF_S4); 447 + u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3); 449 448 450 449 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 451 450 pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+20 -6
drivers/pinctrl/pinctrl-k210.c
··· 950 950 return ret; 951 951 952 952 pdata->pclk = devm_clk_get_optional(dev, "pclk"); 953 - if (!IS_ERR(pdata->pclk)) 954 - clk_prepare_enable(pdata->pclk); 953 + if (!IS_ERR(pdata->pclk)) { 954 + ret = clk_prepare_enable(pdata->pclk); 955 + if (ret) 956 + goto disable_clk; 957 + } 955 958 956 959 pdata->sysctl_map = 957 960 syscon_regmap_lookup_by_phandle_args(np, 958 961 "canaan,k210-sysctl-power", 959 962 1, &pdata->power_offset); 960 - if (IS_ERR(pdata->sysctl_map)) 961 - return PTR_ERR(pdata->sysctl_map); 963 + if (IS_ERR(pdata->sysctl_map)) { 964 + ret = PTR_ERR(pdata->sysctl_map); 965 + goto disable_pclk; 966 + } 962 967 963 968 k210_fpioa_init_ties(pdata); 964 969 965 970 pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata); 966 - if (IS_ERR(pdata->pctl)) 967 - return PTR_ERR(pdata->pctl); 971 + if (IS_ERR(pdata->pctl)) { 972 + ret = PTR_ERR(pdata->pctl); 973 + goto disable_pclk; 974 + } 968 975 969 976 return 0; 977 + 978 + disable_pclk: 979 + clk_disable_unprepare(pdata->pclk); 980 + disable_clk: 981 + clk_disable_unprepare(pdata->clk); 982 + 983 + return ret; 970 984 } 971 985 972 986 static const struct of_device_id k210_fpioa_dt_ids[] = {
+31 -32
drivers/pinctrl/qcom/Kconfig
··· 13 13 14 14 config PINCTRL_APQ8064 15 15 tristate "Qualcomm APQ8064 pin controller driver" 16 - depends on GPIOLIB && OF 16 + depends on OF 17 17 depends on PINCTRL_MSM 18 18 help 19 19 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 21 21 22 22 config PINCTRL_APQ8084 23 23 tristate "Qualcomm APQ8084 pin controller driver" 24 - depends on GPIOLIB && OF 24 + depends on OF 25 25 depends on PINCTRL_MSM 26 26 help 27 27 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 29 29 30 30 config PINCTRL_IPQ4019 31 31 tristate "Qualcomm IPQ4019 pin controller driver" 32 - depends on GPIOLIB && OF 32 + depends on OF 33 33 depends on PINCTRL_MSM 34 34 help 35 35 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 37 37 38 38 config PINCTRL_IPQ8064 39 39 tristate "Qualcomm IPQ8064 pin controller driver" 40 - depends on GPIOLIB && OF 40 + depends on OF 41 41 depends on PINCTRL_MSM 42 42 help 43 43 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 45 45 46 46 config PINCTRL_IPQ8074 47 47 tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver" 48 - depends on GPIOLIB && OF 48 + depends on OF 49 49 depends on PINCTRL_MSM 50 50 help 51 51 This is the pinctrl, pinmux, pinconf and gpiolib driver for ··· 55 55 56 56 config PINCTRL_IPQ6018 57 57 tristate "Qualcomm Technologies, Inc. 
IPQ6018 pin controller driver" 58 - depends on GPIOLIB && OF 58 + depends on OF 59 59 depends on PINCTRL_MSM 60 60 help 61 61 This is the pinctrl, pinmux, pinconf and gpiolib driver for ··· 65 65 66 66 config PINCTRL_MSM8226 67 67 tristate "Qualcomm 8226 pin controller driver" 68 - depends on GPIOLIB && OF 68 + depends on OF 69 69 depends on PINCTRL_MSM 70 70 help 71 71 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 74 74 75 75 config PINCTRL_MSM8660 76 76 tristate "Qualcomm 8660 pin controller driver" 77 - depends on GPIOLIB && OF 77 + depends on OF 78 78 depends on PINCTRL_MSM 79 79 help 80 80 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 82 82 83 83 config PINCTRL_MSM8960 84 84 tristate "Qualcomm 8960 pin controller driver" 85 - depends on GPIOLIB && OF 85 + depends on OF 86 86 depends on PINCTRL_MSM 87 87 help 88 88 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 90 90 91 91 config PINCTRL_MDM9615 92 92 tristate "Qualcomm 9615 pin controller driver" 93 - depends on GPIOLIB && OF 93 + depends on OF 94 94 depends on PINCTRL_MSM 95 95 help 96 96 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 98 98 99 99 config PINCTRL_MSM8X74 100 100 tristate "Qualcomm 8x74 pin controller driver" 101 - depends on GPIOLIB && OF 101 + depends on OF 102 102 depends on PINCTRL_MSM 103 103 help 104 104 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 106 106 107 107 config PINCTRL_MSM8916 108 108 tristate "Qualcomm 8916 pin controller driver" 109 - depends on GPIOLIB && OF 109 + depends on OF 110 110 depends on PINCTRL_MSM 111 111 help 112 112 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 114 114 115 115 config PINCTRL_MSM8953 116 116 tristate "Qualcomm 8953 pin controller driver" 117 - depends on GPIOLIB && OF 117 + depends on OF 118 118 depends on PINCTRL_MSM 119 119 help 120 120 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 124 124 125 
125 config PINCTRL_MSM8976 126 126 tristate "Qualcomm 8976 pin controller driver" 127 - depends on GPIOLIB && OF 127 + depends on OF 128 128 depends on PINCTRL_MSM 129 129 help 130 130 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 134 134 135 135 config PINCTRL_MSM8994 136 136 tristate "Qualcomm 8994 pin controller driver" 137 - depends on GPIOLIB && OF 137 + depends on OF 138 138 depends on PINCTRL_MSM 139 139 help 140 140 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 143 143 144 144 config PINCTRL_MSM8996 145 145 tristate "Qualcomm MSM8996 pin controller driver" 146 - depends on GPIOLIB && OF 146 + depends on OF 147 147 depends on PINCTRL_MSM 148 148 help 149 149 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 151 151 152 152 config PINCTRL_MSM8998 153 153 tristate "Qualcomm MSM8998 pin controller driver" 154 - depends on GPIOLIB && OF 154 + depends on OF 155 155 depends on PINCTRL_MSM 156 156 help 157 157 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 159 159 160 160 config PINCTRL_QCS404 161 161 tristate "Qualcomm QCS404 pin controller driver" 162 - depends on GPIOLIB && OF 162 + depends on OF 163 163 depends on PINCTRL_MSM 164 164 help 165 165 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 167 167 168 168 config PINCTRL_QDF2XXX 169 169 tristate "Qualcomm Technologies QDF2xxx pin controller driver" 170 - depends on GPIOLIB && ACPI 170 + depends on ACPI 171 171 depends on PINCTRL_MSM 172 172 help 173 173 This is the GPIO driver for the TLMM block found on the ··· 175 175 176 176 config PINCTRL_QCOM_SPMI_PMIC 177 177 tristate "Qualcomm SPMI PMIC pin controller driver" 178 - depends on GPIOLIB && OF && SPMI 178 + depends on OF && SPMI 179 179 select REGMAP_SPMI 180 180 select PINMUX 181 181 select PINCONF ··· 190 190 191 191 config PINCTRL_QCOM_SSBI_PMIC 192 192 tristate "Qualcomm SSBI PMIC pin controller driver" 193 - depends on GPIOLIB && OF 193 + depends on 
OF 194 194 select PINMUX 195 195 select PINCONF 196 196 select GENERIC_PINCONF ··· 204 204 205 205 config PINCTRL_SC7180 206 206 tristate "Qualcomm Technologies Inc SC7180 pin controller driver" 207 - depends on GPIOLIB && OF 207 + depends on OF 208 208 depends on PINCTRL_MSM 209 209 help 210 210 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 213 213 214 214 config PINCTRL_SC7280 215 215 tristate "Qualcomm Technologies Inc SC7280 pin controller driver" 216 - depends on GPIOLIB && OF 216 + depends on OF 217 217 depends on PINCTRL_MSM 218 218 help 219 219 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 222 222 223 223 config PINCTRL_SC8180X 224 224 tristate "Qualcomm Technologies Inc SC8180x pin controller driver" 225 - depends on GPIOLIB && (OF || ACPI) 225 + depends on (OF || ACPI) 226 226 depends on PINCTRL_MSM 227 227 help 228 228 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 231 231 232 232 config PINCTRL_SDM660 233 233 tristate "Qualcomm Technologies Inc SDM660 pin controller driver" 234 - depends on GPIOLIB && OF 234 + depends on OF 235 235 depends on PINCTRL_MSM 236 236 help 237 237 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 240 240 241 241 config PINCTRL_SDM845 242 242 tristate "Qualcomm Technologies Inc SDM845 pin controller driver" 243 - depends on GPIOLIB && (OF || ACPI) 243 + depends on (OF || ACPI) 244 244 depends on PINCTRL_MSM 245 245 help 246 246 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 249 249 250 250 config PINCTRL_SDX55 251 251 tristate "Qualcomm Technologies Inc SDX55 pin controller driver" 252 - depends on GPIOLIB && OF 252 + depends on OF 253 253 depends on PINCTRL_MSM 254 254 help 255 255 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 258 258 259 259 config PINCTRL_SM6125 260 260 tristate "Qualcomm Technologies Inc SM6125 pin controller driver" 261 - depends on GPIOLIB && OF 261 + depends on OF 262 262 depends 
on PINCTRL_MSM 263 263 help 264 264 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 267 267 268 268 config PINCTRL_SM8150 269 269 tristate "Qualcomm Technologies Inc SM8150 pin controller driver" 270 - depends on GPIOLIB && OF 270 + depends on OF 271 271 depends on PINCTRL_MSM 272 272 help 273 273 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 276 276 277 277 config PINCTRL_SM8250 278 278 tristate "Qualcomm Technologies Inc SM8250 pin controller driver" 279 - depends on GPIOLIB && OF 279 + depends on OF 280 280 depends on PINCTRL_MSM 281 281 help 282 282 This is the pinctrl, pinmux, pinconf and gpiolib driver for the ··· 285 285 286 286 config PINCTRL_SM8350 287 287 tristate "Qualcomm Technologies Inc SM8350 pin controller driver" 288 - depends on GPIOLIB && OF 289 - select PINCTRL_MSM 288 + depends on PINCTRL_MSM 290 289 help 291 290 This is the pinctrl, pinmux, pinconf and gpiolib driver for the 292 291 Qualcomm Technologies Inc TLMM block found on the Qualcomm
+5 -3
drivers/pinctrl/sunxi/pinctrl-sunxi.c
··· 1219 1219 } 1220 1220 1221 1221 /* 1222 - * We suppose that we won't have any more functions than pins, 1223 - * we'll reallocate that later anyway 1222 + * Find an upper bound for the maximum number of functions: in 1223 + * the worst case we have gpio_in, gpio_out, irq and up to four 1224 + * special functions per pin, plus one entry for the sentinel. 1225 + * We'll reallocate that later anyway. 1224 1226 */ 1225 - pctl->functions = kcalloc(pctl->ngroups, 1227 + pctl->functions = kcalloc(4 * pctl->ngroups + 4, 1226 1228 sizeof(*pctl->functions), 1227 1229 GFP_KERNEL); 1228 1230 if (!pctl->functions)
+28
drivers/platform/x86/asus-nb-wmi.c
··· 41 41 module_param(wapf, uint, 0444); 42 42 MODULE_PARM_DESC(wapf, "WAPF value"); 43 43 44 + static int tablet_mode_sw = -1; 45 + module_param(tablet_mode_sw, uint, 0444); 46 + MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip"); 47 + 44 48 static struct quirk_entry *quirks; 45 49 46 50 static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str, ··· 462 458 }, 463 459 .driver_data = &quirk_asus_use_lid_flip_devid, 464 460 }, 461 + { 462 + .callback = dmi_matched, 463 + .ident = "ASUS TP200s / E205SA", 464 + .matches = { 465 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 466 + DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"), 467 + }, 468 + .driver_data = &quirk_asus_use_lid_flip_devid, 469 + }, 465 470 {}, 466 471 }; 467 472 ··· 489 476 quirks->wapf = wapf; 490 477 else 491 478 wapf = quirks->wapf; 479 + 480 + switch (tablet_mode_sw) { 481 + case 0: 482 + quirks->use_kbd_dock_devid = false; 483 + quirks->use_lid_flip_devid = false; 484 + break; 485 + case 1: 486 + quirks->use_kbd_dock_devid = true; 487 + quirks->use_lid_flip_devid = false; 488 + break; 489 + case 2: 490 + quirks->use_kbd_dock_devid = false; 491 + quirks->use_lid_flip_devid = true; 492 + break; 493 + } 492 494 493 495 if (quirks->i8042_filter) { 494 496 ret = i8042_install_filter(quirks->i8042_filter);
+2
drivers/platform/x86/gigabyte-wmi.c
··· 140 140 }} 141 141 142 142 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { 143 + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"), 143 144 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"), 144 145 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"), 145 146 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"), ··· 148 147 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"), 149 148 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"), 150 149 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"), 150 + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"), 151 151 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"), 152 152 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"), 153 153 { }
+2 -1
drivers/ptp/Kconfig
··· 103 103 config PTP_1588_CLOCK_PCH 104 104 tristate "Intel PCH EG20T as PTP clock" 105 105 depends on X86_32 || COMPILE_TEST 106 - depends on HAS_IOMEM && NET 106 + depends on HAS_IOMEM && PCI 107 + depends on NET 107 108 depends on PTP_1588_CLOCK 108 109 help 109 110 This driver adds support for using the PCH EG20T as a PTP
+2 -1
drivers/scsi/lpfc/lpfc_init.c
··· 13193 13193 if (!phba) 13194 13194 return -ENOMEM; 13195 13195 13196 + INIT_LIST_HEAD(&phba->poll_list); 13197 + 13196 13198 /* Perform generic PCI device enabling operation */ 13197 13199 error = lpfc_enable_pci_dev(phba); 13198 13200 if (error) ··· 13329 13327 /* Enable RAS FW log support */ 13330 13328 lpfc_sli4_ras_setup(phba); 13331 13329 13332 - INIT_LIST_HEAD(&phba->poll_list); 13333 13330 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 13334 13331 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 13335 13332
+1 -1
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 7851 7851 return r; 7852 7852 } 7853 7853 7854 - rc = _base_static_config_pages(ioc); 7854 + r = _base_static_config_pages(ioc); 7855 7855 if (r) 7856 7856 return r; 7857 7857
+12 -2
drivers/scsi/storvsc_drv.c
··· 1199 1199 vstor_packet->vm_srb.sense_info_length); 1200 1200 1201 1201 if (vstor_packet->vm_srb.scsi_status != 0 || 1202 - vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) 1203 - storvsc_log(device, STORVSC_LOGGING_ERROR, 1202 + vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) { 1203 + 1204 + /* 1205 + * Log TEST_UNIT_READY errors only as warnings. Hyper-V can 1206 + * return errors when detecting devices using TEST_UNIT_READY, 1207 + * and logging these as errors produces unhelpful noise. 1208 + */ 1209 + int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ? 1210 + STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR; 1211 + 1212 + storvsc_log(device, loglevel, 1204 1213 "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", 1205 1214 request->cmd->request->tag, 1206 1215 stor_pkt->vm_srb.cdb[0], 1207 1216 vstor_packet->vm_srb.scsi_status, 1208 1217 vstor_packet->vm_srb.srb_status, 1209 1218 vstor_packet->status); 1219 + } 1210 1220 1211 1221 if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION && 1212 1222 (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
+2 -16
drivers/usb/dwc3/gadget.c
··· 1741 1741 { 1742 1742 struct dwc3_request *req; 1743 1743 struct dwc3_request *tmp; 1744 - struct list_head local; 1745 1744 struct dwc3 *dwc = dep->dwc; 1746 1745 1747 - restart: 1748 - list_replace_init(&dep->cancelled_list, &local); 1749 - 1750 - list_for_each_entry_safe(req, tmp, &local, list) { 1746 + list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) { 1751 1747 dwc3_gadget_ep_skip_trbs(dep, req); 1752 1748 switch (req->status) { 1753 1749 case DWC3_REQUEST_STATUS_DISCONNECTED: ··· 1761 1765 break; 1762 1766 } 1763 1767 } 1764 - 1765 - if (!list_empty(&dep->cancelled_list)) 1766 - goto restart; 1767 1768 } 1768 1769 1769 1770 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, ··· 2969 2976 { 2970 2977 struct dwc3_request *req; 2971 2978 struct dwc3_request *tmp; 2972 - struct list_head local; 2973 2979 2974 - restart: 2975 - list_replace_init(&dep->started_list, &local); 2976 - 2977 - list_for_each_entry_safe(req, tmp, &local, list) { 2980 + list_for_each_entry_safe(req, tmp, &dep->started_list, list) { 2978 2981 int ret; 2979 2982 2980 2983 ret = dwc3_gadget_ep_cleanup_completed_request(dep, event, ··· 2978 2989 if (ret) 2979 2990 break; 2980 2991 } 2981 - 2982 - if (!list_empty(&dep->started_list)) 2983 - goto restart; 2984 2992 } 2985 2993 2986 2994 static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
+2 -2
drivers/vdpa/ifcvf/ifcvf_main.c
··· 493 493 494 494 adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, 495 495 dev, &ifc_vdpa_ops, NULL); 496 - if (adapter == NULL) { 496 + if (IS_ERR(adapter)) { 497 497 IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); 498 - return -ENOMEM; 498 + return PTR_ERR(adapter); 499 499 } 500 500 501 501 pci_set_master(pdev);
-9
drivers/vdpa/mlx5/core/mr.c
··· 512 512 mutex_unlock(&mr->mkey_mtx); 513 513 } 514 514 515 - static bool map_empty(struct vhost_iotlb *iotlb) 516 - { 517 - return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX); 518 - } 519 - 520 515 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, 521 516 bool *change_map) 522 517 { ··· 519 524 int err = 0; 520 525 521 526 *change_map = false; 522 - if (map_empty(iotlb)) { 523 - mlx5_vdpa_destroy_mr(mvdev); 524 - return 0; 525 - } 526 527 mutex_lock(&mr->mkey_mtx); 527 528 if (mr->initialized) { 528 529 mlx5_vdpa_info(mvdev, "memory map update\n");
+10 -4
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 752 752 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); 753 753 754 754 /* prefer split queue */ 755 - if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED) 756 - return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED; 755 + if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT) 756 + return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT; 757 757 758 - WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)); 758 + WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)); 759 759 760 - return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT; 760 + return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED; 761 761 } 762 762 763 763 static bool vq_is_tx(u16 idx) ··· 2029 2029 return -ENOSPC; 2030 2030 2031 2031 mdev = mgtdev->madev->mdev; 2032 + if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) & 2033 + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) { 2034 + dev_warn(mdev->device, "missing support for split virtqueues\n"); 2035 + return -EOPNOTSUPP; 2036 + } 2037 + 2032 2038 /* we save one virtqueue for control virtqueue should we require it */ 2033 2039 max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues); 2034 2040 max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+3 -1
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 251 251 252 252 vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 253 253 dev_attr->name); 254 - if (!vdpasim) 254 + if (IS_ERR(vdpasim)) { 255 + ret = PTR_ERR(vdpasim); 255 256 goto err_alloc; 257 + } 256 258 257 259 vdpasim->dev_attr = *dev_attr; 258 260 INIT_WORK(&vdpasim->work, dev_attr->work_fn);
+2 -2
drivers/vdpa/virtio_pci/vp_vdpa.c
··· 436 436 437 437 vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa, 438 438 dev, &vp_vdpa_ops, NULL); 439 - if (vp_vdpa == NULL) { 439 + if (IS_ERR(vp_vdpa)) { 440 440 dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n"); 441 - return -ENOMEM; 441 + return PTR_ERR(vp_vdpa); 442 442 } 443 443 444 444 mdev = &vp_vdpa->mdev;
+2 -1
drivers/vhost/vdpa.c
··· 614 614 long pinned; 615 615 int ret = 0; 616 616 617 - if (msg->iova < v->range.first || 617 + if (msg->iova < v->range.first || !msg->size || 618 + msg->iova > U64_MAX - msg->size + 1 || 618 619 msg->iova + msg->size - 1 > v->range.last) 619 620 return -EINVAL; 620 621
+8 -2
drivers/vhost/vhost.c
··· 735 735 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); 736 736 } 737 737 738 + /* Make sure 64 bit math will not overflow. */ 738 739 static bool vhost_overflow(u64 uaddr, u64 size) 739 740 { 740 - /* Make sure 64 bit math will not overflow. */ 741 - return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size; 741 + if (uaddr > ULONG_MAX || size > ULONG_MAX) 742 + return true; 743 + 744 + if (!size) 745 + return false; 746 + 747 + return uaddr > ULONG_MAX - size + 1; 742 748 } 743 749 744 750 /* Caller should have vq mutex and device mutex. */
+1 -1
drivers/vhost/vringh.c
··· 359 359 iov = wiov; 360 360 else { 361 361 iov = riov; 362 - if (unlikely(wiov && wiov->i)) { 362 + if (unlikely(wiov && wiov->used)) { 363 363 vringh_bad("Readable desc %p after writable", 364 364 &descs[i]); 365 365 err = -EINVAL;
+1
drivers/virtio/virtio.c
··· 355 355 virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); 356 356 357 357 INIT_LIST_HEAD(&dev->vqs); 358 + spin_lock_init(&dev->vqs_list_lock); 358 359 359 360 /* 360 361 * device_add() causes the bus infrastructure to look for a matching
+7
drivers/virtio/virtio_pci_common.c
··· 576 576 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 577 577 struct device *dev = get_device(&vp_dev->vdev.dev); 578 578 579 + /* 580 + * Device is marked broken on surprise removal so that virtio upper 581 + * layers can abort any ongoing operation. 582 + */ 583 + if (!pci_device_is_present(pci_dev)) 584 + virtio_break_device(&vp_dev->vdev); 585 + 579 586 pci_disable_sriov(pci_dev); 580 587 581 588 unregister_virtio_device(&vp_dev->vdev);
+15 -3
drivers/virtio/virtio_ring.c
··· 11 11 #include <linux/module.h> 12 12 #include <linux/hrtimer.h> 13 13 #include <linux/dma-mapping.h> 14 + #include <linux/spinlock.h> 14 15 #include <xen/xen.h> 15 16 16 17 #ifdef DEBUG ··· 1756 1755 cpu_to_le16(vq->packed.event_flags_shadow); 1757 1756 } 1758 1757 1758 + spin_lock(&vdev->vqs_list_lock); 1759 1759 list_add_tail(&vq->vq.list, &vdev->vqs); 1760 + spin_unlock(&vdev->vqs_list_lock); 1760 1761 return &vq->vq; 1761 1762 1762 1763 err_desc_extra: ··· 2232 2229 memset(vq->split.desc_state, 0, vring.num * 2233 2230 sizeof(struct vring_desc_state_split)); 2234 2231 2232 + spin_lock(&vdev->vqs_list_lock); 2235 2233 list_add_tail(&vq->vq.list, &vdev->vqs); 2234 + spin_unlock(&vdev->vqs_list_lock); 2236 2235 return &vq->vq; 2237 2236 2238 2237 err_extra: ··· 2296 2291 { 2297 2292 struct vring_virtqueue *vq = to_vvq(_vq); 2298 2293 2294 + spin_lock(&vq->vq.vdev->vqs_list_lock); 2295 + list_del(&_vq->list); 2296 + spin_unlock(&vq->vq.vdev->vqs_list_lock); 2297 + 2299 2298 if (vq->we_own_ring) { 2300 2299 if (vq->packed_ring) { 2301 2300 vring_free_queue(vq->vq.vdev, ··· 2330 2321 kfree(vq->split.desc_state); 2331 2322 kfree(vq->split.desc_extra); 2332 2323 } 2333 - list_del(&_vq->list); 2334 2324 kfree(vq); 2335 2325 } 2336 2326 EXPORT_SYMBOL_GPL(vring_del_virtqueue); ··· 2381 2373 { 2382 2374 struct vring_virtqueue *vq = to_vvq(_vq); 2383 2375 2384 - return vq->broken; 2376 + return READ_ONCE(vq->broken); 2385 2377 } 2386 2378 EXPORT_SYMBOL_GPL(virtqueue_is_broken); 2387 2379 ··· 2393 2385 { 2394 2386 struct virtqueue *_vq; 2395 2387 2388 + spin_lock(&dev->vqs_list_lock); 2396 2389 list_for_each_entry(_vq, &dev->vqs, list) { 2397 2390 struct vring_virtqueue *vq = to_vvq(_vq); 2398 - vq->broken = true; 2391 + 2392 + /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ 2393 + WRITE_ONCE(vq->broken, true); 2399 2394 } 2395 + spin_unlock(&dev->vqs_list_lock); 2400 2396 } 2401 2397 EXPORT_SYMBOL_GPL(virtio_break_device); 2402 2398
+3
drivers/virtio/virtio_vdpa.c
··· 151 151 if (!name) 152 152 return NULL; 153 153 154 + if (index >= vdpa->nvqs) 155 + return ERR_PTR(-ENOENT); 156 + 154 157 /* Queue shouldn't already be set up. */ 155 158 if (ops->get_vq_ready(vdpa, index)) 156 159 return ERR_PTR(-ENOENT);
+15 -7
drivers/xen/events/events_base.c
··· 198 198 199 199 static DEFINE_PER_CPU(unsigned int, irq_epoch); 200 200 201 - static void clear_evtchn_to_irq_row(unsigned row) 201 + static void clear_evtchn_to_irq_row(int *evtchn_row) 202 202 { 203 203 unsigned col; 204 204 205 205 for (col = 0; col < EVTCHN_PER_ROW; col++) 206 - WRITE_ONCE(evtchn_to_irq[row][col], -1); 206 + WRITE_ONCE(evtchn_row[col], -1); 207 207 } 208 208 209 209 static void clear_evtchn_to_irq_all(void) ··· 213 213 for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { 214 214 if (evtchn_to_irq[row] == NULL) 215 215 continue; 216 - clear_evtchn_to_irq_row(row); 216 + clear_evtchn_to_irq_row(evtchn_to_irq[row]); 217 217 } 218 218 } 219 219 ··· 221 221 { 222 222 unsigned row; 223 223 unsigned col; 224 + int *evtchn_row; 224 225 225 226 if (evtchn >= xen_evtchn_max_channels()) 226 227 return -EINVAL; ··· 234 233 if (irq == -1) 235 234 return 0; 236 235 237 - evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); 238 - if (evtchn_to_irq[row] == NULL) 236 + evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0); 237 + if (evtchn_row == NULL) 239 238 return -ENOMEM; 240 239 241 - clear_evtchn_to_irq_row(row); 240 + clear_evtchn_to_irq_row(evtchn_row); 241 + 242 + /* 243 + * We've prepared an empty row for the mapping. If a different 244 + * thread was faster inserting it, we can drop ours. 245 + */ 246 + if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL) 247 + free_page((unsigned long) evtchn_row); 242 248 } 243 249 244 250 WRITE_ONCE(evtchn_to_irq[row][col], irq); ··· 1017 1009 int xen_bind_pirq_gsi_to_irq(unsigned gsi, 1018 1010 unsigned pirq, int shareable, char *name) 1019 1011 { 1020 - int irq = -1; 1012 + int irq; 1021 1013 struct physdev_irq irq_op; 1022 1014 int ret; 1023 1015
+8 -2
fs/btrfs/inode.c
··· 9226 9226 bool dest_log_pinned = false; 9227 9227 bool need_abort = false; 9228 9228 9229 - /* we only allow rename subvolume link between subvolumes */ 9230 - if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9229 + /* 9230 + * For non-subvolumes allow exchange only within one subvolume, in the 9231 + * same inode namespace. Two subvolumes (represented as directory) can 9232 + * be exchanged as they're a logical link and have a fixed inode number. 9233 + */ 9234 + if (root != dest && 9235 + (old_ino != BTRFS_FIRST_FREE_OBJECTID || 9236 + new_ino != BTRFS_FIRST_FREE_OBJECTID)) 9231 9237 return -EXDEV; 9232 9238 9233 9239 /* close the race window with snapshot create/destroy ioctl */
+5
fs/cifs/cifsglob.h
··· 1611 1611 int ttl; 1612 1612 }; 1613 1613 1614 + struct file_list { 1615 + struct list_head list; 1616 + struct cifsFileInfo *cfile; 1617 + }; 1618 + 1614 1619 /* 1615 1620 * common struct for holding inode info when searching for or updating an 1616 1621 * inode with new info
+1 -1
fs/cifs/dir.c
··· 100 100 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) 101 101 pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; 102 102 103 - s = dentry_path_raw(direntry, page, PAGE_SIZE); 103 + s = dentry_path_raw(direntry, page, PATH_MAX); 104 104 if (IS_ERR(s)) 105 105 return s; 106 106 if (!s[1]) // for root we want "", not "/"
+17 -18
fs/cifs/file.c
··· 4848 4848 4849 4849 oplock_break_ack: 4850 4850 /* 4851 + * When oplock break is received and there are no active 4852 + * file handles but cached, then schedule deferred close immediately. 4853 + * So, new open will not use cached handle. 4854 + */ 4855 + spin_lock(&CIFS_I(inode)->deferred_lock); 4856 + is_deferred = cifs_is_deferred_close(cfile, &dclose); 4857 + spin_unlock(&CIFS_I(inode)->deferred_lock); 4858 + if (is_deferred && 4859 + cfile->deferred_close_scheduled && 4860 + delayed_work_pending(&cfile->deferred)) { 4861 + if (cancel_delayed_work(&cfile->deferred)) { 4862 + _cifsFileInfo_put(cfile, false, false); 4863 + goto oplock_break_done; 4864 + } 4865 + } 4866 + /* 4851 4867 * releasing stale oplock after recent reconnect of smb session using 4852 4868 * a now incorrect file handle is not a data integrity issue but do 4853 4869 * not bother sending an oplock release if session to server still is ··· 4874 4858 cinode); 4875 4859 cifs_dbg(FYI, "Oplock release rc = %d\n", rc); 4876 4860 } 4877 - /* 4878 - * When oplock break is received and there are no active 4879 - * file handles but cached, then schedule deferred close immediately. 4880 - * So, new open will not use cached handle. 4881 - */ 4882 - spin_lock(&CIFS_I(inode)->deferred_lock); 4883 - is_deferred = cifs_is_deferred_close(cfile, &dclose); 4884 - if (is_deferred && 4885 - cfile->deferred_close_scheduled && 4886 - delayed_work_pending(&cfile->deferred)) { 4887 - /* 4888 - * If there is no pending work, mod_delayed_work queues new work. 4889 - * So, Increase the ref count to avoid use-after-free. 4890 - */ 4891 - if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0)) 4892 - cifsFileInfo_get(cfile); 4893 - } 4894 - spin_unlock(&CIFS_I(inode)->deferred_lock); 4861 + oplock_break_done: 4895 4862 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false); 4896 4863 cifs_done_oplock_break(cinode); 4897 4864 }
+17 -2
fs/cifs/inode.c
··· 1625 1625 goto unlink_out; 1626 1626 } 1627 1627 1628 - cifs_close_all_deferred_files(tcon); 1628 + cifs_close_deferred_file(CIFS_I(inode)); 1629 1629 if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1630 1630 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 1631 1631 rc = CIFSPOSIXDelFile(xid, tcon, full_path, ··· 2084 2084 FILE_UNIX_BASIC_INFO *info_buf_target; 2085 2085 unsigned int xid; 2086 2086 int rc, tmprc; 2087 + int retry_count = 0; 2087 2088 2088 2089 if (flags & ~RENAME_NOREPLACE) 2089 2090 return -EINVAL; ··· 2114 2113 goto cifs_rename_exit; 2115 2114 } 2116 2115 2117 - cifs_close_all_deferred_files(tcon); 2116 + cifs_close_deferred_file(CIFS_I(d_inode(source_dentry))); 2117 + if (d_inode(target_dentry) != NULL) 2118 + cifs_close_deferred_file(CIFS_I(d_inode(target_dentry))); 2119 + 2118 2120 rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, 2119 2121 to_name); 2122 + 2123 + if (rc == -EACCES) { 2124 + while (retry_count < 3) { 2125 + cifs_close_all_deferred_files(tcon); 2126 + rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, 2127 + to_name); 2128 + if (rc != -EACCES) 2129 + break; 2130 + retry_count++; 2131 + } 2132 + } 2120 2133 2121 2134 /* 2122 2135 * No-replace is the natural behavior for CIFS, so skip unlink hacks.
+39 -11
fs/cifs/misc.c
··· 723 723 cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode) 724 724 { 725 725 struct cifsFileInfo *cfile = NULL; 726 - struct cifs_deferred_close *dclose; 726 + struct file_list *tmp_list, *tmp_next_list; 727 + struct list_head file_head; 727 728 729 + if (cifs_inode == NULL) 730 + return; 731 + 732 + INIT_LIST_HEAD(&file_head); 733 + spin_lock(&cifs_inode->open_file_lock); 728 734 list_for_each_entry(cfile, &cifs_inode->openFileList, flist) { 729 - spin_lock(&cifs_inode->deferred_lock); 730 - if (cifs_is_deferred_close(cfile, &dclose)) 731 - mod_delayed_work(deferredclose_wq, &cfile->deferred, 0); 732 - spin_unlock(&cifs_inode->deferred_lock); 735 + if (delayed_work_pending(&cfile->deferred)) { 736 + if (cancel_delayed_work(&cfile->deferred)) { 737 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 738 + if (tmp_list == NULL) 739 + continue; 740 + tmp_list->cfile = cfile; 741 + list_add_tail(&tmp_list->list, &file_head); 742 + } 743 + } 744 + } 745 + spin_unlock(&cifs_inode->open_file_lock); 746 + 747 + list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { 748 + _cifsFileInfo_put(tmp_list->cfile, true, false); 749 + list_del(&tmp_list->list); 750 + kfree(tmp_list); 733 751 } 734 752 } 735 753 ··· 756 738 { 757 739 struct cifsFileInfo *cfile; 758 740 struct list_head *tmp; 741 + struct file_list *tmp_list, *tmp_next_list; 742 + struct list_head file_head; 759 743 744 + INIT_LIST_HEAD(&file_head); 760 745 spin_lock(&tcon->open_file_lock); 761 746 list_for_each(tmp, &tcon->openFileList) { 762 747 cfile = list_entry(tmp, struct cifsFileInfo, tlist); 763 748 if (delayed_work_pending(&cfile->deferred)) { 764 - /* 765 - * If there is no pending work, mod_delayed_work queues new work. 766 - * So, Increase the ref count to avoid use-after-free. 
767 - */ 768 - if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0)) 769 - cifsFileInfo_get(cfile); 749 + if (cancel_delayed_work(&cfile->deferred)) { 750 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 751 + if (tmp_list == NULL) 752 + continue; 753 + tmp_list->cfile = cfile; 754 + list_add_tail(&tmp_list->list, &file_head); 755 + } 770 756 } 771 757 } 772 758 spin_unlock(&tcon->open_file_lock); 759 + 760 + list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { 761 + _cifsFileInfo_put(tmp_list->cfile, true, false); 762 + list_del(&tmp_list->list); 763 + kfree(tmp_list); 764 + } 773 765 } 774 766 775 767 /* parses DFS refferal V3 structure
+1 -1
fs/cifs/smb2pdu.c
··· 2426 2426 memcpy(aclptr, &acl, sizeof(struct cifs_acl)); 2427 2427 2428 2428 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2429 - *len = ptr - (__u8 *)buf; 2429 + *len = roundup(ptr - (__u8 *)buf, 8); 2430 2430 2431 2431 return buf; 2432 2432 }
+6 -12
fs/configfs/file.c
··· 177 177 return retval; 178 178 } 179 179 180 - /* Fill [buffer, buffer + pos) with data coming from @from. */ 181 - static int fill_write_buffer(struct configfs_buffer *buffer, loff_t pos, 180 + /* Fill @buffer with data coming from @from. */ 181 + static int fill_write_buffer(struct configfs_buffer *buffer, 182 182 struct iov_iter *from) 183 183 { 184 - loff_t to_copy; 185 184 int copied; 186 - u8 *to; 187 185 188 186 if (!buffer->page) 189 187 buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0); 190 188 if (!buffer->page) 191 189 return -ENOMEM; 192 190 193 - to_copy = SIMPLE_ATTR_SIZE - 1 - pos; 194 - if (to_copy <= 0) 195 - return 0; 196 - to = buffer->page + pos; 197 - copied = copy_from_iter(to, to_copy, from); 191 + copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from); 198 192 buffer->needs_read_fill = 1; 199 193 /* if buf is assumed to contain a string, terminate it by \0, 200 194 * so e.g. sscanf() can scan the string easily */ 201 - to[copied] = 0; 195 + buffer->page[copied] = 0; 202 196 return copied ? : -EFAULT; 203 197 } 204 198 ··· 221 227 { 222 228 struct file *file = iocb->ki_filp; 223 229 struct configfs_buffer *buffer = file->private_data; 224 - ssize_t len; 230 + int len; 225 231 226 232 mutex_lock(&buffer->mutex); 227 - len = fill_write_buffer(buffer, iocb->ki_pos, from); 233 + len = fill_write_buffer(buffer, from); 228 234 if (len > 0) 229 235 len = flush_write_buffer(file, buffer, len); 230 236 if (len > 0)
+1 -1
fs/dax.c
··· 722 722 return rc; 723 723 724 724 id = dax_read_lock(); 725 - rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL); 725 + rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); 726 726 if (rc < 0) { 727 727 dax_read_unlock(id); 728 728 return rc;
+2 -4
fs/fuse/dax.c
··· 1235 1235 static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd) 1236 1236 { 1237 1237 long nr_pages, nr_ranges; 1238 - void *kaddr; 1239 - pfn_t pfn; 1240 1238 struct fuse_dax_mapping *range; 1241 1239 int ret, id; 1242 1240 size_t dax_size = -1; ··· 1246 1248 INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker); 1247 1249 1248 1250 id = dax_read_lock(); 1249 - nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr, 1250 - &pfn); 1251 + nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL, 1252 + NULL); 1251 1253 dax_read_unlock(id); 1252 1254 if (nr_pages < 0) { 1253 1255 pr_debug("dax_direct_access() returned %ld\n", nr_pages);
+19 -7
fs/io-wq.c
··· 129 129 bool cancel_all; 130 130 }; 131 131 132 - static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index); 132 + static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first); 133 133 static void io_wqe_dec_running(struct io_worker *worker); 134 134 135 135 static bool io_worker_get(struct io_worker *worker) ··· 248 248 rcu_read_unlock(); 249 249 250 250 if (!ret) { 251 - bool do_create = false; 251 + bool do_create = false, first = false; 252 252 253 253 raw_spin_lock_irq(&wqe->lock); 254 254 if (acct->nr_workers < acct->max_workers) { 255 255 atomic_inc(&acct->nr_running); 256 256 atomic_inc(&wqe->wq->worker_refs); 257 + if (!acct->nr_workers) 258 + first = true; 257 259 acct->nr_workers++; 258 260 do_create = true; 259 261 } 260 262 raw_spin_unlock_irq(&wqe->lock); 261 263 if (do_create) 262 - create_io_worker(wqe->wq, wqe, acct->index); 264 + create_io_worker(wqe->wq, wqe, acct->index, first); 263 265 } 264 266 } 265 267 ··· 284 282 struct io_wq *wq; 285 283 struct io_wqe *wqe; 286 284 struct io_wqe_acct *acct; 285 + bool do_create = false, first = false; 287 286 288 287 cwd = container_of(cb, struct create_worker_data, work); 289 288 wqe = cwd->wqe; 290 289 wq = wqe->wq; 291 290 acct = &wqe->acct[cwd->index]; 292 291 raw_spin_lock_irq(&wqe->lock); 293 - if (acct->nr_workers < acct->max_workers) 292 + if (acct->nr_workers < acct->max_workers) { 293 + if (!acct->nr_workers) 294 + first = true; 294 295 acct->nr_workers++; 296 + do_create = true; 297 + } 295 298 raw_spin_unlock_irq(&wqe->lock); 296 - create_io_worker(wq, cwd->wqe, cwd->index); 299 + if (do_create) { 300 + create_io_worker(wq, wqe, cwd->index, first); 301 + } else { 302 + atomic_dec(&acct->nr_running); 303 + io_worker_ref_put(wq); 304 + } 297 305 kfree(cwd); 298 306 } 299 307 ··· 641 629 raw_spin_unlock_irq(&worker->wqe->lock); 642 630 } 643 631 644 - static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) 632 + static void 
create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first) 645 633 { 646 634 struct io_wqe_acct *acct = &wqe->acct[index]; 647 635 struct io_worker *worker; ··· 682 670 worker->flags |= IO_WORKER_F_FREE; 683 671 if (index == IO_WQ_ACCT_BOUND) 684 672 worker->flags |= IO_WORKER_F_BOUND; 685 - if ((acct->nr_workers == 1) && (worker->flags & IO_WORKER_F_BOUND)) 673 + if (first && (worker->flags & IO_WORKER_F_BOUND)) 686 674 worker->flags |= IO_WORKER_F_FIXED; 687 675 raw_spin_unlock_irq(&wqe->lock); 688 676 wake_up_new_task(tsk);
+29 -29
fs/io_uring.c
··· 78 78 #include <linux/task_work.h> 79 79 #include <linux/pagemap.h> 80 80 #include <linux/io_uring.h> 81 + #include <linux/tracehook.h> 81 82 82 83 #define CREATE_TRACE_POINTS 83 84 #include <trace/events/io_uring.h> ··· 1500 1499 all_flushed = list_empty(&ctx->cq_overflow_list); 1501 1500 if (all_flushed) { 1502 1501 clear_bit(0, &ctx->check_cq_overflow); 1503 - ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; 1502 + WRITE_ONCE(ctx->rings->sq_flags, 1503 + ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW); 1504 1504 } 1505 1505 1506 1506 if (posted) ··· 1580 1578 } 1581 1579 if (list_empty(&ctx->cq_overflow_list)) { 1582 1580 set_bit(0, &ctx->check_cq_overflow); 1583 - ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; 1581 + WRITE_ONCE(ctx->rings->sq_flags, 1582 + ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW); 1583 + 1584 1584 } 1585 1585 ocqe->cqe.user_data = user_data; 1586 1586 ocqe->cqe.res = res; ··· 2226 2222 2227 2223 static inline bool io_run_task_work(void) 2228 2224 { 2229 - if (current->task_works) { 2225 + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { 2230 2226 __set_current_state(TASK_RUNNING); 2231 - task_work_run(); 2227 + tracehook_notify_signal(); 2232 2228 return true; 2233 2229 } 2234 2230 ··· 6807 6803 { 6808 6804 /* Tell userspace we may need a wakeup call */ 6809 6805 spin_lock_irq(&ctx->completion_lock); 6810 - ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; 6806 + WRITE_ONCE(ctx->rings->sq_flags, 6807 + ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP); 6811 6808 spin_unlock_irq(&ctx->completion_lock); 6812 6809 } 6813 6810 6814 6811 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx) 6815 6812 { 6816 6813 spin_lock_irq(&ctx->completion_lock); 6817 - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; 6814 + WRITE_ONCE(ctx->rings->sq_flags, 6815 + ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP); 6818 6816 spin_unlock_irq(&ctx->completion_lock); 6819 6817 } 6820 6818 ··· 7138 7132 return table; 7139 7133 } 7140 7134 
7141 - static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx) 7142 - { 7143 - spin_lock_bh(&ctx->rsrc_ref_lock); 7144 - } 7145 - 7146 - static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx) 7147 - { 7148 - spin_unlock_bh(&ctx->rsrc_ref_lock); 7149 - } 7150 - 7151 7135 static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node) 7152 7136 { 7153 7137 percpu_ref_exit(&ref_node->refs); ··· 7154 7158 struct io_rsrc_node *rsrc_node = ctx->rsrc_node; 7155 7159 7156 7160 rsrc_node->rsrc_data = data_to_kill; 7157 - io_rsrc_ref_lock(ctx); 7161 + spin_lock_irq(&ctx->rsrc_ref_lock); 7158 7162 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list); 7159 - io_rsrc_ref_unlock(ctx); 7163 + spin_unlock_irq(&ctx->rsrc_ref_lock); 7160 7164 7161 7165 atomic_inc(&data_to_kill->refs); 7162 7166 percpu_ref_kill(&rsrc_node->refs); ··· 7195 7199 /* kill initial ref, already quiesced if zero */ 7196 7200 if (atomic_dec_and_test(&data->refs)) 7197 7201 break; 7202 + mutex_unlock(&ctx->uring_lock); 7198 7203 flush_delayed_work(&ctx->rsrc_put_work); 7199 7204 ret = wait_for_completion_interruptible(&data->done); 7200 - if (!ret) 7205 + if (!ret) { 7206 + mutex_lock(&ctx->uring_lock); 7201 7207 break; 7208 + } 7202 7209 7203 7210 atomic_inc(&data->refs); 7204 7211 /* wait for all works potentially completing data->done */ 7205 7212 flush_delayed_work(&ctx->rsrc_put_work); 7206 7213 reinit_completion(&data->done); 7207 7214 7208 - mutex_unlock(&ctx->uring_lock); 7209 7215 ret = io_run_task_work_sig(); 7210 7216 mutex_lock(&ctx->uring_lock); 7211 7217 } while (ret >= 0); ··· 7666 7668 { 7667 7669 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs); 7668 7670 struct io_ring_ctx *ctx = node->rsrc_data->ctx; 7671 + unsigned long flags; 7669 7672 bool first_add = false; 7670 7673 7671 - io_rsrc_ref_lock(ctx); 7674 + spin_lock_irqsave(&ctx->rsrc_ref_lock, flags); 7672 7675 node->done = true; 7673 7676 7674 7677 while (!list_empty(&ctx->rsrc_ref_list)) { ··· 7681 
7682 list_del(&node->node); 7682 7683 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist); 7683 7684 } 7684 - io_rsrc_ref_unlock(ctx); 7685 + spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags); 7685 7686 7686 7687 if (first_add) 7687 7688 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ); ··· 8652 8653 mutex_unlock(&ctx->uring_lock); 8653 8654 } 8654 8655 8655 - static bool io_wait_rsrc_data(struct io_rsrc_data *data) 8656 + static void io_wait_rsrc_data(struct io_rsrc_data *data) 8656 8657 { 8657 - if (!data) 8658 - return false; 8659 - if (!atomic_dec_and_test(&data->refs)) 8658 + if (data && !atomic_dec_and_test(&data->refs)) 8660 8659 wait_for_completion(&data->done); 8661 - return true; 8662 8660 } 8663 8661 8664 8662 static void io_ring_ctx_free(struct io_ring_ctx *ctx) ··· 8667 8671 ctx->mm_account = NULL; 8668 8672 } 8669 8673 8674 + /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ 8675 + io_wait_rsrc_data(ctx->buf_data); 8676 + io_wait_rsrc_data(ctx->file_data); 8677 + 8670 8678 mutex_lock(&ctx->uring_lock); 8671 - if (io_wait_rsrc_data(ctx->buf_data)) 8679 + if (ctx->buf_data) 8672 8680 __io_sqe_buffers_unregister(ctx); 8673 - if (io_wait_rsrc_data(ctx->file_data)) 8681 + if (ctx->file_data) 8674 8682 __io_sqe_files_unregister(ctx); 8675 8683 if (ctx->rings) 8676 8684 __io_cqring_overflow_flush(ctx, true);
+9 -6
fs/pipe.c
··· 444 444 #endif 445 445 446 446 /* 447 - * Epoll nonsensically wants a wakeup whether the pipe 448 - * was already empty or not. 449 - * 450 447 * If it wasn't empty we try to merge new data into 451 448 * the last buffer. 452 449 * ··· 452 455 * spanning multiple pages. 453 456 */ 454 457 head = pipe->head; 455 - was_empty = true; 458 + was_empty = pipe_empty(head, pipe->tail); 456 459 chars = total_len & (PAGE_SIZE-1); 457 - if (chars && !pipe_empty(head, pipe->tail)) { 460 + if (chars && !was_empty) { 458 461 unsigned int mask = pipe->ring_size - 1; 459 462 struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask]; 460 463 int offset = buf->offset + buf->len; ··· 587 590 * This is particularly important for small writes, because of 588 591 * how (for example) the GNU make jobserver uses small writes to 589 592 * wake up pending jobs 593 + * 594 + * Epoll nonsensically wants a wakeup whether the pipe 595 + * was already empty or not. 590 596 */ 591 - if (was_empty) { 597 + if (was_empty || pipe->poll_usage) { 592 598 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); 593 599 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 594 600 } ··· 653 653 __poll_t mask; 654 654 struct pipe_inode_info *pipe = filp->private_data; 655 655 unsigned int head, tail; 656 + 657 + /* Epoll has some historical nasty semantics, this enables them */ 658 + pipe->poll_usage = 1; 656 659 657 660 /* 658 661 * Reading pipe state only -- no need for acquiring the semaphore.
+2
include/linux/device.h
··· 407 407 * @em_pd: device's energy model performance domain 408 408 * @pins: For device pin management. 409 409 * See Documentation/driver-api/pin-control.rst for details. 410 + * @msi_lock: Lock to protect MSI mask cache and mask register 410 411 * @msi_list: Hosts MSI descriptors 411 412 * @msi_domain: The generic MSI domain this device is using. 412 413 * @numa_node: NUMA node this device is close to. ··· 507 506 struct dev_pin_info *pins; 508 507 #endif 509 508 #ifdef CONFIG_GENERIC_MSI_IRQ 509 + raw_spinlock_t msi_lock; 510 510 struct list_head msi_list; 511 511 #endif 512 512 #ifdef CONFIG_DMA_OPS
+2
include/linux/irq.h
··· 569 569 * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips 570 570 * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs 571 571 * in the suspend path if they are in disabled state 572 + * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup 572 573 */ 573 574 enum { 574 575 IRQCHIP_SET_TYPE_MASKED = (1 << 0), ··· 582 581 IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), 583 582 IRQCHIP_SUPPORTS_NMI = (1 << 8), 584 583 IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), 584 + IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), 585 585 }; 586 586 587 587 #include <linux/irqdesc.h>
+6 -4
include/linux/mlx5/mlx5_ifc_vdpa.h
··· 11 11 }; 12 12 13 13 enum { 14 - MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps? 15 - MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, 14 + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, 15 + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, 16 16 }; 17 17 18 18 enum { 19 - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, 20 - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, 19 + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 20 + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT), 21 + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 22 + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED), 21 23 }; 22 24 23 25 struct mlx5_ifc_virtio_q_bits {
+1 -1
include/linux/msi.h
··· 233 233 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 234 234 235 235 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); 236 - u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 236 + void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 237 237 void pci_msi_mask_irq(struct irq_data *data); 238 238 void pci_msi_unmask_irq(struct irq_data *data); 239 239
+2
include/linux/pipe_fs_i.h
··· 48 48 * @files: number of struct file referring this pipe (protected by ->i_lock) 49 49 * @r_counter: reader counter 50 50 * @w_counter: writer counter 51 + * @poll_usage: is this pipe used for epoll, which has crazy wakeups? 51 52 * @fasync_readers: reader side fasync 52 53 * @fasync_writers: writer side fasync 53 54 * @bufs: the circular array of pipe buffers ··· 71 70 unsigned int files; 72 71 unsigned int r_counter; 73 72 unsigned int w_counter; 73 + unsigned int poll_usage; 74 74 struct page *tmp_page; 75 75 struct fasync_struct *fasync_readers; 76 76 struct fasync_struct *fasync_writers;
+11
include/linux/vdpa.h
··· 277 277 const struct vdpa_config_ops *config, 278 278 size_t size, const char *name); 279 279 280 + /** 281 + * vdpa_alloc_device - allocate and initialize a vDPA device 282 + * 283 + * @dev_struct: the type of the parent structure 284 + * @member: the name of struct vdpa_device within the @dev_struct 285 + * @parent: the parent device 286 + * @config: the bus operations that are supported by this device 287 + * @name: name of the vdpa device 288 + * 289 + * Return allocated data structure or ERR_PTR upon error 290 + */ 280 291 #define vdpa_alloc_device(dev_struct, member, parent, config, name) \ 281 292 container_of(__vdpa_alloc_device( \ 282 293 parent, config, \
+1
include/linux/virtio.h
··· 110 110 bool config_enabled; 111 111 bool config_change_pending; 112 112 spinlock_t config_lock; 113 + spinlock_t vqs_list_lock; /* Protects VQs list access */ 113 114 struct device dev; 114 115 struct virtio_device_id id; 115 116 const struct virtio_config_ops *config;
+1
include/linux/vringh.h
··· 14 14 #include <linux/virtio_byteorder.h> 15 15 #include <linux/uio.h> 16 16 #include <linux/slab.h> 17 + #include <linux/spinlock.h> 17 18 #if IS_REACHABLE(CONFIG_VHOST_IOTLB) 18 19 #include <linux/dma-direction.h> 19 20 #include <linux/vhost_iotlb.h>
+5 -7
include/net/flow_offload.h
··· 319 319 if (flow_offload_has_one_action(action)) 320 320 return true; 321 321 322 - if (action) { 323 - flow_action_for_each(i, action_entry, action) { 324 - if (i && action_entry->hw_stats != last_hw_stats) { 325 - NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported"); 326 - return false; 327 - } 328 - last_hw_stats = action_entry->hw_stats; 322 + flow_action_for_each(i, action_entry, action) { 323 + if (i && action_entry->hw_stats != last_hw_stats) { 324 + NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported"); 325 + return false; 329 326 } 327 + last_hw_stats = action_entry->hw_stats; 330 328 } 331 329 return true; 332 330 }
+7 -2
init/main.c
··· 397 397 return 0; 398 398 } 399 399 400 + static int __init warn_bootconfig(char *str) 401 + { 402 + /* The 'bootconfig' has been handled by bootconfig_params(). */ 403 + return 0; 404 + } 405 + 400 406 static void __init setup_boot_config(void) 401 407 { 402 408 static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata; ··· 481 475 pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n"); 482 476 return 0; 483 477 } 484 - early_param("bootconfig", warn_bootconfig); 485 - 486 478 #endif 479 + early_param("bootconfig", warn_bootconfig); 487 480 488 481 /* Change NUL term back to "=", to make "param" the whole string. */ 489 482 static void __init repair_env_string(char *param, char *val)
+16 -6
kernel/bpf/helpers.c
··· 363 363 #ifdef CONFIG_CGROUPS 364 364 BPF_CALL_0(bpf_get_current_cgroup_id) 365 365 { 366 - struct cgroup *cgrp = task_dfl_cgroup(current); 366 + struct cgroup *cgrp; 367 + u64 cgrp_id; 367 368 368 - return cgroup_id(cgrp); 369 + rcu_read_lock(); 370 + cgrp = task_dfl_cgroup(current); 371 + cgrp_id = cgroup_id(cgrp); 372 + rcu_read_unlock(); 373 + 374 + return cgrp_id; 369 375 } 370 376 371 377 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { ··· 382 376 383 377 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) 384 378 { 385 - struct cgroup *cgrp = task_dfl_cgroup(current); 379 + struct cgroup *cgrp; 386 380 struct cgroup *ancestor; 381 + u64 cgrp_id; 387 382 383 + rcu_read_lock(); 384 + cgrp = task_dfl_cgroup(current); 388 385 ancestor = cgroup_ancestor(cgrp, ancestor_level); 389 - if (!ancestor) 390 - return 0; 391 - return cgroup_id(ancestor); 386 + cgrp_id = ancestor ? cgroup_id(ancestor) : 0; 387 + rcu_read_unlock(); 388 + 389 + return cgrp_id; 392 390 } 393 391 394 392 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
+1
kernel/bpf/verifier.c
··· 11955 11955 if (aux_data[i].seen) 11956 11956 continue; 11957 11957 memcpy(insn + i, &trap, sizeof(trap)); 11958 + aux_data[i].zext_dst = false; 11958 11959 } 11959 11960 } 11960 11961
+4 -4
kernel/cfi.c
··· 248 248 { 249 249 cfi_check_fn fn; 250 250 251 - rcu_read_lock_sched(); 251 + rcu_read_lock_sched_notrace(); 252 252 fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr); 253 - rcu_read_unlock_sched(); 253 + rcu_read_unlock_sched_notrace(); 254 254 255 255 return fn; 256 256 } ··· 269 269 cfi_check_fn fn = NULL; 270 270 struct module *mod; 271 271 272 - rcu_read_lock_sched(); 272 + rcu_read_lock_sched_notrace(); 273 273 mod = __module_address(ptr); 274 274 if (mod) 275 275 fn = mod->cfi_check; 276 - rcu_read_unlock_sched(); 276 + rcu_read_unlock_sched_notrace(); 277 277 278 278 return fn; 279 279 }
+4 -1
kernel/irq/chip.c
··· 265 265 } else { 266 266 switch (__irq_startup_managed(desc, aff, force)) { 267 267 case IRQ_STARTUP_NORMAL: 268 + if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) 269 + irq_setup_affinity(desc); 268 270 ret = __irq_startup(desc); 269 - irq_setup_affinity(desc); 271 + if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) 272 + irq_setup_affinity(desc); 270 273 break; 271 274 case IRQ_STARTUP_MANAGED: 272 275 irq_do_set_affinity(d, aff, false);
+8 -5
kernel/irq/msi.c
··· 476 476 return 0; 477 477 478 478 cleanup: 479 - for_each_msi_vector(desc, i, dev) { 480 - irq_data = irq_domain_get_irq_data(domain, i); 481 - if (irqd_is_activated(irq_data)) 482 - irq_domain_deactivate_irq(irq_data); 483 - } 484 479 msi_domain_free_irqs(domain, dev); 485 480 return ret; 486 481 } ··· 500 505 501 506 void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) 502 507 { 508 + struct irq_data *irq_data; 503 509 struct msi_desc *desc; 510 + int i; 511 + 512 + for_each_msi_vector(desc, i, dev) { 513 + irq_data = irq_domain_get_irq_data(domain, i); 514 + if (irqd_is_activated(irq_data)) 515 + irq_domain_deactivate_irq(irq_data); 516 + } 504 517 505 518 for_each_msi_entry(desc, dev) { 506 519 /*
+5
kernel/irq/timings.c
··· 453 453 */ 454 454 index = irq_timings_interval_index(interval); 455 455 456 + if (index > PREDICTION_BUFFER_SIZE - 1) { 457 + irqs->count = 0; 458 + return; 459 + } 460 + 456 461 /* 457 462 * Store the index as an element of the pattern in another 458 463 * circular array.
+1 -1
kernel/locking/rtmutex.c
··· 343 343 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, 344 344 enum rtmutex_chainwalk chwalk) 345 345 { 346 - if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX)) 346 + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) 347 347 return waiter != NULL; 348 348 return chwalk == RT_MUTEX_FULL_CHAINWALK; 349 349 }
+5
kernel/trace/Kconfig
··· 219 219 depends on DYNAMIC_FTRACE_WITH_REGS 220 220 depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 221 221 222 + config DYNAMIC_FTRACE_WITH_ARGS 223 + def_bool y 224 + depends on DYNAMIC_FTRACE 225 + depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS 226 + 222 227 config FUNCTION_PROFILER 223 228 bool "Kernel function profiler" 224 229 depends on FUNCTION_TRACER
+15 -3
kernel/trace/trace.c
··· 2897 2897 2898 2898 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 2899 2899 { 2900 + enum event_trigger_type tt = ETT_NONE; 2901 + struct trace_event_file *file = fbuffer->trace_file; 2902 + 2903 + if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, 2904 + fbuffer->entry, &tt)) 2905 + goto discard; 2906 + 2900 2907 if (static_key_false(&tracepoint_printk_key.key)) 2901 2908 output_printk(fbuffer); 2902 2909 2903 2910 if (static_branch_unlikely(&trace_event_exports_enabled)) 2904 2911 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); 2905 - event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer, 2906 - fbuffer->event, fbuffer->entry, 2907 - fbuffer->trace_ctx, fbuffer->regs); 2912 + 2913 + trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, 2914 + fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); 2915 + 2916 + discard: 2917 + if (tt) 2918 + event_triggers_post_call(file, tt); 2919 + 2908 2920 } 2909 2921 EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 2910 2922
-32
kernel/trace/trace.h
··· 1389 1389 event_triggers_post_call(file, tt); 1390 1390 } 1391 1391 1392 - /** 1393 - * event_trigger_unlock_commit_regs - handle triggers and finish event commit 1394 - * @file: The file pointer associated with the event 1395 - * @buffer: The ring buffer that the event is being written to 1396 - * @event: The event meta data in the ring buffer 1397 - * @entry: The event itself 1398 - * @trace_ctx: The tracing context flags. 1399 - * 1400 - * This is a helper function to handle triggers that require data 1401 - * from the event itself. It also tests the event against filters and 1402 - * if the event is soft disabled and should be discarded. 1403 - * 1404 - * Same as event_trigger_unlock_commit() but calls 1405 - * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). 1406 - */ 1407 - static inline void 1408 - event_trigger_unlock_commit_regs(struct trace_event_file *file, 1409 - struct trace_buffer *buffer, 1410 - struct ring_buffer_event *event, 1411 - void *entry, unsigned int trace_ctx, 1412 - struct pt_regs *regs) 1413 - { 1414 - enum event_trigger_type tt = ETT_NONE; 1415 - 1416 - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) 1417 - trace_buffer_unlock_commit_regs(file->tr, buffer, event, 1418 - trace_ctx, regs); 1419 - 1420 - if (tt) 1421 - event_triggers_post_call(file, tt); 1422 - } 1423 - 1424 1392 #define FILTER_PRED_INVALID ((unsigned short)-1) 1425 1393 #define FILTER_PRED_IS_RIGHT (1 << 15) 1426 1394 #define FILTER_PRED_FOLD (1 << 15)
+2
kernel/trace/trace_events_hist.c
··· 3430 3430 event = data->match_data.event; 3431 3431 } 3432 3432 3433 + if (!event) 3434 + goto free; 3433 3435 /* 3434 3436 * At this point, we're looking at a field on another 3435 3437 * event. Because we can't modify a hist trigger on
+55 -1
kernel/trace/trace_osnoise.c
··· 253 253 */ 254 254 static bool osnoise_busy; 255 255 256 + #ifdef CONFIG_PREEMPT_RT 256 257 /* 257 258 * Print the osnoise header info. 258 259 */ 260 + static void print_osnoise_headers(struct seq_file *s) 261 + { 262 + if (osnoise_data.tainted) 263 + seq_puts(s, "# osnoise is tainted!\n"); 264 + 265 + seq_puts(s, "# _-------=> irqs-off\n"); 266 + seq_puts(s, "# / _------=> need-resched\n"); 267 + seq_puts(s, "# | / _-----=> need-resched-lazy\n"); 268 + seq_puts(s, "# || / _----=> hardirq/softirq\n"); 269 + seq_puts(s, "# ||| / _---=> preempt-depth\n"); 270 + seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n"); 271 + seq_puts(s, "# ||||| / _-=> migrate-disable\n"); 272 + 273 + seq_puts(s, "# |||||| / "); 274 + seq_puts(s, " MAX\n"); 275 + 276 + seq_puts(s, "# ||||| / "); 277 + seq_puts(s, " SINGLE Interference counters:\n"); 278 + 279 + seq_puts(s, "# ||||||| RUNTIME "); 280 + seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n"); 281 + 282 + seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP IN US "); 283 + seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n"); 284 + 285 + seq_puts(s, "# | | | ||||||| | | "); 286 + seq_puts(s, " | | | | | | | |\n"); 287 + } 288 + #else /* CONFIG_PREEMPT_RT */ 259 289 static void print_osnoise_headers(struct seq_file *s) 260 290 { 261 291 if (osnoise_data.tainted) ··· 309 279 seq_puts(s, "# | | | |||| | | "); 310 280 seq_puts(s, " | | | | | | | |\n"); 311 281 } 282 + #endif /* CONFIG_PREEMPT_RT */ 312 283 313 284 /* 314 285 * osnoise_taint - report an osnoise error. ··· 354 323 /* 355 324 * Print the timerlat header info. 
356 325 */ 326 + #ifdef CONFIG_PREEMPT_RT 327 + static void print_timerlat_headers(struct seq_file *s) 328 + { 329 + seq_puts(s, "# _-------=> irqs-off\n"); 330 + seq_puts(s, "# / _------=> need-resched\n"); 331 + seq_puts(s, "# | / _-----=> need-resched-lazy\n"); 332 + seq_puts(s, "# || / _----=> hardirq/softirq\n"); 333 + seq_puts(s, "# ||| / _---=> preempt-depth\n"); 334 + seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n"); 335 + seq_puts(s, "# ||||| / _-=> migrate-disable\n"); 336 + seq_puts(s, "# |||||| /\n"); 337 + seq_puts(s, "# ||||||| ACTIVATION\n"); 338 + seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP ID "); 339 + seq_puts(s, " CONTEXT LATENCY\n"); 340 + seq_puts(s, "# | | | ||||||| | | "); 341 + seq_puts(s, " | |\n"); 342 + } 343 + #else /* CONFIG_PREEMPT_RT */ 357 344 static void print_timerlat_headers(struct seq_file *s) 358 345 { 359 346 seq_puts(s, "# _-----=> irqs-off\n"); ··· 385 336 seq_puts(s, "# | | | |||| | | "); 386 337 seq_puts(s, " | |\n"); 387 338 } 339 + #endif /* CONFIG_PREEMPT_RT */ 388 340 389 341 /* 390 342 * Record an timerlat_sample into the tracer buffer. ··· 1075 1025 /* 1076 1026 * osnoise_stop_tracing - Stop tracing and the tracer. 1077 1027 */ 1078 - static void osnoise_stop_tracing(void) 1028 + static __always_inline void osnoise_stop_tracing(void) 1079 1029 { 1080 1030 struct trace_array *tr = osnoise_trace; 1031 + 1032 + trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, 1033 + "stop tracing hit on cpu %d\n", smp_processor_id()); 1034 + 1081 1035 tracer_tracing_off(tr); 1082 1036 } 1083 1037
+1 -1
lib/devmem_is_allowed.c
··· 19 19 */ 20 20 int devmem_is_allowed(unsigned long pfn) 21 21 { 22 - if (iomem_is_exclusive(pfn << PAGE_SHIFT)) 22 + if (iomem_is_exclusive(PFN_PHYS(pfn))) 23 23 return 0; 24 24 if (!page_is_ram(pfn)) 25 25 return 1;
+5 -2
mm/gup.c
··· 1558 1558 gup_flags |= FOLL_WRITE; 1559 1559 1560 1560 /* 1561 - * See check_vma_flags(): Will return -EFAULT on incompatible mappings 1562 - * or with insufficient permissions. 1561 + * We want to report -EINVAL instead of -EFAULT for any permission 1562 + * problems or incompatible mappings. 1563 1563 */ 1564 + if (check_vma_flags(vma, gup_flags)) 1565 + return -EINVAL; 1566 + 1564 1567 return __get_user_pages(mm, start, nr_pages, gup_flags, 1565 1568 NULL, NULL, locked); 1566 1569 }
+3 -3
mm/kmemleak.c
··· 290 290 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len); 291 291 kasan_disable_current(); 292 292 warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE, 293 - HEX_GROUP_SIZE, ptr, len, HEX_ASCII); 293 + HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII); 294 294 kasan_enable_current(); 295 295 } 296 296 ··· 1171 1171 1172 1172 kasan_disable_current(); 1173 1173 kcsan_disable_current(); 1174 - object->checksum = crc32(0, (void *)object->pointer, object->size); 1174 + object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size); 1175 1175 kasan_enable_current(); 1176 1176 kcsan_enable_current(); 1177 1177 ··· 1246 1246 break; 1247 1247 1248 1248 kasan_disable_current(); 1249 - pointer = *ptr; 1249 + pointer = *(unsigned long *)kasan_reset_tag((void *)ptr); 1250 1250 kasan_enable_current(); 1251 1251 1252 1252 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+3 -1
mm/madvise.c
··· 862 862 switch (pages) { 863 863 case -EINTR: 864 864 return -EINTR; 865 - case -EFAULT: /* Incompatible mappings / permissions. */ 865 + case -EINVAL: /* Incompatible mappings / permissions. */ 866 866 return -EINVAL; 867 867 case -EHWPOISON: 868 868 return -EHWPOISON; 869 + case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */ 870 + return -EFAULT; 869 871 default: 870 872 pr_warn_once("%s: unhandled return value: %ld\n", 871 873 __func__, pages);
+4 -2
mm/memcontrol.c
··· 3106 3106 stock->cached_pgdat = pgdat; 3107 3107 } else if (stock->cached_pgdat != pgdat) { 3108 3108 /* Flush the existing cached vmstat data */ 3109 + struct pglist_data *oldpg = stock->cached_pgdat; 3110 + 3109 3111 if (stock->nr_slab_reclaimable_b) { 3110 - mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B, 3112 + mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 3111 3113 stock->nr_slab_reclaimable_b); 3112 3114 stock->nr_slab_reclaimable_b = 0; 3113 3115 } 3114 3116 if (stock->nr_slab_unreclaimable_b) { 3115 - mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B, 3117 + mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 3116 3118 stock->nr_slab_unreclaimable_b); 3117 3119 stock->nr_slab_unreclaimable_b = 0; 3118 3120 }
+14 -11
mm/slub.c
··· 576 576 unsigned int length) 577 577 { 578 578 metadata_access_enable(); 579 - print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS, 580 - 16, 1, addr, length, 1); 579 + print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 580 + 16, 1, kasan_reset_tag((void *)addr), length, 1); 581 581 metadata_access_disable(); 582 582 } 583 583 ··· 1400 1400 static int __init setup_slub_debug(char *str) 1401 1401 { 1402 1402 slab_flags_t flags; 1403 + slab_flags_t global_flags; 1403 1404 char *saved_str; 1404 1405 char *slab_list; 1405 1406 bool global_slub_debug_changed = false; 1406 1407 bool slab_list_specified = false; 1407 1408 1408 - slub_debug = DEBUG_DEFAULT_FLAGS; 1409 + global_flags = DEBUG_DEFAULT_FLAGS; 1409 1410 if (*str++ != '=' || !*str) 1410 1411 /* 1411 1412 * No options specified. Switch on full debugging. ··· 1418 1417 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1419 1418 1420 1419 if (!slab_list) { 1421 - slub_debug = flags; 1420 + global_flags = flags; 1422 1421 global_slub_debug_changed = true; 1423 1422 } else { 1424 1423 slab_list_specified = true; ··· 1427 1426 1428 1427 /* 1429 1428 * For backwards compatibility, a single list of flags with list of 1430 - * slabs means debugging is only enabled for those slabs, so the global 1431 - * slub_debug should be 0. We can extended that to multiple lists as 1429 + * slabs means debugging is only changed for those slabs, so the global 1430 + * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending 1431 + * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as 1432 1432 * long as there is no option specifying flags without a slab list. 
1433 1433 */ 1434 1434 if (slab_list_specified) { 1435 1435 if (!global_slub_debug_changed) 1436 - slub_debug = 0; 1436 + global_flags = slub_debug; 1437 1437 slub_debug_string = saved_str; 1438 1438 } 1439 1439 out: 1440 + slub_debug = global_flags; 1440 1441 if (slub_debug != 0 || slub_debug_string) 1441 1442 static_branch_enable(&slub_debug_enabled); 1442 1443 else ··· 3239 3236 struct kmem_cache *s; 3240 3237 }; 3241 3238 3242 - static inline void free_nonslab_page(struct page *page) 3239 + static inline void free_nonslab_page(struct page *page, void *object) 3243 3240 { 3244 3241 unsigned int order = compound_order(page); 3245 3242 3246 3243 VM_BUG_ON_PAGE(!PageCompound(page), page); 3247 - kfree_hook(page_address(page)); 3244 + kfree_hook(object); 3248 3245 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order)); 3249 3246 __free_pages(page, order); 3250 3247 } ··· 3285 3282 if (!s) { 3286 3283 /* Handle kalloc'ed objects */ 3287 3284 if (unlikely(!PageSlab(page))) { 3288 - free_nonslab_page(page); 3285 + free_nonslab_page(page, object); 3289 3286 p[size] = NULL; /* mark object processed */ 3290 3287 return size; 3291 3288 } ··· 4261 4258 4262 4259 page = virt_to_head_page(x); 4263 4260 if (unlikely(!PageSlab(page))) { 4264 - free_nonslab_page(page); 4261 + free_nonslab_page(page, object); 4265 4262 return; 4266 4263 } 4267 4264 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
+2
net/mac80211/main.c
··· 260 260 flush_work(&local->radar_detected_work); 261 261 262 262 rtnl_lock(); 263 + /* we might do interface manipulations, so need both */ 264 + wiphy_lock(local->hw.wiphy); 263 265 264 266 WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), 265 267 "%s called with hardware scan in progress\n", __func__);
+3 -7
net/mptcp/options.c
··· 885 885 return subflow->mp_capable; 886 886 } 887 887 888 - if (mp_opt->dss && mp_opt->use_ack) { 888 + if ((mp_opt->dss && mp_opt->use_ack) || 889 + (mp_opt->add_addr && !mp_opt->echo)) { 889 890 /* subflows are fully established as soon as we get any 890 - * additional ack. 891 + * additional ack, including ADD_ADDR. 891 892 */ 892 893 subflow->fully_established = 1; 893 894 WRITE_ONCE(msk->fully_established, true); 894 895 goto fully_established; 895 - } 896 - 897 - if (mp_opt->add_addr) { 898 - WRITE_ONCE(msk->fully_established, true); 899 - return true; 900 896 } 901 897 902 898 /* If the first established packet does not contain MP_CAPABLE + data
+12 -32
net/mptcp/pm_netlink.c
··· 1297 1297 return 0; 1298 1298 } 1299 1299 1300 - struct addr_entry_release_work { 1301 - struct rcu_work rwork; 1302 - struct mptcp_pm_addr_entry *entry; 1303 - }; 1304 - 1305 - static void mptcp_pm_release_addr_entry(struct work_struct *work) 1300 + /* caller must ensure the RCU grace period is already elapsed */ 1301 + static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) 1306 1302 { 1307 - struct addr_entry_release_work *w; 1308 - struct mptcp_pm_addr_entry *entry; 1309 - 1310 - w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork); 1311 - entry = w->entry; 1312 - if (entry) { 1313 - if (entry->lsk) 1314 - sock_release(entry->lsk); 1315 - kfree(entry); 1316 - } 1317 - kfree(w); 1318 - } 1319 - 1320 - static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry) 1321 - { 1322 - struct addr_entry_release_work *w; 1323 - 1324 - w = kmalloc(sizeof(*w), GFP_ATOMIC); 1325 - if (w) { 1326 - INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry); 1327 - w->entry = entry; 1328 - queue_rcu_work(system_wq, &w->rwork); 1329 - } 1303 + if (entry->lsk) 1304 + sock_release(entry->lsk); 1305 + kfree(entry); 1330 1306 } 1331 1307 1332 1308 static int mptcp_nl_remove_id_zero_address(struct net *net, ··· 1382 1406 spin_unlock_bh(&pernet->lock); 1383 1407 1384 1408 mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr); 1385 - mptcp_pm_free_addr_entry(entry); 1409 + synchronize_rcu(); 1410 + __mptcp_pm_release_addr_entry(entry); 1386 1411 1387 1412 return ret; 1388 1413 } ··· 1436 1459 } 1437 1460 } 1438 1461 1462 + /* caller must ensure the RCU grace period is already elapsed */ 1439 1463 static void __flush_addrs(struct list_head *list) 1440 1464 { 1441 1465 while (!list_empty(list)) { ··· 1445 1467 cur = list_entry(list->next, 1446 1468 struct mptcp_pm_addr_entry, list); 1447 1469 list_del_rcu(&cur->list); 1448 - mptcp_pm_free_addr_entry(cur); 1470 + __mptcp_pm_release_addr_entry(cur); 1449 1471 } 1450 
1472 } 1451 1473 ··· 1469 1491 bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1); 1470 1492 spin_unlock_bh(&pernet->lock); 1471 1493 mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list); 1494 + synchronize_rcu(); 1472 1495 __flush_addrs(&free_list); 1473 1496 return 0; 1474 1497 } ··· 2081 2102 struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id); 2082 2103 2083 2104 /* net is removed from namespace list, can't race with 2084 - * other modifiers 2105 + * other modifiers, also netns core already waited for a 2106 + * RCU grace period. 2085 2107 */ 2086 2108 __flush_addrs(&pernet->local_addr_list); 2087 2109 }
+1
net/openvswitch/vport.c
··· 507 507 } 508 508 509 509 skb->dev = vport->dev; 510 + skb->tstamp = 0; 510 511 vport->ops->send(skb); 511 512 return; 512 513
+2 -2
net/rds/ib_frmr.c
··· 131 131 cpu_relax(); 132 132 } 133 133 134 - ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 134 + ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, 135 135 &off, PAGE_SIZE); 136 - if (unlikely(ret != ibmr->sg_len)) 136 + if (unlikely(ret != ibmr->sg_dma_len)) 137 137 return ret < 0 ? ret : -EINVAL; 138 138 139 139 if (cmpxchg(&frmr->fr_state,
+1 -1
net/sched/sch_cake.c
··· 720 720 skip_hash: 721 721 if (flow_override) 722 722 flow_hash = flow_override - 1; 723 - else if (use_skbhash) 723 + else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS)) 724 724 flow_hash = skb->hash; 725 725 if (host_override) { 726 726 dsthost_hash = host_override - 1;
+1 -1
net/tipc/socket.c
··· 1518 1518 1519 1519 if (unlikely(syn && !rc)) { 1520 1520 tipc_set_sk_state(sk, TIPC_CONNECTING); 1521 - if (timeout) { 1521 + if (dlen && timeout) { 1522 1522 timeout = msecs_to_jiffies(timeout); 1523 1523 tipc_wait_for_connect(sock, &timeout); 1524 1524 }
+8 -1
sound/firewire/oxfw/oxfw-stream.c
··· 153 153 struct cmp_connection *conn; 154 154 enum cmp_direction c_dir; 155 155 enum amdtp_stream_direction s_dir; 156 - unsigned int flags = CIP_UNAWARE_SYT; 156 + unsigned int flags = 0; 157 157 int err; 158 158 159 159 if (!(oxfw->quirks & SND_OXFW_QUIRK_BLOCKING_TRANSMISSION)) 160 160 flags |= CIP_NONBLOCKING; 161 161 else 162 162 flags |= CIP_BLOCKING; 163 + 164 + // OXFW 970/971 has no function to generate playback timing according to the sequence 165 + // of value in syt field, thus the packet should include NO_INFO value in the field. 166 + // However, some models just ignore data blocks in packet with NO_INFO for audio data 167 + // processing. 168 + if (!(oxfw->quirks & SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET)) 169 + flags |= CIP_UNAWARE_SYT; 163 170 164 171 if (stream == &oxfw->tx_stream) { 165 172 conn = &oxfw->out_conn;
+4 -2
sound/firewire/oxfw/oxfw.c
··· 159 159 return snd_oxfw_scs1x_add(oxfw); 160 160 } 161 161 162 - if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) 163 - oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION; 162 + if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) { 163 + oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION | 164 + SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET; 165 + } 164 166 165 167 /* 166 168 * TASCAM FireOne has physical control and requires a pair of additional
+5
sound/firewire/oxfw/oxfw.h
··· 42 42 SND_OXFW_QUIRK_BLOCKING_TRANSMISSION = 0x04, 43 43 // Stanton SCS1.d and SCS1.m support unique transaction. 44 44 SND_OXFW_QUIRK_SCS_TRANSACTION = 0x08, 45 + // Apogee Duet FireWire ignores data blocks in packet with NO_INFO for audio data 46 + // processing, while output level meter moves. Any value in syt field of packet takes 47 + // the device to process audio data even if the value is invalid in a point of 48 + // IEC 61883-1/6. 49 + SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET = 0x10, 45 50 }; 46 51 47 52 /* This is an arbitrary number for convinience. */
+7 -3
sound/pci/hda/hda_generic.c
··· 3460 3460 struct hda_gen_spec *spec = codec->spec; 3461 3461 const struct hda_input_mux *imux; 3462 3462 struct nid_path *path; 3463 - int i, adc_idx, err = 0; 3463 + int i, adc_idx, ret, err = 0; 3464 3464 3465 3465 imux = &spec->input_mux; 3466 3466 adc_idx = kcontrol->id.index; ··· 3470 3470 if (!path || !path->ctls[type]) 3471 3471 continue; 3472 3472 kcontrol->private_value = path->ctls[type]; 3473 - err = func(kcontrol, ucontrol); 3474 - if (err < 0) 3473 + ret = func(kcontrol, ucontrol); 3474 + if (ret < 0) { 3475 + err = ret; 3475 3476 break; 3477 + } 3478 + if (ret > 0) 3479 + err = 1; 3476 3480 } 3477 3481 mutex_unlock(&codec->control_mutex); 3478 3482 if (err >= 0 && spec->cap_sync_hook)
+9 -3
sound/pci/hda/hda_intel.c
··· 883 883 return azx_get_pos_posbuf(chip, azx_dev); 884 884 } 885 885 886 - static void azx_shutdown_chip(struct azx *chip) 886 + static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset) 887 887 { 888 888 azx_stop_chip(chip); 889 - azx_enter_link_reset(chip); 889 + if (!skip_link_reset) 890 + azx_enter_link_reset(chip); 890 891 azx_clear_irq_pending(chip); 891 892 display_power(chip, false); 892 893 } ··· 895 894 #ifdef CONFIG_PM 896 895 static DEFINE_MUTEX(card_list_lock); 897 896 static LIST_HEAD(card_list); 897 + 898 + static void azx_shutdown_chip(struct azx *chip) 899 + { 900 + __azx_shutdown_chip(chip, false); 901 + } 898 902 899 903 static void azx_add_card_list(struct azx *chip) 900 904 { ··· 2391 2385 return; 2392 2386 chip = card->private_data; 2393 2387 if (chip && chip->running) 2394 - azx_shutdown_chip(chip); 2388 + __azx_shutdown_chip(chip, true); 2395 2389 } 2396 2390 2397 2391 /* PCI IDs */
+1
sound/pci/hda/patch_realtek.c
··· 8332 8332 SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), 8333 8333 SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), 8334 8334 SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC), 8335 + SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK), 8335 8336 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 8336 8337 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 8337 8338 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+1
sound/pci/hda/patch_via.c
··· 1041 1041 }; 1042 1042 1043 1043 static const struct snd_pci_quirk vt2002p_fixups[] = { 1044 + SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE), 1044 1045 SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75), 1045 1046 SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST), 1046 1047 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
+27 -4
tools/io_uring/io_uring-cp.c
··· 131 131 writes = reads = offset = 0; 132 132 133 133 while (insize || write_left) { 134 - unsigned long had_reads; 135 - int got_comp; 134 + int had_reads, got_comp; 136 135 137 136 /* 138 137 * Queue up as many reads as we can ··· 173 174 if (!got_comp) { 174 175 ret = io_uring_wait_cqe(ring, &cqe); 175 176 got_comp = 1; 176 - } else 177 + } else { 177 178 ret = io_uring_peek_cqe(ring, &cqe); 179 + if (ret == -EAGAIN) { 180 + cqe = NULL; 181 + ret = 0; 182 + } 183 + } 178 184 if (ret < 0) { 179 185 fprintf(stderr, "io_uring_peek_cqe: %s\n", 180 186 strerror(-ret)); ··· 198 194 fprintf(stderr, "cqe failed: %s\n", 199 195 strerror(-cqe->res)); 200 196 return 1; 201 - } else if ((size_t) cqe->res != data->iov.iov_len) { 197 + } else if (cqe->res != data->iov.iov_len) { 202 198 /* Short read/write, adjust and requeue */ 203 199 data->iov.iov_base += cqe->res; 204 200 data->iov.iov_len -= cqe->res; ··· 223 219 } 224 220 io_uring_cqe_seen(ring, cqe); 225 221 } 222 + } 223 + 224 + /* wait out pending writes */ 225 + while (writes) { 226 + struct io_data *data; 227 + 228 + ret = io_uring_wait_cqe(ring, &cqe); 229 + if (ret) { 230 + fprintf(stderr, "wait_cqe=%d\n", ret); 231 + return 1; 232 + } 233 + if (cqe->res < 0) { 234 + fprintf(stderr, "write res=%d\n", cqe->res); 235 + return 1; 236 + } 237 + data = io_uring_cqe_get_data(cqe); 238 + free(data); 239 + writes--; 240 + io_uring_cqe_seen(ring, cqe); 226 241 } 227 242 228 243 return 0;
+1 -1
tools/testing/nvdimm/test/nfit.c
··· 434 434 dev_dbg(dev, "%s: transition out verify\n", __func__); 435 435 fw->state = FW_STATE_UPDATED; 436 436 fw->missed_activate = false; 437 - /* fall through */ 437 + fallthrough; 438 438 case FW_STATE_UPDATED: 439 439 nd_cmd->status = 0; 440 440 /* bogus test version */
+12
tools/testing/selftests/bpf/verifier/dead_code.c
··· 159 159 .result = ACCEPT, 160 160 .retval = 2, 161 161 }, 162 + { 163 + "dead code: zero extension", 164 + .insns = { 165 + BPF_MOV64_IMM(BPF_REG_0, 0), 166 + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), 167 + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), 168 + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4), 169 + BPF_EXIT_INSN(), 170 + }, 171 + .result = ACCEPT, 172 + .retval = 0, 173 + },
+21 -20
tools/testing/selftests/sgx/sigstruct.c
··· 55 55 return true; 56 56 } 57 57 58 + static void reverse_bytes(void *data, int length) 59 + { 60 + int i = 0; 61 + int j = length - 1; 62 + uint8_t temp; 63 + uint8_t *ptr = data; 64 + 65 + while (i < j) { 66 + temp = ptr[i]; 67 + ptr[i] = ptr[j]; 68 + ptr[j] = temp; 69 + i++; 70 + j--; 71 + } 72 + } 73 + 58 74 static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1, 59 75 uint8_t *q2) 60 76 { 61 77 struct q1q2_ctx ctx; 78 + int len; 62 79 63 80 if (!alloc_q1q2_ctx(s, m, &ctx)) { 64 81 fprintf(stderr, "Not enough memory for Q1Q2 calculation\n"); ··· 106 89 goto out; 107 90 } 108 91 109 - BN_bn2bin(ctx.q1, q1); 110 - BN_bn2bin(ctx.q2, q2); 92 + len = BN_bn2bin(ctx.q1, q1); 93 + reverse_bytes(q1, len); 94 + len = BN_bn2bin(ctx.q2, q2); 95 + reverse_bytes(q2, len); 111 96 112 97 free_q1q2_ctx(&ctx); 113 98 return true; ··· 169 150 BIO_free(bio); 170 151 171 152 return key; 172 - } 173 - 174 - static void reverse_bytes(void *data, int length) 175 - { 176 - int i = 0; 177 - int j = length - 1; 178 - uint8_t temp; 179 - uint8_t *ptr = data; 180 - 181 - while (i < j) { 182 - temp = ptr[i]; 183 - ptr[i] = ptr[j]; 184 - ptr[j] = temp; 185 - i++; 186 - j--; 187 - } 188 153 } 189 154 190 155 enum mrtags { ··· 370 367 /* BE -> LE */ 371 368 reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE); 372 369 reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE); 373 - reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE); 374 - reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE); 375 370 376 371 EVP_MD_CTX_destroy(ctx); 377 372 RSA_free(key);
+2 -1
tools/virtio/Makefile
··· 4 4 virtio_test: virtio_ring.o virtio_test.o 5 5 vringh_test: vringh_test.o vringh.o virtio_ring.o 6 6 7 - CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h 7 + CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h 8 + LDFLAGS += -lpthread 8 9 vpath %.c ../../drivers/virtio ../../drivers/vhost 9 10 mod: 10 11 ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
+56
tools/virtio/linux/spinlock.h
··· 1 + #ifndef SPINLOCK_H_STUB 2 + #define SPINLOCK_H_STUB 3 + 4 + #include <pthread.h> 5 + 6 + typedef pthread_spinlock_t spinlock_t; 7 + 8 + static inline void spin_lock_init(spinlock_t *lock) 9 + { 10 + int r = pthread_spin_init(lock, 0); 11 + assert(!r); 12 + } 13 + 14 + static inline void spin_lock(spinlock_t *lock) 15 + { 16 + int ret = pthread_spin_lock(lock); 17 + assert(!ret); 18 + } 19 + 20 + static inline void spin_unlock(spinlock_t *lock) 21 + { 22 + int ret = pthread_spin_unlock(lock); 23 + assert(!ret); 24 + } 25 + 26 + static inline void spin_lock_bh(spinlock_t *lock) 27 + { 28 + spin_lock(lock); 29 + } 30 + 31 + static inline void spin_unlock_bh(spinlock_t *lock) 32 + { 33 + spin_unlock(lock); 34 + } 35 + 36 + static inline void spin_lock_irq(spinlock_t *lock) 37 + { 38 + spin_lock(lock); 39 + } 40 + 41 + static inline void spin_unlock_irq(spinlock_t *lock) 42 + { 43 + spin_unlock(lock); 44 + } 45 + 46 + static inline void spin_lock_irqsave(spinlock_t *lock, unsigned long f) 47 + { 48 + spin_lock(lock); 49 + } 50 + 51 + static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f) 52 + { 53 + spin_unlock(lock); 54 + } 55 + 56 + #endif
+2
tools/virtio/linux/virtio.h
··· 3 3 #define LINUX_VIRTIO_H 4 4 #include <linux/scatterlist.h> 5 5 #include <linux/kernel.h> 6 + #include <linux/spinlock.h> 6 7 7 8 struct device { 8 9 void *parent; ··· 13 12 struct device dev; 14 13 u64 features; 15 14 struct list_head vqs; 15 + spinlock_t vqs_list_lock; 16 16 }; 17 17 18 18 struct virtqueue {