Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net

The BTF conflicts were simple overlapping changes.

The virtio_net conflict was an overlap of a fix of a statistics counter,
happening alongside a move over to a bona fide statistics structure
rather than counting values on the stack.

Signed-off-by: David S. Miller <davem@davemloft.net>

+2011 -922
+2 -1
Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
··· 16 16 the node is not important. The content of the node is defined in dwc3.txt. 17 17 18 18 Phy documentation is provided in the following places: 19 - Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt 19 + Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY 20 + Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY 20 21 21 22 Example device nodes: 22 23
+1
Documentation/networking/dpaa2/overview.rst
··· 1 1 .. include:: <isonum.txt> 2 2 3 + ========================================================= 3 4 DPAA2 (Data Path Acceleration Architecture Gen2) Overview 4 5 ========================================================= 5 6
+2 -1
MAINTAINERS
··· 7095 7095 F: include/uapi/linux/input-event-codes.h 7096 7096 F: include/linux/input/ 7097 7097 F: Documentation/devicetree/bindings/input/ 7098 + F: Documentation/devicetree/bindings/serio/ 7098 7099 F: Documentation/input/ 7099 7100 7100 7101 INPUT MULTITOUCH (MT) PROTOCOL ··· 7985 7984 F: tools/testing/selftests/kmod/ 7986 7985 7987 7986 KPROBES 7988 - M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com> 7987 + M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> 7989 7988 M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 7990 7989 M: "David S. Miller" <davem@davemloft.net> 7991 7990 M: Masami Hiramatsu <mhiramat@kernel.org>
+1 -1
Makefile
··· 2 2 VERSION = 4 3 3 PATCHLEVEL = 18 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Merciless Moray 7 7 8 8 # *DOCUMENTATION*
+3 -1
arch/arm/kernel/entry-common.S
··· 48 48 * from those features make this path too inefficient. 49 49 */ 50 50 ret_fast_syscall: 51 + __ret_fast_syscall: 51 52 UNWIND(.fnstart ) 52 53 UNWIND(.cantunwind ) 53 54 disable_irq_notrace @ disable interrupts ··· 79 78 * call. 80 79 */ 81 80 ret_fast_syscall: 81 + __ret_fast_syscall: 82 82 UNWIND(.fnstart ) 83 83 UNWIND(.cantunwind ) 84 84 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 ··· 257 255 tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? 258 256 bne __sys_trace 259 257 260 - invoke_syscall tbl, scno, r10, ret_fast_syscall 258 + invoke_syscall tbl, scno, r10, __ret_fast_syscall 261 259 262 260 add r1, sp, #S_OFF 263 261 2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+1
arch/arm/kernel/process.c
··· 338 338 339 339 static int __init gate_vma_init(void) 340 340 { 341 + vma_init(&gate_vma, NULL); 341 342 gate_vma.vm_page_prot = PAGE_READONLY_EXEC; 342 343 return 0; 343 344 }
+1 -4
arch/arm/mach-rpc/ecard.c
··· 212 212 */ 213 213 static void ecard_init_pgtables(struct mm_struct *mm) 214 214 { 215 - struct vm_area_struct vma; 215 + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC); 216 216 217 217 /* We want to set up the page tables for the following mapping: 218 218 * Virtual Physical ··· 236 236 dst_pgd = pgd_offset(mm, EASI_START); 237 237 238 238 memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); 239 - 240 - vma.vm_flags = VM_EXEC; 241 - vma.vm_mm = mm; 242 239 243 240 flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); 244 241 flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
+1 -1
arch/arm64/include/asm/tlb.h
··· 37 37 38 38 static inline void tlb_flush(struct mmu_gather *tlb) 39 39 { 40 - struct vm_area_struct vma = { .vm_mm = tlb->mm, }; 40 + struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0); 41 41 42 42 /* 43 43 * The ASID allocator will either invalidate the ASID or mark
+2 -2
arch/arm64/kernel/cpufeature.c
··· 1351 1351 1352 1352 static void update_cpu_capabilities(u16 scope_mask) 1353 1353 { 1354 - __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); 1355 1354 __update_cpu_capabilities(arm64_errata, scope_mask, 1356 1355 "enabling workaround for"); 1356 + __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); 1357 1357 } 1358 1358 1359 1359 static int __enable_cpu_capability(void *arg) ··· 1408 1408 1409 1409 static void __init enable_cpu_capabilities(u16 scope_mask) 1410 1410 { 1411 - __enable_cpu_capabilities(arm64_features, scope_mask); 1412 1411 __enable_cpu_capabilities(arm64_errata, scope_mask); 1412 + __enable_cpu_capabilities(arm64_features, scope_mask); 1413 1413 } 1414 1414 1415 1415 /*
+4 -3
arch/arm64/mm/hugetlbpage.c
··· 108 108 unsigned long pgsize, 109 109 unsigned long ncontig) 110 110 { 111 - struct vm_area_struct vma = { .vm_mm = mm }; 112 111 pte_t orig_pte = huge_ptep_get(ptep); 113 112 bool valid = pte_valid(orig_pte); 114 113 unsigned long i, saddr = addr; ··· 124 125 orig_pte = pte_mkdirty(orig_pte); 125 126 } 126 127 127 - if (valid) 128 + if (valid) { 129 + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); 128 130 flush_tlb_range(&vma, saddr, addr); 131 + } 129 132 return orig_pte; 130 133 } 131 134 ··· 146 145 unsigned long pgsize, 147 146 unsigned long ncontig) 148 147 { 149 - struct vm_area_struct vma = { .vm_mm = mm }; 148 + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); 150 149 unsigned long i, saddr = addr; 151 150 152 151 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+3 -1
arch/arm64/mm/init.c
··· 611 611 BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); 612 612 #endif 613 613 614 + #ifdef CONFIG_SPARSEMEM_VMEMMAP 614 615 /* 615 616 * Make sure we chose the upper bound of sizeof(struct page) 616 - * correctly. 617 + * correctly when sizing the VMEMMAP array. 617 618 */ 618 619 BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); 620 + #endif 619 621 620 622 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { 621 623 extern int sysctl_overcommit_memory;
+3 -4
arch/ia64/include/asm/tlb.h
··· 115 115 flush_tlb_all(); 116 116 } else { 117 117 /* 118 - * XXX fix me: flush_tlb_range() should take an mm pointer instead of a 119 - * vma pointer. 118 + * flush_tlb_range() takes a vma instead of a mm pointer because 119 + * some architectures want the vm_flags for ITLB/DTLB flush. 120 120 */ 121 - struct vm_area_struct vma; 121 + struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0); 122 122 123 - vma.vm_mm = tlb->mm; 124 123 /* flush the address range from the tlb: */ 125 124 flush_tlb_range(&vma, start, end); 126 125 /* now flush the virt. page-table area mapping the address range: */
+3 -1
arch/ia64/mm/init.c
··· 116 116 */ 117 117 vma = vm_area_alloc(current->mm); 118 118 if (vma) { 119 + vma_set_anonymous(vma); 119 120 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; 120 121 vma->vm_end = vma->vm_start + PAGE_SIZE; 121 122 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; ··· 134 133 if (!(current->personality & MMAP_PAGE_ZERO)) { 135 134 vma = vm_area_alloc(current->mm); 136 135 if (vma) { 136 + vma_set_anonymous(vma); 137 137 vma->vm_end = PAGE_SIZE; 138 138 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); 139 139 vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | ··· 275 273 276 274 static int __init gate_vma_init(void) 277 275 { 278 - gate_vma.vm_mm = NULL; 276 + vma_init(&gate_vma, NULL); 279 277 gate_vma.vm_start = FIXADDR_USER_START; 280 278 gate_vma.vm_end = FIXADDR_USER_END; 281 279 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-6
arch/mips/bcm47xx/setup.c
··· 212 212 */ 213 213 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) 214 214 cpu_wait = NULL; 215 - 216 - /* 217 - * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" 218 - * Enable ExternalSync for sync instruction to take effect 219 - */ 220 - set_c0_config7(MIPS_CONF7_ES); 221 215 break; 222 216 #endif 223 217 }
-3
arch/mips/include/asm/mipsregs.h
··· 681 681 #define MIPS_CONF7_WII (_ULCAST_(1) << 31) 682 682 683 683 #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) 684 - /* ExternalSync */ 685 - #define MIPS_CONF7_ES (_ULCAST_(1) << 8) 686 684 687 685 #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) 688 686 #define MIPS_CONF7_AR (_ULCAST_(1) << 16) ··· 2765 2767 __BUILD_SET_C0(cause) 2766 2768 __BUILD_SET_C0(config) 2767 2769 __BUILD_SET_C0(config5) 2768 - __BUILD_SET_C0(config7) 2769 2770 __BUILD_SET_C0(intcontrol) 2770 2771 __BUILD_SET_C0(intctl) 2771 2772 __BUILD_SET_C0(srsmap)
+1
arch/sparc/include/asm/Kbuild
··· 13 13 generic-y += mcs_spinlock.h 14 14 generic-y += mm-arch-hooks.h 15 15 generic-y += module.h 16 + generic-y += msi.h 16 17 generic-y += preempt.h 17 18 generic-y += rwsem.h 18 19 generic-y += serial.h
-32
arch/sparc/include/asm/msi.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * msi.h: Defines specific to the MBus - Sbus - Interface. 4 - * 5 - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 6 - * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 7 - */ 8 - 9 - #ifndef _SPARC_MSI_H 10 - #define _SPARC_MSI_H 11 - 12 - /* 13 - * Locations of MSI Registers. 14 - */ 15 - #define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */ 16 - 17 - /* 18 - * Useful bits in the MSI Registers. 19 - */ 20 - #define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ 21 - 22 - 23 - static inline void msi_set_sync(void) 24 - { 25 - __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" 26 - "andn %%g3, %2, %%g3\n\t" 27 - "sta %%g3, [%0] %1\n\t" : : 28 - "r" (MSI_MBUS_ARBEN), 29 - "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3"); 30 - } 31 - 32 - #endif /* !(_SPARC_MSI_H) */
+1 -1
arch/sparc/kernel/time_64.c
··· 814 814 } 815 815 } 816 816 817 - static void init_tick_ops(struct sparc64_tick_ops *ops) 817 + static void __init init_tick_ops(struct sparc64_tick_ops *ops) 818 818 { 819 819 unsigned long freq, quotient, tick; 820 820
+19 -1
arch/sparc/mm/srmmu.c
··· 37 37 #include <asm/mbus.h> 38 38 #include <asm/page.h> 39 39 #include <asm/asi.h> 40 - #include <asm/msi.h> 41 40 #include <asm/smp.h> 42 41 #include <asm/io.h> 43 42 ··· 113 114 114 115 pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4))); 115 116 set_pte((pte_t *)ctxp, pte); 117 + } 118 + 119 + /* 120 + * Locations of MSI Registers. 121 + */ 122 + #define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */ 123 + 124 + /* 125 + * Useful bits in the MSI Registers. 126 + */ 127 + #define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ 128 + 129 + static void msi_set_sync(void) 130 + { 131 + __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" 132 + "andn %%g3, %2, %%g3\n\t" 133 + "sta %%g3, [%0] %1\n\t" : : 134 + "r" (MSI_MBUS_ARBEN), 135 + "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3"); 116 136 } 117 137 118 138 void pmd_set(pmd_t *pmdp, pte_t *ptep)
+6 -2
arch/x86/boot/compressed/Makefile
··· 106 106 done 107 107 endef 108 108 109 + # We need to run two commands under "if_changed", so merge them into a 110 + # single invocation. 111 + quiet_cmd_check-and-link-vmlinux = LD $@ 112 + cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) 113 + 109 114 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE 110 - $(call if_changed,check_data_rel) 111 - $(call if_changed,ld) 115 + $(call if_changed,check-and-link-vmlinux) 112 116 113 117 OBJCOPYFLAGS_vmlinux.bin := -R .comment -S 114 118 $(obj)/vmlinux.bin: vmlinux FORCE
+4 -14
arch/x86/entry/entry_64.S
··· 981 981 982 982 call \do_sym 983 983 984 - jmp error_exit /* %ebx: no swapgs flag */ 984 + jmp error_exit 985 985 .endif 986 986 END(\sym) 987 987 .endm ··· 1222 1222 1223 1223 /* 1224 1224 * Save all registers in pt_regs, and switch GS if needed. 1225 - * Return: EBX=0: came from user mode; EBX=1: otherwise 1226 1225 */ 1227 1226 ENTRY(error_entry) 1228 1227 UNWIND_HINT_FUNC ··· 1268 1269 * for these here too. 1269 1270 */ 1270 1271 .Lerror_kernelspace: 1271 - incl %ebx 1272 1272 leaq native_irq_return_iret(%rip), %rcx 1273 1273 cmpq %rcx, RIP+8(%rsp) 1274 1274 je .Lerror_bad_iret ··· 1301 1303 1302 1304 /* 1303 1305 * Pretend that the exception came from user mode: set up pt_regs 1304 - * as if we faulted immediately after IRET and clear EBX so that 1305 - * error_exit knows that we will be returning to user mode. 1306 + * as if we faulted immediately after IRET. 1306 1307 */ 1307 1308 mov %rsp, %rdi 1308 1309 call fixup_bad_iret 1309 1310 mov %rax, %rsp 1310 - decl %ebx 1311 1311 jmp .Lerror_entry_from_usermode_after_swapgs 1312 1312 END(error_entry) 1313 1313 1314 - 1315 - /* 1316 - * On entry, EBX is a "return to kernel mode" flag: 1317 - * 1: already in kernel mode, don't need SWAPGS 1318 - * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode 1319 - */ 1320 1314 ENTRY(error_exit) 1321 1315 UNWIND_HINT_REGS 1322 1316 DISABLE_INTERRUPTS(CLBR_ANY) 1323 1317 TRACE_IRQS_OFF 1324 - testl %ebx, %ebx 1325 - jnz retint_kernel 1318 + testb $3, CS(%rsp) 1319 + jz retint_kernel 1326 1320 jmp retint_user 1327 1321 END(error_exit) 1328 1322
+5 -1
arch/x86/events/amd/ibs.c
··· 579 579 { 580 580 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); 581 581 struct perf_event *event = pcpu->event; 582 - struct hw_perf_event *hwc = &event->hw; 582 + struct hw_perf_event *hwc; 583 583 struct perf_sample_data data; 584 584 struct perf_raw_record raw; 585 585 struct pt_regs regs; ··· 602 602 return 0; 603 603 } 604 604 605 + if (WARN_ON_ONCE(!event)) 606 + goto fail; 607 + 608 + hwc = &event->hw; 605 609 msr = hwc->config_base; 606 610 buf = ibs_data.regs; 607 611 rdmsrl(msr, *buf);
+3
arch/x86/events/intel/core.c
··· 2997 2997 } 2998 2998 if (x86_pmu.pebs_aliases) 2999 2999 x86_pmu.pebs_aliases(event); 3000 + 3001 + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3002 + event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY; 3000 3003 } 3001 3004 3002 3005 if (needs_branch_stack(event)) {
+11 -14
arch/x86/events/intel/ds.c
··· 1186 1186 } 1187 1187 1188 1188 /* 1189 + * We must however always use iregs for the unwinder to stay sane; the 1190 + * record BP,SP,IP can point into thin air when the record is from a 1191 + * previous PMI context or an (I)RET happend between the record and 1192 + * PMI. 1193 + */ 1194 + if (sample_type & PERF_SAMPLE_CALLCHAIN) 1195 + data->callchain = perf_callchain(event, iregs); 1196 + 1197 + /* 1189 1198 * We use the interrupt regs as a base because the PEBS record does not 1190 1199 * contain a full regs set, specifically it seems to lack segment 1191 1200 * descriptors, which get used by things like user_mode(). 1192 1201 * 1193 1202 * In the simple case fix up only the IP for PERF_SAMPLE_IP. 1194 - * 1195 - * We must however always use BP,SP from iregs for the unwinder to stay 1196 - * sane; the record BP,SP can point into thin air when the record is 1197 - * from a previous PMI context or an (I)RET happend between the record 1198 - * and PMI. 1199 1203 */ 1200 1204 *regs = *iregs; 1201 1205 ··· 1218 1214 regs->si = pebs->si; 1219 1215 regs->di = pebs->di; 1220 1216 1221 - /* 1222 - * Per the above; only set BP,SP if we don't need callchains. 1223 - * 1224 - * XXX: does this make sense? 1225 - */ 1226 - if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { 1227 - regs->bp = pebs->bp; 1228 - regs->sp = pebs->sp; 1229 - } 1217 + regs->bp = pebs->bp; 1218 + regs->sp = pebs->sp; 1230 1219 1231 1220 #ifndef CONFIG_X86_32 1232 1221 regs->r8 = pebs->r8;
+1 -1
arch/x86/include/asm/qspinlock_paravirt.h
··· 43 43 "push %rdx;" 44 44 "mov $0x1,%eax;" 45 45 "xor %edx,%edx;" 46 - "lock cmpxchg %dl,(%rdi);" 46 + LOCK_PREFIX "cmpxchg %dl,(%rdi);" 47 47 "cmp $0x1,%al;" 48 48 "jne .slowpath;" 49 49 "pop %rdx;"
+3
arch/x86/kernel/apic/apic.c
··· 573 573 case 0x04: return 0x02000014; 574 574 } 575 575 576 + if (boot_cpu_data.x86_stepping > 4) 577 + return 0; 578 + 576 579 return ~0U; 577 580 } 578 581
+1 -1
arch/x86/kvm/mmu.c
··· 890 890 if (cache->nobjs >= min) 891 891 return 0; 892 892 while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 893 - page = (void *)__get_free_page(GFP_KERNEL); 893 + page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); 894 894 if (!page) 895 895 return -ENOMEM; 896 896 cache->objects[cache->nobjs++] = page;
+4 -4
arch/x86/net/bpf_jit_comp32.c
··· 1441 1441 1442 1442 /* sub esp,STACK_SIZE */ 1443 1443 EMIT2_off32(0x81, 0xEC, STACK_SIZE); 1444 - /* sub ebp,SCRATCH_SIZE+4+12*/ 1445 - EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16); 1444 + /* sub ebp,SCRATCH_SIZE+12*/ 1445 + EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12); 1446 1446 /* xor ebx,ebx */ 1447 1447 EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX)); 1448 1448 ··· 1475 1475 /* mov edx,dword ptr [ebp+off]*/ 1476 1476 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1])); 1477 1477 1478 - /* add ebp,SCRATCH_SIZE+4+12*/ 1479 - EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16); 1478 + /* add ebp,SCRATCH_SIZE+12*/ 1479 + EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12); 1480 1480 1481 1481 /* mov ebx,dword ptr [ebp-12]*/ 1482 1482 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
+1 -1
arch/x86/platform/efi/efi_64.c
··· 417 417 if (!(md->attribute & EFI_MEMORY_WB)) 418 418 flags |= _PAGE_PCD; 419 419 420 - if (sev_active()) 420 + if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO) 421 421 flags |= _PAGE_ENC; 422 422 423 423 pfn = md->phys_addr >> PAGE_SHIFT;
+1 -1
arch/x86/um/mem_32.c
··· 16 16 if (!FIXADDR_USER_START) 17 17 return 0; 18 18 19 - gate_vma.vm_mm = NULL; 19 + vma_init(&gate_vma, NULL); 20 20 gate_vma.vm_start = FIXADDR_USER_START; 21 21 gate_vma.vm_end = FIXADDR_USER_END; 22 22 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+41 -13
block/bio.c
··· 903 903 EXPORT_SYMBOL(bio_add_page); 904 904 905 905 /** 906 - * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio 906 + * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio 907 907 * @bio: bio to add pages to 908 908 * @iter: iov iterator describing the region to be mapped 909 909 * 910 - * Pins as many pages from *iter and appends them to @bio's bvec array. The 910 + * Pins pages from *iter and appends them to @bio's bvec array. The 911 911 * pages will have to be released using put_page() when done. 912 + * For multi-segment *iter, this function only adds pages from the 913 + * the next non-empty segment of the iov iterator. 912 914 */ 913 - int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) 915 + static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) 914 916 { 915 - unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; 917 + unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx; 916 918 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; 917 919 struct page **pages = (struct page **)bv; 918 - size_t offset, diff; 920 + size_t offset; 919 921 ssize_t size; 920 922 921 923 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); 922 924 if (unlikely(size <= 0)) 923 925 return size ? 
size : -EFAULT; 924 - nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; 926 + idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; 925 927 926 928 /* 927 929 * Deep magic below: We need to walk the pinned pages backwards ··· 936 934 bio->bi_iter.bi_size += size; 937 935 bio->bi_vcnt += nr_pages; 938 936 939 - diff = (nr_pages * PAGE_SIZE - offset) - size; 940 - while (nr_pages--) { 941 - bv[nr_pages].bv_page = pages[nr_pages]; 942 - bv[nr_pages].bv_len = PAGE_SIZE; 943 - bv[nr_pages].bv_offset = 0; 937 + while (idx--) { 938 + bv[idx].bv_page = pages[idx]; 939 + bv[idx].bv_len = PAGE_SIZE; 940 + bv[idx].bv_offset = 0; 944 941 } 945 942 946 943 bv[0].bv_offset += offset; 947 944 bv[0].bv_len -= offset; 948 - if (diff) 949 - bv[bio->bi_vcnt - 1].bv_len -= diff; 945 + bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size; 950 946 951 947 iov_iter_advance(iter, size); 948 + return 0; 949 + } 950 + 951 + /** 952 + * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio 953 + * @bio: bio to add pages to 954 + * @iter: iov iterator describing the region to be mapped 955 + * 956 + * Pins pages from *iter and appends them to @bio's bvec array. The 957 + * pages will have to be released using put_page() when done. 958 + * The function tries, but does not guarantee, to pin as many pages as 959 + * fit into the bio, or are requested in *iter, whatever is smaller. 960 + * If MM encounters an error pinning the requested pages, it stops. 961 + * Error is returned only if 0 pages could be pinned. 962 + */ 963 + int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) 964 + { 965 + unsigned short orig_vcnt = bio->bi_vcnt; 966 + 967 + do { 968 + int ret = __bio_iov_iter_get_pages(bio, iter); 969 + 970 + if (unlikely(ret)) 971 + return bio->bi_vcnt > orig_vcnt ? 
0 : ret; 972 + 973 + } while (iov_iter_count(iter) && !bio_full(bio)); 974 + 952 975 return 0; 953 976 } 954 977 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); ··· 1893 1866 bio_integrity_trim(split); 1894 1867 1895 1868 bio_advance(bio, split->bi_iter.bi_size); 1869 + bio->bi_iter.bi_done = 0; 1896 1870 1897 1871 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) 1898 1872 bio_set_flag(split, BIO_TRACE_COMPLETION);
+1 -3
block/blk-mq.c
··· 558 558 bool shared = false; 559 559 int cpu; 560 560 561 - if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) != 562 - MQ_RQ_IN_FLIGHT) 561 + if (!blk_mq_mark_complete(rq)) 563 562 return; 564 - 565 563 if (rq->internal_tag != -1) 566 564 blk_mq_sched_completed_request(rq); 567 565
+17 -9
drivers/acpi/acpi_lpss.c
··· 879 879 #define LPSS_GPIODEF0_DMA_LLP BIT(13) 880 880 881 881 static DEFINE_MUTEX(lpss_iosf_mutex); 882 + static bool lpss_iosf_d3_entered; 882 883 883 884 static void lpss_iosf_enter_d3_state(void) 884 885 { ··· 922 921 923 922 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, 924 923 LPSS_IOSF_GPIODEF0, value1, mask1); 924 + 925 + lpss_iosf_d3_entered = true; 926 + 925 927 exit: 926 928 mutex_unlock(&lpss_iosf_mutex); 927 929 } ··· 939 935 940 936 mutex_lock(&lpss_iosf_mutex); 941 937 938 + if (!lpss_iosf_d3_entered) 939 + goto exit; 940 + 941 + lpss_iosf_d3_entered = false; 942 + 942 943 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, 943 944 LPSS_IOSF_GPIODEF0, value1, mask1); 944 945 ··· 953 944 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, 954 945 LPSS_IOSF_PMCSR, value2, mask2); 955 946 947 + exit: 956 948 mutex_unlock(&lpss_iosf_mutex); 957 949 } 958 950 959 - static int acpi_lpss_suspend(struct device *dev, bool runtime) 951 + static int acpi_lpss_suspend(struct device *dev, bool wakeup) 960 952 { 961 953 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 962 - bool wakeup = runtime || device_may_wakeup(dev); 963 954 int ret; 964 955 965 956 if (pdata->dev_desc->flags & LPSS_SAVE_CTX) ··· 972 963 * wrong status for devices being about to be powered off. See 973 964 * lpss_iosf_enter_d3_state() for further information. 
974 965 */ 975 - if ((runtime || !pm_suspend_via_firmware()) && 966 + if (acpi_target_system_state() == ACPI_STATE_S0 && 976 967 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) 977 968 lpss_iosf_enter_d3_state(); 978 969 979 970 return ret; 980 971 } 981 972 982 - static int acpi_lpss_resume(struct device *dev, bool runtime) 973 + static int acpi_lpss_resume(struct device *dev) 983 974 { 984 975 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 985 976 int ret; ··· 988 979 * This call is kept first to be in symmetry with 989 980 * acpi_lpss_runtime_suspend() one. 990 981 */ 991 - if ((runtime || !pm_resume_via_firmware()) && 992 - lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) 982 + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) 993 983 lpss_iosf_exit_d3_state(); 994 984 995 985 ret = acpi_dev_resume(dev); ··· 1012 1004 return 0; 1013 1005 1014 1006 ret = pm_generic_suspend_late(dev); 1015 - return ret ? ret : acpi_lpss_suspend(dev, false); 1007 + return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); 1016 1008 } 1017 1009 1018 1010 static int acpi_lpss_resume_early(struct device *dev) 1019 1011 { 1020 - int ret = acpi_lpss_resume(dev, false); 1012 + int ret = acpi_lpss_resume(dev); 1021 1013 1022 1014 return ret ? ret : pm_generic_resume_early(dev); 1023 1015 } ··· 1032 1024 1033 1025 static int acpi_lpss_runtime_resume(struct device *dev) 1034 1026 { 1035 - int ret = acpi_lpss_resume(dev, true); 1027 + int ret = acpi_lpss_resume(dev); 1036 1028 1037 1029 return ret ? ret : pm_generic_runtime_resume(dev); 1038 1030 }
+31
drivers/acpi/acpica/psloop.c
··· 497 497 status = 498 498 acpi_ps_create_op(walk_state, aml_op_start, &op); 499 499 if (ACPI_FAILURE(status)) { 500 + /* 501 + * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by 502 + * executing it as a control method. However, if we encounter 503 + * an error while loading the table, we need to keep trying to 504 + * load the table rather than aborting the table load. Set the 505 + * status to AE_OK to proceed with the table load. 506 + */ 507 + if ((walk_state-> 508 + parse_flags & ACPI_PARSE_MODULE_LEVEL) 509 + && status == AE_ALREADY_EXISTS) { 510 + status = AE_OK; 511 + } 500 512 if (status == AE_CTRL_PARSE_CONTINUE) { 501 513 continue; 502 514 } ··· 705 693 status = 706 694 acpi_ps_next_parse_state(walk_state, op, status); 707 695 if (status == AE_CTRL_PENDING) { 696 + status = AE_OK; 697 + } else 698 + if ((walk_state-> 699 + parse_flags & ACPI_PARSE_MODULE_LEVEL) 700 + && status != AE_CTRL_TRANSFER 701 + && ACPI_FAILURE(status)) { 702 + /* 703 + * ACPI_PARSE_MODULE_LEVEL flag means that we are currently 704 + * loading a table by executing it as a control method. 705 + * However, if we encounter an error while loading the table, 706 + * we need to keep trying to load the table rather than 707 + * aborting the table load (setting the status to AE_OK 708 + * continues the table load). If we get a failure at this 709 + * point, it means that the dispatcher got an error while 710 + * processing Op (most likely an AML operand error) or a 711 + * control method was called from module level and the 712 + * dispatcher returned AE_CTRL_TRANSFER. In the latter case, 713 + * leave the status alone, there's nothing wrong with it. 714 + */ 708 715 status = AE_OK; 709 716 } 710 717 }
-8
drivers/base/dd.c
··· 434 434 goto probe_failed; 435 435 } 436 436 437 - /* 438 - * Ensure devices are listed in devices_kset in correct order 439 - * It's important to move Dev to the end of devices_kset before 440 - * calling .probe, because it could be recursive and parent Dev 441 - * should always go first 442 - */ 443 - devices_kset_move_last(dev); 444 - 445 437 if (dev->bus->probe) { 446 438 ret = dev->bus->probe(dev); 447 439 if (ret)
+79 -17
drivers/block/nbd.c
··· 112 112 struct task_struct *task_setup; 113 113 }; 114 114 115 + #define NBD_CMD_REQUEUED 1 116 + 115 117 struct nbd_cmd { 116 118 struct nbd_device *nbd; 119 + struct mutex lock; 117 120 int index; 118 121 int cookie; 119 - struct completion send_complete; 120 122 blk_status_t status; 123 + unsigned long flags; 124 + u32 cmd_cookie; 121 125 }; 122 126 123 127 #if IS_ENABLED(CONFIG_DEBUG_FS) ··· 148 144 static inline struct device *nbd_to_dev(struct nbd_device *nbd) 149 145 { 150 146 return disk_to_dev(nbd->disk); 147 + } 148 + 149 + static void nbd_requeue_cmd(struct nbd_cmd *cmd) 150 + { 151 + struct request *req = blk_mq_rq_from_pdu(cmd); 152 + 153 + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) 154 + blk_mq_requeue_request(req, true); 155 + } 156 + 157 + #define NBD_COOKIE_BITS 32 158 + 159 + static u64 nbd_cmd_handle(struct nbd_cmd *cmd) 160 + { 161 + struct request *req = blk_mq_rq_from_pdu(cmd); 162 + u32 tag = blk_mq_unique_tag(req); 163 + u64 cookie = cmd->cmd_cookie; 164 + 165 + return (cookie << NBD_COOKIE_BITS) | tag; 166 + } 167 + 168 + static u32 nbd_handle_to_tag(u64 handle) 169 + { 170 + return (u32)handle; 171 + } 172 + 173 + static u32 nbd_handle_to_cookie(u64 handle) 174 + { 175 + return (u32)(handle >> NBD_COOKIE_BITS); 151 176 } 152 177 153 178 static const char *nbdcmd_to_ascii(int cmd) ··· 352 319 } 353 320 config = nbd->config; 354 321 322 + if (!mutex_trylock(&cmd->lock)) 323 + return BLK_EH_RESET_TIMER; 324 + 355 325 if (config->num_connections > 1) { 356 326 dev_err_ratelimited(nbd_to_dev(nbd), 357 327 "Connection timed out, retrying (%d/%d alive)\n", ··· 379 343 nbd_mark_nsock_dead(nbd, nsock, 1); 380 344 mutex_unlock(&nsock->tx_lock); 381 345 } 382 - blk_mq_requeue_request(req, true); 346 + mutex_unlock(&cmd->lock); 347 + nbd_requeue_cmd(cmd); 383 348 nbd_config_put(nbd); 384 349 return BLK_EH_DONE; 385 350 } ··· 390 353 } 391 354 set_bit(NBD_TIMEDOUT, &config->runtime_flags); 392 355 cmd->status = BLK_STS_IOERR; 356 + 
mutex_unlock(&cmd->lock); 393 357 sock_shutdown(nbd); 394 358 nbd_config_put(nbd); 395 359 done: ··· 468 430 struct iov_iter from; 469 431 unsigned long size = blk_rq_bytes(req); 470 432 struct bio *bio; 433 + u64 handle; 471 434 u32 type; 472 435 u32 nbd_cmd_flags = 0; 473 - u32 tag = blk_mq_unique_tag(req); 474 436 int sent = nsock->sent, skip = 0; 475 437 476 438 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); ··· 512 474 goto send_pages; 513 475 } 514 476 iov_iter_advance(&from, sent); 477 + } else { 478 + cmd->cmd_cookie++; 515 479 } 516 480 cmd->index = index; 517 481 cmd->cookie = nsock->cookie; ··· 522 482 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 523 483 request.len = htonl(size); 524 484 } 525 - memcpy(request.handle, &tag, sizeof(tag)); 485 + handle = nbd_cmd_handle(cmd); 486 + memcpy(request.handle, &handle, sizeof(handle)); 526 487 527 488 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", 528 489 req, nbdcmd_to_ascii(type), ··· 541 500 nsock->pending = req; 542 501 nsock->sent = sent; 543 502 } 503 + set_bit(NBD_CMD_REQUEUED, &cmd->flags); 544 504 return BLK_STS_RESOURCE; 545 505 } 546 506 dev_err_ratelimited(disk_to_dev(nbd->disk), ··· 583 541 */ 584 542 nsock->pending = req; 585 543 nsock->sent = sent; 544 + set_bit(NBD_CMD_REQUEUED, &cmd->flags); 586 545 return BLK_STS_RESOURCE; 587 546 } 588 547 dev_err(disk_to_dev(nbd->disk), ··· 616 573 struct nbd_reply reply; 617 574 struct nbd_cmd *cmd; 618 575 struct request *req = NULL; 576 + u64 handle; 619 577 u16 hwq; 620 578 u32 tag; 621 579 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; 622 580 struct iov_iter to; 581 + int ret = 0; 623 582 624 583 reply.magic = 0; 625 584 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); ··· 639 594 return ERR_PTR(-EPROTO); 640 595 } 641 596 642 - memcpy(&tag, reply.handle, sizeof(u32)); 643 - 597 + memcpy(&handle, reply.handle, sizeof(handle)); 598 + tag = nbd_handle_to_tag(handle); 
644 599 hwq = blk_mq_unique_tag_to_hwq(tag); 645 600 if (hwq < nbd->tag_set.nr_hw_queues) 646 601 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], ··· 651 606 return ERR_PTR(-ENOENT); 652 607 } 653 608 cmd = blk_mq_rq_to_pdu(req); 609 + 610 + mutex_lock(&cmd->lock); 611 + if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) { 612 + dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", 613 + req, cmd->cmd_cookie, nbd_handle_to_cookie(handle)); 614 + ret = -ENOENT; 615 + goto out; 616 + } 617 + if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) { 618 + dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", 619 + req); 620 + ret = -ENOENT; 621 + goto out; 622 + } 654 623 if (ntohl(reply.error)) { 655 624 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 656 625 ntohl(reply.error)); 657 626 cmd->status = BLK_STS_IOERR; 658 - return cmd; 627 + goto out; 659 628 } 660 629 661 630 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); ··· 694 635 if (nbd_disconnected(config) || 695 636 config->num_connections <= 1) { 696 637 cmd->status = BLK_STS_IOERR; 697 - return cmd; 638 + goto out; 698 639 } 699 - return ERR_PTR(-EIO); 640 + ret = -EIO; 641 + goto out; 700 642 } 701 643 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 702 644 req, bvec.bv_len); 703 645 } 704 - } else { 705 - /* See the comment in nbd_queue_rq. */ 706 - wait_for_completion(&cmd->send_complete); 707 646 } 708 - return cmd; 647 + out: 648 + mutex_unlock(&cmd->lock); 649 + return ret ? 
ERR_PTR(ret) : cmd; 709 650 } 710 651 711 652 static void recv_work(struct work_struct *work) ··· 864 805 */ 865 806 blk_mq_start_request(req); 866 807 if (unlikely(nsock->pending && nsock->pending != req)) { 867 - blk_mq_requeue_request(req, true); 808 + nbd_requeue_cmd(cmd); 868 809 ret = 0; 869 810 goto out; 870 811 } ··· 877 818 dev_err_ratelimited(disk_to_dev(nbd->disk), 878 819 "Request send failed, requeueing\n"); 879 820 nbd_mark_nsock_dead(nbd, nsock, 1); 880 - blk_mq_requeue_request(req, true); 821 + nbd_requeue_cmd(cmd); 881 822 ret = 0; 882 823 } 883 824 out: ··· 901 842 * that the server is misbehaving (or there was an error) before we're 902 843 * done sending everything over the wire. 903 844 */ 904 - init_completion(&cmd->send_complete); 845 + mutex_lock(&cmd->lock); 846 + clear_bit(NBD_CMD_REQUEUED, &cmd->flags); 905 847 906 848 /* We can be called directly from the user space process, which means we 907 849 * could possibly have signals pending so our sendmsg will fail. In ··· 914 854 ret = BLK_STS_IOERR; 915 855 else if (!ret) 916 856 ret = BLK_STS_OK; 917 - complete(&cmd->send_complete); 857 + mutex_unlock(&cmd->lock); 918 858 919 859 return ret; 920 860 } ··· 1520 1460 { 1521 1461 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1522 1462 cmd->nbd = set->driver_data; 1463 + cmd->flags = 0; 1464 + mutex_init(&cmd->lock); 1523 1465 return 0; 1524 1466 } 1525 1467
+1
drivers/char/mem.c
··· 708 708 #endif 709 709 if (vma->vm_flags & VM_SHARED) 710 710 return shmem_zero_setup(vma); 711 + vma_set_anonymous(vma); 711 712 return 0; 712 713 } 713 714
+9 -1
drivers/char/random.c
··· 1895 1895 write_pool(struct entropy_store *r, const char __user *buffer, size_t count) 1896 1896 { 1897 1897 size_t bytes; 1898 - __u32 buf[16]; 1898 + __u32 t, buf[16]; 1899 1899 const char __user *p = buffer; 1900 1900 1901 1901 while (count > 0) { 1902 + int b, i = 0; 1903 + 1902 1904 bytes = min(count, sizeof(buf)); 1903 1905 if (copy_from_user(&buf, p, bytes)) 1904 1906 return -EFAULT; 1907 + 1908 + for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { 1909 + if (!arch_get_random_int(&t)) 1910 + break; 1911 + buf[i] ^= t; 1912 + } 1905 1913 1906 1914 count -= bytes; 1907 1915 p += bytes;
+44 -15
drivers/clk/clk-aspeed.c
··· 24 24 #define ASPEED_MPLL_PARAM 0x20 25 25 #define ASPEED_HPLL_PARAM 0x24 26 26 #define AST2500_HPLL_BYPASS_EN BIT(20) 27 - #define AST2400_HPLL_STRAPPED BIT(18) 27 + #define AST2400_HPLL_PROGRAMMED BIT(18) 28 28 #define AST2400_HPLL_BYPASS_EN BIT(17) 29 29 #define ASPEED_MISC_CTRL 0x2c 30 30 #define UART_DIV13_EN BIT(12) ··· 91 91 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ 92 92 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ 93 93 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ 94 - [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ 95 - [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */ 94 + [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */ 95 + [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */ 96 96 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL }, 97 97 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */ 98 98 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */ ··· 212 212 { 213 213 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); 214 214 u32 clk = BIT(gate->clock_idx); 215 + u32 rst = BIT(gate->reset_idx); 215 216 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; 216 217 u32 reg; 218 + 219 + /* 220 + * If the IP is in reset, treat the clock as not enabled, 221 + * this happens with some clocks such as the USB one when 222 + * coming from cold reset. Without this, aspeed_clk_enable() 223 + * will fail to lift the reset. 
224 + */ 225 + if (gate->reset_idx >= 0) { 226 + regmap_read(gate->map, ASPEED_RESET_CTRL, &reg); 227 + if (reg & rst) 228 + return 0; 229 + } 217 230 218 231 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg); 219 232 ··· 578 565 static void __init aspeed_ast2400_cc(struct regmap *map) 579 566 { 580 567 struct clk_hw *hw; 581 - u32 val, freq, div; 568 + u32 val, div, clkin, hpll; 569 + const u16 hpll_rates[][4] = { 570 + {384, 360, 336, 408}, 571 + {400, 375, 350, 425}, 572 + }; 573 + int rate; 582 574 583 575 /* 584 576 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by 585 577 * strapping 586 578 */ 587 579 regmap_read(map, ASPEED_STRAP, &val); 588 - if (val & CLKIN_25MHZ_EN) 589 - freq = 25000000; 590 - else if (val & AST2400_CLK_SOURCE_SEL) 591 - freq = 48000000; 592 - else 593 - freq = 24000000; 594 - hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq); 595 - pr_debug("clkin @%u MHz\n", freq / 1000000); 580 + rate = (val >> 8) & 3; 581 + if (val & CLKIN_25MHZ_EN) { 582 + clkin = 25000000; 583 + hpll = hpll_rates[1][rate]; 584 + } else if (val & AST2400_CLK_SOURCE_SEL) { 585 + clkin = 48000000; 586 + hpll = hpll_rates[0][rate]; 587 + } else { 588 + clkin = 24000000; 589 + hpll = hpll_rates[0][rate]; 590 + } 591 + hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin); 592 + pr_debug("clkin @%u MHz\n", clkin / 1000000); 596 593 597 594 /* 598 595 * High-speed PLL clock derived from the crystal. This the CPU clock, 599 - * and we assume that it is enabled 596 + * and we assume that it is enabled. It can be configured through the 597 + * HPLL_PARAM register, or set to a specified frequency by strapping. 
600 598 */ 601 599 regmap_read(map, ASPEED_HPLL_PARAM, &val); 602 - WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured"); 603 - aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val); 600 + if (val & AST2400_HPLL_PROGRAMMED) 601 + hw = aspeed_ast2400_calc_pll("hpll", val); 602 + else 603 + hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0, 604 + hpll * 1000000); 605 + 606 + aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw; 604 607 605 608 /* 606 609 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
+1 -2
drivers/clk/clk.c
··· 24 24 #include <linux/pm_runtime.h> 25 25 #include <linux/sched.h> 26 26 #include <linux/clkdev.h> 27 - #include <linux/stringify.h> 28 27 29 28 #include "clk.h" 30 29 ··· 2558 2559 unsigned long flag; 2559 2560 const char *name; 2560 2561 } clk_flags[] = { 2561 - #define ENTRY(f) { f, __stringify(f) } 2562 + #define ENTRY(f) { f, #f } 2562 2563 ENTRY(CLK_SET_RATE_GATE), 2563 2564 ENTRY(CLK_SET_PARENT_GATE), 2564 2565 ENTRY(CLK_SET_RATE_PARENT),
+1 -1
drivers/clk/meson/clk-audio-divider.c
··· 51 51 struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk); 52 52 unsigned long divider; 53 53 54 - divider = meson_parm_read(clk->map, &adiv->div); 54 + divider = meson_parm_read(clk->map, &adiv->div) + 1; 55 55 56 56 return DIV_ROUND_UP_ULL((u64)parent_rate, divider); 57 57 }
+1
drivers/clk/meson/gxbb.c
··· 498 498 .ops = &clk_regmap_gate_ops, 499 499 .parent_names = (const char *[]){ "fclk_div2_div" }, 500 500 .num_parents = 1, 501 + .flags = CLK_IS_CRITICAL, 501 502 }, 502 503 }; 503 504
+38
drivers/clk/mvebu/armada-37xx-periph.c
··· 35 35 #define CLK_SEL 0x10 36 36 #define CLK_DIS 0x14 37 37 38 + #define ARMADA_37XX_DVFS_LOAD_1 1 38 39 #define LOAD_LEVEL_NR 4 39 40 40 41 #define ARMADA_37XX_NB_L0L1 0x18 ··· 508 507 return -EINVAL; 509 508 } 510 509 510 + /* 511 + * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz 512 + * respectively) to L0 frequency (1.2 Ghz) requires a significant 513 + * amount of time to let VDD stabilize to the appropriate 514 + * voltage. This amount of time is large enough that it cannot be 515 + * covered by the hardware countdown register. Due to this, the CPU 516 + * might start operating at L0 before the voltage is stabilized, 517 + * leading to CPU stalls. 518 + * 519 + * To work around this problem, we prevent switching directly from the 520 + * L2/L3 frequencies to the L0 frequency, and instead switch to the L1 521 + * frequency in-between. The sequence therefore becomes: 522 + * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ) 523 + * 2. Sleep 20ms for stabling VDD voltage 524 + * 3. Then switch from L1(600MHZ) to L0(1200Mhz). 525 + */ 526 + static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base) 527 + { 528 + unsigned int cur_level; 529 + 530 + if (rate != 1200 * 1000 * 1000) 531 + return; 532 + 533 + regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level); 534 + cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK; 535 + if (cur_level <= ARMADA_37XX_DVFS_LOAD_1) 536 + return; 537 + 538 + regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD, 539 + ARMADA_37XX_NB_CPU_LOAD_MASK, 540 + ARMADA_37XX_DVFS_LOAD_1); 541 + msleep(20); 542 + } 543 + 511 544 static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, 512 545 unsigned long parent_rate) 513 546 { ··· 572 537 */ 573 538 reg = ARMADA_37XX_NB_CPU_LOAD; 574 539 mask = ARMADA_37XX_NB_CPU_LOAD_MASK; 540 + 541 + clk_pm_cpu_set_rate_wa(rate, base); 542 + 575 543 regmap_update_bits(base, reg, mask, load_level); 576 544 577 545 return rate;
+1
drivers/clk/qcom/gcc-msm8996.c
··· 2781 2781 2782 2782 static struct clk_branch gcc_ufs_tx_symbol_0_clk = { 2783 2783 .halt_reg = 0x75018, 2784 + .halt_check = BRANCH_HALT_SKIP, 2784 2785 .clkr = { 2785 2786 .enable_reg = 0x75018, 2786 2787 .enable_mask = BIT(0),
+1
drivers/clk/qcom/mmcc-msm8996.c
··· 2910 2910 .name = "mmagic_bimc", 2911 2911 }, 2912 2912 .pwrsts = PWRSTS_OFF_ON, 2913 + .flags = ALWAYS_ON, 2913 2914 }; 2914 2915 2915 2916 static struct gdsc mmagic_video_gdsc = {
+15 -2
drivers/cpufreq/intel_pstate.c
··· 311 311 312 312 #ifdef CONFIG_ACPI 313 313 314 - static bool intel_pstate_get_ppc_enable_status(void) 314 + static bool intel_pstate_acpi_pm_profile_server(void) 315 315 { 316 316 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 317 317 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 318 + return true; 319 + 320 + return false; 321 + } 322 + 323 + static bool intel_pstate_get_ppc_enable_status(void) 324 + { 325 + if (intel_pstate_acpi_pm_profile_server()) 318 326 return true; 319 327 320 328 return acpi_ppc; ··· 466 458 467 459 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 468 460 { 461 + } 462 + 463 + static inline bool intel_pstate_acpi_pm_profile_server(void) 464 + { 465 + return false; 469 466 } 470 467 #endif 471 468 ··· 1854 1841 intel_pstate_hwp_enable(cpu); 1855 1842 1856 1843 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 1857 - if (id) 1844 + if (id && intel_pstate_acpi_pm_profile_server()) 1858 1845 hwp_boost = true; 1859 1846 } 1860 1847
+1
drivers/cpufreq/qcom-cpufreq-kryo.c
··· 183 183 static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = { 184 184 { .compatible = "qcom,apq8096", }, 185 185 { .compatible = "qcom,msm8996", }, 186 + {} 186 187 }; 187 188 188 189 /*
+5 -1
drivers/gpio/gpio-uniphier.c
··· 181 181 fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node); 182 182 fwspec.param_count = 2; 183 183 fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET; 184 - fwspec.param[1] = IRQ_TYPE_NONE; 184 + /* 185 + * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH 186 + * temporarily. Anyway, ->irq_set_type() will override it later. 187 + */ 188 + fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH; 185 189 186 190 return irq_create_fwspec_mapping(&fwspec); 187 191 }
+2 -1
drivers/gpio/gpiolib-of.c
··· 64 64 * Note that active low is the default. 65 65 */ 66 66 if (IS_ENABLED(CONFIG_REGULATOR) && 67 - (of_device_is_compatible(np, "reg-fixed-voltage") || 67 + (of_device_is_compatible(np, "regulator-fixed") || 68 + of_device_is_compatible(np, "reg-fixed-voltage") || 68 69 of_device_is_compatible(np, "regulator-gpio"))) { 69 70 /* 70 71 * The regulator GPIO handles are specified such that the
+1
drivers/gpu/drm/i915/i915_drv.h
··· 652 652 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 653 653 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 654 654 #define QUIRK_INCREASE_T12_DELAY (1<<6) 655 + #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) 655 656 656 657 struct intel_fbdev; 657 658 struct intel_fbc_work;
+11 -2
drivers/gpu/drm/i915/intel_ddi.c
··· 1782 1782 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); 1783 1783 } 1784 1784 1785 - void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, 1786 - enum transcoder cpu_transcoder) 1785 + void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) 1787 1786 { 1787 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 1788 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1789 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1788 1790 i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1789 1791 uint32_t val = I915_READ(reg); 1790 1792 1791 1793 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); 1792 1794 val |= TRANS_DDI_PORT_NONE; 1793 1795 I915_WRITE(reg, val); 1796 + 1797 + if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && 1798 + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 1799 + DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n"); 1800 + /* Quirk time at 100ms for reliable operation */ 1801 + msleep(100); 1802 + } 1794 1803 } 1795 1804 1796 1805 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+20 -1
drivers/gpu/drm/i915/intel_display.c
··· 5809 5809 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false); 5810 5810 5811 5811 if (!transcoder_is_dsi(cpu_transcoder)) 5812 - intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5812 + intel_ddi_disable_transcoder_func(old_crtc_state); 5813 5813 5814 5814 if (INTEL_GEN(dev_priv) >= 9) 5815 5815 skylake_scaler_disable(intel_crtc); ··· 14646 14646 DRM_INFO("Applying T12 delay quirk\n"); 14647 14647 } 14648 14648 14649 + /* 14650 + * GeminiLake NUC HDMI outputs require additional off time 14651 + * this allows the onboard retimer to correctly sync to signal 14652 + */ 14653 + static void quirk_increase_ddi_disabled_time(struct drm_device *dev) 14654 + { 14655 + struct drm_i915_private *dev_priv = to_i915(dev); 14656 + 14657 + dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; 14658 + DRM_INFO("Applying Increase DDI Disabled quirk\n"); 14659 + } 14660 + 14649 14661 struct intel_quirk { 14650 14662 int device; 14651 14663 int subsystem_vendor; ··· 14744 14732 14745 14733 /* Toshiba Satellite P50-C-18C */ 14746 14734 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, 14735 + 14736 + /* GeminiLake NUC */ 14737 + { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, 14738 + { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, 14739 + /* ASRock ITX*/ 14740 + { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, 14741 + { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, 14747 14742 }; 14748 14743 14749 14744 static void intel_init_quirks(struct drm_device *dev)
+1 -2
drivers/gpu/drm/i915/intel_drv.h
··· 1388 1388 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); 1389 1389 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); 1390 1390 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state); 1391 - void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, 1392 - enum transcoder cpu_transcoder); 1391 + void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); 1393 1392 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); 1394 1393 void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); 1395 1394 struct intel_encoder *
+6 -3
drivers/gpu/drm/imx/imx-ldb.c
··· 612 612 return PTR_ERR(imx_ldb->regmap); 613 613 } 614 614 615 + /* disable LDB by resetting the control register to POR default */ 616 + regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); 617 + 615 618 imx_ldb->dev = dev; 616 619 617 620 if (of_id) ··· 655 652 if (ret || i < 0 || i > 1) 656 653 return -EINVAL; 657 654 655 + if (!of_device_is_available(child)) 656 + continue; 657 + 658 658 if (dual && i > 0) { 659 659 dev_warn(dev, "dual-channel mode, ignoring second output\n"); 660 660 continue; 661 661 } 662 - 663 - if (!of_device_is_available(child)) 664 - continue; 665 662 666 663 channel = &imx_ldb->channel[i]; 667 664 channel->ldb = imx_ldb;
+2 -1
drivers/gpu/ipu-v3/ipu-csi.c
··· 339 339 break; 340 340 case V4L2_MBUS_BT656: 341 341 csicfg->ext_vsync = 0; 342 - if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field)) 342 + if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) || 343 + mbus_fmt->field == V4L2_FIELD_ALTERNATE) 343 344 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED; 344 345 else 345 346 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+6 -2
drivers/i2c/busses/i2c-davinci.c
··· 237 237 /* 238 238 * It's not always possible to have 1 to 2 ratio when d=7, so fall back 239 239 * to minimal possible clkh in this case. 240 + * 241 + * Note: 242 + * CLKH is not allowed to be 0, in this case I2C clock is not generated 243 + * at all 240 244 */ 241 - if (clk >= clkl + d) { 245 + if (clk > clkl + d) { 242 246 clkh = clk - clkl - d; 243 247 clkl -= d; 244 248 } else { 245 - clkh = 0; 249 + clkh = 1; 246 250 clkl = clk - (d << 1); 247 251 } 248 252
+2 -3
drivers/i2c/busses/i2c-imx.c
··· 368 368 goto err_desc; 369 369 } 370 370 371 + reinit_completion(&dma->cmd_complete); 371 372 txdesc->callback = i2c_imx_dma_callback; 372 373 txdesc->callback_param = i2c_imx; 373 374 if (dma_submit_error(dmaengine_submit(txdesc))) { ··· 623 622 * The first byte must be transmitted by the CPU. 624 623 */ 625 624 imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); 626 - reinit_completion(&i2c_imx->dma->cmd_complete); 627 625 time_left = wait_for_completion_timeout( 628 626 &i2c_imx->dma->cmd_complete, 629 627 msecs_to_jiffies(DMA_TIMEOUT)); ··· 681 681 if (result) 682 682 return result; 683 683 684 - reinit_completion(&i2c_imx->dma->cmd_complete); 685 684 time_left = wait_for_completion_timeout( 686 685 &i2c_imx->dma->cmd_complete, 687 686 msecs_to_jiffies(DMA_TIMEOUT)); ··· 1009 1010 i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl, 1010 1011 "gpio"); 1011 1012 rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN); 1012 - rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH); 1013 + rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); 1013 1014 1014 1015 if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER || 1015 1016 PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
+51 -3
drivers/i2c/busses/i2c-rcar.c
··· 32 32 #include <linux/of_device.h> 33 33 #include <linux/platform_device.h> 34 34 #include <linux/pm_runtime.h> 35 + #include <linux/reset.h> 35 36 #include <linux/slab.h> 36 37 37 38 /* register offsets */ ··· 112 111 #define ID_ARBLOST (1 << 3) 113 112 #define ID_NACK (1 << 4) 114 113 /* persistent flags */ 114 + #define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */ 115 115 #define ID_P_PM_BLOCKED (1 << 31) 116 - #define ID_P_MASK ID_P_PM_BLOCKED 116 + #define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA) 117 117 118 118 enum rcar_i2c_type { 119 119 I2C_RCAR_GEN1, ··· 143 141 struct dma_chan *dma_rx; 144 142 struct scatterlist sg; 145 143 enum dma_data_direction dma_direction; 144 + 145 + struct reset_control *rstc; 146 146 }; 147 147 148 148 #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) ··· 374 370 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), 375 371 sg_dma_len(&priv->sg), priv->dma_direction); 376 372 373 + /* Gen3 can only do one RXDMA per transfer and we just completed it */ 374 + if (priv->devtype == I2C_RCAR_GEN3 && 375 + priv->dma_direction == DMA_FROM_DEVICE) 376 + priv->flags |= ID_P_NO_RXDMA; 377 + 377 378 priv->dma_direction = DMA_NONE; 378 379 } 379 380 ··· 416 407 unsigned char *buf; 417 408 int len; 418 409 419 - /* Do not use DMA if it's not available or for messages < 8 bytes */ 420 - if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE)) 410 + /* Do various checks to see if DMA is feasible at all */ 411 + if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) || 412 + (read && priv->flags & ID_P_NO_RXDMA)) 421 413 return; 422 414 423 415 if (read) { ··· 749 739 } 750 740 } 751 741 742 + /* I2C is a special case, we need to poll the status of a reset */ 743 + static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv) 744 + { 745 + int i, ret; 746 + 747 + ret = reset_control_reset(priv->rstc); 748 + if (ret) 749 + return ret; 750 + 751 + for (i = 0; i < LOOP_TIMEOUT; i++) { 752 + ret = 
reset_control_status(priv->rstc); 753 + if (ret == 0) 754 + return 0; 755 + udelay(1); 756 + } 757 + 758 + return -ETIMEDOUT; 759 + } 760 + 752 761 static int rcar_i2c_master_xfer(struct i2c_adapter *adap, 753 762 struct i2c_msg *msgs, 754 763 int num) ··· 778 749 long time_left; 779 750 780 751 pm_runtime_get_sync(dev); 752 + 753 + /* Gen3 needs a reset before allowing RXDMA once */ 754 + if (priv->devtype == I2C_RCAR_GEN3) { 755 + priv->flags |= ID_P_NO_RXDMA; 756 + if (!IS_ERR(priv->rstc)) { 757 + ret = rcar_i2c_do_reset(priv); 758 + if (ret == 0) 759 + priv->flags &= ~ID_P_NO_RXDMA; 760 + } 761 + } 781 762 782 763 rcar_i2c_init(priv); 783 764 ··· 958 919 ret = rcar_i2c_clock_calculate(priv, &i2c_t); 959 920 if (ret < 0) 960 921 goto out_pm_put; 922 + 923 + if (priv->devtype == I2C_RCAR_GEN3) { 924 + priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); 925 + if (!IS_ERR(priv->rstc)) { 926 + ret = reset_control_status(priv->rstc); 927 + if (ret < 0) 928 + priv->rstc = ERR_PTR(-ENOTSUPP); 929 + } 930 + } 961 931 962 932 /* Stay always active when multi-master to keep arbitration working */ 963 933 if (of_property_read_bool(dev->of_node, "multi-master"))
+1 -1
drivers/i2c/i2c-core-base.c
··· 624 624 static void i2c_adapter_lock_bus(struct i2c_adapter *adapter, 625 625 unsigned int flags) 626 626 { 627 - rt_mutex_lock(&adapter->bus_lock); 627 + rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); 628 628 } 629 629 630 630 /**
+2 -2
drivers/i2c/i2c-mux.c
··· 144 144 struct i2c_mux_priv *priv = adapter->algo_data; 145 145 struct i2c_adapter *parent = priv->muxc->parent; 146 146 147 - rt_mutex_lock(&parent->mux_lock); 147 + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); 148 148 if (!(flags & I2C_LOCK_ROOT_ADAPTER)) 149 149 return; 150 150 i2c_lock_bus(parent, flags); ··· 181 181 struct i2c_mux_priv *priv = adapter->algo_data; 182 182 struct i2c_adapter *parent = priv->muxc->parent; 183 183 184 - rt_mutex_lock(&parent->mux_lock); 184 + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); 185 185 i2c_lock_bus(parent, flags); 186 186 } 187 187
+2
drivers/input/mouse/elan_i2c_core.c
··· 1346 1346 { "ELAN0611", 0 }, 1347 1347 { "ELAN0612", 0 }, 1348 1348 { "ELAN0618", 0 }, 1349 + { "ELAN061D", 0 }, 1350 + { "ELAN0622", 0 }, 1349 1351 { "ELAN1000", 0 }, 1350 1352 { } 1351 1353 };
+7
drivers/input/serio/i8042-x86ia64io.h
··· 527 527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), 528 528 }, 529 529 }, 530 + { 531 + /* Lenovo LaVie Z */ 532 + .matches = { 533 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 534 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), 535 + }, 536 + }, 530 537 { } 531 538 }; 532 539
+2 -1
drivers/mmc/host/mxcmmc.c
··· 293 293 int i; 294 294 295 295 for_each_sg(data->sg, sg, data->sg_len, i) { 296 - void *buf = kmap_atomic(sg_page(sg) + sg->offset; 296 + void *buf = kmap_atomic(sg_page(sg) + sg->offset); 297 297 buffer_swap32(buf, sg->length); 298 298 kunmap_atomic(buf); 299 + } 299 300 } 300 301 #else 301 302 static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
+12 -2
drivers/net/bonding/bond_main.c
··· 1717 1717 goto err_upper_unlink; 1718 1718 } 1719 1719 1720 + bond->nest_level = dev_get_nest_level(bond_dev) + 1; 1721 + 1720 1722 /* If the mode uses primary, then the following is handled by 1721 1723 * bond_change_active_slave(). 1722 1724 */ ··· 1766 1764 if (bond_mode_can_use_xmit_hash(bond)) 1767 1765 bond_update_slave_arr(bond, NULL); 1768 1766 1769 - bond->nest_level = dev_get_nest_level(bond_dev); 1770 1767 1771 1768 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", 1772 1769 slave_dev->name, ··· 3416 3415 } 3417 3416 } 3418 3417 3418 + static int bond_get_nest_level(struct net_device *bond_dev) 3419 + { 3420 + struct bonding *bond = netdev_priv(bond_dev); 3421 + 3422 + return bond->nest_level; 3423 + } 3424 + 3419 3425 static void bond_get_stats(struct net_device *bond_dev, 3420 3426 struct rtnl_link_stats64 *stats) 3421 3427 { ··· 3431 3423 struct list_head *iter; 3432 3424 struct slave *slave; 3433 3425 3434 - spin_lock(&bond->stats_lock); 3426 + spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev)); 3435 3427 memcpy(stats, &bond->bond_stats, sizeof(*stats)); 3436 3428 3437 3429 rcu_read_lock(); ··· 4236 4228 .ndo_neigh_setup = bond_neigh_setup, 4237 4229 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4238 4230 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4231 + .ndo_get_lock_subclass = bond_get_nest_level, 4239 4232 #ifdef CONFIG_NET_POLL_CONTROLLER 4240 4233 .ndo_netpoll_setup = bond_netpoll_setup, 4241 4234 .ndo_netpoll_cleanup = bond_netpoll_cleanup, ··· 4735 4726 if (!bond->wq) 4736 4727 return -ENOMEM; 4737 4728 4729 + bond->nest_level = SINGLE_DEPTH_NESTING; 4738 4730 netdev_lockdep_set_classes(bond_dev); 4739 4731 4740 4732 list_add_tail(&bond->bond_list, &bn->dev_list);
+1
drivers/net/can/usb/ems_usb.c
··· 1072 1072 usb_free_urb(dev->intr_urb); 1073 1073 1074 1074 kfree(dev->intr_in_buffer); 1075 + kfree(dev->tx_msg_buffer); 1075 1076 } 1076 1077 } 1077 1078
+2 -2
drivers/net/dsa/mv88e6xxx/chip.c
··· 2617 2617 .rmu_disable = mv88e6085_g1_rmu_disable, 2618 2618 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2619 2619 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2620 - .serdes_power = mv88e6341_serdes_power, 2621 2620 }; 2622 2621 2623 2622 static const struct mv88e6xxx_ops mv88e6095_ops = { ··· 2782 2783 .reset = mv88e6352_g1_reset, 2783 2784 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2784 2785 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2786 + .serdes_power = mv88e6341_serdes_power, 2785 2787 .gpio_ops = &mv88e6352_gpio_ops, 2786 2788 }; 2787 2789 ··· 2964 2964 .reset = mv88e6352_g1_reset, 2965 2965 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2966 2966 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2967 - .serdes_power = mv88e6341_serdes_power, 2968 2967 }; 2969 2968 2970 2969 static const struct mv88e6xxx_ops mv88e6176_ops = { ··· 3345 3346 .reset = mv88e6352_g1_reset, 3346 3347 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3347 3348 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3349 + .serdes_power = mv88e6341_serdes_power, 3348 3350 .gpio_ops = &mv88e6352_gpio_ops, 3349 3351 .avb_ops = &mv88e6390_avb_ops, 3350 3352 .ptp_ops = &mv88e6352_ptp_ops,
+1
drivers/net/ethernet/amazon/ena/ena_com.c
··· 333 333 334 334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 335 335 336 + io_sq->dma_addr_bits = ena_dev->dma_addr_bits; 336 337 io_sq->desc_entry_size = 337 338 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 338 339 sizeof(struct ena_eth_io_tx_desc) :
+2 -2
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
··· 1111 1111 1112 1112 if (pdata->tx_pause != pdata->phy.tx_pause) { 1113 1113 new_state = 1; 1114 - pdata->hw_if.config_tx_flow_control(pdata); 1115 1114 pdata->tx_pause = pdata->phy.tx_pause; 1115 + pdata->hw_if.config_tx_flow_control(pdata); 1116 1116 } 1117 1117 1118 1118 if (pdata->rx_pause != pdata->phy.rx_pause) { 1119 1119 new_state = 1; 1120 - pdata->hw_if.config_rx_flow_control(pdata); 1121 1120 pdata->rx_pause = pdata->phy.rx_pause; 1121 + pdata->hw_if.config_rx_flow_control(pdata); 1122 1122 } 1123 1123 1124 1124 /* Speed support */
+2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 3073 3073 3074 3074 adapter->geneve_port = 0; 3075 3075 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); 3076 + break; 3076 3077 default: 3077 3078 return; 3078 3079 } ··· 3159 3158 3160 3159 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 3161 3160 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); 3161 + break; 3162 3162 default: 3163 3163 return; 3164 3164 }
+28 -52
drivers/net/ethernet/cisco/enic/enic_main.c
··· 2047 2047 return 0; 2048 2048 } 2049 2049 2050 + static int _enic_change_mtu(struct net_device *netdev, int new_mtu) 2051 + { 2052 + bool running = netif_running(netdev); 2053 + int err = 0; 2054 + 2055 + ASSERT_RTNL(); 2056 + if (running) { 2057 + err = enic_stop(netdev); 2058 + if (err) 2059 + return err; 2060 + } 2061 + 2062 + netdev->mtu = new_mtu; 2063 + 2064 + if (running) { 2065 + err = enic_open(netdev); 2066 + if (err) 2067 + return err; 2068 + } 2069 + 2070 + return 0; 2071 + } 2072 + 2050 2073 static int enic_change_mtu(struct net_device *netdev, int new_mtu) 2051 2074 { 2052 2075 struct enic *enic = netdev_priv(netdev); 2053 - int running = netif_running(netdev); 2054 2076 2055 2077 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2056 2078 return -EOPNOTSUPP; 2057 2079 2058 - if (running) 2059 - enic_stop(netdev); 2060 - 2061 - netdev->mtu = new_mtu; 2062 - 2063 2080 if (netdev->mtu > enic->port_mtu) 2064 2081 netdev_warn(netdev, 2065 - "interface MTU (%d) set higher than port MTU (%d)\n", 2066 - netdev->mtu, enic->port_mtu); 2082 + "interface MTU (%d) set higher than port MTU (%d)\n", 2083 + netdev->mtu, enic->port_mtu); 2067 2084 2068 - if (running) 2069 - enic_open(netdev); 2070 - 2071 - return 0; 2085 + return _enic_change_mtu(netdev, new_mtu); 2072 2086 } 2073 2087 2074 2088 static void enic_change_mtu_work(struct work_struct *work) ··· 2090 2076 struct enic *enic = container_of(work, struct enic, change_mtu_work); 2091 2077 struct net_device *netdev = enic->netdev; 2092 2078 int new_mtu = vnic_dev_mtu(enic->vdev); 2093 - int err; 2094 - unsigned int i; 2095 - 2096 - new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); 2097 2079 2098 2080 rtnl_lock(); 2099 - 2100 - /* Stop RQ */ 2101 - del_timer_sync(&enic->notify_timer); 2102 - 2103 - for (i = 0; i < enic->rq_count; i++) 2104 - napi_disable(&enic->napi[i]); 2105 - 2106 - vnic_intr_mask(&enic->intr[0]); 2107 - enic_synchronize_irqs(enic); 2108 - err = 
vnic_rq_disable(&enic->rq[0]); 2109 - if (err) { 2110 - rtnl_unlock(); 2111 - netdev_err(netdev, "Unable to disable RQ.\n"); 2112 - return; 2113 - } 2114 - vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); 2115 - vnic_cq_clean(&enic->cq[0]); 2116 - vnic_intr_clean(&enic->intr[0]); 2117 - 2118 - /* Fill RQ with new_mtu-sized buffers */ 2119 - netdev->mtu = new_mtu; 2120 - vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 2121 - /* Need at least one buffer on ring to get going */ 2122 - if (vnic_rq_desc_used(&enic->rq[0]) == 0) { 2123 - rtnl_unlock(); 2124 - netdev_err(netdev, "Unable to alloc receive buffers.\n"); 2125 - return; 2126 - } 2127 - 2128 - /* Start RQ */ 2129 - vnic_rq_enable(&enic->rq[0]); 2130 - napi_enable(&enic->napi[0]); 2131 - vnic_intr_unmask(&enic->intr[0]); 2132 - enic_notify_timer_start(enic); 2133 - 2081 + (void)_enic_change_mtu(netdev, new_mtu); 2134 2082 rtnl_unlock(); 2135 2083 2136 2084 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); ··· 2892 2916 */ 2893 2917 2894 2918 enic->port_mtu = enic->config.mtu; 2895 - (void)enic_change_mtu(netdev, enic->port_mtu); 2896 2919 2897 2920 err = enic_set_mac_addr(netdev, enic->mac_addr); 2898 2921 if (err) { ··· 2981 3006 /* MTU range: 68 - 9000 */ 2982 3007 netdev->min_mtu = ENIC_MIN_MTU; 2983 3008 netdev->max_mtu = ENIC_MAX_MTU; 3009 + netdev->mtu = enic->port_mtu; 2984 3010 2985 3011 err = register_netdev(netdev); 2986 3012 if (err) {
+1
drivers/net/ethernet/huawei/hinic/hinic_main.c
··· 983 983 hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, 984 984 nic_dev, link_status_event_handler); 985 985 986 + SET_NETDEV_DEV(netdev, &pdev->dev); 986 987 err = register_netdev(netdev); 987 988 if (err) { 988 989 dev_err(&pdev->dev, "Failed to register netdev\n");
+2
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 1172 1172 struct mlx5_core_dev *mdev = priv->mdev; 1173 1173 int err; 1174 1174 1175 + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; 1176 + 1175 1177 if (!MLX5_DSCP_SUPPORTED(mdev)) 1176 1178 return 0; 1177 1179
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3754 3754 3755 3755 if (!reset) { 3756 3756 params->sw_mtu = new_mtu; 3757 - set_mtu_cb(priv); 3757 + if (set_mtu_cb) 3758 + set_mtu_cb(priv); 3758 3759 netdev->mtu = params->sw_mtu; 3759 3760 goto out; 3760 3761 }
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1696 1696 int vport_num; 1697 1697 int err; 1698 1698 1699 - if (!MLX5_VPORT_MANAGER(dev)) 1699 + if (!MLX5_ESWITCH_MANAGER(dev)) 1700 1700 return 0; 1701 1701 1702 1702 esw_info(dev, ··· 1765 1765 1766 1766 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1767 1767 { 1768 - if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) 1768 + if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) 1769 1769 return; 1770 1770 1771 1771 esw_info(esw->dev, "cleanup\n");
+4
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 76 76 void *ppriv) 77 77 { 78 78 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 79 + u16 max_mtu; 79 80 80 81 /* priv init */ 81 82 priv->mdev = mdev; ··· 84 83 priv->profile = profile; 85 84 priv->ppriv = ppriv; 86 85 mutex_init(&priv->state_lock); 86 + 87 + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 88 + netdev->mtu = max_mtu; 87 89 88 90 mlx5e_build_nic_params(mdev, &priv->channels.params, 89 91 profile->max_nch(mdev), netdev->mtu);
+3 -1
drivers/net/ethernet/netronome/nfp/flower/main.c
··· 80 80 return NFP_REPR_TYPE_VF; 81 81 } 82 82 83 - return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC; 83 + return __NFP_REPR_TYPE_MAX; 84 84 } 85 85 86 86 static struct net_device * ··· 91 91 u8 port = 0; 92 92 93 93 repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); 94 + if (repr_type > NFP_REPR_TYPE_MAX) 95 + return NULL; 94 96 95 97 reprs = rcu_dereference(app->reprs[repr_type]); 96 98 if (!reprs)
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 53 53 #include "dwmac1000.h" 54 54 #include "hwif.h" 55 55 56 - #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 56 + #define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) 57 57 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 58 58 59 59 /* Module parameters */
+38 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
··· 257 257 return -ENOMEM; 258 258 259 259 /* Enable pci device */ 260 - ret = pcim_enable_device(pdev); 260 + ret = pci_enable_device(pdev); 261 261 if (ret) { 262 262 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", 263 263 __func__); ··· 300 300 static void stmmac_pci_remove(struct pci_dev *pdev) 301 301 { 302 302 stmmac_dvr_remove(&pdev->dev); 303 + pci_disable_device(pdev); 303 304 } 304 305 305 - static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); 306 + static int stmmac_pci_suspend(struct device *dev) 307 + { 308 + struct pci_dev *pdev = to_pci_dev(dev); 309 + int ret; 310 + 311 + ret = stmmac_suspend(dev); 312 + if (ret) 313 + return ret; 314 + 315 + ret = pci_save_state(pdev); 316 + if (ret) 317 + return ret; 318 + 319 + pci_disable_device(pdev); 320 + pci_wake_from_d3(pdev, true); 321 + return 0; 322 + } 323 + 324 + static int stmmac_pci_resume(struct device *dev) 325 + { 326 + struct pci_dev *pdev = to_pci_dev(dev); 327 + int ret; 328 + 329 + pci_restore_state(pdev); 330 + pci_set_power_state(pdev, PCI_D0); 331 + 332 + ret = pci_enable_device(pdev); 333 + if (ret) 334 + return ret; 335 + 336 + pci_set_master(pdev); 337 + 338 + return stmmac_resume(dev); 339 + } 340 + 341 + static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); 306 342 307 343 /* synthetic ID, no official vendor */ 308 344 #define PCI_VENDOR_ID_STMMAC 0x700
+1
drivers/net/netdevsim/devlink.c
··· 207 207 struct net *net = nsim_to_net(ns); 208 208 bool *reg_devlink = net_generic(net, nsim_devlink_id); 209 209 210 + devlink_resources_unregister(ns->devlink, NULL); 210 211 devlink_unregister(ns->devlink); 211 212 devlink_free(ns->devlink); 212 213 ns->devlink = NULL;
+1 -1
drivers/net/phy/mdio-mux-bcm-iproc.c
··· 218 218 219 219 static int mdio_mux_iproc_remove(struct platform_device *pdev) 220 220 { 221 - struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); 221 + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); 222 222 223 223 mdio_mux_uninit(md->mux_handle); 224 224 mdiobus_unregister(md->mii_bus);
+2
drivers/net/usb/lan78xx.c
··· 1242 1242 mod_timer(&dev->stat_monitor, 1243 1243 jiffies + STAT_UPDATE_TIMER); 1244 1244 } 1245 + 1246 + tasklet_schedule(&dev->bh); 1245 1247 } 1246 1248 1247 1249 return ret;
+2 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1785 1785 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; 1786 1786 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; 1787 1787 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; 1788 - fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus); 1788 + /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */ 1789 + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; 1789 1790 fwreq->bus_nr = devinfo->pdev->bus->number; 1790 1791 1791 1792 return fwreq;
+69
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
··· 178 178 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 179 179 }; 180 180 181 + const struct iwl_cfg iwl9260_killer_2ac_cfg = { 182 + .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", 183 + .fw_name_pre = IWL9260A_FW_PRE, 184 + .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, 185 + IWL_DEVICE_9000, 186 + .ht_params = &iwl9000_ht_params, 187 + .nvm_ver = IWL9000_NVM_VERSION, 188 + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, 189 + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 190 + }; 191 + 181 192 const struct iwl_cfg iwl9270_2ac_cfg = { 182 193 .name = "Intel(R) Dual Band Wireless AC 9270", 183 194 .fw_name_pre = IWL9260A_FW_PRE, ··· 278 267 .soc_latency = 5000, 279 268 }; 280 269 270 + const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { 271 + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", 272 + .fw_name_pre = IWL9000A_FW_PRE, 273 + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, 274 + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, 275 + IWL_DEVICE_9000, 276 + .ht_params = &iwl9000_ht_params, 277 + .nvm_ver = IWL9000_NVM_VERSION, 278 + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, 279 + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 280 + .integrated = true, 281 + .soc_latency = 5000, 282 + }; 283 + 284 + const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { 285 + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", 286 + .fw_name_pre = IWL9000A_FW_PRE, 287 + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, 288 + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, 289 + IWL_DEVICE_9000, 290 + .ht_params = &iwl9000_ht_params, 291 + .nvm_ver = IWL9000_NVM_VERSION, 292 + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, 293 + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 294 + .integrated = true, 295 + .soc_latency = 5000, 296 + }; 297 + 281 298 const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { 282 299 .name = "Intel(R) Dual Band Wireless AC 9460", 283 300 .fw_name_pre = IWL9000A_FW_PRE, ··· 
353 314 354 315 const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { 355 316 .name = "Intel(R) Dual Band Wireless AC 9560", 317 + .fw_name_pre = IWL9000A_FW_PRE, 318 + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, 319 + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, 320 + IWL_DEVICE_9000, 321 + .ht_params = &iwl9000_ht_params, 322 + .nvm_ver = IWL9000_NVM_VERSION, 323 + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, 324 + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 325 + .integrated = true, 326 + .soc_latency = 5000, 327 + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK 328 + }; 329 + 330 + const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { 331 + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", 332 + .fw_name_pre = IWL9000A_FW_PRE, 333 + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, 334 + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, 335 + IWL_DEVICE_9000, 336 + .ht_params = &iwl9000_ht_params, 337 + .nvm_ver = IWL9000_NVM_VERSION, 338 + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, 339 + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 340 + .integrated = true, 341 + .soc_latency = 5000, 342 + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK 343 + }; 344 + 345 + const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { 346 + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", 356 347 .fw_name_pre = IWL9000A_FW_PRE, 357 348 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, 358 349 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+5
drivers/net/wireless/intel/iwlwifi/iwl-config.h
··· 551 551 extern const struct iwl_cfg iwl4165_2ac_cfg; 552 552 extern const struct iwl_cfg iwl9160_2ac_cfg; 553 553 extern const struct iwl_cfg iwl9260_2ac_cfg; 554 + extern const struct iwl_cfg iwl9260_killer_2ac_cfg; 554 555 extern const struct iwl_cfg iwl9270_2ac_cfg; 555 556 extern const struct iwl_cfg iwl9460_2ac_cfg; 556 557 extern const struct iwl_cfg iwl9560_2ac_cfg; ··· 559 558 extern const struct iwl_cfg iwl9461_2ac_cfg_soc; 560 559 extern const struct iwl_cfg iwl9462_2ac_cfg_soc; 561 560 extern const struct iwl_cfg iwl9560_2ac_cfg_soc; 561 + extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; 562 + extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; 562 563 extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; 563 564 extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; 564 565 extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; 565 566 extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; 567 + extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; 568 + extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; 566 569 extern const struct iwl_cfg iwl22000_2ac_cfg_hr; 567 570 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; 568 571 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+22
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 545 545 {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, 546 546 {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, 547 547 {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, 548 + {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)}, 549 + {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 550 + {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 548 551 {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, 549 552 {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, 550 553 {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, ··· 557 554 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, 558 555 {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, 559 556 {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, 557 + {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)}, 560 558 {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, 561 559 {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, 562 560 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, ··· 582 578 {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, 583 579 {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, 584 580 {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, 581 + {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 582 + {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 585 583 {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, 586 584 {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, 587 585 {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, ··· 610 604 {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, 611 605 {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, 612 606 {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, 607 + {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 608 + {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 613 609 {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, 614 610 {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, 615 611 {IWL_PCI_DEVICE(0x30DC, 0x4030, 
iwl9560_2ac_cfg_soc)}, ··· 638 630 {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, 639 631 {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, 640 632 {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, 633 + {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, 634 + {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, 641 635 {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, 642 636 {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, 643 637 {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, ··· 666 656 {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, 667 657 {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, 668 658 {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, 659 + {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 660 + {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 669 661 {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, 670 662 {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, 671 663 {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, ··· 694 682 {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, 695 683 {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, 696 684 {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, 685 + {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 686 + {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 697 687 {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, 698 688 {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, 699 689 {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, ··· 722 708 {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, 723 709 {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, 724 710 {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, 711 + {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 712 + {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 725 713 {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, 726 714 
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, 727 715 {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, ··· 759 743 {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, 760 744 {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, 761 745 {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, 746 + {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 747 + {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 762 748 {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, 763 749 {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, 764 750 {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, ··· 789 771 {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, 790 772 {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, 791 773 {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, 774 + {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 775 + {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 792 776 {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, 793 777 {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, 794 778 {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, ··· 817 797 {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, 818 798 {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, 819 799 {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, 800 + {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, 801 + {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)}, 820 802 {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, 821 803 {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, 822 804 {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+6
drivers/net/xen-netfront.c
··· 87 87 /* IRQ name is queue name with "-tx" or "-rx" appended */ 88 88 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 89 89 90 + static DECLARE_WAIT_QUEUE_HEAD(module_load_q); 90 91 static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); 91 92 92 93 struct netfront_stats { ··· 1332 1331 netif_carrier_off(netdev); 1333 1332 1334 1333 xenbus_switch_state(dev, XenbusStateInitialising); 1334 + wait_event(module_load_q, 1335 + xenbus_read_driver_state(dev->otherend) != 1336 + XenbusStateClosed && 1337 + xenbus_read_driver_state(dev->otherend) != 1338 + XenbusStateUnknown); 1335 1339 return netdev; 1336 1340 1337 1341 exit:
+7 -3
drivers/nvme/host/fabrics.c
··· 539 539 /* 540 540 * For something we're not in a state to send to the device the default action 541 541 * is to busy it and retry it after the controller state is recovered. However, 542 - * anything marked for failfast or nvme multipath is immediately failed. 542 + * if the controller is deleting or if anything is marked for failfast or 543 + * nvme multipath it is immediately failed. 543 544 * 544 545 * Note: commands used to initialize the controller will be marked for failfast. 545 546 * Note: nvme cli/ioctl commands are marked for failfast. 546 547 */ 547 - blk_status_t nvmf_fail_nonready_command(struct request *rq) 548 + blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl, 549 + struct request *rq) 548 550 { 549 - if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) 551 + if (ctrl->state != NVME_CTRL_DELETING && 552 + ctrl->state != NVME_CTRL_DEAD && 553 + !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) 550 554 return BLK_STS_RESOURCE; 551 555 nvme_req(rq)->status = NVME_SC_ABORT_REQ; 552 556 return BLK_STS_IOERR;
+2 -1
drivers/nvme/host/fabrics.h
··· 162 162 void nvmf_free_options(struct nvmf_ctrl_options *opts); 163 163 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); 164 164 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); 165 - blk_status_t nvmf_fail_nonready_command(struct request *rq); 165 + blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl, 166 + struct request *rq); 166 167 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, 167 168 bool queue_live); 168 169
+1 -1
drivers/nvme/host/fc.c
··· 2272 2272 2273 2273 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || 2274 2274 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 2275 - return nvmf_fail_nonready_command(rq); 2275 + return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 2276 2276 2277 2277 ret = nvme_setup_cmd(ns, rq, sqe); 2278 2278 if (ret)
+1 -1
drivers/nvme/host/rdma.c
··· 1639 1639 WARN_ON_ONCE(rq->tag < 0); 1640 1640 1641 1641 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 1642 - return nvmf_fail_nonready_command(rq); 1642 + return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 1643 1643 1644 1644 dev = queue->device->dev; 1645 1645 ib_dma_sync_single_for_cpu(dev, sqe->dma,
+7 -2
drivers/nvme/target/configfs.c
··· 282 282 { 283 283 struct nvmet_ns *ns = to_nvmet_ns(item); 284 284 struct nvmet_subsys *subsys = ns->subsys; 285 + size_t len; 285 286 int ret; 286 287 287 288 mutex_lock(&subsys->lock); ··· 290 289 if (ns->enabled) 291 290 goto out_unlock; 292 291 293 - kfree(ns->device_path); 292 + ret = -EINVAL; 293 + len = strcspn(page, "\n"); 294 + if (!len) 295 + goto out_unlock; 294 296 297 + kfree(ns->device_path); 295 298 ret = -ENOMEM; 296 - ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL); 299 + ns->device_path = kstrndup(page, len, GFP_KERNEL); 297 300 if (!ns->device_path) 298 301 goto out_unlock; 299 302
+1 -1
drivers/nvme/target/core.c
··· 339 339 goto out_unlock; 340 340 341 341 ret = nvmet_bdev_ns_enable(ns); 342 - if (ret) 342 + if (ret == -ENOTBLK) 343 343 ret = nvmet_file_ns_enable(ns); 344 344 if (ret) 345 345 goto out_unlock;
+35 -9
drivers/nvme/target/fc.c
··· 58 58 struct work_struct work; 59 59 } __aligned(sizeof(unsigned long long)); 60 60 61 + /* desired maximum for a single sequence - if sg list allows it */ 61 62 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 62 - #define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE) 63 63 64 64 enum nvmet_fcp_datadir { 65 65 NVMET_FCP_NODATA, ··· 74 74 struct nvme_fc_cmd_iu cmdiubuf; 75 75 struct nvme_fc_ersp_iu rspiubuf; 76 76 dma_addr_t rspdma; 77 + struct scatterlist *next_sg; 77 78 struct scatterlist *data_sg; 78 79 int data_sg_cnt; 79 80 u32 offset; ··· 1026 1025 INIT_LIST_HEAD(&newrec->assoc_list); 1027 1026 kref_init(&newrec->ref); 1028 1027 ida_init(&newrec->assoc_cnt); 1029 - newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS, 1030 - template->max_sgl_segments); 1028 + newrec->max_sg_cnt = template->max_sgl_segments; 1031 1029 1032 1030 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1033 1031 if (ret) { ··· 1722 1722 ((fod->io_dir == NVMET_FCP_WRITE) ? 1723 1723 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 1724 1724 /* note: write from initiator perspective */ 1725 + fod->next_sg = fod->data_sg; 1725 1726 1726 1727 return 0; 1727 1728 ··· 1867 1866 struct nvmet_fc_fcp_iod *fod, u8 op) 1868 1867 { 1869 1868 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 1869 + struct scatterlist *sg = fod->next_sg; 1870 1870 unsigned long flags; 1871 - u32 tlen; 1871 + u32 remaininglen = fod->req.transfer_len - fod->offset; 1872 + u32 tlen = 0; 1872 1873 int ret; 1873 1874 1874 1875 fcpreq->op = op; 1875 1876 fcpreq->offset = fod->offset; 1876 1877 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 1877 1878 1878 - tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE, 1879 - (fod->req.transfer_len - fod->offset)); 1879 + /* 1880 + * for next sequence: 1881 + * break at a sg element boundary 1882 + * attempt to keep sequence length capped at 1883 + * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 1884 + * be longer if a single sg element is larger 1885 + * than that amount. 
This is done to avoid creating 1886 + * a new sg list to use for the tgtport api. 1887 + */ 1888 + fcpreq->sg = sg; 1889 + fcpreq->sg_cnt = 0; 1890 + while (tlen < remaininglen && 1891 + fcpreq->sg_cnt < tgtport->max_sg_cnt && 1892 + tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 1893 + fcpreq->sg_cnt++; 1894 + tlen += sg_dma_len(sg); 1895 + sg = sg_next(sg); 1896 + } 1897 + if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 1898 + fcpreq->sg_cnt++; 1899 + tlen += min_t(u32, sg_dma_len(sg), remaininglen); 1900 + sg = sg_next(sg); 1901 + } 1902 + if (tlen < remaininglen) 1903 + fod->next_sg = sg; 1904 + else 1905 + fod->next_sg = NULL; 1906 + 1880 1907 fcpreq->transfer_length = tlen; 1881 1908 fcpreq->transferred_length = 0; 1882 1909 fcpreq->fcp_error = 0; 1883 1910 fcpreq->rsplen = 0; 1884 - 1885 - fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE]; 1886 - fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE); 1887 1911 1888 1912 /* 1889 1913 * If the last READDATA request: check if LLDD supports
+1 -1
drivers/nvme/target/loop.c
··· 162 162 blk_status_t ret; 163 163 164 164 if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready)) 165 - return nvmf_fail_nonready_command(req); 165 + return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req); 166 166 167 167 ret = nvme_setup_cmd(ns, req, &iod->cmd); 168 168 if (ret)
+2
drivers/pci/pcie/err.c
··· 295 295 296 296 parent = udev->subordinate; 297 297 pci_lock_rescan_remove(); 298 + pci_dev_get(dev); 298 299 list_for_each_entry_safe_reverse(pdev, temp, &parent->devices, 299 300 bus_list) { 300 301 pci_dev_get(pdev); ··· 329 328 pci_info(dev, "Device recovery from fatal error failed\n"); 330 329 } 331 330 331 + pci_dev_put(dev); 332 332 pci_unlock_rescan_remove(); 333 333 } 334 334
+4
drivers/phy/broadcom/phy-brcm-usb-init.c
··· 962 962 { 963 963 void __iomem *ctrl = params->ctrl_regs; 964 964 965 + USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE); 966 + /* 1 millisecond - for USB clocks to settle down */ 967 + usleep_range(1000, 2000); 968 + 965 969 if (BRCM_ID(params->family_id) == 0x7366) { 966 970 /* 967 971 * The PHY3_SOFT_RESETB bits default to the wrong state.
+2 -2
drivers/phy/motorola/phy-mapphone-mdm6600.c
··· 182 182 ddata = container_of(work, struct phy_mdm6600, status_work.work); 183 183 dev = ddata->dev; 184 184 185 - error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES, 185 + error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES, 186 186 ddata->status_gpios->desc, 187 187 values); 188 188 if (error) 189 189 return; 190 190 191 - for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) { 191 + for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) { 192 192 val |= values[i] << i; 193 193 dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n", 194 194 __func__, i, values[i], val);
+6 -6
drivers/scsi/libiscsi.c
··· 284 284 */ 285 285 if (opcode != ISCSI_OP_SCSI_DATA_OUT) { 286 286 iscsi_conn_printk(KERN_INFO, conn, 287 - "task [op %x/%x itt " 287 + "task [op %x itt " 288 288 "0x%x/0x%x] " 289 289 "rejected.\n", 290 - task->hdr->opcode, opcode, 291 - task->itt, task->hdr_itt); 290 + opcode, task->itt, 291 + task->hdr_itt); 292 292 return -EACCES; 293 293 } 294 294 /* ··· 297 297 */ 298 298 if (conn->session->fast_abort) { 299 299 iscsi_conn_printk(KERN_INFO, conn, 300 - "task [op %x/%x itt " 300 + "task [op %x itt " 301 301 "0x%x/0x%x] fast abort.\n", 302 - task->hdr->opcode, opcode, 303 - task->itt, task->hdr_itt); 302 + opcode, task->itt, 303 + task->hdr_itt); 304 304 return -EACCES; 305 305 } 306 306 break;
+1
drivers/scsi/qla2xxx/qla_attr.c
··· 2141 2141 msleep(1000); 2142 2142 2143 2143 qla24xx_disable_vp(vha); 2144 + qla2x00_wait_for_sess_deletion(vha); 2144 2145 2145 2146 vha->flags.delete_progress = 1; 2146 2147
+1
drivers/scsi/qla2xxx/qla_gbl.h
··· 214 214 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); 215 215 int qla24xx_async_abort_cmd(srb_t *); 216 216 int qla24xx_post_relogin_work(struct scsi_qla_host *vha); 217 + void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); 217 218 218 219 /* 219 220 * Global Functions in qla_mid.c source file.
+4
drivers/scsi/qla2xxx/qla_gs.c
··· 3708 3708 return rval; 3709 3709 3710 3710 done_free_sp: 3711 + spin_lock_irqsave(&vha->hw->vport_slock, flags); 3712 + list_del(&sp->elem); 3713 + spin_unlock_irqrestore(&vha->hw->vport_slock, flags); 3714 + 3711 3715 if (sp->u.iocb_cmd.u.ctarg.req) { 3712 3716 dma_free_coherent(&vha->hw->pdev->dev, 3713 3717 sizeof(struct ct_sns_pkt),
+3 -4
drivers/scsi/qla2xxx/qla_init.c
··· 1489 1489 1490 1490 wait_for_completion(&tm_iocb->u.tmf.comp); 1491 1491 1492 - rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? 1493 - QLA_SUCCESS : QLA_FUNCTION_FAILED; 1492 + rval = tm_iocb->u.tmf.data; 1494 1493 1495 - if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { 1496 - ql_dbg(ql_dbg_taskm, vha, 0x8030, 1494 + if (rval != QLA_SUCCESS) { 1495 + ql_log(ql_log_warn, vha, 0x8030, 1497 1496 "TM IOCB failed (%x).\n", rval); 1498 1497 } 1499 1498
+2
drivers/scsi/qla2xxx/qla_inline.h
··· 222 222 sp->fcport = fcport; 223 223 sp->iocbs = 1; 224 224 sp->vha = qpair->vha; 225 + INIT_LIST_HEAD(&sp->elem); 226 + 225 227 done: 226 228 if (!sp) 227 229 QLA_QPAIR_MARK_NOT_BUSY(qpair);
+3
drivers/scsi/qla2xxx/qla_isr.c
··· 631 631 unsigned long flags; 632 632 fc_port_t *fcport = NULL; 633 633 634 + if (!vha->hw->flags.fw_started) 635 + return; 636 + 634 637 /* Setup to process RIO completion. */ 635 638 handle_cnt = 0; 636 639 if (IS_CNA_CAPABLE(ha))
+6
drivers/scsi/qla2xxx/qla_mbx.c
··· 4220 4220 mbx_cmd_t *mcp = &mc; 4221 4221 struct qla_hw_data *ha = vha->hw; 4222 4222 4223 + if (!ha->flags.fw_started) 4224 + return QLA_SUCCESS; 4225 + 4223 4226 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4224 4227 "Entered %s.\n", __func__); 4225 4228 ··· 4291 4288 mbx_cmd_t mc; 4292 4289 mbx_cmd_t *mcp = &mc; 4293 4290 struct qla_hw_data *ha = vha->hw; 4291 + 4292 + if (!ha->flags.fw_started) 4293 + return QLA_SUCCESS; 4294 4294 4295 4295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4296 4296 "Entered %s.\n", __func__);
+9 -2
drivers/scsi/qla2xxx/qla_mid.c
··· 152 152 qla24xx_disable_vp(scsi_qla_host_t *vha) 153 153 { 154 154 unsigned long flags; 155 - int ret; 155 + int ret = QLA_SUCCESS; 156 + fc_port_t *fcport; 156 157 157 - ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 158 + if (vha->hw->flags.fw_started) 159 + ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 160 + 158 161 atomic_set(&vha->loop_state, LOOP_DOWN); 159 162 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 163 + list_for_each_entry(fcport, &vha->vp_fcports, list) 164 + fcport->logout_on_delete = 0; 165 + 166 + qla2x00_mark_all_devices_lost(vha, 0); 160 167 161 168 /* Remove port id from vp target map */ 162 169 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+22 -29
drivers/scsi/qla2xxx/qla_os.c
··· 303 303 static int qla2xxx_map_queues(struct Scsi_Host *shost); 304 304 static void qla2x00_destroy_deferred_work(struct qla_hw_data *); 305 305 306 + 306 307 struct scsi_host_template qla2xxx_driver_template = { 307 308 .module = THIS_MODULE, 308 309 .name = QLA2XXX_DRIVER_NAME, ··· 1148 1147 * qla2x00_wait_for_sess_deletion can only be called from remove_one. 1149 1148 * it has dependency on UNLOADING flag to stop device discovery 1150 1149 */ 1151 - static void 1150 + void 1152 1151 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) 1153 1152 { 1154 1153 qla2x00_mark_all_devices_lost(vha, 0); ··· 3604 3603 3605 3604 base_vha = pci_get_drvdata(pdev); 3606 3605 ha = base_vha->hw; 3606 + ql_log(ql_log_info, base_vha, 0xb079, 3607 + "Removing driver\n"); 3607 3608 3608 3609 /* Indicate device removal to prevent future board_disable and wait 3609 3610 * until any pending board_disable has completed. */ ··· 3627 3624 return; 3628 3625 } 3629 3626 qla2x00_wait_for_hba_ready(base_vha); 3627 + 3628 + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { 3629 + if (ha->flags.fw_started) 3630 + qla2x00_abort_isp_cleanup(base_vha); 3631 + } else if (!IS_QLAFX00(ha)) { 3632 + if (IS_QLA8031(ha)) { 3633 + ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3634 + "Clearing fcoe driver presence.\n"); 3635 + if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3636 + ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3637 + "Error while clearing DRV-Presence.\n"); 3638 + } 3639 + 3640 + qla2x00_try_to_stop_firmware(base_vha); 3641 + } 3630 3642 3631 3643 qla2x00_wait_for_sess_deletion(base_vha); 3632 3644 ··· 3665 3647 qlafx00_driver_shutdown(base_vha, 20); 3666 3648 3667 3649 qla2x00_delete_all_vps(ha, base_vha); 3668 - 3669 - if (IS_QLA8031(ha)) { 3670 - ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3671 - "Clearing fcoe driver presence.\n"); 3672 - if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3673 - ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3674 - "Error while clearing DRV-Presence.\n"); 
3675 - } 3676 3650 3677 3651 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 3678 3652 ··· 3725 3715 qla2x00_stop_timer(vha); 3726 3716 3727 3717 qla25xx_delete_queues(vha); 3728 - 3729 - if (ha->flags.fce_enabled) 3730 - qla2x00_disable_fce_trace(vha, NULL, NULL); 3731 - 3732 - if (ha->eft) 3733 - qla2x00_disable_eft_trace(vha); 3734 - 3735 - if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { 3736 - if (ha->flags.fw_started) 3737 - qla2x00_abort_isp_cleanup(vha); 3738 - } else { 3739 - if (ha->flags.fw_started) { 3740 - /* Stop currently executing firmware. */ 3741 - qla2x00_try_to_stop_firmware(vha); 3742 - ha->flags.fw_started = 0; 3743 - } 3744 - } 3745 - 3746 3718 vha->flags.online = 0; 3747 3719 3748 3720 /* turn-off interrupts on the card */ ··· 6020 6028 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6021 6029 } 6022 6030 6023 - if (test_and_clear_bit(ISP_ABORT_NEEDED, 6024 - &base_vha->dpc_flags)) { 6031 + if (test_and_clear_bit 6032 + (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 6033 + !test_bit(UNLOADING, &base_vha->dpc_flags)) { 6025 6034 6026 6035 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 6027 6036 "ISP abort scheduled.\n");
+3
drivers/scsi/qla2xxx/qla_sup.c
··· 1880 1880 if (IS_P3P_TYPE(ha)) 1881 1881 return QLA_SUCCESS; 1882 1882 1883 + if (!ha->flags.fw_started) 1884 + return QLA_SUCCESS; 1885 + 1883 1886 ha->beacon_blink_led = 0; 1884 1887 1885 1888 if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+14
drivers/scsi/scsi_error.c
··· 296 296 rtn = host->hostt->eh_timed_out(scmd); 297 297 298 298 if (rtn == BLK_EH_DONE) { 299 + /* 300 + * For blk-mq, we must set the request state to complete now 301 + * before sending the request to the scsi error handler. This 302 + * will prevent a use-after-free in the event the LLD manages 303 + * to complete the request before the error handler finishes 304 + * processing this timed out request. 305 + * 306 + * If the request was already completed, then the LLD beat the 307 + * time out handler from transferring the request to the scsi 308 + * error handler. In that case we can return immediately as no 309 + * further action is required. 310 + */ 311 + if (req->q->mq_ops && !blk_mq_mark_complete(req)) 312 + return rtn; 299 313 if (scsi_abort_command(scmd) != SUCCESS) { 300 314 set_host_byte(scmd, DID_TIME_OUT); 301 315 scsi_eh_scmd_add(scmd);
+6 -9
drivers/scsi/sg.c
··· 1741 1741 * 1742 1742 * With scsi-mq enabled, there are a fixed number of preallocated 1743 1743 * requests equal in number to shost->can_queue. If all of the 1744 - * preallocated requests are already in use, then using GFP_ATOMIC with 1745 - * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL 1746 - * will cause blk_get_request() to sleep until an active command 1747 - * completes, freeing up a request. Neither option is ideal, but 1748 - * GFP_KERNEL is the better choice to prevent userspace from getting an 1749 - * unexpected EWOULDBLOCK. 1750 - * 1751 - * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually 1752 - * does not sleep except under memory pressure. 1744 + * preallocated requests are already in use, then blk_get_request() 1745 + * will sleep until an active command completes, freeing up a request. 1746 + * Although waiting in an asynchronous interface is less than ideal, we 1747 + * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might 1748 + * not expect an EWOULDBLOCK from this condition. 1753 1749 */ 1754 1750 rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? 1755 1751 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); ··· 2181 2185 write_lock_irqsave(&sdp->sfd_lock, iflags); 2182 2186 if (atomic_read(&sdp->detaching)) { 2183 2187 write_unlock_irqrestore(&sdp->sfd_lock, iflags); 2188 + kfree(sfp); 2184 2189 return ERR_PTR(-ENODEV); 2185 2190 } 2186 2191 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
+2
drivers/staging/android/ashmem.c
··· 402 402 fput(asma->file); 403 403 goto out; 404 404 } 405 + } else { 406 + vma_set_anonymous(vma); 405 407 } 406 408 407 409 if (vma->vm_file)
+6 -6
drivers/staging/ks7010/ks_hostif.c
··· 1842 1842 memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN); 1843 1843 1844 1844 if (dev->flags & IFF_PROMISC) { 1845 - hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1846 - MCAST_FILTER_PROMISC); 1845 + hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER, 1846 + MCAST_FILTER_PROMISC); 1847 1847 goto spin_unlock; 1848 1848 } 1849 1849 1850 1850 if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) || 1851 1851 (dev->flags & IFF_ALLMULTI)) { 1852 - hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1853 - MCAST_FILTER_MCASTALL); 1852 + hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER, 1853 + MCAST_FILTER_MCASTALL); 1854 1854 goto spin_unlock; 1855 1855 } 1856 1856 ··· 1866 1866 ETH_ALEN * mc_count); 1867 1867 } else { 1868 1868 priv->sme_i.sme_flag |= SME_MULTICAST; 1869 - hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1870 - MCAST_FILTER_MCAST); 1869 + hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER, 1870 + MCAST_FILTER_MCAST); 1871 1871 } 1872 1872 1873 1873 spin_unlock:
-1
drivers/staging/rtl8188eu/Kconfig
··· 7 7 select LIB80211 8 8 select LIB80211_CRYPT_WEP 9 9 select LIB80211_CRYPT_CCMP 10 - select LIB80211_CRYPT_TKIP 11 10 ---help--- 12 11 This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N. 13 12 If built as a module, it will be called r8188eu.
+119 -52
drivers/staging/rtl8188eu/core/rtw_recv.c
··· 23 23 #include <mon.h> 24 24 #include <wifi.h> 25 25 #include <linux/vmalloc.h> 26 - #include <net/lib80211.h> 27 26 28 27 #define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */ 29 28 #define LLC_HEADER_SIZE 6 /* LLC Header Length */ ··· 220 221 static int recvframe_chkmic(struct adapter *adapter, 221 222 struct recv_frame *precvframe) 222 223 { 223 - int res = _SUCCESS; 224 - struct rx_pkt_attrib *prxattrib = &precvframe->attrib; 225 - struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta); 224 + int i, res = _SUCCESS; 225 + u32 datalen; 226 + u8 miccode[8]; 227 + u8 bmic_err = false, brpt_micerror = true; 228 + u8 *pframe, *payload, *pframemic; 229 + u8 *mickey; 230 + struct sta_info *stainfo; 231 + struct rx_pkt_attrib *prxattrib = &precvframe->attrib; 232 + struct security_priv *psecuritypriv = &adapter->securitypriv; 233 + 234 + struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv; 235 + struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 236 + 237 + stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]); 226 238 227 239 if (prxattrib->encrypt == _TKIP_) { 228 - if (stainfo) { 229 - int key_idx; 230 - const int iv_len = 8, icv_len = 4, key_length = 32; 231 - struct sk_buff *skb = precvframe->pkt; 232 - u8 key[32], iv[8], icv[4], *pframe = skb->data; 233 - void *crypto_private = NULL; 234 - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip"); 235 - struct security_priv *psecuritypriv = &adapter->securitypriv; 240 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, 241 + ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__)); 242 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, 243 + ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", 244 + __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2], 245 + prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5])); 236 246 247 + /* calculate mic code */ 248 + if (stainfo) { 237 249 if 
(IS_MCAST(prxattrib->ra)) { 238 250 if (!psecuritypriv) { 239 251 res = _FAIL; ··· 253 243 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__); 254 244 goto exit; 255 245 } 256 - key_idx = prxattrib->key_index; 257 - memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16); 258 - memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16); 246 + mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0]; 247 + 248 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, 249 + ("\n %s: bcmc key\n", __func__)); 259 250 } else { 260 - key_idx = 0; 261 - memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 262 - memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16); 251 + mickey = &stainfo->dot11tkiprxmickey.skey[0]; 252 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 253 + ("\n %s: unicast key\n", __func__)); 263 254 } 264 255 265 - if (!crypto_ops) { 256 + /* icv_len included the mic code */ 257 + datalen = precvframe->pkt->len-prxattrib->hdrlen - 258 + prxattrib->iv_len-prxattrib->icv_len-8; 259 + pframe = precvframe->pkt->data; 260 + payload = pframe+prxattrib->hdrlen+prxattrib->iv_len; 261 + 262 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len)); 263 + rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0], 264 + (unsigned char)prxattrib->priority); /* care the length of the data */ 265 + 266 + pframemic = payload+datalen; 267 + 268 + bmic_err = false; 269 + 270 + for (i = 0; i < 8; i++) { 271 + if (miccode[i] != *(pframemic+i)) { 272 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 273 + ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ", 274 + __func__, i, miccode[i], i, *(pframemic + i))); 275 + bmic_err = true; 276 + } 277 + } 278 + 279 + if (bmic_err) { 280 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 281 + ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", 282 + *(pframemic-8), 
*(pframemic-7), *(pframemic-6), 283 + *(pframemic-5), *(pframemic-4), *(pframemic-3), 284 + *(pframemic-2), *(pframemic-1))); 285 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 286 + ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", 287 + *(pframemic-16), *(pframemic-15), *(pframemic-14), 288 + *(pframemic-13), *(pframemic-12), *(pframemic-11), 289 + *(pframemic-10), *(pframemic-9))); 290 + { 291 + uint i; 292 + 293 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 294 + ("\n ======demp packet (len=%d)======\n", 295 + precvframe->pkt->len)); 296 + for (i = 0; i < precvframe->pkt->len; i += 8) { 297 + RT_TRACE(_module_rtl871x_recv_c_, 298 + _drv_err_, 299 + ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x", 300 + *(precvframe->pkt->data+i), 301 + *(precvframe->pkt->data+i+1), 302 + *(precvframe->pkt->data+i+2), 303 + *(precvframe->pkt->data+i+3), 304 + *(precvframe->pkt->data+i+4), 305 + *(precvframe->pkt->data+i+5), 306 + *(precvframe->pkt->data+i+6), 307 + *(precvframe->pkt->data+i+7))); 308 + } 309 + RT_TRACE(_module_rtl871x_recv_c_, 310 + _drv_err_, 311 + ("\n ====== demp packet end [len=%d]======\n", 312 + precvframe->pkt->len)); 313 + RT_TRACE(_module_rtl871x_recv_c_, 314 + _drv_err_, 315 + ("\n hrdlen=%d,\n", 316 + prxattrib->hdrlen)); 317 + } 318 + 319 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 320 + ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ", 321 + prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2], 322 + prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey)); 323 + 324 + /* double check key_index for some timing issue , */ 325 + /* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */ 326 + if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index)) 327 + brpt_micerror = false; 328 + 329 + if ((prxattrib->bdecrypted) && (brpt_micerror)) { 330 + 
rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra)); 331 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted)); 332 + DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted); 333 + } else { 334 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted)); 335 + DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted); 336 + } 266 337 res = _FAIL; 267 - goto exit_lib80211_tkip; 338 + } else { 339 + /* mic checked ok */ 340 + if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) { 341 + psecuritypriv->bcheck_grpkey = true; 342 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true")); 343 + } 268 344 } 269 - 270 - memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 271 - memcpy(icv, pframe + skb->len - icv_len, icv_len); 272 - memmove(pframe + iv_len, pframe, prxattrib->hdrlen); 273 - 274 - skb_pull(skb, iv_len); 275 - skb_trim(skb, skb->len - icv_len); 276 - 277 - crypto_private = crypto_ops->init(key_idx); 278 - if (!crypto_private) { 279 - res = _FAIL; 280 - goto exit_lib80211_tkip; 281 - } 282 - if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) { 283 - res = _FAIL; 284 - goto exit_lib80211_tkip; 285 - } 286 - if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) { 287 - res = _FAIL; 288 - goto exit_lib80211_tkip; 289 - } 290 - 291 - memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 292 - skb_push(skb, iv_len); 293 - skb_put(skb, icv_len); 294 - 295 - memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 296 - memcpy(pframe + skb->len - icv_len, icv, icv_len); 297 - 298 - exit_lib80211_tkip: 299 - if (crypto_ops && crypto_private) 300 - crypto_ops->deinit(crypto_private); 301 345 } else { 302 346 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 303 347 ("%s: rtw_get_stainfo==NULL!!!\n", __func__)); 304 348 } 349 + 350 + 
skb_trim(precvframe->pkt, precvframe->pkt->len - 8); 305 351 } 306 352 307 353 exit: 354 + 308 355 return res; 309 356 } 310 357
+49 -49
drivers/staging/rtl8188eu/core/rtw_security.c
··· 650 650 return res; 651 651 } 652 652 653 + /* The hlen isn't include the IV */ 653 654 u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe) 654 - { 655 - struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib; 656 - u32 res = _SUCCESS; 655 + { /* exclude ICV */ 656 + u16 pnl; 657 + u32 pnh; 658 + u8 rc4key[16]; 659 + u8 ttkey[16]; 660 + u8 crc[4]; 661 + struct arc4context mycontext; 662 + int length; 663 + 664 + u8 *pframe, *payload, *iv, *prwskey; 665 + union pn48 dot11txpn; 666 + struct sta_info *stainfo; 667 + struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib; 668 + struct security_priv *psecuritypriv = &padapter->securitypriv; 669 + u32 res = _SUCCESS; 670 + 671 + 672 + pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data; 657 673 658 674 /* 4 start to decrypt recvframe */ 659 675 if (prxattrib->encrypt == _TKIP_) { 660 - struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta); 661 - 676 + stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]); 662 677 if (stainfo) { 663 - int key_idx; 664 - const int iv_len = 8, icv_len = 4, key_length = 32; 665 - void *crypto_private = NULL; 666 - struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; 667 - u8 key[32], iv[8], icv[4], *pframe = skb->data; 668 - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip"); 669 - struct security_priv *psecuritypriv = &padapter->securitypriv; 670 - 671 678 if (IS_MCAST(prxattrib->ra)) { 672 679 if (!psecuritypriv->binstallGrpkey) { 673 680 res = _FAIL; 674 681 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__); 675 682 goto exit; 676 683 } 677 - key_idx = prxattrib->key_index; 678 - memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16); 679 - memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16); 684 + prwskey = 
psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey; 680 685 } else { 681 - key_idx = 0; 682 - memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 683 - memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16); 686 + RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__)); 687 + prwskey = &stainfo->dot118021x_UncstKey.skey[0]; 684 688 } 685 689 686 - if (!crypto_ops) { 690 + iv = pframe+prxattrib->hdrlen; 691 + payload = pframe+prxattrib->iv_len+prxattrib->hdrlen; 692 + length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len; 693 + 694 + GET_TKIP_PN(iv, dot11txpn); 695 + 696 + pnl = (u16)(dot11txpn.val); 697 + pnh = (u32)(dot11txpn.val>>16); 698 + 699 + phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh); 700 + phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl); 701 + 702 + /* 4 decrypt payload include icv */ 703 + 704 + arcfour_init(&mycontext, rc4key, 16); 705 + arcfour_encrypt(&mycontext, payload, payload, length); 706 + 707 + *((__le32 *)crc) = getcrc32(payload, length-4); 708 + 709 + if (crc[3] != payload[length-1] || 710 + crc[2] != payload[length-2] || 711 + crc[1] != payload[length-3] || 712 + crc[0] != payload[length-4]) { 713 + RT_TRACE(_module_rtl871x_security_c_, _drv_err_, 714 + ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n", 715 + &crc, &payload[length-4])); 687 716 res = _FAIL; 688 - goto exit_lib80211_tkip; 689 717 } 690 - 691 - memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 692 - memcpy(icv, pframe + skb->len - icv_len, icv_len); 693 - 694 - crypto_private = crypto_ops->init(key_idx); 695 - if (!crypto_private) { 696 - res = _FAIL; 697 - goto exit_lib80211_tkip; 698 - } 699 - if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) { 700 - res = _FAIL; 701 - goto exit_lib80211_tkip; 702 - } 703 - if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) { 704 - res = _FAIL; 705 - goto exit_lib80211_tkip; 706 - } 707 - 708 - 
memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 709 - skb_push(skb, iv_len); 710 - skb_put(skb, icv_len); 711 - 712 - memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 713 - memcpy(pframe + skb->len - icv_len, icv, icv_len); 714 - 715 - exit_lib80211_tkip: 716 - if (crypto_ops && crypto_private) 717 - crypto_ops->deinit(crypto_private); 718 718 } else { 719 719 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n")); 720 720 res = _FAIL;
+5 -1
drivers/staging/speakup/speakup_soft.c
··· 198 198 int chars_sent = 0; 199 199 char __user *cp; 200 200 char *init; 201 + size_t bytes_per_ch = unicode ? 3 : 1; 201 202 u16 ch; 202 203 int empty; 203 204 unsigned long flags; 204 205 DEFINE_WAIT(wait); 206 + 207 + if (count < bytes_per_ch) 208 + return -EINVAL; 205 209 206 210 spin_lock_irqsave(&speakup_info.spinlock, flags); 207 211 while (1) { ··· 232 228 init = get_initstring(); 233 229 234 230 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ 235 - while (chars_sent <= count - 3) { 231 + while (chars_sent <= count - bytes_per_ch) { 236 232 if (speakup_info.flushing) { 237 233 speakup_info.flushing = 0; 238 234 ch = '\x18';
+9 -7
drivers/target/iscsi/cxgbit/cxgbit_target.c
··· 652 652 struct iscsi_param *param; 653 653 u32 mrdsl, mbl; 654 654 u32 max_npdu, max_iso_npdu; 655 + u32 max_iso_payload; 655 656 656 657 if (conn->login->leading_connection) { 657 658 param = iscsi_find_param_from_key(MAXBURSTLENGTH, ··· 671 670 mrdsl = conn_ops->MaxRecvDataSegmentLength; 672 671 max_npdu = mbl / mrdsl; 673 672 674 - max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / 675 - (ISCSI_HDR_LEN + mrdsl + 673 + max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss); 674 + 675 + max_iso_npdu = max_iso_payload / 676 + (ISCSI_HDR_LEN + mrdsl + 676 677 cxgbit_digest_len[csk->submode]); 677 678 678 679 csk->max_iso_npdu = min(max_npdu, max_iso_npdu); ··· 744 741 if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) 745 742 conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; 746 743 744 + if (cxgbit_set_digest(csk)) 745 + return -1; 746 + 747 747 if (conn->login->leading_connection) { 748 748 param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, 749 749 conn->param_list); ··· 770 764 if (is_t5(cdev->lldi.adapter_type)) 771 765 goto enable_ddp; 772 766 else 773 - goto enable_digest; 767 + return 0; 774 768 } 775 769 776 770 if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { ··· 786 780 set_bit(CSK_DDP_ENABLE, &csk->com.flags); 787 781 } 788 782 } 789 - 790 - enable_digest: 791 - if (cxgbit_set_digest(csk)) 792 - return -1; 793 783 794 784 return 0; 795 785 }
+1 -8
drivers/usb/chipidea/Kconfig
··· 3 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA 4 4 select EXTCON 5 5 select RESET_CONTROLLER 6 + select USB_ULPI_BUS 6 7 help 7 8 Say Y here if your system has a dual role high speed USB 8 9 controller based on ChipIdea silicon IP. It supports: ··· 39 38 help 40 39 Say Y here to enable host controller functionality of the 41 40 ChipIdea driver. 42 - 43 - config USB_CHIPIDEA_ULPI 44 - bool "ChipIdea ULPI PHY support" 45 - depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA 46 - help 47 - Say Y here if you have a ULPI PHY attached to your ChipIdea 48 - controller. 49 - 50 41 endif
+1 -2
drivers/usb/chipidea/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o 3 3 4 - ci_hdrc-y := core.o otg.o debug.o 4 + ci_hdrc-y := core.o otg.o debug.o ulpi.o 5 5 ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o 6 6 ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o 7 7 ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o 8 - ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o 9 8 10 9 # Glue/Bridge layers go here 11 10
-8
drivers/usb/chipidea/ci.h
··· 240 240 241 241 struct ci_hdrc_platform_data *platdata; 242 242 int vbus_active; 243 - #ifdef CONFIG_USB_CHIPIDEA_ULPI 244 243 struct ulpi *ulpi; 245 244 struct ulpi_ops ulpi_ops; 246 - #endif 247 245 struct phy *phy; 248 246 /* old usb_phy interface */ 249 247 struct usb_phy *usb_phy; ··· 424 426 #endif 425 427 } 426 428 427 - #if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI) 428 429 int ci_ulpi_init(struct ci_hdrc *ci); 429 430 void ci_ulpi_exit(struct ci_hdrc *ci); 430 431 int ci_ulpi_resume(struct ci_hdrc *ci); 431 - #else 432 - static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; } 433 - static inline void ci_ulpi_exit(struct ci_hdrc *ci) { } 434 - static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; } 435 - #endif 436 432 437 433 u32 hw_read_intr_enable(struct ci_hdrc *ci); 438 434
+3
drivers/usb/chipidea/ulpi.c
··· 95 95 { 96 96 int cnt = 100000; 97 97 98 + if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI) 99 + return 0; 100 + 98 101 while (cnt-- > 0) { 99 102 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE)) 100 103 return 0;
+3
drivers/usb/class/cdc-acm.c
··· 1831 1831 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ 1832 1832 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ 1833 1833 }, 1834 + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */ 1835 + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ 1836 + }, 1834 1837 1835 1838 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ 1836 1839 .driver_info = CLEAR_HALT_CONDITIONS,
+6 -2
drivers/usb/core/hub.c
··· 1142 1142 1143 1143 if (!udev || udev->state == USB_STATE_NOTATTACHED) { 1144 1144 /* Tell hub_wq to disconnect the device or 1145 - * check for a new connection 1145 + * check for a new connection or over current condition. 1146 + * Based on USB2.0 Spec Section 11.12.5, 1147 + * C_PORT_OVER_CURRENT could be set while 1148 + * PORT_OVER_CURRENT is not. So check for any of them. 1146 1149 */ 1147 1150 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || 1148 - (portstatus & USB_PORT_STAT_OVERCURRENT)) 1151 + (portstatus & USB_PORT_STAT_OVERCURRENT) || 1152 + (portchange & USB_PORT_STAT_C_OVERCURRENT)) 1149 1153 set_bit(port1, hub->change_bits); 1150 1154 1151 1155 } else if (portstatus & USB_PORT_STAT_ENABLE) {
+3 -3
drivers/usb/dwc2/gadget.c
··· 3430 3430 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3431 3431 hs_ep = hsotg->eps_in[idx]; 3432 3432 /* Proceed only unmasked ISOC EPs */ 3433 - if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3433 + if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3434 3434 continue; 3435 3435 3436 3436 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx)); ··· 3476 3476 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3477 3477 hs_ep = hsotg->eps_out[idx]; 3478 3478 /* Proceed only unmasked ISOC EPs */ 3479 - if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3479 + if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3480 3480 continue; 3481 3481 3482 3482 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx)); ··· 3650 3650 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3651 3651 hs_ep = hsotg->eps_out[idx]; 3652 3652 /* Proceed only unmasked ISOC EPs */ 3653 - if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3653 + if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3654 3654 continue; 3655 3655 3656 3656 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+31 -23
drivers/usb/dwc2/hcd.c
··· 2665 2665 2666 2666 #define DWC2_USB_DMA_ALIGN 4 2667 2667 2668 - struct dma_aligned_buffer { 2669 - void *kmalloc_ptr; 2670 - void *old_xfer_buffer; 2671 - u8 data[0]; 2672 - }; 2673 - 2674 2668 static void dwc2_free_dma_aligned_buffer(struct urb *urb) 2675 2669 { 2676 - struct dma_aligned_buffer *temp; 2670 + void *stored_xfer_buffer; 2671 + size_t length; 2677 2672 2678 2673 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) 2679 2674 return; 2680 2675 2681 - temp = container_of(urb->transfer_buffer, 2682 - struct dma_aligned_buffer, data); 2676 + /* Restore urb->transfer_buffer from the end of the allocated area */ 2677 + memcpy(&stored_xfer_buffer, urb->transfer_buffer + 2678 + urb->transfer_buffer_length, sizeof(urb->transfer_buffer)); 2683 2679 2684 - if (usb_urb_dir_in(urb)) 2685 - memcpy(temp->old_xfer_buffer, temp->data, 2686 - urb->transfer_buffer_length); 2687 - urb->transfer_buffer = temp->old_xfer_buffer; 2688 - kfree(temp->kmalloc_ptr); 2680 + if (usb_urb_dir_in(urb)) { 2681 + if (usb_pipeisoc(urb->pipe)) 2682 + length = urb->transfer_buffer_length; 2683 + else 2684 + length = urb->actual_length; 2685 + 2686 + memcpy(stored_xfer_buffer, urb->transfer_buffer, length); 2687 + } 2688 + kfree(urb->transfer_buffer); 2689 + urb->transfer_buffer = stored_xfer_buffer; 2689 2690 2690 2691 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; 2691 2692 } 2692 2693 2693 2694 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) 2694 2695 { 2695 - struct dma_aligned_buffer *temp, *kmalloc_ptr; 2696 + void *kmalloc_ptr; 2696 2697 size_t kmalloc_size; 2697 2698 2698 2699 if (urb->num_sgs || urb->sg || ··· 2701 2700 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) 2702 2701 return 0; 2703 2702 2704 - /* Allocate a buffer with enough padding for alignment */ 2703 + /* 2704 + * Allocate a buffer with enough padding for original transfer_buffer 2705 + * pointer. 
This allocation is guaranteed to be aligned properly for 2706 + * DMA 2707 + */ 2705 2708 kmalloc_size = urb->transfer_buffer_length + 2706 - sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; 2709 + sizeof(urb->transfer_buffer); 2707 2710 2708 2711 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); 2709 2712 if (!kmalloc_ptr) 2710 2713 return -ENOMEM; 2711 2714 2712 - /* Position our struct dma_aligned_buffer such that data is aligned */ 2713 - temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; 2714 - temp->kmalloc_ptr = kmalloc_ptr; 2715 - temp->old_xfer_buffer = urb->transfer_buffer; 2715 + /* 2716 + * Position value of original urb->transfer_buffer pointer to the end 2717 + * of allocation for later referencing 2718 + */ 2719 + memcpy(kmalloc_ptr + urb->transfer_buffer_length, 2720 + &urb->transfer_buffer, sizeof(urb->transfer_buffer)); 2721 + 2716 2722 if (usb_urb_dir_out(urb)) 2717 - memcpy(temp->data, urb->transfer_buffer, 2723 + memcpy(kmalloc_ptr, urb->transfer_buffer, 2718 2724 urb->transfer_buffer_length); 2719 - urb->transfer_buffer = temp->data; 2725 + urb->transfer_buffer = kmalloc_ptr; 2720 2726 2721 2727 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; 2722 2728
+7 -2
drivers/usb/dwc2/hcd_intr.c
··· 1231 1231 * avoid interrupt storms we'll wait before retrying if we've got 1232 1232 * several NAKs. If we didn't do this we'd retry directly from the 1233 1233 * interrupt handler and could end up quickly getting another 1234 - * interrupt (another NAK), which we'd retry. 1234 + * interrupt (another NAK), which we'd retry. Note that we do not 1235 + * delay retries for IN parts of control requests, as those are expected 1236 + * to complete fairly quickly, and if we delay them we risk confusing 1237 + * the device and cause it issue STALL. 1235 1238 * 1236 1239 * Note that in DMA mode software only gets involved to re-send NAKed 1237 1240 * transfers for split transactions, so we only need to apply this ··· 1247 1244 qtd->error_count = 0; 1248 1245 qtd->complete_split = 0; 1249 1246 qtd->num_naks++; 1250 - qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY; 1247 + qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY && 1248 + !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL && 1249 + chan->ep_is_in); 1251 1250 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); 1252 1251 goto handle_nak_done; 1253 1252 }
-3
drivers/usb/dwc3/ep0.c
··· 973 973 ret = dwc3_ep0_start_trans(dep); 974 974 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) && 975 975 req->request.length && req->request.zero) { 976 - u32 maxpacket; 977 976 978 977 ret = usb_gadget_map_request_by_dev(dwc->sysdev, 979 978 &req->request, dep->number); 980 979 if (ret) 981 980 return; 982 - 983 - maxpacket = dep->endpoint.maxpacket; 984 981 985 982 /* prepare normal TRB */ 986 983 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
-1
drivers/usb/gadget/composite.c
··· 1819 1819 if (cdev->use_os_string && cdev->os_desc_config && 1820 1820 (ctrl->bRequestType & USB_TYPE_VENDOR) && 1821 1821 ctrl->bRequest == cdev->b_vendor_code) { 1822 - struct usb_request *req; 1823 1822 struct usb_configuration *os_desc_cfg; 1824 1823 u8 *buf; 1825 1824 int interface;
+1 -1
drivers/usb/gadget/function/f_fs.c
··· 3263 3263 __ffs_event_add(ffs, FUNCTIONFS_SETUP); 3264 3264 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); 3265 3265 3266 - return USB_GADGET_DELAYED_STATUS; 3266 + return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; 3267 3267 } 3268 3268 3269 3269 static bool ffs_func_req_match(struct usb_function *f,
+12 -12
drivers/usb/gadget/function/f_uac2.c
··· 438 438 }; 439 439 440 440 struct cntrl_cur_lay3 { 441 - __u32 dCUR; 441 + __le32 dCUR; 442 442 }; 443 443 444 444 struct cntrl_range_lay3 { 445 - __u16 wNumSubRanges; 446 - __u32 dMIN; 447 - __u32 dMAX; 448 - __u32 dRES; 445 + __le16 wNumSubRanges; 446 + __le32 dMIN; 447 + __le32 dMAX; 448 + __le32 dRES; 449 449 } __packed; 450 450 451 451 static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, ··· 559 559 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); 560 560 if (!agdev->out_ep) { 561 561 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 562 - return ret; 562 + return -ENODEV; 563 563 } 564 564 565 565 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); 566 566 if (!agdev->in_ep) { 567 567 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 568 - return ret; 568 + return -ENODEV; 569 569 } 570 570 571 571 agdev->in_ep_maxpsize = max_t(u16, ··· 703 703 memset(&c, 0, sizeof(struct cntrl_cur_lay3)); 704 704 705 705 if (entity_id == USB_IN_CLK_ID) 706 - c.dCUR = p_srate; 706 + c.dCUR = cpu_to_le32(p_srate); 707 707 else if (entity_id == USB_OUT_CLK_ID) 708 - c.dCUR = c_srate; 708 + c.dCUR = cpu_to_le32(c_srate); 709 709 710 710 value = min_t(unsigned, w_length, sizeof c); 711 711 memcpy(req->buf, &c, value); ··· 742 742 743 743 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { 744 744 if (entity_id == USB_IN_CLK_ID) 745 - r.dMIN = p_srate; 745 + r.dMIN = cpu_to_le32(p_srate); 746 746 else if (entity_id == USB_OUT_CLK_ID) 747 - r.dMIN = c_srate; 747 + r.dMIN = cpu_to_le32(c_srate); 748 748 else 749 749 return -EOPNOTSUPP; 750 750 751 751 r.dMAX = r.dMIN; 752 752 r.dRES = 0; 753 - r.wNumSubRanges = 1; 753 + r.wNumSubRanges = cpu_to_le16(1); 754 754 755 755 value = min_t(unsigned, w_length, sizeof r); 756 756 memcpy(req->buf, &r, value);
+33 -55
drivers/usb/gadget/function/u_audio.c
··· 32 32 struct uac_rtd_params { 33 33 struct snd_uac_chip *uac; /* parent chip */ 34 34 bool ep_enabled; /* if the ep is enabled */ 35 - /* Size of the ring buffer */ 36 - size_t dma_bytes; 37 - unsigned char *dma_area; 38 35 39 36 struct snd_pcm_substream *ss; 40 37 ··· 39 42 ssize_t hw_ptr; 40 43 41 44 void *rbuf; 42 - 43 - size_t period_size; 44 45 45 46 unsigned max_psize; /* MaxPacketSize of endpoint */ 46 47 struct uac_req *ureq; ··· 79 84 static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) 80 85 { 81 86 unsigned pending; 82 - unsigned long flags; 87 + unsigned long flags, flags2; 83 88 unsigned int hw_ptr; 84 - bool update_alsa = false; 85 89 int status = req->status; 86 90 struct uac_req *ur = req->context; 87 91 struct snd_pcm_substream *substream; 92 + struct snd_pcm_runtime *runtime; 88 93 struct uac_rtd_params *prm = ur->pp; 89 94 struct snd_uac_chip *uac = prm->uac; 90 95 ··· 105 110 /* Do nothing if ALSA isn't active */ 106 111 if (!substream) 107 112 goto exit; 113 + 114 + snd_pcm_stream_lock_irqsave(substream, flags2); 115 + 116 + runtime = substream->runtime; 117 + if (!runtime || !snd_pcm_running(substream)) { 118 + snd_pcm_stream_unlock_irqrestore(substream, flags2); 119 + goto exit; 120 + } 108 121 109 122 spin_lock_irqsave(&prm->lock, flags); 110 123 ··· 140 137 req->actual = req->length; 141 138 } 142 139 143 - pending = prm->hw_ptr % prm->period_size; 144 - pending += req->actual; 145 - if (pending >= prm->period_size) 146 - update_alsa = true; 147 - 148 140 hw_ptr = prm->hw_ptr; 149 - prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes; 150 141 151 142 spin_unlock_irqrestore(&prm->lock, flags); 152 143 153 144 /* Pack USB load in ALSA ring buffer */ 154 - pending = prm->dma_bytes - hw_ptr; 145 + pending = runtime->dma_bytes - hw_ptr; 155 146 156 147 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 157 148 if (unlikely(pending < req->actual)) { 158 - memcpy(req->buf, prm->dma_area + hw_ptr, pending); 159 
- memcpy(req->buf + pending, prm->dma_area, 149 + memcpy(req->buf, runtime->dma_area + hw_ptr, pending); 150 + memcpy(req->buf + pending, runtime->dma_area, 160 151 req->actual - pending); 161 152 } else { 162 - memcpy(req->buf, prm->dma_area + hw_ptr, req->actual); 153 + memcpy(req->buf, runtime->dma_area + hw_ptr, 154 + req->actual); 163 155 } 164 156 } else { 165 157 if (unlikely(pending < req->actual)) { 166 - memcpy(prm->dma_area + hw_ptr, req->buf, pending); 167 - memcpy(prm->dma_area, req->buf + pending, 158 + memcpy(runtime->dma_area + hw_ptr, req->buf, pending); 159 + memcpy(runtime->dma_area, req->buf + pending, 168 160 req->actual - pending); 169 161 } else { 170 - memcpy(prm->dma_area + hw_ptr, req->buf, req->actual); 162 + memcpy(runtime->dma_area + hw_ptr, req->buf, 163 + req->actual); 171 164 } 172 165 } 166 + 167 + spin_lock_irqsave(&prm->lock, flags); 168 + /* update hw_ptr after data is copied to memory */ 169 + prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes; 170 + hw_ptr = prm->hw_ptr; 171 + spin_unlock_irqrestore(&prm->lock, flags); 172 + snd_pcm_stream_unlock_irqrestore(substream, flags2); 173 + 174 + if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual) 175 + snd_pcm_period_elapsed(substream); 173 176 174 177 exit: 175 178 if (usb_ep_queue(ep, req, GFP_ATOMIC)) 176 179 dev_err(uac->card->dev, "%d Error!\n", __LINE__); 177 - 178 - if (update_alsa) 179 - snd_pcm_period_elapsed(substream); 180 180 } 181 181 182 182 static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) ··· 242 236 static int uac_pcm_hw_params(struct snd_pcm_substream *substream, 243 237 struct snd_pcm_hw_params *hw_params) 244 238 { 245 - struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); 246 - struct uac_rtd_params *prm; 247 - int err; 248 - 249 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 250 - prm = &uac->p_prm; 251 - else 252 - prm = &uac->c_prm; 253 - 254 - err = snd_pcm_lib_malloc_pages(substream, 239 + return 
snd_pcm_lib_malloc_pages(substream, 255 240 params_buffer_bytes(hw_params)); 256 - if (err >= 0) { 257 - prm->dma_bytes = substream->runtime->dma_bytes; 258 - prm->dma_area = substream->runtime->dma_area; 259 - prm->period_size = params_period_bytes(hw_params); 260 - } 261 - 262 - return err; 263 241 } 264 242 265 243 static int uac_pcm_hw_free(struct snd_pcm_substream *substream) 266 244 { 267 - struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); 268 - struct uac_rtd_params *prm; 269 - 270 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 271 - prm = &uac->p_prm; 272 - else 273 - prm = &uac->c_prm; 274 - 275 - prm->dma_area = NULL; 276 - prm->dma_bytes = 0; 277 - prm->period_size = 0; 278 - 279 245 return snd_pcm_lib_free_pages(substream); 280 246 } 281 247 ··· 573 595 if (err < 0) 574 596 goto snd_fail; 575 597 576 - strcpy(pcm->name, pcm_name); 598 + strlcpy(pcm->name, pcm_name, sizeof(pcm->name)); 577 599 pcm->private_data = uac; 578 600 uac->pcm = pcm; 579 601 580 602 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops); 581 603 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops); 582 604 583 - strcpy(card->driver, card_name); 584 - strcpy(card->shortname, card_name); 605 + strlcpy(card->driver, card_name, sizeof(card->driver)); 606 + strlcpy(card->shortname, card_name, sizeof(card->shortname)); 585 607 sprintf(card->longname, "%s %i", card_name, card->dev->id); 586 608 587 609 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+10 -1
drivers/usb/gadget/udc/aspeed-vhub/ep0.c
··· 108 108 /* Check our state, cancel pending requests if needed */ 109 109 if (ep->ep0.state != ep0_state_token) { 110 110 EPDBG(ep, "wrong state\n"); 111 + ast_vhub_nuke(ep, -EIO); 112 + 113 + /* 114 + * Accept the packet regardless, this seems to happen 115 + * when stalling a SETUP packet that has an OUT data 116 + * phase. 117 + */ 111 118 ast_vhub_nuke(ep, 0); 112 119 goto stall; 113 120 } ··· 219 212 if (chunk && req->req.buf) 220 213 memcpy(ep->buf, req->req.buf + req->req.actual, chunk); 221 214 215 + vhub_dma_workaround(ep->buf); 216 + 222 217 /* Remember chunk size and trigger send */ 223 218 reg = VHUB_EP0_SET_TX_LEN(chunk); 224 219 writel(reg, ep->ep0.ctlstat); ··· 233 224 EPVDBG(ep, "rx prime\n"); 234 225 235 226 /* Prime endpoint for receiving data */ 236 - writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL); 227 + writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat); 237 228 } 238 229 239 230 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+11 -3
drivers/usb/gadget/udc/aspeed-vhub/epn.c
··· 66 66 if (!req->req.dma) { 67 67 68 68 /* For IN transfers, copy data over first */ 69 - if (ep->epn.is_in) 69 + if (ep->epn.is_in) { 70 70 memcpy(ep->buf, req->req.buf + act, chunk); 71 + vhub_dma_workaround(ep->buf); 72 + } 71 73 writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE); 72 - } else 74 + } else { 75 + if (ep->epn.is_in) 76 + vhub_dma_workaround(req->req.buf); 73 77 writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE); 78 + } 74 79 75 80 /* Start DMA */ 76 81 req->active = true; ··· 166 161 static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep, 167 162 struct ast_vhub_req *req) 168 163 { 164 + struct ast_vhub_desc *desc = NULL; 169 165 unsigned int act = req->act_count; 170 166 unsigned int len = req->req.length; 171 167 unsigned int chunk; ··· 183 177 184 178 /* While we can create descriptors */ 185 179 while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) { 186 - struct ast_vhub_desc *desc; 187 180 unsigned int d_num; 188 181 189 182 /* Grab next free descriptor */ ··· 231 226 /* Account packet */ 232 227 req->act_count = act = act + chunk; 233 228 } 229 + 230 + if (likely(desc)) 231 + vhub_dma_workaround(desc); 234 232 235 233 /* Tell HW about new descriptors */ 236 234 writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
+33
drivers/usb/gadget/udc/aspeed-vhub/vhub.h
··· 462 462 #define DDBG(d, fmt, ...) do { } while(0) 463 463 #endif 464 464 465 + static inline void vhub_dma_workaround(void *addr) 466 + { 467 + /* 468 + * This works around a confirmed HW issue with the Aspeed chip. 469 + * 470 + * The core uses a different bus to memory than the AHB going to 471 + * the USB device controller. Due to the latter having a higher 472 + * priority than the core for arbitration on that bus, it's 473 + * possible for an MMIO to the device, followed by a DMA by the 474 + * device from memory to all be performed and services before 475 + * a previous store to memory gets completed. 476 + * 477 + * This the following scenario can happen: 478 + * 479 + * - Driver writes to a DMA descriptor (Mbus) 480 + * - Driver writes to the MMIO register to start the DMA (AHB) 481 + * - The gadget sees the second write and sends a read of the 482 + * descriptor to the memory controller (Mbus) 483 + * - The gadget hits memory before the descriptor write 484 + * causing it to read an obsolete value. 485 + * 486 + * Thankfully the problem is limited to the USB gadget device, other 487 + * masters in the SoC all have a lower priority than the core, thus 488 + * ensuring that the store by the core arrives first. 489 + * 490 + * The workaround consists of using a dummy read of the memory before 491 + * doing the MMIO writes. This will ensure that the previous writes 492 + * have been "pushed out". 493 + */ 494 + mb(); 495 + (void)__raw_readl((void __iomem *)addr); 496 + } 497 + 465 498 /* core.c */ 466 499 void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, 467 500 int status);
+3 -3
drivers/usb/gadget/udc/r8a66597-udc.c
··· 832 832 833 833 r8a66597_bset(r8a66597, XCKE, SYSCFG0); 834 834 835 - msleep(3); 835 + mdelay(3); 836 836 837 837 r8a66597_bset(r8a66597, PLLC, SYSCFG0); 838 838 839 - msleep(1); 839 + mdelay(1); 840 840 841 841 r8a66597_bset(r8a66597, SCKE, SYSCFG0); 842 842 ··· 1190 1190 r8a66597->ep0_req->length = 2; 1191 1191 /* AV: what happens if we get called again before that gets through? */ 1192 1192 spin_unlock(&r8a66597->lock); 1193 - r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL); 1193 + r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC); 1194 1194 spin_lock(&r8a66597->lock); 1195 1195 } 1196 1196
+1
drivers/usb/host/xhci.c
··· 3051 3051 if (!list_empty(&ep->ring->td_list)) { 3052 3052 dev_err(&udev->dev, "EP not empty, refuse reset\n"); 3053 3053 spin_unlock_irqrestore(&xhci->lock, flags); 3054 + xhci_free_command(xhci, cfg_cmd); 3054 3055 goto cleanup; 3055 3056 } 3056 3057 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+3 -1
drivers/usb/phy/phy-fsl-usb.c
··· 861 861 if (pdata->init && pdata->init(pdev) != 0) 862 862 return -EINVAL; 863 863 864 + #ifdef CONFIG_PPC32 864 865 if (pdata->big_endian_mmio) { 865 866 _fsl_readl = _fsl_readl_be; 866 867 _fsl_writel = _fsl_writel_be; ··· 869 868 _fsl_readl = _fsl_readl_le; 870 869 _fsl_writel = _fsl_writel_le; 871 870 } 871 + #endif 872 872 873 873 /* request irq */ 874 874 p_otg->irq = platform_get_irq(pdev, 0); ··· 960 958 /* 961 959 * state file in sysfs 962 960 */ 963 - static int show_fsl_usb2_otg_state(struct device *dev, 961 + static ssize_t show_fsl_usb2_otg_state(struct device *dev, 964 962 struct device_attribute *attr, char *buf) 965 963 { 966 964 struct otg_fsm *fsm = &fsl_otg_dev->fsm;
+1 -1
drivers/usb/typec/tcpm.c
··· 2140 2140 * PPS APDO. Again skip the first sink PDO as this will 2141 2141 * always be 5V 3A. 2142 2142 */ 2143 - for (j = i; j < port->nr_snk_pdo; j++) { 2143 + for (j = 1; j < port->nr_snk_pdo; j++) { 2144 2144 pdo = port->snk_pdo[j]; 2145 2145 2146 2146 switch (pdo_type(pdo)) {
+2
drivers/virtio/virtio_balloon.c
··· 513 513 tell_host(vb, vb->inflate_vq); 514 514 515 515 /* balloon's page migration 2nd step -- deflate "page" */ 516 + spin_lock_irqsave(&vb_dev_info->pages_lock, flags); 516 517 balloon_page_delete(page); 518 + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); 517 519 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; 518 520 set_page_pfns(vb, vb->pfns, page); 519 521 tell_host(vb, vb->deflate_vq);
+5 -4
fs/block_dev.c
··· 221 221 222 222 ret = bio_iov_iter_get_pages(&bio, iter); 223 223 if (unlikely(ret)) 224 - return ret; 224 + goto out; 225 225 ret = bio.bi_iter.bi_size; 226 226 227 227 if (iov_iter_rw(iter) == READ) { ··· 250 250 put_page(bvec->bv_page); 251 251 } 252 252 253 - if (vecs != inline_vecs) 254 - kfree(vecs); 255 - 256 253 if (unlikely(bio.bi_status)) 257 254 ret = blk_status_to_errno(bio.bi_status); 255 + 256 + out: 257 + if (vecs != inline_vecs) 258 + kfree(vecs); 258 259 259 260 bio_uninit(&bio); 260 261
+2 -1
fs/cachefiles/bind.c
··· 218 218 "%s", 219 219 fsdef->dentry->d_sb->s_id); 220 220 221 - fscache_object_init(&fsdef->fscache, NULL, &cache->cache); 221 + fscache_object_init(&fsdef->fscache, &fscache_fsdef_index, 222 + &cache->cache); 222 223 223 224 ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag); 224 225 if (ret < 0)
+1 -2
fs/cachefiles/namei.c
··· 186 186 * need to wait for it to be destroyed */ 187 187 wait_for_old_object: 188 188 trace_cachefiles_wait_active(object, dentry, xobject); 189 + clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); 189 190 190 191 if (fscache_object_is_live(&xobject->fscache)) { 191 192 pr_err("\n"); 192 193 pr_err("Error: Unexpected object collision\n"); 193 194 cachefiles_printk_object(object, xobject); 194 - BUG(); 195 195 } 196 196 atomic_inc(&xobject->usage); 197 197 write_unlock(&cache->active_lock); ··· 248 248 goto try_again; 249 249 250 250 requeue: 251 - clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); 252 251 cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo); 253 252 _leave(" = -ETIMEDOUT"); 254 253 return -ETIMEDOUT;
+12 -5
fs/cachefiles/rdwr.c
··· 27 27 struct cachefiles_one_read *monitor = 28 28 container_of(wait, struct cachefiles_one_read, monitor); 29 29 struct cachefiles_object *object; 30 + struct fscache_retrieval *op = monitor->op; 30 31 struct wait_bit_key *key = _key; 31 32 struct page *page = wait->private; 32 33 ··· 52 51 list_del(&wait->entry); 53 52 54 53 /* move onto the action list and queue for FS-Cache thread pool */ 55 - ASSERT(monitor->op); 54 + ASSERT(op); 56 55 57 - object = container_of(monitor->op->op.object, 58 - struct cachefiles_object, fscache); 56 + /* We need to temporarily bump the usage count as we don't own a ref 57 + * here otherwise cachefiles_read_copier() may free the op between the 58 + * monitor being enqueued on the op->to_do list and the op getting 59 + * enqueued on the work queue. 60 + */ 61 + fscache_get_retrieval(op); 59 62 63 + object = container_of(op->op.object, struct cachefiles_object, fscache); 60 64 spin_lock(&object->work_lock); 61 - list_add_tail(&monitor->op_link, &monitor->op->to_do); 65 + list_add_tail(&monitor->op_link, &op->to_do); 62 66 spin_unlock(&object->work_lock); 63 67 64 - fscache_enqueue_retrieval(monitor->op); 68 + fscache_enqueue_retrieval(op); 69 + fscache_put_retrieval(op); 65 70 return 0; 66 71 } 67 72
+1
fs/exec.c
··· 293 293 bprm->vma = vma = vm_area_alloc(mm); 294 294 if (!vma) 295 295 return -ENOMEM; 296 + vma_set_anonymous(vma); 296 297 297 298 if (down_write_killable(&mm->mmap_sem)) { 298 299 err = -EINTR;
+3
fs/ext4/balloc.c
··· 368 368 return -EFSCORRUPTED; 369 369 370 370 ext4_lock_group(sb, block_group); 371 + if (buffer_verified(bh)) 372 + goto verified; 371 373 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, 372 374 desc, bh))) { 373 375 ext4_unlock_group(sb, block_group); ··· 388 386 return -EFSCORRUPTED; 389 387 } 390 388 set_buffer_verified(bh); 389 + verified: 391 390 ext4_unlock_group(sb, block_group); 392 391 return 0; 393 392 }
+7 -1
fs/ext4/ialloc.c
··· 90 90 return -EFSCORRUPTED; 91 91 92 92 ext4_lock_group(sb, block_group); 93 + if (buffer_verified(bh)) 94 + goto verified; 93 95 blk = ext4_inode_bitmap(sb, desc); 94 96 if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh, 95 97 EXT4_INODES_PER_GROUP(sb) / 8)) { ··· 103 101 return -EFSBADCRC; 104 102 } 105 103 set_buffer_verified(bh); 104 + verified: 106 105 ext4_unlock_group(sb, block_group); 107 106 return 0; 108 107 } ··· 1388 1385 ext4_itable_unused_count(sb, gdp)), 1389 1386 sbi->s_inodes_per_block); 1390 1387 1391 - if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { 1388 + if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) || 1389 + ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) - 1390 + ext4_itable_unused_count(sb, gdp)) < 1391 + EXT4_FIRST_INO(sb)))) { 1392 1392 ext4_error(sb, "Something is wrong with group %u: " 1393 1393 "used itable blocks: %d; " 1394 1394 "itable unused count: %u",
+11 -8
fs/ext4/inline.c
··· 682 682 goto convert; 683 683 } 684 684 685 + ret = ext4_journal_get_write_access(handle, iloc.bh); 686 + if (ret) 687 + goto out; 688 + 685 689 flags |= AOP_FLAG_NOFS; 686 690 687 691 page = grab_cache_page_write_begin(mapping, 0, flags); ··· 714 710 out_up_read: 715 711 up_read(&EXT4_I(inode)->xattr_sem); 716 712 out: 717 - if (handle) 713 + if (handle && (ret != 1)) 718 714 ext4_journal_stop(handle); 719 715 brelse(iloc.bh); 720 716 return ret; ··· 756 752 757 753 ext4_write_unlock_xattr(inode, &no_expand); 758 754 brelse(iloc.bh); 755 + mark_inode_dirty(inode); 759 756 out: 760 757 return copied; 761 758 } ··· 903 898 goto out; 904 899 } 905 900 906 - 907 901 page = grab_cache_page_write_begin(mapping, 0, flags); 908 902 if (!page) { 909 903 ret = -ENOMEM; ··· 920 916 if (ret < 0) 921 917 goto out_release_page; 922 918 } 919 + ret = ext4_journal_get_write_access(handle, iloc.bh); 920 + if (ret) 921 + goto out_release_page; 923 922 924 923 up_read(&EXT4_I(inode)->xattr_sem); 925 924 *pagep = page; ··· 943 936 unsigned len, unsigned copied, 944 937 struct page *page) 945 938 { 946 - int i_size_changed = 0; 947 939 int ret; 948 940 949 941 ret = ext4_write_inline_data_end(inode, pos, len, copied, page); ··· 960 954 * But it's important to update i_size while still holding page lock: 961 955 * page writeout could otherwise come in and zero beyond i_size. 962 956 */ 963 - if (pos+copied > inode->i_size) { 957 + if (pos+copied > inode->i_size) 964 958 i_size_write(inode, pos+copied); 965 - i_size_changed = 1; 966 - } 967 959 unlock_page(page); 968 960 put_page(page); 969 961 ··· 971 967 * ordering of page lock and transaction start for journaling 972 968 * filesystems. 973 969 */ 974 - if (i_size_changed) 975 - mark_inode_dirty(inode); 970 + mark_inode_dirty(inode); 976 971 977 972 return copied; 978 973 }
+7 -9
fs/ext4/inode.c
··· 1389 1389 loff_t old_size = inode->i_size; 1390 1390 int ret = 0, ret2; 1391 1391 int i_size_changed = 0; 1392 + int inline_data = ext4_has_inline_data(inode); 1392 1393 1393 1394 trace_ext4_write_end(inode, pos, len, copied); 1394 - if (ext4_has_inline_data(inode)) { 1395 + if (inline_data) { 1395 1396 ret = ext4_write_inline_data_end(inode, pos, len, 1396 1397 copied, page); 1397 1398 if (ret < 0) { ··· 1420 1419 * ordering of page lock and transaction start for journaling 1421 1420 * filesystems. 1422 1421 */ 1423 - if (i_size_changed) 1422 + if (i_size_changed || inline_data) 1424 1423 ext4_mark_inode_dirty(handle, inode); 1425 1424 1426 1425 if (pos + len > inode->i_size && ext4_can_truncate(inode)) ··· 1494 1493 int partial = 0; 1495 1494 unsigned from, to; 1496 1495 int size_changed = 0; 1496 + int inline_data = ext4_has_inline_data(inode); 1497 1497 1498 1498 trace_ext4_journalled_write_end(inode, pos, len, copied); 1499 1499 from = pos & (PAGE_SIZE - 1); ··· 1502 1500 1503 1501 BUG_ON(!ext4_handle_valid(handle)); 1504 1502 1505 - if (ext4_has_inline_data(inode)) { 1503 + if (inline_data) { 1506 1504 ret = ext4_write_inline_data_end(inode, pos, len, 1507 1505 copied, page); 1508 1506 if (ret < 0) { ··· 1533 1531 if (old_size < pos) 1534 1532 pagecache_isize_extended(inode, old_size, pos); 1535 1533 1536 - if (size_changed) { 1534 + if (size_changed || inline_data) { 1537 1535 ret2 = ext4_mark_inode_dirty(handle, inode); 1538 1536 if (!ret) 1539 1537 ret = ret2; ··· 2030 2028 } 2031 2029 2032 2030 if (inline_data) { 2033 - BUFFER_TRACE(inode_bh, "get write access"); 2034 - ret = ext4_journal_get_write_access(handle, inode_bh); 2035 - 2036 - err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 2037 - 2031 + ret = ext4_mark_inode_dirty(handle, inode); 2038 2032 } else { 2039 2033 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 2040 2034 do_journal_get_write_access);
+2 -5
fs/ext4/mmp.c
··· 186 186 goto exit_thread; 187 187 } 188 188 189 - if (sb_rdonly(sb)) { 190 - ext4_warning(sb, "kmmpd being stopped since filesystem " 191 - "has been remounted as readonly."); 192 - goto exit_thread; 193 - } 189 + if (sb_rdonly(sb)) 190 + break; 194 191 195 192 diff = jiffies - last_update_time; 196 193 if (diff < mmp_update_interval * HZ)
+5 -10
fs/ext4/super.c
··· 2342 2342 struct ext4_sb_info *sbi = EXT4_SB(sb); 2343 2343 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 2344 2344 ext4_fsblk_t last_block; 2345 - ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; 2345 + ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0); 2346 2346 ext4_fsblk_t block_bitmap; 2347 2347 ext4_fsblk_t inode_bitmap; 2348 2348 ext4_fsblk_t inode_table; ··· 3141 3141 if (!gdp) 3142 3142 continue; 3143 3143 3144 - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) 3145 - continue; 3146 - if (group != 0) 3144 + if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3147 3145 break; 3148 - ext4_error(sb, "Inode table for bg 0 marked as " 3149 - "needing zeroing"); 3150 - if (sb_rdonly(sb)) 3151 - return ngroups; 3152 3146 } 3153 3147 3154 3148 return group; ··· 4079 4085 goto failed_mount2; 4080 4086 } 4081 4087 } 4088 + sbi->s_gdb_count = db_count; 4082 4089 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { 4083 4090 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 4084 4091 ret = -EFSCORRUPTED; 4085 4092 goto failed_mount2; 4086 4093 } 4087 - 4088 - sbi->s_gdb_count = db_count; 4089 4094 4090 4095 timer_setup(&sbi->s_err_report, print_daily_error_info, 0); 4091 4096 ··· 5206 5213 5207 5214 if (sbi->s_journal) 5208 5215 ext4_mark_recovery_complete(sb, es); 5216 + if (sbi->s_mmp_tsk) 5217 + kthread_stop(sbi->s_mmp_tsk); 5209 5218 } else { 5210 5219 /* Make sure we can mount this feature set readwrite */ 5211 5220 if (ext4_has_feature_readonly(sb) ||
+1 -1
fs/fscache/cache.c
··· 220 220 { 221 221 struct fscache_cache_tag *tag; 222 222 223 + ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index); 223 224 BUG_ON(!cache->ops); 224 225 BUG_ON(!ifsdef); 225 226 ··· 249 248 if (!cache->kobj) 250 249 goto error; 251 250 252 - ifsdef->cookie = &fscache_fsdef_index; 253 251 ifsdef->cache = cache; 254 252 cache->fsdef = ifsdef; 255 253
+4 -3
fs/fscache/cookie.c
··· 516 516 goto error; 517 517 } 518 518 519 + ASSERTCMP(object->cookie, ==, cookie); 519 520 fscache_stat(&fscache_n_object_alloc); 520 521 521 522 object->debug_id = atomic_inc_return(&fscache_object_debug_id); ··· 572 571 573 572 _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id); 574 573 574 + ASSERTCMP(object->cookie, ==, cookie); 575 + 575 576 spin_lock(&cookie->lock); 576 577 577 578 /* there may be multiple initial creations of this object, but we only ··· 613 610 spin_unlock(&cache->object_list_lock); 614 611 } 615 612 616 - /* attach to the cookie */ 617 - object->cookie = cookie; 618 - fscache_cookie_get(cookie, fscache_cookie_get_attach_object); 613 + /* Attach to the cookie. The object already has a ref on it. */ 619 614 hlist_add_head(&object->cookie_link, &cookie->backing_objects); 620 615 621 616 fscache_objlist_add(object);
+1
fs/fscache/object.c
··· 327 327 object->store_limit_l = 0; 328 328 object->cache = cache; 329 329 object->cookie = cookie; 330 + fscache_cookie_get(cookie, fscache_cookie_get_attach_object); 330 331 object->parent = NULL; 331 332 #ifdef CONFIG_FSCACHE_OBJECT_LIST 332 333 RB_CLEAR_NODE(&object->objlist_link);
+4 -2
fs/fscache/operation.c
··· 70 70 ASSERT(op->processor != NULL); 71 71 ASSERT(fscache_object_is_available(op->object)); 72 72 ASSERTCMP(atomic_read(&op->usage), >, 0); 73 - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); 73 + ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, 74 + op->state, ==, FSCACHE_OP_ST_CANCELLED); 74 75 75 76 fscache_stat(&fscache_n_op_enqueue); 76 77 switch (op->flags & FSCACHE_OP_TYPE) { ··· 500 499 struct fscache_cache *cache; 501 500 502 501 _enter("{OBJ%x OP%x,%d}", 503 - op->object->debug_id, op->debug_id, atomic_read(&op->usage)); 502 + op->object ? op->object->debug_id : 0, 503 + op->debug_id, atomic_read(&op->usage)); 504 504 505 505 ASSERTCMP(atomic_read(&op->usage), >, 0); 506 506
+2
fs/hugetlbfs/inode.c
··· 411 411 bool truncate_op = (lend == LLONG_MAX); 412 412 413 413 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); 414 + vma_init(&pseudo_vma, current->mm); 414 415 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 415 416 pagevec_init(&pvec); 416 417 next = start; ··· 596 595 * as input to create an allocation policy. 597 596 */ 598 597 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); 598 + vma_init(&pseudo_vma, mm); 599 599 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 600 600 pseudo_vma.vm_file = file; 601 601
+2
fs/squashfs/block.c
··· 167 167 } 168 168 169 169 if (compressed) { 170 + if (!msblk->stream) 171 + goto read_failure; 170 172 length = squashfs_decompress(msblk, bh, b, offset, length, 171 173 output); 172 174 if (length < 0)
+3
fs/squashfs/cache.c
··· 350 350 351 351 TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); 352 352 353 + if (unlikely(length < 0)) 354 + return -EIO; 355 + 353 356 while (length) { 354 357 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); 355 358 if (entry->error) {
+34 -24
fs/squashfs/file.c
··· 194 194 } 195 195 196 196 for (i = 0; i < blocks; i++) { 197 - int size = le32_to_cpu(blist[i]); 197 + int size = squashfs_block_size(blist[i]); 198 + if (size < 0) { 199 + err = size; 200 + goto failure; 201 + } 198 202 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); 199 203 } 200 204 n -= blocks; ··· 371 367 sizeof(size)); 372 368 if (res < 0) 373 369 return res; 374 - return le32_to_cpu(size); 370 + return squashfs_block_size(size); 371 + } 372 + 373 + void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) 374 + { 375 + int copied; 376 + void *pageaddr; 377 + 378 + pageaddr = kmap_atomic(page); 379 + copied = squashfs_copy_data(pageaddr, buffer, offset, avail); 380 + memset(pageaddr + copied, 0, PAGE_SIZE - copied); 381 + kunmap_atomic(pageaddr); 382 + 383 + flush_dcache_page(page); 384 + if (copied == avail) 385 + SetPageUptodate(page); 386 + else 387 + SetPageError(page); 375 388 } 376 389 377 390 /* Copy data into page cache */ ··· 397 376 { 398 377 struct inode *inode = page->mapping->host; 399 378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 400 - void *pageaddr; 401 379 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; 402 380 int start_index = page->index & ~mask, end_index = start_index | mask; 403 381 ··· 422 402 if (PageUptodate(push_page)) 423 403 goto skip_page; 424 404 425 - pageaddr = kmap_atomic(push_page); 426 - squashfs_copy_data(pageaddr, buffer, offset, avail); 427 - memset(pageaddr + avail, 0, PAGE_SIZE - avail); 428 - kunmap_atomic(pageaddr); 429 - flush_dcache_page(push_page); 430 - SetPageUptodate(push_page); 405 + squashfs_fill_page(push_page, buffer, offset, avail); 431 406 skip_page: 432 407 unlock_page(push_page); 433 408 if (i != page->index) ··· 431 416 } 432 417 433 418 /* Read datablock stored packed inside a fragment (tail-end packed block) */ 434 - static int squashfs_readpage_fragment(struct page *page) 419 + static int squashfs_readpage_fragment(struct page 
*page, int expected) 435 420 { 436 421 struct inode *inode = page->mapping->host; 437 - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 438 422 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, 439 423 squashfs_i(inode)->fragment_block, 440 424 squashfs_i(inode)->fragment_size); ··· 444 430 squashfs_i(inode)->fragment_block, 445 431 squashfs_i(inode)->fragment_size); 446 432 else 447 - squashfs_copy_cache(page, buffer, i_size_read(inode) & 448 - (msblk->block_size - 1), 433 + squashfs_copy_cache(page, buffer, expected, 449 434 squashfs_i(inode)->fragment_offset); 450 435 451 436 squashfs_cache_put(buffer); 452 437 return res; 453 438 } 454 439 455 - static int squashfs_readpage_sparse(struct page *page, int index, int file_end) 440 + static int squashfs_readpage_sparse(struct page *page, int expected) 456 441 { 457 - struct inode *inode = page->mapping->host; 458 - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 459 - int bytes = index == file_end ? 460 - (i_size_read(inode) & (msblk->block_size - 1)) : 461 - msblk->block_size; 462 - 463 - squashfs_copy_cache(page, NULL, bytes, 0); 442 + squashfs_copy_cache(page, NULL, expected, 0); 464 443 return 0; 465 444 } 466 445 ··· 463 456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 464 457 int index = page->index >> (msblk->block_log - PAGE_SHIFT); 465 458 int file_end = i_size_read(inode) >> msblk->block_log; 459 + int expected = index == file_end ? 
460 + (i_size_read(inode) & (msblk->block_size - 1)) : 461 + msblk->block_size; 466 462 int res; 467 463 void *pageaddr; 468 464 ··· 484 474 goto error_out; 485 475 486 476 if (bsize == 0) 487 - res = squashfs_readpage_sparse(page, index, file_end); 477 + res = squashfs_readpage_sparse(page, expected); 488 478 else 489 - res = squashfs_readpage_block(page, block, bsize); 479 + res = squashfs_readpage_block(page, block, bsize, expected); 490 480 } else 491 - res = squashfs_readpage_fragment(page); 481 + res = squashfs_readpage_fragment(page, expected); 492 482 493 483 if (!res) 494 484 return 0;
+2 -2
fs/squashfs/file_cache.c
··· 20 20 #include "squashfs.h" 21 21 22 22 /* Read separately compressed datablock and memcopy into page cache */ 23 - int squashfs_readpage_block(struct page *page, u64 block, int bsize) 23 + int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) 24 24 { 25 25 struct inode *i = page->mapping->host; 26 26 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, ··· 31 31 ERROR("Unable to read page, block %llx, size %x\n", block, 32 32 bsize); 33 33 else 34 - squashfs_copy_cache(page, buffer, buffer->length, 0); 34 + squashfs_copy_cache(page, buffer, expected, 0); 35 35 36 36 squashfs_cache_put(buffer); 37 37 return res;
+12 -12
fs/squashfs/file_direct.c
··· 21 21 #include "page_actor.h" 22 22 23 23 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, 24 - int pages, struct page **page); 24 + int pages, struct page **page, int bytes); 25 25 26 26 /* Read separately compressed datablock directly into page cache */ 27 - int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) 27 + int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, 28 + int expected) 28 29 29 30 { 30 31 struct inode *inode = target_page->mapping->host; ··· 84 83 * using an intermediate buffer. 85 84 */ 86 85 res = squashfs_read_cache(target_page, block, bsize, pages, 87 - page); 86 + page, expected); 88 87 if (res < 0) 89 88 goto mark_errored; 90 89 ··· 95 94 res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor); 96 95 if (res < 0) 97 96 goto mark_errored; 97 + 98 + if (res != expected) { 99 + res = -EIO; 100 + goto mark_errored; 101 + } 98 102 99 103 /* Last page may have trailing bytes not filled */ 100 104 bytes = res % PAGE_SIZE; ··· 144 138 145 139 146 140 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, 147 - int pages, struct page **page) 141 + int pages, struct page **page, int bytes) 148 142 { 149 143 struct inode *i = target_page->mapping->host; 150 144 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, 151 145 block, bsize); 152 - int bytes = buffer->length, res = buffer->error, n, offset = 0; 153 - void *pageaddr; 146 + int res = buffer->error, n, offset = 0; 154 147 155 148 if (res) { 156 149 ERROR("Unable to read page, block %llx, size %x\n", block, ··· 164 159 if (page[n] == NULL) 165 160 continue; 166 161 167 - pageaddr = kmap_atomic(page[n]); 168 - squashfs_copy_data(pageaddr, buffer, offset, avail); 169 - memset(pageaddr + avail, 0, PAGE_SIZE - avail); 170 - kunmap_atomic(pageaddr); 171 - flush_dcache_page(page[n]); 172 - SetPageUptodate(page[n]); 162 + squashfs_fill_page(page[n], buffer, offset, avail); 
173 163 unlock_page(page[n]); 174 164 if (page[n] != target_page) 175 165 put_page(page[n]);
+10 -7
fs/squashfs/fragment.c
··· 49 49 u64 *fragment_block) 50 50 { 51 51 struct squashfs_sb_info *msblk = sb->s_fs_info; 52 - int block = SQUASHFS_FRAGMENT_INDEX(fragment); 53 - int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); 54 - u64 start_block = le64_to_cpu(msblk->fragment_index[block]); 52 + int block, offset, size; 55 53 struct squashfs_fragment_entry fragment_entry; 56 - int size; 54 + u64 start_block; 55 + 56 + if (fragment >= msblk->fragments) 57 + return -EIO; 58 + block = SQUASHFS_FRAGMENT_INDEX(fragment); 59 + offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); 60 + 61 + start_block = le64_to_cpu(msblk->fragment_index[block]); 57 62 58 63 size = squashfs_read_metadata(sb, &fragment_entry, &start_block, 59 64 &offset, sizeof(fragment_entry)); ··· 66 61 return size; 67 62 68 63 *fragment_block = le64_to_cpu(fragment_entry.start_block); 69 - size = le32_to_cpu(fragment_entry.size); 70 - 71 - return size; 64 + return squashfs_block_size(fragment_entry.size); 72 65 } 73 66 74 67
+2 -1
fs/squashfs/squashfs.h
··· 67 67 u64, u64, unsigned int); 68 68 69 69 /* file.c */ 70 + void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); 70 71 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, 71 72 int); 72 73 73 74 /* file_xxx.c */ 74 - extern int squashfs_readpage_block(struct page *, u64, int); 75 + extern int squashfs_readpage_block(struct page *, u64, int, int); 75 76 76 77 /* id.c */ 77 78 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
+6
fs/squashfs/squashfs_fs.h
··· 129 129 130 130 #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) 131 131 132 + static inline int squashfs_block_size(__le32 raw) 133 + { 134 + u32 size = le32_to_cpu(raw); 135 + return (size >> 25) ? -EIO : size; 136 + } 137 + 132 138 /* 133 139 * Inode number ops. Inodes consist of a compressed block number, and an 134 140 * uncompressed offset within that block
+1
fs/squashfs/squashfs_fs_sb.h
··· 75 75 unsigned short block_log; 76 76 long long bytes_used; 77 77 unsigned int inodes; 78 + unsigned int fragments; 78 79 int xattr_ids; 79 80 }; 80 81 #endif
+3 -2
fs/squashfs/super.c
··· 175 175 msblk->inode_table = le64_to_cpu(sblk->inode_table_start); 176 176 msblk->directory_table = le64_to_cpu(sblk->directory_table_start); 177 177 msblk->inodes = le32_to_cpu(sblk->inodes); 178 + msblk->fragments = le32_to_cpu(sblk->fragments); 178 179 flags = le16_to_cpu(sblk->flags); 179 180 180 181 TRACE("Found valid superblock on %pg\n", sb->s_bdev); ··· 186 185 TRACE("Filesystem size %lld bytes\n", msblk->bytes_used); 187 186 TRACE("Block size %d\n", msblk->block_size); 188 187 TRACE("Number of inodes %d\n", msblk->inodes); 189 - TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); 188 + TRACE("Number of fragments %d\n", msblk->fragments); 190 189 TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); 191 190 TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); 192 191 TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); ··· 273 272 sb->s_export_op = &squashfs_export_ops; 274 273 275 274 handle_fragments: 276 - fragments = le32_to_cpu(sblk->fragments); 275 + fragments = msblk->fragments; 277 276 if (fragments == 0) 278 277 goto check_directory_table; 279 278
+3 -2
fs/xfs/libxfs/xfs_alloc.c
··· 223 223 error = xfs_btree_get_rec(cur, &rec, stat); 224 224 if (error || !(*stat)) 225 225 return error; 226 - if (rec->alloc.ar_blockcount == 0) 227 - goto out_bad_rec; 228 226 229 227 *bno = be32_to_cpu(rec->alloc.ar_startblock); 230 228 *len = be32_to_cpu(rec->alloc.ar_blockcount); 229 + 230 + if (*len == 0) 231 + goto out_bad_rec; 231 232 232 233 /* check for valid extent range, including overflow */ 233 234 if (!xfs_verify_agbno(mp, agno, *bno))
+4 -2
fs/xfs/libxfs/xfs_inode_buf.c
··· 731 731 if ((hint_flag || inherit_flag) && extsize == 0) 732 732 return __this_address; 733 733 734 - if (!(hint_flag || inherit_flag) && extsize != 0) 734 + /* free inodes get flags set to zero but extsize remains */ 735 + if (mode && !(hint_flag || inherit_flag) && extsize != 0) 735 736 return __this_address; 736 737 737 738 if (extsize_bytes % blocksize_bytes) ··· 778 777 if (hint_flag && cowextsize == 0) 779 778 return __this_address; 780 779 781 - if (!hint_flag && cowextsize != 0) 780 + /* free inodes get flags set to zero but cowextsize remains */ 781 + if (mode && !hint_flag && cowextsize != 0) 782 782 return __this_address; 783 783 784 784 if (hint_flag && rt_flag)
+14
include/linux/blk-mq.h
··· 287 287 288 288 void blk_mq_quiesce_queue_nowait(struct request_queue *q); 289 289 290 + /** 291 + * blk_mq_mark_complete() - Set request state to complete 292 + * @rq: request to set to complete state 293 + * 294 + * Returns true if request state was successfully set to complete. If 295 + * successful, the caller is responsibile for seeing this request is ended, as 296 + * blk_mq_complete_request will not work again. 297 + */ 298 + static inline bool blk_mq_mark_complete(struct request *rq) 299 + { 300 + return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) == 301 + MQ_RQ_IN_FLIGHT; 302 + } 303 + 290 304 /* 291 305 * Driver command data is immediately after the request. So subtract request 292 306 * size to get back to the original request, add request size to get the PDU.
+1 -1
include/linux/delayacct.h
··· 124 124 125 125 static inline void delayacct_blkio_end(struct task_struct *p) 126 126 { 127 - if (current->delays) 127 + if (p->delays) 128 128 __delayacct_blkio_end(p); 129 129 delayacct_clear_flag(DELAYACCT_PF_BLKIO); 130 130 }
+1
include/linux/eventfd.h
··· 11 11 12 12 #include <linux/fcntl.h> 13 13 #include <linux/wait.h> 14 + #include <linux/err.h> 14 15 15 16 /* 16 17 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+17
include/linux/mm.h
··· 452 452 unsigned long addr); 453 453 }; 454 454 455 + static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) 456 + { 457 + static const struct vm_operations_struct dummy_vm_ops = {}; 458 + 459 + vma->vm_mm = mm; 460 + vma->vm_ops = &dummy_vm_ops; 461 + INIT_LIST_HEAD(&vma->anon_vma_chain); 462 + } 463 + 464 + static inline void vma_set_anonymous(struct vm_area_struct *vma) 465 + { 466 + vma->vm_ops = NULL; 467 + } 468 + 469 + /* flush_tlb_range() takes a vma, not a mm, and can care about flags */ 470 + #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } 471 + 455 472 struct mmu_gather; 456 473 struct inode; 457 474
+1
include/linux/perf_event.h
··· 1130 1130 extern struct perf_callchain_entry * 1131 1131 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, 1132 1132 u32 max_stack, bool crosstask, bool add_mark); 1133 + extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); 1133 1134 extern int get_callchain_buffers(int max_stack); 1134 1135 extern void put_callchain_buffers(void); 1135 1136
+1
include/linux/ring_buffer.h
··· 165 165 void ring_buffer_record_off(struct ring_buffer *buffer); 166 166 void ring_buffer_record_on(struct ring_buffer *buffer); 167 167 int ring_buffer_record_is_on(struct ring_buffer *buffer); 168 + int ring_buffer_record_is_set_on(struct ring_buffer *buffer); 168 169 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); 169 170 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); 170 171
+7
include/linux/rtmutex.h
··· 106 106 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); 107 107 extern void rt_mutex_destroy(struct rt_mutex *lock); 108 108 109 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 110 + extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); 111 + #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) 112 + #else 109 113 extern void rt_mutex_lock(struct rt_mutex *lock); 114 + #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) 115 + #endif 116 + 110 117 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); 111 118 extern int rt_mutex_timed_lock(struct rt_mutex *lock, 112 119 struct hrtimer_sleeper *timeout);
+2
include/uapi/linux/perf_event.h
··· 143 143 PERF_SAMPLE_PHYS_ADDR = 1U << 19, 144 144 145 145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ 146 + 147 + __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, 146 148 }; 147 149 148 150 /*
+1 -1
ipc/sem.c
··· 2119 2119 } 2120 2120 2121 2121 do { 2122 - queue.status = -EINTR; 2122 + WRITE_ONCE(queue.status, -EINTR); 2123 2123 queue.sleeper = current; 2124 2124 2125 2125 __set_current_state(TASK_INTERRUPTIBLE);
+9 -4
kernel/auditsc.c
··· 1279 1279 break; 1280 1280 case AUDIT_KERN_MODULE: 1281 1281 audit_log_format(ab, "name="); 1282 - audit_log_untrustedstring(ab, context->module.name); 1283 - kfree(context->module.name); 1282 + if (context->module.name) { 1283 + audit_log_untrustedstring(ab, context->module.name); 1284 + kfree(context->module.name); 1285 + } else 1286 + audit_log_format(ab, "(null)"); 1287 + 1284 1288 break; 1285 1289 } 1286 1290 audit_log_end(ab); ··· 2415 2411 { 2416 2412 struct audit_context *context = audit_context(); 2417 2413 2418 - context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL); 2419 - strcpy(context->module.name, name); 2414 + context->module.name = kstrdup(name, GFP_KERNEL); 2415 + if (!context->module.name) 2416 + audit_log_lost("out of memory in __audit_log_kern_module"); 2420 2417 context->type = AUDIT_KERN_MODULE; 2421 2418 } 2422 2419
+1 -1
kernel/bpf/arraymap.c
··· 378 378 return -EINVAL; 379 379 380 380 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 381 - if (!value_type || value_size > map->value_size) 381 + if (!value_type || value_size != map->value_size) 382 382 return -EINVAL; 383 383 384 384 return 0;
+13 -1
kernel/bpf/btf.c
··· 1519 1519 { 1520 1520 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 1521 1521 const struct btf_member *member; 1522 + u32 meta_needed, last_offset; 1522 1523 struct btf *btf = env->btf; 1523 1524 u32 struct_size = t->size; 1524 - u32 meta_needed; 1525 1525 u16 i; 1526 1526 1527 1527 meta_needed = btf_type_vlen(t) * sizeof(*member); ··· 1534 1534 1535 1535 btf_verifier_log_type(env, t, NULL); 1536 1536 1537 + last_offset = 0; 1537 1538 for_each_member(i, t, member) { 1538 1539 if (!btf_name_offset_valid(btf, member->name_off)) { 1539 1540 btf_verifier_log_member(env, t, member, ··· 1556 1555 return -EINVAL; 1557 1556 } 1558 1557 1558 + /* 1559 + * ">" instead of ">=" because the last member could be 1560 + * "char a[0];" 1561 + */ 1562 + if (last_offset > member->offset) { 1563 + btf_verifier_log_member(env, t, member, 1564 + "Invalid member bits_offset"); 1565 + return -EINVAL; 1566 + } 1567 + 1559 1568 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) { 1560 1569 btf_verifier_log_member(env, t, member, 1561 1570 "Memmber bits_offset exceeds its struct size"); ··· 1573 1562 } 1574 1563 1575 1564 btf_verifier_log_member(env, t, member, NULL); 1565 + last_offset = member->offset; 1576 1566 } 1577 1567 1578 1568 return meta_needed;
+8 -2
kernel/events/core.c
··· 6343 6343 6344 6344 static struct perf_callchain_entry __empty_callchain = { .nr = 0, }; 6345 6345 6346 - static struct perf_callchain_entry * 6346 + struct perf_callchain_entry * 6347 6347 perf_callchain(struct perf_event *event, struct pt_regs *regs) 6348 6348 { 6349 6349 bool kernel = !event->attr.exclude_callchain_kernel; ··· 6382 6382 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 6383 6383 int size = 1; 6384 6384 6385 - data->callchain = perf_callchain(event, regs); 6385 + if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) 6386 + data->callchain = perf_callchain(event, regs); 6387 + 6386 6388 size += data->callchain->nr; 6387 6389 6388 6390 header->size += size * sizeof(u64); ··· 7337 7335 struct file *file, unsigned long offset, 7338 7336 unsigned long size) 7339 7337 { 7338 + /* d_inode(NULL) won't be equal to any mapped user-space file */ 7339 + if (!filter->path.dentry) 7340 + return false; 7341 + 7340 7342 if (d_inode(filter->path.dentry) != file_inode(file)) 7341 7343 return false; 7342 7344
+2 -4
kernel/fork.c
··· 312 312 { 313 313 struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 314 314 315 - if (vma) { 316 - vma->vm_mm = mm; 317 - INIT_LIST_HEAD(&vma->anon_vma_chain); 318 - } 315 + if (vma) 316 + vma_init(vma, mm); 319 317 return vma; 320 318 } 321 319
+7 -1
kernel/kthread.c
··· 325 325 task = create->result; 326 326 if (!IS_ERR(task)) { 327 327 static const struct sched_param param = { .sched_priority = 0 }; 328 + char name[TASK_COMM_LEN]; 328 329 329 - vsnprintf(task->comm, sizeof(task->comm), namefmt, args); 330 + /* 331 + * task is already visible to other tasks, so updating 332 + * COMM must be protected. 333 + */ 334 + vsnprintf(name, sizeof(name), namefmt, args); 335 + set_task_comm(task, name); 330 336 /* 331 337 * root may have changed our (kthreadd's) priority or CPU mask. 332 338 * The kernel thread should not inherit these properties.
+25 -4
kernel/locking/rtmutex.c
··· 1465 1465 rt_mutex_postunlock(&wake_q); 1466 1466 } 1467 1467 1468 + static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) 1469 + { 1470 + might_sleep(); 1471 + 1472 + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 1473 + rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); 1474 + } 1475 + 1476 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 1477 + /** 1478 + * rt_mutex_lock_nested - lock a rt_mutex 1479 + * 1480 + * @lock: the rt_mutex to be locked 1481 + * @subclass: the lockdep subclass 1482 + */ 1483 + void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) 1484 + { 1485 + __rt_mutex_lock(lock, subclass); 1486 + } 1487 + EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); 1488 + #endif 1489 + 1490 + #ifndef CONFIG_DEBUG_LOCK_ALLOC 1468 1491 /** 1469 1492 * rt_mutex_lock - lock a rt_mutex 1470 1493 * ··· 1495 1472 */ 1496 1473 void __sched rt_mutex_lock(struct rt_mutex *lock) 1497 1474 { 1498 - might_sleep(); 1499 - 1500 - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); 1501 - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); 1475 + __rt_mutex_lock(lock, 0); 1502 1476 } 1503 1477 EXPORT_SYMBOL_GPL(rt_mutex_lock); 1478 + #endif 1504 1479 1505 1480 /** 1506 1481 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+19 -3
kernel/memremap.c
··· 176 176 unsigned long pfn, pgoff, order; 177 177 pgprot_t pgprot = PAGE_KERNEL; 178 178 int error, nid, is_ram; 179 + struct dev_pagemap *conflict_pgmap; 179 180 180 181 align_start = res->start & ~(SECTION_SIZE - 1); 181 182 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) 182 183 - align_start; 184 + align_end = align_start + align_size - 1; 185 + 186 + conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL); 187 + if (conflict_pgmap) { 188 + dev_WARN(dev, "Conflicting mapping in same section\n"); 189 + put_dev_pagemap(conflict_pgmap); 190 + return ERR_PTR(-ENOMEM); 191 + } 192 + 193 + conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL); 194 + if (conflict_pgmap) { 195 + dev_WARN(dev, "Conflicting mapping in same section\n"); 196 + put_dev_pagemap(conflict_pgmap); 197 + return ERR_PTR(-ENOMEM); 198 + } 199 + 183 200 is_ram = region_intersects(align_start, align_size, 184 201 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); 185 202 ··· 216 199 217 200 mutex_lock(&pgmap_lock); 218 201 error = 0; 219 - align_end = align_start + align_size - 1; 220 202 221 203 foreach_order_pgoff(res, order, pgoff) { 222 204 error = __radix_tree_insert(&pgmap_radix, ··· 321 305 322 306 #ifdef CONFIG_DEV_PAGEMAP_OPS 323 307 DEFINE_STATIC_KEY_FALSE(devmap_managed_key); 324 - EXPORT_SYMBOL_GPL(devmap_managed_key); 308 + EXPORT_SYMBOL(devmap_managed_key); 325 309 static atomic_t devmap_enable; 326 310 327 311 /* ··· 362 346 } else if (!count) 363 347 __put_page(page); 364 348 } 365 - EXPORT_SYMBOL_GPL(__put_devmap_managed_page); 349 + EXPORT_SYMBOL(__put_devmap_managed_page); 366 350 #endif /* CONFIG_DEV_PAGEMAP_OPS */
+7 -1
kernel/sched/deadline.c
··· 2090 2090 sub_rq_bw(&next_task->dl, &rq->dl); 2091 2091 set_task_cpu(next_task, later_rq->cpu); 2092 2092 add_rq_bw(&next_task->dl, &later_rq->dl); 2093 + 2094 + /* 2095 + * Update the later_rq clock here, because the clock is used 2096 + * by the cpufreq_update_util() inside __add_running_bw(). 2097 + */ 2098 + update_rq_clock(later_rq); 2093 2099 add_running_bw(&next_task->dl, &later_rq->dl); 2094 - activate_task(later_rq, next_task, 0); 2100 + activate_task(later_rq, next_task, ENQUEUE_NOCLOCK); 2095 2101 ret = 1; 2096 2102 2097 2103 resched_curr(later_rq);
+2
kernel/sched/rt.c
··· 836 836 * can be time-consuming. Try to avoid it when possible. 837 837 */ 838 838 raw_spin_lock(&rt_rq->rt_runtime_lock); 839 + if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) 840 + rt_rq->rt_runtime = rt_b->rt_runtime; 839 841 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; 840 842 raw_spin_unlock(&rt_rq->rt_runtime_lock); 841 843 if (skip)
+1 -1
kernel/sched/topology.c
··· 47 47 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 48 48 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); 49 49 } 50 - if (!cpumask_test_cpu(cpu, sched_group_span(group))) { 50 + if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { 51 51 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); 52 52 } 53 53
+9 -1
kernel/stop_machine.c
··· 260 260 err = 0; 261 261 __cpu_stop_queue_work(stopper1, work1, &wakeq); 262 262 __cpu_stop_queue_work(stopper2, work2, &wakeq); 263 + /* 264 + * The waking up of stopper threads has to happen 265 + * in the same scheduling context as the queueing. 266 + * Otherwise, there is a possibility of one of the 267 + * above stoppers being woken up by another CPU, 268 + * and preempting us. This will cause us to n ot 269 + * wake up the other stopper forever. 270 + */ 271 + preempt_disable(); 263 272 unlock: 264 273 raw_spin_unlock(&stopper2->lock); 265 274 raw_spin_unlock_irq(&stopper1->lock); ··· 280 271 } 281 272 282 273 if (!err) { 283 - preempt_disable(); 284 274 wake_up_q(&wakeq); 285 275 preempt_enable(); 286 276 }
+16
kernel/trace/ring_buffer.c
··· 3227 3227 } 3228 3228 3229 3229 /** 3230 + * ring_buffer_record_is_set_on - return true if the ring buffer is set writable 3231 + * @buffer: The ring buffer to see if write is set enabled 3232 + * 3233 + * Returns true if the ring buffer is set writable by ring_buffer_record_on(). 3234 + * Note that this does NOT mean it is in a writable state. 3235 + * 3236 + * It may return true when the ring buffer has been disabled by 3237 + * ring_buffer_record_disable(), as that is a temporary disabling of 3238 + * the ring buffer. 3239 + */ 3240 + int ring_buffer_record_is_set_on(struct ring_buffer *buffer) 3241 + { 3242 + return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); 3243 + } 3244 + 3245 + /** 3230 3246 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 3231 3247 * @buffer: The ring buffer to stop writes to. 3232 3248 * @cpu: The CPU buffer to stop
+6
kernel/trace/trace.c
··· 1373 1373 1374 1374 arch_spin_lock(&tr->max_lock); 1375 1375 1376 + /* Inherit the recordable setting from trace_buffer */ 1377 + if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) 1378 + ring_buffer_record_on(tr->max_buffer.buffer); 1379 + else 1380 + ring_buffer_record_off(tr->max_buffer.buffer); 1381 + 1376 1382 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); 1377 1383 1378 1384 __update_max_tr(tr, tsk, cpu);
+13 -5
kernel/trace/trace_events_trigger.c
··· 679 679 goto out_free; 680 680 681 681 out_reg: 682 + /* Up the trigger_data count to make sure reg doesn't free it on failure */ 683 + event_trigger_init(trigger_ops, trigger_data); 682 684 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 683 685 /* 684 686 * The above returns on success the # of functions enabled, ··· 688 686 * Consider no functions a failure too. 689 687 */ 690 688 if (!ret) { 689 + cmd_ops->unreg(glob, trigger_ops, trigger_data, file); 691 690 ret = -ENOENT; 692 - goto out_free; 693 - } else if (ret < 0) 694 - goto out_free; 695 - ret = 0; 691 + } else if (ret > 0) 692 + ret = 0; 693 + 694 + /* Down the counter of trigger_data or free it if not used anymore */ 695 + event_trigger_free(trigger_ops, trigger_data); 696 696 out: 697 697 return ret; 698 698 ··· 1420 1416 goto out; 1421 1417 } 1422 1418 1419 + /* Up the trigger_data count to make sure nothing frees it on failure */ 1420 + event_trigger_init(trigger_ops, trigger_data); 1421 + 1423 1422 if (trigger) { 1424 1423 number = strsep(&trigger, ":"); 1425 1424 ··· 1473 1466 goto out_disable; 1474 1467 /* Just return zero, not the number of enabled functions */ 1475 1468 ret = 0; 1469 + event_trigger_free(trigger_ops, trigger_data); 1476 1470 out: 1477 1471 return ret; 1478 1472 ··· 1484 1476 out_free: 1485 1477 if (cmd_ops->set_filter) 1486 1478 cmd_ops->set_filter(NULL, trigger_data, NULL); 1487 - kfree(trigger_data); 1479 + event_trigger_free(trigger_ops, trigger_data); 1488 1480 kfree(enable_data); 1489 1481 goto out; 1490 1482 }
+13 -2
kernel/trace/trace_kprobe.c
··· 400 400 static int 401 401 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) 402 402 { 403 + struct event_file_link *link = NULL; 403 404 int ret = 0; 404 405 405 406 if (file) { 406 - struct event_file_link *link; 407 - 408 407 link = kmalloc(sizeof(*link), GFP_KERNEL); 409 408 if (!link) { 410 409 ret = -ENOMEM; ··· 422 423 ret = enable_kretprobe(&tk->rp); 423 424 else 424 425 ret = enable_kprobe(&tk->rp.kp); 426 + } 427 + 428 + if (ret) { 429 + if (file) { 430 + /* Notice the if is true on not WARN() */ 431 + if (!WARN_ON_ONCE(!link)) 432 + list_del_rcu(&link->list); 433 + kfree(link); 434 + tk->tp.flags &= ~TP_FLAG_TRACE; 435 + } else { 436 + tk->tp.flags &= ~TP_FLAG_PROFILE; 437 + } 425 438 } 426 439 out: 427 440 return ret;
+1 -1
lib/Kconfig.kasan
··· 5 5 6 6 config KASAN 7 7 bool "KASan: runtime memory debugger" 8 - depends on SLUB || (SLAB && !DEBUG_SLAB) 8 + depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) 9 9 select SLUB_DEBUG if SLUB 10 10 select CONSTRUCTORS 11 11 select STACKDEPOT
+2 -4
mm/memory.c
··· 1417 1417 do { 1418 1418 next = pmd_addr_end(addr, end); 1419 1419 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 1420 - if (next - addr != HPAGE_PMD_SIZE) { 1421 - VM_BUG_ON_VMA(vma_is_anonymous(vma) && 1422 - !rwsem_is_locked(&tlb->mm->mmap_sem), vma); 1420 + if (next - addr != HPAGE_PMD_SIZE) 1423 1421 __split_huge_pmd(vma, pmd, addr, false, NULL); 1424 - } else if (zap_huge_pmd(tlb, vma, pmd, addr)) 1422 + else if (zap_huge_pmd(tlb, vma, pmd, addr)) 1425 1423 goto next; 1426 1424 /* fall through */ 1427 1425 }
+1
mm/mempolicy.c
··· 2505 2505 2506 2506 /* Create pseudo-vma that contains just the policy */ 2507 2507 memset(&pvma, 0, sizeof(struct vm_area_struct)); 2508 + vma_init(&pvma, NULL); 2508 2509 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 2509 2510 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 2510 2511
+3
mm/mmap.c
··· 1778 1778 error = shmem_zero_setup(vma); 1779 1779 if (error) 1780 1780 goto free_vma; 1781 + } else { 1782 + vma_set_anonymous(vma); 1781 1783 } 1782 1784 1783 1785 vma_link(mm, vma, prev, rb_link, rb_parent); ··· 2985 2983 return -ENOMEM; 2986 2984 } 2987 2985 2986 + vma_set_anonymous(vma); 2988 2987 vma->vm_start = addr; 2989 2988 vma->vm_end = addr + len; 2990 2989 vma->vm_pgoff = pgoff;
+2
mm/nommu.c
··· 1145 1145 if (ret < len) 1146 1146 memset(base + ret, 0, len - ret); 1147 1147 1148 + } else { 1149 + vma_set_anonymous(vma); 1148 1150 } 1149 1151 1150 1152 return 0;
+1
mm/shmem.c
··· 1421 1421 { 1422 1422 /* Create a pseudo vma that just contains the policy */ 1423 1423 memset(vma, 0, sizeof(*vma)); 1424 + vma_init(vma, NULL); 1424 1425 /* Bias interleave by inode number to distribute better across nodes */ 1425 1426 vma->vm_pgoff = index + info->vfs_inode.i_ino; 1426 1427 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+9
mm/zswap.c
··· 1026 1026 ret = -ENOMEM; 1027 1027 goto reject; 1028 1028 } 1029 + 1030 + /* A second zswap_is_full() check after 1031 + * zswap_shrink() to make sure it's now 1032 + * under the max_pool_percent 1033 + */ 1034 + if (zswap_is_full()) { 1035 + ret = -ENOMEM; 1036 + goto reject; 1037 + } 1029 1038 } 1030 1039 1031 1040 /* allocate entry */
+10 -7
net/core/dev.c
··· 7607 7607 dev->tx_queue_len = new_len; 7608 7608 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 7609 7609 res = notifier_to_errno(res); 7610 - if (res) { 7611 - netdev_err(dev, 7612 - "refused to change device tx_queue_len\n"); 7613 - dev->tx_queue_len = orig_len; 7614 - return res; 7615 - } 7616 - return dev_qdisc_change_tx_queue_len(dev); 7610 + if (res) 7611 + goto err_rollback; 7612 + res = dev_qdisc_change_tx_queue_len(dev); 7613 + if (res) 7614 + goto err_rollback; 7617 7615 } 7618 7616 7619 7617 return 0; 7618 + 7619 + err_rollback: 7620 + netdev_err(dev, "refused to change device tx_queue_len\n"); 7621 + dev->tx_queue_len = orig_len; 7622 + return res; 7620 7623 } 7621 7624 7622 7625 /**
+7 -5
net/core/filter.c
··· 1712 1712 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, 1713 1713 u32, offset, void *, to, u32, len, u32, start_header) 1714 1714 { 1715 + u8 *end = skb_tail_pointer(skb); 1716 + u8 *net = skb_network_header(skb); 1717 + u8 *mac = skb_mac_header(skb); 1715 1718 u8 *ptr; 1716 1719 1717 - if (unlikely(offset > 0xffff || len > skb_headlen(skb))) 1720 + if (unlikely(offset > 0xffff || len > (end - mac))) 1718 1721 goto err_clear; 1719 1722 1720 1723 switch (start_header) { 1721 1724 case BPF_HDR_START_MAC: 1722 - ptr = skb_mac_header(skb) + offset; 1725 + ptr = mac + offset; 1723 1726 break; 1724 1727 case BPF_HDR_START_NET: 1725 - ptr = skb_network_header(skb) + offset; 1728 + ptr = net + offset; 1726 1729 break; 1727 1730 default: 1728 1731 goto err_clear; 1729 1732 } 1730 1733 1731 - if (likely(ptr >= skb_mac_header(skb) && 1732 - ptr + len <= skb_tail_pointer(skb))) { 1734 + if (likely(ptr >= mac && ptr + len <= end)) { 1733 1735 memcpy(to, ptr, len); 1734 1736 return 0; 1735 1737 }
+1 -1
net/core/lwt_bpf.c
··· 217 217 if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) 218 218 return -EINVAL; 219 219 220 - prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL); 220 + prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC); 221 221 if (!prog->name) 222 222 return -ENOMEM; 223 223
+2 -1
net/core/xdp.c
··· 348 348 rcu_read_lock(); 349 349 /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ 350 350 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); 351 - xa->zc_alloc->free(xa->zc_alloc, handle); 351 + if (!WARN_ON_ONCE(!xa)) 352 + xa->zc_alloc->free(xa->zc_alloc, handle); 352 353 rcu_read_unlock(); 353 354 default: 354 355 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
+6
net/dsa/slave.c
··· 1248 1248 { 1249 1249 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 1250 1250 1251 + if (!netif_running(slave_dev)) 1252 + return 0; 1253 + 1251 1254 netif_device_detach(slave_dev); 1252 1255 1253 1256 rtnl_lock(); ··· 1263 1260 int dsa_slave_resume(struct net_device *slave_dev) 1264 1261 { 1265 1262 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 1263 + 1264 + if (!netif_running(slave_dev)) 1265 + return 0; 1266 1266 1267 1267 netif_device_attach(slave_dev); 1268 1268
+2 -2
net/ipv4/fib_frontend.c
··· 292 292 return ip_hdr(skb)->daddr; 293 293 294 294 in_dev = __in_dev_get_rcu(dev); 295 - BUG_ON(!in_dev); 296 295 297 296 net = dev_net(dev); 298 297 299 298 scope = RT_SCOPE_UNIVERSE; 300 299 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { 300 + bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); 301 301 struct flowi4 fl4 = { 302 302 .flowi4_iif = LOOPBACK_IFINDEX, 303 303 .flowi4_oif = l3mdev_master_ifindex_rcu(dev), 304 304 .daddr = ip_hdr(skb)->saddr, 305 305 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 306 306 .flowi4_scope = scope, 307 - .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, 307 + .flowi4_mark = vmark ? skb->mark : 0, 308 308 }; 309 309 if (!fib_lookup(net, &fl4, &res, 0)) 310 310 return FIB_RES_PREFSRC(net, res);
+2 -1
net/ipv4/igmp.c
··· 1387 1387 /* 1388 1388 * A socket has joined a multicast group on device dev. 1389 1389 */ 1390 - void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) 1390 + static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, 1391 + unsigned int mode) 1391 1392 { 1392 1393 struct ip_mc_list *im; 1393 1394 #ifdef CONFIG_IP_MULTICAST
+3 -3
net/ipv4/inet_fragment.c
··· 158 158 { 159 159 struct inet_frag_queue *q; 160 160 161 - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) 162 - return NULL; 163 - 164 161 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); 165 162 if (!q) 166 163 return NULL; ··· 201 204 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) 202 205 { 203 206 struct inet_frag_queue *fq; 207 + 208 + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) 209 + return NULL; 204 210 205 211 rcu_read_lock(); 206 212
+5
net/ipv4/ip_fragment.c
··· 383 383 int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */ 384 384 385 385 if (i < next->len) { 386 + int delta = -next->truesize; 387 + 386 388 /* Eat head of the next overlapped fragment 387 389 * and leave the loop. The next ones cannot overlap. 388 390 */ 389 391 if (!pskb_pull(next, i)) 390 392 goto err; 393 + delta += next->truesize; 394 + if (delta) 395 + add_frag_mem_limit(qp->q.net, delta); 391 396 next->ip_defrag_offset += i; 392 397 qp->q.meat -= i; 393 398 if (next->ip_summed != CHECKSUM_UNNECESSARY)
+4
net/ipv4/tcp_bbr.c
··· 358 358 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */ 359 359 cwnd = (cwnd + 1) & ~1U; 360 360 361 + /* Ensure gain cycling gets inflight above BDP even for small BDPs. */ 362 + if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT) 363 + cwnd += 2; 364 + 361 365 return cwnd; 362 366 } 363 367
+8 -1
net/ipv4/tcp_input.c
··· 247 247 248 248 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) 249 249 { 250 - if (tcp_hdr(skb)->cwr) 250 + if (tcp_hdr(skb)->cwr) { 251 251 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 252 + 253 + /* If the sender is telling us it has entered CWR, then its 254 + * cwnd may be very low (even just 1 packet), so we should ACK 255 + * immediately. 256 + */ 257 + tcp_enter_quickack_mode((struct sock *)tp, 2); 258 + } 252 259 } 253 260 254 261 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
+3 -1
net/ipv6/esp6.c
··· 669 669 670 670 sg_init_table(sg, nfrags); 671 671 ret = skb_to_sgvec(skb, sg, 0, skb->len); 672 - if (unlikely(ret < 0)) 672 + if (unlikely(ret < 0)) { 673 + kfree(tmp); 673 674 goto out; 675 + } 674 676 675 677 skb->ip_summed = CHECKSUM_NONE; 676 678
+6 -5
net/ipv6/ip6_vti.c
··· 480 480 goto tx_err_dst_release; 481 481 } 482 482 483 - skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); 484 - skb_dst_set(skb, dst); 485 - skb->dev = skb_dst(skb)->dev; 486 - 487 483 mtu = dst_mtu(dst); 488 484 if (!skb->ignore_df && skb->len > mtu) { 489 485 skb_dst_update_pmtu(skb, mtu); ··· 494 498 htonl(mtu)); 495 499 } 496 500 497 - return -EMSGSIZE; 501 + err = -EMSGSIZE; 502 + goto tx_err_dst_release; 498 503 } 504 + 505 + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); 506 + skb_dst_set(skb, dst); 507 + skb->dev = skb_dst(skb)->dev; 499 508 500 509 err = dst_output(t->net, skb->sk, skb); 501 510 if (net_xmit_eval(err) == 0) {
+7
net/netlink/af_netlink.c
··· 63 63 #include <linux/hash.h> 64 64 #include <linux/genetlink.h> 65 65 #include <linux/net_namespace.h> 66 + #include <linux/nospec.h> 66 67 67 68 #include <net/net_namespace.h> 68 69 #include <net/netns/generic.h> ··· 680 679 681 680 if (protocol < 0 || protocol >= MAX_LINKS) 682 681 return -EPROTONOSUPPORT; 682 + protocol = array_index_nospec(protocol, MAX_LINKS); 683 683 684 684 netlink_lock_table(); 685 685 #ifdef CONFIG_MODULES ··· 1010 1008 if (err) 1011 1009 return err; 1012 1010 } 1011 + 1012 + if (nlk->ngroups == 0) 1013 + groups = 0; 1014 + else 1015 + groups &= (1ULL << nlk->ngroups) - 1; 1013 1016 1014 1017 bound = nlk->bound; 1015 1018 if (bound) {
+5 -5
net/openvswitch/meter.c
··· 211 211 if (!meter) 212 212 return ERR_PTR(-ENOMEM); 213 213 214 + meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]); 214 215 meter->used = div_u64(ktime_get_ns(), 1000 * 1000); 215 216 meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0; 216 217 meter->keep_stats = !a[OVS_METER_ATTR_CLEAR]; ··· 281 280 u32 meter_id; 282 281 bool failed; 283 282 283 + if (!a[OVS_METER_ATTR_ID]) { 284 + return -ENODEV; 285 + } 286 + 284 287 meter = dp_meter_create(a); 285 288 if (IS_ERR_OR_NULL(meter)) 286 289 return PTR_ERR(meter); ··· 299 294 ovs_lock(); 300 295 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 301 296 if (!dp) { 302 - err = -ENODEV; 303 - goto exit_unlock; 304 - } 305 - 306 - if (!a[OVS_METER_ATTR_ID]) { 307 297 err = -ENODEV; 308 298 goto exit_unlock; 309 299 }
+5
net/rds/ib_frmr.c
··· 344 344 struct rds_ib_frmr *frmr; 345 345 int ret; 346 346 347 + if (!ic) { 348 + /* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/ 349 + return ERR_PTR(-EOPNOTSUPP); 350 + } 351 + 347 352 do { 348 353 if (ibmr) 349 354 rds_ib_free_frmr(ibmr, true);
+2 -1
net/rds/ib_mr.h
··· 117 117 struct rds6_info_rdma_connection *iinfo6); 118 118 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); 119 119 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 120 - struct rds_sock *rs, u32 *key_ret); 120 + struct rds_sock *rs, u32 *key_ret, 121 + struct rds_connection *conn); 121 122 void rds_ib_sync_mr(void *trans_private, int dir); 122 123 void rds_ib_free_mr(void *trans_private, int invalidate); 123 124 void rds_ib_flush_mrs(void);
+13 -8
net/rds/ib_rdma.c
··· 549 549 } 550 550 551 551 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 552 - struct rds_sock *rs, u32 *key_ret) 552 + struct rds_sock *rs, u32 *key_ret, 553 + struct rds_connection *conn) 553 554 { 554 555 struct rds_ib_device *rds_ibdev; 555 556 struct rds_ib_mr *ibmr = NULL; 556 - struct rds_ib_connection *ic = rs->rs_conn->c_transport_data; 557 + struct rds_ib_connection *ic = NULL; 557 558 int ret; 558 559 559 560 rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]); ··· 562 561 ret = -ENODEV; 563 562 goto out; 564 563 } 564 + 565 + if (conn) 566 + ic = conn->c_transport_data; 565 567 566 568 if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) { 567 569 ret = -ENODEV; ··· 575 571 ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); 576 572 else 577 573 ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret); 578 - if (ibmr) 579 - rds_ibdev = NULL; 574 + if (IS_ERR(ibmr)) { 575 + ret = PTR_ERR(ibmr); 576 + pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret); 577 + } else { 578 + return ibmr; 579 + } 580 580 581 581 out: 582 - if (!ibmr) 583 - pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret); 584 - 585 582 if (rds_ibdev) 586 583 rds_ib_dev_put(rds_ibdev); 587 584 588 - return ibmr; 585 + return ERR_PTR(ret); 589 586 } 590 587 591 588 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
+8 -5
net/rds/rdma.c
··· 170 170 } 171 171 172 172 static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, 173 - u64 *cookie_ret, struct rds_mr **mr_ret) 173 + u64 *cookie_ret, struct rds_mr **mr_ret, 174 + struct rds_conn_path *cp) 174 175 { 175 176 struct rds_mr *mr = NULL, *found; 176 177 unsigned int nr_pages; ··· 270 269 * Note that dma_map() implies that pending writes are 271 270 * flushed to RAM, so no dma_sync is needed here. */ 272 271 trans_private = rs->rs_transport->get_mr(sg, nents, rs, 273 - &mr->r_key); 272 + &mr->r_key, 273 + cp ? cp->cp_conn : NULL); 274 274 275 275 if (IS_ERR(trans_private)) { 276 276 for (i = 0 ; i < nents; i++) ··· 332 330 sizeof(struct rds_get_mr_args))) 333 331 return -EFAULT; 334 332 335 - return __rds_rdma_map(rs, &args, NULL, NULL); 333 + return __rds_rdma_map(rs, &args, NULL, NULL, NULL); 336 334 } 337 335 338 336 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) ··· 356 354 new_args.cookie_addr = args.cookie_addr; 357 355 new_args.flags = args.flags; 358 356 359 - return __rds_rdma_map(rs, &new_args, NULL, NULL); 357 + return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL); 360 358 } 361 359 362 360 /* ··· 784 782 rm->m_rdma_cookie != 0) 785 783 return -EINVAL; 786 784 787 - return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); 785 + return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, 786 + &rm->rdma.op_rdma_mr, rm->m_conn_path); 788 787 } 789 788 790 789 /*
+4 -1
net/rds/rds.h
··· 470 470 struct scatterlist *op_sg; 471 471 } data; 472 472 }; 473 + 474 + struct rds_conn_path *m_conn_path; 473 475 }; 474 476 475 477 /* ··· 553 551 unsigned int avail); 554 552 void (*exit)(void); 555 553 void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg, 556 - struct rds_sock *rs, u32 *key_ret); 554 + struct rds_sock *rs, u32 *key_ret, 555 + struct rds_connection *conn); 557 556 void (*sync_mr)(void *trans_private, int direction); 558 557 void (*free_mr)(void *trans_private, int invalidate); 559 558 void (*flush_mrs)(void);
+7 -5
net/rds/send.c
··· 1255 1255 rs->rs_conn = conn; 1256 1256 } 1257 1257 1258 + if (conn->c_trans->t_mp_capable) 1259 + cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; 1260 + else 1261 + cpath = &conn->c_path[0]; 1262 + 1263 + rm->m_conn_path = cpath; 1264 + 1258 1265 /* Parse any control messages the user may have included. */ 1259 1266 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); 1260 1267 if (ret) { ··· 1284 1277 ret = -EOPNOTSUPP; 1285 1278 goto out; 1286 1279 } 1287 - 1288 - if (conn->c_trans->t_mp_capable) 1289 - cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; 1290 - else 1291 - cpath = &conn->c_path[0]; 1292 1280 1293 1281 if (rds_destroy_pending(conn)) { 1294 1282 ret = -EAGAIN;
+2 -2
net/rxrpc/call_accept.c
··· 116 116 while (*pp) { 117 117 parent = *pp; 118 118 xcall = rb_entry(parent, struct rxrpc_call, sock_node); 119 - if (user_call_ID < call->user_call_ID) 119 + if (user_call_ID < xcall->user_call_ID) 120 120 pp = &(*pp)->rb_left; 121 - else if (user_call_ID > call->user_call_ID) 121 + else if (user_call_ID > xcall->user_call_ID) 122 122 pp = &(*pp)->rb_right; 123 123 else 124 124 goto id_in_use;
+4 -1
net/socket.c
··· 89 89 #include <linux/magic.h> 90 90 #include <linux/slab.h> 91 91 #include <linux/xattr.h> 92 + #include <linux/nospec.h> 92 93 93 94 #include <linux/uaccess.h> 94 95 #include <asm/unistd.h> ··· 2530 2529 2531 2530 if (call < 1 || call > SYS_SENDMMSG) 2532 2531 return -EINVAL; 2532 + call = array_index_nospec(call, SYS_SENDMMSG + 1); 2533 2533 2534 2534 len = nargs[call]; 2535 2535 if (len > sizeof(a)) ··· 2697 2695 2698 2696 bool sock_is_registered(int family) 2699 2697 { 2700 - return family < NPROTO && rcu_access_pointer(net_families[family]); 2698 + return family < NPROTO && 2699 + rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]); 2701 2700 } 2702 2701 2703 2702 static int __init sock_init(void)
+1 -1
net/xdp/xsk_queue.h
··· 250 250 251 251 static inline bool xskq_empty_desc(struct xsk_queue *q) 252 252 { 253 - return xskq_nb_free(q, q->prod_tail, 1) == q->nentries; 253 + return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries; 254 254 } 255 255 256 256 void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
+3
net/xfrm/xfrm_policy.c
··· 2215 2215 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) 2216 2216 return make_blackhole(net, dst_orig->ops->family, dst_orig); 2217 2217 2218 + if (IS_ERR(dst)) 2219 + dst_release(dst_orig); 2220 + 2218 2221 return dst; 2219 2222 } 2220 2223 EXPORT_SYMBOL(xfrm_lookup_route);
+11 -7
net/xfrm/xfrm_user.c
··· 1057 1057 { 1058 1058 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); 1059 1059 1060 - if (nlsk) 1061 - return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); 1062 - else 1063 - return -1; 1060 + if (!nlsk) { 1061 + kfree_skb(skb); 1062 + return -EPIPE; 1063 + } 1064 + 1065 + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); 1064 1066 } 1065 1067 1066 1068 static inline unsigned int xfrm_spdinfo_msgsize(void) ··· 1713 1711 #ifdef CONFIG_XFRM_SUB_POLICY 1714 1712 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1715 1713 { 1716 - struct xfrm_userpolicy_type upt = { 1717 - .type = type, 1718 - }; 1714 + struct xfrm_userpolicy_type upt; 1715 + 1716 + /* Sadly there are two holes in struct xfrm_userpolicy_type */ 1717 + memset(&upt, 0, sizeof(upt)); 1718 + upt.type = type; 1719 1719 1720 1720 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); 1721 1721 }
+1 -1
tools/include/uapi/linux/btf.h
··· 76 76 */ 77 77 #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 78 78 #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 79 - #define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff) 79 + #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) 80 80 81 81 /* Attributes stored in the BTF_INT_ENCODING */ 82 82 #define BTF_INT_SIGNED (1 << 0)
+30 -16
tools/lib/bpf/btf.c
··· 2 2 /* Copyright (c) 2018 Facebook */ 3 3 4 4 #include <stdlib.h> 5 - #include <stdint.h> 6 5 #include <string.h> 7 6 #include <unistd.h> 8 7 #include <errno.h> ··· 31 32 struct btf_type **types; 32 33 const char *strings; 33 34 void *nohdr_data; 34 - uint32_t nr_types; 35 - uint32_t types_size; 36 - uint32_t data_size; 35 + __u32 nr_types; 36 + __u32 types_size; 37 + __u32 data_size; 37 38 int fd; 38 39 }; 40 + 41 + static const char *btf_name_by_offset(const struct btf *btf, __u32 offset) 42 + { 43 + if (offset < btf->hdr->str_len) 44 + return &btf->strings[offset]; 45 + else 46 + return NULL; 47 + } 39 48 40 49 static int btf_add_type(struct btf *btf, struct btf_type *t) 41 50 { 42 51 if (btf->types_size - btf->nr_types < 2) { 43 52 struct btf_type **new_types; 44 - u32 expand_by, new_size; 53 + __u32 expand_by, new_size; 45 54 46 55 if (btf->types_size == BTF_MAX_NR_TYPES) 47 56 return -E2BIG; ··· 76 69 static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log) 77 70 { 78 71 const struct btf_header *hdr = btf->hdr; 79 - u32 meta_left; 72 + __u32 meta_left; 80 73 81 74 if (btf->data_size < sizeof(struct btf_header)) { 82 75 elog("BTF header not found\n"); ··· 155 148 156 149 while (next_type < end_type) { 157 150 struct btf_type *t = next_type; 158 - uint16_t vlen = BTF_INFO_VLEN(t->info); 151 + __u16 vlen = BTF_INFO_VLEN(t->info); 159 152 int err; 160 153 161 154 next_type += sizeof(*t); ··· 194 187 return 0; 195 188 } 196 189 190 + const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 191 + { 192 + if (type_id > btf->nr_types) 193 + return NULL; 194 + 195 + return btf->types[type_id]; 196 + } 197 + 197 198 static bool btf_type_is_void(const struct btf_type *t) 198 199 { 199 200 return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD; ··· 212 197 return !t || btf_type_is_void(t); 213 198 } 214 199 215 - static int64_t btf_type_size(const struct btf_type *t) 200 + static __s64 btf_type_size(const struct btf_type *t) 216 
201 { 217 202 switch (BTF_INFO_KIND(t->info)) { 218 203 case BTF_KIND_INT: ··· 229 214 230 215 #define MAX_RESOLVE_DEPTH 32 231 216 232 - int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) 217 + __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) 233 218 { 234 219 const struct btf_array *array; 235 220 const struct btf_type *t; 236 - uint32_t nelems = 1; 237 - int64_t size = -1; 221 + __u32 nelems = 1; 222 + __s64 size = -1; 238 223 int i; 239 224 240 225 t = btf__type_by_id(btf, type_id); ··· 294 279 return type_id; 295 280 } 296 281 297 - int32_t btf__find_by_name(const struct btf *btf, const char *type_name) 282 + __s32 btf__find_by_name(const struct btf *btf, const char *type_name) 298 283 { 299 - uint32_t i; 284 + __u32 i; 300 285 301 286 if (!strcmp(type_name, "void")) 302 287 return 0; ··· 325 310 free(btf); 326 311 } 327 312 328 - struct btf *btf__new(uint8_t *data, uint32_t size, 329 - btf_print_fn_t err_log) 313 + struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log) 330 314 { 331 - uint32_t log_buf_size = 0; 315 + __u32 log_buf_size = 0; 332 316 char *log_buf = NULL; 333 317 struct btf *btf; 334 318 int err;
+6 -4
tools/lib/bpf/btf.h
··· 4 4 #ifndef __BPF_BTF_H 5 5 #define __BPF_BTF_H 6 6 7 - #include <stdint.h> 7 + #include <linux/types.h> 8 8 9 9 #define BTF_ELF_SEC ".BTF" 10 10 11 11 struct btf; 12 + struct btf_type; 12 13 13 14 typedef int (*btf_print_fn_t)(const char *, ...) 14 15 __attribute__((format(printf, 1, 2))); 15 16 16 17 void btf__free(struct btf *btf); 17 - struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log); 18 - int32_t btf__find_by_name(const struct btf *btf, const char *type_name); 19 - int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id); 18 + struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log); 19 + __s32 btf__find_by_name(const struct btf *btf, const char *type_name); 20 + const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); 21 + __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); 20 22 int btf__resolve_type(const struct btf *btf, __u32 type_id); 21 23 int btf__fd(const struct btf *btf); 22 24 const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
+47 -42
tools/lib/bpf/libbpf.c
··· 37 37 #include <linux/err.h> 38 38 #include <linux/kernel.h> 39 39 #include <linux/bpf.h> 40 + #include <linux/btf.h> 40 41 #include <linux/list.h> 41 42 #include <linux/limits.h> 42 43 #include <sys/stat.h> ··· 171 170 size_t offset; 172 171 int map_ifindex; 173 172 struct bpf_map_def def; 174 - uint32_t btf_key_type_id; 175 - uint32_t btf_value_type_id; 173 + __u32 btf_key_type_id; 174 + __u32 btf_value_type_id; 176 175 void *priv; 177 176 bpf_map_clear_priv_t clear_priv; 178 177 }; ··· 970 969 971 970 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 972 971 { 972 + const struct btf_type *container_type; 973 + const struct btf_member *key, *value; 973 974 struct bpf_map_def *def = &map->def; 974 975 const size_t max_name = 256; 975 - int64_t key_size, value_size; 976 - int32_t key_id, value_id; 977 - char name[max_name]; 976 + char container_name[max_name]; 977 + __s64 key_size, value_size; 978 + __s32 container_id; 978 979 979 - /* Find key type by name from BTF */ 980 - if (snprintf(name, max_name, "%s_key", map->name) == max_name) { 981 - pr_warning("map:%s length of BTF key_type:%s_key is too long\n", 980 + if (snprintf(container_name, max_name, "____btf_map_%s", map->name) == 981 + max_name) { 982 + pr_warning("map:%s length of '____btf_map_%s' is too long\n", 982 983 map->name, map->name); 983 984 return -EINVAL; 984 985 } 985 986 986 - key_id = btf__find_by_name(btf, name); 987 - if (key_id < 0) { 988 - pr_debug("map:%s key_type:%s cannot be found in BTF\n", 989 - map->name, name); 990 - return key_id; 987 + container_id = btf__find_by_name(btf, container_name); 988 + if (container_id < 0) { 989 + pr_debug("map:%s container_name:%s cannot be found in BTF. 
Missing BPF_ANNOTATE_KV_PAIR?\n", 990 + map->name, container_name); 991 + return container_id; 991 992 } 992 993 993 - key_size = btf__resolve_size(btf, key_id); 994 + container_type = btf__type_by_id(btf, container_id); 995 + if (!container_type) { 996 + pr_warning("map:%s cannot find BTF type for container_id:%u\n", 997 + map->name, container_id); 998 + return -EINVAL; 999 + } 1000 + 1001 + if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT || 1002 + BTF_INFO_VLEN(container_type->info) < 2) { 1003 + pr_warning("map:%s container_name:%s is an invalid container struct\n", 1004 + map->name, container_name); 1005 + return -EINVAL; 1006 + } 1007 + 1008 + key = (struct btf_member *)(container_type + 1); 1009 + value = key + 1; 1010 + 1011 + key_size = btf__resolve_size(btf, key->type); 994 1012 if (key_size < 0) { 995 - pr_warning("map:%s key_type:%s cannot get the BTF type_size\n", 996 - map->name, name); 1013 + pr_warning("map:%s invalid BTF key_type_size\n", 1014 + map->name); 997 1015 return key_size; 998 1016 } 999 1017 1000 1018 if (def->key_size != key_size) { 1001 - pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n", 1002 - map->name, name, (unsigned int)key_size, def->key_size); 1019 + pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", 1020 + map->name, (__u32)key_size, def->key_size); 1003 1021 return -EINVAL; 1004 1022 } 1005 1023 1006 - /* Find value type from BTF */ 1007 - if (snprintf(name, max_name, "%s_value", map->name) == max_name) { 1008 - pr_warning("map:%s length of BTF value_type:%s_value is too long\n", 1009 - map->name, map->name); 1010 - return -EINVAL; 1011 - } 1012 - 1013 - value_id = btf__find_by_name(btf, name); 1014 - if (value_id < 0) { 1015 - pr_debug("map:%s value_type:%s cannot be found in BTF\n", 1016 - map->name, name); 1017 - return value_id; 1018 - } 1019 - 1020 - value_size = btf__resolve_size(btf, value_id); 1024 + value_size = btf__resolve_size(btf, value->type); 1021 1025 if 
(value_size < 0) { 1022 - pr_warning("map:%s value_type:%s cannot get the BTF type_size\n", 1023 - map->name, name); 1026 + pr_warning("map:%s invalid BTF value_type_size\n", map->name); 1024 1027 return value_size; 1025 1028 } 1026 1029 1027 1030 if (def->value_size != value_size) { 1028 - pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n", 1029 - map->name, name, (unsigned int)value_size, def->value_size); 1031 + pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", 1032 + map->name, (__u32)value_size, def->value_size); 1030 1033 return -EINVAL; 1031 1034 } 1032 1035 1033 - map->btf_key_type_id = key_id; 1034 - map->btf_value_type_id = value_id; 1036 + map->btf_key_type_id = key->type; 1037 + map->btf_value_type_id = value->type; 1035 1038 1036 1039 return 0; 1037 1040 } ··· 2146 2141 return map ? map->name : NULL; 2147 2142 } 2148 2143 2149 - uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map) 2144 + __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) 2150 2145 { 2151 2146 return map ? map->btf_key_type_id : 0; 2152 2147 } 2153 2148 2154 - uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map) 2149 + __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) 2155 2150 { 2156 2151 return map ? map->btf_value_type_id : 0; 2157 2152 } ··· 2338 2333 volatile struct perf_event_mmap_page *header = mem; 2339 2334 __u64 data_tail = header->data_tail; 2340 2335 __u64 data_head = header->data_head; 2336 + int ret = LIBBPF_PERF_EVENT_ERROR; 2341 2337 void *base, *begin, *end; 2342 - int ret; 2343 2338 2344 2339 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */ 2345 2340 if (data_head == data_tail)
+2 -2
tools/lib/bpf/libbpf.h
··· 254 254 int bpf_map__fd(struct bpf_map *map); 255 255 const struct bpf_map_def *bpf_map__def(struct bpf_map *map); 256 256 const char *bpf_map__name(struct bpf_map *map); 257 - uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map); 258 - uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map); 257 + __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); 258 + __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); 259 259 260 260 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); 261 261 int bpf_map__set_priv(struct bpf_map *map, void *priv,
+2 -2
tools/power/x86/turbostat/turbostat.8
··· 106 106 \fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software, reflecting what was requested. The hardware counters reflect what was actually achieved. 107 107 \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. 108 108 \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. 109 - \fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. 109 + \fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. 110 110 \fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms. 111 111 \fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz. 112 112 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. ··· 114 114 \fBCorWatt\fP Watts consumed by the core part of the package. 115 115 \fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors. 116 116 \fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors. 117 - \fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. 117 + \fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system. Note that the meaning of this field is model specific. For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits. 
Comparing PkgWatt and PkgTmp to system limits is necessary. 118 118 \fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM. 119 119 .fi 120 120 .SH TOO MUCH INFORMATION EXAMPLE
+55 -61
tools/power/x86/turbostat/turbostat.c
··· 1163 1163 if (!printed || !summary_only) 1164 1164 print_header("\t"); 1165 1165 1166 - if (topo.num_cpus > 1) 1167 - format_counters(&average.threads, &average.cores, 1168 - &average.packages); 1166 + format_counters(&average.threads, &average.cores, &average.packages); 1169 1167 1170 1168 printed = 1; 1171 1169 ··· 1690 1692 t->x2apic_id = edx; 1691 1693 1692 1694 if (debug && (t->apic_id != t->x2apic_id)) 1693 - fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); 1695 + fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); 1694 1696 } 1695 1697 1696 1698 /* ··· 2471 2473 2472 2474 void set_node_data(void) 2473 2475 { 2474 - char path[80]; 2475 - FILE *filep; 2476 - int pkg, node, cpu; 2476 + int pkg, node, lnode, cpu, cpux; 2477 + int cpu_count; 2477 2478 2478 - struct pkg_node_info { 2479 - int count; 2480 - int min; 2481 - } *pni; 2479 + /* initialize logical_node_id */ 2480 + for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) 2481 + cpus[cpu].logical_node_id = -1; 2482 2482 2483 - pni = calloc(topo.num_packages, sizeof(struct pkg_node_info)); 2484 - if (!pni) 2485 - err(1, "calloc pkg_node_count"); 2486 - 2487 - for (pkg = 0; pkg < topo.num_packages; pkg++) 2488 - pni[pkg].min = topo.num_cpus; 2489 - 2490 - for (node = 0; node <= topo.max_node_num; node++) { 2491 - /* find the "first" cpu in the node */ 2492 - sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node); 2493 - filep = fopen(path, "r"); 2494 - if (!filep) 2495 - continue; 2496 - fscanf(filep, "%d", &cpu); 2497 - fclose(filep); 2498 - 2499 - pkg = cpus[cpu].physical_package_id; 2500 - pni[pkg].count++; 2501 - 2502 - if (node < pni[pkg].min) 2503 - pni[pkg].min = node; 2483 + cpu_count = 0; 2484 + for (pkg = 0; pkg < topo.num_packages; pkg++) { 2485 + lnode = 0; 2486 + for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) { 2487 + if (cpus[cpu].physical_package_id != pkg) 2488 + continue; 2489 + /* find a cpu with an unset 
logical_node_id */ 2490 + if (cpus[cpu].logical_node_id != -1) 2491 + continue; 2492 + cpus[cpu].logical_node_id = lnode; 2493 + node = cpus[cpu].physical_node_id; 2494 + cpu_count++; 2495 + /* 2496 + * find all matching cpus on this pkg and set 2497 + * the logical_node_id 2498 + */ 2499 + for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) { 2500 + if ((cpus[cpux].physical_package_id == pkg) && 2501 + (cpus[cpux].physical_node_id == node)) { 2502 + cpus[cpux].logical_node_id = lnode; 2503 + cpu_count++; 2504 + } 2505 + } 2506 + lnode++; 2507 + if (lnode > topo.nodes_per_pkg) 2508 + topo.nodes_per_pkg = lnode; 2509 + } 2510 + if (cpu_count >= topo.max_cpu_num) 2511 + break; 2504 2512 } 2505 - 2506 - for (pkg = 0; pkg < topo.num_packages; pkg++) 2507 - if (pni[pkg].count > topo.nodes_per_pkg) 2508 - topo.nodes_per_pkg = pni[0].count; 2509 - 2510 - /* Fake 1 node per pkg for machines that don't 2511 - * expose nodes and thus avoid -nan results 2512 - */ 2513 - if (topo.nodes_per_pkg == 0) 2514 - topo.nodes_per_pkg = 1; 2515 - 2516 - for (cpu = 0; cpu < topo.num_cpus; cpu++) { 2517 - pkg = cpus[cpu].physical_package_id; 2518 - node = cpus[cpu].physical_node_id; 2519 - cpus[cpu].logical_node_id = node - pni[pkg].min; 2520 - } 2521 - free(pni); 2522 - 2523 2513 } 2524 2514 2525 2515 int get_physical_node_id(struct cpu_topology *thiscpu) ··· 4457 4471 family = (fms >> 8) & 0xf; 4458 4472 model = (fms >> 4) & 0xf; 4459 4473 stepping = fms & 0xf; 4460 - if (family == 6 || family == 0xf) 4474 + if (family == 0xf) 4475 + family += (fms >> 20) & 0xff; 4476 + if (family >= 6) 4461 4477 model += ((fms >> 16) & 0xf) << 4; 4462 4478 4463 4479 if (!quiet) { ··· 4828 4840 siblings = get_thread_siblings(&cpus[i]); 4829 4841 if (siblings > max_siblings) 4830 4842 max_siblings = siblings; 4831 - if (cpus[i].thread_id != -1) 4843 + if (cpus[i].thread_id == 0) 4832 4844 topo.num_cores++; 4833 - 4834 - if (debug > 1) 4835 - fprintf(outf, 4836 - "cpu %d pkg %d node %d core %d thread 
%d\n", 4837 - i, cpus[i].physical_package_id, 4838 - cpus[i].physical_node_id, 4839 - cpus[i].physical_core_id, 4840 - cpus[i].thread_id); 4841 4845 } 4842 4846 4843 4847 topo.cores_per_node = max_core_id + 1; ··· 4855 4875 topo.threads_per_core = max_siblings; 4856 4876 if (debug > 1) 4857 4877 fprintf(outf, "max_siblings %d\n", max_siblings); 4878 + 4879 + if (debug < 1) 4880 + return; 4881 + 4882 + for (i = 0; i <= topo.max_cpu_num; ++i) { 4883 + fprintf(outf, 4884 + "cpu %d pkg %d node %d lnode %d core %d thread %d\n", 4885 + i, cpus[i].physical_package_id, 4886 + cpus[i].physical_node_id, 4887 + cpus[i].logical_node_id, 4888 + cpus[i].physical_core_id, 4889 + cpus[i].thread_id); 4890 + } 4891 + 4858 4892 } 4859 4893 4860 4894 void ··· 5096 5102 } 5097 5103 5098 5104 void print_version() { 5099 - fprintf(outf, "turbostat version 18.06.20" 5105 + fprintf(outf, "turbostat version 18.07.27" 5100 5106 " - Len Brown <lenb@kernel.org>\n"); 5101 5107 } 5102 5108
+9
tools/testing/selftests/bpf/bpf_helpers.h
··· 158 158 unsigned int numa_node; 159 159 }; 160 160 161 + #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \ 162 + struct ____btf_map_##name { \ 163 + type_key key; \ 164 + type_val value; \ 165 + }; \ 166 + struct ____btf_map_##name \ 167 + __attribute__ ((section(".maps." #name), used)) \ 168 + ____btf_map_##name = { } 169 + 161 170 static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = 162 171 (void *) BPF_FUNC_skb_load_bytes; 163 172 static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
+112 -2
tools/testing/selftests/bpf/test_btf.c
··· 247 247 .max_entries = 4, 248 248 }, 249 249 250 + { 251 + .descr = "struct test #3 Invalid member offset", 252 + .raw_types = { 253 + /* int */ /* [1] */ 254 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), 255 + /* int64 */ /* [2] */ 256 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), 257 + 258 + /* struct A { */ /* [3] */ 259 + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16), 260 + BTF_MEMBER_ENC(NAME_TBD, 1, 64), /* int m; */ 261 + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* int64 n; */ 262 + /* } */ 263 + BTF_END_RAW, 264 + }, 265 + .str_sec = "\0A\0m\0n\0", 266 + .str_sec_size = sizeof("\0A\0m\0n\0"), 267 + .map_type = BPF_MAP_TYPE_ARRAY, 268 + .map_name = "struct_test3_map", 269 + .key_size = sizeof(int), 270 + .value_size = 16, 271 + .key_type_id = 1, 272 + .value_type_id = 3, 273 + .max_entries = 4, 274 + .btf_load_err = true, 275 + .err_str = "Invalid member bits_offset", 276 + }, 277 + 250 278 /* Test member exceeds the size of struct. 251 279 * 252 280 * struct A { ··· 507 479 .key_size = sizeof(int), 508 480 .value_size = sizeof(void *) * 4, 509 481 .key_type_id = 1, 510 - .value_type_id = 4, 482 + .value_type_id = 5, 511 483 .max_entries = 4, 512 484 }, 513 485 ··· 1292 1264 .err_str = "type != 0", 1293 1265 }, 1294 1266 1267 + { 1268 + .descr = "arraymap invalid btf key (a bit field)", 1269 + .raw_types = { 1270 + /* int */ /* [1] */ 1271 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), 1272 + /* 32 bit int with 32 bit offset */ /* [2] */ 1273 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8), 1274 + BTF_END_RAW, 1275 + }, 1276 + .str_sec = "", 1277 + .str_sec_size = sizeof(""), 1278 + .map_type = BPF_MAP_TYPE_ARRAY, 1279 + .map_name = "array_map_check_btf", 1280 + .key_size = sizeof(int), 1281 + .value_size = sizeof(int), 1282 + .key_type_id = 2, 1283 + .value_type_id = 1, 1284 + .max_entries = 4, 1285 + .map_create_err = true, 1286 + }, 1287 + 1288 + { 1289 + .descr = "arraymap invalid btf key (!= 32 bits)", 1290 + .raw_types = { 1291 + 
/* int */ /* [1] */ 1292 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), 1293 + /* 16 bit int with 0 bit offset */ /* [2] */ 1294 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2), 1295 + BTF_END_RAW, 1296 + }, 1297 + .str_sec = "", 1298 + .str_sec_size = sizeof(""), 1299 + .map_type = BPF_MAP_TYPE_ARRAY, 1300 + .map_name = "array_map_check_btf", 1301 + .key_size = sizeof(int), 1302 + .value_size = sizeof(int), 1303 + .key_type_id = 2, 1304 + .value_type_id = 1, 1305 + .max_entries = 4, 1306 + .map_create_err = true, 1307 + }, 1308 + 1309 + { 1310 + .descr = "arraymap invalid btf value (too small)", 1311 + .raw_types = { 1312 + /* int */ /* [1] */ 1313 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), 1314 + BTF_END_RAW, 1315 + }, 1316 + .str_sec = "", 1317 + .str_sec_size = sizeof(""), 1318 + .map_type = BPF_MAP_TYPE_ARRAY, 1319 + .map_name = "array_map_check_btf", 1320 + .key_size = sizeof(int), 1321 + /* btf_value_size < map->value_size */ 1322 + .value_size = sizeof(__u64), 1323 + .key_type_id = 1, 1324 + .value_type_id = 1, 1325 + .max_entries = 4, 1326 + .map_create_err = true, 1327 + }, 1328 + 1329 + { 1330 + .descr = "arraymap invalid btf value (too big)", 1331 + .raw_types = { 1332 + /* int */ /* [1] */ 1333 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), 1334 + BTF_END_RAW, 1335 + }, 1336 + .str_sec = "", 1337 + .str_sec_size = sizeof(""), 1338 + .map_type = BPF_MAP_TYPE_ARRAY, 1339 + .map_name = "array_map_check_btf", 1340 + .key_size = sizeof(int), 1341 + /* btf_value_size > map->value_size */ 1342 + .value_size = sizeof(__u16), 1343 + .key_type_id = 1, 1344 + .value_type_id = 1, 1345 + .max_entries = 4, 1346 + .map_create_err = true, 1347 + }, 1348 + 1295 1349 }; /* struct btf_raw_test raw_tests[] */ 1296 1350 1297 1351 static const char *get_next_str(const char *start, const char *end) ··· 2133 2023 BTF_ENUM_ENC(NAME_TBD, 2), 2134 2024 BTF_ENUM_ENC(NAME_TBD, 3), 2135 2025 /* struct pprint_mapv */ /* [16] */ 2136 - BTF_TYPE_ENC(NAME_TBD, 
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 28), 2026 + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), 2137 2027 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ 2138 2028 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ 2139 2029 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
+2 -5
tools/testing/selftests/bpf/test_btf_haskv.c
··· 10 10 unsigned int v6; 11 11 }; 12 12 13 - typedef int btf_map_key; 14 - typedef struct ipv_counts btf_map_value; 15 - btf_map_key dumm_key; 16 - btf_map_value dummy_value; 17 - 18 13 struct bpf_map_def SEC("maps") btf_map = { 19 14 .type = BPF_MAP_TYPE_ARRAY, 20 15 .key_size = sizeof(int), 21 16 .value_size = sizeof(struct ipv_counts), 22 17 .max_entries = 4, 23 18 }; 19 + 20 + BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts); 24 21 25 22 struct dummy_tracepoint_args { 26 23 unsigned long long pad;
+28
tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
··· 1 + #!/bin/sh 2 + # description: Snapshot and tracing setting 3 + # flags: instance 4 + 5 + [ ! -f snapshot ] && exit_unsupported 6 + 7 + echo "Set tracing off" 8 + echo 0 > tracing_on 9 + 10 + echo "Allocate and take a snapshot" 11 + echo 1 > snapshot 12 + 13 + # Since trace buffer is empty, snapshot is also empty, but allocated 14 + grep -q "Snapshot is allocated" snapshot 15 + 16 + echo "Ensure keep tracing off" 17 + test `cat tracing_on` -eq 0 18 + 19 + echo "Set tracing on" 20 + echo 1 > tracing_on 21 + 22 + echo "Take a snapshot again" 23 + echo 1 > snapshot 24 + 25 + echo "Ensure keep tracing on" 26 + test `cat tracing_on` -eq 1 27 + 28 + exit 0
+16 -3
tools/usb/ffs-test.c
··· 44 44 45 45 /******************** Little Endian Handling ********************************/ 46 46 47 - #define cpu_to_le16(x) htole16(x) 48 - #define cpu_to_le32(x) htole32(x) 47 + /* 48 + * cpu_to_le16/32 are used when initializing structures, a context where a 49 + * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way 50 + * that allows them to be used when initializing structures. 51 + */ 52 + 53 + #if __BYTE_ORDER == __LITTLE_ENDIAN 54 + #define cpu_to_le16(x) (x) 55 + #define cpu_to_le32(x) (x) 56 + #else 57 + #define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)) 58 + #define cpu_to_le32(x) \ 59 + ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \ 60 + (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) 61 + #endif 62 + 49 63 #define le32_to_cpu(x) le32toh(x) 50 64 #define le16_to_cpu(x) le16toh(x) 51 - 52 65 53 66 /******************** Messages and Errors ***********************************/ 54 67
+2 -2
tools/virtio/asm/barrier.h
··· 13 13 } while (0); 14 14 /* Weak barriers should be used. If not - it's a bug */ 15 15 # define mb() abort() 16 - # define rmb() abort() 17 - # define wmb() abort() 16 + # define dma_rmb() abort() 17 + # define dma_wmb() abort() 18 18 #else 19 19 #error Please fill in barrier macros 20 20 #endif
+5
tools/virtio/linux/kernel.h
··· 52 52 return __kmalloc_fake; 53 53 return malloc(s); 54 54 } 55 + static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp) 56 + { 57 + return kmalloc(n * s, gfp); 58 + } 59 + 55 60 static inline void *kzalloc(size_t s, gfp_t gfp) 56 61 { 57 62 void *p = kmalloc(s, gfp);