···
 Macbook Pro 17", iMac 20" :
 	video=efifb:i20
 
+Accepted options:
+
+nowc	Don't map the framebuffer write combined. This can be used
+	to workaround side-effects and slowdowns on other CPU cores
+	when large amounts of console data are written.
+
 --
 Edgar Hucek <gimli@dark-green.com>
+1-1
Documentation/gpio/gpio-legacy.txt
···
 
 This is done by registering "ranges" of pins, which are essentially
 cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst
 
 While the pin allocation is totally managed by the pinctrl subsystem,
 gpio (under gpiolib) is still maintained by gpio drivers. It may happen
···
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->fullmm = !(start | (end+1));
···
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->range_start = start;
+		tlb->range_end = end;
+	}
+
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
+6-2
arch/ia64/include/asm/tlb.h
···
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
···
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force)
+		tlb->need_flush = 1;
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
 	 * tlb->end_addr.
+1-1
arch/mips/Kconfig
···
 
 config MIPS_MT_SMP
 	bool "MIPS MT SMP support (1 TC on each available VPE)"
-	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
+	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
+14-1
arch/mips/Makefile
···
 ifdef CONFIG_PHYSICAL_START
 load-y	= $(CONFIG_PHYSICAL_START)
 endif
-entry-y	= 0x$(shell $(NM) vmlinux 2>/dev/null \
+
+entry-noisa-y	= 0x$(shell $(NM) vmlinux 2>/dev/null \
 		| grep "\bkernel_entry\b" | cut -f1 -d \ )
+ifdef CONFIG_CPU_MICROMIPS
+  #
+  # Set the ISA bit, since the kernel_entry symbol in the ELF will have it
+  # clear which would lead to images containing addresses which bootloaders may
+  # jump to as MIPS32 code.
+  #
+  entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
+            $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
+            $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
+else
+  entry-y = $(entry-noisa-y)
+endif
 
 cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
 drivers-$(CONFIG_PCI)	+= arch/mips/pci/
···
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_ERR	(CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FUS3	(CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
+union cvmx_l2d_err {
+	uint64_t u64;
+	struct cvmx_l2d_err_s {
+		__BITFIELD_FIELD(uint64_t reserved_6_63:58,
+		__BITFIELD_FIELD(uint64_t bmhclsel:1,
+		__BITFIELD_FIELD(uint64_t ded_err:1,
+		__BITFIELD_FIELD(uint64_t sec_err:1,
+		__BITFIELD_FIELD(uint64_t ded_intena:1,
+		__BITFIELD_FIELD(uint64_t sec_intena:1,
+		__BITFIELD_FIELD(uint64_t ecc_ena:1,
+		;)))))))
+	} s;
+};
+
+union cvmx_l2d_fus3 {
+	uint64_t u64;
+	struct cvmx_l2d_fus3_s {
+		__BITFIELD_FIELD(uint64_t reserved_40_63:24,
+		__BITFIELD_FIELD(uint64_t ema_ctl:3,
+		__BITFIELD_FIELD(uint64_t reserved_34_36:3,
+		__BITFIELD_FIELD(uint64_t q3fus:34,
+		;))))
+	} s;
+};
+
+#endif
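Note (illustration only, not part of the diff): registers declared with __BITFIELD_FIELD, as in the new header above, are normally consumed by reading the raw 64-bit CSR into the union and then using the named bitfields. A minimal, hedged sketch, assuming the existing OCTEON accessor cvmx_read_csr():

	/* read the raw CSR and decode it through the endian-safe bitfields */
	union cvmx_l2d_err err;

	err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
	if (err.s.ecc_ena && err.s.sec_err)
		pr_info("octeon: L2D single-bit ECC error latched\n");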
···
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
-	complete(&cpu_running);
-	synchronise_count_slave(cpu);
-
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
 	set_cpu_core_map(cpu);
 
 	calculate_cpu_foreign_map();
+
+	complete(&cpu_running);
+	synchronise_count_slave(cpu);
 
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
···
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	.Lsyscall_exit_work
 
-	/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
-	li	r7,MSR_FP
+	andi.	r0,r8,MSR_FP
+	beq	2f
 #ifdef CONFIG_ALTIVEC
-	oris	r7,r7,MSR_VEC@h
+	andis.	r0,r8,MSR_VEC@h
+	bne	3f
 #endif
-	and	r0,r8,r7
-	cmpd	r0,r7
-	bne	.Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2:	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+	li	r10,MSR_RI
+	mtmsrd	r10,1		/* Restore RI */
+#endif
+	bl	restore_math
+#ifdef CONFIG_PPC_BOOK3S
+	li	r11,0
+	mtmsrd	r11,1
+#endif
+	ld	r8,_MSR(r1)
+	ld	r3,RESULT(r1)
+	li	r11,-MAX_ERRNO
 
-	cmpld	r3,r11
+3:	cmpld	r3,r11
 	ld	r5,_CCR(r1)
 	bge-	.Lsyscall_error
 .Lsyscall_error_cont:
···
 	neg	r3,r3
 	std	r5,_CCR(r1)
 	b	.Lsyscall_error_cont
-
-.Lsyscall_restore_math:
-	/*
-	 * Some initial tests from restore_math to avoid the heavyweight
-	 * C code entry and MSR manipulations.
-	 */
-	LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
-	and.	r0,r0,r8
-	bne	1f
-
-	ld	r7,PACACURRENT(r13)
-	lbz	r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
-	lbz	r6,THREAD+THREAD_LOAD_VEC(r7)
-	add	r0,r0,r6
-#endif
-	cmpdi	r0,0
-	beq	.Lsyscall_restore_math_cont
-
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-	li	r10,MSR_RI
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	bl	restore_math
-#ifdef CONFIG_PPC_BOOK3S
-	li	r11,0
-	mtmsrd	r11,1
-#endif
-	/* Restore volatiles, reload MSR from updated one */
-	ld	r8,_MSR(r1)
-	ld	r3,RESULT(r1)
-	li	r11,-MAX_ERRNO
-	b	.Lsyscall_restore_math_cont
 
 /* Traced system call support */
 .Lsyscall_dotrace:
-4
arch/powerpc/kernel/process.c
···
 {
 	unsigned long msr;
 
-	/*
-	 * Syscall exit makes a similar initial check before branching
-	 * to restore_math. Keep them in synch.
-	 */
 	if (!msr_tm_active(regs->msr) &&
 	    !current->thread.load_fp && !loadvec(current->thread))
 		return;
···
 	case SUN4V_CHIP_NIAGARA5:
 	case SUN4V_CHIP_SPARC_M6:
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
 	case SUN4V_CHIP_SPARC_SN:
 	case SUN4V_CHIP_SPARC64X:
 		rover_inc_table = niagara_iterate_method;
···
 		break;
 	case SUN4V_CHIP_SPARC_M7:
 	case SUN4V_CHIP_SPARC_SN:
-	default:
 		/* M7 and later support 52-bit virtual addresses. */
 		sparc64_va_hole_top =    0xfff8000000000000UL;
 		sparc64_va_hole_bottom = 0x0008000000000000UL;
 		max_phys_bits = 49;
+		break;
+	case SUN4V_CHIP_SPARC_M8:
+	default:
+		/* M8 and later support 54-bit virtual addresses.
+		 * However, restricting M8 and above VA bits to 53
+		 * as 4-level page table cannot support more than
+		 * 53 VA bits.
+		 */
+		sparc64_va_hole_top =    0xfff0000000000000UL;
+		sparc64_va_hole_bottom = 0x0010000000000000UL;
+		max_phys_bits = 51;
 		break;
 	}
 }
···
 	 */
 	switch (sun4v_chip_type) {
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
 	case SUN4V_CHIP_SPARC_SN:
 		pagecv_flag = 0x00;
 		break;
···
 	 */
 	switch (sun4v_chip_type) {
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
 	case SUN4V_CHIP_SPARC_SN:
 		page_cache4v_flag = _PAGE_CP_4V;
 		break;
+10-3
arch/um/include/asm/tlb.h
···
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
···
 	tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->start = start;
+		tlb->end = end;
+		tlb->need_flush = 1;
+	}
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
+10
arch/x86/include/asm/hypervisor.h
···
 
 	/* pin current vcpu to specified physical cpu (run rarely) */
 	void (*pin_vcpu)(int);
+
+	/* called during init_mem_mapping() to setup early mappings. */
+	void (*init_mem_mapping)(void);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
···
 extern void init_hypervisor_platform(void);
 extern bool hypervisor_x2apic_available(void);
 extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+	if (x86_hyper && x86_hyper->init_mem_mapping)
+		x86_hyper->init_mem_mapping();
+}
 #else
 static inline void init_hypervisor_platform(void) { }
 static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
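Note (illustration only, not part of the diff): a guest platform opts into the new callback simply by filling the extra field in its struct hypervisor_x86, and init_mem_mapping() then invokes it through hypervisor_init_mem_mapping() once the direct mapping is usable; the Xen HVM hunks below do exactly this. A minimal, hypothetical sketch:

	/* hypothetical example, not from the patch */
	static void __init example_init_mem_mapping(void)
	{
		/* switch early_memremap()-style users over to __va() here */
	}

	static const struct hypervisor_x86 x86_hyper_example = {
		.name             = "Example",
		.init_mem_mapping = example_init_mem_mapping,
	};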
+3
arch/x86/mm/init.c
···
 #include <asm/dma.h>		/* for MAX_DMA_PFN */
 #include <asm/microcode.h>
 #include <asm/kaslr.h>
+#include <asm/hypervisor.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
···
 
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
+
+	hypervisor_init_mem_mapping();
 
 	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
+37-22
arch/x86/xen/enlighten_hvm.c
···
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
 #include <asm/e820/api.h>
+#include <asm/early_ioremap.h>
 
 #include <asm/xen/cpuid.h>
 #include <asm/xen/hypervisor.h>
···
 #include "mmu.h"
 #include "smp.h"
 
-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
 {
 	struct xen_add_to_physmap xatp;
-	u64 pa;
-
-	if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
-		/*
-		 * Search for a free page starting at 4kB physical address.
-		 * Low memory is preferred to avoid an EPT large page split up
-		 * by the mapping.
-		 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
-		 * the BIOS used for HVM guests is well behaved and won't
-		 * clobber memory other than the first 4kB.
-		 */
-		for (pa = PAGE_SIZE;
-		     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
-		     memblock_is_reserved(pa);
-		     pa += PAGE_SIZE)
-			;
-
-		memblock_reserve(pa, PAGE_SIZE);
-		HYPERVISOR_shared_info = __va(pa);
-	}
 
 	xatp.domid = DOMID_SELF;
 	xatp.idx = 0;
 	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+	xatp.gpfn = shared_info_pfn;
 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
 		BUG();
+}
+
+static void __init reserve_shared_info(void)
+{
+	u64 pa;
+
+	/*
+	 * Search for a free page starting at 4kB physical address.
+	 * Low memory is preferred to avoid an EPT large page split up
+	 * by the mapping.
+	 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+	 * the BIOS used for HVM guests is well behaved and won't
+	 * clobber memory other than the first 4kB.
+	 */
+	for (pa = PAGE_SIZE;
+	     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+	     memblock_is_reserved(pa);
+	     pa += PAGE_SIZE)
+		;
+
+	shared_info_pfn = PHYS_PFN(pa);
+
+	memblock_reserve(pa, PAGE_SIZE);
+	HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+	early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
 }
 
 static void __init init_hvm_pv_info(void)
···
 
 	init_hvm_pv_info();
 
+	reserve_shared_info();
 	xen_hvm_init_shared_info();
 
 	/*
···
 	.init_platform          = xen_hvm_guest_init,
 	.pin_vcpu               = xen_pin_vcpu,
 	.x2apic_available       = xen_x2apic_para_available,
+	.init_mem_mapping	= xen_hvm_init_mem_mapping,
 };
 EXPORT_SYMBOL(x86_hyper_xen_hvm);
···
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_XTENSA_DEVICE_H
-#define _ASM_XTENSA_DEVICE_H
-
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_XTENSA_DEVICE_H */
-18
arch/xtensa/include/asm/param.h
···
-/*
- * include/asm-xtensa/param.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _XTENSA_PARAM_H
-#define _XTENSA_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ		CONFIG_HZ	/* internal timer frequency */
-# define USER_HZ	100		/* for user interfaces in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ)	/* frequnzy at which times() counts */
-#endif /* _XTENSA_PARAM_H */
···
 	clear_page_alias(kvaddr, paddr);
 	preempt_enable();
 }
+EXPORT_SYMBOL(clear_user_highpage);
 
 void copy_user_highpage(struct page *dst, struct page *src,
 			unsigned long vaddr, struct vm_area_struct *vma)
···
 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
 	preempt_enable();
 }
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(copy_user_highpage);
 
 /*
  * Any time the kernel writes to a user page cache page, or it is about to
···
 
 	/* There shouldn't be an entry in the cache for this page anymore. */
 }
-
+EXPORT_SYMBOL(flush_dcache_page);
 
 /*
  * For now, flush the whole cache. FIXME??
···
 	__flush_invalidate_dcache_all();
 	__invalidate_icache_all();
 }
+EXPORT_SYMBOL(local_flush_cache_range);
 
 /* 
  * Remove any entry in the cache for this page. 
···
 	__flush_invalidate_dcache_page_alias(virt, phys);
 	__invalidate_icache_page_alias(virt, phys);
 }
+EXPORT_SYMBOL(local_flush_cache_page);
 
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
···
 
 	flush_tlb_page(vma, addr);
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
 		unsigned long phys = page_to_phys(page);
···
 * flush_dcache_page() on the page.
 */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long vaddr, void *dst, const void *src,
+17-5
block/bfq-iosched.h
···
  *
  * bfq_sched_data is the basic scheduler queue.  It supports three
  * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup.  @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
  *
  * The supported ioprio_classes are the same as in CFQ, in descending
  * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
  * Requests from higher priority queues are served before all the
  * requests from lower priority queues; among requests of the same
  * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occurs
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed by
+ * in_service_entity is not a queue, then the in-service child entity
+ * of the entity pointed by in_service_entity becomes idle on
+ * expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entitities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
  */
 struct bfq_sched_data {
 	/* entity in service */
+81-65
block/bfq-wf2q.c
···
 
 /*
  * This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
+ * service, according to the restrictive definition of the field
+ * next_in_service. In particular, this function is invoked for an
+ * entity that is about to be set in service.
  *
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
+ * If entity is a queue, then the entity is no longer a candidate for
+ * next service according to the that definition, because entity is
+ * about to become the in-service queue. This function then returns
+ * true if entity is a queue.
  *
- * In contrast, the entity could stil be a candidate for next service
- * if it is not a queue, and has more than one child. In fact, even if
- * one of its children is about to be set in service, other children
- * may still be the next to serve. As a consequence, a non-queue
- * entity is not a candidate for next-service only if it has only one
- * child. And only if this condition holds, then the function returns
- * true for a non-queue entity.
+ * In contrast, entity could still be a candidate for next service if
+ * it is not a queue, and has more than one active child. In fact,
+ * even if one of its children is about to be set in service, other
+ * active children may still be the next to serve, for the parent
+ * entity, even according to the above definition. As a consequence, a
+ * non-queue entity is not a candidate for next-service only if it has
+ * only one active child. And only if this condition holds, then this
+ * function returns true for a non-queue entity.
  */
 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 {
···
 
 	bfqg = container_of(entity, struct bfq_group, entity);
 
+	/*
+	 * The field active_entities does not always contain the
+	 * actual number of active children entities: it happens to
+	 * not account for the in-service entity in case the latter is
+	 * removed from its active tree (which may get done after
+	 * invoking the function bfq_no_longer_next_in_service in
+	 * bfq_get_next_queue). Fortunately, here, i.e., while
+	 * bfq_no_longer_next_in_service is not yet completed in
+	 * bfq_get_next_queue, bfq_active_extract has not yet been
+	 * invoked, and thus active_entities still coincides with the
+	 * actual number of active entities.
+	 */
 	if (bfqg->active_entities == 1)
 		return true;
 
···
  * one of its children receives a new request.
  *
  * Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possible extracting it
+ * inserts entity into its active tree, ater possibly extracting it
 * from its idle tree.
  */
 static void __bfq_activate_entity(struct bfq_entity *entity,
···
 		entity->start = entity->finish;
 		/*
 		 * In addition, if the entity had more than one child
-		 * when set in service, then was not extracted from
+		 * when set in service, then it was not extracted from
 		 * the active tree. This implies that the position of
 		 * the entity in the active tree may need to be
 		 * changed now, because we have just updated the start
···
 		 * time in a moment (the requeueing is then, more
 		 * precisely, a repositioning in this case). To
 		 * implement this repositioning, we: 1) dequeue the
-		 * entity here, 2) update the finish time and
-		 * requeue the entity according to the new
-		 * timestamps below.
+		 * entity here, 2) update the finish time and requeue
+		 * the entity according to the new timestamps below.
 		 */
 		if (entity->tree)
 			bfq_active_extract(st, entity);
···
 
 
 /**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *			 and activate, requeue or reposition all ancestors
- *			 for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ *				  bfq_queue, and activate, requeue or reposition
+ *				  all ancestors for which such an update becomes
+ *				  necessary.
 * @entity: the entity to activate.
  * @non_blocking_wait_rq: true if this entity was waiting for a request
  * @requeue: true if this is a requeue, which implies that bfqq is
···
 * @ins_into_idle_tree: if false, the entity will not be put into the
 *		idle tree.
 *
- * Deactivates an entity, independently from its previous state. Must
+ * Deactivates an entity, independently of its previous state. Must
 * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * from that tree, and if necessary and allowed, puts it into the idle
 * tree.
 */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
···
 	st = bfq_entity_service_tree(entity);
 	is_in_service = entity == sd->in_service_entity;
 
-	if (is_in_service)
+	if (is_in_service) {
 		bfq_calc_finish(entity, entity->service);
+		sd->in_service_entity = NULL;
+	}
 
 	if (entity->tree == &st->active)
 		bfq_active_extract(st, entity);
···
 /**
  * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
  * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
  */
 static void bfq_deactivate_entity(struct bfq_entity *entity,
 				  bool ins_into_idle_tree,
···
 		 */
 		bfq_update_next_in_service(sd, NULL);
 
-		if (sd->next_in_service)
+		if (sd->next_in_service || sd->in_service_entity) {
 			/*
-			 * The parent entity is still backlogged,
-			 * because next_in_service is not NULL. So, no
-			 * further upwards deactivation must be
-			 * performed. Yet, next_in_service has
-			 * changed. Then the schedule does need to be
-			 * updated upwards.
+			 * The parent entity is still active, because
+			 * either next_in_service or in_service_entity
+			 * is not NULL. So, no further upwards
+			 * deactivation must be performed. Yet,
+			 * next_in_service has changed. Then the
+			 * schedule does need to be updated upwards.
+			 *
+			 * NOTE If in_service_entity is not NULL, then
+			 * next_in_service may happen to be NULL,
+			 * although the parent entity is evidently
+			 * active. This happens if 1) the entity
+			 * pointed by in_service_entity is the only
+			 * active entity in the parent entity, and 2)
+			 * according to the definition of
+			 * next_in_service, the in_service_entity
+			 * cannot be considered as
+			 * next_in_service. See the comments on the
+			 * definition of next_in_service for details.
 			 */
 			break;
+		}
 
 		/*
 		 * If we get here, then the parent is no more
···
 
 		/*
 		 * If entity is no longer a candidate for next
-		 * service, then we extract it from its active tree,
-		 * for the following reason. To further boost the
-		 * throughput in some special case, BFQ needs to know
-		 * which is the next candidate entity to serve, while
-		 * there is already an entity in service. In this
-		 * respect, to make it easy to compute/update the next
-		 * candidate entity to serve after the current
-		 * candidate has been set in service, there is a case
-		 * where it is necessary to extract the current
-		 * candidate from its service tree. Such a case is
-		 * when the entity just set in service cannot be also
-		 * a candidate for next service. Details about when
-		 * this conditions holds are reported in the comments
-		 * on the function bfq_no_longer_next_in_service()
-		 * invoked below.
+		 * service, then it must be extracted from its active
+		 * tree, so as to make sure that it won't be
+		 * considered when computing next_in_service. See the
+		 * comments on the function
+		 * bfq_no_longer_next_in_service() for details.
 		 */
 		if (bfq_no_longer_next_in_service(entity))
 			bfq_active_extract(bfq_entity_service_tree(entity),
 					   entity);
 
 		/*
-		 * For the same reason why we may have just extracted
-		 * entity from its active tree, we may need to update
-		 * next_in_service for the sched_data of entity too,
-		 * regardless of whether entity has been extracted.
-		 * In fact, even if entity has not been extracted, a
-		 * descendant entity may get extracted. Such an event
-		 * would cause a change in next_in_service for the
-		 * level of the descendant entity, and thus possibly
-		 * back to upper levels.
+		 * Even if entity is not to be extracted according to
+		 * the above check, a descendant entity may get
+		 * extracted in one of the next iterations of this
+		 * loop. Such an event could cause a change in
+		 * next_in_service for the level of the descendant
+		 * entity, and thus possibly back to this level.
 		 *
-		 * We cannot perform the resulting needed update
-		 * before the end of this loop, because, to know which
-		 * is the correct next-to-serve candidate entity for
-		 * each level, we need first to find the leaf entity
-		 * to set in service. In fact, only after we know
-		 * which is the next-to-serve leaf entity, we can
-		 * discover whether the parent entity of the leaf
-		 * entity becomes the next-to-serve, and so on.
+		 * However, we cannot perform the resulting needed
+		 * update of next_in_service for this level before the
+		 * end of the whole loop, because, to know which is
+		 * the correct next-to-serve candidate entity for each
+		 * level, we need first to find the leaf entity to set
+		 * in service. In fact, only after we know which is
+		 * the next-to-serve leaf entity, we can discover
+		 * whether the parent entity of the leaf entity
+		 * becomes the next-to-serve, and so on.
 		 */
-
 	}
 
 	bfqq = bfq_entity_to_bfqq(entity);
···
 #include <linux/serial_core.h>
 
 /*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
  * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
  * quirk detection in pci_mcfg.c.
···
 		goto done;
 	}
 
-	if (qdf2400_erratum_44_present(&table->header))
-		uart = "qdf2400_e44";
+	/*
+	 * If the E44 erratum is required, then we need to tell the pl011
+	 * driver to implement the work-around.
+	 *
+	 * The global variable is used by the probe function when it
+	 * creates the UARTs, whether or not they're used as a console.
+	 *
+	 * If the user specifies "traditional" earlycon, the qdf2400_e44
+	 * console name matches the EARLYCON_DECLARE() statement, and
+	 * SPCR is not used. Parameter "earlycon" is false.
+	 *
+	 * If the user specifies "SPCR" earlycon, then we need to update
+	 * the console name so that it also says "qdf2400_e44". Parameter
+	 * "earlycon" is true.
+	 *
+	 * For consistency, if we change the console name, then we do it
+	 * for everyone, not just earlycon.
+	 */
+	if (qdf2400_erratum_44_present(&table->header)) {
+		qdf2400_e44_present = true;
+		if (earlycon)
+			uart = "qdf2400_e44";
+	}
+
 	if (xgene_8250_erratum_present(table))
 		iotype = "mmio32";
 
+34-15
drivers/base/firmware_class.c
···
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
-#include <linux/swait.h>
 
 #include <generated/utsrelease.h>
 
···
  * state of the firmware loading.
  */
 struct fw_state {
-	struct swait_queue_head wq;
+	struct completion completion;
 	enum fw_status status;
 };
 
 static void fw_state_init(struct fw_state *fw_st)
 {
-	init_swait_queue_head(&fw_st->wq);
+	init_completion(&fw_st->completion);
 	fw_st->status = FW_STATUS_UNKNOWN;
 }
 
···
 {
 	long ret;
 
-	ret = swait_event_interruptible_timeout(fw_st->wq,
-				__fw_state_is_done(READ_ONCE(fw_st->status)),
-				timeout);
+	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
 	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
 		return -ENOENT;
 	if (!ret)
···
 	WRITE_ONCE(fw_st->status, status);
 
 	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-		swake_up(&fw_st->wq);
+		complete_all(&fw_st->completion);
 }
 
 #define fw_state_start(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_LOADING)
 #define fw_state_done(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait(fw_st)					\
 	__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
-
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st)	false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
 
 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
 {
 	return fw_st->status == status;
 }
+
+#define fw_state_is_aborted(fw_st)				\
+	__fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
 
 #define fw_state_aborted(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_ABORTED)
···
 	__fw_state_check(fw_st, FW_STATUS_DONE)
 #define fw_state_is_loading(fw_st)				\
 	__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st)				\
-	__fw_state_check(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait_timeout(fw_st, timeout)			\
 	__fw_state_wait_common(fw_st, timeout)
 
···
 	return 1; /* need to load */
 }
 
+/*
+ * Batched requests need only one wake, we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+	struct firmware_buf *buf;
+
+	/* Loaded directly? */
+	if (!fw || !fw->priv)
+		return;
+
+	buf = fw->priv;
+	if (!fw_state_is_aborted(&buf->fw_st))
+		fw_state_aborted(&buf->fw_st);
+}
+
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
···
 
  out:
 	if (ret < 0) {
+		fw_abort_batch_reqs(fw);
 		release_firmware(fw);
 		fw = NULL;
 	}
+61
drivers/block/sunvdc.c
···
 	printk(KERN_INFO "%s", version);
 }
 
+struct vdc_check_port_data {
+	int dev_no;
+	char *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+	struct vio_dev *vdev = to_vio_dev(dev);
+	struct vdc_check_port_data *port_data;
+
+	port_data = (struct vdc_check_port_data *)arg;
+
+	if ((vdev->dev_no == port_data->dev_no) &&
+	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
+	    dev_get_drvdata(dev)) {
+		/* This device has already been configured
+		 * by vdc_port_probe()
+		 */
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+	struct vdc_check_port_data port_data;
+	struct device *dev;
+
+	port_data.dev_no = vdev->dev_no;
+	port_data.type = (char *)&vdev->type;
+
+	dev = device_find_child(vdev->dev.parent, &port_data,
+				vdc_device_probed);
+
+	if (dev)
+		return true;
+
+	return false;
+}
+
 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
 	struct mdesc_handle *hp;
···
 	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
 		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
 		       vdev->dev_no);
+		goto err_out_release_mdesc;
+	}
+
+	/* Check if this device is part of an mpgroup */
+	if (vdc_port_mpgroup_check(vdev)) {
+		printk(KERN_WARNING
+			"VIO: Ignoring extra vdisk port %s",
+			dev_name(&vdev->dev));
 		goto err_out_release_mdesc;
 	}
 
···
 	if (err)
 		goto err_out_free_tx_ring;
 
+	/* Note that the device driver_data is used to determine
+	 * whether the port has been probed.
+	 */
 	dev_set_drvdata(&vdev->dev, port);
 
 	mdesc_release(hp);
···
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
 	print_once = true;
 #endif
-	pr_notice("random: %s called from %pF with crng_init=%d\n",
+	pr_notice("random: %s called from %pS with crng_init=%d\n",
 		  func_name, caller, crng_init);
 }
 
+10
drivers/cpuidle/cpuidle-powernv.c
···
 	return -1;
 }
 
+extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
 	struct device_node *power_mgt;
···
 	const char *names[CPUIDLE_STATE_MAX];
 	u32 has_stop_states = 0;
 	int i, rc;
+	u32 supported_flags = pnv_get_supported_cpuidle_states();
+
 
 	/* Currently we have snooze statically defined */
 
···
 	for (i = 0; i < dt_idle_states; i++) {
 		unsigned int exit_latency, target_residency;
 		bool stops_timebase = false;
+
+		/*
+		 * Skip the platform idle state whose flag isn't in
+		 * the supported_cpuidle_states flag mask.
+		 */
+		if ((flags[i] & supported_flags) != flags[i])
+			continue;
 		/*
 		 * If an idle state has exit latency beyond
 		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
+4-4
drivers/crypto/inside-secure/safexcel_hash.c
···
 	if (ret)
 		return ret;
 
-	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
-	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
-	for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
 		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
 		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
 			ctx->base.needs_inv = true;
 			break;
 		}
 	}
+
+	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
 
 	return 0;
 }
+3-2
drivers/dma-buf/sync_file.c
···
 {
 	struct sync_file *sync_file = file->private_data;
 
-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
 	dma_fence_put(sync_file->fence);
 	kfree(sync_file);
···
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
+1-1
drivers/gpu/drm/bridge/tc358767.c
···
 
 	/* port@2 is the output port */
 	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	/* Shut down GPIO is optional */
···
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
 	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
 	struct drm_framebuffer *fb;
 	int i;
 	int ret;
 
-	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int height = (i == 0) ? mode_cmd->height :
+				     DIV_ROUND_UP(mode_cmd->height, info->vsub);
+		unsigned long size = height * mode_cmd->pitches[i] +
+				    mode_cmd->offsets[i];
+
 		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
···
 		}
 
 		exynos_gem[i] = to_exynos_gem(obj);
+
+		if (size > exynos_gem[i]->size) {
+			i++;
+			ret = -EINVAL;
+			goto err;
+		}
 	}
 
 	fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
+22-5
drivers/gpu/drm/i915/gvt/execlist.c
···
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
···
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;
···
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* if workload->status is not successful means HW GPU
+		 * has occurred GPU hang or something wrong with i915/GVT,
+		 * and GVT won't inject context switch interrupt to guest.
+		 * So this error is a vGPU hang actually to the guest.
+		 * According to this we should emunlate a vGPU hang. If
+		 * there are pending workloads which are already submitted
+		 * from guest, we should clean them up like HW GPU does.
+		 *
+		 * if it is in middle of engine resetting, the pending
+		 * workloads won't be submitted to HW GPU and will be
+		 * cleaned up during the resetting process later, so doing
+		 * the workload clean up here doesn't have any impact.
+		 **/
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
 		goto out;
+	}
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
+10-1
drivers/gpu/drm/i915/gvt/firmware.c
···
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i;
+	int i, j;
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
···
 
 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
 		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
+
+	for (i = 0; i < num; i++, block++) {
+		for (j = 0; j < block->size; j += 4)
+			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+					block->offset) + j));
+	}
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+13-1
drivers/gpu/drm/i915/gvt/gvt.h
···
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
···
 	unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t   offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
···
 #define F_CMD_ACCESSED	(1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
+
+	struct gvt_mmio_block *mmio_block;
+	unsigned int num_mmio_block;
 
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 	unsigned int num_tracked_mmio;
+18-20
drivers/gpu/drm/i915/gvt/handlers.c
···
 	return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-	unsigned int device;
-	i915_reg_t   offset;
-	unsigned int size;
-	gvt_mmio_func read;
-	gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
 					      unsigned int offset)
 {
 	unsigned long device = intel_gvt_get_device_type(gvt);
-	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+	for (i = 0; i < num; i++, block++) {
 		if (!(device & block->device))
 			continue;
 		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
···
 	vfree(gvt->mmio.mmio_attribute);
 	gvt->mmio.mmio_attribute = NULL;
 }
+
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
 
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
···
 		if (ret)
 			goto err;
 	}
+
+	gvt->mmio.mmio_block = mmio_blocks;
+	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
 
 	gvt_dbg_mmio("traced %u virtual mmio registers\n",
 		     gvt->mmio.num_tracked_mmio);
···
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (WARN_ON(bytes > 8))
 		return -EINVAL;
 
 	/*
+2-1
drivers/gpu/drm/i915/gvt/scheduler.c
···
 
 	i915_gem_request_put(fetch_and_zero(&workload->req));
 
-	if (!workload->status && !vgpu->resetting) {
+	if (!workload->status && !(vgpu->resetting_eng &
+				   ENGINE_MASK(ring_id))) {
 		update_guest_context(workload);
 
 		for_each_set_bit(event, workload->pending_events,
···
 	}
 
 	/* Program the max register to clamp values > 1.0. */
+	i = lut_size - 1;
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
 		   drm_color_lut_extract(lut[i].red, 16));
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
···
 		const char *name, bool mandatory)
 {
 	struct device *dev = &pdev->dev;
-	struct clk *clk = devm_clk_get(dev, name);
+	struct clk *clk = msm_clk_get(pdev, name);
 	if (IS_ERR(clk) && mandatory) {
 		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 		return PTR_ERR(clk);
···
 	}
 
 	/* mandatory clocks: */
-	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
 	if (ret)
 		goto fail;
 
 	/* optional clocks: */
-	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
 	/* we need to set a default rate before enabling. Set a safe
 	 * rate first, then figure out hw revision, and then set a
···
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-	if (!vma->iova)
+	if (!aspace || !vma->iova)
 		return;
 
 	if (aspace->mmu) {
+2
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
···
 	/* Create output path objects for each VBIOS display path. */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		if (ver < 0x40) /* No support for chipsets prior to NV50. */
+			break;
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
+20-21
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
···
 static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int ret;
+	int ret, i;
 
 	ret = pm_runtime_get_sync(vop->dev);
 	if (ret < 0) {
···
 	}
 
 	memcpy(vop->regs, vop->regsbak, vop->len);
+	/*
+	 * We need to make sure that all windows are disabled before we
+	 * enable the crtc. Otherwise we might try to scan from a destroyed
+	 * buffer later.
+	 */
+	for (i = 0; i < vop->data->win_size; i++) {
+		struct vop_win *vop_win = &vop->win[i];
+		const struct vop_win_data *win = vop_win->data;
+
+		spin_lock(&vop->reg_lock);
+		VOP_WIN_SET(vop, win, enable, 0);
+		spin_unlock(&vop->reg_lock);
+	}
+
 	vop_cfg_done(vop);
 
 	/*
···
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int i;
 
 	WARN_ON(vop->event);
 
 	rockchip_drm_psr_deactivate(&vop->crtc);
-
-	/*
-	 * We need to make sure that all windows are disabled before we
-	 * disable that crtc. Otherwise we might try to scan from a destroyed
-	 * buffer later.
-	 */
-	for (i = 0; i < vop->data->win_size; i++) {
-		struct vop_win *vop_win = &vop->win[i];
-		const struct vop_win_data *win = vop_win->data;
-
-		spin_lock(&vop->reg_lock);
-		VOP_WIN_SET(vop, win, enable, 0);
-		spin_unlock(&vop->reg_lock);
-	}
-
-	vop_cfg_done(vop);
 
 	drm_crtc_vblank_off(crtc);
 
···
 	 * Src.x1 can be odd when do clip, but yuv plane start point
 	 * need align with 2 pixel.
 	 */
-	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
 		return -EINVAL;
+	}
 
 	return 0;
 }
···
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, format, format);
-	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
 	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
 	if (is_yuv_support(fb->format->format)) {
 		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
···
 		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
 		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
 		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
 	}
 
···
 	select DRM_PANEL
 	select VIDEOMODE_HELPERS
 	select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-	default y
 
 	help
 	  Enable support for the on-chip display controller on
+1-1
drivers/i2c/busses/Kconfig
···
 
 config I2C_VERSATILE
 	tristate "ARM Versatile/Realview I2C bus support"
-	depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+	depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
 	select I2C_ALGOBIT
 	help
 	  Say yes if you want to support the I2C serial bus on ARMs Versatile
+5-1
drivers/i2c/busses/i2c-designware-platdrv.c
···
 	}
 
 	acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+	/* Some broken DSTDs use 1MiHz instead of 1MHz */
+	if (acpi_speed == 1048576)
+		acpi_speed = 1000000;
 	/*
 	 * Find bus speed from the "clock-frequency" device property, ACPI
 	 * or by using fast mode if neither is set.
···
 	if (dev->clk_freq != 100000 && dev->clk_freq != 400000
 	    && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
 		dev_err(&pdev->dev,
-			"Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+			"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+			dev->clk_freq);
 		ret = -EINVAL;
 		goto exit_reset;
 	}
+15-4
drivers/i2c/i2c-core-acpi.c
···
 		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }
 
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+		      struct i2c_client *client)
+{
+	if (!(client && matches))
+		return NULL;
+
+	return acpi_match_device(matches, &client->dev);
+}
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
 					 void *data, void **return_value)
 {
···
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
 
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
 {
 	struct i2c_adapter *adapter = i2c_verify_adapter(dev);
 
···
 	return ACPI_HANDLE(dev) == (acpi_handle)data;
 }
 
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
 {
 	return ACPI_COMPANION(dev) == data;
 }
···
 	struct device *dev;
 
 	dev = bus_find_device(&i2c_bus_type, NULL, handle,
-			      i2c_acpi_match_adapter);
+			      i2c_acpi_find_match_adapter);
 	return dev ? i2c_verify_adapter(dev) : NULL;
 }
···
 {
 	struct device *dev;
 
-	dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+	dev = bus_find_device(&i2c_bus_type, NULL, adev,
+			      i2c_acpi_find_match_device);
 	return dev ? i2c_verify_client(dev) : NULL;
 }
 
+1
drivers/i2c/i2c-core-base.c
···357357 * Tree match table entry is supplied for the probing device.358358 */359359 if (!driver->id_table &&360360+ !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&360361 !i2c_of_match_device(dev->driver->of_match_table, client))361362 return -ENODEV;362363
···8383 different sets of pins at run-time.84848585 This driver can also be built as a module. If so, the module will be8686- called pinctrl-i2cmux.8686+ called i2c-mux-pinctrl.87878888config I2C_MUX_REG8989 tristate "Register-based I2C multiplexer"
···22222323#include <linux/iio/iio.h>2424#include <linux/iio/driver.h>2525+#include <linux/iopoll.h>25262627#define ASPEED_RESOLUTION_BITS 102728#define ASPEED_CLOCKS_PER_SAMPLE 12···39384039#define ASPEED_ENGINE_ENABLE BIT(0)41404141+#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)4242+4343+#define ASPEED_ADC_INIT_POLLING_TIME 5004444+#define ASPEED_ADC_INIT_TIMEOUT 5000004545+4246struct aspeed_adc_model_data {4347 const char *model_name;4448 unsigned int min_sampling_rate; // Hz4549 unsigned int max_sampling_rate; // Hz4650 unsigned int vref_voltage; // mV5151+ bool wait_init_sequence;4752};48534954struct aspeed_adc_data {···218211 goto scaler_error;219212 }220213214214+ model_data = of_device_get_match_data(&pdev->dev);215215+216216+ if (model_data->wait_init_sequence) {217217+ /* Enable engine in normal mode. */218218+ writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,219219+ data->base + ASPEED_REG_ENGINE_CONTROL);220220+221221+ /* Wait for initial sequence complete. */222222+ ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,223223+ adc_engine_control_reg_val,224224+ adc_engine_control_reg_val &225225+ ASPEED_ADC_CTRL_INIT_RDY,226226+ ASPEED_ADC_INIT_POLLING_TIME,227227+ ASPEED_ADC_INIT_TIMEOUT);228228+ if (ret)229229+ goto scaler_error;230230+ }231231+221232 /* Start all channels in normal mode. */222233 ret = clk_prepare_enable(data->clk_scaler->clk);223234 if (ret)···299274 .vref_voltage = 1800, // mV300275 .min_sampling_rate = 1,301276 .max_sampling_rate = 1000000,277277+ .wait_init_sequence = true,302278};303279304280static const struct of_device_id aspeed_adc_matches[] = {
+41-1
drivers/iio/adc/axp288_adc.c
···2828#include <linux/iio/driver.h>29293030#define AXP288_ADC_EN_MASK 0xF13131+#define AXP288_ADC_TS_PIN_GPADC 0xF23232+#define AXP288_ADC_TS_PIN_ON 0xF331333234enum axp288_adc_id {3335 AXP288_ADC_TS,···123121 return IIO_VAL_INT;124122}125123124124+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,125125+ unsigned long address)126126+{127127+ int ret;128128+129129+ /* channels other than GPADC do not need to switch TS pin */130130+ if (address != AXP288_GP_ADC_H)131131+ return 0;132132+133133+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);134134+ if (ret)135135+ return ret;136136+137137+ /* When switching to the GPADC pin give things some time to settle */138138+ if (mode == AXP288_ADC_TS_PIN_GPADC)139139+ usleep_range(6000, 10000);140140+141141+ return 0;142142+}143143+126144static int axp288_adc_read_raw(struct iio_dev *indio_dev,127145 struct iio_chan_spec const *chan,128146 int *val, int *val2, long mask)···153131 mutex_lock(&indio_dev->mlock);154132 switch (mask) {155133 case IIO_CHAN_INFO_RAW:134134+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,135135+ chan->address)) {136136+ dev_err(&indio_dev->dev, "GPADC mode\n");137137+ ret = -EINVAL;138138+ break;139139+ }156140 ret = axp288_adc_read_channel(val, chan->address, info->regmap);141141+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,142142+ chan->address))143143+ dev_err(&indio_dev->dev, "TS pin restore\n");157144 break;158145 default:159146 ret = -EINVAL;···170139 mutex_unlock(&indio_dev->mlock);171140172141 return ret;142142+}143143+144144+static int axp288_adc_set_state(struct regmap *regmap)145145+{146146+ /* ADC should be always enabled for internal FG to function */147147+ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))148148+ return -EIO;149149+150150+ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);173151}174152175153static const struct iio_info axp288_adc_iio_info = {···209169 * Set ADC to enabled state at all time, including system suspend.210170 * otherwise internal fuel gauge functionality may be affected.211171 */212212- ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);172172+ ret = axp288_adc_set_state(axp20x->regmap);213173 if (ret) {214174 dev_err(&pdev->dev, "unable to enable ADC device\n");215175 return ret;
···511511 case IB_CM_REQ_RECEIVED:512512 return ipoib_cm_req_handler(cm_id, event);513513 case IB_CM_DREQ_RECEIVED:514514- p = cm_id->context;515514 ib_send_cm_drep(cm_id, NULL, 0);516515 /* Fall through */517516 case IB_CM_REJ_RECEIVED:
···256256257257 ++dev->stats.rx_packets;258258 dev->stats.rx_bytes += skb->len;259259+ if (skb->pkt_type == PACKET_MULTICAST)260260+ dev->stats.multicast++;259261260262 skb->dev = dev;261263 if ((dev->features & NETIF_F_RXCSUM) &&···711709 return pending;712710}713711712712+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,713713+ struct ib_qp *qp,714714+ enum ib_qp_state new_state)715715+{716716+ struct ib_qp_attr qp_attr;717717+ struct ib_qp_init_attr query_init_attr;718718+ int ret;719719+720720+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);721721+ if (ret) {722722+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);723723+ return;724724+ }725725+ /* print according to the new-state and the previous state.*/726726+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)727727+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");728728+ else729729+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",730730+ new_state, qp_attr.qp_state);731731+}732732+714733int ipoib_ib_dev_stop_default(struct net_device *dev)715734{716735 struct ipoib_dev_priv *priv = ipoib_priv(dev);···751728 */752729 qp_attr.qp_state = IB_QPS_ERR;753730 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))754754- ipoib_warn(priv, "Failed to modify QP to ERROR state\n");731731+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);755732756733 /* Wait for all sends and receives to complete */757734 begin = jiffies;
+12-7
drivers/infiniband/ulp/ipoib/ipoib_main.c
···15601560 int i, wait_flushed = 0;1561156115621562 init_completion(&priv->ntbl.flushed);15631563+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);1563156415641565 spin_lock_irqsave(&priv->lock, flags);15651566···1605160416061605 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");16071606 init_completion(&priv->ntbl.deleted);16081608- set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);1609160716101608 /* Stop GC if called at init fail need to cancel work */16111609 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);···18471847 .ndo_tx_timeout = ipoib_timeout,18481848 .ndo_set_rx_mode = ipoib_set_mcast_list,18491849 .ndo_get_iflink = ipoib_get_iflink,18501850+ .ndo_get_stats64 = ipoib_get_stats,18501851};1851185218521853void ipoib_setup_common(struct net_device *dev)···18781877 priv->dev = dev;18791878 spin_lock_init(&priv->lock);18801879 init_rwsem(&priv->vlan_rwsem);18801880+ mutex_init(&priv->mcast_mutex);1881188118821882 INIT_LIST_HEAD(&priv->path_list);18831883 INIT_LIST_HEAD(&priv->child_intfs);···21752173 priv->dev->dev_id = port - 1;2176217421772175 result = ib_query_port(hca, port, &attr);21782178- if (!result)21792179- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);21802180- else {21762176+ if (result) {21812177 printk(KERN_WARNING "%s: ib_query_port %d failed\n",21822178 hca->name, port);21832179 goto device_init_failed;21842180 }21812181+21822182+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);2185218321862184 /* MTU will be reset when mcast join happens */21872185 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);···22132211 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",22142212 hca->name, port, result);22152213 goto device_init_failed;22162216- } else22172217- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));22142214+ }22152215+22162216+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,22172217+ sizeof(union ib_gid));22182218 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);2219221922202220 result = ipoib_dev_init(priv->dev, hca, port);22212221- if (result < 0) {22212221+ if (result) {22222222 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",22232223 hca->name, port, result);22242224 goto device_init_failed;···23692365 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);23702366#ifdef CONFIG_INFINIBAND_IPOIB_CM23712367 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);23682368+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);23722369#endif2373237023742371 /*
+11-22
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
···684684int ipoib_mcast_stop_thread(struct net_device *dev)685685{686686 struct ipoib_dev_priv *priv = ipoib_priv(dev);687687- unsigned long flags;688687689688 ipoib_dbg_mcast(priv, "stopping multicast thread\n");690689691691- spin_lock_irqsave(&priv->lock, flags);692692- cancel_delayed_work(&priv->mcast_task);693693- spin_unlock_irqrestore(&priv->lock, flags);694694-695695- flush_workqueue(priv->wq);690690+ cancel_delayed_work_sync(&priv->mcast_task);696691697692 return 0;698693}···742747void ipoib_mcast_remove_list(struct list_head *remove_list)743748{744749 struct ipoib_mcast *mcast, *tmcast;750750+751751+ /*752752+ * make sure the in-flight joins have finished before we attempt753753+ * to leave754754+ */755755+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)756756+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))757757+ wait_for_completion(&mcast->done);745758746759 list_for_each_entry_safe(mcast, tmcast, remove_list, list) {747760 ipoib_mcast_leave(mcast->dev, mcast);···841838 struct ipoib_mcast *mcast, *tmcast;842839 unsigned long flags;843840841841+ mutex_lock(&priv->mcast_mutex);844842 ipoib_dbg_mcast(priv, "flushing multicast list\n");845843846844 spin_lock_irqsave(&priv->lock, flags);···860856861857 spin_unlock_irqrestore(&priv->lock, flags);862858863863- /*864864- * make sure the in-flight joins have finished before we attempt865865- * to leave866866- */867867- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)868868- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))869869- wait_for_completion(&mcast->done);870870-871859 ipoib_mcast_remove_list(&remove_list);860860+ mutex_unlock(&priv->mcast_mutex);872861}873862874863static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)···978981 spin_unlock(&priv->lock);979982 netif_addr_unlock(dev);980983 local_irq_restore(flags);981981-982982- /*983983- * make sure the in-flight joins have finished before we attempt984984- * to leave985985- */986986- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)987987- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))988988- wait_for_completion(&mcast->done);989984990985 ipoib_mcast_remove_list(&remove_list);991986
+7
drivers/iommu/arm-smmu.c
···1519151915201520 if (using_legacy_binding) {15211521 ret = arm_smmu_register_legacy_master(dev, &smmu);15221522+15231523+ /*15241524+ * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master()15251525+ * will allocate/initialise a new one. Thus we need to update fwspec for15261526+ * later use.15271527+ */15281528+ fwspec = dev->iommu_fwspec;15221529 if (ret)15231530 goto out_free;15241531 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
+13-15
drivers/isdn/hysdn/hysdn_proclog.c
···4444 char log_name[15]; /* log filename */4545 struct log_data *log_head, *log_tail; /* head and tail for queue */4646 int if_used; /* open count for interface */4747- int volatile del_lock; /* lock for delete operations */4847 unsigned char logtmp[LOG_MAX_LINELEN];4948 wait_queue_head_t rd_queue;5049};···101102{102103 struct log_data *ib;103104 struct procdata *pd = card->proclog;104104- int i;105105 unsigned long flags;106106107107 if (!pd)···124126 else125127 pd->log_tail->next = ib; /* follows existing messages */126128 pd->log_tail = ib; /* new tail */127127- i = pd->del_lock++; /* get lock state */128128- spin_unlock_irqrestore(&card->hysdn_lock, flags);129129130130 /* delete old entrys */131131- if (!i)132132- while (pd->log_head->next) {133133- if ((pd->log_head->usage_cnt <= 0) &&134134- (pd->log_head->next->usage_cnt <= 0)) {135135- ib = pd->log_head;136136- pd->log_head = pd->log_head->next;137137- kfree(ib);138138- } else139139- break;140140- } /* pd->log_head->next */141141- pd->del_lock--; /* release lock level */131131+ while (pd->log_head->next) {132132+ if ((pd->log_head->usage_cnt <= 0) &&133133+ (pd->log_head->next->usage_cnt <= 0)) {134134+ ib = pd->log_head;135135+ pd->log_head = pd->log_head->next;136136+ kfree(ib);137137+ } else {138138+ break;139139+ }140140+ } /* pd->log_head->next */141141+142142+ spin_unlock_irqrestore(&card->hysdn_lock, flags);143143+142144 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */143145} /* put_log_buffer */144146
+6
drivers/misc/mei/pci-me.c
···216216 pci_set_drvdata(pdev, dev);217217218218 /*219219+ * MEI requires to resume from runtime suspend mode220220+ * in order to perform link reset flow upon system suspend.221221+ */222222+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;223223+224224+ /*219225 * For not wake-able HW runtime pm framework220226 * can't be used on pci device level.221227 * Use domain runtime pm callbacks instead.
+6
drivers/misc/mei/pci-txe.c
···138138 pci_set_drvdata(pdev, dev);139139140140 /*141141+ * MEI requires to resume from runtime suspend mode142142+ * in order to perform link reset flow upon system suspend.143143+ */144144+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;145145+146146+ /*141147 * For not wake-able HW runtime pm framework142148 * can't be used on pci device level.143149 * Use domain runtime pm callbacks instead.
+2
drivers/mmc/core/block.c
···21702170 * from being accepted.21712171 */21722172 card = md->queue.card;21732173+ spin_lock_irq(md->queue.queue->queue_lock);21732174 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);21752175+ spin_unlock_irq(md->queue.queue->queue_lock);21742176 blk_set_queue_dying(md->queue.queue);21752177 mmc_cleanup_queue(&md->queue);21762178 if (md->disk->flags & GENHD_FL_UP) {
+1-1
drivers/mmc/core/mmc.c
···12891289static int mmc_select_hs400es(struct mmc_card *card)12901290{12911291 struct mmc_host *host = card->host;12921292- int err = 0;12921292+ int err = -EINVAL;12931293 u8 val;1294129412951295 if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
···12011201 * tRC < 30ns implies EDO mode. This controller does not support this12021202 * mode.12031203 */12041204- if (conf->timings.sdr.tRC_min < 30)12041204+ if (conf->timings.sdr.tRC_min < 30000)12051205 return -ENOTSUPP;1206120612071207 atmel_smc_cs_conf_init(smcconf);
+6-15
drivers/mtd/nand/atmel/pmecc.c
···945945 */946946 struct platform_device *pdev = to_platform_device(userdev);947947 const struct atmel_pmecc_caps *caps;948948+ const struct of_device_id *match;948949949950 /* No PMECC engine available. */950951 if (!of_property_read_bool(userdev->of_node,···954953955954 caps = &at91sam9g45_caps;956955957957- /*958958- * Try to find the NFC subnode and extract the associated caps959959- * from there.960960- */961961- np = of_find_compatible_node(userdev->of_node, NULL,962962- "atmel,sama5d3-nfc");963963- if (np) {964964- const struct of_device_id *match;965965-966966- match = of_match_node(atmel_pmecc_legacy_match, np);967967- if (match && match->data)968968- caps = match->data;969969-970970- of_node_put(np);971971- }956956+ /* Find the caps associated to the NAND dev node. */957957+ match = of_match_node(atmel_pmecc_legacy_match,958958+ userdev->of_node);959959+ if (match && match->data)960960+ caps = match->data;972961973962 pmecc = atmel_pmecc_create(pdev, caps, 1, 2);974963 }
+10-3
drivers/mtd/nand/nand_base.c
···65656666 if (!section) {6767 oobregion->offset = 0;6868- oobregion->length = 4;6868+ if (mtd->oobsize == 16)6969+ oobregion->length = 4;7070+ else7171+ oobregion->length = 3;6972 } else {7373+ if (mtd->oobsize == 8)7474+ return -ERANGE;7575+7076 oobregion->offset = 6;7177 oobregion->length = ecc->total - 4;7278 }···11311125 * Ensure the timing mode has been changed on the chip side11321126 * before changing timings on the controller side.11331127 */11341134- if (chip->onfi_version) {11281128+ if (chip->onfi_version &&11291129+ (le16_to_cpu(chip->onfi_params.opt_cmd) &11301130+ ONFI_OPT_CMD_SET_GET_FEATURES)) {11351131 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {11361132 chip->onfi_timing_mode_default,11371133 };···27492741 * @buf: the data to write27502742 * @oob_required: must write chip->oob_poi to OOB27512743 * @page: page number to write27522752- * @cached: cached programming27532744 * @raw: use _raw version of write_page27542745 */27552746static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
···1785178517861786 xgene_enet_gpiod_get(pdata);1787178717881788- if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {17891789- pdata->clk = devm_clk_get(&pdev->dev, NULL);17901790- if (IS_ERR(pdata->clk)) {17881788+ pdata->clk = devm_clk_get(&pdev->dev, NULL);17891789+ if (IS_ERR(pdata->clk)) {17901790+ if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {17911791 /* Abort if the clock is defined but couldn't be17921792 * retrived. Always abort if the clock is missing on17931793 * DT system as the driver can't cope with this case.
···449449 p = (char *)&dev->stats;450450 else451451 p = (char *)priv;452452+453453+ if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))454454+ continue;455455+452456 p += s->stat_offset;453457 data[j] = *(unsigned long *)p;454458 j++;
+14-1
drivers/net/ethernet/ibm/ibmvnic.c
···111111static void send_request_unmap(struct ibmvnic_adapter *, u8);112112static void send_login(struct ibmvnic_adapter *adapter);113113static void send_cap_queries(struct ibmvnic_adapter *adapter);114114+static int init_sub_crqs(struct ibmvnic_adapter *);114115static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);115116static int ibmvnic_init(struct ibmvnic_adapter *);116117static void release_crq_queue(struct ibmvnic_adapter *);···652651 struct ibmvnic_adapter *adapter = netdev_priv(netdev);653652 unsigned long timeout = msecs_to_jiffies(30000);654653 struct device *dev = &adapter->vdev->dev;654654+ int rc;655655656656 do {657657 if (adapter->renegotiate) {···664662 if (!wait_for_completion_timeout(&adapter->init_done,665663 timeout)) {666664 dev_err(dev, "Capabilities query timeout\n");665665+ return -1;666666+ }667667+ rc = init_sub_crqs(adapter);668668+ if (rc) {669669+ dev_err(dev,670670+ "Initialization of SCRQ's failed\n");671671+ return -1;672672+ }673673+ rc = init_sub_crq_irqs(adapter);674674+ if (rc) {675675+ dev_err(dev,676676+ "Initialization of SCRQ's irqs failed\n");667677 return -1;668678 }669679 }···30183004 *req_value,30193005 (long int)be64_to_cpu(crq->request_capability_rsp.30203006 number), name);30213021- release_sub_crqs(adapter);30223007 *req_value = be64_to_cpu(crq->request_capability_rsp.number);30233008 ibmvnic_send_req_caps(adapter, 1);30243009 return;
+2
drivers/net/ethernet/intel/i40e/i40e_txrx.c
···11131113 if (!tx_ring->tx_bi)11141114 goto err;1115111511161116+ u64_stats_init(&tx_ring->syncp);11171117+11161118 /* round up to nearest 4K */11171119 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);11181120 /* add u32 for head writeback, align after this takes care of
+4
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
···29882988 if (!tx_ring->tx_buffer_info)29892989 goto err;2990299029912991+ u64_stats_init(&tx_ring->syncp);29922992+29912993 /* round up to nearest 4K */29922994 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);29932995 tx_ring->size = ALIGN(tx_ring->size, 4096);···30473045 rx_ring->rx_buffer_info = vzalloc(size);30483046 if (!rx_ring->rx_buffer_info)30493047 goto err;30483048+30493049+ u64_stats_init(&rx_ring->syncp);3050305030513051 /* Round up to nearest 4K */30523052 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
···19151915 spin_unlock(&pch->downl);19161916 /* see if there is anything from the attached unit to be sent */19171917 if (skb_queue_empty(&pch->file.xq)) {19181918- read_lock(&pch->upl);19191918 ppp = pch->ppp;19201919 if (ppp)19211921- ppp_xmit_process(ppp);19221922- read_unlock(&pch->upl);19201920+ __ppp_xmit_process(ppp);19231921 }19241922}1925192319261924static void ppp_channel_push(struct channel *pch)19271925{19281928- local_bh_disable();19291929-19301930- __ppp_channel_push(pch);19311931-19321932- local_bh_enable();19261926+ read_lock_bh(&pch->upl);19271927+ if (pch->ppp) {19281928+ (*this_cpu_ptr(pch->ppp->xmit_recursion))++;19291929+ __ppp_channel_push(pch);19301930+ (*this_cpu_ptr(pch->ppp->xmit_recursion))--;19311931+ } else {19321932+ __ppp_channel_push(pch);19331933+ }19341934+ read_unlock_bh(&pch->upl);19331935}1934193619351937/*
···336336337337 c.directive.opcode = nvme_admin_directive_recv;338338 c.directive.nsid = cpu_to_le32(nsid);339339- c.directive.numd = cpu_to_le32(sizeof(*s));339339+ c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);340340 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;341341 c.directive.dtype = NVME_DIR_STREAMS;342342···15091509 blk_queue_write_cache(q, vwc, vwc);15101510}1511151115121512-static void nvme_configure_apst(struct nvme_ctrl *ctrl)15121512+static int nvme_configure_apst(struct nvme_ctrl *ctrl)15131513{15141514 /*15151515 * APST (Autonomous Power State Transition) lets us program a···15381538 * then don't do anything.15391539 */15401540 if (!ctrl->apsta)15411541- return;15411541+ return 0;1542154215431543 if (ctrl->npss > 31) {15441544 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");15451545- return;15451545+ return 0;15461546 }1547154715481548 table = kzalloc(sizeof(*table), GFP_KERNEL);15491549 if (!table)15501550- return;15501550+ return 0;1551155115521552 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {15531553 /* Turn off APST. */···16291629 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);1630163016311631 kfree(table);16321632+ return ret;16321633}1633163416341635static void nvme_set_latency_tolerance(struct device *dev, s32 val)···18361835 * In fabrics we need to verify the cntlid matches the18371836 * admin connect18381837 */18391839- if (ctrl->cntlid != le16_to_cpu(id->cntlid))18381838+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {18401839 ret = -EINVAL;18401840+ goto out_free;18411841+ }1841184218421843 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {18431844 dev_err(ctrl->device,18441845 "keep-alive support is mandatory for fabrics\n");18451846 ret = -EINVAL;18471847+ goto out_free;18461848 }18471849 } else {18481850 ctrl->cntlid = le16_to_cpu(id->cntlid);···18601856 else if (!ctrl->apst_enabled && prev_apst_enabled)18611857 dev_pm_qos_hide_latency_tolerance(ctrl->device);1862185818631863- nvme_configure_apst(ctrl);18641864- nvme_configure_directives(ctrl);18591859+ ret = nvme_configure_apst(ctrl);18601860+ if (ret < 0)18611861+ return ret;18621862+18631863+ ret = nvme_configure_directives(ctrl);18641864+ if (ret < 0)18651865+ return ret;1865186618661867 ctrl->identified = true;1867186818691869+ return 0;18701870+18711871+out_free:18721872+ kfree(id);18681873 return ret;18691874}18701875EXPORT_SYMBOL_GPL(nvme_init_identify);···20172004 if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))20182005 return sprintf(buf, "eui.%8phN\n", ns->eui);2019200620202020- while (ctrl->serial[serial_len - 1] == ' ')20072007+ while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||20082008+ ctrl->serial[serial_len - 1] == '\0'))20212009 serial_len--;20222022- while (ctrl->model[model_len - 1] == ' ')20102010+ while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||20112011+ ctrl->model[model_len - 1] == '\0'))20232012 model_len--;2024201320252014 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
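One detail in the nvme core hunk above is easy to misread: the directive-receive command's numd field is a zero-based count of dwords, not a byte count, which is why sizeof(*s) becomes (sizeof(*s) >> 2) - 1. A purely illustrative sketch of that conversion (the 32-byte payload below is a made-up example, not a value from the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: NVMe NUMD-style fields count dwords and are
 * zero-based, so numd == 0 already means one dword of data.
 */
static uint32_t bytes_to_numd(size_t len)
{
        return (uint32_t)((len >> 2) - 1);
}

int main(void)
{
        size_t payload = 32;    /* hypothetical payload size in bytes */

        /* 32 bytes = 8 dwords, encoded zero-based as 7 */
        printf("numd for %zu bytes = %u\n", payload,
               (unsigned int)bytes_to_numd(payload));
        return 0;
}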
+7-11
drivers/nvme/host/pci.c
···15581558 if (dev->cmb) {15591559 iounmap(dev->cmb);15601560 dev->cmb = NULL;15611561- if (dev->cmbsz) {15621562- sysfs_remove_file_from_group(&dev->ctrl.device->kobj,15631563- &dev_attr_cmb.attr, NULL);15641564- dev->cmbsz = 0;15651565- }15611561+ sysfs_remove_file_from_group(&dev->ctrl.device->kobj,15621562+ &dev_attr_cmb.attr, NULL);15631563+ dev->cmbsz = 0;15661564 }15671565}15681566···1951195319521954 /*19531955 * CMBs can currently only exist on >=1.2 PCIe devices. We only19541954- * populate sysfs if a CMB is implemented. Note that we add the19551955- * CMB attribute to the nvme_ctrl kobj which removes the need to remove19561956- * it on exit. Since nvme_dev_attrs_group has no name we can pass19571957- * NULL as final argument to sysfs_add_file_to_group.19561956+ * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group19571957+ * has no name we can pass NULL as final argument to19581958+ * sysfs_add_file_to_group.19581959 */1959196019601961 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {19611962 dev->cmb = nvme_map_cmb(dev);19621962-19631963- if (dev->cmbsz) {19631963+ if (dev->cmb) {19641964 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,19651965 &dev_attr_cmb.attr, NULL))19661966 dev_warn(dev->ctrl.device,
+186-30
drivers/nvme/target/fc.c
···114114 struct kref ref;115115};116116117117+struct nvmet_fc_defer_fcp_req {118118+ struct list_head req_list;119119+ struct nvmefc_tgt_fcp_req *fcp_req;120120+};121121+117122struct nvmet_fc_tgt_queue {118123 bool ninetypercent;119124 u16 qid;···137132 struct nvmet_fc_tgt_assoc *assoc;138133 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */139134 struct list_head fod_list;135135+ struct list_head pending_cmd_list;136136+ struct list_head avail_defer_list;140137 struct workqueue_struct *work_q;141138 struct kref ref;142139} __aligned(sizeof(unsigned long long));···230223static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);231224static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);232225static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);226226+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,227227+ struct nvmet_fc_fcp_iod *fod);233228234229235230/* *********************** FC-NVME DMA Handling **************************** */···472463nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)473464{474465 static struct nvmet_fc_fcp_iod *fod;475475- unsigned long flags;476466477477- spin_lock_irqsave(&queue->qlock, flags);467467+ lockdep_assert_held(&queue->qlock);468468+478469 fod = list_first_entry_or_null(&queue->fod_list,479470 struct nvmet_fc_fcp_iod, fcp_list);480471 if (fod) {···486477 * will "inherit" that reference.487478 */488479 }489489- spin_unlock_irqrestore(&queue->qlock, flags);490480 return fod;491481}492482483483+484484+static void485485+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,486486+ struct nvmet_fc_tgt_queue *queue,487487+ struct nvmefc_tgt_fcp_req *fcpreq)488488+{489489+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;490490+491491+ /*492492+ * put all admin cmds on hw queue id 0. 
All io commands go to493493+ * the respective hw queue based on a modulo basis494494+ */495495+ fcpreq->hwqid = queue->qid ?496496+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;497497+498498+ if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)499499+ queue_work_on(queue->cpu, queue->work_q, &fod->work);500500+ else501501+ nvmet_fc_handle_fcp_rqst(tgtport, fod);502502+}493503494504static void495505nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,···516488{517489 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;518490 struct nvmet_fc_tgtport *tgtport = fod->tgtport;491491+ struct nvmet_fc_defer_fcp_req *deferfcp;519492 unsigned long flags;520493521494 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,···524495525496 fcpreq->nvmet_fc_private = NULL;526497527527- spin_lock_irqsave(&queue->qlock, flags);528528- list_add_tail(&fod->fcp_list, &fod->queue->fod_list);529498 fod->active = false;530499 fod->abort = false;531500 fod->aborted = false;532501 fod->writedataactive = false;533502 fod->fcpreq = NULL;534534- spin_unlock_irqrestore(&queue->qlock, flags);535535-536536- /*537537- * release the reference taken at queue lookup and fod allocation538538- */539539- nvmet_fc_tgt_q_put(queue);540503541504 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);505505+506506+ spin_lock_irqsave(&queue->qlock, flags);507507+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,508508+ struct nvmet_fc_defer_fcp_req, req_list);509509+ if (!deferfcp) {510510+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);511511+ spin_unlock_irqrestore(&queue->qlock, flags);512512+513513+ /* Release reference taken at queue lookup and fod allocation */514514+ nvmet_fc_tgt_q_put(queue);515515+ return;516516+ }517517+518518+ /* Re-use the fod for the next pending cmd that was deferred */519519+ list_del(&deferfcp->req_list);520520+521521+ fcpreq = deferfcp->fcp_req;522522+523523+ /* deferfcp can be reused for another IO at a later date */524524+ list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);525525+526526+ spin_unlock_irqrestore(&queue->qlock, flags);527527+528528+ /* Save NVME CMD IO in fod */529529+ memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);530530+531531+ /* Setup new fcpreq to be processed */532532+ fcpreq->rspaddr = NULL;533533+ fcpreq->rsplen = 0;534534+ fcpreq->nvmet_fc_private = fod;535535+ fod->fcpreq = fcpreq;536536+ fod->active = true;537537+538538+ /* inform LLDD IO is now being processed */539539+ tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);540540+541541+ /* Submit deferred IO for processing */542542+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);543543+544544+ /*545545+ * Leave the queue lookup get reference taken when546546+ * fod was originally allocated.547547+ */542548}543549544550static int···633569 queue->port = assoc->tgtport->port;634570 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);635571 INIT_LIST_HEAD(&queue->fod_list);572572+ INIT_LIST_HEAD(&queue->avail_defer_list);573573+ INIT_LIST_HEAD(&queue->pending_cmd_list);636574 atomic_set(&queue->connected, 0);637575 atomic_set(&queue->sqtail, 0);638576 atomic_set(&queue->rsn, 1);···704638{705639 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;706640 struct nvmet_fc_fcp_iod *fod = queue->fod;641641+ struct nvmet_fc_defer_fcp_req *deferfcp;707642 unsigned long flags;708643 int i, writedataactive;709644 bool disconnect;···732665 &tgtport->fc_target_port, fod->fcpreq);733666 }734667 }668668+ }669669+670670+ /* Cleanup defer'ed IOs in queue */671671+ 
list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {672672+ list_del(&deferfcp->req_list);673673+ kfree(deferfcp);674674+ }675675+676676+ for (;;) {677677+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,678678+ struct nvmet_fc_defer_fcp_req, req_list);679679+ if (!deferfcp)680680+ break;681681+682682+ list_del(&deferfcp->req_list);683683+ spin_unlock_irqrestore(&queue->qlock, flags);684684+685685+ tgtport->ops->defer_rcv(&tgtport->fc_target_port,686686+ deferfcp->fcp_req);687687+688688+ tgtport->ops->fcp_abort(&tgtport->fc_target_port,689689+ deferfcp->fcp_req);690690+691691+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port,692692+ deferfcp->fcp_req);693693+694694+ kfree(deferfcp);695695+696696+ spin_lock_irqsave(&queue->qlock, flags);735697 }736698 spin_unlock_irqrestore(&queue->qlock, flags);737699···22682172 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc22692173 * layer for processing.22702174 *22712271- * The nvmet-fc layer will copy cmd payload to an internal structure for22722272- * processing. As such, upon completion of the routine, the LLDD may22732273- * immediately free/reuse the CMD IU buffer passed in the call.21752175+ * The nvmet_fc layer allocates a local job structure (struct21762176+ * nvmet_fc_fcp_iod) from the queue for the io and copies the21772177+ * CMD IU buffer to the job structure. As such, on a successful21782178+ * completion (returns 0), the LLDD may immediately free/reuse21792179+ * the CMD IU buffer passed in the call.22742180 *22752275- * If this routine returns error, the lldd should abort the exchange.21812181+ * However, in some circumstances, due to the packetized nature of FC21822182+ * and the api of the FC LLDD which may issue a hw command to send the21832183+ * response, but the LLDD may not get the hw completion for that command21842184+ * and upcall the nvmet_fc layer before a new command may be21852185+ * asynchronously received - its possible for a command to be received21862186+ * before the LLDD and nvmet_fc have recycled the job structure. It gives21872187+ * the appearance of more commands received than fits in the sq.21882188+ * To alleviate this scenario, a temporary queue is maintained in the21892189+ * transport for pending LLDD requests waiting for a queue job structure.21902190+ * In these "overrun" cases, a temporary queue element is allocated21912191+ * the LLDD request and CMD iu buffer information remembered, and the21922192+ * routine returns a -EOVERFLOW status. Subsequently, when a queue job21932193+ * structure is freed, it is immediately reallocated for anything on the21942194+ * pending request list. 
The LLDDs defer_rcv() callback is called,21952195+ * informing the LLDD that it may reuse the CMD IU buffer, and the io21962196+ * is then started normally with the transport.21972197+ *21982198+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat21992199+ * the completion as successful but must not reuse the CMD IU buffer22002200+ * until the LLDD's defer_rcv() callback has been called for the22012201+ * corresponding struct nvmefc_tgt_fcp_req pointer.22022202+ *22032203+ * If there is any other condition in which an error occurs, the22042204+ * transport will return a non-zero status indicating the error.22052205+ * In all cases other than -EOVERFLOW, the transport has not accepted the22062206+ * request and the LLDD should abort the exchange.22762207 *22772208 * @target_port: pointer to the (registered) target port the FCP CMD IU22782209 * was received on.···23172194 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;23182195 struct nvmet_fc_tgt_queue *queue;23192196 struct nvmet_fc_fcp_iod *fod;21972197+ struct nvmet_fc_defer_fcp_req *deferfcp;21982198+ unsigned long flags;2320219923212200 /* validate iu, so the connection id can be used to find the queue */23222201 if ((cmdiubuf_len != sizeof(*cmdiu)) ||···23392214 * when the fod is freed.23402215 */2341221622172217+ spin_lock_irqsave(&queue->qlock, flags);22182218+23422219 fod = nvmet_fc_alloc_fcp_iod(queue);23432343- if (!fod) {22202220+ if (fod) {22212221+ spin_unlock_irqrestore(&queue->qlock, flags);22222222+22232223+ fcpreq->nvmet_fc_private = fod;22242224+ fod->fcpreq = fcpreq;22252225+22262226+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);22272227+22282228+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);22292229+22302230+ return 0;22312231+ }22322232+22332233+ if (!tgtport->ops->defer_rcv) {22342234+ spin_unlock_irqrestore(&queue->qlock, flags);23442235 /* release the queue lookup reference */23452236 nvmet_fc_tgt_q_put(queue);23462237 return -ENOENT;23472238 }2348223923492349- fcpreq->nvmet_fc_private = fod;23502350- fod->fcpreq = fcpreq;23512351- /*23522352- * put all admin cmds on hw queue id 0. 
All io commands go to23532353- * the respective hw queue based on a modulo basis23542354- */23552355- fcpreq->hwqid = queue->qid ?23562356- ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;23572357- memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);22402240+ deferfcp = list_first_entry_or_null(&queue->avail_defer_list,22412241+ struct nvmet_fc_defer_fcp_req, req_list);22422242+ if (deferfcp) {22432243+ /* Just re-use one that was previously allocated */22442244+ list_del(&deferfcp->req_list);22452245+ } else {22462246+ spin_unlock_irqrestore(&queue->qlock, flags);2358224723592359- if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)23602360- queue_work_on(queue->cpu, queue->work_q, &fod->work);23612361- else23622362- nvmet_fc_handle_fcp_rqst(tgtport, fod);22482248+ /* Now we need to dynamically allocate one */22492249+ deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);22502250+ if (!deferfcp) {22512251+ /* release the queue lookup reference */22522252+ nvmet_fc_tgt_q_put(queue);22532253+ return -ENOMEM;22542254+ }22552255+ spin_lock_irqsave(&queue->qlock, flags);22562256+ }2363225723642364- return 0;22582258+ /* For now, use rspaddr / rsplen to save payload information */22592259+ fcpreq->rspaddr = cmdiubuf;22602260+ fcpreq->rsplen = cmdiubuf_len;22612261+ deferfcp->fcp_req = fcpreq;22622262+22632263+ /* defer processing till a fod becomes available */22642264+ list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);22652265+22662266+ /* NOTE: the queue lookup reference is still valid */22672267+22682268+ spin_unlock_irqrestore(&queue->qlock, flags);22692269+22702270+ return -EOVERFLOW;23652271}23662272EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);23672273
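The nvmet-fc change above boils down to a fixed-pool-plus-overflow-queue pattern: each queue has a fixed set of job structures (fods); when a command arrives and none is free, the request is parked on a pending list and -EOVERFLOW is returned, and the next fod that is freed immediately picks up the oldest parked request, at which point the LLDD's defer_rcv() callback signals that the CMD IU buffer may be reused. A toy userspace model of that scheme, with made-up names and without the locking, reference counting, or FC specifics of the real code:

#include <stdio.h>
#include <string.h>

#define NUM_FODS        2       /* fixed per-queue pool, like the fod array */
#define MAX_PENDING     4       /* toy bound for the deferred-command list */

struct fod { int busy; char cmd[16]; };

static struct fod pool[NUM_FODS];
static char pending[MAX_PENDING][16];
static int npending;

/* Returns 0 if the command got a fod, -1 ("-EOVERFLOW") if it was deferred. */
static int rcv_cmd(const char *cmd)
{
        for (int i = 0; i < NUM_FODS; i++) {
                if (!pool[i].busy) {
                        pool[i].busy = 1;
                        snprintf(pool[i].cmd, sizeof(pool[i].cmd), "%s", cmd);
                        printf("start  %s on fod %d\n", cmd, i);
                        return 0;
                }
        }
        if (npending == MAX_PENDING)
                return -1;      /* toy model only: just drop it */
        snprintf(pending[npending++], sizeof(pending[0]), "%s", cmd);
        printf("defer  %s (no fod free)\n", cmd);
        return -1;
}

/* Freeing a fod immediately re-uses it for the oldest deferred command. */
static void free_fod(int i)
{
        printf("done   %s on fod %d\n", pool[i].cmd, i);
        pool[i].busy = 0;
        if (npending > 0) {
                char cmd[16];

                snprintf(cmd, sizeof(cmd), "%s", pending[0]);
                memmove(pending[0], pending[1],
                        (MAX_PENDING - 1) * sizeof(pending[0]));
                npending--;
                printf("resume %s (defer_rcv() would fire here)\n", cmd);
                rcv_cmd(cmd);
        }
}

int main(void)
{
        rcv_cmd("cmd-A");
        rcv_cmd("cmd-B");
        rcv_cmd("cmd-C");       /* pool exhausted: deferred */
        free_fod(0);            /* cmd-C resumes on the freed fod */
        free_fod(1);
        free_fod(0);
        return 0;
}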
+35
drivers/pci/pci.c
···42604260EXPORT_SYMBOL_GPL(pci_reset_function);4261426142624262/**42634263+ * pci_reset_function_locked - quiesce and reset a PCI device function42644264+ * @dev: PCI device to reset42654265+ *42664266+ * Some devices allow an individual function to be reset without affecting42674267+ * other functions in the same device. The PCI device must be responsive42684268+ * to PCI config space in order to use this function.42694269+ *42704270+ * This function does not just reset the PCI portion of a device, but42714271+ * clears all the state associated with the device. This function differs42724272+ * from __pci_reset_function() in that it saves and restores device state42734273+ * over the reset. It also differs from pci_reset_function() in that it42744274+ * requires the PCI device lock to be held.42754275+ *42764276+ * Returns 0 if the device function was successfully reset or negative if the42774277+ * device doesn't support resetting a single function.42784278+ */42794279+int pci_reset_function_locked(struct pci_dev *dev)42804280+{42814281+ int rc;42824282+42834283+ rc = pci_probe_reset_function(dev);42844284+ if (rc)42854285+ return rc;42864286+42874287+ pci_dev_save_and_disable(dev);42884288+42894289+ rc = __pci_reset_function_locked(dev);42904290+42914291+ pci_dev_restore(dev);42924292+42934293+ return rc;42944294+}42954295+EXPORT_SYMBOL_GPL(pci_reset_function_locked);42964296+42974297+/**42634298 * pci_try_reset_function - quiesce and reset a PCI device function42644299 * @dev: PCI device to reset42654300 *
···27602760 * we allocation is the minimum off:27612761 *27622762 * Number of CPUs27632763- * Number of MSI-X vectors27642764- * Max number allocated in hardware (QEDF_MAX_NUM_CQS)27632763+ * Number allocated by qed for our PCI function27652764 */27662766- qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,27672767- num_online_cpus());27652765+ qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);2768276627692767 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",27702768 qedf->num_queues);···29602962 goto err1;29612963 }2962296429652965+ /* Learn information crucial for qedf to progress */29662966+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);29672967+ if (rc) {29682968+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");29692969+ goto err1;29702970+ }29712971+29632972 /* queue allocation code should come here29642973 * order should be29652974 * slowpath_start···29812976 goto err2;29822977 }29832978 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);29842984-29852985- /* Learn information crucial for qedf to progress */29862986- rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);29872987- if (rc) {29882988- QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");29892989- goto err1;29902990- }2991297929922980 /* Record BDQ producer doorbell addresses */29932981 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
-30
drivers/scsi/qla2xxx/tcm_qla2xxx.c
···500500static void tcm_qla2xxx_handle_data_work(struct work_struct *work)501501{502502 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);503503- unsigned long flags;504503505504 /*506505 * Ensure that the complete FCP WRITE payload has been received.507506 * Otherwise return an exception via CHECK_CONDITION status.508507 */509508 cmd->cmd_in_wq = 0;510510-511511- spin_lock_irqsave(&cmd->cmd_lock, flags);512512- cmd->data_work = 1;513513- if (cmd->aborted) {514514- cmd->data_work_free = 1;515515- spin_unlock_irqrestore(&cmd->cmd_lock, flags);516516-517517- tcm_qla2xxx_free_cmd(cmd);518518- return;519519- }520520- spin_unlock_irqrestore(&cmd->cmd_lock, flags);521509522510 cmd->qpair->tgt_counters.qla_core_ret_ctio++;523511 if (!cmd->write_data_transferred) {···753765 qlt_xmit_tm_rsp(mcmd);754766}755767756756-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)757768static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)758769{759770 struct qla_tgt_cmd *cmd = container_of(se_cmd,760771 struct qla_tgt_cmd, se_cmd);761761- unsigned long flags;762772763773 if (qlt_abort_cmd(cmd))764774 return;765765-766766- spin_lock_irqsave(&cmd->cmd_lock, flags);767767- if ((cmd->state == QLA_TGT_STATE_NEW)||768768- ((cmd->state == QLA_TGT_STATE_DATA_IN) &&769769- DATA_WORK_NOT_FREE(cmd))) {770770- cmd->data_work_free = 1;771771- spin_unlock_irqrestore(&cmd->cmd_lock, flags);772772- /*773773- * cmd has not reached fw, Use this trigger to free it.774774- */775775- tcm_qla2xxx_free_cmd(cmd);776776- return;777777- }778778- spin_unlock_irqrestore(&cmd->cmd_lock, flags);779779- return;780780-781775}782776783777static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+1-30
drivers/scsi/sg.c
···751751 return count;752752}753753754754-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)755755-{756756- switch (hp->dxfer_direction) {757757- case SG_DXFER_NONE:758758- if (hp->dxferp || hp->dxfer_len > 0)759759- return false;760760- return true;761761- case SG_DXFER_FROM_DEV:762762- /*763763- * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp764764- * can either be NULL or != NULL so there's no point in checking765765- * it either. So just return true.766766- */767767- return true;768768- case SG_DXFER_TO_DEV:769769- case SG_DXFER_TO_FROM_DEV:770770- if (!hp->dxferp || hp->dxfer_len == 0)771771- return false;772772- return true;773773- case SG_DXFER_UNKNOWN:774774- if ((!hp->dxferp && hp->dxfer_len) ||775775- (hp->dxferp && hp->dxfer_len == 0))776776- return false;777777- return true;778778- default:779779- return false;780780- }781781-}782782-783754static int784755sg_common_write(Sg_fd * sfp, Sg_request * srp,785756 unsigned char *cmnd, int timeout, int blocking)···771800 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",772801 (int) cmnd[0], (int) hp->cmd_len));773802774774- if (!sg_is_valid_dxfer(hp))803803+ if (hp->dxfer_len >= SZ_256M)775804 return -EINVAL;776805777806 k = sg_start_req(srp, cmnd);
···563563 block_remaining);564564 to_offset = get_block_offset_user(udev, dbi,565565 block_remaining);566566- offset = DATA_BLOCK_SIZE - block_remaining;567567- to += offset;568566569567 if (*iov_cnt != 0 &&570568 to_offset == iov_tail(*iov)) {···573575 (*iov)->iov_len = copy_bytes;574576 }575577 if (copy_data) {576576- memcpy(to, from + sg->length - sg_remaining,577577- copy_bytes);578578+ offset = DATA_BLOCK_SIZE - block_remaining;579579+ memcpy(to + offset,580580+ from + sg->length - sg_remaining,581581+ copy_bytes);578582 tcmu_flush_dcache_range(to, copy_bytes);579583 }580584 sg_remaining -= copy_bytes;···637637 copy_bytes = min_t(size_t, sg_remaining,638638 block_remaining);639639 offset = DATA_BLOCK_SIZE - block_remaining;640640- from += offset;641640 tcmu_flush_dcache_range(from, copy_bytes);642642- memcpy(to + sg->length - sg_remaining, from,641641+ memcpy(to + sg->length - sg_remaining, from + offset,643642 copy_bytes);644643645644 sg_remaining -= copy_bytes;···14321433 if (udev->dev_config[0])14331434 snprintf(str + used, size - used, "/%s", udev->dev_config);1434143514361436+ /* If the old string exists, free it */14371437+ kfree(info->name);14351438 info->name = str;1436143914371440 return 0;
+9
drivers/thunderbolt/eeprom.c
···333333 int res;334334 enum tb_port_type type;335335336336+ /*337337+ * Some DROMs list more ports than the controller actually has338338+ * so we skip those but allow the parser to continue.339339+ */340340+ if (header->index > sw->config.max_port_number) {341341+ dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");342342+ return 0;343343+ }344344+336345 port = &sw->ports[header->index];337346 port->disabled = header->port_disabled;338347 if (port->disabled)
+17-6
drivers/tty/serial/8250/8250_core.c
···10431043 if (up->dl_write)10441044 uart->dl_write = up->dl_write;1045104510461046- if (serial8250_isa_config != NULL)10471047- serial8250_isa_config(0, &uart->port,10481048- &uart->capabilities);10461046+ if (uart->port.type != PORT_8250_CIR) {10471047+ if (serial8250_isa_config != NULL)10481048+ serial8250_isa_config(0, &uart->port,10491049+ &uart->capabilities);1049105010501050- ret = uart_add_one_port(&serial8250_reg, &uart->port);10511051- if (ret == 0)10521052- ret = uart->port.line;10511051+ ret = uart_add_one_port(&serial8250_reg,10521052+ &uart->port);10531053+ if (ret == 0)10541054+ ret = uart->port.line;10551055+ } else {10561056+ dev_info(uart->port.dev,10571057+ "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",10581058+ uart->port.iobase,10591059+ (unsigned long long)uart->port.mapbase,10601060+ uart->port.irq);10611061+10621062+ ret = 0;10631063+ }10531064 }10541065 mutex_unlock(&serial_mutex);10551066
+19-18
drivers/tty/serial/amba-pl011.c
···142142 .fixed_options = true,143143};144144145145-/*146146- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as147147- * occasionally getting stuck as 1. To avoid the potential for a hang, check148148- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART149149- * implementations, so only do so if an affected platform is detected in150150- * parse_spcr().151151- */152152-static bool qdf2400_e44_present = false;153153-145145+#ifdef CONFIG_ACPI_SPCR_TABLE154146static struct vendor_data vendor_qdt_qdf2400_e44 = {155147 .reg_offset = pl011_std_offsets,156148 .fr_busy = UART011_FR_TXFE,···157165 .always_enabled = true,158166 .fixed_options = true,159167};168168+#endif160169161170static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {162171 [REG_DR] = UART01x_DR,···23682375 resource_size_t addr;23692376 int i;2370237723712371- if (strcmp(name, "qdf2400_e44") == 0) {23722372- pr_info_once("UART: Working around QDF2400 SoC erratum 44");23732373- qdf2400_e44_present = true;23742374- } else if (strcmp(name, "pl011") != 0) {23782378+ /*23792379+ * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum23802380+ * have a distinct console name, so make sure we check for that.23812381+ * The actual implementation of the erratum occurs in the probe23822382+ * function.23832383+ */23842384+ if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))23752385 return -ENODEV;23762376- }2377238623782387 if (uart_parse_earlycon(options, &iotype, &addr, &options))23792388 return -ENODEV;···27292734 }27302735 uap->port.irq = ret;2731273627322732- uap->reg_offset = vendor_sbsa.reg_offset;27332733- uap->vendor = qdf2400_e44_present ?27342734- &vendor_qdt_qdf2400_e44 : &vendor_sbsa;27372737+#ifdef CONFIG_ACPI_SPCR_TABLE27382738+ if (qdf2400_e44_present) {27392739+ dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");27402740+ uap->vendor = &vendor_qdt_qdf2400_e44;27412741+ } else27422742+#endif27432743+ uap->vendor = &vendor_sbsa;27442744+27452745+ uap->reg_offset = uap->vendor->reg_offset;27352746 uap->fifosize = 32;27362736- uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;27472747+ uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;27372748 uap->port.ops = &sbsa_uart_pops;27382749 uap->fixed_baud = baudrate;27392750
+3-1
drivers/usb/core/hcd.c
···18881888 /* No more submits can occur */18891889 spin_lock_irq(&hcd_urb_list_lock);18901890rescan:18911891- list_for_each_entry (urb, &ep->urb_list, urb_list) {18911891+ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {18921892 int is_in;1893189318941894 if (urb->unlinked)···24852485 }24862486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {24872487 hcd = hcd->shared_hcd;24882488+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);24892489+ set_bit(HCD_FLAG_DEAD, &hcd->flags);24882490 if (hcd->rh_registered) {24892491 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);24902492
+6-4
drivers/usb/core/hub.c
···47254725static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,47264726 u16 portchange)47274727{47284728- int status, i;47284728+ int status = -ENODEV;47294729+ int i;47294730 unsigned unit_load;47304731 struct usb_device *hdev = hub->hdev;47314732 struct usb_hcd *hcd = bus_to_hcd(hdev->bus);···4930492949314930done:49324931 hub_port_disable(hub, port1, 1);49334933- if (hcd->driver->relinquish_port && !hub->hdev->parent)49344934- hcd->driver->relinquish_port(hcd, port1);49354935-49324932+ if (hcd->driver->relinquish_port && !hub->hdev->parent) {49334933+ if (status != -ENOTCONN && status != -ENODEV)49344934+ hcd->driver->relinquish_port(hcd, port1);49354935+ }49364936}4937493749384938/* Handle physical or logical connection change events.
···896896 if (!node) {897897 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;898898899899+ /*900900+ * USB Specification 2.0 Section 5.9.2 states that: "If901901+ * there is only a single transaction in the microframe,902902+ * only a DATA0 data packet PID is used. If there are903903+ * two transactions per microframe, DATA1 is used for904904+ * the first transaction data packet and DATA0 is used905905+ * for the second transaction data packet. If there are906906+ * three transactions per microframe, DATA2 is used for907907+ * the first transaction data packet, DATA1 is used for908908+ * the second, and DATA0 is used for the third."909909+ *910910+ * IOW, we should satisfy the following cases:911911+ *912912+ * 1) length <= maxpacket913913+ * - DATA0914914+ *915915+ * 2) maxpacket < length <= (2 * maxpacket)916916+ * - DATA1, DATA0917917+ *918918+ * 3) (2 * maxpacket) < length <= (3 * maxpacket)919919+ * - DATA2, DATA1, DATA0920920+ */899921 if (speed == USB_SPEED_HIGH) {900922 struct usb_ep *ep = &dep->endpoint;901901- trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);923923+ unsigned int mult = ep->mult - 1;924924+ unsigned int maxp = usb_endpoint_maxp(ep->desc);925925+926926+ if (length <= (2 * maxp))927927+ mult--;928928+929929+ if (length <= maxp)930930+ mult--;931931+932932+ trb->size |= DWC3_TRB_SIZE_PCM1(mult);902933 }903934 } else {904935 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
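The dwc3 comment above enumerates how many transactions a high-speed isochronous endpoint needs per microframe for a given request length, and the PCM1 field ends up holding one less than that count. A small illustrative calculation of the mapping the comment describes (the maxpacket and request sizes below are arbitrary examples, not values from the patch):

#include <stdio.h>

/*
 * Illustrative only: a high-speed isoc endpoint uses 1, 2 or 3
 * transactions per microframe depending on how much of the (up to
 * 3 * maxpacket) budget the request actually needs; 1 packet uses
 * DATA0, 2 use DATA1/DATA0, 3 use DATA2/DATA1/DATA0.
 */
static unsigned int packets_per_uframe(unsigned int maxp, unsigned int length)
{
        if (length <= maxp)
                return 1;
        if (length <= 2 * maxp)
                return 2;
        return 3;
}

int main(void)
{
        unsigned int maxp = 1024;                       /* example wMaxPacketSize */
        unsigned int lengths[] = { 600, 1600, 2600 };   /* example request sizes */

        for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
                unsigned int n = packets_per_uframe(maxp, lengths[i]);

                printf("length %4u -> %u packet(s), PCM1 mult field = %u\n",
                       lengths[i], n, n - 1);
        }
        return 0;
}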
···9898 AMD_CHIPSET_HUDSON2,9999 AMD_CHIPSET_BOLTON,100100 AMD_CHIPSET_YANGTZE,101101+ AMD_CHIPSET_TAISHAN,101102 AMD_CHIPSET_UNKNOWN,102103};103104···142141 pinfo->sb_type.gen = AMD_CHIPSET_SB700;143142 else if (rev >= 0x40 && rev <= 0x4f)144143 pinfo->sb_type.gen = AMD_CHIPSET_SB800;144144+ }145145+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,146146+ 0x145c, NULL);147147+ if (pinfo->smbus_dev) {148148+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;145149 } else {146150 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,147151 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);···266260{267261 /* Make sure amd chipset type has already been initialized */268262 usb_amd_find_chipset_info();269269- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)270270- return 0;271271-272272- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");273273- return 1;263263+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||264264+ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {265265+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");266266+ return 1;267267+ }268268+ return 0;274269}275270EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);276271···11571150}11581151DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,11591152 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);11531153+11541154+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)11551155+{11561156+ /*11571157+ * Our dear uPD72020{1,2} friend only partially resets when11581158+ * asked to via the XHCI interface, and may end up doing DMA11591159+ * at the wrong addresses, as it keeps the top 32bit of some11601160+ * addresses from its previous programming under obscure11611161+ * circumstances.11621162+ * Give it a good wack at probe time. Unfortunately, this11631163+ * needs to happen before we've had a chance to discover any11641164+ * quirk, or the system will be in a rather bad state.11651165+ */11661166+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&11671167+ (pdev->device == 0x0014 || pdev->device == 0x0015))11681168+ return true;11691169+11701170+ return false;11711171+}11721172+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
···284284285285 driver = (struct hc_driver *)id->driver_data;286286287287+ /* For some HW implementation, a XHCI reset is just not enough... */288288+ if (usb_xhci_needs_pci_reset(dev)) {289289+ dev_info(&dev->dev, "Resetting\n");290290+ if (pci_reset_function_locked(dev))291291+ dev_warn(&dev->dev, "Reset failed");292292+ }293293+287294 /* Prevent runtime suspending between USB-2 and USB-3 initialization */288295 pm_runtime_get_noresume(&dev->dev);289296
···639639 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);640640 struct usbhs_pipe *pipe;641641 unsigned long flags;642642- int ret = 0;643642644643 spin_lock_irqsave(&uep->lock, flags);645644 pipe = usbhsg_uep_to_pipe(uep);646646- if (!pipe) {647647- ret = -EINVAL;645645+ if (!pipe)648646 goto out;649649- }650647651648 usbhsg_pipe_disable(uep);652649 usbhs_pipe_free(pipe);
+7-2
drivers/usb/renesas_usbhs/rcar3.c
···2020/* Low Power Status register (LPSTS) */2121#define LPSTS_SUSPM 0x400022222323-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */2323+/*2424+ * USB General control register 2 (UGCTRL2)2525+ * Remarks: bit[31:11] and bit[9:6] should be 02626+ */2427#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */2528#define UGCTRL2_USB0SEL_OTG 0x000000302929+#define UGCTRL2_VBUSSEL 0x0000040026302731static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)2832{···3834{3935 struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);40364141- usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);3737+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |3838+ UGCTRL2_VBUSSEL);42394340 if (enable) {4441 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
···315315{316316 struct us_data *us = (struct us_data *)__us;317317 struct Scsi_Host *host = us_to_host(us);318318+ struct scsi_cmnd *srb;318319319320 for (;;) {320321 usb_stor_dbg(us, "*** thread sleeping\n");···331330 scsi_lock(host);332331333332 /* When we are called with no command pending, we're done */333333+ srb = us->srb;334334 if (us->srb == NULL) {335335 scsi_unlock(host);336336 mutex_unlock(&us->dev_mutex);···400398 /* lock access to the state */401399 scsi_lock(host);402400403403- /* indicate that the command is done */404404- if (us->srb->result != DID_ABORT << 16) {405405- usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",406406- us->srb->result);407407- us->srb->scsi_done(us->srb);408408- } else {401401+ /* was the command aborted? */402402+ if (us->srb->result == DID_ABORT << 16) {409403SkipForAbort:410404 usb_stor_dbg(us, "scsi command aborted\n");405405+ srb = NULL; /* Don't call srb->scsi_done() */411406 }412407413408 /*···428429429430 /* unlock the device pointers */430431 mutex_unlock(&us->dev_mutex);432432+433433+ /* now that the locks are released, notify the SCSI core */434434+ if (srb) {435435+ usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",436436+ srb->result);437437+ srb->scsi_done(srb);438438+ }431439 } /* for (;;) */432440433441 /* Wait until we are told to stop */
···843843 * hibernation, system resume and during runtime PM transitions844844 * along with subsystem-level and driver-level callbacks.845845 * @pins: For device pin management.846846- * See Documentation/pinctrl.txt for details.846846+ * See Documentation/driver-api/pinctl.rst for details.847847 * @msi_list: Hosts MSI descriptors848848 * @msi_domain: The generic MSI domain this device is using.849849 * @numa_node: NUMA node this device is close to.
+2-1
include/linux/i2c.h
···689689#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */690690#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */691691#define I2C_CLASS_SPD (1<<7) /* Memory modules */692692-#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */692692+/* Warn users that the adapter doesn't support classes anymore */693693+#define I2C_CLASS_DEPRECATED (1<<8)693694694695/* Internal numbers to terminate lists */695696#define I2C_CLIENT_END 0xfffeU
+7
include/linux/iio/common/st_sensors.h
···105105 struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];106106};107107108108+struct st_sensor_sim {109109+ u8 addr;110110+ u8 value;111111+};112112+108113/**109114 * struct st_sensor_bdu - ST sensor device block data update110115 * @addr: address of the register.···202197 * @bdu: Block data update register.203198 * @das: Data Alignment Selection register.204199 * @drdy_irq: Data ready register of the sensor.200200+ * @sim: SPI serial interface mode register of the sensor.205201 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.206202 * @bootime: samples to discard when sensor passing from power-down to power-up.207203 */···219213 struct st_sensor_bdu bdu;220214 struct st_sensor_das das;221215 struct st_sensor_data_ready_irq drdy_irq;216216+ struct st_sensor_sim sim;222217 bool multi_read_bit;223218 unsigned int bootime;224219};
···487487 /* numa_scan_seq prevents two threads setting pte_numa */488488 int numa_scan_seq;489489#endif490490-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)491490 /*492491 * An operation with batched TLB flushing is going on. Anything that493492 * can move process memory needs to flush the TLB when moving a494493 * PROT_NONE or PROT_NUMA mapped page.495494 */496496- bool tlb_flush_pending;497497-#endif495495+ atomic_t tlb_flush_pending;498496#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH499497 /* See flush_tlb_batched_pending() */500498 bool tlb_flush_batched;···520522 return mm->cpu_vm_mask_var;521523}522524523523-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)525525+struct mmu_gather;526526+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,527527+ unsigned long start, unsigned long end);528528+extern void tlb_finish_mmu(struct mmu_gather *tlb,529529+ unsigned long start, unsigned long end);530530+524531/*525532 * Memory barriers to keep this state in sync are graciously provided by526533 * the page table locks, outside of which no page table modifications happen.527527- * The barriers below prevent the compiler from re-ordering the instructions528528- * around the memory barriers that are already present in the code.534534+ * The barriers are used to ensure that tlb_flush_pending updates,535535+ * which happen while the lock is not taken, and the PTE updates, which happen536536+ * while the lock is taken, are serialized.529537 */530538static inline bool mm_tlb_flush_pending(struct mm_struct *mm)531539{532532- barrier();533533- return mm->tlb_flush_pending;540540+ return atomic_read(&mm->tlb_flush_pending) > 0;534541}535535-static inline void set_tlb_flush_pending(struct mm_struct *mm)542542+543543+/*544544+ * Returns true if there are two or more TLB batching threads in parallel.545545+ */546546+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)536547{537537- mm->tlb_flush_pending = true;548548+ return atomic_read(&mm->tlb_flush_pending) > 1;549549+}550550+551551+static inline void init_tlb_flush_pending(struct mm_struct *mm)552552+{553553+ atomic_set(&mm->tlb_flush_pending, 0);554554+}555555+556556+static inline void inc_tlb_flush_pending(struct mm_struct *mm)557557+{558558+ atomic_inc(&mm->tlb_flush_pending);538559539560 /*540540- * Guarantee that the tlb_flush_pending store does not leak into the561561+ * Guarantee that the tlb_flush_pending increase does not leak into the541562 * critical section updating the page tables542563 */543564 smp_mb__before_spinlock();544565}566566+545567/* Clearing is done after a TLB flush, which also provides a barrier. */546546-static inline void clear_tlb_flush_pending(struct mm_struct *mm)568568+static inline void dec_tlb_flush_pending(struct mm_struct *mm)547569{548548- barrier();549549- mm->tlb_flush_pending = false;570570+ /*571571+ * Guarantee that the tlb_flush_pending does not leak into the572572+ * critical section, since we must order the PTE change and changes to573573+ * the pending TLB flush indication. 
We could have relied on TLB flush574574+ * as a memory barrier, but this behavior is not clearly documented.575575+ */576576+ smp_mb__before_atomic();577577+ atomic_dec(&mm->tlb_flush_pending);550578}551551-#else552552-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)553553-{554554- return false;555555-}556556-static inline void set_tlb_flush_pending(struct mm_struct *mm)557557-{558558-}559559-static inline void clear_tlb_flush_pending(struct mm_struct *mm)560560-{561561-}562562-#endif563579564580struct vm_fault;565581
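With tlb_flush_pending now a counter rather than a bool, one mm can have several batched flushes in flight at once; readers test "anything pending" with > 0 and "nested batching" with > 1. A minimal userspace model of the same accounting, using C11 atomics in place of the kernel's atomic_t (all names here are made up for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for mm_struct::tlb_flush_pending. */
static atomic_int tlb_flush_pending;

/* Mirrors mm_tlb_flush_pending(): at least one batched flush is in flight. */
static bool flush_pending(void)
{
	return atomic_load(&tlb_flush_pending) > 0;
}

/* Mirrors mm_tlb_flush_nested(): more than one batch is active, which is
 * the case where the final flush must be forced. */
static bool flush_nested(void)
{
	return atomic_load(&tlb_flush_pending) > 1;
}

static void begin_batch(void) { atomic_fetch_add(&tlb_flush_pending, 1); }
static void end_batch(void)   { atomic_fetch_sub(&tlb_flush_pending, 1); }

int main(void)
{
	begin_batch();		/* thread A starts a batched unmap */
	begin_batch();		/* thread B starts a second batch  */
	printf("pending=%d nested=%d\n", flush_pending(), flush_nested());
	end_batch();
	end_batch();
	return 0;
}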
···346346 * indicating an FC transport Aborted status.347347 * Entrypoint is Mandatory.348348 *349349+ * @defer_rcv: Called by the transport to signal the LLLD that it has350350+ * begun processing of a previously received NVME CMD IU. The LLDD351351+ * is now free to re-use the rcv buffer associated with the352352+ * nvmefc_tgt_fcp_req.353353+ *349354 * @max_hw_queues: indicates the maximum number of hw queues the LLDD350355 * supports for cpu affinitization.351356 * Value is Mandatory. Must be at least 1.···850845 void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,851846 struct nvmefc_tgt_fcp_req *fcpreq);852847 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,848848+ struct nvmefc_tgt_fcp_req *fcpreq);849849+ void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,853850 struct nvmefc_tgt_fcp_req *fcpreq);854851855852 u32 max_hw_queues;
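A hedged sketch of how a target LLDD might wire up the new callback, assuming the ops above belong to the target template struct declared in the same header; everything prefixed my_ is hypothetical. The point of defer_rcv() is only that the receive buffer behind the request may be reused once the transport has begun processing the command IU:

#include <linux/kernel.h>
#include <linux/nvme-fc-driver.h>

/* Hypothetical per-command context of the LLDD. */
struct my_cmd_ctx {
	struct nvmefc_tgt_fcp_req tgt_req;
	void *rcv_buf;
};

static void my_defer_rcv(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct my_cmd_ctx *ctx =
		container_of(fcpreq, struct my_cmd_ctx, tgt_req);

	/*
	 * The command IU is being processed by the transport now, so the
	 * receive buffer is free; a real driver would repost ctx->rcv_buf
	 * to its hardware here.
	 */
	ctx->rcv_buf = NULL;
}

/* Hooked up next to the other (mandatory) target entrypoints: */
static struct nvmet_fc_target_template my_tgt_template = {
	/* ...targetport_delete, fcp_op, fcp_abort, fcp_req_release... */
	.defer_rcv	= my_defer_rcv,
};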
···8181 * it.8282 * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a8383 * value on the line. Use argument 1 to indicate high level, argument 0 to8484- * indicate low level. (Please see Documentation/pinctrl.txt, section8585- * "GPIO mode pitfalls" for a discussion around this parameter.)8484+ * indicate low level. (Please see Documentation/driver-api/pinctl.rst,8585+ * section "GPIO mode pitfalls" for a discussion around this parameter.)8686 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power8787 * supplies, the argument to this parameter (on a custom format) tells8888 * the driver which alternative power source to use.
+2
include/linux/platform_data/st_sensors_pdata.h
···1717 * Available only for accelerometer and pressure sensors.1818 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).1919 * @open_drain: set the interrupt line to be open drain if possible.2020+ * @spi_3wire: enable spi-3wire mode.2021 */2122struct st_sensors_platform_data {2223 u8 drdy_int_pin;2324 bool open_drain;2525+ bool spi_3wire;2426};25272628#endif /* ST_SENSORS_PDATA_H */
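Board code can now ask for the 3-wire wiring through the same platform data it already uses for the DRDY pin; a short hypothetical example (the modalias, bus, and chip-select numbers are invented):

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/st_sensors_pdata.h>

/* Hypothetical board wiring: ST accelerometer on SPI with SDI/SDO shared. */
static struct st_sensors_platform_data board_accel_pdata = {
	.drdy_int_pin	= 1,
	.open_drain	= false,
	.spi_3wire	= true,		/* the new flag added above */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "lis3lv02d",		/* hypothetical */
		.platform_data	= &board_accel_pdata,
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 1,
	},
};

The machine init code would then hand this table to spi_register_board_info() as usual.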
+20
include/linux/ptp_clock_kernel.h
···9999 * parameter func: the desired function to use.100100 * parameter chan: the function channel index to use.101101 *102102+ * @do_aux_work: Request driver to perform auxiliary (periodic) operations.103103+ * The driver should return the delay until the next auxiliary work104104+ * scheduling time (>=0) or a negative value if further scheduling105105+ * is not required.106106+ *102107 * Drivers should embed their ptp_clock_info within a private103108 * structure, obtaining a reference to it using container_of().104109 *···131126 struct ptp_clock_request *request, int on);132127 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,133128 enum ptp_pin_function func, unsigned int chan);129129+ long (*do_aux_work)(struct ptp_clock_info *ptp);134130};135131136132struct ptp_clock;···217211int ptp_find_pin(struct ptp_clock *ptp,218212 enum ptp_pin_function func, unsigned int chan);219213214214+/**215215+ * ptp_schedule_worker() - schedule ptp auxiliary work216216+ *217217+ * @ptp: The clock obtained from ptp_clock_register().218218+ * @delay: number of jiffies to wait before queuing.219219+ * See kthread_queue_delayed_work() for more info.220220+ */221221+222222+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);223223+220224#else221225static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,222226 struct device *parent)···241225static inline int ptp_find_pin(struct ptp_clock *ptp,242226 enum ptp_pin_function func, unsigned int chan)243227{ return -1; }228228+static inline int ptp_schedule_worker(struct ptp_clock *ptp,229229+ unsigned long delay)230230+{ return -EOPNOTSUPP; }231231+244232#endif245233246234#endif
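A hedged sketch of a PHC driver using the new auxiliary worker: it fills in ->do_aux_work, registers the clock, and kicks the worker with ptp_schedule_worker(); everything prefixed my_ and the 100 ms period are invented for illustration.

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

struct my_phc {
	struct ptp_clock_info	caps;
	struct ptp_clock	*clock;
};

/* Hypothetical periodic chore, e.g. draining a timestamp FIFO. */
static void my_poll_timestamp_fifo(struct my_phc *phc)
{
}

/* Runs in the PTP auxiliary kthread; the return value is the next delay. */
static long my_do_aux_work(struct ptp_clock_info *info)
{
	struct my_phc *phc = container_of(info, struct my_phc, caps);

	my_poll_timestamp_fifo(phc);

	return msecs_to_jiffies(100);	/* run again in roughly 100 ms */
}

static const struct ptp_clock_info my_caps = {
	.owner		= THIS_MODULE,
	.name		= "my_phc",
	.max_adj	= 100000,
	.do_aux_work	= my_do_aux_work,
};

static int my_phc_init(struct my_phc *phc, struct device *dev)
{
	phc->caps = my_caps;
	phc->clock = ptp_clock_register(&phc->caps, dev);
	if (IS_ERR(phc->clock))
		return PTR_ERR(phc->clock);

	return ptp_schedule_worker(phc->clock, 0);	/* start right away */
}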
···171171 __u32 size; /* in, cmdstream size */172172 __u32 pad;173173 __u32 nr_relocs; /* in, number of submit_reloc's */174174- __u64 __user relocs; /* in, ptr to array of submit_reloc's */174174+ __u64 relocs; /* in, ptr to array of submit_reloc's */175175};176176177177/* Each buffer referenced elsewhere in the cmdstream submit (ie. the···215215 __u32 fence; /* out */216216 __u32 nr_bos; /* in, number of submit_bo's */217217 __u32 nr_cmds; /* in, number of submit_cmd's */218218- __u64 __user bos; /* in, ptr to array of submit_bo's */219219- __u64 __user cmds; /* in, ptr to array of submit_cmd's */218218+ __u64 bos; /* in, ptr to array of submit_bo's */219219+ __u64 cmds; /* in, ptr to array of submit_cmd's */220220 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */221221};222222
···670670 * this reference was taken by ihold under the page lock671671 * pinning the inode in place so i_lock was unnecessary. The672672 * only way for this check to fail is if the inode was673673- * truncated in parallel so warn for now if this happens.673673+ * truncated in parallel which is almost certainly an674674+ * application bug. In such a case, just retry.674675 *675676 * We are not calling into get_futex_key_refs() in file-backed676677 * cases, therefore a successful atomic_inc return below will677678 * guarantee that get_futex_key() will still imply smp_mb(); (B).678679 */679679- if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {680680+ if (!atomic_inc_not_zero(&inode->i_count)) {680681 rcu_read_unlock();681682 put_page(page);682683
···110110 if (in_task()) {111111 unsigned int fail_nth = READ_ONCE(current->fail_nth);112112113113- if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))114114- goto fail;113113+ if (fail_nth) {114114+ if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))115115+ goto fail;115116116116- return false;117117+ return false;118118+ }117119 }118120119121 /* No need to check any other properties if the probability is 0 */
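After the restructuring, the systematic fault path reads more directly: while fail_nth is armed, every candidate call decrements it, only the call that drives it to zero fails, and the probabilistic checks further down are skipped. A tiny userspace model of that countdown (single-threaded, so plain variables stand in for the READ_ONCE/WRITE_ONCE pair):

#include <stdbool.h>
#include <stdio.h>

static unsigned int fail_nth;	/* 0 = disarmed, N = fail the Nth call */

static bool should_fail(void)
{
	if (fail_nth) {
		if (--fail_nth == 0)
			return true;	/* this is the Nth call: inject a failure */
		return false;		/* armed, but not the Nth call yet */
	}
	return false;			/* disarmed: the probability path would run here */
}

int main(void)
{
	fail_nth = 3;
	for (int i = 1; i <= 4; i++)
		printf("call %d -> %s\n", i, should_fail() ? "fail" : "ok");
	return 0;
}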
+8-8
lib/test_kmod.c
···485485 config->test_driver);486486 else487487 len += snprintf(buf+len, PAGE_SIZE - len,488488- "driver:\tEMTPY\n");488488+ "driver:\tEMPTY\n");489489490490 if (config->test_fs)491491 len += snprintf(buf+len, PAGE_SIZE - len,···493493 config->test_fs);494494 else495495 len += snprintf(buf+len, PAGE_SIZE - len,496496- "fs:\tEMTPY\n");496496+ "fs:\tEMPTY\n");497497498498 mutex_unlock(&test_dev->config_mutex);499499···746746 strlen(test_str));747747 break;748748 case TEST_KMOD_FS_TYPE:749749- break;750749 kfree_const(config->test_fs);751750 config->test_driver = NULL;752751 copied = config_copy_test_fs(config, test_str,753752 strlen(test_str));753753+ break;754754 default:755755 mutex_unlock(&test_dev->config_mutex);756756 return -EINVAL;···880880 int (*test_sync)(struct kmod_test_device *test_dev))881881{882882 int ret;883883- long new;883883+ unsigned long new;884884 unsigned int old_val;885885886886- ret = kstrtol(buf, 10, &new);886886+ ret = kstrtoul(buf, 10, &new);887887 if (ret)888888 return ret;889889···918918 unsigned int max)919919{920920 int ret;921921- long new;921921+ unsigned long new;922922923923- ret = kstrtol(buf, 10, &new);923923+ ret = kstrtoul(buf, 10, &new);924924 if (ret)925925 return ret;926926···11461146 struct kmod_test_device *test_dev = NULL;11471147 int ret;1148114811491149- mutex_unlock(®_dev_mutex);11491149+ mutex_lock(®_dev_mutex);1150115011511151 /* int should suffice for number of devices, test for wrap */11521152 if (unlikely(num_test_devs + 1) < 0) {
···14961496 }1497149714981498 /*14991499+ * The page_table_lock above provides a memory barrier15001500+ * with change_protection_range.15011501+ */15021502+ if (mm_tlb_flush_pending(vma->vm_mm))15031503+ flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);15041504+15051505+ /*14991506 * Migrate the THP to the requested node, returns with page unlocked15001507 * and access rights restored.15011508 */
···215215 return true;216216}217217218218-/* tlb_gather_mmu219219- * Called to initialize an (on-stack) mmu_gather structure for page-table220220- * tear-down from @mm. The @fullmm argument is used when @mm is without221221- * users and we're going to destroy the full address space (exit/execve).222222- */223223-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)218218+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,219219+ unsigned long start, unsigned long end)224220{225221 tlb->mm = mm;226222···271275 * Called at the end of the shootdown operation to free up any resources272276 * that were required.273277 */274274-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)278278+void arch_tlb_finish_mmu(struct mmu_gather *tlb,279279+ unsigned long start, unsigned long end, bool force)275280{276281 struct mmu_gather_batch *batch, *next;282282+283283+ if (force)284284+ __tlb_adjust_range(tlb, start, end - start);277285278286 tlb_flush_mmu(tlb);279287···397397}398398399399#endif /* CONFIG_HAVE_RCU_TABLE_FREE */400400+401401+/* tlb_gather_mmu402402+ * Called to initialize an (on-stack) mmu_gather structure for page-table403403+ * tear-down from @mm. The @fullmm argument is used when @mm is without404404+ * users and we're going to destroy the full address space (exit/execve).405405+ */406406+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,407407+ unsigned long start, unsigned long end)408408+{409409+ arch_tlb_gather_mmu(tlb, mm, start, end);410410+ inc_tlb_flush_pending(tlb->mm);411411+}412412+413413+void tlb_finish_mmu(struct mmu_gather *tlb,414414+ unsigned long start, unsigned long end)415415+{416416+ /*417417+ * If parallel threads are doing PTE changes on the same range under a418418+ * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB flush419419+ * by batching, a thread that observes pte_none|!pte_dirty can skip420420+ * flushing a TLB entry that is in fact stale, so flush the TLB421421+ * forcefully if we detect parallel PTE batching threads.422422+ */423423+ bool force = mm_tlb_flush_nested(tlb->mm);424424+425425+ arch_tlb_finish_mmu(tlb, start, end, force);426426+ dec_tlb_flush_pending(tlb->mm);427427+}400428401429/*402430 * Note: this doesn't free the actual pages themselves. That
-6
mm/migrate.c
···19371937 put_page(new_page);19381938 goto out_fail;19391939 }19401940- /*19411941- * We are not sure a pending tlb flush here is for a huge page19421942- * mapping or not. Hence use the tlb range variant19431943- */19441944- if (mm_tlb_flush_pending(mm))19451945- flush_tlb_range(vma, mmun_start, mmun_end);1946194019471941 /* Prepare a page as a migration target */19481942 __SetPageLocked(new_page);
+2-2
mm/mprotect.c
···244244 BUG_ON(addr >= end);245245 pgd = pgd_offset(mm, addr);246246 flush_cache_range(vma, addr, end);247247- set_tlb_flush_pending(mm);247247+ inc_tlb_flush_pending(mm);248248 do {249249 next = pgd_addr_end(addr, end);250250 if (pgd_none_or_clear_bad(pgd))···256256 /* Only flush the TLB if we actually modified any entries: */257257 if (pages)258258 flush_tlb_range(vma, start, end);259259- clear_tlb_flush_pending(mm);259259+ dec_tlb_flush_pending(mm);260260261261 return pages;262262}
+6-5
mm/page_alloc.c
···44584458 * Part of the reclaimable slab consists of items that are in use,44594459 * and cannot be freed. Cap this estimate at the low watermark.44604460 */44614461- available += global_page_state(NR_SLAB_RECLAIMABLE) -44624462- min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);44614461+ available += global_node_page_state(NR_SLAB_RECLAIMABLE) -44624462+ min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,44634463+ wmark_low);4463446444644465 if (available < 0)44654466 available = 0;···46034602 global_node_page_state(NR_FILE_DIRTY),46044603 global_node_page_state(NR_WRITEBACK),46054604 global_node_page_state(NR_UNSTABLE_NFS),46064606- global_page_state(NR_SLAB_RECLAIMABLE),46074607- global_page_state(NR_SLAB_UNRECLAIMABLE),46054605+ global_node_page_state(NR_SLAB_RECLAIMABLE),46064606+ global_node_page_state(NR_SLAB_UNRECLAIMABLE),46084607 global_node_page_state(NR_FILE_MAPPED),46094608 global_node_page_state(NR_SHMEM),46104609 global_page_state(NR_PAGETABLE),···7669766876707669 /* Make sure the range is really isolated. */76717670 if (test_pages_isolated(outer_start, end, false)) {76727672- pr_info("%s: [%lx, %lx) PFNs busy\n",76717671+ pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",76737672 __func__, outer_start, end);76747673 ret = -EBUSY;76757674 goto done;
+30-22
mm/rmap.c
···888888 .flags = PVMW_SYNC,889889 };890890 int *cleaned = arg;891891+ bool invalidation_needed = false;891892892893 while (page_vma_mapped_walk(&pvmw)) {893894 int ret = 0;894894- address = pvmw.address;895895 if (pvmw.pte) {896896 pte_t entry;897897 pte_t *pte = pvmw.pte;···899899 if (!pte_dirty(*pte) && !pte_write(*pte))900900 continue;901901902902- flush_cache_page(vma, address, pte_pfn(*pte));903903- entry = ptep_clear_flush(vma, address, pte);902902+ flush_cache_page(vma, pvmw.address, pte_pfn(*pte));903903+ entry = ptep_clear_flush(vma, pvmw.address, pte);904904 entry = pte_wrprotect(entry);905905 entry = pte_mkclean(entry);906906- set_pte_at(vma->vm_mm, address, pte, entry);906906+ set_pte_at(vma->vm_mm, pvmw.address, pte, entry);907907 ret = 1;908908 } else {909909#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE···913913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))914914 continue;915915916916- flush_cache_page(vma, address, page_to_pfn(page));917917- entry = pmdp_huge_clear_flush(vma, address, pmd);916916+ flush_cache_page(vma, pvmw.address, page_to_pfn(page));917917+ entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);918918 entry = pmd_wrprotect(entry);919919 entry = pmd_mkclean(entry);920920- set_pmd_at(vma->vm_mm, address, pmd, entry);920920+ set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);921921 ret = 1;922922#else923923 /* unexpected pmd-mapped page? */···926926 }927927928928 if (ret) {929929- mmu_notifier_invalidate_page(vma->vm_mm, address);930929 (*cleaned)++;930930+ invalidation_needed = true;931931 }932932+ }933933+934934+ if (invalidation_needed) {935935+ mmu_notifier_invalidate_range(vma->vm_mm, address,936936+ address + (1UL << compound_order(page)));932937 }933938934939 return true;···13281323 };13291324 pte_t pteval;13301325 struct page *subpage;13311331- bool ret = true;13261326+ bool ret = true, invalidation_needed = false;13321327 enum ttu_flags flags = (enum ttu_flags)arg;1333132813341329 /* munlock has nothing to gain from examining un-locked vmas */···13681363 VM_BUG_ON_PAGE(!pvmw.pte, page);1369136413701365 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);13711371- address = pvmw.address;13721372-1373136613741367 if (!(flags & TTU_IGNORE_ACCESS)) {13751375- if (ptep_clear_flush_young_notify(vma, address,13681368+ if (ptep_clear_flush_young_notify(vma, pvmw.address,13761369 pvmw.pte)) {13771370 ret = false;13781371 page_vma_mapped_walk_done(&pvmw);···13791376 }1380137713811378 /* Nuke the page table entry. */13821382- flush_cache_page(vma, address, pte_pfn(*pvmw.pte));13791379+ flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));13831380 if (should_defer_flush(mm, flags)) {13841381 /*13851382 * We clear the PTE but do not flush so potentially···13891386 * transition on a cached TLB entry is written through13901387 * and traps if the PTE is unmapped.13911388 */13921392- pteval = ptep_get_and_clear(mm, address, pvmw.pte);13891389+ pteval = ptep_get_and_clear(mm, pvmw.address,13901390+ pvmw.pte);1393139113941392 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));13951393 } else {13961396- pteval = ptep_clear_flush(vma, address, pvmw.pte);13941394+ pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);13971395 }1398139613991397 /* Move the dirty bit to the page. Now the pte is gone. 
*/···14091405 if (PageHuge(page)) {14101406 int nr = 1 << compound_order(page);14111407 hugetlb_count_sub(nr, mm);14121412- set_huge_swap_pte_at(mm, address,14081408+ set_huge_swap_pte_at(mm, pvmw.address,14131409 pvmw.pte, pteval,14141410 vma_mmu_pagesize(vma));14151411 } else {14161412 dec_mm_counter(mm, mm_counter(page));14171417- set_pte_at(mm, address, pvmw.pte, pteval);14131413+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);14181414 }1419141514201416 } else if (pte_unused(pteval)) {···14381434 swp_pte = swp_entry_to_pte(entry);14391435 if (pte_soft_dirty(pteval))14401436 swp_pte = pte_swp_mksoft_dirty(swp_pte);14411441- set_pte_at(mm, address, pvmw.pte, swp_pte);14371437+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);14421438 } else if (PageAnon(page)) {14431439 swp_entry_t entry = { .val = page_private(subpage) };14441440 pte_t swp_pte;···14641460 * If the page was redirtied, it cannot be14651461 * discarded. Remap the page to page table.14661462 */14671467- set_pte_at(mm, address, pvmw.pte, pteval);14631463+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);14681464 SetPageSwapBacked(page);14691465 ret = false;14701466 page_vma_mapped_walk_done(&pvmw);···14721468 }1473146914741470 if (swap_duplicate(entry) < 0) {14751475- set_pte_at(mm, address, pvmw.pte, pteval);14711471+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);14761472 ret = false;14771473 page_vma_mapped_walk_done(&pvmw);14781474 break;···14881484 swp_pte = swp_entry_to_pte(entry);14891485 if (pte_soft_dirty(pteval))14901486 swp_pte = pte_swp_mksoft_dirty(swp_pte);14911491- set_pte_at(mm, address, pvmw.pte, swp_pte);14871487+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);14921488 } else14931489 dec_mm_counter(mm, mm_counter_file(page));14941490discard:14951491 page_remove_rmap(subpage, PageHuge(page));14961492 put_page(page);14971497- mmu_notifier_invalidate_page(mm, address);14931493+ invalidation_needed = true;14981494 }14951495+14961496+ if (invalidation_needed)14971497+ mmu_notifier_invalidate_range(mm, address,14981498+ address + (1UL << compound_order(page)));14991499 return ret;15001500}15011501
+10-2
mm/shmem.c
···10221022 */10231023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {10241024 spin_lock(&sbinfo->shrinklist_lock);10251025- if (list_empty(&info->shrinklist)) {10251025+ /*10261026+ * _careful to defend against unlocked access to10271027+ * ->shrink_list in shmem_unused_huge_shrink()10281028+ */10291029+ if (list_empty_careful(&info->shrinklist)) {10261030 list_add_tail(&info->shrinklist,10271031 &sbinfo->shrinklist);10281032 sbinfo->shrinklist_len++;···18211817 * to shrink under memory pressure.18221818 */18231819 spin_lock(&sbinfo->shrinklist_lock);18241824- if (list_empty(&info->shrinklist)) {18201820+ /*18211821+ * _careful to defend against unlocked access to18221822+ * ->shrink_list in shmem_unused_huge_shrink()18231823+ */18241824+ if (list_empty_careful(&info->shrinklist)) {18251825 list_add_tail(&info->shrinklist,18261826 &sbinfo->shrinklist);18271827 sbinfo->shrinklist_len++;
+1-1
mm/util.c
···633633 * which are reclaimable, under pressure. The dentry634634 * cache and most inode caches should fall into this635635 */636636- free += global_page_state(NR_SLAB_RECLAIMABLE);636636+ free += global_node_page_state(NR_SLAB_RECLAIMABLE);637637638638 /*639639 * Leave reserved pages. The pages are not for anonymous pages.
+51-9
net/batman-adv/translation-table.c
···15491549 return found;15501550}1551155115521552+/**15531553+ * batadv_tt_global_sync_flags - update TT sync flags15541554+ * @tt_global: the TT global entry to update sync flags in15551555+ *15561556+ * Updates the sync flag bits in the tt_global flag attribute with a logical15571557+ * OR of all sync flags from any of its TT orig entries.15581558+ */15591559+static void15601560+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)15611561+{15621562+ struct batadv_tt_orig_list_entry *orig_entry;15631563+ const struct hlist_head *head;15641564+ u16 flags = BATADV_NO_FLAGS;15651565+15661566+ rcu_read_lock();15671567+ head = &tt_global->orig_list;15681568+ hlist_for_each_entry_rcu(orig_entry, head, list)15691569+ flags |= orig_entry->flags;15701570+ rcu_read_unlock();15711571+15721572+ flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);15731573+ tt_global->common.flags = flags;15741574+}15751575+15761576+/**15771577+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry15781578+ * @tt_global: the TT global entry to add an orig entry in15791579+ * @orig_node: the originator to add an orig entry for15801580+ * @ttvn: translation table version number of this changeset15811581+ * @flags: TT sync flags15821582+ */15521583static void15531584batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,15541554- struct batadv_orig_node *orig_node, int ttvn)15851585+ struct batadv_orig_node *orig_node, int ttvn,15861586+ u8 flags)15551587{15561588 struct batadv_tt_orig_list_entry *orig_entry;15571589···15931561 * was added during a "temporary client detection"15941562 */15951563 orig_entry->ttvn = ttvn;15961596- goto out;15641564+ orig_entry->flags = flags;15651565+ goto sync_flags;15971566 }1598156715991568 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);···16061573 batadv_tt_global_size_inc(orig_node, tt_global->common.vid);16071574 orig_entry->orig_node = orig_node;16081575 orig_entry->ttvn = ttvn;15761576+ orig_entry->flags = flags;16091577 kref_init(&orig_entry->refcount);1610157816111579 spin_lock_bh(&tt_global->list_lock);···16161582 spin_unlock_bh(&tt_global->list_lock);16171583 atomic_inc(&tt_global->orig_list_count);1618158415851585+sync_flags:15861586+ batadv_tt_global_sync_flags(tt_global);16191587out:16201588 if (orig_entry)16211589 batadv_tt_orig_list_entry_put(orig_entry);···17391703 }1740170417411705 /* the change can carry possible "attribute" flags like the17421742- * TT_CLIENT_WIFI, therefore they have to be copied in the17061706+ * TT_CLIENT_TEMP, therefore they have to be copied in the17431707 * client entry17441708 */17451745- common->flags |= flags;17091709+ common->flags |= flags & (~BATADV_TT_SYNC_MASK);1746171017471711 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only17481712 * one originator left in the list and we previously received a···17591723 }17601724add_orig_entry:17611725 /* add the new orig_entry (if needed) or update it */17621762- batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);17261726+ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,17271727+ flags & BATADV_TT_SYNC_MASK);1763172817641729 batadv_dbg(BATADV_DBG_TT, bat_priv,17651730 "Creating new global tt entry: %pM (vid: %d, via %pM)\n",···19831946 struct batadv_tt_orig_list_entry *orig,19841947 bool best)19851948{19491949+ u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;19861950 void *hdr;19871951 struct batadv_orig_node_vlan *vlan;19881952 u8 last_ttvn;···20131975 
nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||20141976 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||20151977 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||20162016- nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))19781978+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))20171979 goto nla_put_failure;2018198020191981 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))···26272589 unsigned short vid)26282590{26292591 struct batadv_hashtable *hash = bat_priv->tt.global_hash;25922592+ struct batadv_tt_orig_list_entry *tt_orig;26302593 struct batadv_tt_common_entry *tt_common;26312594 struct batadv_tt_global_entry *tt_global;26322595 struct hlist_head *head;···26662627 /* find out if this global entry is announced by this26672628 * originator26682629 */26692669- if (!batadv_tt_global_entry_has_orig(tt_global,26702670- orig_node))26302630+ tt_orig = batadv_tt_global_orig_entry_find(tt_global,26312631+ orig_node);26322632+ if (!tt_orig)26712633 continue;2672263426732635 /* use network order to read the VID: this ensures that···26802640 /* compute the CRC on flags that have to be kept in sync26812641 * among nodes26822642 */26832683- flags = tt_common->flags & BATADV_TT_SYNC_MASK;26432643+ flags = tt_orig->flags;26842644 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));2685264526862646 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);26472647+26482648+ batadv_tt_orig_list_entry_put(tt_orig);26872649 }26882650 rcu_read_unlock();26892651 }
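With the flags moved into the orig entries, a global entry's sync flags are no longer stored authoritatively in common.flags; they are recomputed as the OR of every originator's flags, while the non-sync "attribute" bits the entry already had are preserved. A small userspace model of that recomputation (the mask value and the sample flag bits are hypothetical, only the masking scheme follows the patch):

#include <stdint.h>
#include <stdio.h>

#define TT_SYNC_MASK	0x00f0	/* hypothetical position of the sync bits */

static uint16_t recompute_flags(uint16_t common_flags,
				const uint16_t *orig_flags, int n)
{
	uint16_t flags = 0;

	for (int i = 0; i < n; i++)	/* OR the sync flags of every orig entry */
		flags |= orig_flags[i];

	/* keep the non-sync attribute bits the entry already carried */
	flags |= common_flags & ~TT_SYNC_MASK;
	return flags;
}

int main(void)
{
	uint16_t origs[] = { 0x0010, 0x0040 };	/* sync bits from two originators */

	printf("0x%04x\n", recompute_flags(0x0103, origs, 2));	/* prints 0x0153 */
	return 0;
}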
+2
net/batman-adv/types.h
···12601260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client12611261 * @orig_node: pointer to orig node announcing this non-mesh client12621262 * @ttvn: translation table version number which added the non-mesh client12631263+ * @flags: per orig entry TT sync flags12631264 * @list: list node for batadv_tt_global_entry::orig_list12641265 * @refcount: number of contexts the object is used12651266 * @rcu: struct used for freeing in an RCU-safe manner···12681267struct batadv_tt_orig_list_entry {12691268 struct batadv_orig_node *orig_node;12701269 u8 ttvn;12701270+ u8 flags;12711271 struct hlist_node list;12721272 struct kref refcount;12731273 struct rcu_head rcu;
···107107#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */108108#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */109109#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */110110+#define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */110111#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */111112#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */112113#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */···25212520 return;2522252125232522 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */25242524- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||25252525- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {25232523+ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&25242524+ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {25262525 tp->snd_cwnd = tp->snd_ssthresh;25272526 tp->snd_cwnd_stamp = tcp_jiffies32;25282527 }···30053004 /* Offset the time elapsed after installing regular RTO */30063005 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||30073006 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {30083008- struct sk_buff *skb = tcp_write_queue_head(sk);30093009- u64 rto_time_stamp = skb->skb_mstamp +30103010- jiffies_to_usecs(rto);30113011- s64 delta_us = rto_time_stamp - tp->tcp_mstamp;30073007+ s64 delta_us = tcp_rto_delta_us(sk);30123008 /* delta_us may not be positive if the socket is locked30133009 * when the retrans timer fires and is rescheduled.30143010 */···30153017 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,30163018 TCP_RTO_MAX);30173019 }30203020+}30213021+30223022+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */30233023+static void tcp_set_xmit_timer(struct sock *sk)30243024+{30253025+ if (!tcp_schedule_loss_probe(sk))30263026+ tcp_rearm_rto(sk);30183027}3019302830203029/* If we get here, the whole TSO packet has not been acked. */···31853180 ca_rtt_us, sack->rate);3186318131873182 if (flag & FLAG_ACKED) {31883188- tcp_rearm_rto(sk);31833183+ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */31893184 if (unlikely(icsk->icsk_mtup.probe_size &&31903185 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {31913186 tcp_mtup_probe_success(sk);···32133208 * after when the head was last (re)transmitted. Otherwise the32143209 * timeout may continue to extend in loss recovery.32153210 */32163216- tcp_rearm_rto(sk);32113211+ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */32173212 }3218321332193214 if (icsk->icsk_ca_ops->pkts_acked) {···35853580 if (after(ack, tp->snd_nxt))35863581 goto invalid_ack;3587358235883588- if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)35893589- tcp_rearm_rto(sk);35903590-35913583 if (after(ack, prior_snd_una)) {35923584 flag |= FLAG_SND_UNA_ADVANCED;35933585 icsk->icsk_retransmits = 0;···36493647 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,36503648 &sack_state);3651364936503650+ if (tp->tlp_high_seq)36513651+ tcp_process_tlp_ack(sk, ack, flag);36523652+ /* If needed, reset TLP/RTO timer; RACK may later override this. 
*/36533653+ if (flag & FLAG_SET_XMIT_TIMER)36543654+ tcp_set_xmit_timer(sk);36553655+36523656 if (tcp_ack_is_dubious(sk, flag)) {36533657 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));36543658 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);36553659 }36563656- if (tp->tlp_high_seq)36573657- tcp_process_tlp_ack(sk, ack, flag);3658366036593661 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))36603662 sk_dst_confirm(sk);3661366336623662- if (icsk->icsk_pending == ICSK_TIME_RETRANS)36633663- tcp_schedule_loss_probe(sk);36643664 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */36653665 lost = tp->lost - lost; /* freshly marked lost */36663666 tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+9-18
net/ipv4/tcp_output.c
···23772377{23782378 struct inet_connection_sock *icsk = inet_csk(sk);23792379 struct tcp_sock *tp = tcp_sk(sk);23802380- u32 timeout, tlp_time_stamp, rto_time_stamp;23812380 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);23812381+ u32 timeout, rto_delta_us;2382238223832383- /* No consecutive loss probes. */23842384- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {23852385- tcp_rearm_rto(sk);23862386- return false;23872387- }23882383 /* Don't do any loss probe on a Fast Open connection before 3WHS23892384 * finishes.23902385 */23912386 if (tp->fastopen_rsk)23922392- return false;23932393-23942394- /* TLP is only scheduled when next timer event is RTO. */23952395- if (icsk->icsk_pending != ICSK_TIME_RETRANS)23962387 return false;2397238823982389 /* Schedule a loss probe in 2*RTT for SACK capable connections···24082417 (rtt + (rtt >> 1) + TCP_DELACK_MAX));24092418 timeout = max_t(u32, timeout, msecs_to_jiffies(10));2410241924112411- /* If RTO is shorter, just schedule TLP in its place. */24122412- tlp_time_stamp = tcp_jiffies32 + timeout;24132413- rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;24142414- if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {24152415- s32 delta = rto_time_stamp - tcp_jiffies32;24162416- if (delta > 0)24172417- timeout = delta;24182418- }24202420+ /* If the RTO formula yields an earlier time, then use that time. */24212421+ rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */24222422+ if (rto_delta_us > 0)24232423+ timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));2419242424202425 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,24212426 TCP_RTO_MAX);···34363449 int err;3437345034383451 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);34523452+34533453+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))34543454+ return -EHOSTUNREACH; /* Routing failure or similar. */34553455+34393456 tcp_connect_init(sk);3440345734413458 if (unlikely(tp->repair)) {
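Taken together, the two TCP hunks arrange for the timer to be (re)armed from the ACK path via FLAG_SET_XMIT_TIMER, and tcp_schedule_loss_probe() now simply caps its RTT-based timeout by however far away the pending RTO is (tcp_rto_delta_us()), so the earlier of the two events is armed. A standalone arithmetic sketch of that clamping, with the kernel's srtt/jiffies bookkeeping simplified to plain microseconds and the delayed-ACK allowance folded into an extra RTT/2:

#include <stdint.h>
#include <stdio.h>

/* Simplified: the probe wants to fire after roughly 2.5 * RTT, at least
 * 10 ms out, but never later than the already scheduled RTO. */
static uint32_t probe_timeout_us(uint32_t srtt_us, uint32_t rto_delta_us)
{
	uint32_t timeout = 2 * srtt_us + (srtt_us >> 1);

	if (timeout < 10000)			/* at least 10 ms */
		timeout = 10000;
	if (rto_delta_us > 0 && rto_delta_us < timeout)
		timeout = rto_delta_us;		/* the RTO would fire first */
	return timeout;
}

int main(void)
{
	printf("%u\n", probe_timeout_us(20000, 80000));	/* 50000: probe earlier */
	printf("%u\n", probe_timeout_us(20000, 15000));	/* 15000: RTO earlier */
	return 0;
}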
···802802 if (is_udplite) /* UDP-Lite */803803 csum = udplite_csum(skb);804804805805- else if (sk->sk_no_check_tx) { /* UDP csum disabled */805805+ else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */806806807807 skb->ip_summed = CHECKSUM_NONE;808808 goto send;
+1-1
net/ipv4/udp_offload.c
···235235 if (uh->check == 0)236236 uh->check = CSUM_MANGLED_0;237237238238- skb->ip_summed = CHECKSUM_NONE;238238+ skb->ip_summed = CHECKSUM_UNNECESSARY;239239240240 /* If there is no outer header we can fake a checksum offload241241 * due to the fact that we have already done the checksum in
···7272 if (uh->check == 0)7373 uh->check = CSUM_MANGLED_0;74747575- skb->ip_summed = CHECKSUM_NONE;7575+ skb->ip_summed = CHECKSUM_UNNECESSARY;76767777 /* If there is no outer header we can fake a checksum offload7878 * due to the fact that we have already done the checksum in
+9-4
net/packet/af_packet.c
···3700370037013701 if (optlen != sizeof(val))37023702 return -EINVAL;37033703- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)37043704- return -EBUSY;37053703 if (copy_from_user(&val, optval, sizeof(val)))37063704 return -EFAULT;37073705 if (val > INT_MAX)37083706 return -EINVAL;37093709- po->tp_reserve = val;37103710- return 0;37073707+ lock_sock(sk);37083708+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {37093709+ ret = -EBUSY;37103710+ } else {37113711+ po->tp_reserve = val;37123712+ ret = 0;37133713+ }37143714+ release_sock(sk);37153715+ return ret;37113716 }37123717 case PACKET_LOSS:37133718 {
+4-1
net/rds/ib_recv.c
···10151015 if (rds_ib_ring_empty(&ic->i_recv_ring))10161016 rds_ib_stats_inc(s_ib_rx_ring_empty);1017101710181018- if (rds_ib_ring_low(&ic->i_recv_ring))10181018+ if (rds_ib_ring_low(&ic->i_recv_ring)) {10191019 rds_ib_recv_refill(conn, 0, GFP_NOWAIT);10201020+ rds_ib_stats_inc(s_ib_rx_refill_from_cq);10211021+ }10201022}1021102310221024int rds_ib_recv_path(struct rds_conn_path *cp)···10311029 if (rds_conn_up(conn)) {10321030 rds_ib_attempt_ack(ic);10331031 rds_ib_recv_refill(conn, 0, GFP_KERNEL);10321032+ rds_ib_stats_inc(s_ib_rx_refill_from_thread);10341033 }1035103410361035 return ret;
+11-11
net/sched/act_ipt.c
···3636static unsigned int xt_net_id;3737static struct tc_action_ops act_xt_ops;38383939-static int ipt_init_target(struct xt_entry_target *t, char *table,4040- unsigned int hook)3939+static int ipt_init_target(struct net *net, struct xt_entry_target *t,4040+ char *table, unsigned int hook)4141{4242 struct xt_tgchk_param par;4343 struct xt_target *target;···4949 return PTR_ERR(target);50505151 t->u.kernel.target = target;5252+ memset(&par, 0, sizeof(par));5353+ par.net = net;5254 par.table = table;5353- par.entryinfo = NULL;5455 par.target = target;5556 par.targinfo = t->data;5657 par.hook_mask = hook;···9291 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },9392};94939595-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,9494+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,9695 struct nlattr *est, struct tc_action **a,9796 const struct tc_action_ops *ops, int ovr, int bind)9897{9898+ struct tc_action_net *tn = net_generic(net, id);9999 struct nlattr *tb[TCA_IPT_MAX + 1];100100 struct tcf_ipt *ipt;101101 struct xt_entry_target *td, *t;···161159 if (unlikely(!t))162160 goto err2;163161164164- err = ipt_init_target(t, tname, hook);162162+ err = ipt_init_target(net, t, tname, hook);165163 if (err < 0)166164 goto err3;167165···195193 struct nlattr *est, struct tc_action **a, int ovr,196194 int bind)197195{198198- struct tc_action_net *tn = net_generic(net, ipt_net_id);199199-200200- return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);196196+ return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,197197+ bind);201198}202199203200static int tcf_xt_init(struct net *net, struct nlattr *nla,204201 struct nlattr *est, struct tc_action **a, int ovr,205202 int bind)206203{207207- struct tc_action_net *tn = net_generic(net, xt_net_id);208208-209209- return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);204204+ return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,205205+ bind);210206}211207212208static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
+1-3
net/tipc/node.c
···14551455 /* Initiate synch mode if applicable */14561456 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {14571457 syncpt = iseqno + exp_pkts - 1;14581458- if (!tipc_link_is_up(l)) {14591459- tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);14581458+ if (!tipc_link_is_up(l))14601459 __tipc_node_link_up(n, bearer_id, xmitq);14611461- }14621460 if (n->state == SELF_UP_PEER_UP) {14631461 n->sync_point = syncpt;14641462 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+64-23
scripts/get_maintainer.pl
···18181919use Getopt::Long qw(:config no_auto_abbrev);2020use Cwd;2121+use File::Find;21222223my $cur_path = fastgetcwd() . '/';2324my $lk_path = "./";···5958my $pattern_depth = 0;6059my $version = 0;6160my $help = 0;6161+my $find_maintainer_files = 0;62626363my $vcs_used = 0;6464···251249 'sections!' => \$sections,252250 'fe|file-emails!' => \$file_emails,253251 'f|file' => \$from_filename,252252+ 'find-maintainer-files' => \$find_maintainer_files,254253 'v|version' => \$version,255254 'h|help|usage' => \$help,256255 )) {···310307311308my @typevalue = ();312309my %keyword_hash;310310+my @mfiles = ();313311314314-open (my $maint, '<', "${lk_path}MAINTAINERS")315315- or die "$P: Can't open MAINTAINERS: $!\n";316316-while (<$maint>) {317317- my $line = $_;312312+sub read_maintainer_file {313313+ my ($file) = @_;318314319319- if ($line =~ m/^([A-Z]):\s*(.*)/) {320320- my $type = $1;321321- my $value = $2;315315+ open (my $maint, '<', "$file")316316+ or die "$P: Can't open MAINTAINERS file '$file': $!\n";317317+ while (<$maint>) {318318+ my $line = $_;322319323323- ##Filename pattern matching324324- if ($type eq "F" || $type eq "X") {325325- $value =~ s@\.@\\\.@g; ##Convert . to \.326326- $value =~ s/\*/\.\*/g; ##Convert * to .*327327- $value =~ s/\?/\./g; ##Convert ? to .328328- ##if pattern is a directory and it lacks a trailing slash, add one329329- if ((-d $value)) {330330- $value =~ s@([^/])$@$1/@;320320+ if ($line =~ m/^([A-Z]):\s*(.*)/) {321321+ my $type = $1;322322+ my $value = $2;323323+324324+ ##Filename pattern matching325325+ if ($type eq "F" || $type eq "X") {326326+ $value =~ s@\.@\\\.@g; ##Convert . to \.327327+ $value =~ s/\*/\.\*/g; ##Convert * to .*328328+ $value =~ s/\?/\./g; ##Convert ? to .329329+ ##if pattern is a directory and it lacks a trailing slash, add one330330+ if ((-d $value)) {331331+ $value =~ s@([^/])$@$1/@;332332+ }333333+ } elsif ($type eq "K") {334334+ $keyword_hash{@typevalue} = $value;331335 }332332- } elsif ($type eq "K") {333333- $keyword_hash{@typevalue} = $value;336336+ push(@typevalue, "$type:$value");337337+ } elsif (!(/^\s*$/ || /^\s*\#/)) {338338+ $line =~ s/\n$//g;339339+ push(@typevalue, $line);334340 }335335- push(@typevalue, "$type:$value");336336- } elsif (!/^(\s)*$/) {337337- $line =~ s/\n$//g;338338- push(@typevalue, $line);341341+ }342342+ close($maint);343343+}344344+345345+sub find_is_maintainer_file {346346+ my ($file) = $_;347347+ return if ($file !~ m@/MAINTAINERS$@);348348+ $file = $File::Find::name;349349+ return if (! 
-f $file);350350+ push(@mfiles, $file);351351+}352352+353353+sub find_ignore_git {354354+ return grep { $_ !~ /^\.git$/; } @_;355355+}356356+357357+if (-d "${lk_path}MAINTAINERS") {358358+ opendir(DIR, "${lk_path}MAINTAINERS") or die $!;359359+ my @files = readdir(DIR);360360+ closedir(DIR);361361+ foreach my $file (@files) {362362+ push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);339363 }340364}341341-close($maint);342365366366+if ($find_maintainer_files) {367367+ find( { wanted => \&find_is_maintainer_file,368368+ preprocess => \&find_ignore_git,369369+ no_chdir => 1,370370+ }, "${lk_path}");371371+} else {372372+ push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";373373+}374374+375375+foreach my $file (@mfiles) {376376+ read_maintainer_file("$file");377377+}343378344379#345380# Read mail address map···914873 if ( (-f "${lk_path}COPYING")915874 && (-f "${lk_path}CREDITS")916875 && (-f "${lk_path}Kbuild")917917- && (-f "${lk_path}MAINTAINERS")876876+ && (-e "${lk_path}MAINTAINERS")918877 && (-f "${lk_path}Makefile")919878 && (-f "${lk_path}README")920879 && (-d "${lk_path}Documentation")
+73-22
scripts/parse-maintainers.pl
···2233use strict;4455-my %map;55+my $P = $0;6677-# sort comparison function77+# sort comparison functions88sub by_category($$) {99 my ($a, $b) = @_;1010···1515 $a =~ s/THE REST/ZZZZZZ/g;1616 $b =~ s/THE REST/ZZZZZZ/g;17171818- $a cmp $b;1818+ return $a cmp $b;1919}20202121-sub alpha_output {2222- my $key;2323- my $sort_method = \&by_category;2424- my $sep = "";2121+sub by_pattern($$) {2222+ my ($a, $b) = @_;2323+ my $preferred_order = 'MRPLSWTQBCFXNK';25242626- foreach $key (sort $sort_method keys %map) {2727- if ($key ne " ") {2828- print $sep . $key . "\n";2929- $sep = "\n";3030- }3131- print $map{$key};2525+ my $a1 = uc(substr($a, 0, 1));2626+ my $b1 = uc(substr($b, 0, 1));2727+2828+ my $a_index = index($preferred_order, $a1);2929+ my $b_index = index($preferred_order, $b1);3030+3131+ $a_index = 1000 if ($a_index == -1);3232+ $b_index = 1000 if ($b_index == -1);3333+3434+ if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||3535+ ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {3636+ return $a cmp $b;3737+ }3838+3939+ if ($a_index < $b_index) {4040+ return -1;4141+ } elsif ($a_index == $b_index) {4242+ return 0;4343+ } else {4444+ return 1;3245 }3346}3447···5239 return $s;5340}54414242+sub alpha_output {4343+ my ($hashref, $filename) = (@_);4444+4545+ open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n";4646+ foreach my $key (sort by_category keys %$hashref) {4747+ if ($key eq " ") {4848+ chomp $$hashref{$key};4949+ print $file $$hashref{$key};5050+ } else {5151+ print $file "\n" . $key . "\n";5252+ foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {5353+ print $file ($pattern . "\n");5454+ }5555+ }5656+ }5757+ close($file);5858+}5959+5560sub file_input {6161+ my ($hashref, $filename) = (@_);6262+5663 my $lastline = "";5764 my $case = " ";5858- $map{$case} = "";6565+ $$hashref{$case} = "";59666060- while (<>) {6767+ open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n";6868+6969+ while (<$file>) {6170 my $line = $_;62716372 # Pattern line?6473 if ($line =~ m/^([A-Z]):\s*(.*)/) {6574 $line = $1 . ":\t" . trim($2) . "\n";6675 if ($lastline eq "") {6767- $map{$case} = $map{$case} . $line;7676+ $$hashref{$case} = $$hashref{$case} . $line;6877 next;6978 }7079 $case = trim($lastline);7171- exists $map{$case} and die "Header '$case' already exists";7272- $map{$case} = $line;8080+ exists $$hashref{$case} and die "Header '$case' already exists";8181+ $$hashref{$case} = $line;7382 $lastline = "";7483 next;7584 }76857786 if ($case eq " ") {7878- $map{$case} = $map{$case} . $lastline;8787+ $$hashref{$case} = $$hashref{$case} . $lastline;7988 $lastline = $line;8089 next;8190 }8291 trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'");8392 $lastline = $line;8493 }8585- $map{$case} = $map{$case} . $lastline;9494+ $$hashref{$case} = $$hashref{$case} . $lastline;9595+ close($file);8696}87978888-&file_input;8989-&alpha_output;9898+my %hash;9999+my %new_hash;100100+101101+file_input(\%hash, "MAINTAINERS");102102+103103+foreach my $type (@ARGV) {104104+ foreach my $key (keys %hash) {105105+ if ($key =~ /$type/ || $hash{$key} =~ /$type/) {106106+ $new_hash{$key} = $hash{$key};107107+ delete $hash{$key};108108+ }109109+ }110110+}111111+112112+alpha_output(\%hash, "MAINTAINERS.new");113113+alpha_output(\%new_hash, "SECTION.new");114114+90115exit(0);
+2
tools/build/feature/test-bpf.c
···1111# define __NR_bpf 2801212# elif defined(__sparc__)1313# define __NR_bpf 3491414+# elif defined(__s390__)1515+# define __NR_bpf 3511416# else1517# error __NR_bpf not defined. libbpf does not support your arch.1618# endif
+2
tools/lib/bpf/bpf.c
···3939# define __NR_bpf 2804040# elif defined(__sparc__)4141# define __NR_bpf 3494242+# elif defined(__s390__)4343+# define __NR_bpf 3514244# else4345# error __NR_bpf not defined. libbpf does not support your arch.4446# endif