···
Macbook Pro 17", iMac 20" :
	video=efifb:i20

+Accepted options:
+
+nowc	Don't map the framebuffer write combined. This can be used
+	to workaround side-effects and slowdowns on other CPU cores
+	when large amounts of console data are written.
+
--
Edgar Hucek <gimli@dark-green.com>
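As a usage illustration (not part of the patch itself): per the documentation above, the new option would be passed on the kernel command line in the same way as the existing efifb options, e.g.

	video=efifb:nowc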
+1-1
Documentation/gpio/gpio-legacy.txt
···

This is done by registering "ranges" of pins, which are essentially
cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst

While the pin allocation is totally managed by the pinctrl subsystem,
gpio (under gpiolib) is still maintained by gpio drivers. It may happen
···
}

static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
···
}

static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
{
+	if (force) {
+		tlb->range_start = start;
+		tlb->range_end = end;
+	}
+
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
+6-2
arch/ia64/include/asm/tlb.h
···

static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
···
 * collected.
 */
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
{
+	if (force)
+		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
+1-1
arch/mips/Kconfig
···
config MIPS_MT_SMP
	bool "MIPS MT SMP support (1 TC on each available VPE)"
-	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
+	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
	select CPU_MIPSR2_IRQ_VI
	select CPU_MIPSR2_IRQ_EI
	select SYNC_R4K
···
ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
endif
+
+entry-noisa-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
		| grep "\bkernel_entry\b" | cut -f1 -d \ )
+ifdef CONFIG_CPU_MICROMIPS
+  #
+  # Set the ISA bit, since the kernel_entry symbol in the ELF will have it
+  # clear which would lead to images containing addresses which bootloaders may
+  # jump to as MIPS32 code.
+  #
+  entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
+            $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
+            $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
+else
+  entry-y = $(entry-noisa-y)
+endif

cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
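For illustration only (the address below is hypothetical): the nested patsubst calls map an even trailing hex digit to the next odd one (0→1, 2→3, ... e→f), which is equivalent to setting bit 0 of the entry address. A kernel_entry of 0x8010e000 would therefore yield entry-y = 0x8010e001, while entry-noisa-y keeps the raw 0x8010e000.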
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_ERR	(CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FUS3	(CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
+union cvmx_l2d_err {
+	uint64_t u64;
+	struct cvmx_l2d_err_s {
+		__BITFIELD_FIELD(uint64_t reserved_6_63:58,
+		__BITFIELD_FIELD(uint64_t bmhclsel:1,
+		__BITFIELD_FIELD(uint64_t ded_err:1,
+		__BITFIELD_FIELD(uint64_t sec_err:1,
+		__BITFIELD_FIELD(uint64_t ded_intena:1,
+		__BITFIELD_FIELD(uint64_t sec_intena:1,
+		__BITFIELD_FIELD(uint64_t ecc_ena:1,
+		;)))))))
+	} s;
+};
+
+union cvmx_l2d_fus3 {
+	uint64_t u64;
+	struct cvmx_l2d_fus3_s {
+		__BITFIELD_FIELD(uint64_t reserved_40_63:24,
+		__BITFIELD_FIELD(uint64_t ema_ctl:3,
+		__BITFIELD_FIELD(uint64_t reserved_34_36:3,
+		__BITFIELD_FIELD(uint64_t q3fus:34,
+		;))))
+	} s;
+};
+
+#endif
···
	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

-	complete(&cpu_running);
-	synchronise_count_slave(cpu);
-
	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();
+
+	complete(&cpu_running);
+	synchronise_count_slave(cpu);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
···
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

-	/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
-	li	r7,MSR_FP
+	andi.	r0,r8,MSR_FP
+	beq	2f
#ifdef CONFIG_ALTIVEC
-	oris	r7,r7,MSR_VEC@h
+	andis.	r0,r8,MSR_VEC@h
+	bne	3f
#endif
-	and	r0,r8,r7
-	cmpd	r0,r7
-	bne	.Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2:	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+	li	r10,MSR_RI
+	mtmsrd	r10,1		/* Restore RI */
+#endif
+	bl	restore_math
+#ifdef CONFIG_PPC_BOOK3S
+	li	r11,0
+	mtmsrd	r11,1
+#endif
+	ld	r8,_MSR(r1)
+	ld	r3,RESULT(r1)
+	li	r11,-MAX_ERRNO

-	cmpld	r3,r11
+3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
···
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont
-
-.Lsyscall_restore_math:
-	/*
-	 * Some initial tests from restore_math to avoid the heavyweight
-	 * C code entry and MSR manipulations.
-	 */
-	LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
-	and.	r0,r0,r8
-	bne	1f
-
-	ld	r7,PACACURRENT(r13)
-	lbz	r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
-	lbz	r6,THREAD+THREAD_LOAD_VEC(r7)
-	add	r0,r0,r6
-#endif
-	cmpdi	r0,0
-	beq	.Lsyscall_restore_math_cont
-
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-	li	r10,MSR_RI
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	bl	restore_math
-#ifdef CONFIG_PPC_BOOK3S
-	li	r11,0
-	mtmsrd	r11,1
-#endif
-	/* Restore volatiles, reload MSR from updated one */
-	ld	r8,_MSR(r1)
-	ld	r3,RESULT(r1)
-	li	r11,-MAX_ERRNO
-	b	.Lsyscall_restore_math_cont

/* Traced system call support */
.Lsyscall_dotrace:
-4
arch/powerpc/kernel/process.c
···
{
	unsigned long msr;

-	/*
-	 * Syscall exit makes a similar initial check before branching
-	 * to restore_math. Keep them in synch.
-	 */
	if (!msr_tm_active(regs->msr) &&
	    !current->thread.load_fp && !loadvec(current->thread))
		return;
···
	 * This may be called from low level interrupt handlers at some
	 * point in future.
	 */
-	local_irq_save(*flags);
-	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
-		cpu_relax();
+	raw_local_irq_save(*flags);
+	hard_irq_disable(); /* Make it soft-NMI safe */
+	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+		raw_local_irq_restore(*flags);
+		spin_until_cond(!test_bit(0, &__wd_smp_lock));
+		raw_local_irq_save(*flags);
+		hard_irq_disable();
+	}
}

static inline void wd_smp_unlock(unsigned long *flags)
{
	clear_bit_unlock(0, &__wd_smp_lock);
-	local_irq_restore(*flags);
+	raw_local_irq_restore(*flags);
}

static void wd_lockup_ipi(struct pt_regs *regs)
···
	nmi_panic(regs, "Hard LOCKUP");
}

-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
-	cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
-	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		wd_smp_last_reset_tb = tb;
		cpumask_andnot(&wd_smp_cpus_pending,
				&wd_cpus_enabled,
				&wd_smp_cpus_stuck);
	}
+}
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+	set_cpumask_stuck(cpumask_of(cpu), tb);
}

static void watchdog_smp_panic(int cpu, u64 tb)
···
	}
	smp_flush_nmi_ipi(1000000);

-	/* Take the stuck CPU out of the watch group */
-	for_each_cpu(c, &wd_smp_cpus_pending)
-		set_cpu_stuck(c, tb);
+	/* Take the stuck CPUs out of the watch group */
+	set_cpumask_stuck(&wd_smp_cpus_pending, tb);

-out:
	wd_smp_unlock(&flags);

	printk_safe_flush();
···

	if (hardlockup_panic)
		nmi_panic(NULL, "Hard LOCKUP");
+
+	return;
+
+out:
+	wd_smp_unlock(&flags);
}

static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
···

void arch_touch_nmi_watchdog(void)
{
+	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
	int cpu = smp_processor_id();

-	watchdog_timer_interrupt(cpu);
+	if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+		watchdog_timer_interrupt(cpu);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
···

static int start_wd_on_cpu(unsigned int cpu)
{
+	unsigned long flags;
+
	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
		WARN_ON(1);
		return 0;
···
	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return 0;

+	wd_smp_lock(&flags);
	cpumask_set_cpu(cpu, &wd_cpus_enabled);
	if (cpumask_weight(&wd_cpus_enabled) == 1) {
		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
		wd_smp_last_reset_tb = get_tb();
	}
-	smp_wmb();
+	wd_smp_unlock(&flags);
+
	start_watchdog_timer_on(cpu);

	return 0;
···

static int stop_wd_on_cpu(unsigned int cpu)
{
+	unsigned long flags;
+
	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return 0; /* Can happen in CPU unplug case */

	stop_watchdog_timer_on(cpu);

+	wd_smp_lock(&flags);
	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+	wd_smp_unlock(&flags);
+
	wd_smp_clear_cpu_pending(cpu, get_tb());

	return 0;
···
	case SUN4V_CHIP_NIAGARA5:
	case SUN4V_CHIP_SPARC_M6:
	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
	case SUN4V_CHIP_SPARC64X:
		rover_inc_table = niagara_iterate_method;
···
		break;
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_SN:
-	default:
		/* M7 and later support 52-bit virtual addresses.  */
		sparc64_va_hole_top =    0xfff8000000000000UL;
		sparc64_va_hole_bottom = 0x0008000000000000UL;
		max_phys_bits = 49;
+		break;
+	case SUN4V_CHIP_SPARC_M8:
+	default:
+		/* M8 and later support 54-bit virtual addresses.
+		 * However, restricting M8 and above VA bits to 53
+		 * as 4-level page table cannot support more than
+		 * 53 VA bits.
+		 */
+		sparc64_va_hole_top =    0xfff0000000000000UL;
+		sparc64_va_hole_bottom = 0x0010000000000000UL;
+		max_phys_bits = 51;
		break;
	}
}
···
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		pagecv_flag = 0x00;
		break;
···
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		page_cache4v_flag = _PAGE_CP_4V;
		break;
+10-3
arch/um/include/asm/tlb.h
···
}

static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
···
	tlb_flush_mmu_free(tlb);
}

-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
{
+	if (force) {
+		tlb->start = start;
+		tlb->end = end;
+		tlb->need_flush = 1;
+	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
···
	/* pin current vcpu to specified physical cpu (run rarely) */
	void (*pin_vcpu)(int);
+
+	/* called during init_mem_mapping() to setup early mappings. */
+	void (*init_mem_mapping)(void);
};

extern const struct hypervisor_x86 *x86_hyper;
···
extern void init_hypervisor_platform(void);
extern bool hypervisor_x2apic_available(void);
extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+	if (x86_hyper && x86_hyper->init_mem_mapping)
+		x86_hyper->init_mem_mapping();
+}
#else
static inline void init_hypervisor_platform(void) { }
static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
#endif /* CONFIG_HYPERVISOR_GUEST */
#endif /* _ASM_X86_HYPERVISOR_H */
+3
arch/x86/mm/init.c
···
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
+#include <asm/hypervisor.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
···

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
+
+	hypervisor_init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}
+37-22
arch/x86/xen/enlighten_hvm.c
···
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
+#include <asm/early_ioremap.h>

#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
···
#include "mmu.h"
#include "smp.h"

-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
{
	struct xen_add_to_physmap xatp;
-	u64 pa;
-
-	if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
-		/*
-		 * Search for a free page starting at 4kB physical address.
-		 * Low memory is preferred to avoid an EPT large page split up
-		 * by the mapping.
-		 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
-		 * the BIOS used for HVM guests is well behaved and won't
-		 * clobber memory other than the first 4kB.
-		 */
-		for (pa = PAGE_SIZE;
-		     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
-		     memblock_is_reserved(pa);
-		     pa += PAGE_SIZE)
-			;
-
-		memblock_reserve(pa, PAGE_SIZE);
-		HYPERVISOR_shared_info = __va(pa);
-	}

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+	xatp.gpfn = shared_info_pfn;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();
+}
+
+static void __init reserve_shared_info(void)
+{
+	u64 pa;
+
+	/*
+	 * Search for a free page starting at 4kB physical address.
+	 * Low memory is preferred to avoid an EPT large page split up
+	 * by the mapping.
+	 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+	 * the BIOS used for HVM guests is well behaved and won't
+	 * clobber memory other than the first 4kB.
+	 */
+	for (pa = PAGE_SIZE;
+	     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+	     memblock_is_reserved(pa);
+	     pa += PAGE_SIZE)
+		;
+
+	shared_info_pfn = PHYS_PFN(pa);
+
+	memblock_reserve(pa, PAGE_SIZE);
+	HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+	early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
}

static void __init init_hvm_pv_info(void)
···

	init_hvm_pv_info();

+	reserve_shared_info();
	xen_hvm_init_shared_info();

	/*
···
	.init_platform          = xen_hvm_guest_init,
	.pin_vcpu               = xen_pin_vcpu,
	.x2apic_available       = xen_x2apic_para_available,
+	.init_mem_mapping	= xen_hvm_init_mem_mapping,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_XTENSA_DEVICE_H
-#define _ASM_XTENSA_DEVICE_H
-
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_XTENSA_DEVICE_H */
-18
arch/xtensa/include/asm/param.h
-/*
- * include/asm-xtensa/param.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _XTENSA_PARAM_H
-#define _XTENSA_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ		CONFIG_HZ	/* internal timer frequency */
-# define USER_HZ	100		/* for user interfaces in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ)	/* frequnzy at which times() counts */
-#endif /* _XTENSA_PARAM_H */
···
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
+EXPORT_SYMBOL(clear_user_highpage);

void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
···
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(copy_user_highpage);

/*
 * Any time the kernel writes to a user page cache page, or it is about to
···

	/* There shouldn't be an entry in the cache for this page anymore. */
}
-
+EXPORT_SYMBOL(flush_dcache_page);

/*
 * For now, flush the whole cache. FIXME??
···
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
+EXPORT_SYMBOL(local_flush_cache_range);

/* 
 * Remove any entry in the cache for this page. 
···
	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
+EXPORT_SYMBOL(local_flush_cache_page);

-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
···

	flush_tlb_page(vma, addr);

-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
···
 * flush_dcache_page() on the page.
 */

-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
+17-5
block/bfq-iosched.h
···
 *
 * bfq_sched_data is the basic scheduler queue.  It supports three
 * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup.  @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
 *
 * The supported ioprio_classes are the same as in CFQ, in descending
 * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
 * Requests from higher priority queues are served before all the
 * requests from lower priority queues; among requests of the same
 * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occurs
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed by
+ * in_service_entity is not a queue, then the in-service child entity
+ * of the entity pointed by in_service_entity becomes idle on
+ * expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entitities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
 */
struct bfq_sched_data {
	/* entity in service */
+81-65
block/bfq-wf2q.c
···

/*
 * This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
+ * service, according to the restrictive definition of the field
+ * next_in_service. In particular, this function is invoked for an
+ * entity that is about to be set in service.
 *
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
+ * If entity is a queue, then the entity is no longer a candidate for
+ * next service according to that definition, because entity is
+ * about to become the in-service queue. This function then returns
+ * true if entity is a queue.
 *
- * In contrast, the entity could stil be a candidate for next service
- * if it is not a queue, and has more than one child. In fact, even if
- * one of its children is about to be set in service, other children
- * may still be the next to serve. As a consequence, a non-queue
- * entity is not a candidate for next-service only if it has only one
- * child. And only if this condition holds, then the function returns
- * true for a non-queue entity.
+ * In contrast, entity could still be a candidate for next service if
+ * it is not a queue, and has more than one active child. In fact,
+ * even if one of its children is about to be set in service, other
+ * active children may still be the next to serve, for the parent
+ * entity, even according to the above definition. As a consequence, a
+ * non-queue entity is not a candidate for next-service only if it has
+ * only one active child. And only if this condition holds, then this
+ * function returns true for a non-queue entity.
 */
static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
{
···
	bfqg = container_of(entity, struct bfq_group, entity);

+	/*
+	 * The field active_entities does not always contain the
+	 * actual number of active children entities: it happens to
+	 * not account for the in-service entity in case the latter is
+	 * removed from its active tree (which may get done after
+	 * invoking the function bfq_no_longer_next_in_service in
+	 * bfq_get_next_queue). Fortunately, here, i.e., while
+	 * bfq_no_longer_next_in_service is not yet completed in
+	 * bfq_get_next_queue, bfq_active_extract has not yet been
+	 * invoked, and thus active_entities still coincides with the
+	 * actual number of active entities.
+	 */
	if (bfqg->active_entities == 1)
		return true;

···
 * one of its children receives a new request.
 *
 * Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possible extracting it
+ * inserts entity into its active tree, ater possibly extracting it
 * from its idle tree.
 */
static void __bfq_activate_entity(struct bfq_entity *entity,
···
		entity->start = entity->finish;
		/*
		 * In addition, if the entity had more than one child
-		 * when set in service, then was not extracted from
+		 * when set in service, then it was not extracted from
		 * the active tree. This implies that the position of
		 * the entity in the active tree may need to be
		 * changed now, because we have just updated the start
···
		 * time in a moment (the requeueing is then, more
		 * precisely, a repositioning in this case). To
		 * implement this repositioning, we: 1) dequeue the
-		 * entity here, 2) update the finish time and
-		 * requeue the entity according to the new
-		 * timestamps below.
+		 * entity here, 2) update the finish time and requeue
+		 * the entity according to the new timestamps below.
		 */
		if (entity->tree)
			bfq_active_extract(st, entity);
···

/**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *			 and activate, requeue or reposition all ancestors
- *			 for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ *				 bfq_queue, and activate, requeue or reposition
+ *				 all ancestors for which such an update becomes
+ *				 necessary.
 * @entity: the entity to activate.
 * @non_blocking_wait_rq: true if this entity was waiting for a request
 * @requeue: true if this is a requeue, which implies that bfqq is
···
 * @ins_into_idle_tree: if false, the entity will not be put into the
 *			idle tree.
 *
- * Deactivates an entity, independently from its previous state.  Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * Deactivates an entity, independently of its previous state.  Must
+ * be invoked only if entity is on a service tree. Extracts the entity
+ * from that tree, and if necessary and allowed, puts it into the idle
 * tree.
 */
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
···
	st = bfq_entity_service_tree(entity);
	is_in_service = entity == sd->in_service_entity;

-	if (is_in_service)
+	if (is_in_service) {
		bfq_calc_finish(entity, entity->service);
+		sd->in_service_entity = NULL;
+	}

	if (entity->tree == &st->active)
		bfq_active_extract(st, entity);
···
/**
 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
 * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
 */
static void bfq_deactivate_entity(struct bfq_entity *entity,
				  bool ins_into_idle_tree,
···
		 */
		bfq_update_next_in_service(sd, NULL);

-		if (sd->next_in_service)
+		if (sd->next_in_service || sd->in_service_entity) {
			/*
-			 * The parent entity is still backlogged,
-			 * because next_in_service is not NULL. So, no
-			 * further upwards deactivation must be
-			 * performed. Yet, next_in_service has
-			 * changed. Then the schedule does need to be
-			 * updated upwards.
+			 * The parent entity is still active, because
+			 * either next_in_service or in_service_entity
+			 * is not NULL. So, no further upwards
+			 * deactivation must be performed. Yet,
+			 * next_in_service has changed. Then the
+			 * schedule does need to be updated upwards.
+			 *
+			 * NOTE If in_service_entity is not NULL, then
+			 * next_in_service may happen to be NULL,
+			 * although the parent entity is evidently
+			 * active. This happens if 1) the entity
+			 * pointed by in_service_entity is the only
+			 * active entity in the parent entity, and 2)
+			 * according to the definition of
+			 * next_in_service, the in_service_entity
+			 * cannot be considered as
+			 * next_in_service. See the comments on the
+			 * definition of next_in_service for details.
			 */
			break;
+		}

		/*
		 * If we get here, then the parent is no more
···

		/*
		 * If entity is no longer a candidate for next
-		 * service, then we extract it from its active tree,
-		 * for the following reason. To further boost the
-		 * throughput in some special case, BFQ needs to know
-		 * which is the next candidate entity to serve, while
-		 * there is already an entity in service. In this
-		 * respect, to make it easy to compute/update the next
-		 * candidate entity to serve after the current
-		 * candidate has been set in service, there is a case
-		 * where it is necessary to extract the current
-		 * candidate from its service tree. Such a case is
-		 * when the entity just set in service cannot be also
-		 * a candidate for next service. Details about when
-		 * this conditions holds are reported in the comments
-		 * on the function bfq_no_longer_next_in_service()
-		 * invoked below.
+		 * service, then it must be extracted from its active
+		 * tree, so as to make sure that it won't be
+		 * considered when computing next_in_service. See the
+		 * comments on the function
+		 * bfq_no_longer_next_in_service() for details.
		 */
		if (bfq_no_longer_next_in_service(entity))
			bfq_active_extract(bfq_entity_service_tree(entity),
					   entity);

		/*
-		 * For the same reason why we may have just extracted
-		 * entity from its active tree, we may need to update
-		 * next_in_service for the sched_data of entity too,
-		 * regardless of whether entity has been extracted.
-		 * In fact, even if entity has not been extracted, a
-		 * descendant entity may get extracted. Such an event
-		 * would cause a change in next_in_service for the
-		 * level of the descendant entity, and thus possibly
-		 * back to upper levels.
+		 * Even if entity is not to be extracted according to
+		 * the above check, a descendant entity may get
+		 * extracted in one of the next iterations of this
+		 * loop. Such an event could cause a change in
+		 * next_in_service for the level of the descendant
+		 * entity, and thus possibly back to this level.
		 *
-		 * We cannot perform the resulting needed update
-		 * before the end of this loop, because, to know which
-		 * is the correct next-to-serve candidate entity for
-		 * each level, we need first to find the leaf entity
-		 * to set in service. In fact, only after we know
-		 * which is the next-to-serve leaf entity, we can
-		 * discover whether the parent entity of the leaf
-		 * entity becomes the next-to-serve, and so on.
+		 * However, we cannot perform the resulting needed
+		 * update of next_in_service for this level before the
+		 * end of the whole loop, because, to know which is
+		 * the correct next-to-serve candidate entity for each
+		 * level, we need first to find the leaf entity to set
+		 * in service. In fact, only after we know which is
+		 * the next-to-serve leaf entity, we can discover
+		 * whether the parent entity of the leaf entity
+		 * becomes the next-to-serve, and so on.
		 */
-
	}

	bfqq = bfq_entity_to_bfqq(entity);
···
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
+	struct blk_mq_ctx *local_ctx = NULL;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
+		data->ctx = local_ctx = blk_mq_get_ctx(q);
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
···

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
+		if (local_ctx) {
+			blk_mq_put_ctx(local_ctx);
+			data->ctx = NULL;
+		}
		blk_queue_exit(q);
		return NULL;
	}
···

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

-	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
-
	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);
+
+	blk_mq_put_ctx(alloc_data.ctx);
+	blk_queue_exit(q);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
···

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

-	blk_queue_exit(q);
-
	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);
+
+	blk_queue_exit(q);

	return rq;
}
···
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
-	kblockd_schedule_delayed_work(&q->requeue_work,
-				      msecs_to_jiffies(msecs));
+	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+34-2
drivers/acpi/spcr.c
···
#include <linux/serial_core.h>

/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
 * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
 * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
 * quirk detection in pci_mcfg.c.
···
		goto done;
	}

-	if (qdf2400_erratum_44_present(&table->header))
-		uart = "qdf2400_e44";
+	/*
+	 * If the E44 erratum is required, then we need to tell the pl011
+	 * driver to implement the work-around.
+	 *
+	 * The global variable is used by the probe function when it
+	 * creates the UARTs, whether or not they're used as a console.
+	 *
+	 * If the user specifies "traditional" earlycon, the qdf2400_e44
+	 * console name matches the EARLYCON_DECLARE() statement, and
+	 * SPCR is not used.  Parameter "earlycon" is false.
+	 *
+	 * If the user specifies "SPCR" earlycon, then we need to update
+	 * the console name so that it also says "qdf2400_e44".  Parameter
+	 * "earlycon" is true.
+	 *
+	 * For consistency, if we change the console name, then we do it
+	 * for everyone, not just earlycon.
+	 */
+	if (qdf2400_erratum_44_present(&table->header)) {
+		qdf2400_e44_present = true;
+		if (earlycon)
+			uart = "qdf2400_e44";
+	}
+
	if (xgene_8250_erratum_present(table))
		iotype = "mmio32";
+34-15
drivers/base/firmware_class.c
···
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
-#include <linux/swait.h>

#include <generated/utsrelease.h>
···
 * state of the firmware loading.
 */
struct fw_state {
-	struct swait_queue_head wq;
+	struct completion completion;
	enum fw_status status;
};

static void fw_state_init(struct fw_state *fw_st)
{
-	init_swait_queue_head(&fw_st->wq);
+	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}
···
{
	long ret;

-	ret = swait_event_interruptible_timeout(fw_st->wq,
-				__fw_state_is_done(READ_ONCE(fw_st->status)),
-				timeout);
+	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
		return -ENOENT;
	if (!ret)
···
	WRITE_ONCE(fw_st->status, status);

	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-		swake_up(&fw_st->wq);
+		complete_all(&fw_st->completion);
}

#define fw_state_start(fw_st)					\
	__fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st)					\
	__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st)					\
	__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
-
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st)	false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */

static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
{
	return fw_st->status == status;
}
+
+#define fw_state_is_aborted(fw_st)	\
+	__fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER

#define fw_state_aborted(fw_st)					\
	__fw_state_set(fw_st, FW_STATUS_ABORTED)
···
	__fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st)	\
	__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st)	\
-	__fw_state_check(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait_timeout(fw_st, timeout)	\
	__fw_state_wait_common(fw_st, timeout)
···
	return 1; /* need to load */
}

+/*
+ * Batched requests need only one wake, we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+	struct firmware_buf *buf;
+
+	/* Loaded directly? */
+	if (!fw || !fw->priv)
+		return;
+
+	buf = fw->priv;
+	if (!fw_state_is_aborted(&buf->fw_st))
+		fw_state_aborted(&buf->fw_st);
+}
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
···

 out:
	if (ret < 0) {
+		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}
+61
drivers/block/sunvdc.c
···
	printk(KERN_INFO "%s", version);
}

+struct vdc_check_port_data {
+	int	dev_no;
+	char	*type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+	struct vio_dev *vdev = to_vio_dev(dev);
+	struct vdc_check_port_data *port_data;
+
+	port_data = (struct vdc_check_port_data *)arg;
+
+	if ((vdev->dev_no == port_data->dev_no) &&
+	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
+		dev_get_drvdata(dev)) {
+		/* This device has already been configured
+		 * by vdc_port_probe()
+		 */
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+	struct vdc_check_port_data port_data;
+	struct device *dev;
+
+	port_data.dev_no = vdev->dev_no;
+	port_data.type = (char *)&vdev->type;
+
+	dev = device_find_child(vdev->dev.parent, &port_data,
+				vdc_device_probed);
+
+	if (dev)
+		return true;
+
+	return false;
+}
+
static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
···
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
+		goto err_out_release_mdesc;
+	}
+
+	/* Check if this device is part of an mpgroup */
+	if (vdc_port_mpgroup_check(vdev)) {
+		printk(KERN_WARNING
+			"VIO: Ignoring extra vdisk port %s",
+			dev_name(&vdev->dev));
		goto err_out_release_mdesc;
	}
···
	if (err)
		goto err_out_free_tx_ring;

+	/* Note that the device driver_data is used to determine
+	 * whether the port has been probed.
+	 */
	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);
···
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
-	pr_notice("random: %s called from %pF with crng_init=%d\n",
+	pr_notice("random: %s called from %pS with crng_init=%d\n",
		  func_name, caller, crng_init);
}
+10
drivers/cpuidle/cpuidle-powernv.c
···
	return -1;
}

+extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
	struct device_node *power_mgt;
···
	const char *names[CPUIDLE_STATE_MAX];
	u32 has_stop_states = 0;
	int i, rc;
+	u32 supported_flags = pnv_get_supported_cpuidle_states();
+

	/* Currently we have snooze statically defined */
···
	for (i = 0; i < dt_idle_states; i++) {
		unsigned int exit_latency, target_residency;
		bool stops_timebase = false;
+
+		/*
+		 * Skip the platform idle state whose flag isn't in
+		 * the supported_cpuidle_states flag mask.
+		 */
+		if ((flags[i] & supported_flags) != flags[i])
+			continue;
		/*
		 * If an idle state has exit latency beyond
		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
+4-4
drivers/crypto/inside-secure/safexcel_hash.c
···
	if (ret)
		return ret;

-	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
-	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
-	for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
			ctx->base.needs_inv = true;
			break;
		}
	}
+
+	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

	return 0;
}
+3-2
drivers/dma-buf/sync_file.c
···
{
	struct sync_file *sync_file = file->private_data;

-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
	dma_fence_put(sync_file->fence);
	kfree(sync_file);
···

	poll_wait(file, &sync_file->wq, wait);

-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
					   fence_check_cb_func) < 0)
			wake_up_all(&sync_file->wq);
+1-1
drivers/gpu/drm/bridge/tc358767.c
···

	/* port@2 is the output port */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-	if (ret)
+	if (ret && ret != -ENODEV)
		return ret;

	/* Shut down GPIO is optional */
+2-2
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
···270 if (ret)271 return ret;272273- if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {274- DRM_ERROR("relocation %u outside object", i);275 return -EINVAL;276 }277
···270 if (ret)271 return ret;272273+ if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {274+ DRM_ERROR("relocation %u outside object\n", i);275 return -EINVAL;276 }277
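The old bound rejected the last valid relocation slot: for an object of size S, a word-sized relocation may start at any offset from 0 up to and including S - sizeof(u32), so only offsets strictly greater than that are out of range. A small runnable check of the two comparisons:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size = 4096;                       /* object size in bytes */
        uint32_t last = size - sizeof(uint32_t);    /* 4092: last valid offset */

        /* old check: offset >= size - sizeof(u32) rejects 4092, which is valid */
        /* new check: offset >  size - sizeof(u32) rejects 4093 and above only */
        printf("offset %u: old=%s new=%s\n", last,
               (last >= size - sizeof(uint32_t)) ? "rejected" : "ok",
               (last >  size - sizeof(uint32_t)) ? "rejected" : "ok");
        return 0;
    }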
+13-1
drivers/gpu/drm/exynos/exynos_drm_fb.c
···145exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,146 const struct drm_mode_fb_cmd2 *mode_cmd)147{0148 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];149 struct drm_gem_object *obj;150 struct drm_framebuffer *fb;151 int i;152 int ret;153154- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {00000155 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);156 if (!obj) {157 DRM_ERROR("failed to lookup gem object\n");···166 }167168 exynos_gem[i] = to_exynos_gem(obj);000000169 }170171 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
···145exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,146 const struct drm_mode_fb_cmd2 *mode_cmd)147{148+ const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);149 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];150 struct drm_gem_object *obj;151 struct drm_framebuffer *fb;152 int i;153 int ret;154155+ for (i = 0; i < info->num_planes; i++) {156+ unsigned int height = (i == 0) ? mode_cmd->height :157+ DIV_ROUND_UP(mode_cmd->height, info->vsub);158+ unsigned long size = height * mode_cmd->pitches[i] +159+ mode_cmd->offsets[i];160+161 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);162 if (!obj) {163 DRM_ERROR("failed to lookup gem object\n");···160 }161162 exynos_gem[i] = to_exynos_gem(obj);163+164+ if (size > exynos_gem[i]->size) {165+ i++;166+ ret = -EINVAL;167+ goto err;168+ }169 }170171 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
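The added validation computes, per plane, the smallest GEM buffer that can back the framebuffer: plane height (divided by the vertical subsampling factor for chroma planes) times the pitch, plus the plane's byte offset. A standalone sketch of that arithmetic, using made-up NV12-like numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned long plane_min_size(unsigned int height, unsigned int pitch,
                                        unsigned int offset, unsigned int vsub,
                                        int plane)
    {
        unsigned int h = plane ? DIV_ROUND_UP(height, vsub) : height;

        return (unsigned long)h * pitch + offset;
    }

    int main(void)
    {
        /* e.g. a 1920x1080 two-plane layout: full-height luma, half-height chroma */
        printf("plane0 needs %lu bytes\n", plane_min_size(1080, 1920, 0, 2, 0));
        printf("plane1 needs %lu bytes\n", plane_min_size(1080, 1920, 0, 2, 1));
        return 0;
    }

If the backing object is smaller than this figure, scanout would eventually walk past the end of the buffer, which is exactly what the new -EINVAL path prevents.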
···46#define same_context(a, b) (((a)->context_id == (b)->context_id) && \47 ((a)->lrca == (b)->lrca))4849+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);50+51static int context_switch_events[] = {52 [RCS] = RCS_AS_CONTEXT_SWITCH,53 [BCS] = BCS_AS_CONTEXT_SWITCH,···499static int complete_execlist_workload(struct intel_vgpu_workload *workload)500{501 struct intel_vgpu *vgpu = workload->vgpu;502+ int ring_id = workload->ring_id;503+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];504 struct intel_vgpu_workload *next_workload;505+ struct list_head *next = workload_q_head(vgpu, ring_id)->next;506 bool lite_restore = false;507 int ret;508···512 release_shadow_batch_buffer(workload);513 release_shadow_wa_ctx(&workload->wa_ctx);514515+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {516+ /* if workload->status is not successful means HW GPU517+ * has occurred GPU hang or something wrong with i915/GVT,518+ * and GVT won't inject context switch interrupt to guest.519+ * So this error is a vGPU hang actually to the guest.520+ * According to this we should emunlate a vGPU hang. If521+ * there are pending workloads which are already submitted522+ * from guest, we should clean them up like HW GPU does.523+ *524+ * if it is in middle of engine resetting, the pending525+ * workloads won't be submitted to HW GPU and will be526+ * cleaned up during the resetting process later, so doing527+ * the workload clean up here doesn't have any impact.528+ **/529+ clean_workloads(vgpu, ENGINE_MASK(ring_id));530 goto out;531+ }532533+ if (!list_empty(workload_q_head(vgpu, ring_id))) {534 struct execlist_ctx_descriptor_format *this_desc, *next_desc;535536 next_workload = container_of(next,
+10-1
drivers/gpu/drm/i915/gvt/firmware.c
···72 struct intel_gvt_device_info *info = &gvt->device_info;73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev;74 struct intel_gvt_mmio_info *e;0075 struct gvt_firmware_header *h;76 void *firmware;77 void *p;78 unsigned long size, crc32_start;79- int i;80 int ret;8182 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;···106107 hash_for_each(gvt->mmio.mmio_info_table, i, e, node)108 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));0000000109110 memcpy(gvt->firmware.mmio, p, info->mmio_size);111
···72 struct intel_gvt_device_info *info = &gvt->device_info;73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev;74 struct intel_gvt_mmio_info *e;75+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;76+ int num = gvt->mmio.num_mmio_block;77 struct gvt_firmware_header *h;78 void *firmware;79 void *p;80 unsigned long size, crc32_start;81+ int i, j;82 int ret;8384 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;···104105 hash_for_each(gvt->mmio.mmio_info_table, i, e, node)106 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));107+108+ for (i = 0; i < num; i++, block++) {109+ for (j = 0; j < block->size; j += 4)110+ *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =111+ I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(112+ block->offset) + j));113+ }114115 memcpy(gvt->firmware.mmio, p, info->mmio_size);116
+13-1
drivers/gpu/drm/i915/gvt/gvt.h
···149 bool active;150 bool pv_notified;151 bool failsafe;152- bool resetting;153 void *sched_data;154 struct vgpu_sched_ctl sched_ctl;155···195 unsigned long vgpu_allocated_fence_num;196};197000000000198#define INTEL_GVT_MMIO_HASH_BITS 11199200struct intel_gvt_mmio {···222#define F_CMD_ACCESSED (1 << 5)223/* This reg could be accessed by unaligned address */224#define F_UNALIGN (1 << 6)000225226 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);227 unsigned int num_tracked_mmio;
···149 bool active;150 bool pv_notified;151 bool failsafe;152+ unsigned int resetting_eng;153 void *sched_data;154 struct vgpu_sched_ctl sched_ctl;155···195 unsigned long vgpu_allocated_fence_num;196};197198+/* Special MMIO blocks. */199+struct gvt_mmio_block {200+ unsigned int device;201+ i915_reg_t offset;202+ unsigned int size;203+ gvt_mmio_func read;204+ gvt_mmio_func write;205+};206+207#define INTEL_GVT_MMIO_HASH_BITS 11208209struct intel_gvt_mmio {···213#define F_CMD_ACCESSED (1 << 5)214/* This reg could be accessed by unaligned address */215#define F_UNALIGN (1 << 6)216+217+ struct gvt_mmio_block *mmio_block;218+ unsigned int num_mmio_block;219220 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);221 unsigned int num_tracked_mmio;
+18-20
drivers/gpu/drm/i915/gvt/handlers.c
···2857 return 0;2858}28592860-/* Special MMIO blocks. */2861-static struct gvt_mmio_block {2862- unsigned int device;2863- i915_reg_t offset;2864- unsigned int size;2865- gvt_mmio_func read;2866- gvt_mmio_func write;2867-} gvt_mmio_blocks[] = {2868- {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},2869- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},2870- {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,2871- pvinfo_mmio_read, pvinfo_mmio_write},2872- {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},2873- {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},2874- {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},2875-};2876-2877static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,2878 unsigned int offset)2879{2880 unsigned long device = intel_gvt_get_device_type(gvt);2881- struct gvt_mmio_block *block = gvt_mmio_blocks;02882 int i;28832884- for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {2885 if (!(device & block->device))2886 continue;2887 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&···2895 vfree(gvt->mmio.mmio_attribute);2896 gvt->mmio.mmio_attribute = NULL;2897}0000000000028982899/**2900 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device···2945 if (ret)2946 goto err;2947 }00029482949 gvt_dbg_mmio("traced %u virtual mmio registers\n",2950 gvt->mmio.num_tracked_mmio);···3028 gvt_mmio_func func;3029 int ret;30303031- if (WARN_ON(bytes > 4))3032 return -EINVAL;30333034 /*
···2857 return 0;2858}2859000000000000000002860static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,2861 unsigned int offset)2862{2863 unsigned long device = intel_gvt_get_device_type(gvt);2864+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;2865+ int num = gvt->mmio.num_mmio_block;2866 int i;28672868+ for (i = 0; i < num; i++, block++) {2869 if (!(device & block->device))2870 continue;2871 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&···2911 vfree(gvt->mmio.mmio_attribute);2912 gvt->mmio.mmio_attribute = NULL;2913}2914+2915+/* Special MMIO blocks. */2916+static struct gvt_mmio_block mmio_blocks[] = {2917+ {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},2918+ {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},2919+ {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,2920+ pvinfo_mmio_read, pvinfo_mmio_write},2921+ {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},2922+ {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},2923+ {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},2924+};29252926/**2927 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device···2950 if (ret)2951 goto err;2952 }2953+2954+ gvt->mmio.mmio_block = mmio_blocks;2955+ gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);29562957 gvt_dbg_mmio("traced %u virtual mmio registers\n",2958 gvt->mmio.num_tracked_mmio);···3030 gvt_mmio_func func;3031 int ret;30323033+ if (WARN_ON(bytes > 8))3034 return -EINVAL;30353036 /*
+2-1
drivers/gpu/drm/i915/gvt/scheduler.c
···432433 i915_gem_request_put(fetch_and_zero(&workload->req));434435- if (!workload->status && !vgpu->resetting) {0436 update_guest_context(workload);437438 for_each_set_bit(event, workload->pending_events,
···398 }399400 /* Program the max register to clamp values > 1.0. */0401 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),402 drm_color_lut_extract(lut[i].red, 16));403 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
···398 }399400 /* Program the max register to clamp values > 1.0. */401+ i = lut_size - 1;402 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),403 drm_color_lut_extract(lut[i].red, 16));404 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
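The one-line fix matters because once the preceding programming loop finishes, its index equals the loop bound, so reusing it would read one entry past the end of the gamma LUT; resetting it to lut_size - 1 picks the last valid entry instead. A runnable illustration of the pattern with a toy array:

    #include <stdio.h>

    int main(void)
    {
        int lut[4] = { 10, 20, 30, 40 };
        int lut_size = 4;
        int i;

        for (i = 0; i < lut_size; i++)
            ;   /* ... program entry i ... */

        /* here i == lut_size, so lut[i] would be out of bounds */
        i = lut_size - 1;   /* clamp to the last valid entry */
        printf("max register programmed from lut[%d] = %d\n", i, lut[i]);
        return 0;
    }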
···502 const char *name, bool mandatory)503{504 struct device *dev = &pdev->dev;505- struct clk *clk = devm_clk_get(dev, name);506 if (IS_ERR(clk) && mandatory) {507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));508 return PTR_ERR(clk);···887 }888889 /* mandatory clocks: */890- ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);891 if (ret)892 goto fail;893- ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);894 if (ret)895 goto fail;896- ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);897 if (ret)898 goto fail;899- ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);900 if (ret)901 goto fail;902903 /* optional clocks: */904- get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);905906 /* we need to set a default rate before enabling. Set a safe907 * rate first, then figure out hw revision, and then set a
···502 const char *name, bool mandatory)503{504 struct device *dev = &pdev->dev;505+ struct clk *clk = msm_clk_get(pdev, name);506 if (IS_ERR(clk) && mandatory) {507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));508 return PTR_ERR(clk);···887 }888889 /* mandatory clocks: */890+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);891 if (ret)892 goto fail;893+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);894 if (ret)895 goto fail;896+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);897 if (ret)898 goto fail;899+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);900 if (ret)901 goto fail;902903 /* optional clocks: */904+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);905906 /* we need to set a default rate before enabling. Set a safe907 * rate first, then figure out hw revision, and then set a
···383 struct page **pages;384385 vma = add_vma(obj, aspace);386- if (IS_ERR(vma))387- return PTR_ERR(vma);00388389 pages = get_pages(obj);390 if (IS_ERR(pages)) {···407408fail:409 del_vma(vma);410-411 mutex_unlock(&msm_obj->lock);412 return ret;413}···930 if (use_vram) {931 struct msm_gem_vma *vma;932 struct page **pages;000933934 vma = add_vma(obj, NULL);0935 if (IS_ERR(vma)) {936 ret = PTR_ERR(vma);937 goto fail;
···383 struct page **pages;384385 vma = add_vma(obj, aspace);386+ if (IS_ERR(vma)) {387+ ret = PTR_ERR(vma);388+ goto unlock;389+ }390391 pages = get_pages(obj);392 if (IS_ERR(pages)) {···405406fail:407 del_vma(vma);408+unlock:409 mutex_unlock(&msm_obj->lock);410 return ret;411}···928 if (use_vram) {929 struct msm_gem_vma *vma;930 struct page **pages;931+ struct msm_gem_object *msm_obj = to_msm_bo(obj);932+933+ mutex_lock(&msm_obj->lock);934935 vma = add_vma(obj, NULL);936+ mutex_unlock(&msm_obj->lock);937 if (IS_ERR(vma)) {938 ret = PTR_ERR(vma);939 goto fail;
+3-3
drivers/gpu/drm/msm/msm_gem_submit.c
···34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)35{36 struct msm_gem_submit *submit;37- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +38- (nr_cmds * sizeof(submit->cmd[0]));3940 if (sz > SIZE_MAX)41 return NULL;···451 if (ret)452 goto out;453454- if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {455 ret = submit_fence_sync(submit);456 if (ret)457 goto out;
···34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)35{36 struct msm_gem_submit *submit;37+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +38+ ((u64)nr_cmds * sizeof(submit->cmd[0]));3940 if (sz > SIZE_MAX)41 return NULL;···451 if (ret)452 goto out;453454+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {455 ret = submit_fence_sync(submit);456 if (ret)457 goto out;
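The (u64) casts force the allocation-size arithmetic into 64 bits before the SIZE_MAX comparison; without them, a large nr_bos or nr_cmds could overflow the 32-bit multiplication and wrap to a small value that passes the check. A runnable illustration (the element size is just an example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t nr = 0x20000000;   /* attacker-controlled count */
        uint32_t elem = 32;         /* an example per-element size */

        uint32_t wrapped = nr * elem;            /* 32-bit multiply: wraps to 0 */
        uint64_t exact = (uint64_t)nr * elem;    /* 64-bit multiply: 0x400000000 */

        printf("32-bit product: %u\n", wrapped);
        printf("64-bit product: %llu\n", (unsigned long long)exact);
        return 0;
    }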
+1-1
drivers/gpu/drm/msm/msm_gem_vma.c
···42msm_gem_unmap_vma(struct msm_gem_address_space *aspace,43 struct msm_gem_vma *vma, struct sg_table *sgt)44{45- if (!vma->iova)46 return;4748 if (aspace->mmu) {
···42msm_gem_unmap_vma(struct msm_gem_address_space *aspace,43 struct msm_gem_vma *vma, struct sg_table *sgt)44{45+ if (!aspace || !vma->iova)46 return;4748 if (aspace->mmu) {
+2
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
···267 /* Create output path objects for each VBIOS display path. */268 i = -1;269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {00270 if (dcbE.type == DCB_OUTPUT_UNUSED)271 continue;272 if (dcbE.type == DCB_OUTPUT_EOL)
···267 /* Create output path objects for each VBIOS display path. */268 i = -1;269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {270+ if (ver < 0x40) /* No support for chipsets prior to NV50. */271+ break;272 if (dcbE.type == DCB_OUTPUT_UNUSED)273 continue;274 if (dcbE.type == DCB_OUTPUT_EOL)
+20-21
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
···500static int vop_enable(struct drm_crtc *crtc)501{502 struct vop *vop = to_vop(crtc);503- int ret;504505 ret = pm_runtime_get_sync(vop->dev);506 if (ret < 0) {···533 }534535 memcpy(vop->regs, vop->regsbak, vop->len);00000000000000536 vop_cfg_done(vop);537538 /*···580static void vop_crtc_disable(struct drm_crtc *crtc)581{582 struct vop *vop = to_vop(crtc);583- int i;584585 WARN_ON(vop->event);586587 rockchip_drm_psr_deactivate(&vop->crtc);588-589- /*590- * We need to make sure that all windows are disabled before we591- * disable that crtc. Otherwise we might try to scan from a destroyed592- * buffer later.593- */594- for (i = 0; i < vop->data->win_size; i++) {595- struct vop_win *vop_win = &vop->win[i];596- const struct vop_win_data *win = vop_win->data;597-598- spin_lock(&vop->reg_lock);599- VOP_WIN_SET(vop, win, enable, 0);600- spin_unlock(&vop->reg_lock);601- }602-603- vop_cfg_done(vop);604605 drm_crtc_vblank_off(crtc);606···679 * Src.x1 can be odd when do clip, but yuv plane start point680 * need align with 2 pixel.681 */682- if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))0683 return -EINVAL;0684685 return 0;686}···763 spin_lock(&vop->reg_lock);764765 VOP_WIN_SET(vop, win, format, format);766- VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);767 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);768 if (is_yuv_support(fb->format->format)) {769 int hsub = drm_format_horz_chroma_subsampling(fb->format->format);···777 offset += (src->y1 >> 16) * fb->pitches[1] / vsub;778779 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];780- VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);781 VOP_WIN_SET(vop, win, uv_mst, dma_addr);782 }783
···500static int vop_enable(struct drm_crtc *crtc)501{502 struct vop *vop = to_vop(crtc);503+ int ret, i;504505 ret = pm_runtime_get_sync(vop->dev);506 if (ret < 0) {···533 }534535 memcpy(vop->regs, vop->regsbak, vop->len);536+ /*537+ * We need to make sure that all windows are disabled before we538+ * enable the crtc. Otherwise we might try to scan from a destroyed539+ * buffer later.540+ */541+ for (i = 0; i < vop->data->win_size; i++) {542+ struct vop_win *vop_win = &vop->win[i];543+ const struct vop_win_data *win = vop_win->data;544+545+ spin_lock(&vop->reg_lock);546+ VOP_WIN_SET(vop, win, enable, 0);547+ spin_unlock(&vop->reg_lock);548+ }549+550 vop_cfg_done(vop);551552 /*···566static void vop_crtc_disable(struct drm_crtc *crtc)567{568 struct vop *vop = to_vop(crtc);0569570 WARN_ON(vop->event);571572 rockchip_drm_psr_deactivate(&vop->crtc);0000000000000000573574 drm_crtc_vblank_off(crtc);575···682 * Src.x1 can be odd when do clip, but yuv plane start point683 * need align with 2 pixel.684 */685+ if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {686+ DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");687 return -EINVAL;688+ }689690 return 0;691}···764 spin_lock(&vop->reg_lock);765766 VOP_WIN_SET(vop, win, format, format);767+ VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));768 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);769 if (is_yuv_support(fb->format->format)) {770 int hsub = drm_format_horz_chroma_subsampling(fb->format->format);···778 offset += (src->y1 >> 16) * fb->pitches[1] / vsub;779780 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];781+ VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));782 VOP_WIN_SET(vop, win, uv_mst, dma_addr);783 }784
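The virtual-stride registers appear to be programmed in units of 32-bit words, and the switch from ">> 2" to DIV_ROUND_UP matters for pitches that are not a multiple of four: shifting truncates and drops the final partial word, while rounding up keeps it covered. A quick check with an odd pitch:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int pitch = 1442;   /* bytes, not a multiple of 4 */

        printf("pitch >> 2          = %u words\n", pitch >> 2);            /* 360 */
        printf("DIV_ROUND_UP(., 4)  = %u words\n", DIV_ROUND_UP(pitch, 4)); /* 361 */
        return 0;
    }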
···7 select DRM_PANEL8 select VIDEOMODE_HELPERS9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA10- default y1112 help13 Enable support for the on-chip display controller on
···7 select DRM_PANEL8 select VIDEOMODE_HELPERS9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA01011 help12 Enable support for the on-chip display controller on
+1-1
drivers/i2c/busses/Kconfig
···983984config I2C_VERSATILE985 tristate "ARM Versatile/Realview I2C bus support"986- depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST987 select I2C_ALGOBIT988 help989 Say yes if you want to support the I2C serial bus on ARMs Versatile
···983984config I2C_VERSATILE985 tristate "ARM Versatile/Realview I2C bus support"986+ depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST987 select I2C_ALGOBIT988 help989 Say yes if you want to support the I2C serial bus on ARMs Versatile
+5-1
drivers/i2c/busses/i2c-designware-platdrv.c
···298 }299300 acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);000301 /*302 * Find bus speed from the "clock-frequency" device property, ACPI303 * or by using fast mode if neither is set.···322 if (dev->clk_freq != 100000 && dev->clk_freq != 400000323 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {324 dev_err(&pdev->dev,325- "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");0326 ret = -EINVAL;327 goto exit_reset;328 }
···298 }299300 acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);301+ /* Some broken DSTDs use 1MiHz instead of 1MHz */302+ if (acpi_speed == 1048576)303+ acpi_speed = 1000000;304 /*305 * Find bus speed from the "clock-frequency" device property, ACPI306 * or by using fast mode if neither is set.···319 if (dev->clk_freq != 100000 && dev->clk_freq != 400000320 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {321 dev_err(&pdev->dev,322+ "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",323+ dev->clk_freq);324 ret = -EINVAL;325 goto exit_reset;326 }
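The quirk handles firmware that encodes the 1 MHz bus speed as 1 MiHz: 2^20 = 1048576 Hz, which would otherwise fail the exact match against the supported 100 kHz/400 kHz/1 MHz/3.4 MHz rates a few lines further down. A tiny sketch of the normalization (the function name is invented):

    #include <stdio.h>

    static unsigned int normalize_bus_speed(unsigned int hz)
    {
        /* some firmware encodes 1 MHz as 1 MiHz (1 << 20) */
        if (hz == 1048576)
            hz = 1000000;
        return hz;
    }

    int main(void)
    {
        printf("%u -> %u Hz\n", 1048576u, normalize_bus_speed(1048576));
        return 0;
    }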
···357 * Tree match table entry is supplied for the probing device.358 */359 if (!driver->id_table &&0360 !i2c_of_match_device(dev->driver->of_match_table, client))361 return -ENODEV;362
···357 * Tree match table entry is supplied for the probing device.358 */359 if (!driver->id_table &&360+ !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&361 !i2c_of_match_device(dev->driver->of_match_table, client))362 return -ENODEV;363
···83 different sets of pins at run-time.8485 This driver can also be built as a module. If so, the module will be86- called pinctrl-i2cmux.8788config I2C_MUX_REG89 tristate "Register-based I2C multiplexer"
···83 different sets of pins at run-time.8485 This driver can also be built as a module. If so, the module will be86+ called i2c-mux-pinctrl.8788config I2C_MUX_REG89 tristate "Register-based I2C multiplexer"
···2223#include <linux/iio/iio.h>24#include <linux/iio/driver.h>02526#define ASPEED_RESOLUTION_BITS 1027#define ASPEED_CLOCKS_PER_SAMPLE 12···3940#define ASPEED_ENGINE_ENABLE BIT(0)410000042struct aspeed_adc_model_data {43 const char *model_name;44 unsigned int min_sampling_rate; // Hz45 unsigned int max_sampling_rate; // Hz46 unsigned int vref_voltage; // mV047};4849struct aspeed_adc_data {···218 goto scaler_error;219 }220000000000000000000221 /* Start all channels in normal mode. */222 ret = clk_prepare_enable(data->clk_scaler->clk);223 if (ret)···299 .vref_voltage = 1800, // mV300 .min_sampling_rate = 1,301 .max_sampling_rate = 1000000,0302};303304static const struct of_device_id aspeed_adc_matches[] = {
···2223#include <linux/iio/iio.h>24#include <linux/iio/driver.h>25+#include <linux/iopoll.h>2627#define ASPEED_RESOLUTION_BITS 1028#define ASPEED_CLOCKS_PER_SAMPLE 12···3839#define ASPEED_ENGINE_ENABLE BIT(0)4041+#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)42+43+#define ASPEED_ADC_INIT_POLLING_TIME 50044+#define ASPEED_ADC_INIT_TIMEOUT 50000045+46struct aspeed_adc_model_data {47 const char *model_name;48 unsigned int min_sampling_rate; // Hz49 unsigned int max_sampling_rate; // Hz50 unsigned int vref_voltage; // mV51+ bool wait_init_sequence;52};5354struct aspeed_adc_data {···211 goto scaler_error;212 }213214+ model_data = of_device_get_match_data(&pdev->dev);215+216+ if (model_data->wait_init_sequence) {217+ /* Enable engine in normal mode. */218+ writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,219+ data->base + ASPEED_REG_ENGINE_CONTROL);220+221+ /* Wait for initial sequence complete. */222+ ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,223+ adc_engine_control_reg_val,224+ adc_engine_control_reg_val &225+ ASPEED_ADC_CTRL_INIT_RDY,226+ ASPEED_ADC_INIT_POLLING_TIME,227+ ASPEED_ADC_INIT_TIMEOUT);228+ if (ret)229+ goto scaler_error;230+ }231+232 /* Start all channels in normal mode. */233 ret = clk_prepare_enable(data->clk_scaler->clk);234 if (ret)···274 .vref_voltage = 1800, // mV275 .min_sampling_rate = 1,276 .max_sampling_rate = 1000000,277+ .wait_init_sequence = true,278};279280static const struct of_device_id aspeed_adc_matches[] = {
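The added wait uses readl_poll_timeout() from <linux/iopoll.h>, which keeps re-reading a register into the supplied variable until the given condition becomes true, sleeping between polls and returning -ETIMEDOUT once the timeout budget is spent. A rough user-space analogue of that loop, with an imaginary status register and ready bit standing in for ASPEED_ADC_CTRL_INIT_RDY:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define POLL_US    500
    #define TIMEOUT_US 500000

    /* stand-in for a device register read; the "ready" bit shows up later */
    static uint32_t read_status(void)
    {
        static int calls;
        return ++calls > 3 ? (1u << 8) : 0;
    }

    static int poll_ready(uint32_t *val)
    {
        unsigned int waited = 0;

        for (;;) {
            *val = read_status();
            if (*val & (1u << 8))
                return 0;
            if (waited >= TIMEOUT_US)
                return -ETIMEDOUT;
            usleep(POLL_US);
            waited += POLL_US;
        }
    }

    int main(void)
    {
        uint32_t v;
        int ret = poll_ready(&v);

        printf("ret=%d val=0x%x\n", ret, v);
        return 0;
    }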
+41-1
drivers/iio/adc/axp288_adc.c
···28#include <linux/iio/driver.h>2930#define AXP288_ADC_EN_MASK 0xF1003132enum axp288_adc_id {33 AXP288_ADC_TS,···123 return IIO_VAL_INT;124}12500000000000000000000126static int axp288_adc_read_raw(struct iio_dev *indio_dev,127 struct iio_chan_spec const *chan,128 int *val, int *val2, long mask)···153 mutex_lock(&indio_dev->mlock);154 switch (mask) {155 case IIO_CHAN_INFO_RAW:000000156 ret = axp288_adc_read_channel(val, chan->address, info->regmap);000157 break;158 default:159 ret = -EINVAL;···170 mutex_unlock(&indio_dev->mlock);171172 return ret;000000000173}174175static const struct iio_info axp288_adc_iio_info = {···209 * Set ADC to enabled state at all time, including system suspend.210 * otherwise internal fuel gauge functionality may be affected.211 */212- ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);213 if (ret) {214 dev_err(&pdev->dev, "unable to enable ADC device\n");215 return ret;
···28#include <linux/iio/driver.h>2930#define AXP288_ADC_EN_MASK 0xF131+#define AXP288_ADC_TS_PIN_GPADC 0xF232+#define AXP288_ADC_TS_PIN_ON 0xF33334enum axp288_adc_id {35 AXP288_ADC_TS,···121 return IIO_VAL_INT;122}123124+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,125+ unsigned long address)126+{127+ int ret;128+129+ /* channels other than GPADC do not need to switch TS pin */130+ if (address != AXP288_GP_ADC_H)131+ return 0;132+133+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);134+ if (ret)135+ return ret;136+137+ /* When switching to the GPADC pin give things some time to settle */138+ if (mode == AXP288_ADC_TS_PIN_GPADC)139+ usleep_range(6000, 10000);140+141+ return 0;142+}143+144static int axp288_adc_read_raw(struct iio_dev *indio_dev,145 struct iio_chan_spec const *chan,146 int *val, int *val2, long mask)···131 mutex_lock(&indio_dev->mlock);132 switch (mask) {133 case IIO_CHAN_INFO_RAW:134+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,135+ chan->address)) {136+ dev_err(&indio_dev->dev, "GPADC mode\n");137+ ret = -EINVAL;138+ break;139+ }140 ret = axp288_adc_read_channel(val, chan->address, info->regmap);141+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,142+ chan->address))143+ dev_err(&indio_dev->dev, "TS pin restore\n");144 break;145 default:146 ret = -EINVAL;···139 mutex_unlock(&indio_dev->mlock);140141 return ret;142+}143+144+static int axp288_adc_set_state(struct regmap *regmap)145+{146+ /* ADC should be always enabled for internal FG to function */147+ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))148+ return -EIO;149+150+ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);151}152153static const struct iio_info axp288_adc_iio_info = {···169 * Set ADC to enabled state at all time, including system suspend.170 * otherwise internal fuel gauge functionality may be affected.171 */172+ ret = axp288_adc_set_state(axp20x->regmap);173 if (ret) {174 dev_err(&pdev->dev, "unable to enable ADC device\n");175 return ret;
···511 case IB_CM_REQ_RECEIVED:512 return ipoib_cm_req_handler(cm_id, event);513 case IB_CM_DREQ_RECEIVED:514- p = cm_id->context;515 ib_send_cm_drep(cm_id, NULL, 0);516 /* Fall through */517 case IB_CM_REJ_RECEIVED:
···511 case IB_CM_REQ_RECEIVED:512 return ipoib_cm_req_handler(cm_id, event);513 case IB_CM_DREQ_RECEIVED:0514 ib_send_cm_drep(cm_id, NULL, 0);515 /* Fall through */516 case IB_CM_REJ_RECEIVED:
···256257 ++dev->stats.rx_packets;258 dev->stats.rx_bytes += skb->len;00259260 skb->dev = dev;261 if ((dev->features & NETIF_F_RXCSUM) &&···711 return pending;712}713000000000000000000000714int ipoib_ib_dev_stop_default(struct net_device *dev)715{716 struct ipoib_dev_priv *priv = ipoib_priv(dev);···751 */752 qp_attr.qp_state = IB_QPS_ERR;753 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))754- ipoib_warn(priv, "Failed to modify QP to ERROR state\n");755756 /* Wait for all sends and receives to complete */757 begin = jiffies;
···256257 ++dev->stats.rx_packets;258 dev->stats.rx_bytes += skb->len;259+ if (skb->pkt_type == PACKET_MULTICAST)260+ dev->stats.multicast++;261262 skb->dev = dev;263 if ((dev->features & NETIF_F_RXCSUM) &&···709 return pending;710}711712+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,713+ struct ib_qp *qp,714+ enum ib_qp_state new_state)715+{716+ struct ib_qp_attr qp_attr;717+ struct ib_qp_init_attr query_init_attr;718+ int ret;719+720+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);721+ if (ret) {722+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);723+ return;724+ }725+ /* print according to the new-state and the previous state.*/726+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)727+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");728+ else729+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",730+ new_state, qp_attr.qp_state);731+}732+733int ipoib_ib_dev_stop_default(struct net_device *dev)734{735 struct ipoib_dev_priv *priv = ipoib_priv(dev);···728 */729 qp_attr.qp_state = IB_QPS_ERR;730 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))731+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);732733 /* Wait for all sends and receives to complete */734 begin = jiffies;
+12-7
drivers/infiniband/ulp/ipoib/ipoib_main.c
···1560 int i, wait_flushed = 0;15611562 init_completion(&priv->ntbl.flushed);015631564 spin_lock_irqsave(&priv->lock, flags);1565···16051606 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");1607 init_completion(&priv->ntbl.deleted);1608- set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);16091610 /* Stop GC if called at init fail need to cancel work */1611 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);···1847 .ndo_tx_timeout = ipoib_timeout,1848 .ndo_set_rx_mode = ipoib_set_mcast_list,1849 .ndo_get_iflink = ipoib_get_iflink,01850};18511852void ipoib_setup_common(struct net_device *dev)···1878 priv->dev = dev;1879 spin_lock_init(&priv->lock);1880 init_rwsem(&priv->vlan_rwsem);018811882 INIT_LIST_HEAD(&priv->path_list);1883 INIT_LIST_HEAD(&priv->child_intfs);···2175 priv->dev->dev_id = port - 1;21762177 result = ib_query_port(hca, port, &attr);2178- if (!result)2179- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);2180- else {2181 printk(KERN_WARNING "%s: ib_query_port %d failed\n",2182 hca->name, port);2183 goto device_init_failed;2184 }0021852186 /* MTU will be reset when mcast join happens */2187 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);···2213 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",2214 hca->name, port, result);2215 goto device_init_failed;2216- } else2217- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));002218 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);22192220 result = ipoib_dev_init(priv->dev, hca, port);2221- if (result < 0) {2222 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",2223 hca->name, port, result);2224 goto device_init_failed;···2369 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);2370#ifdef CONFIG_INFINIBAND_IPOIB_CM2371 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);02372#endif23732374 /*
···1560 int i, wait_flushed = 0;15611562 init_completion(&priv->ntbl.flushed);1563+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);15641565 spin_lock_irqsave(&priv->lock, flags);1566···16041605 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");1606 init_completion(&priv->ntbl.deleted);016071608 /* Stop GC if called at init fail need to cancel work */1609 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);···1847 .ndo_tx_timeout = ipoib_timeout,1848 .ndo_set_rx_mode = ipoib_set_mcast_list,1849 .ndo_get_iflink = ipoib_get_iflink,1850+ .ndo_get_stats64 = ipoib_get_stats,1851};18521853void ipoib_setup_common(struct net_device *dev)···1877 priv->dev = dev;1878 spin_lock_init(&priv->lock);1879 init_rwsem(&priv->vlan_rwsem);1880+ mutex_init(&priv->mcast_mutex);18811882 INIT_LIST_HEAD(&priv->path_list);1883 INIT_LIST_HEAD(&priv->child_intfs);···2173 priv->dev->dev_id = port - 1;21742175 result = ib_query_port(hca, port, &attr);2176+ if (result) {002177 printk(KERN_WARNING "%s: ib_query_port %d failed\n",2178 hca->name, port);2179 goto device_init_failed;2180 }2181+2182+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);21832184 /* MTU will be reset when mcast join happens */2185 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);···2211 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",2212 hca->name, port, result);2213 goto device_init_failed;2214+ }2215+2216+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,2217+ sizeof(union ib_gid));2218 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);22192220 result = ipoib_dev_init(priv->dev, hca, port);2221+ if (result) {2222 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",2223 hca->name, port, result);2224 goto device_init_failed;···2365 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);2366#ifdef CONFIG_INFINIBAND_IPOIB_CM2367 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);2368+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);2369#endif23702371 /*
+11-22
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
···684int ipoib_mcast_stop_thread(struct net_device *dev)685{686 struct ipoib_dev_priv *priv = ipoib_priv(dev);687- unsigned long flags;688689 ipoib_dbg_mcast(priv, "stopping multicast thread\n");690691- spin_lock_irqsave(&priv->lock, flags);692- cancel_delayed_work(&priv->mcast_task);693- spin_unlock_irqrestore(&priv->lock, flags);694-695- flush_workqueue(priv->wq);696697 return 0;698}···742void ipoib_mcast_remove_list(struct list_head *remove_list)743{744 struct ipoib_mcast *mcast, *tmcast;00000000745746 list_for_each_entry_safe(mcast, tmcast, remove_list, list) {747 ipoib_mcast_leave(mcast->dev, mcast);···841 struct ipoib_mcast *mcast, *tmcast;842 unsigned long flags;8430844 ipoib_dbg_mcast(priv, "flushing multicast list\n");845846 spin_lock_irqsave(&priv->lock, flags);···860861 spin_unlock_irqrestore(&priv->lock, flags);862863- /*864- * make sure the in-flight joins have finished before we attempt865- * to leave866- */867- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)868- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))869- wait_for_completion(&mcast->done);870-871 ipoib_mcast_remove_list(&remove_list);0872}873874static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)···978 spin_unlock(&priv->lock);979 netif_addr_unlock(dev);980 local_irq_restore(flags);981-982- /*983- * make sure the in-flight joins have finished before we attempt984- * to leave985- */986- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)987- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))988- wait_for_completion(&mcast->done);989990 ipoib_mcast_remove_list(&remove_list);991
···684int ipoib_mcast_stop_thread(struct net_device *dev)685{686 struct ipoib_dev_priv *priv = ipoib_priv(dev);0687688 ipoib_dbg_mcast(priv, "stopping multicast thread\n");689690+ cancel_delayed_work_sync(&priv->mcast_task);0000691692 return 0;693}···747void ipoib_mcast_remove_list(struct list_head *remove_list)748{749 struct ipoib_mcast *mcast, *tmcast;750+751+ /*752+ * make sure the in-flight joins have finished before we attempt753+ * to leave754+ */755+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)756+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))757+ wait_for_completion(&mcast->done);758759 list_for_each_entry_safe(mcast, tmcast, remove_list, list) {760 ipoib_mcast_leave(mcast->dev, mcast);···838 struct ipoib_mcast *mcast, *tmcast;839 unsigned long flags;840841+ mutex_lock(&priv->mcast_mutex);842 ipoib_dbg_mcast(priv, "flushing multicast list\n");843844 spin_lock_irqsave(&priv->lock, flags);···856857 spin_unlock_irqrestore(&priv->lock, flags);85800000000859 ipoib_mcast_remove_list(&remove_list);860+ mutex_unlock(&priv->mcast_mutex);861}862863static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)···981 spin_unlock(&priv->lock);982 netif_addr_unlock(dev);983 local_irq_restore(flags);00000000984985 ipoib_mcast_remove_list(&remove_list);986
+7
drivers/iommu/arm-smmu.c
···15191520 if (using_legacy_binding) {1521 ret = arm_smmu_register_legacy_master(dev, &smmu);00000001522 if (ret)1523 goto out_free;1524 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
···15191520 if (using_legacy_binding) {1521 ret = arm_smmu_register_legacy_master(dev, &smmu);1522+1523+ /*1524+ * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()1525+ * will allocate/initialise a new one. Thus we need to update fwspec for1526+ * later use.1527+ */1528+ fwspec = dev->iommu_fwspec;1529 if (ret)1530 goto out_free;1531 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
+13-15
drivers/isdn/hysdn/hysdn_proclog.c
···44 char log_name[15]; /* log filename */45 struct log_data *log_head, *log_tail; /* head and tail for queue */46 int if_used; /* open count for interface */47- int volatile del_lock; /* lock for delete operations */48 unsigned char logtmp[LOG_MAX_LINELEN];49 wait_queue_head_t rd_queue;50};···101{102 struct log_data *ib;103 struct procdata *pd = card->proclog;104- int i;105 unsigned long flags;106107 if (!pd)···124 else125 pd->log_tail->next = ib; /* follows existing messages */126 pd->log_tail = ib; /* new tail */127- i = pd->del_lock++; /* get lock state */128- spin_unlock_irqrestore(&card->hysdn_lock, flags);129130 /* delete old entrys */131- if (!i)132- while (pd->log_head->next) {133- if ((pd->log_head->usage_cnt <= 0) &&134- (pd->log_head->next->usage_cnt <= 0)) {135- ib = pd->log_head;136- pd->log_head = pd->log_head->next;137- kfree(ib);138- } else139- break;140- } /* pd->log_head->next */141- pd->del_lock--; /* release lock level */00142 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */143} /* put_log_buffer */144
···44 char log_name[15]; /* log filename */45 struct log_data *log_head, *log_tail; /* head and tail for queue */46 int if_used; /* open count for interface */047 unsigned char logtmp[LOG_MAX_LINELEN];48 wait_queue_head_t rd_queue;49};···102{103 struct log_data *ib;104 struct procdata *pd = card->proclog;0105 unsigned long flags;106107 if (!pd)···126 else127 pd->log_tail->next = ib; /* follows existing messages */128 pd->log_tail = ib; /* new tail */00129130 /* delete old entrys */131+ while (pd->log_head->next) {132+ if ((pd->log_head->usage_cnt <= 0) &&133+ (pd->log_head->next->usage_cnt <= 0)) {134+ ib = pd->log_head;135+ pd->log_head = pd->log_head->next;136+ kfree(ib);137+ } else {138+ break;139+ }140+ } /* pd->log_head->next */141+142+ spin_unlock_irqrestore(&card->hysdn_lock, flags);143+144 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */145} /* put_log_buffer */146
+6
drivers/misc/mei/pci-me.c
···216 pci_set_drvdata(pdev, dev);217218 /*000000219 * For not wake-able HW runtime pm framework220 * can't be used on pci device level.221 * Use domain runtime pm callbacks instead.
···216 pci_set_drvdata(pdev, dev);217218 /*219+ * MEI requires to resume from runtime suspend mode220+ * in order to perform link reset flow upon system suspend.221+ */222+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;223+224+ /*225 * For not wake-able HW runtime pm framework226 * can't be used on pci device level.227 * Use domain runtime pm callbacks instead.
+6
drivers/misc/mei/pci-txe.c
···138 pci_set_drvdata(pdev, dev);139140 /*000000141 * For not wake-able HW runtime pm framework142 * can't be used on pci device level.143 * Use domain runtime pm callbacks instead.
···138 pci_set_drvdata(pdev, dev);139140 /*141+ * MEI requires to resume from runtime suspend mode142+ * in order to perform link reset flow upon system suspend.143+ */144+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;145+146+ /*147 * For not wake-able HW runtime pm framework148 * can't be used on pci device level.149 * Use domain runtime pm callbacks instead.
+2
drivers/mmc/core/block.c
···2170 * from being accepted.2171 */2172 card = md->queue.card;02173 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);02174 blk_set_queue_dying(md->queue.queue);2175 mmc_cleanup_queue(&md->queue);2176 if (md->disk->flags & GENHD_FL_UP) {
···2170 * from being accepted.2171 */2172 card = md->queue.card;2173+ spin_lock_irq(md->queue.queue->queue_lock);2174 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);2175+ spin_unlock_irq(md->queue.queue->queue_lock);2176 blk_set_queue_dying(md->queue.queue);2177 mmc_cleanup_queue(&md->queue);2178 if (md->disk->flags & GENHD_FL_UP) {
+1-1
drivers/mmc/core/mmc.c
···1289static int mmc_select_hs400es(struct mmc_card *card)1290{1291 struct mmc_host *host = card->host;1292- int err = 0;1293 u8 val;12941295 if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
···1289static int mmc_select_hs400es(struct mmc_card *card)1290{1291 struct mmc_host *host = card->host;1292+ int err = -EINVAL;1293 u8 val;12941295 if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
···1201 * tRC < 30ns implies EDO mode. This controller does not support this1202 * mode.1203 */1204- if (conf->timings.sdr.tRC_min < 30)1205 return -ENOTSUPP;12061207 atmel_smc_cs_conf_init(smcconf);
···1201 * tRC < 30ns implies EDO mode. This controller does not support this1202 * mode.1203 */1204+ if (conf->timings.sdr.tRC_min < 30000)1205 return -ENOTSUPP;12061207 atmel_smc_cs_conf_init(smcconf);
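The change is a units fix: the fields of struct nand_sdr_timings are expressed in picoseconds, so the 30 ns EDO-mode cutoff named in the comment has to be compared as 30000 ps (30 ns * 1000 ps/ns). A trivial check of that conversion:

    #include <stdio.h>

    #define PS_PER_NS 1000u

    int main(void)
    {
        unsigned int trc_min_ps = 25000;   /* a 25 ns cycle, in picoseconds */

        /* EDO mode is implied when tRC < 30 ns, i.e. < 30000 ps */
        printf("tRC %u ps -> EDO %s\n", trc_min_ps,
               trc_min_ps < 30 * PS_PER_NS ? "yes (unsupported here)" : "no");
        return 0;
    }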
+6-15
drivers/mtd/nand/atmel/pmecc.c
···945 */946 struct platform_device *pdev = to_platform_device(userdev);947 const struct atmel_pmecc_caps *caps;0948949 /* No PMECC engine available. */950 if (!of_property_read_bool(userdev->of_node,···954955 caps = &at91sam9g45_caps;956957- /*958- * Try to find the NFC subnode and extract the associated caps959- * from there.960- */961- np = of_find_compatible_node(userdev->of_node, NULL,962- "atmel,sama5d3-nfc");963- if (np) {964- const struct of_device_id *match;965-966- match = of_match_node(atmel_pmecc_legacy_match, np);967- if (match && match->data)968- caps = match->data;969-970- of_node_put(np);971- }972973 pmecc = atmel_pmecc_create(pdev, caps, 1, 2);974 }
···945 */946 struct platform_device *pdev = to_platform_device(userdev);947 const struct atmel_pmecc_caps *caps;948+ const struct of_device_id *match;949950 /* No PMECC engine available. */951 if (!of_property_read_bool(userdev->of_node,···953954 caps = &at91sam9g45_caps;955956+ /* Find the caps associated to the NAND dev node. */957+ match = of_match_node(atmel_pmecc_legacy_match,958+ userdev->of_node);959+ if (match && match->data)960+ caps = match->data;0000000000961962 pmecc = atmel_pmecc_create(pdev, caps, 1, 2);963 }
+10-3
drivers/mtd/nand/nand_base.c
···6566 if (!section) {67 oobregion->offset = 0;68- oobregion->length = 4;00069 } else {00070 oobregion->offset = 6;71 oobregion->length = ecc->total - 4;72 }···1131 * Ensure the timing mode has been changed on the chip side1132 * before changing timings on the controller side.1133 */1134- if (chip->onfi_version) {001135 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {1136 chip->onfi_timing_mode_default,1137 };···2749 * @buf: the data to write2750 * @oob_required: must write chip->oob_poi to OOB2751 * @page: page number to write2752- * @cached: cached programming2753 * @raw: use _raw version of write_page2754 */2755static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
···6566 if (!section) {67 oobregion->offset = 0;68+ if (mtd->oobsize == 16)69+ oobregion->length = 4;70+ else71+ oobregion->length = 3;72 } else {73+ if (mtd->oobsize == 8)74+ return -ERANGE;75+76 oobregion->offset = 6;77 oobregion->length = ecc->total - 4;78 }···1125 * Ensure the timing mode has been changed on the chip side1126 * before changing timings on the controller side.1127 */1128+ if (chip->onfi_version &&1129+ (le16_to_cpu(chip->onfi_params.opt_cmd) &1130+ ONFI_OPT_CMD_SET_GET_FEATURES)) {1131 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {1132 chip->onfi_timing_mode_default,1133 };···2741 * @buf: the data to write2742 * @oob_required: must write chip->oob_poi to OOB2743 * @page: page number to write02744 * @raw: use _raw version of write_page2745 */2746static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
···17851786 xgene_enet_gpiod_get(pdata);17871788- if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {1789- pdata->clk = devm_clk_get(&pdev->dev, NULL);1790- if (IS_ERR(pdata->clk)) {1791 /* Abort if the clock is defined but couldn't be1792 * retrived. Always abort if the clock is missing on1793 * DT system as the driver can't cope with this case.
···17851786 xgene_enet_gpiod_get(pdata);17871788+ pdata->clk = devm_clk_get(&pdev->dev, NULL);1789+ if (IS_ERR(pdata->clk)) {1790+ if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {1791 /* Abort if the clock is defined but couldn't be1792 * retrived. Always abort if the clock is missing on1793 * DT system as the driver can't cope with this case.
···449 p = (char *)&dev->stats;450 else451 p = (char *)priv;0000452 p += s->stat_offset;453 data[j] = *(unsigned long *)p;454 j++;
···449 p = (char *)&dev->stats;450 else451 p = (char *)priv;452+453+ if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))454+ continue;455+456 p += s->stat_offset;457 data[j] = *(unsigned long *)p;458 j++;
···111static void send_request_unmap(struct ibmvnic_adapter *, u8);112static void send_login(struct ibmvnic_adapter *adapter);113static void send_cap_queries(struct ibmvnic_adapter *adapter);114+static int init_sub_crqs(struct ibmvnic_adapter *);115static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);116static int ibmvnic_init(struct ibmvnic_adapter *);117static void release_crq_queue(struct ibmvnic_adapter *);···651 struct ibmvnic_adapter *adapter = netdev_priv(netdev);652 unsigned long timeout = msecs_to_jiffies(30000);653 struct device *dev = &adapter->vdev->dev;654+ int rc;655656 do {657 if (adapter->renegotiate) {···662 if (!wait_for_completion_timeout(&adapter->init_done,663 timeout)) {664 dev_err(dev, "Capabilities query timeout\n");665+ return -1;666+ }667+ rc = init_sub_crqs(adapter);668+ if (rc) {669+ dev_err(dev,670+ "Initialization of SCRQ's failed\n");671+ return -1;672+ }673+ rc = init_sub_crq_irqs(adapter);674+ if (rc) {675+ dev_err(dev,676+ "Initialization of SCRQ's irqs failed\n");677 return -1;678 }679 }···3004 *req_value,3005 (long int)be64_to_cpu(crq->request_capability_rsp.3006 number), name);03007 *req_value = be64_to_cpu(crq->request_capability_rsp.number);3008 ibmvnic_send_req_caps(adapter, 1);3009 return;
+2
drivers/net/ethernet/intel/i40e/i40e_txrx.c
···1113 if (!tx_ring->tx_bi)1114 goto err;1115001116 /* round up to nearest 4K */1117 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);1118 /* add u32 for head writeback, align after this takes care of
···1113 if (!tx_ring->tx_bi)1114 goto err;11151116+ u64_stats_init(&tx_ring->syncp);1117+1118 /* round up to nearest 4K */1119 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);1120 /* add u32 for head writeback, align after this takes care of
+4
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
···2988 if (!tx_ring->tx_buffer_info)2989 goto err;2990002991 /* round up to nearest 4K */2992 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);2993 tx_ring->size = ALIGN(tx_ring->size, 4096);···3047 rx_ring->rx_buffer_info = vzalloc(size);3048 if (!rx_ring->rx_buffer_info)3049 goto err;0030503051 /* Round up to nearest 4K */3052 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
···2988 if (!tx_ring->tx_buffer_info)2989 goto err;29902991+ u64_stats_init(&tx_ring->syncp);2992+2993 /* round up to nearest 4K */2994 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);2995 tx_ring->size = ALIGN(tx_ring->size, 4096);···3045 rx_ring->rx_buffer_info = vzalloc(size);3046 if (!rx_ring->rx_buffer_info)3047 goto err;3048+3049+ u64_stats_init(&rx_ring->syncp);30503051 /* Round up to nearest 4K */3052 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
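u64_stats_init() prepares the u64_stats_sync seqcount so that 32-bit readers can sample 64-bit counters consistently; leaving it out is what these hunks fix (on 32-bit SMP builds an uninitialized seqcount also upsets lockdep). A hedged, kernel-style sketch of the usual pattern around a ring's counters; the struct and field names are illustrative, not the driver's:

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct ring_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
    };

    static void ring_stats_setup(struct ring_stats *s)
    {
        u64_stats_init(&s->syncp);              /* what the hunks above add */
    }

    static void ring_stats_update(struct ring_stats *s, unsigned int len)
    {
        u64_stats_update_begin(&s->syncp);      /* writer side */
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
    }

    static void ring_stats_read(struct ring_stats *s, u64 *pkts, u64 *bytes)
    {
        unsigned int start;

        do {                                    /* reader retries if a write raced */
            start = u64_stats_fetch_begin_irq(&s->syncp);
            *pkts = s->packets;
            *bytes = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }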
···1048 else1049 netif_napi_del(&nvchan->napi);10501051+ atomic_inc(&nvscdev->open_chn);1052+ wake_up(&nvscdev->subchan_open);1053}10541055int rndis_filter_device_add(struct hv_device *dev,···1089 net_device = net_device_ctx->nvdev;1090 net_device->max_chn = 1;1091 net_device->num_chn = 1;0010921093 net_device->extension = rndis_device;1094 rndis_device->ndev = net;···1221 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,1222 net_device->num_chn);12231224+ atomic_set(&net_device->open_chn, 1);1225 num_rss_qs = net_device->num_chn - 1;1226 if (num_rss_qs == 0)1227 return 0;122801229 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);12301231 init_packet = &net_device->channel_init_pkt;···1242 if (ret)1243 goto out;12441245+ wait_for_completion(&net_device->channel_init_wait);1246 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {1247 ret = -ENODEV;1248 goto out;1249 }012501251 net_device->num_chn = 1 +1252 init_packet->msg.v5_msg.subchn_comp.num_subchannels;1253+1254+ /* wait for all sub channels to open */1255+ wait_event(net_device->subchan_open,1256+ atomic_read(&net_device->open_chn) == net_device->num_chn);12571258 /* ignore failues from setting rss parameters, still have channels */1259 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+1-1
drivers/net/ipvlan/ipvlan_main.c
···192193 netdev_lockdep_set_classes(dev);194195- ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);196 if (!ipvlan->pcpu_stats)197 return -ENOMEM;198
···192193 netdev_lockdep_set_classes(dev);194195+ ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);196 if (!ipvlan->pcpu_stats)197 return -ENOMEM;198
+10-8
drivers/net/ppp/ppp_generic.c
···1915 spin_unlock(&pch->downl);1916 /* see if there is anything from the attached unit to be sent */1917 if (skb_queue_empty(&pch->file.xq)) {1918- read_lock(&pch->upl);1919 ppp = pch->ppp;1920 if (ppp)1921- ppp_xmit_process(ppp);1922- read_unlock(&pch->upl);1923 }1924}19251926static void ppp_channel_push(struct channel *pch)1927{1928- local_bh_disable();1929-1930- __ppp_channel_push(pch);1931-1932- local_bh_enable();00001933}19341935/*
···1915 spin_unlock(&pch->downl);1916 /* see if there is anything from the attached unit to be sent */1917 if (skb_queue_empty(&pch->file.xq)) {01918 ppp = pch->ppp;1919 if (ppp)1920+ __ppp_xmit_process(ppp);01921 }1922}19231924static void ppp_channel_push(struct channel *pch)1925{1926+ read_lock_bh(&pch->upl);1927+ if (pch->ppp) {1928+ (*this_cpu_ptr(pch->ppp->xmit_recursion))++;1929+ __ppp_channel_push(pch);1930+ (*this_cpu_ptr(pch->ppp->xmit_recursion))--;1931+ } else {1932+ __ppp_channel_push(pch);1933+ }1934+ read_unlock_bh(&pch->upl);1935}19361937/*
···336337 c.directive.opcode = nvme_admin_directive_recv;338 c.directive.nsid = cpu_to_le32(nsid);339- c.directive.numd = cpu_to_le32(sizeof(*s));340 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;341 c.directive.dtype = NVME_DIR_STREAMS;342···1509 blk_queue_write_cache(q, vwc, vwc);1510}15111512-static void nvme_configure_apst(struct nvme_ctrl *ctrl)1513{1514 /*1515 * APST (Autonomous Power State Transition) lets us program a···1538 * then don't do anything.1539 */1540 if (!ctrl->apsta)1541- return;15421543 if (ctrl->npss > 31) {1544 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");1545- return;1546 }15471548 table = kzalloc(sizeof(*table), GFP_KERNEL);1549 if (!table)1550- return;15511552 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {1553 /* Turn off APST. */···1629 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);16301631 kfree(table);01632}16331634static void nvme_set_latency_tolerance(struct device *dev, s32 val)···1836 * In fabrics we need to verify the cntlid matches the1837 * admin connect1838 */1839- if (ctrl->cntlid != le16_to_cpu(id->cntlid))1840 ret = -EINVAL;0018411842 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {1843 dev_err(ctrl->device,1844 "keep-alive support is mandatory for fabrics\n");1845 ret = -EINVAL;01846 }1847 } else {1848 ctrl->cntlid = le16_to_cpu(id->cntlid);···1860 else if (!ctrl->apst_enabled && prev_apst_enabled)1861 dev_pm_qos_hide_latency_tolerance(ctrl->device);18621863- nvme_configure_apst(ctrl);1864- nvme_configure_directives(ctrl);0000018651866 ctrl->identified = true;186700001868 return ret;1869}1870EXPORT_SYMBOL_GPL(nvme_init_identify);···2017 if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))2018 return sprintf(buf, "eui.%8phN\n", ns->eui);20192020- while (ctrl->serial[serial_len - 1] == ' ')02021 serial_len--;2022- while (ctrl->model[model_len - 1] == ' ')02023 model_len--;20242025 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
···336337 c.directive.opcode = nvme_admin_directive_recv;338 c.directive.nsid = cpu_to_le32(nsid);339+ c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);340 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;341 c.directive.dtype = NVME_DIR_STREAMS;342···1509 blk_queue_write_cache(q, vwc, vwc);1510}15111512+static int nvme_configure_apst(struct nvme_ctrl *ctrl)1513{1514 /*1515 * APST (Autonomous Power State Transition) lets us program a···1538 * then don't do anything.1539 */1540 if (!ctrl->apsta)1541+ return 0;15421543 if (ctrl->npss > 31) {1544 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");1545+ return 0;1546 }15471548 table = kzalloc(sizeof(*table), GFP_KERNEL);1549 if (!table)1550+ return 0;15511552 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {1553 /* Turn off APST. */···1629 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);16301631 kfree(table);1632+ return ret;1633}16341635static void nvme_set_latency_tolerance(struct device *dev, s32 val)···1835 * In fabrics we need to verify the cntlid matches the1836 * admin connect1837 */1838+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {1839 ret = -EINVAL;1840+ goto out_free;1841+ }18421843 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {1844 dev_err(ctrl->device,1845 "keep-alive support is mandatory for fabrics\n");1846 ret = -EINVAL;1847+ goto out_free;1848 }1849 } else {1850 ctrl->cntlid = le16_to_cpu(id->cntlid);···1856 else if (!ctrl->apst_enabled && prev_apst_enabled)1857 dev_pm_qos_hide_latency_tolerance(ctrl->device);18581859+ ret = nvme_configure_apst(ctrl);1860+ if (ret < 0)1861+ return ret;1862+1863+ ret = nvme_configure_directives(ctrl);1864+ if (ret < 0)1865+ return ret;18661867 ctrl->identified = true;18681869+ return 0;1870+1871+out_free:1872+ kfree(id);1873 return ret;1874}1875EXPORT_SYMBOL_GPL(nvme_init_identify);···2004 if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))2005 return sprintf(buf, "eui.%8phN\n", ns->eui);20062007+ while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||2008+ ctrl->serial[serial_len - 1] == '\0'))2009 serial_len--;2010+ while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||2011+ ctrl->model[model_len - 1] == '\0'))2012 model_len--;20132014 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
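The numd fix follows the NVMe convention that a Number of Dwords field is zero-based: divide the transfer length in bytes by four, then subtract one. For a 32-byte parameters structure, say, that gives (32 >> 2) - 1 = 7. The arithmetic, spelled out:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bytes = 32;                 /* payload size, for example */
        unsigned int numd = (bytes >> 2) - 1;    /* zero-based dword count */

        printf("%u bytes -> numd = %u (i.e. %u dwords)\n", bytes, numd, numd + 1);
        return 0;
    }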
+7-11
drivers/nvme/host/pci.c
···1558 if (dev->cmb) {1559 iounmap(dev->cmb);1560 dev->cmb = NULL;1561- if (dev->cmbsz) {1562- sysfs_remove_file_from_group(&dev->ctrl.device->kobj,1563- &dev_attr_cmb.attr, NULL);1564- dev->cmbsz = 0;1565- }1566 }1567}1568···19511952 /*1953 * CMBs can currently only exist on >=1.2 PCIe devices. We only1954- * populate sysfs if a CMB is implemented. Note that we add the1955- * CMB attribute to the nvme_ctrl kobj which removes the need to remove1956- * it on exit. Since nvme_dev_attrs_group has no name we can pass1957- * NULL as final argument to sysfs_add_file_to_group.1958 */19591960 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {1961 dev->cmb = nvme_map_cmb(dev);1962-1963- if (dev->cmbsz) {1964 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,1965 &dev_attr_cmb.attr, NULL))1966 dev_warn(dev->ctrl.device,
···1558 if (dev->cmb) {1559 iounmap(dev->cmb);1560 dev->cmb = NULL;1561+ sysfs_remove_file_from_group(&dev->ctrl.device->kobj,1562+ &dev_attr_cmb.attr, NULL);1563+ dev->cmbsz = 0;001564 }1565}1566···19531954 /*1955 * CMBs can currently only exist on >=1.2 PCIe devices. We only1956+ * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group1957+ * has no name we can pass NULL as final argument to1958+ * sysfs_add_file_to_group.01959 */19601961 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {1962 dev->cmb = nvme_map_cmb(dev);1963+ if (dev->cmb) {01964 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,1965 &dev_attr_cmb.attr, NULL))1966 dev_warn(dev->ctrl.device,
+186-30
drivers/nvme/target/fc.c
···114 struct kref ref;115};11600000117struct nvmet_fc_tgt_queue {118 bool ninetypercent;119 u16 qid;···137 struct nvmet_fc_tgt_assoc *assoc;138 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */139 struct list_head fod_list;00140 struct workqueue_struct *work_q;141 struct kref ref;142} __aligned(sizeof(unsigned long long));···230static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);231static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);232static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);00233234235/* *********************** FC-NVME DMA Handling **************************** */···472nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)473{474 static struct nvmet_fc_fcp_iod *fod;475- unsigned long flags;476477- spin_lock_irqsave(&queue->qlock, flags);0478 fod = list_first_entry_or_null(&queue->fod_list,479 struct nvmet_fc_fcp_iod, fcp_list);480 if (fod) {···486 * will "inherit" that reference.487 */488 }489- spin_unlock_irqrestore(&queue->qlock, flags);490 return fod;491}49200000000000000000000493494static void495nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,···516{517 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;518 struct nvmet_fc_tgtport *tgtport = fod->tgtport;0519 unsigned long flags;520521 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,···524525 fcpreq->nvmet_fc_private = NULL;526527- spin_lock_irqsave(&queue->qlock, flags);528- list_add_tail(&fod->fcp_list, &fod->queue->fod_list);529 fod->active = false;530 fod->abort = false;531 fod->aborted = false;532 fod->writedataactive = false;533 fod->fcpreq = NULL;534- spin_unlock_irqrestore(&queue->qlock, flags);535-536- /*537- * release the reference taken at queue lookup and fod allocation538- */539- nvmet_fc_tgt_q_put(queue);540541 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);0000000000000000000000000000000000000000000542}543544static int···633 queue->port = assoc->tgtport->port;634 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);635 INIT_LIST_HEAD(&queue->fod_list);00636 atomic_set(&queue->connected, 0);637 atomic_set(&queue->sqtail, 0);638 atomic_set(&queue->rsn, 1);···704{705 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;706 struct nvmet_fc_fcp_iod *fod = queue->fod;0707 unsigned long flags;708 int i, writedataactive;709 bool disconnect;···732 &tgtport->fc_target_port, fod->fcpreq);733 }734 }00000000000000000000000000000735 }736 spin_unlock_irqrestore(&queue->qlock, flags);737···2268 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc2269 * layer for processing.2270 *2271- * The nvmet-fc layer will copy cmd payload to an internal structure for2272- * processing. 
As such, upon completion of the routine, the LLDD may2273- * immediately free/reuse the CMD IU buffer passed in the call.002274 *2275- * If this routine returns error, the lldd should abort the exchange.00000000000000000000000002276 *2277 * @target_port: pointer to the (registered) target port the FCP CMD IU2278 * was received on.···2317 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;2318 struct nvmet_fc_tgt_queue *queue;2319 struct nvmet_fc_fcp_iod *fod;0023202321 /* validate iu, so the connection id can be used to find the queue */2322 if ((cmdiubuf_len != sizeof(*cmdiu)) ||···2339 * when the fod is freed.2340 */2341002342 fod = nvmet_fc_alloc_fcp_iod(queue);2343- if (!fod) {000000000000002344 /* release the queue lookup reference */2345 nvmet_fc_tgt_q_put(queue);2346 return -ENOENT;2347 }23482349- fcpreq->nvmet_fc_private = fod;2350- fod->fcpreq = fcpreq;2351- /*2352- * put all admin cmds on hw queue id 0. All io commands go to2353- * the respective hw queue based on a modulo basis2354- */2355- fcpreq->hwqid = queue->qid ?2356- ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;2357- memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);23582359- if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)2360- queue_work_on(queue->cpu, queue->work_q, &fod->work);2361- else2362- nvmet_fc_handle_fcp_rqst(tgtport, fod);0000023632364- return 0;0000000000002365}2366EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);2367
···114 struct kref ref;115};116117+struct nvmet_fc_defer_fcp_req {118+ struct list_head req_list;119+ struct nvmefc_tgt_fcp_req *fcp_req;120+};121+122struct nvmet_fc_tgt_queue {123 bool ninetypercent;124 u16 qid;···132 struct nvmet_fc_tgt_assoc *assoc;133 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */134 struct list_head fod_list;135+ struct list_head pending_cmd_list;136+ struct list_head avail_defer_list;137 struct workqueue_struct *work_q;138 struct kref ref;139} __aligned(sizeof(unsigned long long));···223static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);224static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);225static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);226+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,227+ struct nvmet_fc_fcp_iod *fod);228229230/* *********************** FC-NVME DMA Handling **************************** */···463nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)464{465 static struct nvmet_fc_fcp_iod *fod;0466467+ lockdep_assert_held(&queue->qlock);468+469 fod = list_first_entry_or_null(&queue->fod_list,470 struct nvmet_fc_fcp_iod, fcp_list);471 if (fod) {···477 * will "inherit" that reference.478 */479 }0480 return fod;481}482483+484+static void485+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,486+ struct nvmet_fc_tgt_queue *queue,487+ struct nvmefc_tgt_fcp_req *fcpreq)488+{489+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;490+491+ /*492+ * put all admin cmds on hw queue id 0. All io commands go to493+ * the respective hw queue based on a modulo basis494+ */495+ fcpreq->hwqid = queue->qid ?496+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;497+498+ if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)499+ queue_work_on(queue->cpu, queue->work_q, &fod->work);500+ else501+ nvmet_fc_handle_fcp_rqst(tgtport, fod);502+}503504static void505nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,···488{489 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;490 struct nvmet_fc_tgtport *tgtport = fod->tgtport;491+ struct nvmet_fc_defer_fcp_req *deferfcp;492 unsigned long flags;493494 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,···495496 fcpreq->nvmet_fc_private = NULL;49700498 fod->active = false;499 fod->abort = false;500 fod->aborted = false;501 fod->writedataactive = false;502 fod->fcpreq = NULL;000000503504 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);505+506+ spin_lock_irqsave(&queue->qlock, flags);507+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,508+ struct nvmet_fc_defer_fcp_req, req_list);509+ if (!deferfcp) {510+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);511+ spin_unlock_irqrestore(&queue->qlock, flags);512+513+ /* Release reference taken at queue lookup and fod allocation */514+ nvmet_fc_tgt_q_put(queue);515+ return;516+ }517+518+ /* Re-use the fod for the next pending cmd that was deferred */519+ list_del(&deferfcp->req_list);520+521+ fcpreq = deferfcp->fcp_req;522+523+ /* deferfcp can be reused for another IO at a later date */524+ list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);525+526+ spin_unlock_irqrestore(&queue->qlock, flags);527+528+ /* Save NVME CMD IO in fod */529+ memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);530+531+ /* Setup new fcpreq to be processed */532+ fcpreq->rspaddr = NULL;533+ fcpreq->rsplen = 0;534+ fcpreq->nvmet_fc_private = fod;535+ fod->fcpreq = fcpreq;536+ fod->active = true;537+538+ /* inform LLDD IO is now being processed */539+ 
tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);540+541+ /* Submit deferred IO for processing */542+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);543+544+ /*545+ * Leave the queue lookup get reference taken when546+ * fod was originally allocated.547+ */548}549550static int···569 queue->port = assoc->tgtport->port;570 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);571 INIT_LIST_HEAD(&queue->fod_list);572+ INIT_LIST_HEAD(&queue->avail_defer_list);573+ INIT_LIST_HEAD(&queue->pending_cmd_list);574 atomic_set(&queue->connected, 0);575 atomic_set(&queue->sqtail, 0);576 atomic_set(&queue->rsn, 1);···638{639 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;640 struct nvmet_fc_fcp_iod *fod = queue->fod;641+ struct nvmet_fc_defer_fcp_req *deferfcp;642 unsigned long flags;643 int i, writedataactive;644 bool disconnect;···665 &tgtport->fc_target_port, fod->fcpreq);666 }667 }668+ }669+670+ /* Cleanup defer'ed IOs in queue */671+ list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {672+ list_del(&deferfcp->req_list);673+ kfree(deferfcp);674+ }675+676+ for (;;) {677+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,678+ struct nvmet_fc_defer_fcp_req, req_list);679+ if (!deferfcp)680+ break;681+682+ list_del(&deferfcp->req_list);683+ spin_unlock_irqrestore(&queue->qlock, flags);684+685+ tgtport->ops->defer_rcv(&tgtport->fc_target_port,686+ deferfcp->fcp_req);687+688+ tgtport->ops->fcp_abort(&tgtport->fc_target_port,689+ deferfcp->fcp_req);690+691+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port,692+ deferfcp->fcp_req);693+694+ kfree(deferfcp);695+696+ spin_lock_irqsave(&queue->qlock, flags);697 }698 spin_unlock_irqrestore(&queue->qlock, flags);699···2172 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc2173 * layer for processing.2174 *2175+ * The nvmet_fc layer allocates a local job structure (struct2176+ * nvmet_fc_fcp_iod) from the queue for the io and copies the2177+ * CMD IU buffer to the job structure. As such, on a successful2178+ * completion (returns 0), the LLDD may immediately free/reuse2179+ * the CMD IU buffer passed in the call.2180 *2181+ * However, in some circumstances, due to the packetized nature of FC2182+ * and the api of the FC LLDD which may issue a hw command to send the2183+ * response, but the LLDD may not get the hw completion for that command2184+ * and upcall the nvmet_fc layer before a new command may be2185+ * asynchronously received - its possible for a command to be received2186+ * before the LLDD and nvmet_fc have recycled the job structure. It gives2187+ * the appearance of more commands received than fits in the sq.2188+ * To alleviate this scenario, a temporary queue is maintained in the2189+ * transport for pending LLDD requests waiting for a queue job structure.2190+ * In these "overrun" cases, a temporary queue element is allocated2191+ * the LLDD request and CMD iu buffer information remembered, and the2192+ * routine returns a -EOVERFLOW status. Subsequently, when a queue job2193+ * structure is freed, it is immediately reallocated for anything on the2194+ * pending request list. 
The LLDDs defer_rcv() callback is called,2195+ * informing the LLDD that it may reuse the CMD IU buffer, and the io2196+ * is then started normally with the transport.2197+ *2198+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat2199+ * the completion as successful but must not reuse the CMD IU buffer2200+ * until the LLDD's defer_rcv() callback has been called for the2201+ * corresponding struct nvmefc_tgt_fcp_req pointer.2202+ *2203+ * If there is any other condition in which an error occurs, the2204+ * transport will return a non-zero status indicating the error.2205+ * In all cases other than -EOVERFLOW, the transport has not accepted the2206+ * request and the LLDD should abort the exchange.2207 *2208 * @target_port: pointer to the (registered) target port the FCP CMD IU2209 * was received on.···2194 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;2195 struct nvmet_fc_tgt_queue *queue;2196 struct nvmet_fc_fcp_iod *fod;2197+ struct nvmet_fc_defer_fcp_req *deferfcp;2198+ unsigned long flags;21992200 /* validate iu, so the connection id can be used to find the queue */2201 if ((cmdiubuf_len != sizeof(*cmdiu)) ||···2214 * when the fod is freed.2215 */22162217+ spin_lock_irqsave(&queue->qlock, flags);2218+2219 fod = nvmet_fc_alloc_fcp_iod(queue);2220+ if (fod) {2221+ spin_unlock_irqrestore(&queue->qlock, flags);2222+2223+ fcpreq->nvmet_fc_private = fod;2224+ fod->fcpreq = fcpreq;2225+2226+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);2227+2228+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);2229+2230+ return 0;2231+ }2232+2233+ if (!tgtport->ops->defer_rcv) {2234+ spin_unlock_irqrestore(&queue->qlock, flags);2235 /* release the queue lookup reference */2236 nvmet_fc_tgt_q_put(queue);2237 return -ENOENT;2238 }22392240+ deferfcp = list_first_entry_or_null(&queue->avail_defer_list,2241+ struct nvmet_fc_defer_fcp_req, req_list);2242+ if (deferfcp) {2243+ /* Just re-use one that was previously allocated */2244+ list_del(&deferfcp->req_list);2245+ } else {2246+ spin_unlock_irqrestore(&queue->qlock, flags);0022472248+ /* Now we need to dynamically allocate one */2249+ deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);2250+ if (!deferfcp) {2251+ /* release the queue lookup reference */2252+ nvmet_fc_tgt_q_put(queue);2253+ return -ENOMEM;2254+ }2255+ spin_lock_irqsave(&queue->qlock, flags);2256+ }22572258+ /* For now, use rspaddr / rsplen to save payload information */2259+ fcpreq->rspaddr = cmdiubuf;2260+ fcpreq->rsplen = cmdiubuf_len;2261+ deferfcp->fcp_req = fcpreq;2262+2263+ /* defer processing till a fod becomes available */2264+ list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);2265+2266+ /* NOTE: the queue lookup reference is still valid */2267+2268+ spin_unlock_irqrestore(&queue->qlock, flags);2269+2270+ return -EOVERFLOW;2271}2272EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);2273
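The kerneldoc text above describes the -EOVERFLOW calling convention in prose only, so the following kernel-style sketch shows how an LLDD receive path might honor it. This is an illustration under assumptions, not buildable code: struct lldd_cmd and the lldd_* helpers are invented for the example; only nvmet_fc_rcv_fcp_req() and the defer_rcv() template entry come from the patch.

    /*
     * Hypothetical LLDD receive path (sketch): lldd_cmd and the lldd_*
     * helpers are made up; the return-code handling mirrors the rules
     * documented in the nvmet_fc_rcv_fcp_req() kerneldoc above.
     */
    static void lldd_handle_new_cmd(struct lldd_cmd *cmd)
    {
        int ret;

        ret = nvmet_fc_rcv_fcp_req(cmd->tgtport, &cmd->tgt_fcp_req,
                                   cmd->cmd_iu_buf, cmd->cmd_iu_len);
        switch (ret) {
        case 0:
            /* Transport copied the CMD IU: the buffer may be reused now. */
            lldd_repost_cmd_buffer(cmd);
            break;
        case -EOVERFLOW:
            /*
             * Accepted but deferred: keep the CMD IU buffer untouched
             * until ->defer_rcv() is called for this nvmefc_tgt_fcp_req.
             */
            cmd->buf_loaned_to_transport = true;
            break;
        default:
            /* Not accepted: abort the exchange on the wire. */
            lldd_abort_exchange(cmd);
            break;
        }
    }

    /* ->defer_rcv() entry registered in the LLDD's nvmet_fc_target_template */
    static void lldd_defer_rcv(struct nvmet_fc_target_port *tgtport,
                               struct nvmefc_tgt_fcp_req *fcpreq)
    {
        struct lldd_cmd *cmd = container_of(fcpreq, struct lldd_cmd,
                                            tgt_fcp_req);

        /* The deferred CMD IU has been consumed; the buffer is ours again. */
        cmd->buf_loaned_to_transport = false;
        lldd_repost_cmd_buffer(cmd);
    }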
+35
drivers/pci/pci.c
···4260EXPORT_SYMBOL_GPL(pci_reset_function);42614262/**000000000000000000000000000000000004263 * pci_try_reset_function - quiesce and reset a PCI device function4264 * @dev: PCI device to reset4265 *
···4260EXPORT_SYMBOL_GPL(pci_reset_function);42614262/**4263+ * pci_reset_function_locked - quiesce and reset a PCI device function4264+ * @dev: PCI device to reset4265+ *4266+ * Some devices allow an individual function to be reset without affecting4267+ * other functions in the same device. The PCI device must be responsive4268+ * to PCI config space in order to use this function.4269+ *4270+ * This function does not just reset the PCI portion of a device, but4271+ * clears all the state associated with the device. This function differs4272+ * from __pci_reset_function() in that it saves and restores device state4273+ * over the reset. It also differs from pci_reset_function() in that it4274+ * requires the PCI device lock to be held.4275+ *4276+ * Returns 0 if the device function was successfully reset or negative if the4277+ * device doesn't support resetting a single function.4278+ */4279+int pci_reset_function_locked(struct pci_dev *dev)4280+{4281+ int rc;4282+4283+ rc = pci_probe_reset_function(dev);4284+ if (rc)4285+ return rc;4286+4287+ pci_dev_save_and_disable(dev);4288+4289+ rc = __pci_reset_function_locked(dev);4290+4291+ pci_dev_restore(dev);4292+4293+ return rc;4294+}4295+EXPORT_SYMBOL_GPL(pci_reset_function_locked);4296+4297+/**4298 * pci_try_reset_function - quiesce and reset a PCI device function4299 * @dev: PCI device to reset4300 *
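As a usage note (a hedged sketch, not part of the patch): the new helper is intended for contexts that already hold the PCI device lock, the typical one being a driver probe() routine where the driver core holds the device lock; the xhci-pci hunk later in this series is the in-tree example. The probe function name below is invented.

    /* Sketch only: example_probe() is hypothetical; the point is that the
     * driver core already holds the device lock when probe() runs, which is
     * what permits the *_locked variant instead of pci_reset_function(). */
    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        int rc;

        rc = pci_reset_function_locked(pdev);
        if (rc)
            dev_warn(&pdev->dev, "function reset failed: %d\n", rc);

        /* ...device setup continues; a failed reset need not be fatal... */
        return 0;
    }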
···2223#include <linux/cdev.h>24#include <linux/device.h>025#include <linux/mutex.h>26#include <linux/posix-clock.h>27#include <linux/ptp_clock.h>···57 struct attribute_group pin_attr_group;58 /* 1st entry is a pointer to the real group, 2nd is NULL terminator */59 const struct attribute_group *pin_attr_groups[2];0060};6162/*
···2223#include <linux/cdev.h>24#include <linux/device.h>25+#include <linux/kthread.h>26#include <linux/mutex.h>27#include <linux/posix-clock.h>28#include <linux/ptp_clock.h>···56 struct attribute_group pin_attr_group;57 /* 1st entry is a pointer to the real group, 2nd is NULL terminator */58 const struct attribute_group *pin_attr_groups[2];59+ struct kthread_worker *kworker;60+ struct kthread_delayed_work aux_work;61};6263/*
···2760 * we allocation is the minimum off:2761 *2762 * Number of CPUs2763- * Number of MSI-X vectors2764- * Max number allocated in hardware (QEDF_MAX_NUM_CQS)2765 */2766- qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,2767- num_online_cpus());27682769 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",2770 qedf->num_queues);···2960 goto err1;2961 }296200000002963 /* queue allocation code should come here2964 * order should be2965 * slowpath_start···2981 goto err2;2982 }2983 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);2984-2985- /* Learn information crucial for qedf to progress */2986- rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);2987- if (rc) {2988- QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");2989- goto err1;2990- }29912992 /* Record BDQ producer doorbell addresses */2993 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
···2760 * we allocation is the minimum off:2761 *2762 * Number of CPUs2763+ * Number allocated by qed for our PCI function02764 */2765+ qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);027662767 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",2768 qedf->num_queues);···2962 goto err1;2963 }29642965+ /* Learn information crucial for qedf to progress */2966+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);2967+ if (rc) {2968+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");2969+ goto err1;2970+ }2971+2972 /* queue allocation code should come here2973 * order should be2974 * slowpath_start···2976 goto err2;2977 }2978 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);000000029792980 /* Record BDQ producer doorbell addresses */2981 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
-30
drivers/scsi/qla2xxx/tcm_qla2xxx.c
···500static void tcm_qla2xxx_handle_data_work(struct work_struct *work)501{502 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);503- unsigned long flags;504505 /*506 * Ensure that the complete FCP WRITE payload has been received.507 * Otherwise return an exception via CHECK_CONDITION status.508 */509 cmd->cmd_in_wq = 0;510-511- spin_lock_irqsave(&cmd->cmd_lock, flags);512- cmd->data_work = 1;513- if (cmd->aborted) {514- cmd->data_work_free = 1;515- spin_unlock_irqrestore(&cmd->cmd_lock, flags);516-517- tcm_qla2xxx_free_cmd(cmd);518- return;519- }520- spin_unlock_irqrestore(&cmd->cmd_lock, flags);521522 cmd->qpair->tgt_counters.qla_core_ret_ctio++;523 if (!cmd->write_data_transferred) {···753 qlt_xmit_tm_rsp(mcmd);754}755756-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)757static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)758{759 struct qla_tgt_cmd *cmd = container_of(se_cmd,760 struct qla_tgt_cmd, se_cmd);761- unsigned long flags;762763 if (qlt_abort_cmd(cmd))764 return;765-766- spin_lock_irqsave(&cmd->cmd_lock, flags);767- if ((cmd->state == QLA_TGT_STATE_NEW)||768- ((cmd->state == QLA_TGT_STATE_DATA_IN) &&769- DATA_WORK_NOT_FREE(cmd))) {770- cmd->data_work_free = 1;771- spin_unlock_irqrestore(&cmd->cmd_lock, flags);772- /*773- * cmd has not reached fw, Use this trigger to free it.774- */775- tcm_qla2xxx_free_cmd(cmd);776- return;777- }778- spin_unlock_irqrestore(&cmd->cmd_lock, flags);779- return;780-781}782783static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
···500static void tcm_qla2xxx_handle_data_work(struct work_struct *work)501{502 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);0503504 /*505 * Ensure that the complete FCP WRITE payload has been received.506 * Otherwise return an exception via CHECK_CONDITION status.507 */508 cmd->cmd_in_wq = 0;00000000000509510 cmd->qpair->tgt_counters.qla_core_ret_ctio++;511 if (!cmd->write_data_transferred) {···765 qlt_xmit_tm_rsp(mcmd);766}7670768static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)769{770 struct qla_tgt_cmd *cmd = container_of(se_cmd,771 struct qla_tgt_cmd, se_cmd);0772773 if (qlt_abort_cmd(cmd))774 return;0000000000000000775}776777static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+1-30
drivers/scsi/sg.c
···751 return count;752}753754-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)755-{756- switch (hp->dxfer_direction) {757- case SG_DXFER_NONE:758- if (hp->dxferp || hp->dxfer_len > 0)759- return false;760- return true;761- case SG_DXFER_FROM_DEV:762- /*763- * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp764- * can either be NULL or != NULL so there's no point in checking765- * it either. So just return true.766- */767- return true;768- case SG_DXFER_TO_DEV:769- case SG_DXFER_TO_FROM_DEV:770- if (!hp->dxferp || hp->dxfer_len == 0)771- return false;772- return true;773- case SG_DXFER_UNKNOWN:774- if ((!hp->dxferp && hp->dxfer_len) ||775- (hp->dxferp && hp->dxfer_len == 0))776- return false;777- return true;778- default:779- return false;780- }781-}782-783static int784sg_common_write(Sg_fd * sfp, Sg_request * srp,785 unsigned char *cmnd, int timeout, int blocking)···771 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",772 (int) cmnd[0], (int) hp->cmd_len));773774- if (!sg_is_valid_dxfer(hp))775 return -EINVAL;776777 k = sg_start_req(srp, cmnd);
···751 return count;752}75300000000000000000000000000000754static int755sg_common_write(Sg_fd * sfp, Sg_request * srp,756 unsigned char *cmnd, int timeout, int blocking)···800 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",801 (int) cmnd[0], (int) hp->cmd_len));802803+ if (hp->dxfer_len >= SZ_256M)804 return -EINVAL;805806 k = sg_start_req(srp, cmnd);
···333 int res;334 enum tb_port_type type;335000000000336 port = &sw->ports[header->index];337 port->disabled = header->port_disabled;338 if (port->disabled)
···333 int res;334 enum tb_port_type type;335336+ /*337+ * Some DROMs list more ports than the controller actually has338+ * so we skip those but allow the parser to continue.339+ */340+ if (header->index > sw->config.max_port_number) {341+ dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");342+ return 0;343+ }344+345 port = &sw->ports[header->index];346 port->disabled = header->port_disabled;347 if (port->disabled)
+17-6
drivers/tty/serial/8250/8250_core.c
···1043 if (up->dl_write)1044 uart->dl_write = up->dl_write;10451046- if (serial8250_isa_config != NULL)1047- serial8250_isa_config(0, &uart->port,1048- &uart->capabilities);010491050- ret = uart_add_one_port(&serial8250_reg, &uart->port);1051- if (ret == 0)1052- ret = uart->port.line;00000000001053 }1054 mutex_unlock(&serial_mutex);1055
···1043 if (up->dl_write)1044 uart->dl_write = up->dl_write;10451046+ if (uart->port.type != PORT_8250_CIR) {1047+ if (serial8250_isa_config != NULL)1048+ serial8250_isa_config(0, &uart->port,1049+ &uart->capabilities);10501051+ ret = uart_add_one_port(&serial8250_reg,1052+ &uart->port);1053+ if (ret == 0)1054+ ret = uart->port.line;1055+ } else {1056+ dev_info(uart->port.dev,1057+ "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",1058+ uart->port.iobase,1059+ (unsigned long long)uart->port.mapbase,1060+ uart->port.irq);1061+1062+ ret = 0;1063+ }1064 }1065 mutex_unlock(&serial_mutex);1066
+19-18
drivers/tty/serial/amba-pl011.c
···142 .fixed_options = true,143};144145-/*146- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as147- * occasionally getting stuck as 1. To avoid the potential for a hang, check148- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART149- * implementations, so only do so if an affected platform is detected in150- * parse_spcr().151- */152-static bool qdf2400_e44_present = false;153-154static struct vendor_data vendor_qdt_qdf2400_e44 = {155 .reg_offset = pl011_std_offsets,156 .fr_busy = UART011_FR_TXFE,···157 .always_enabled = true,158 .fixed_options = true,159};0160161static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {162 [REG_DR] = UART01x_DR,···2368 resource_size_t addr;2369 int i;23702371- if (strcmp(name, "qdf2400_e44") == 0) {2372- pr_info_once("UART: Working around QDF2400 SoC erratum 44");2373- qdf2400_e44_present = true;2374- } else if (strcmp(name, "pl011") != 0) {0002375 return -ENODEV;2376- }23772378 if (uart_parse_earlycon(options, &iotype, &addr, &options))2379 return -ENODEV;···2729 }2730 uap->port.irq = ret;27312732- uap->reg_offset = vendor_sbsa.reg_offset;2733- uap->vendor = qdf2400_e44_present ?2734- &vendor_qdt_qdf2400_e44 : &vendor_sbsa;0000002735 uap->fifosize = 32;2736- uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;2737 uap->port.ops = &sbsa_uart_pops;2738 uap->fixed_baud = baudrate;2739
···142 .fixed_options = true,143};144145+#ifdef CONFIG_ACPI_SPCR_TABLE00000000146static struct vendor_data vendor_qdt_qdf2400_e44 = {147 .reg_offset = pl011_std_offsets,148 .fr_busy = UART011_FR_TXFE,···165 .always_enabled = true,166 .fixed_options = true,167};168+#endif169170static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {171 [REG_DR] = UART01x_DR,···2375 resource_size_t addr;2376 int i;23772378+ /*2379+ * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum2380+ * have a distinct console name, so make sure we check for that.2381+ * The actual implementation of the erratum occurs in the probe2382+ * function.2383+ */2384+ if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))2385 return -ENODEV;023862387 if (uart_parse_earlycon(options, &iotype, &addr, &options))2388 return -ENODEV;···2734 }2735 uap->port.irq = ret;27362737+#ifdef CONFIG_ACPI_SPCR_TABLE2738+ if (qdf2400_e44_present) {2739+ dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");2740+ uap->vendor = &vendor_qdt_qdf2400_e44;2741+ } else2742+#endif2743+ uap->vendor = &vendor_sbsa;2744+2745+ uap->reg_offset = uap->vendor->reg_offset;2746 uap->fifosize = 32;2747+ uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;2748 uap->port.ops = &sbsa_uart_pops;2749 uap->fixed_baud = baudrate;2750
+3-1
drivers/usb/core/hcd.c
···1888 /* No more submits can occur */1889 spin_lock_irq(&hcd_urb_list_lock);1890rescan:1891- list_for_each_entry (urb, &ep->urb_list, urb_list) {1892 int is_in;18931894 if (urb->unlinked)···2485 }2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {2487 hcd = hcd->shared_hcd;002488 if (hcd->rh_registered) {2489 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);2490
···1888 /* No more submits can occur */1889 spin_lock_irq(&hcd_urb_list_lock);1890rescan:1891+ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {1892 int is_in;18931894 if (urb->unlinked)···2485 }2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {2487 hcd = hcd->shared_hcd;2488+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);2489+ set_bit(HCD_FLAG_DEAD, &hcd->flags);2490 if (hcd->rh_registered) {2491 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);2492
···896 if (!node) {897 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;898899+ /*900+ * USB Specification 2.0 Section 5.9.2 states that: "If901+ * there is only a single transaction in the microframe,902+ * only a DATA0 data packet PID is used. If there are903+ * two transactions per microframe, DATA1 is used for904+ * the first transaction data packet and DATA0 is used905+ * for the second transaction data packet. If there are906+ * three transactions per microframe, DATA2 is used for907+ * the first transaction data packet, DATA1 is used for908+ * the second, and DATA0 is used for the third."909+ *910+ * IOW, we should satisfy the following cases:911+ *912+ * 1) length <= maxpacket913+ * - DATA0914+ *915+ * 2) maxpacket < length <= (2 * maxpacket)916+ * - DATA1, DATA0917+ *918+ * 3) (2 * maxpacket) < length <= (3 * maxpacket)919+ * - DATA2, DATA1, DATA0920+ */921 if (speed == USB_SPEED_HIGH) {922 struct usb_ep *ep = &dep->endpoint;923+ unsigned int mult = ep->mult - 1;924+ unsigned int maxp = usb_endpoint_maxp(ep->desc);925+926+ if (length <= (2 * maxp))927+ mult--;928+929+ if (length <= maxp)930+ mult--;931+932+ trb->size |= DWC3_TRB_SIZE_PCM1(mult);933 }934 } else {935 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
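Since the decrement logic above is terse, here is a small self-contained C program (illustration only, not driver code) that reproduces the packets-per-microframe selection for a high-speed isochronous endpoint with ep->mult == 3 and a 1024-byte maxpacket:

    #include <stdio.h>

    /* Mirrors the mult-decrement logic from the hunk above; the returned
     * value is what the driver feeds into DWC3_TRB_SIZE_PCM1(). */
    static unsigned int isoc_pcm1(unsigned int length, unsigned int maxp)
    {
        unsigned int mult = 3 - 1;    /* ep->mult == 3 in this example */

        if (length <= (2 * maxp))
            mult--;
        if (length <= maxp)
            mult--;
        return mult;    /* 0: DATA0, 1: DATA1..DATA0, 2: DATA2..DATA1..DATA0 */
    }

    int main(void)
    {
        unsigned int maxp = 1024;
        unsigned int lengths[] = { 512, 1024, 1500, 2048, 2500, 3072 };
        unsigned int i;

        for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
            printf("length=%4u -> PCM1=%u\n",
                   lengths[i], isoc_pcm1(lengths[i], maxp));
        return 0;
    }

For example, a 1500-byte interval maps to PCM1 = 1 (two packets, DATA1 then DATA0), matching case 2 of the comment.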
···98 AMD_CHIPSET_HUDSON2,99 AMD_CHIPSET_BOLTON,100 AMD_CHIPSET_YANGTZE,0101 AMD_CHIPSET_UNKNOWN,102};103···142 pinfo->sb_type.gen = AMD_CHIPSET_SB700;143 else if (rev >= 0x40 && rev <= 0x4f)144 pinfo->sb_type.gen = AMD_CHIPSET_SB800;00000145 } else {146 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,147 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);···266{267 /* Make sure amd chipset type has already been initialized */268 usb_amd_find_chipset_info();269- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)270- return 0;271-272- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");273- return 1;0274}275EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);276···1157}1158DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,1159 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);00000000000000000000
···98 AMD_CHIPSET_HUDSON2,99 AMD_CHIPSET_BOLTON,100 AMD_CHIPSET_YANGTZE,101+ AMD_CHIPSET_TAISHAN,102 AMD_CHIPSET_UNKNOWN,103};104···141 pinfo->sb_type.gen = AMD_CHIPSET_SB700;142 else if (rev >= 0x40 && rev <= 0x4f)143 pinfo->sb_type.gen = AMD_CHIPSET_SB800;144+ }145+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,146+ 0x145c, NULL);147+ if (pinfo->smbus_dev) {148+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;149 } else {150 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,151 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);···260{261 /* Make sure amd chipset type has already been initialized */262 usb_amd_find_chipset_info();263+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||264+ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {265+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");266+ return 1;267+ }268+ return 0;269}270EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);271···1150}1151DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,1152 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);1153+1154+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)1155+{1156+ /*1157+ * Our dear uPD72020{1,2} friend only partially resets when1158+ * asked to via the XHCI interface, and may end up doing DMA1159+ * at the wrong addresses, as it keeps the top 32bit of some1160+ * addresses from its previous programming under obscure1161+ * circumstances.1162+ * Give it a good wack at probe time. Unfortunately, this1163+ * needs to happen before we've had a chance to discover any1164+ * quirk, or the system will be in a rather bad state.1165+ */1166+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&1167+ (pdev->device == 0x0014 || pdev->device == 0x0015))1168+ return true;1169+1170+ return false;1171+}1172+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
···284285 driver = (struct hc_driver *)id->driver_data;2860000000287 /* Prevent runtime suspending between USB-2 and USB-3 initialization */288 pm_runtime_get_noresume(&dev->dev);289
···284285 driver = (struct hc_driver *)id->driver_data;286287+ /* For some HW implementations, an XHCI reset is just not enough... */288+ if (usb_xhci_needs_pci_reset(dev)) {289+ dev_info(&dev->dev, "Resetting\n");290+ if (pci_reset_function_locked(dev))291+ dev_warn(&dev->dev, "Reset failed");292+ }293+294 /* Prevent runtime suspending between USB-2 and USB-3 initialization */295 pm_runtime_get_noresume(&dev->dev);296
···315{316 struct us_data *us = (struct us_data *)__us;317 struct Scsi_Host *host = us_to_host(us);0318319 for (;;) {320 usb_stor_dbg(us, "*** thread sleeping\n");···331 scsi_lock(host);332333 /* When we are called with no command pending, we're done */0334 if (us->srb == NULL) {335 scsi_unlock(host);336 mutex_unlock(&us->dev_mutex);···400 /* lock access to the state */401 scsi_lock(host);402403- /* indicate that the command is done */404- if (us->srb->result != DID_ABORT << 16) {405- usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",406- us->srb->result);407- us->srb->scsi_done(us->srb);408- } else {409SkipForAbort:410 usb_stor_dbg(us, "scsi command aborted\n");0411 }412413 /*···428429 /* unlock the device pointers */430 mutex_unlock(&us->dev_mutex);0000000431 } /* for (;;) */432433 /* Wait until we are told to stop */
···315{316 struct us_data *us = (struct us_data *)__us;317 struct Scsi_Host *host = us_to_host(us);318+ struct scsi_cmnd *srb;319320 for (;;) {321 usb_stor_dbg(us, "*** thread sleeping\n");···330 scsi_lock(host);331332 /* When we are called with no command pending, we're done */333+ srb = us->srb;334 if (us->srb == NULL) {335 scsi_unlock(host);336 mutex_unlock(&us->dev_mutex);···398 /* lock access to the state */399 scsi_lock(host);400401+ /* was the command aborted? */402+ if (us->srb->result == DID_ABORT << 16) {0000403SkipForAbort:404 usb_stor_dbg(us, "scsi command aborted\n");405+ srb = NULL; /* Don't call srb->scsi_done() */406 }407408 /*···429430 /* unlock the device pointers */431 mutex_unlock(&us->dev_mutex);432+433+ /* now that the locks are released, notify the SCSI core */434+ if (srb) {435+ usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",436+ srb->result);437+ srb->scsi_done(srb);438+ }439 } /* for (;;) */440441 /* Wait until we are told to stop */
···843 * hibernation, system resume and during runtime PM transitions844 * along with subsystem-level and driver-level callbacks.845 * @pins: For device pin management.846- * See Documentation/pinctrl.txt for details.847 * @msi_list: Hosts MSI descriptors848 * @msi_domain: The generic MSI domain this device is using.849 * @numa_node: NUMA node this device is close to.
···843 * hibernation, system resume and during runtime PM transitions844 * along with subsystem-level and driver-level callbacks.845 * @pins: For device pin management.846+ * See Documentation/driver-api/pinctl.rst for details.847 * @msi_list: Hosts MSI descriptors848 * @msi_domain: The generic MSI domain this device is using.849 * @numa_node: NUMA node this device is close to.
+2-1
include/linux/i2c.h
···689#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */690#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */691#define I2C_CLASS_SPD (1<<7) /* Memory modules */692-#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */0693694/* Internal numbers to terminate lists */695#define I2C_CLIENT_END 0xfffeU
···689#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */690#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */691#define I2C_CLASS_SPD (1<<7) /* Memory modules */692+/* Warn users that the adapter doesn't support classes anymore */693+#define I2C_CLASS_DEPRECATED (1<<8)694695/* Internal numbers to terminate lists */696#define I2C_CLIENT_END 0xfffeU
+7
include/linux/iio/common/st_sensors.h
···105 struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];106};10700000108/**109 * struct st_sensor_bdu - ST sensor device block data update110 * @addr: address of the register.···202 * @bdu: Block data update register.203 * @das: Data Alignment Selection register.204 * @drdy_irq: Data ready register of the sensor.0205 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.206 * @bootime: samples to discard when sensor passing from power-down to power-up.207 */···219 struct st_sensor_bdu bdu;220 struct st_sensor_das das;221 struct st_sensor_data_ready_irq drdy_irq;0222 bool multi_read_bit;223 unsigned int bootime;224};
···105 struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];106};107108+struct st_sensor_sim {109+ u8 addr;110+ u8 value;111+};112+113/**114 * struct st_sensor_bdu - ST sensor device block data update115 * @addr: address of the register.···197 * @bdu: Block data update register.198 * @das: Data Alignment Selection register.199 * @drdy_irq: Data ready register of the sensor.200+ * @sim: SPI serial interface mode register of the sensor.201 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.202 * @bootime: samples to discard when sensor passing from power-down to power-up.203 */···213 struct st_sensor_bdu bdu;214 struct st_sensor_das das;215 struct st_sensor_data_ready_irq drdy_irq;216+ struct st_sensor_sim sim;217 bool multi_read_bit;218 unsigned int bootime;219};
···487 /* numa_scan_seq prevents two threads setting pte_numa */488 int numa_scan_seq;489#endif490-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)491 /*492 * An operation with batched TLB flushing is going on. Anything that493 * can move process memory needs to flush the TLB when moving a494 * PROT_NONE or PROT_NUMA mapped page.495 */496- bool tlb_flush_pending;497-#endif498#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH499 /* See flush_tlb_batched_pending() */500 bool tlb_flush_batched;···520 return mm->cpu_vm_mask_var;521}522523-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)00000524/*525 * Memory barriers to keep this state in sync are graciously provided by526 * the page table locks, outside of which no page table modifications happen.527- * The barriers below prevent the compiler from re-ordering the instructions528- * around the memory barriers that are already present in the code.0529 */530static inline bool mm_tlb_flush_pending(struct mm_struct *mm)531{532- barrier();533- return mm->tlb_flush_pending;534}535-static inline void set_tlb_flush_pending(struct mm_struct *mm)0000536{537- mm->tlb_flush_pending = true;0000000000538539 /*540- * Guarantee that the tlb_flush_pending store does not leak into the541 * critical section updating the page tables542 */543 smp_mb__before_spinlock();544}0545/* Clearing is done after a TLB flush, which also provides a barrier. */546-static inline void clear_tlb_flush_pending(struct mm_struct *mm)547{548- barrier();549- mm->tlb_flush_pending = false;000000550}551-#else552-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)553-{554- return false;555-}556-static inline void set_tlb_flush_pending(struct mm_struct *mm)557-{558-}559-static inline void clear_tlb_flush_pending(struct mm_struct *mm)560-{561-}562-#endif563564struct vm_fault;565
···487 /* numa_scan_seq prevents two threads setting pte_numa */488 int numa_scan_seq;489#endif0490 /*491 * An operation with batched TLB flushing is going on. Anything that492 * can move process memory needs to flush the TLB when moving a493 * PROT_NONE or PROT_NUMA mapped page.494 */495+ atomic_t tlb_flush_pending;0496#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH497 /* See flush_tlb_batched_pending() */498 bool tlb_flush_batched;···522 return mm->cpu_vm_mask_var;523}524525+struct mmu_gather;526+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,527+ unsigned long start, unsigned long end);528+extern void tlb_finish_mmu(struct mmu_gather *tlb,529+ unsigned long start, unsigned long end);530+531/*532 * Memory barriers to keep this state in sync are graciously provided by533 * the page table locks, outside of which no page table modifications happen.534+ * The barriers are used to ensure the order between tlb_flush_pending updates,535+ * which happen while the lock is not taken, and the PTE updates, which happen536+ * while the lock is taken, are serialized.537 */538static inline bool mm_tlb_flush_pending(struct mm_struct *mm)539{540+ return atomic_read(&mm->tlb_flush_pending) > 0;0541}542+543+/*544+ * Returns true if there are two above TLB batching threads in parallel.545+ */546+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)547{548+ return atomic_read(&mm->tlb_flush_pending) > 1;549+}550+551+static inline void init_tlb_flush_pending(struct mm_struct *mm)552+{553+ atomic_set(&mm->tlb_flush_pending, 0);554+}555+556+static inline void inc_tlb_flush_pending(struct mm_struct *mm)557+{558+ atomic_inc(&mm->tlb_flush_pending);559560 /*561+ * Guarantee that the tlb_flush_pending increase does not leak into the562 * critical section updating the page tables563 */564 smp_mb__before_spinlock();565}566+567/* Clearing is done after a TLB flush, which also provides a barrier. */568+static inline void dec_tlb_flush_pending(struct mm_struct *mm)569{570+ /*571+ * Guarantee that the tlb_flush_pending does not not leak into the572+ * critical section, since we must order the PTE change and changes to573+ * the pending TLB flush indication. We could have relied on TLB flush574+ * as a memory barrier, but this behavior is not clearly documented.575+ */576+ smp_mb__before_atomic();577+ atomic_dec(&mm->tlb_flush_pending);578}000000000000579580struct vm_fault;581
···346 * indicating an FC transport Aborted status.347 * Entrypoint is Mandatory.348 *00000349 * @max_hw_queues: indicates the maximum number of hw queues the LLDD350 * supports for cpu affinitization.351 * Value is Mandatory. Must be at least 1.···850 void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,851 struct nvmefc_tgt_fcp_req *fcpreq);852 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,00853 struct nvmefc_tgt_fcp_req *fcpreq);854855 u32 max_hw_queues;
···346 * indicating an FC transport Aborted status.347 * Entrypoint is Mandatory.348 *349+ * @defer_rcv: Called by the transport to signal the LLDD that it has350+ * begun processing of a previously received NVME CMD IU. The LLDD351+ * is now free to re-use the rcv buffer associated with the352+ * nvmefc_tgt_fcp_req.353+ *354+ * @max_hw_queues: indicates the maximum number of hw queues the LLDD355+ * supports for cpu affinitization.356+ * Value is Mandatory. Must be at least 1.···845 void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,846 struct nvmefc_tgt_fcp_req *fcpreq);847 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,848+ struct nvmefc_tgt_fcp_req *fcpreq);849+ void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,850+ struct nvmefc_tgt_fcp_req *fcpreq);851852 u32 max_hw_queues;
···81 * it.82 * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a83 * value on the line. Use argument 1 to indicate high level, argument 0 to84- * indicate low level. (Please see Documentation/pinctrl.txt, section85- * "GPIO mode pitfalls" for a discussion around this parameter.)86 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power87 * supplies, the argument to this parameter (on a custom format) tells88 * the driver which alternative power source to use.
···81 * it.82 * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a83 * value on the line. Use argument 1 to indicate high level, argument 0 to84+ * indicate low level. (Please see Documentation/driver-api/pinctl.rst,85+ * section "GPIO mode pitfalls" for a discussion around this parameter.)86 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power87 * supplies, the argument to this parameter (on a custom format) tells88 * the driver which alternative power source to use.
+2
include/linux/platform_data/st_sensors_pdata.h
···17 * Available only for accelerometer and pressure sensors.18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).19 * @open_drain: set the interrupt line to be open drain if possible.020 */21struct st_sensors_platform_data {22 u8 drdy_int_pin;23 bool open_drain;024};2526#endif /* ST_SENSORS_PDATA_H */
···17 * Available only for accelerometer and pressure sensors.18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).19 * @open_drain: set the interrupt line to be open drain if possible.20+ * @spi_3wire: enable spi-3wire mode.21 */22struct st_sensors_platform_data {23 u8 drdy_int_pin;24 bool open_drain;25+ bool spi_3wire;26};2728#endif /* ST_SENSORS_PDATA_H */
+20
include/linux/ptp_clock_kernel.h
···99 * parameter func: the desired function to use.100 * parameter chan: the function channel index to use.101 *00000102 * Drivers should embed their ptp_clock_info within a private103 * structure, obtaining a reference to it using container_of().104 *···131 struct ptp_clock_request *request, int on);132 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,133 enum ptp_pin_function func, unsigned int chan);0134};135136struct ptp_clock;···217int ptp_find_pin(struct ptp_clock *ptp,218 enum ptp_pin_function func, unsigned int chan);2190000000000220#else221static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,222 struct device *parent)···241static inline int ptp_find_pin(struct ptp_clock *ptp,242 enum ptp_pin_function func, unsigned int chan)243{ return -1; }0000244#endif245246#endif
···99 * parameter func: the desired function to use.100 * parameter chan: the function channel index to use.101 *102+ * @do_aux_work: Request driver to perform auxiliary (periodic) operations103+ * Driver should return delay of the next auxiliary work scheduling104+ * time (>=0) or negative value in case further scheduling105+ * is not required.106+ *107 * Drivers should embed their ptp_clock_info within a private108 * structure, obtaining a reference to it using container_of().109 *···126 struct ptp_clock_request *request, int on);127 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,128 enum ptp_pin_function func, unsigned int chan);129+ long (*do_aux_work)(struct ptp_clock_info *ptp);130};131132struct ptp_clock;···211int ptp_find_pin(struct ptp_clock *ptp,212 enum ptp_pin_function func, unsigned int chan);213214+/**215+ * ptp_schedule_worker() - schedule ptp auxiliary work216+ *217+ * @ptp: The clock obtained from ptp_clock_register().218+ * @delay: number of jiffies to wait before queuing219+ * See kthread_queue_delayed_work() for more info.220+ */221+222+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);223+224#else225static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,226 struct device *parent)···225static inline int ptp_find_pin(struct ptp_clock *ptp,226 enum ptp_pin_function func, unsigned int chan)227{ return -1; }228+static inline int ptp_schedule_worker(struct ptp_clock *ptp,229+ unsigned long delay)230+{ return -EOPNOTSUPP; }231+232#endif233234#endif
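To make the new hook concrete, here is a hedged, kernel-style sketch of how a PHC driver might use it: implement ->do_aux_work() to perform the periodic chore and return the re-arm delay in jiffies, and queue the first run with ptp_schedule_worker(). struct my_phc, its fields, and the 500 ms period are invented for the example.

    /* Sketch only: my_phc and my_phc_poll_hardware() are illustrative. */
    static long my_phc_do_aux_work(struct ptp_clock_info *info)
    {
        struct my_phc *phc = container_of(info, struct my_phc, caps);

        my_phc_poll_hardware(phc);    /* hypothetical periodic chore */

        /* Re-arm in ~500 ms; a negative return would stop rescheduling. */
        return msecs_to_jiffies(500);
    }

    static int my_phc_register(struct my_phc *phc, struct device *parent)
    {
        phc->caps.do_aux_work = my_phc_do_aux_work;    /* set before registering */
        phc->clock = ptp_clock_register(&phc->caps, parent);
        if (IS_ERR(phc->clock))
            return PTR_ERR(phc->clock);

        /* Queue the first run immediately; later runs self-schedule. */
        return ptp_schedule_worker(phc->clock, 0);
    }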
···1916 u64 xmit_time);1917extern void tcp_rack_reo_timeout(struct sock *sk);191800000000001919/*1920 * Save and compile IPv4 options, return a pointer to it1921 */
···1916 u64 xmit_time);1917extern void tcp_rack_reo_timeout(struct sock *sk);19181919+/* At how many usecs into the future should the RTO fire? */1920+static inline s64 tcp_rto_delta_us(const struct sock *sk)1921+{1922+ const struct sk_buff *skb = tcp_write_queue_head(sk);1923+ u32 rto = inet_csk(sk)->icsk_rto;1924+ u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);1925+1926+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;1927+}1928+1929/*1930 * Save and compile IPv4 options, return a pointer to it1931 */
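For a quick feel of the arithmetic the helper performs, a tiny standalone C example with made-up numbers: the head of the write queue was sent at t = 1,000,000 us, the RTO is 200 ms, and the current TCP clock reads 1,150,000 us, so the timer should fire 50,000 us from now; a non-positive result would mean the RTO has already expired and should fire immediately.

    #include <stdio.h>
    #include <stdint.h>

    /* Same arithmetic as tcp_rto_delta_us(), with illustrative inputs. */
    int main(void)
    {
        uint64_t skb_mstamp = 1000000;    /* head skb send time, us */
        uint64_t rto_us     =  200000;    /* icsk_rto converted to us */
        uint64_t tcp_mstamp = 1150000;    /* current TCP clock, us */
        int64_t delta = (int64_t)(skb_mstamp + rto_us - tcp_mstamp);

        printf("RTO fires in %lld us\n", (long long)delta);
        return 0;
    }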
···171 __u32 size; /* in, cmdstream size */172 __u32 pad;173 __u32 nr_relocs; /* in, number of submit_reloc's */174- __u64 __user relocs; /* in, ptr to array of submit_reloc's */175};176177/* Each buffer referenced elsewhere in the cmdstream submit (ie. the···215 __u32 fence; /* out */216 __u32 nr_bos; /* in, number of submit_bo's */217 __u32 nr_cmds; /* in, number of submit_cmd's */218- __u64 __user bos; /* in, ptr to array of submit_bo's */219- __u64 __user cmds; /* in, ptr to array of submit_cmd's */220 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */221};222
···171 __u32 size; /* in, cmdstream size */172 __u32 pad;173 __u32 nr_relocs; /* in, number of submit_reloc's */174+ __u64 relocs; /* in, ptr to array of submit_reloc's */175};176177/* Each buffer referenced elsewhere in the cmdstream submit (ie. the···215 __u32 fence; /* out */216 __u32 nr_bos; /* in, number of submit_bo's */217 __u32 nr_cmds; /* in, number of submit_cmd's */218+ __u64 bos; /* in, ptr to array of submit_bo's */219+ __u64 cmds; /* in, ptr to array of submit_cmd's */220 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */221};222
···670 * this reference was taken by ihold under the page lock671 * pinning the inode in place so i_lock was unnecessary. The672 * only way for this check to fail is if the inode was673- * truncated in parallel so warn for now if this happens.0674 *675 * We are not calling into get_futex_key_refs() in file-backed676 * cases, therefore a successful atomic_inc return below will677 * guarantee that get_futex_key() will still imply smp_mb(); (B).678 */679- if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {680 rcu_read_unlock();681 put_page(page);682
···670 * this reference was taken by ihold under the page lock671 * pinning the inode in place so i_lock was unnecessary. The672 * only way for this check to fail is if the inode was673+ * truncated in parallel which is almost certainly an674+ * application bug. In such a case, just retry.675 *676 * We are not calling into get_futex_key_refs() in file-backed677 * cases, therefore a successful atomic_inc return below will678 * guarantee that get_futex_key() will still imply smp_mb(); (B).679 */680+ if (!atomic_inc_not_zero(&inode->i_count)) {681 rcu_read_unlock();682 put_page(page);683
···110 if (in_task()) {111 unsigned int fail_nth = READ_ONCE(current->fail_nth);112113- if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))114- goto fail;0115116- return false;0117 }118119 /* No need to check any other properties if the probability is 0 */
···110 if (in_task()) {111 unsigned int fail_nth = READ_ONCE(current->fail_nth);112113+ if (fail_nth) {114+ if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))115+ goto fail;116117+ return false;118+ }119 }120121 /* No need to check any other properties if the probability is 0 */
+8-8
lib/test_kmod.c
···485 config->test_driver);486 else487 len += snprintf(buf+len, PAGE_SIZE - len,488- "driver:\tEMTPY\n");489490 if (config->test_fs)491 len += snprintf(buf+len, PAGE_SIZE - len,···493 config->test_fs);494 else495 len += snprintf(buf+len, PAGE_SIZE - len,496- "fs:\tEMTPY\n");497498 mutex_unlock(&test_dev->config_mutex);499···746 strlen(test_str));747 break;748 case TEST_KMOD_FS_TYPE:749- break;750 kfree_const(config->test_fs);751 config->test_driver = NULL;752 copied = config_copy_test_fs(config, test_str,753 strlen(test_str));0754 default:755 mutex_unlock(&test_dev->config_mutex);756 return -EINVAL;···880 int (*test_sync)(struct kmod_test_device *test_dev))881{882 int ret;883- long new;884 unsigned int old_val;885886- ret = kstrtol(buf, 10, &new);887 if (ret)888 return ret;889···918 unsigned int max)919{920 int ret;921- long new;922923- ret = kstrtol(buf, 10, &new);924 if (ret)925 return ret;926···1146 struct kmod_test_device *test_dev = NULL;1147 int ret;11481149- mutex_unlock(®_dev_mutex);11501151 /* int should suffice for number of devices, test for wrap */1152 if (unlikely(num_test_devs + 1) < 0) {
···485 config->test_driver);486 else487 len += snprintf(buf+len, PAGE_SIZE - len,488+ "driver:\tEMPTY\n");489490 if (config->test_fs)491 len += snprintf(buf+len, PAGE_SIZE - len,···493 config->test_fs);494 else495 len += snprintf(buf+len, PAGE_SIZE - len,496+ "fs:\tEMPTY\n");497498 mutex_unlock(&test_dev->config_mutex);499···746 strlen(test_str));747 break;748 case TEST_KMOD_FS_TYPE:0749 kfree_const(config->test_fs);750 config->test_driver = NULL;751 copied = config_copy_test_fs(config, test_str,752 strlen(test_str));753+ break;754 default:755 mutex_unlock(&test_dev->config_mutex);756 return -EINVAL;···880 int (*test_sync)(struct kmod_test_device *test_dev))881{882 int ret;883+ unsigned long new;884 unsigned int old_val;885886+ ret = kstrtoul(buf, 10, &new);887 if (ret)888 return ret;889···918 unsigned int max)919{920 int ret;921+ unsigned long new;922923+ ret = kstrtoul(buf, 10, &new);924 if (ret)925 return ret;926···1146 struct kmod_test_device *test_dev = NULL;1147 int ret;11481149+ mutex_lock(®_dev_mutex);11501151 /* int should suffice for number of devices, test for wrap */1152 if (unlikely(num_test_devs + 1) < 0) {
+1-1
mm/balloon_compaction.c
···24{25 unsigned long flags;26 struct page *page = alloc_page(balloon_mapping_gfp_mask() |27- __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO);28 if (!page)29 return NULL;30
···24{25 unsigned long flags;26 struct page *page = alloc_page(balloon_mapping_gfp_mask() |27+ __GFP_NOMEMALLOC | __GFP_NORETRY);28 if (!page)29 return NULL;30
···1496 }14971498 /*00000001499 * Migrate the THP to the requested node, returns with page unlocked1500 * and access rights restored.1501 */
···1496 }14971498 /*1499+ * The page_table_lock above provides a memory barrier1500+ * with change_protection_range.1501+ */1502+ if (mm_tlb_flush_pending(vma->vm_mm))1503+ flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);1504+1505+ /*1506 * Migrate the THP to the requested node, returns with page unlocked1507 * and access rights restored.1508 */
+1-1
mm/hugetlb.c
···4062 return ret;4063out_release_unlock:4064 spin_unlock(ptl);4065-out_release_nounlock:4066 if (vm_shared)4067 unlock_page(page);04068 put_page(page);4069 goto out;4070}
···4062 return ret;4063out_release_unlock:4064 spin_unlock(ptl);04065 if (vm_shared)4066 unlock_page(page);4067+out_release_nounlock:4068 put_page(page);4069 goto out;4070}
···215 return true;216}217218-/* tlb_gather_mmu219- * Called to initialize an (on-stack) mmu_gather structure for page-table220- * tear-down from @mm. The @fullmm argument is used when @mm is without221- * users and we're going to destroy the full address space (exit/execve).222- */223-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)224{225 tlb->mm = mm;226···271 * Called at the end of the shootdown operation to free up any resources272 * that were required.273 */274-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)0275{276 struct mmu_gather_batch *batch, *next;000277278 tlb_flush_mmu(tlb);279···397}398399#endif /* CONFIG_HAVE_RCU_TABLE_FREE */0000000000000000000000000000400401/*402 * Note: this doesn't free the actual pages themselves. That
···215 return true;216}217218+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,219+ unsigned long start, unsigned long end)0000220{221 tlb->mm = mm;222···275 * Called at the end of the shootdown operation to free up any resources276 * that were required.277 */278+void arch_tlb_finish_mmu(struct mmu_gather *tlb,279+ unsigned long start, unsigned long end, bool force)280{281 struct mmu_gather_batch *batch, *next;282+283+ if (force)284+ __tlb_adjust_range(tlb, start, end - start);285286 tlb_flush_mmu(tlb);287···397}398399#endif /* CONFIG_HAVE_RCU_TABLE_FREE */400+401+/* tlb_gather_mmu402+ * Called to initialize an (on-stack) mmu_gather structure for page-table403+ * tear-down from @mm. The @fullmm argument is used when @mm is without404+ * users and we're going to destroy the full address space (exit/execve).405+ */406+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,407+ unsigned long start, unsigned long end)408+{409+ arch_tlb_gather_mmu(tlb, mm, start, end);410+ inc_tlb_flush_pending(tlb->mm);411+}412+413+void tlb_finish_mmu(struct mmu_gather *tlb,414+ unsigned long start, unsigned long end)415+{416+ /*417+ * If there are parallel threads are doing PTE changes on same range418+ * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB419+ * flush by batching, a thread has stable TLB entry can fail to flush420+ * the TLB by observing pte_none|!pte_dirty, for example so flush TLB421+ * forcefully if we detect parallel PTE batching threads.422+ */423+ bool force = mm_tlb_flush_nested(tlb->mm);424+425+ arch_tlb_finish_mmu(tlb, start, end, force);426+ dec_tlb_flush_pending(tlb->mm);427+}428429/*430 * Note: this doesn't free the actual pages themselves. That
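To show the new bracketing from the caller's side, a hedged sketch of the generic teardown pattern follows (the page-table walk itself is elided and the function name is illustrative); the point is that inc_tlb_flush_pending()/dec_tlb_flush_pending() and the forced flush for nested batchers now live inside the two wrappers rather than at each call site.

    /* Sketch of the caller-side pattern; the walk in the middle stands in
     * for the real page-table teardown, which this patch does not change. */
    void example_unmap_range(struct mm_struct *mm,
                             unsigned long start, unsigned long end)
    {
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, start, end);    /* inc_tlb_flush_pending(mm) */

        /* ... walk the page tables and clear PTEs for [start, end) ... */

        tlb_finish_mmu(&tlb, start, end);    /* flushes, forcefully if another
                                              * batcher ran in parallel, then
                                              * dec_tlb_flush_pending(mm) */
    }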
-6
mm/migrate.c
···1937 put_page(new_page);1938 goto out_fail;1939 }1940- /*1941- * We are not sure a pending tlb flush here is for a huge page1942- * mapping or not. Hence use the tlb range variant1943- */1944- if (mm_tlb_flush_pending(mm))1945- flush_tlb_range(vma, mmun_start, mmun_end);19461947 /* Prepare a page as a migration target */1948 __SetPageLocked(new_page);
···1937 put_page(new_page);1938 goto out_fail;1939 }00000019401941 /* Prepare a page as a migration target */1942 __SetPageLocked(new_page);
+2-2
mm/mprotect.c
···244 BUG_ON(addr >= end);245 pgd = pgd_offset(mm, addr);246 flush_cache_range(vma, addr, end);247- set_tlb_flush_pending(mm);248 do {249 next = pgd_addr_end(addr, end);250 if (pgd_none_or_clear_bad(pgd))···256 /* Only flush the TLB if we actually modified any entries: */257 if (pages)258 flush_tlb_range(vma, start, end);259- clear_tlb_flush_pending(mm);260261 return pages;262}
···244 BUG_ON(addr >= end);245 pgd = pgd_offset(mm, addr);246 flush_cache_range(vma, addr, end);247+ inc_tlb_flush_pending(mm);248 do {249 next = pgd_addr_end(addr, end);250 if (pgd_none_or_clear_bad(pgd))···256 /* Only flush the TLB if we actually modified any entries: */257 if (pages)258 flush_tlb_range(vma, start, end);259+ dec_tlb_flush_pending(mm);260261 return pages;262}
+6-5
mm/page_alloc.c
···4458 * Part of the reclaimable slab consists of items that are in use,4459 * and cannot be freed. Cap this estimate at the low watermark.4460 */4461- available += global_page_state(NR_SLAB_RECLAIMABLE) -4462- min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);044634464 if (available < 0)4465 available = 0;···4603 global_node_page_state(NR_FILE_DIRTY),4604 global_node_page_state(NR_WRITEBACK),4605 global_node_page_state(NR_UNSTABLE_NFS),4606- global_page_state(NR_SLAB_RECLAIMABLE),4607- global_page_state(NR_SLAB_UNRECLAIMABLE),4608 global_node_page_state(NR_FILE_MAPPED),4609 global_node_page_state(NR_SHMEM),4610 global_page_state(NR_PAGETABLE),···76697670 /* Make sure the range is really isolated. */7671 if (test_pages_isolated(outer_start, end, false)) {7672- pr_info("%s: [%lx, %lx) PFNs busy\n",7673 __func__, outer_start, end);7674 ret = -EBUSY;7675 goto done;
···4458 * Part of the reclaimable slab consists of items that are in use,4459 * and cannot be freed. Cap this estimate at the low watermark.4460 */4461+ available += global_node_page_state(NR_SLAB_RECLAIMABLE) -4462+ min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,4463+ wmark_low);44644465 if (available < 0)4466 available = 0;···4602 global_node_page_state(NR_FILE_DIRTY),4603 global_node_page_state(NR_WRITEBACK),4604 global_node_page_state(NR_UNSTABLE_NFS),4605+ global_node_page_state(NR_SLAB_RECLAIMABLE),4606+ global_node_page_state(NR_SLAB_UNRECLAIMABLE),4607 global_node_page_state(NR_FILE_MAPPED),4608 global_node_page_state(NR_SHMEM),4609 global_page_state(NR_PAGETABLE),···76687669 /* Make sure the range is really isolated. */7670 if (test_pages_isolated(outer_start, end, false)) {7671+ pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",7672 __func__, outer_start, end);7673 ret = -EBUSY;7674 goto done;
+30-22
mm/rmap.c
···888 .flags = PVMW_SYNC,889 };890 int *cleaned = arg;0891892 while (page_vma_mapped_walk(&pvmw)) {893 int ret = 0;894- address = pvmw.address;895 if (pvmw.pte) {896 pte_t entry;897 pte_t *pte = pvmw.pte;···899 if (!pte_dirty(*pte) && !pte_write(*pte))900 continue;901902- flush_cache_page(vma, address, pte_pfn(*pte));903- entry = ptep_clear_flush(vma, address, pte);904 entry = pte_wrprotect(entry);905 entry = pte_mkclean(entry);906- set_pte_at(vma->vm_mm, address, pte, entry);907 ret = 1;908 } else {909#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE···913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))914 continue;915916- flush_cache_page(vma, address, page_to_pfn(page));917- entry = pmdp_huge_clear_flush(vma, address, pmd);918 entry = pmd_wrprotect(entry);919 entry = pmd_mkclean(entry);920- set_pmd_at(vma->vm_mm, address, pmd, entry);921 ret = 1;922#else923 /* unexpected pmd-mapped page? */···926 }927928 if (ret) {929- mmu_notifier_invalidate_page(vma->vm_mm, address);930 (*cleaned)++;0931 }00000932 }933934 return true;···1328 };1329 pte_t pteval;1330 struct page *subpage;1331- bool ret = true;1332 enum ttu_flags flags = (enum ttu_flags)arg;13331334 /* munlock has nothing to gain from examining un-locked vmas */···1368 VM_BUG_ON_PAGE(!pvmw.pte, page);13691370 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);1371- address = pvmw.address;1372-13731374 if (!(flags & TTU_IGNORE_ACCESS)) {1375- if (ptep_clear_flush_young_notify(vma, address,1376 pvmw.pte)) {1377 ret = false;1378 page_vma_mapped_walk_done(&pvmw);···1379 }13801381 /* Nuke the page table entry. */1382- flush_cache_page(vma, address, pte_pfn(*pvmw.pte));1383 if (should_defer_flush(mm, flags)) {1384 /*1385 * We clear the PTE but do not flush so potentially···1389 * transition on a cached TLB entry is written through1390 * and traps if the PTE is unmapped.1391 */1392- pteval = ptep_get_and_clear(mm, address, pvmw.pte);013931394 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));1395 } else {1396- pteval = ptep_clear_flush(vma, address, pvmw.pte);1397 }13981399 /* Move the dirty bit to the page. Now the pte is gone. */···1409 if (PageHuge(page)) {1410 int nr = 1 << compound_order(page);1411 hugetlb_count_sub(nr, mm);1412- set_huge_swap_pte_at(mm, address,1413 pvmw.pte, pteval,1414 vma_mmu_pagesize(vma));1415 } else {1416 dec_mm_counter(mm, mm_counter(page));1417- set_pte_at(mm, address, pvmw.pte, pteval);1418 }14191420 } else if (pte_unused(pteval)) {···1438 swp_pte = swp_entry_to_pte(entry);1439 if (pte_soft_dirty(pteval))1440 swp_pte = pte_swp_mksoft_dirty(swp_pte);1441- set_pte_at(mm, address, pvmw.pte, swp_pte);1442 } else if (PageAnon(page)) {1443 swp_entry_t entry = { .val = page_private(subpage) };1444 pte_t swp_pte;···1464 * If the page was redirtied, it cannot be1465 * discarded. Remap the page to page table.1466 */1467- set_pte_at(mm, address, pvmw.pte, pteval);1468 SetPageSwapBacked(page);1469 ret = false;1470 page_vma_mapped_walk_done(&pvmw);···1472 }14731474 if (swap_duplicate(entry) < 0) {1475- set_pte_at(mm, address, pvmw.pte, pteval);1476 ret = false;1477 page_vma_mapped_walk_done(&pvmw);1478 break;···1488 swp_pte = swp_entry_to_pte(entry);1489 if (pte_soft_dirty(pteval))1490 swp_pte = pte_swp_mksoft_dirty(swp_pte);1491- set_pte_at(mm, address, pvmw.pte, swp_pte);1492 } else1493 dec_mm_counter(mm, mm_counter_file(page));1494discard:1495 page_remove_rmap(subpage, PageHuge(page));1496 put_page(page);1497- mmu_notifier_invalidate_page(mm, address);1498 }00001499 return ret;1500}1501
···888 .flags = PVMW_SYNC,889 };890 int *cleaned = arg;891+ bool invalidation_needed = false;892893 while (page_vma_mapped_walk(&pvmw)) {894 int ret = 0;0895 if (pvmw.pte) {896 pte_t entry;897 pte_t *pte = pvmw.pte;···899 if (!pte_dirty(*pte) && !pte_write(*pte))900 continue;901902+ flush_cache_page(vma, pvmw.address, pte_pfn(*pte));903+ entry = ptep_clear_flush(vma, pvmw.address, pte);904 entry = pte_wrprotect(entry);905 entry = pte_mkclean(entry);906+ set_pte_at(vma->vm_mm, pvmw.address, pte, entry);907 ret = 1;908 } else {909#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE···913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))914 continue;915916+ flush_cache_page(vma, pvmw.address, page_to_pfn(page));917+ entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);918 entry = pmd_wrprotect(entry);919 entry = pmd_mkclean(entry);920+ set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);921 ret = 1;922#else923 /* unexpected pmd-mapped page? */···926 }927928 if (ret) {0929 (*cleaned)++;930+ invalidation_needed = true;931 }932+ }933+934+ if (invalidation_needed) {935+ mmu_notifier_invalidate_range(vma->vm_mm, address,936+ address + (1UL << compound_order(page)));937 }938939 return true;···1323 };1324 pte_t pteval;1325 struct page *subpage;1326+ bool ret = true, invalidation_needed = false;1327 enum ttu_flags flags = (enum ttu_flags)arg;13281329 /* munlock has nothing to gain from examining un-locked vmas */···1363 VM_BUG_ON_PAGE(!pvmw.pte, page);13641365 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);0013661367 if (!(flags & TTU_IGNORE_ACCESS)) {1368+ if (ptep_clear_flush_young_notify(vma, pvmw.address,1369 pvmw.pte)) {1370 ret = false;1371 page_vma_mapped_walk_done(&pvmw);···1376 }13771378 /* Nuke the page table entry. */1379+ flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));1380 if (should_defer_flush(mm, flags)) {1381 /*1382 * We clear the PTE but do not flush so potentially···1386 * transition on a cached TLB entry is written through1387 * and traps if the PTE is unmapped.1388 */1389+ pteval = ptep_get_and_clear(mm, pvmw.address,1390+ pvmw.pte);13911392 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));1393 } else {1394+ pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);1395 }13961397 /* Move the dirty bit to the page. Now the pte is gone. */···1405 if (PageHuge(page)) {1406 int nr = 1 << compound_order(page);1407 hugetlb_count_sub(nr, mm);1408+ set_huge_swap_pte_at(mm, pvmw.address,1409 pvmw.pte, pteval,1410 vma_mmu_pagesize(vma));1411 } else {1412 dec_mm_counter(mm, mm_counter(page));1413+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);1414 }14151416 } else if (pte_unused(pteval)) {···1434 swp_pte = swp_entry_to_pte(entry);1435 if (pte_soft_dirty(pteval))1436 swp_pte = pte_swp_mksoft_dirty(swp_pte);1437+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);1438 } else if (PageAnon(page)) {1439 swp_entry_t entry = { .val = page_private(subpage) };1440 pte_t swp_pte;···1460 * If the page was redirtied, it cannot be1461 * discarded. 
Remap the page to page table.1462 */1463+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);1464 SetPageSwapBacked(page);1465 ret = false;1466 page_vma_mapped_walk_done(&pvmw);···1468 }14691470 if (swap_duplicate(entry) < 0) {1471+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);1472 ret = false;1473 page_vma_mapped_walk_done(&pvmw);1474 break;···1484 swp_pte = swp_entry_to_pte(entry);1485 if (pte_soft_dirty(pteval))1486 swp_pte = pte_swp_mksoft_dirty(swp_pte);1487+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);1488 } else1489 dec_mm_counter(mm, mm_counter_file(page));1490discard:1491 page_remove_rmap(subpage, PageHuge(page));1492 put_page(page);1493+ invalidation_needed = true;1494 }1495+1496+ if (invalidation_needed)1497+ mmu_notifier_invalidate_range(mm, address,1498+ address + (1UL << compound_order(page)));1499 return ret;1500}1501
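Note: both rmap hunks drop the per-entry mmu_notifier_invalidate_page() calls and instead record that at least one entry was modified, issuing a single mmu_notifier_invalidate_range() after the page_vma_mapped_walk() loop completes. The general shape of that refactoring, with hypothetical stand-ins rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the range notification; not a kernel API. */
static void notify_range(unsigned long start, unsigned long len)
{
	printf("invalidate [%#lx, %#lx)\n", start, start + len);
}

/* Hypothetical per-entry work; returns true when something was modified. */
static bool handle_one_entry(int i)
{
	return (i % 2) == 0;
}

int main(void)
{
	bool invalidation_needed = false;
	unsigned long start = 0x1000, len = 0x4000;
	int i;

	/* Old shape: notify inside the loop, once per modified entry.
	 * New shape: only remember that a change happened, notify once after.
	 */
	for (i = 0; i < 8; i++)
		if (handle_one_entry(i))
			invalidation_needed = true;

	if (invalidation_needed)
		notify_range(start, len);
	return 0;
}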
+10-2
mm/shmem.c
···1022 */1023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {1024 spin_lock(&sbinfo->shrinklist_lock);1025- if (list_empty(&info->shrinklist)) {00001026 list_add_tail(&info->shrinklist,1027 &sbinfo->shrinklist);1028 sbinfo->shrinklist_len++;···1821 * to shrink under memory pressure.1822 */1823 spin_lock(&sbinfo->shrinklist_lock);1824- if (list_empty(&info->shrinklist)) {00001825 list_add_tail(&info->shrinklist,1826 &sbinfo->shrinklist);1827 sbinfo->shrinklist_len++;
···1022 */1023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {1024 spin_lock(&sbinfo->shrinklist_lock);1025+ /*1026+ * _careful to defend against unlocked access to1027+ * ->shrink_list in shmem_unused_huge_shrink()1028+ */1029+ if (list_empty_careful(&info->shrinklist)) {1030 list_add_tail(&info->shrinklist,1031 &sbinfo->shrinklist);1032 sbinfo->shrinklist_len++;···1817 * to shrink under memory pressure.1818 */1819 spin_lock(&sbinfo->shrinklist_lock);1820+ /*1821+ * _careful to defend against unlocked access to1822+ * ->shrink_list in shmem_unused_huge_shrink()1823+ */1824+ if (list_empty_careful(&info->shrinklist)) {1825 list_add_tail(&info->shrinklist,1826 &sbinfo->shrinklist);1827 sbinfo->shrinklist_len++;
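Note: the shmem hunks replace list_empty() with list_empty_careful() before queueing the inode on the shrink list; as the new comments note, shmem_unused_huge_shrink() can touch ->shrinklist without holding shrinklist_lock, and the careful variant checks both link pointers so a concurrently-unlinked node is not misread as empty. A standalone sketch of the two checks on a minimal doubly linked list (not the kernel's <linux/list.h>):

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *h) { h->next = h; h->prev = h; }

/* Plain check: only looks at ->next. */
static bool list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* "Careful" check: also requires ->prev to point back at the head, so a
 * node that another thread is in the middle of unlinking is not treated
 * as already unlinked.  (Illustration only; the real helper lives in
 * include/linux/list.h.)
 */
static bool list_empty_careful(const struct list_head *h)
{
	const struct list_head *next = h->next;

	return (next == h) && (next == h->prev);
}

int main(void)
{
	struct list_head node;

	init_list_head(&node);
	printf("%d %d\n", list_empty(&node), list_empty_careful(&node)); /* 1 1 */
	return 0;
}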
+1-1
mm/util.c
···633 * which are reclaimable, under pressure. The dentry634 * cache and most inode caches should fall into this635 */636- free += global_page_state(NR_SLAB_RECLAIMABLE);637638 /*639 * Leave reserved pages. The pages are not for anonymous pages.
···633 * which are reclaimable, under pressure. The dentry634 * cache and most inode caches should fall into this635 */636+ free += global_node_page_state(NR_SLAB_RECLAIMABLE);637638 /*639 * Leave reserved pages. The pages are not for anonymous pages.
+51-9
net/batman-adv/translation-table.c
···1549 return found;1550}155100000000000000000000000000000001552static void1553batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,1554- struct batadv_orig_node *orig_node, int ttvn)01555{1556 struct batadv_tt_orig_list_entry *orig_entry;1557···1593 * was added during a "temporary client detection"1594 */1595 orig_entry->ttvn = ttvn;1596- goto out;01597 }15981599 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);···1606 batadv_tt_global_size_inc(orig_node, tt_global->common.vid);1607 orig_entry->orig_node = orig_node;1608 orig_entry->ttvn = ttvn;01609 kref_init(&orig_entry->refcount);16101611 spin_lock_bh(&tt_global->list_lock);···1616 spin_unlock_bh(&tt_global->list_lock);1617 atomic_inc(&tt_global->orig_list_count);1618001619out:1620 if (orig_entry)1621 batadv_tt_orig_list_entry_put(orig_entry);···1739 }17401741 /* the change can carry possible "attribute" flags like the1742- * TT_CLIENT_WIFI, therefore they have to be copied in the1743 * client entry1744 */1745- common->flags |= flags;17461747 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only1748 * one originator left in the list and we previously received a···1759 }1760add_orig_entry:1761 /* add the new orig_entry (if needed) or update it */1762- batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);017631764 batadv_dbg(BATADV_DBG_TT, bat_priv,1765 "Creating new global tt entry: %pM (vid: %d, via %pM)\n",···1983 struct batadv_tt_orig_list_entry *orig,1984 bool best)1985{01986 void *hdr;1987 struct batadv_orig_node_vlan *vlan;1988 u8 last_ttvn;···2013 nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||2014 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||2015 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||2016- nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))2017 goto nla_put_failure;20182019 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))···2627 unsigned short vid)2628{2629 struct batadv_hashtable *hash = bat_priv->tt.global_hash;02630 struct batadv_tt_common_entry *tt_common;2631 struct batadv_tt_global_entry *tt_global;2632 struct hlist_head *head;···2666 /* find out if this global entry is announced by this2667 * originator2668 */2669- if (!batadv_tt_global_entry_has_orig(tt_global,2670- orig_node))02671 continue;26722673 /* use network order to read the VID: this ensures that···2680 /* compute the CRC on flags that have to be kept in sync2681 * among nodes2682 */2683- flags = tt_common->flags & BATADV_TT_SYNC_MASK;2684 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));26852686 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);002687 }2688 rcu_read_unlock();2689 }
···1549 return found;1550}15511552+/**1553+ * batadv_tt_global_sync_flags - update TT sync flags1554+ * @tt_global: the TT global entry to update sync flags in1555+ *1556+ * Updates the sync flag bits in the tt_global flag attribute with a logical1557+ * OR of all sync flags from any of its TT orig entries.1558+ */1559+static void1560+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)1561+{1562+ struct batadv_tt_orig_list_entry *orig_entry;1563+ const struct hlist_head *head;1564+ u16 flags = BATADV_NO_FLAGS;1565+1566+ rcu_read_lock();1567+ head = &tt_global->orig_list;1568+ hlist_for_each_entry_rcu(orig_entry, head, list)1569+ flags |= orig_entry->flags;1570+ rcu_read_unlock();1571+1572+ flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);1573+ tt_global->common.flags = flags;1574+}1575+1576+/**1577+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry1578+ * @tt_global: the TT global entry to add an orig entry in1579+ * @orig_node: the originator to add an orig entry for1580+ * @ttvn: translation table version number of this changeset1581+ * @flags: TT sync flags1582+ */1583static void1584batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,1585+ struct batadv_orig_node *orig_node, int ttvn,1586+ u8 flags)1587{1588 struct batadv_tt_orig_list_entry *orig_entry;1589···1561 * was added during a "temporary client detection"1562 */1563 orig_entry->ttvn = ttvn;1564+ orig_entry->flags = flags;1565+ goto sync_flags;1566 }15671568 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);···1573 batadv_tt_global_size_inc(orig_node, tt_global->common.vid);1574 orig_entry->orig_node = orig_node;1575 orig_entry->ttvn = ttvn;1576+ orig_entry->flags = flags;1577 kref_init(&orig_entry->refcount);15781579 spin_lock_bh(&tt_global->list_lock);···1582 spin_unlock_bh(&tt_global->list_lock);1583 atomic_inc(&tt_global->orig_list_count);15841585+sync_flags:1586+ batadv_tt_global_sync_flags(tt_global);1587out:1588 if (orig_entry)1589 batadv_tt_orig_list_entry_put(orig_entry);···1703 }17041705 /* the change can carry possible "attribute" flags like the1706+ * TT_CLIENT_TEMP, therefore they have to be copied in the1707 * client entry1708 */1709+ common->flags |= flags & (~BATADV_TT_SYNC_MASK);17101711 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only1712 * one originator left in the list and we previously received a···1723 }1724add_orig_entry:1725 /* add the new orig_entry (if needed) or update it */1726+ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,1727+ flags & BATADV_TT_SYNC_MASK);17281729 batadv_dbg(BATADV_DBG_TT, bat_priv,1730 "Creating new global tt entry: %pM (vid: %d, via %pM)\n",···1946 struct batadv_tt_orig_list_entry *orig,1947 bool best)1948{1949+ u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;1950 void *hdr;1951 struct batadv_orig_node_vlan *vlan;1952 u8 last_ttvn;···1975 nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||1976 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||1977 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||1978+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))1979 goto nla_put_failure;19801981 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))···2589 unsigned short vid)2590{2591 struct batadv_hashtable *hash = bat_priv->tt.global_hash;2592+ struct batadv_tt_orig_list_entry *tt_orig;2593 struct batadv_tt_common_entry *tt_common;2594 struct batadv_tt_global_entry *tt_global;2595 struct hlist_head *head;···2627 /* find out if this global entry is announced by 
this2628 * originator2629 */2630+ tt_orig = batadv_tt_global_orig_entry_find(tt_global,2631+ orig_node);2632+ if (!tt_orig)2633 continue;26342635 /* use network order to read the VID: this ensures that···2640 /* compute the CRC on flags that have to be kept in sync2641 * among nodes2642 */2643+ flags = tt_orig->flags;2644 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));26452646 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);2647+2648+ batadv_tt_orig_list_entry_put(tt_orig);2649 }2650 rcu_read_unlock();2651 }
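Note: batadv_tt_global_sync_flags() recomputes the sync portion of the global entry's flags as the logical OR of every orig entry's flags, then re-adds the global entry's non-sync bits. The bit manipulation in isolation, as a userspace sketch with an arbitrary placeholder mask (the real BATADV_TT_SYNC_MASK is defined in the batman-adv headers):

#include <stdint.h>
#include <stdio.h>

#define SYNC_MASK 0x00F0u	/* arbitrary placeholder, not the real mask */

static uint16_t rebuild_flags(uint16_t global_flags,
			      const uint16_t *orig_flags, int n)
{
	uint16_t flags = 0;
	int i;

	for (i = 0; i < n; i++)
		flags |= orig_flags[i];		/* OR of all per-orig sync flags */

	flags |= global_flags & ~SYNC_MASK;	/* keep the non-sync bits as-is */
	return flags;
}

int main(void)
{
	uint16_t per_orig[] = { 0x0010, 0x0040 };

	printf("0x%04x\n", rebuild_flags(0x1002, per_orig, 2)); /* 0x1052 */
	return 0;
}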
+2
net/batman-adv/types.h
···1260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client1261 * @orig_node: pointer to orig node announcing this non-mesh client1262 * @ttvn: translation table version number which added the non-mesh client01263 * @list: list node for batadv_tt_global_entry::orig_list1264 * @refcount: number of contexts the object is used1265 * @rcu: struct used for freeing in an RCU-safe manner···1268struct batadv_tt_orig_list_entry {1269 struct batadv_orig_node *orig_node;1270 u8 ttvn;01271 struct hlist_node list;1272 struct kref refcount;1273 struct rcu_head rcu;
···1260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client1261 * @orig_node: pointer to orig node announcing this non-mesh client1262 * @ttvn: translation table version number which added the non-mesh client1263+ * @flags: per orig entry TT sync flags1264 * @list: list node for batadv_tt_global_entry::orig_list1265 * @refcount: number of contexts the object is used1266 * @rcu: struct used for freeing in an RCU-safe manner···1267struct batadv_tt_orig_list_entry {1268 struct batadv_orig_node *orig_node;1269 u8 ttvn;1270+ u8 flags;1271 struct hlist_node list;1272 struct kref refcount;1273 struct rcu_head rcu;
···107#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */108#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */109#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */0110#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */111#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */112#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */···2521 return;25222523 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */2524- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||2525- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {2526 tp->snd_cwnd = tp->snd_ssthresh;2527 tp->snd_cwnd_stamp = tcp_jiffies32;2528 }···3005 /* Offset the time elapsed after installing regular RTO */3006 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||3007 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {3008- struct sk_buff *skb = tcp_write_queue_head(sk);3009- u64 rto_time_stamp = skb->skb_mstamp +3010- jiffies_to_usecs(rto);3011- s64 delta_us = rto_time_stamp - tp->tcp_mstamp;3012 /* delta_us may not be positive if the socket is locked3013 * when the retrans timer fires and is rescheduled.3014 */···3015 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,3016 TCP_RTO_MAX);3017 }00000003018}30193020/* If we get here, the whole TSO packet has not been acked. */···3185 ca_rtt_us, sack->rate);31863187 if (flag & FLAG_ACKED) {3188- tcp_rearm_rto(sk);3189 if (unlikely(icsk->icsk_mtup.probe_size &&3190 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {3191 tcp_mtup_probe_success(sk);···3213 * after when the head was last (re)transmitted. Otherwise the3214 * timeout may continue to extend in loss recovery.3215 */3216- tcp_rearm_rto(sk);3217 }32183219 if (icsk->icsk_ca_ops->pkts_acked) {···3585 if (after(ack, tp->snd_nxt))3586 goto invalid_ack;35873588- if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)3589- tcp_rearm_rto(sk);3590-3591 if (after(ack, prior_snd_una)) {3592 flag |= FLAG_SND_UNA_ADVANCED;3593 icsk->icsk_retransmits = 0;···3649 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,3650 &sack_state);36510000003652 if (tcp_ack_is_dubious(sk, flag)) {3653 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));3654 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);3655 }3656- if (tp->tlp_high_seq)3657- tcp_process_tlp_ack(sk, ack, flag);36583659 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))3660 sk_dst_confirm(sk);36613662- if (icsk->icsk_pending == ICSK_TIME_RETRANS)3663- tcp_schedule_loss_probe(sk);3664 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */3665 lost = tp->lost - lost; /* freshly marked lost */3666 tcp_rate_gen(sk, delivered, lost, sack_state.rate);
···107#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */108#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */109#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */110+#define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */111#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */112#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */113#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */···2520 return;25212522 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */2523+ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&2524+ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {2525 tp->snd_cwnd = tp->snd_ssthresh;2526 tp->snd_cwnd_stamp = tcp_jiffies32;2527 }···3004 /* Offset the time elapsed after installing regular RTO */3005 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||3006 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {3007+ s64 delta_us = tcp_rto_delta_us(sk);0003008 /* delta_us may not be positive if the socket is locked3009 * when the retrans timer fires and is rescheduled.3010 */···3017 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,3018 TCP_RTO_MAX);3019 }3020+}3021+3022+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */3023+static void tcp_set_xmit_timer(struct sock *sk)3024+{3025+ if (!tcp_schedule_loss_probe(sk))3026+ tcp_rearm_rto(sk);3027}30283029/* If we get here, the whole TSO packet has not been acked. */···3180 ca_rtt_us, sack->rate);31813182 if (flag & FLAG_ACKED) {3183+ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */3184 if (unlikely(icsk->icsk_mtup.probe_size &&3185 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {3186 tcp_mtup_probe_success(sk);···3208 * after when the head was last (re)transmitted. Otherwise the3209 * timeout may continue to extend in loss recovery.3210 */3211+ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */3212 }32133214 if (icsk->icsk_ca_ops->pkts_acked) {···3580 if (after(ack, tp->snd_nxt))3581 goto invalid_ack;35820003583 if (after(ack, prior_snd_una)) {3584 flag |= FLAG_SND_UNA_ADVANCED;3585 icsk->icsk_retransmits = 0;···3647 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,3648 &sack_state);36493650+ if (tp->tlp_high_seq)3651+ tcp_process_tlp_ack(sk, ack, flag);3652+ /* If needed, reset TLP/RTO timer; RACK may later override this. */3653+ if (flag & FLAG_SET_XMIT_TIMER)3654+ tcp_set_xmit_timer(sk);3655+3656 if (tcp_ack_is_dubious(sk, flag)) {3657 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));3658 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);3659 }0036603661 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))3662 sk_dst_confirm(sk);3663003664 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */3665 lost = tp->lost - lost; /* freshly marked lost */3666 tcp_rate_gen(sk, delivered, lost, sack_state.rate);
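Note: in tcp_input.c, call sites that used to re-arm the retransmission timer directly now only set the new FLAG_SET_XMIT_TIMER bit, and tcp_ack() arms the timer once near the end via tcp_set_xmit_timer(), which tries to schedule a tail loss probe and falls back to the RTO. A stripped-down model of that control flow (plain functions standing in for the kernel ones, not the real implementations):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_SET_XMIT_TIMER 0x1000	/* mirrors the new flag bit */

/* Placeholder: returns true when a tail loss probe could be armed. */
static bool schedule_loss_probe(void) { return false; }

/* Placeholder for re-arming the retransmission timer. */
static void rearm_rto(void) { puts("armed RTO"); }

static void set_xmit_timer(void)
{
	if (!schedule_loss_probe())
		rearm_rto();			/* TLP not possible: fall back to RTO */
}

static void ack_processing(bool data_acked)
{
	int flag = 0;

	if (data_acked)
		flag |= FLAG_SET_XMIT_TIMER;	/* remember, don't arm yet */

	/* ... the rest of ACK processing runs here ... */

	if (flag & FLAG_SET_XMIT_TIMER)
		set_xmit_timer();		/* arm exactly once, at the end */
}

int main(void)
{
	ack_processing(true);
	return 0;
}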
+9-18
net/ipv4/tcp_output.c
···2377{2378 struct inet_connection_sock *icsk = inet_csk(sk);2379 struct tcp_sock *tp = tcp_sk(sk);2380- u32 timeout, tlp_time_stamp, rto_time_stamp;2381 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);023822383- /* No consecutive loss probes. */2384- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {2385- tcp_rearm_rto(sk);2386- return false;2387- }2388 /* Don't do any loss probe on a Fast Open connection before 3WHS2389 * finishes.2390 */2391 if (tp->fastopen_rsk)2392- return false;2393-2394- /* TLP is only scheduled when next timer event is RTO. */2395- if (icsk->icsk_pending != ICSK_TIME_RETRANS)2396 return false;23972398 /* Schedule a loss probe in 2*RTT for SACK capable connections···2408 (rtt + (rtt >> 1) + TCP_DELACK_MAX));2409 timeout = max_t(u32, timeout, msecs_to_jiffies(10));24102411- /* If RTO is shorter, just schedule TLP in its place. */2412- tlp_time_stamp = tcp_jiffies32 + timeout;2413- rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;2414- if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {2415- s32 delta = rto_time_stamp - tcp_jiffies32;2416- if (delta > 0)2417- timeout = delta;2418- }24192420 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,2421 TCP_RTO_MAX);···3436 int err;34373438 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);00003439 tcp_connect_init(sk);34403441 if (unlikely(tp->repair)) {
···2377{2378 struct inet_connection_sock *icsk = inet_csk(sk);2379 struct tcp_sock *tp = tcp_sk(sk);02380 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);2381+ u32 timeout, rto_delta_us;2382000002383 /* Don't do any loss probe on a Fast Open connection before 3WHS2384 * finishes.2385 */2386 if (tp->fastopen_rsk)00002387 return false;23882389 /* Schedule a loss probe in 2*RTT for SACK capable connections···2417 (rtt + (rtt >> 1) + TCP_DELACK_MAX));2418 timeout = max_t(u32, timeout, msecs_to_jiffies(10));24192420+ /* If the RTO formula yields an earlier time, then use that time. */2421+ rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */2422+ if (rto_delta_us > 0)2423+ timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));000024242425 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,2426 TCP_RTO_MAX);···3449 int err;34503451 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);3452+3453+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))3454+ return -EHOSTUNREACH; /* Routing failure or similar. */3455+3456 tcp_connect_init(sk);34573458 if (unlikely(tp->repair)) {
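Note: tcp_schedule_loss_probe() now clamps the probe timeout against the time left until the regular RTO, via a tcp_rto_delta_us() helper whose definition is not shown in these hunks. The clamp itself is just "use the RTO deadline if it is sooner"; a sketch with units simplified to microseconds and values chosen for illustration:

#include <stdio.h>

/* Pick the earlier of the TLP timeout and the pending RTO, both relative
 * to "now".  rto_delta_us <= 0 means the RTO is already due, in which case
 * the TLP timeout is left alone.
 */
static long clamp_tlp_timeout(long tlp_timeout_us, long rto_delta_us)
{
	if (rto_delta_us > 0 && rto_delta_us < tlp_timeout_us)
		return rto_delta_us;
	return tlp_timeout_us;
}

int main(void)
{
	printf("%ld\n", clamp_tlp_timeout(200000, 150000));	/* 150000 */
	printf("%ld\n", clamp_tlp_timeout(200000, -5000));	/* 200000 */
	return 0;
}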
···802 if (is_udplite) /* UDP-Lite */803 csum = udplite_csum(skb);804805+ else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */806807 skb->ip_summed = CHECKSUM_NONE;808 goto send;
+1-1
net/ipv4/udp_offload.c
···235 if (uh->check == 0)236 uh->check = CSUM_MANGLED_0;237238- skb->ip_summed = CHECKSUM_NONE;239240 /* If there is no outer header we can fake a checksum offload241 * due to the fact that we have already done the checksum in
···235 if (uh->check == 0)236 uh->check = CSUM_MANGLED_0;237238+ skb->ip_summed = CHECKSUM_UNNECESSARY;239240 /* If there is no outer header we can fake a checksum offload241 * due to the fact that we have already done the checksum in
···72 if (uh->check == 0)73 uh->check = CSUM_MANGLED_0;7475- skb->ip_summed = CHECKSUM_NONE;7677 /* If there is no outer header we can fake a checksum offload78 * due to the fact that we have already done the checksum in
···72 if (uh->check == 0)73 uh->check = CSUM_MANGLED_0;7475+ skb->ip_summed = CHECKSUM_UNNECESSARY;7677 /* If there is no outer header we can fake a checksum offload78 * due to the fact that we have already done the checksum in
+9-4
net/packet/af_packet.c
···37003701 if (optlen != sizeof(val))3702 return -EINVAL;3703- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)3704- return -EBUSY;3705 if (copy_from_user(&val, optval, sizeof(val)))3706 return -EFAULT;3707 if (val > INT_MAX)3708 return -EINVAL;3709- po->tp_reserve = val;3710- return 0;00000003711 }3712 case PACKET_LOSS:3713 {
···37003701 if (optlen != sizeof(val))3702 return -EINVAL;003703 if (copy_from_user(&val, optval, sizeof(val)))3704 return -EFAULT;3705 if (val > INT_MAX)3706 return -EINVAL;3707+ lock_sock(sk);3708+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {3709+ ret = -EBUSY;3710+ } else {3711+ po->tp_reserve = val;3712+ ret = 0;3713+ }3714+ release_sock(sk);3715+ return ret;3716 }3717 case PACKET_LOSS:3718 {
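Note: for PACKET_RESERVE, the check that no RX/TX ring is mapped and the write to po->tp_reserve now happen together under lock_sock(), closing the window in which a ring could be set up between the check and the assignment. The same check-then-set-under-one-lock shape in a generic pthread sketch (names are illustrative, not the af_packet code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int ring_mapped;		/* stands in for rx_ring/tx_ring being set up */
static unsigned int reserve;

/* Returns 0 on success, -1 (EBUSY-like) if the ring already exists. */
static int set_reserve(unsigned int val)
{
	int ret;

	pthread_mutex_lock(&lock);
	if (ring_mapped) {
		ret = -1;	/* too late: the ring layout is already fixed */
	} else {
		reserve = val;	/* safe: checked and set under the same lock */
		ret = 0;
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d\n", set_reserve(128));	/* 0 */
	ring_mapped = 1;
	printf("%d\n", set_reserve(256));	/* -1 */
	return 0;
}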
+4-1
net/rds/ib_recv.c
···1015 if (rds_ib_ring_empty(&ic->i_recv_ring))1016 rds_ib_stats_inc(s_ib_rx_ring_empty);10171018- if (rds_ib_ring_low(&ic->i_recv_ring))1019 rds_ib_recv_refill(conn, 0, GFP_NOWAIT);001020}10211022int rds_ib_recv_path(struct rds_conn_path *cp)···1031 if (rds_conn_up(conn)) {1032 rds_ib_attempt_ack(ic);1033 rds_ib_recv_refill(conn, 0, GFP_KERNEL);01034 }10351036 return ret;
···1015 if (rds_ib_ring_empty(&ic->i_recv_ring))1016 rds_ib_stats_inc(s_ib_rx_ring_empty);10171018+ if (rds_ib_ring_low(&ic->i_recv_ring)) {1019 rds_ib_recv_refill(conn, 0, GFP_NOWAIT);1020+ rds_ib_stats_inc(s_ib_rx_refill_from_cq);1021+ }1022}10231024int rds_ib_recv_path(struct rds_conn_path *cp)···1029 if (rds_conn_up(conn)) {1030 rds_ib_attempt_ack(ic);1031 rds_ib_recv_refill(conn, 0, GFP_KERNEL);1032+ rds_ib_stats_inc(s_ib_rx_refill_from_thread);1033 }10341035 return ret;
+11-11
net/sched/act_ipt.c
···36static unsigned int xt_net_id;37static struct tc_action_ops act_xt_ops;3839-static int ipt_init_target(struct xt_entry_target *t, char *table,40- unsigned int hook)41{42 struct xt_tgchk_param par;43 struct xt_target *target;···49 return PTR_ERR(target);5051 t->u.kernel.target = target;0052 par.table = table;53- par.entryinfo = NULL;54 par.target = target;55 par.targinfo = t->data;56 par.hook_mask = hook;···92 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },93};9495-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,96 struct nlattr *est, struct tc_action **a,97 const struct tc_action_ops *ops, int ovr, int bind)98{099 struct nlattr *tb[TCA_IPT_MAX + 1];100 struct tcf_ipt *ipt;101 struct xt_entry_target *td, *t;···161 if (unlikely(!t))162 goto err2;163164- err = ipt_init_target(t, tname, hook);165 if (err < 0)166 goto err3;167···195 struct nlattr *est, struct tc_action **a, int ovr,196 int bind)197{198- struct tc_action_net *tn = net_generic(net, ipt_net_id);199-200- return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);201}202203static int tcf_xt_init(struct net *net, struct nlattr *nla,204 struct nlattr *est, struct tc_action **a, int ovr,205 int bind)206{207- struct tc_action_net *tn = net_generic(net, xt_net_id);208-209- return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);210}211212static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
···36static unsigned int xt_net_id;37static struct tc_action_ops act_xt_ops;3839+static int ipt_init_target(struct net *net, struct xt_entry_target *t,40+ char *table, unsigned int hook)41{42 struct xt_tgchk_param par;43 struct xt_target *target;···49 return PTR_ERR(target);5051 t->u.kernel.target = target;52+ memset(&par, 0, sizeof(par));53+ par.net = net;54 par.table = table;055 par.target = target;56 par.targinfo = t->data;57 par.hook_mask = hook;···91 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },92};9394+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,95 struct nlattr *est, struct tc_action **a,96 const struct tc_action_ops *ops, int ovr, int bind)97{98+ struct tc_action_net *tn = net_generic(net, id);99 struct nlattr *tb[TCA_IPT_MAX + 1];100 struct tcf_ipt *ipt;101 struct xt_entry_target *td, *t;···159 if (unlikely(!t))160 goto err2;161162+ err = ipt_init_target(net, t, tname, hook);163 if (err < 0)164 goto err3;165···193 struct nlattr *est, struct tc_action **a, int ovr,194 int bind)195{196+ return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,197+ bind);0198}199200static int tcf_xt_init(struct net *net, struct nlattr *nla,201 struct nlattr *est, struct tc_action **a, int ovr,202 int bind)203{204+ return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,205+ bind);0206}207208static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
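Note: ipt_init_target() now takes the struct net pointer and zeroes the whole xt_tgchk_param with memset() before filling in the fields it needs, so members it never touches (previously only entryinfo was cleared explicitly) cannot carry stack garbage. A plain-C sketch of that defensive initialization, using a made-up parameter struct rather than the real xt_tgchk_param:

#include <stdio.h>
#include <string.h>

/* Made-up parameter block; the real one is struct xt_tgchk_param. */
struct check_param {
	const void	*net;
	const char	*table;
	const void	*target;
	void		*targinfo;
	unsigned int	hook_mask;
	int		family;		/* never set below: must still read as 0 */
};

static void init_param(struct check_param *par, const void *net,
		       const char *table, unsigned int hook)
{
	memset(par, 0, sizeof(*par));	/* everything not set below is zero */
	par->net = net;
	par->table = table;
	par->hook_mask = hook;
}

int main(void)
{
	struct check_param par;

	init_param(&par, NULL, "mangle", 1u << 3);
	printf("family=%d table=%s\n", par.family, par.table);	/* family=0 */
	return 0;
}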
···1819use Getopt::Long qw(:config no_auto_abbrev);20use Cwd;02122my $cur_path = fastgetcwd() . '/';23my $lk_path = "./";···59my $pattern_depth = 0;60my $version = 0;61my $help = 0;06263my $vcs_used = 0;64···251 'sections!' => \$sections,252 'fe|file-emails!' => \$file_emails,253 'f|file' => \$from_filename,0254 'v|version' => \$version,255 'h|help|usage' => \$help,256 )) {···310311my @typevalue = ();312my %keyword_hash;0313314-open (my $maint, '<', "${lk_path}MAINTAINERS")315- or die "$P: Can't open MAINTAINERS: $!\n";316-while (<$maint>) {317- my $line = $_;318319- if ($line =~ m/^([A-Z]):\s*(.*)/) {320- my $type = $1;321- my $value = $2;0322323- ##Filename pattern matching324- if ($type eq "F" || $type eq "X") {325- $value =~ s@\.@\\\.@g; ##Convert . to \.326- $value =~ s/\*/\.\*/g; ##Convert * to .*327- $value =~ s/\?/\./g; ##Convert ? to .328- ##if pattern is a directory and it lacks a trailing slash, add one329- if ((-d $value)) {330- $value =~ s@([^/])$@$1/@;0000000331 }332- } elsif ($type eq "K") {333- $keyword_hash{@typevalue} = $value;00334 }335- push(@typevalue, "$type:$value");336- } elsif (!/^(\s)*$/) {337- $line =~ s/\n$//g;338- push(@typevalue, $line);000000000000000000339 }340}341-close($maint);342000000000000343344#345# Read mail address map···914 if ( (-f "${lk_path}COPYING")915 && (-f "${lk_path}CREDITS")916 && (-f "${lk_path}Kbuild")917- && (-f "${lk_path}MAINTAINERS")918 && (-f "${lk_path}Makefile")919 && (-f "${lk_path}README")920 && (-d "${lk_path}Documentation")
···1819use Getopt::Long qw(:config no_auto_abbrev);20use Cwd;21+use File::Find;2223my $cur_path = fastgetcwd() . '/';24my $lk_path = "./";···58my $pattern_depth = 0;59my $version = 0;60my $help = 0;61+my $find_maintainer_files = 0;6263my $vcs_used = 0;64···249 'sections!' => \$sections,250 'fe|file-emails!' => \$file_emails,251 'f|file' => \$from_filename,252+ 'find-maintainer-files' => \$find_maintainer_files,253 'v|version' => \$version,254 'h|help|usage' => \$help,255 )) {···307308my @typevalue = ();309my %keyword_hash;310+my @mfiles = ();311312+sub read_maintainer_file {313+ my ($file) = @_;00314315+ open (my $maint, '<', "$file")316+ or die "$P: Can't open MAINTAINERS file '$file': $!\n";317+ while (<$maint>) {318+ my $line = $_;319320+ if ($line =~ m/^([A-Z]):\s*(.*)/) {321+ my $type = $1;322+ my $value = $2;323+324+ ##Filename pattern matching325+ if ($type eq "F" || $type eq "X") {326+ $value =~ s@\.@\\\.@g; ##Convert . to \.327+ $value =~ s/\*/\.\*/g; ##Convert * to .*328+ $value =~ s/\?/\./g; ##Convert ? to .329+ ##if pattern is a directory and it lacks a trailing slash, add one330+ if ((-d $value)) {331+ $value =~ s@([^/])$@$1/@;332+ }333+ } elsif ($type eq "K") {334+ $keyword_hash{@typevalue} = $value;335 }336+ push(@typevalue, "$type:$value");337+ } elsif (!(/^\s*$/ || /^\s*\#/)) {338+ $line =~ s/\n$//g;339+ push(@typevalue, $line);340 }341+ }342+ close($maint);343+}344+345+sub find_is_maintainer_file {346+ my ($file) = $_;347+ return if ($file !~ m@/MAINTAINERS$@);348+ $file = $File::Find::name;349+ return if (! -f $file);350+ push(@mfiles, $file);351+}352+353+sub find_ignore_git {354+ return grep { $_ !~ /^\.git$/; } @_;355+}356+357+if (-d "${lk_path}MAINTAINERS") {358+ opendir(DIR, "${lk_path}MAINTAINERS") or die $!;359+ my @files = readdir(DIR);360+ closedir(DIR);361+ foreach my $file (@files) {362+ push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);363 }364}0365366+if ($find_maintainer_files) {367+ find( { wanted => \&find_is_maintainer_file,368+ preprocess => \&find_ignore_git,369+ no_chdir => 1,370+ }, "${lk_path}");371+} else {372+ push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";373+}374+375+foreach my $file (@mfiles) {376+ read_maintainer_file("$file");377+}378379#380# Read mail address map···873 if ( (-f "${lk_path}COPYING")874 && (-f "${lk_path}CREDITS")875 && (-f "${lk_path}Kbuild")876+ && (-e "${lk_path}MAINTAINERS")877 && (-f "${lk_path}Makefile")878 && (-f "${lk_path}README")879 && (-d "${lk_path}Documentation")
+73-22
scripts/parse-maintainers.pl
···23use strict;45-my %map;67-# sort comparison function8sub by_category($$) {9 my ($a, $b) = @_;10···15 $a =~ s/THE REST/ZZZZZZ/g;16 $b =~ s/THE REST/ZZZZZZ/g;1718- $a cmp $b;19}2021-sub alpha_output {22- my $key;23- my $sort_method = \&by_category;24- my $sep = "";2526- foreach $key (sort $sort_method keys %map) {27- if ($key ne " ") {28- print $sep . $key . "\n";29- $sep = "\n";30- }31- print $map{$key};0000000000000032 }33}34···52 return $s;53}5400000000000000000055sub file_input {0056 my $lastline = "";57 my $case = " ";58- $map{$case} = "";5960- while (<>) {0061 my $line = $_;6263 # Pattern line?64 if ($line =~ m/^([A-Z]):\s*(.*)/) {65 $line = $1 . ":\t" . trim($2) . "\n";66 if ($lastline eq "") {67- $map{$case} = $map{$case} . $line;68 next;69 }70 $case = trim($lastline);71- exists $map{$case} and die "Header '$case' already exists";72- $map{$case} = $line;73 $lastline = "";74 next;75 }7677 if ($case eq " ") {78- $map{$case} = $map{$case} . $lastline;79 $lastline = $line;80 next;81 }82 trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'");83 $lastline = $line;84 }85- $map{$case} = $map{$case} . $lastline;086}8788-&file_input;89-&alpha_output;00000000000000090exit(0);
···23use strict;45+my $P = $0;67+# sort comparison functions8sub by_category($$) {9 my ($a, $b) = @_;10···15 $a =~ s/THE REST/ZZZZZZ/g;16 $b =~ s/THE REST/ZZZZZZ/g;1718+ return $a cmp $b;19}2021+sub by_pattern($$) {22+ my ($a, $b) = @_;23+ my $preferred_order = 'MRPLSWTQBCFXNK';02425+ my $a1 = uc(substr($a, 0, 1));26+ my $b1 = uc(substr($b, 0, 1));27+28+ my $a_index = index($preferred_order, $a1);29+ my $b_index = index($preferred_order, $b1);30+31+ $a_index = 1000 if ($a_index == -1);32+ $b_index = 1000 if ($b_index == -1);33+34+ if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||35+ ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {36+ return $a cmp $b;37+ }38+39+ if ($a_index < $b_index) {40+ return -1;41+ } elsif ($a_index == $b_index) {42+ return 0;43+ } else {44+ return 1;45 }46}47···39 return $s;40}4142+sub alpha_output {43+ my ($hashref, $filename) = (@_);44+45+ open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n";46+ foreach my $key (sort by_category keys %$hashref) {47+ if ($key eq " ") {48+ chomp $$hashref{$key};49+ print $file $$hashref{$key};50+ } else {51+ print $file "\n" . $key . "\n";52+ foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {53+ print $file ($pattern . "\n");54+ }55+ }56+ }57+ close($file);58+}59+60sub file_input {61+ my ($hashref, $filename) = (@_);62+63 my $lastline = "";64 my $case = " ";65+ $$hashref{$case} = "";6667+ open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n";68+69+ while (<$file>) {70 my $line = $_;7172 # Pattern line?73 if ($line =~ m/^([A-Z]):\s*(.*)/) {74 $line = $1 . ":\t" . trim($2) . "\n";75 if ($lastline eq "") {76+ $$hashref{$case} = $$hashref{$case} . $line;77 next;78 }79 $case = trim($lastline);80+ exists $$hashref{$case} and die "Header '$case' already exists";81+ $$hashref{$case} = $line;82 $lastline = "";83 next;84 }8586 if ($case eq " ") {87+ $$hashref{$case} = $$hashref{$case} . $lastline;88 $lastline = $line;89 next;90 }91 trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'");92 $lastline = $line;93 }94+ $$hashref{$case} = $$hashref{$case} . $lastline;95+ close($file);96}9798+my %hash;99+my %new_hash;100+101+file_input(\%hash, "MAINTAINERS");102+103+foreach my $type (@ARGV) {104+ foreach my $key (keys %hash) {105+ if ($key =~ /$type/ || $hash{$key} =~ /$type/) {106+ $new_hash{$key} = $hash{$key};107+ delete $hash{$key};108+ }109+ }110+}111+112+alpha_output(\%hash, "MAINTAINERS.new");113+alpha_output(\%new_hash, "SECTION.new");114+115exit(0);
+2
tools/build/feature/test-bpf.c
···11# define __NR_bpf 28012# elif defined(__sparc__)13# define __NR_bpf 3490014# else15# error __NR_bpf not defined. libbpf does not support your arch.16# endif
···11# define __NR_bpf 28012# elif defined(__sparc__)13# define __NR_bpf 34914+# elif defined(__s390__)15+# define __NR_bpf 35116# else17# error __NR_bpf not defined. libbpf does not support your arch.18# endif
+2
tools/lib/bpf/bpf.c
···39# define __NR_bpf 28040# elif defined(__sparc__)41# define __NR_bpf 3490042# else43# error __NR_bpf not defined. libbpf does not support your arch.44# endif
···39# define __NR_bpf 28040# elif defined(__sparc__)41# define __NR_bpf 34942+# elif defined(__s390__)43+# define __NR_bpf 35144# else45# error __NR_bpf not defined. libbpf does not support your arch.46# endif
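Note: the two tools/ hunks extend the same fallback table with the s390 bpf(2) syscall number (351), used only when the toolchain headers do not already provide __NR_bpf. A small userspace probe of that pattern; the hard-coded numbers below are limited to x86_64 and s390 for brevity, and a real bpf() call would additionally need the attribute union from <linux/bpf.h>:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Same fallback idea as the tools/ code: prefer the header-provided
 * definition, otherwise hard-code the per-architecture number.
 */
#ifndef __NR_bpf
# if defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__s390__)
#  define __NR_bpf 351
# else
#  error __NR_bpf not defined for this sketch
# endif
#endif

int main(void)
{
	printf("__NR_bpf = %d\n", (int)__NR_bpf);
	/* A real invocation would look like:
	 *   syscall(__NR_bpf, cmd, &attr, sizeof(attr));
	 */
	return 0;
}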