Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '4.7-fixes' into mips-for-linux-next

+1150 -605
+3 -3
Documentation/x86/intel_mpx.txt
··· 45 45 MPX-instrumented. 46 46 3) The kernel detects that the CPU has MPX, allows the new prctl() to 47 47 succeed, and notes the location of the bounds directory. Userspace is 48 - expected to keep the bounds directory at that locationWe note it 48 + expected to keep the bounds directory at that location. We note it 49 49 instead of reading it each time because the 'xsave' operation needed 50 50 to access the bounds directory register is an expensive operation. 51 51 4) If the application needs to spill bounds out of the 4 registers, it ··· 167 167 We need to decode MPX instructions to get violation address and 168 168 set this address into extended struct siginfo. 169 169 170 - The _sigfault feild of struct siginfo is extended as follow: 170 + The _sigfault field of struct siginfo is extended as follow: 171 171 172 172 87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ 173 173 88 struct { ··· 240 240 This is allowed architecturally. See more information "Intel(R) Architecture 241 241 Instruction Set Extensions Programming Reference" (9.3.4). 242 242 243 - However, if users did this, the kernel might be fooled in to unmaping an 243 + However, if users did this, the kernel might be fooled in to unmapping an 244 244 in-use bounds table since it does not recognize sharing.
+2 -2
Documentation/x86/tlb.txt
··· 5 5 from areas other than the one we are trying to flush will be 6 6 destroyed and must be refilled later, at some cost. 7 7 2. Use the invlpg instruction to invalidate a single page at a 8 - time. This could potentialy cost many more instructions, but 8 + time. This could potentially cost many more instructions, but 9 9 it is a much more precise operation, causing no collateral 10 10 damage to other TLB entries. 11 11 ··· 19 19 work. 20 20 3. The size of the TLB. The larger the TLB, the more collateral 21 21 damage we do with a full flush. So, the larger the TLB, the 22 - more attrative an individual flush looks. Data and 22 + more attractive an individual flush looks. Data and 23 23 instructions have separate TLBs, as do different page sizes. 24 24 4. The microarchitecture. The TLB has become a multi-level 25 25 cache on modern CPUs, and the global flushes have become more
+1 -1
Documentation/x86/x86_64/machinecheck
··· 36 36 37 37 check_interval 38 38 How often to poll for corrected machine check errors, in seconds 39 - (Note output is hexademical). Default 5 minutes. When the poller 39 + (Note output is hexadecimal). Default 5 minutes. When the poller 40 40 finds MCEs it triggers an exponential speedup (poll more often) on 41 41 the polling interval. When the poller stops finding MCEs, it 42 42 triggers an exponential backoff (poll less often) on the polling
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 7 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Psychotic Stoned Sheep 6 6 7 7 # *DOCUMENTATION*
+2
arch/arm64/include/asm/cputype.h
··· 80 80 #define APM_CPU_PART_POTENZA 0x000 81 81 82 82 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 83 + #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 83 84 84 85 #define BRCM_CPU_PART_VULCAN 0x516 85 86 86 87 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) 87 88 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) 88 89 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) 90 + #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) 89 91 90 92 #ifndef __ASSEMBLY__ 91 93
+2
arch/arm64/include/asm/ptrace.h
··· 117 117 }; 118 118 u64 orig_x0; 119 119 u64 syscallno; 120 + u64 orig_addr_limit; 121 + u64 unused; // maintain 16 byte alignment 120 122 }; 121 123 122 124 #define arch_has_single_step() (1)
+1
arch/arm64/kernel/asm-offsets.c
··· 60 60 DEFINE(S_PC, offsetof(struct pt_regs, pc)); 61 61 DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0)); 62 62 DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); 63 + DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); 63 64 DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); 64 65 BLANK(); 65 66 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
+6
arch/arm64/kernel/cpu_errata.c
··· 98 98 MIDR_RANGE(MIDR_THUNDERX, 0x00, 99 99 (1 << MIDR_VARIANT_SHIFT) | 1), 100 100 }, 101 + { 102 + /* Cavium ThunderX, T81 pass 1.0 */ 103 + .desc = "Cavium erratum 27456", 104 + .capability = ARM64_WORKAROUND_CAVIUM_27456, 105 + MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00), 106 + }, 101 107 #endif 102 108 { 103 109 }
+17 -2
arch/arm64/kernel/entry.S
··· 28 28 #include <asm/errno.h> 29 29 #include <asm/esr.h> 30 30 #include <asm/irq.h> 31 + #include <asm/memory.h> 31 32 #include <asm/thread_info.h> 32 33 #include <asm/unistd.h> 33 34 ··· 98 97 mov x29, xzr // fp pointed to user-space 99 98 .else 100 99 add x21, sp, #S_FRAME_SIZE 101 - .endif 100 + get_thread_info tsk 101 + /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ 102 + ldr x20, [tsk, #TI_ADDR_LIMIT] 103 + str x20, [sp, #S_ORIG_ADDR_LIMIT] 104 + mov x20, #TASK_SIZE_64 105 + str x20, [tsk, #TI_ADDR_LIMIT] 106 + ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO) 107 + .endif /* \el == 0 */ 102 108 mrs x22, elr_el1 103 109 mrs x23, spsr_el1 104 110 stp lr, x21, [sp, #S_LR] ··· 136 128 .endm 137 129 138 130 .macro kernel_exit, el 131 + .if \el != 0 132 + /* Restore the task's original addr_limit. */ 133 + ldr x20, [sp, #S_ORIG_ADDR_LIMIT] 134 + str x20, [tsk, #TI_ADDR_LIMIT] 135 + 136 + /* No need to restore UAO, it will be restored from SPSR_EL1 */ 137 + .endif 138 + 139 139 ldp x21, x22, [sp, #S_PC] // load ELR, SPSR 140 140 .if \el == 0 141 141 ct_user_enter ··· 422 406 bl trace_hardirqs_off 423 407 #endif 424 408 425 - get_thread_info tsk 426 409 irq_handler 427 410 428 411 #ifdef CONFIG_PREEMPT
+2 -1
arch/arm64/mm/fault.c
··· 280 280 } 281 281 282 282 if (permission_fault(esr) && (addr < USER_DS)) { 283 - if (get_fs() == KERNEL_DS) 283 + /* regs->orig_addr_limit may be 0 if we entered from EL0 */ 284 + if (regs->orig_addr_limit == KERNEL_DS) 284 285 die("Accessing user space memory with fs=KERNEL_DS", regs, esr); 285 286 286 287 if (!search_exception_tables(regs->pc))
+1 -1
arch/mips/cavium-octeon/octeon-irq.c
··· 1260 1260 1261 1261 line = (hw + gpiod->base_hwirq) >> 6; 1262 1262 bit = (hw + gpiod->base_hwirq) & 63; 1263 - if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || 1263 + if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) || 1264 1264 octeon_irq_ciu_to_irq[line][bit] != 0) 1265 1265 return -EINVAL; 1266 1266
+2
arch/mips/include/asm/msa.h
··· 168 168 unsigned int reg; \ 169 169 __asm__ __volatile__( \ 170 170 " .set push\n" \ 171 + " .set fp=64\n" \ 171 172 " .set msa\n" \ 172 173 " cfcmsa %0, $" #cs "\n" \ 173 174 " .set pop\n" \ ··· 180 179 { \ 181 180 __asm__ __volatile__( \ 182 181 " .set push\n" \ 182 + " .set fp=64\n" \ 183 183 " .set msa\n" \ 184 184 " ctcmsa $" #cs ", %0\n" \ 185 185 " .set pop\n" \
+1 -6
arch/mips/kernel/cevt-r4k.c
··· 276 276 CLOCK_EVT_FEAT_C3STOP | 277 277 CLOCK_EVT_FEAT_PERCPU; 278 278 279 - clockevent_set_clock(cd, mips_hpt_frequency); 280 - 281 - /* Calculate the min / max delta */ 282 - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 283 279 min_delta = calculate_min_delta(); 284 - cd->min_delta_ns = clockevent_delta2ns(min_delta, cd); 285 280 286 281 cd->rating = 300; 287 282 cd->irq = irq; ··· 284 289 cd->set_next_event = mips_next_event; 285 290 cd->event_handler = mips_event_handler; 286 291 287 - clockevents_register_device(cd); 292 + clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff); 288 293 289 294 if (cp0_timer_irq_installed) 290 295 return 0;
+3 -1
arch/mips/kernel/csrc-r4k.c
··· 23 23 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 24 24 }; 25 25 26 - static u64 notrace r4k_read_sched_clock(void) 26 + static u64 __maybe_unused notrace r4k_read_sched_clock(void) 27 27 { 28 28 return read_c0_count(); 29 29 } ··· 82 82 83 83 clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); 84 84 85 + #ifndef CONFIG_CPU_FREQ 85 86 sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); 87 + #endif 86 88 87 89 return 0; 88 90 }
+3 -1
arch/mips/kernel/traps.c
··· 704 704 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) 705 705 { 706 706 struct siginfo si = { 0 }; 707 + struct vm_area_struct *vma; 707 708 708 709 switch (sig) { 709 710 case 0: ··· 745 744 si.si_addr = fault_addr; 746 745 si.si_signo = sig; 747 746 down_read(&current->mm->mmap_sem); 748 - if (find_vma(current->mm, (unsigned long)fault_addr)) 747 + vma = find_vma(current->mm, (unsigned long)fault_addr); 748 + if (vma && (vma->vm_start <= (unsigned long)fault_addr)) 749 749 si.si_code = SEGV_ACCERR; 750 750 else 751 751 si.si_code = SEGV_MAPERR;
+1 -1
arch/mips/lantiq/irq.c
··· 344 344 if (hw == ltq_eiu_irq[i]) 345 345 chip = &ltq_eiu_type; 346 346 347 - irq_set_chip_and_handler(hw, chip, handle_level_irq); 347 + irq_set_chip_and_handler(irq, chip, handle_level_irq); 348 348 349 349 return 0; 350 350 }
+7 -7
arch/mips/loongson64/loongson-3/hpet.c
··· 13 13 #define SMBUS_PCI_REG64 0x64 14 14 #define SMBUS_PCI_REGB4 0xb4 15 15 16 - #define HPET_MIN_CYCLES 64 17 - #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) 16 + #define HPET_MIN_CYCLES 16 17 + #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12) 18 18 19 19 static DEFINE_SPINLOCK(hpet_lock); 20 20 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); ··· 157 157 static int hpet_next_event(unsigned long delta, 158 158 struct clock_event_device *evt) 159 159 { 160 - unsigned int cnt; 161 - int res; 160 + u32 cnt; 161 + s32 res; 162 162 163 163 cnt = hpet_read(HPET_COUNTER); 164 - cnt += delta; 164 + cnt += (u32) delta; 165 165 hpet_write(HPET_T0_CMP, cnt); 166 166 167 - res = (int)(cnt - hpet_read(HPET_COUNTER)); 167 + res = (s32)(cnt - hpet_read(HPET_COUNTER)); 168 168 169 169 return res < HPET_MIN_CYCLES ? -ETIME : 0; 170 170 } ··· 230 230 231 231 cd = &per_cpu(hpet_clockevent_device, cpu); 232 232 cd->name = "hpet"; 233 - cd->rating = 320; 233 + cd->rating = 100; 234 234 cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; 235 235 cd->set_state_shutdown = hpet_set_state_shutdown; 236 236 cd->set_state_periodic = hpet_set_state_periodic;
+1 -1
arch/mips/mm/sc-rm7k.c
··· 161 161 local_irq_save(flags); 162 162 blast_rm7k_tcache(); 163 163 clear_c0_config(RM7K_CONF_TE); 164 - local_irq_save(flags); 164 + local_irq_restore(flags); 165 165 } 166 166 167 167 static void rm7k_sc_disable(void)
+1 -1
arch/mips/net/bpf_jit.c
··· 1199 1199 1200 1200 memset(&ctx, 0, sizeof(ctx)); 1201 1201 1202 - ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL); 1202 + ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); 1203 1203 if (ctx.offsets == NULL) 1204 1204 return; 1205 1205
+6 -5
arch/x86/events/core.c
··· 2319 2319 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 2320 2320 { 2321 2321 struct stack_frame frame; 2322 - const void __user *fp; 2322 + const unsigned long __user *fp; 2323 2323 2324 2324 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { 2325 2325 /* TODO: We don't support guest os callchain now */ ··· 2332 2332 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM)) 2333 2333 return; 2334 2334 2335 - fp = (void __user *)regs->bp; 2335 + fp = (unsigned long __user *)regs->bp; 2336 2336 2337 2337 perf_callchain_store(entry, regs->ip); 2338 2338 ··· 2345 2345 pagefault_disable(); 2346 2346 while (entry->nr < entry->max_stack) { 2347 2347 unsigned long bytes; 2348 + 2348 2349 frame.next_frame = NULL; 2349 2350 frame.return_address = 0; 2350 2351 2351 - if (!access_ok(VERIFY_READ, fp, 16)) 2352 + if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2)) 2352 2353 break; 2353 2354 2354 - bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8); 2355 + bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); 2355 2356 if (bytes != 0) 2356 2357 break; 2357 - bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8); 2358 + bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp)); 2358 2359 if (bytes != 0) 2359 2360 break; 2360 2361
+2 -2
arch/x86/events/intel/Makefile
··· 1 1 obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o 2 2 obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o 3 3 obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o 4 - obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o 5 - intel-rapl-objs := rapl.o 4 + obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o 5 + intel-rapl-perf-objs := rapl.o 6 6 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o 7 7 intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o 8 8 obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
+29
arch/x86/events/intel/core.c
··· 115 115 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ 116 116 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 117 117 118 + /* 119 + * When HT is off these events can only run on the bottom 4 counters 120 + * When HT is on, they are impacted by the HT bug and require EXCL access 121 + */ 118 122 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 119 123 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 120 124 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ ··· 143 139 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 144 140 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 145 141 142 + /* 143 + * When HT is off these events can only run on the bottom 4 counters 144 + * When HT is on, they are impacted by the HT bug and require EXCL access 145 + */ 146 146 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 147 147 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 148 148 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ ··· 190 182 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 191 183 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 192 184 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ 185 + 186 + /* 187 + * when HT is off, these can only run on the bottom 4 counters 188 + */ 189 + INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ 190 + INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ 191 + INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ 192 + INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */ 193 + INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */ 194 + 193 195 EVENT_CONSTRAINT_END 194 196 }; 195 197 ··· 268 250 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ 269 251 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), 270 252 253 + /* 254 + * When HT is off these events can only run on the bottom 4 counters 255 + * When HT is on, they are impacted by the HT bug and require EXCL access 256 + */ 271 257 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 272 258 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 273 259 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ ··· 286 264 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 287 265 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ 288 266 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ 267 + /* 268 + * when HT is off, these can only run on the bottom 4 counters 269 + */ 270 + INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ 271 + INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ 272 + INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ 273 + INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */ 289 274 EVENT_CONSTRAINT_END 290 275 };
+2 -2
arch/x86/kernel/amd_nb.c
··· 71 71 while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL) 72 72 i++; 73 73 74 - if (i == 0) 75 - return 0; 74 + if (!i) 75 + return -ENODEV; 76 76 77 77 nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL); 78 78 if (!nb)
+1
arch/x86/pci/acpi.c
··· 396 396 return -ENODEV; 397 397 398 398 printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n"); 399 + acpi_irq_penalty_init(); 399 400 pcibios_enable_irq = acpi_pci_irq_enable; 400 401 pcibios_disable_irq = acpi_pci_irq_disable; 401 402 x86_init.pci.init_irq = x86_init_noop;
+85 -12
arch/x86/power/hibernate_64.c
··· 19 19 #include <asm/mtrr.h> 20 20 #include <asm/sections.h> 21 21 #include <asm/suspend.h> 22 + #include <asm/tlbflush.h> 22 23 23 24 /* Defined in hibernate_asm_64.S */ 24 25 extern asmlinkage __visible int restore_image(void); ··· 29 28 * kernel's text (this value is passed in the image header). 30 29 */ 31 30 unsigned long restore_jump_address __visible; 31 + unsigned long jump_address_phys; 32 32 33 33 /* 34 34 * Value of the cr3 register from before the hibernation (this value is passed ··· 39 37 40 38 pgd_t *temp_level4_pgt __visible; 41 39 42 - void *relocated_restore_code __visible; 40 + unsigned long relocated_restore_code __visible; 41 + 42 + static int set_up_temporary_text_mapping(void) 43 + { 44 + pmd_t *pmd; 45 + pud_t *pud; 46 + 47 + /* 48 + * The new mapping only has to cover the page containing the image 49 + * kernel's entry point (jump_address_phys), because the switch over to 50 + * it is carried out by relocated code running from a page allocated 51 + * specifically for this purpose and covered by the identity mapping, so 52 + * the temporary kernel text mapping is only needed for the final jump. 53 + * Moreover, in that mapping the virtual address of the image kernel's 54 + * entry point must be the same as its virtual address in the image 55 + * kernel (restore_jump_address), so the image kernel's 56 + * restore_registers() code doesn't find itself in a different area of 57 + * the virtual address space after switching over to the original page 58 + * tables used by the image kernel. 59 + */ 60 + pud = (pud_t *)get_safe_page(GFP_ATOMIC); 61 + if (!pud) 62 + return -ENOMEM; 63 + 64 + pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); 65 + if (!pmd) 66 + return -ENOMEM; 67 + 68 + set_pmd(pmd + pmd_index(restore_jump_address), 69 + __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC)); 70 + set_pud(pud + pud_index(restore_jump_address), 71 + __pud(__pa(pmd) | _KERNPG_TABLE)); 72 + set_pgd(temp_level4_pgt + pgd_index(restore_jump_address), 73 + __pgd(__pa(pud) | _KERNPG_TABLE)); 74 + 75 + return 0; 76 + } 43 77 44 78 static void *alloc_pgt_page(void *context) 45 79 { ··· 97 59 if (!temp_level4_pgt) 98 60 return -ENOMEM; 99 61 100 - /* It is safe to reuse the original kernel mapping */ 101 - set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), 102 - init_level4_pgt[pgd_index(__START_KERNEL_map)]); 62 + /* Prepare a temporary mapping for the kernel text */ 63 + result = set_up_temporary_text_mapping(); 64 + if (result) 65 + return result; 103 66 104 67 /* Set up the direct mapping from scratch */ 105 68 for (i = 0; i < nr_pfn_mapped; i++) { ··· 117 78 return 0; 118 79 } 119 80 81 + static int relocate_restore_code(void) 82 + { 83 + pgd_t *pgd; 84 + pud_t *pud; 85 + 86 + relocated_restore_code = get_safe_page(GFP_ATOMIC); 87 + if (!relocated_restore_code) 88 + return -ENOMEM; 89 + 90 + memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE); 91 + 92 + /* Make the page containing the relocated code executable */ 93 + pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code); 94 + pud = pud_offset(pgd, relocated_restore_code); 95 + if (pud_large(*pud)) { 96 + set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX)); 97 + } else { 98 + pmd_t *pmd = pmd_offset(pud, relocated_restore_code); 99 + 100 + if (pmd_large(*pmd)) { 101 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX)); 102 + } else { 103 + pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code); 104 + 105 + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX)); 106 + } 107 + } 108 + __flush_tlb_all(); 109 + 110 + return 0; 111 + } 112 + 120 113 int swsusp_arch_resume(void) 121 114 { 122 115 int error; 123 116 124 117 /* We have got enough memory and from now on we cannot recover */ 125 - if ((error = set_up_temporary_mappings())) 118 + error = set_up_temporary_mappings(); 119 + if (error) 126 120 return error; 127 121 128 - relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); 129 - if (!relocated_restore_code) 130 - return -ENOMEM; 131 - memcpy(relocated_restore_code, &core_restore_code, 132 - &restore_registers - &core_restore_code); 122 + error = relocate_restore_code(); 123 + if (error) 124 + return error; 133 125 134 126 restore_image(); 135 127 return 0; ··· 179 109 180 110 struct restore_data_record { 181 111 unsigned long jump_address; 112 + unsigned long jump_address_phys; 182 113 unsigned long cr3; 183 114 unsigned long magic; 184 115 }; 185 116 186 - #define RESTORE_MAGIC 0x0123456789ABCDEFUL 117 + #define RESTORE_MAGIC 0x123456789ABCDEF0UL 187 118 188 119 /** 189 120 * arch_hibernation_header_save - populate the architecture specific part ··· 197 126 198 127 if (max_size < sizeof(struct restore_data_record)) 199 128 return -EOVERFLOW; 200 - rdr->jump_address = restore_jump_address; 129 + rdr->jump_address = (unsigned long)&restore_registers; 130 + rdr->jump_address_phys = __pa_symbol(&restore_registers); 201 131 rdr->cr3 = restore_cr3; 202 132 rdr->magic = RESTORE_MAGIC; 203 133 return 0; ··· 214 142 struct restore_data_record *rdr = addr; 215 143 216 144 restore_jump_address = rdr->jump_address; 145 + jump_address_phys = rdr->jump_address_phys; 217 146 restore_cr3 = rdr->cr3; 218 147 return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; 219 148 }
+24 -31
arch/x86/power/hibernate_asm_64.S
··· 44 44 pushfq 45 45 popq pt_regs_flags(%rax) 46 46 47 - /* save the address of restore_registers */ 48 - movq $restore_registers, %rax 49 - movq %rax, restore_jump_address(%rip) 50 47 /* save cr3 */ 51 48 movq %cr3, %rax 52 49 movq %rax, restore_cr3(%rip) ··· 54 57 ENDPROC(swsusp_arch_suspend) 55 58 56 59 ENTRY(restore_image) 57 - /* switch to temporary page tables */ 58 - movq $__PAGE_OFFSET, %rdx 59 - movq temp_level4_pgt(%rip), %rax 60 - subq %rdx, %rax 61 - movq %rax, %cr3 62 - /* Flush TLB */ 63 - movq mmu_cr4_features(%rip), %rax 64 - movq %rax, %rdx 65 - andq $~(X86_CR4_PGE), %rdx 66 - movq %rdx, %cr4; # turn off PGE 67 - movq %cr3, %rcx; # flush TLB 68 - movq %rcx, %cr3; 69 - movq %rax, %cr4; # turn PGE back on 70 - 71 60 /* prepare to jump to the image kernel */ 72 - movq restore_jump_address(%rip), %rax 73 - movq restore_cr3(%rip), %rbx 61 + movq restore_jump_address(%rip), %r8 62 + movq restore_cr3(%rip), %r9 63 + 64 + /* prepare to switch to temporary page tables */ 65 + movq temp_level4_pgt(%rip), %rax 66 + movq mmu_cr4_features(%rip), %rbx 74 67 75 68 /* prepare to copy image data to their original locations */ 76 69 movq restore_pblist(%rip), %rdx 70 + 71 + /* jump to relocated restore code */ 77 72 movq relocated_restore_code(%rip), %rcx 78 73 jmpq *%rcx 79 74 80 75 /* code below has been relocated to a safe page */ 81 76 ENTRY(core_restore_code) 77 + /* switch to temporary page tables */ 78 + movq $__PAGE_OFFSET, %rcx 79 + subq %rcx, %rax 80 + movq %rax, %cr3 81 + /* flush TLB */ 82 + movq %rbx, %rcx 83 + andq $~(X86_CR4_PGE), %rcx 84 + movq %rcx, %cr4; # turn off PGE 85 + movq %cr3, %rcx; # flush TLB 86 + movq %rcx, %cr3; 87 + movq %rbx, %cr4; # turn PGE back on 82 88 .Lloop: 83 89 testq %rdx, %rdx 84 90 jz .Ldone ··· 96 96 /* progress to the next pbe */ 97 97 movq pbe_next(%rdx), %rdx 98 98 jmp .Lloop 99 + 99 100 .Ldone: 100 101 /* jump to the restore_registers address from the image header */ 101 - jmpq *%rax 102 - /* 103 - * NOTE: This assumes that the boot kernel's text mapping covers the 104 - * image kernel's page containing restore_registers and the address of 105 - * this page is the same as in the image kernel's text mapping (it 106 - * should always be true, because the text mapping is linear, starting 107 - * from 0, and is supposed to cover the entire kernel text for every 108 - * kernel). 109 - * 110 - * code below belongs to the image kernel 111 - */ 102 + jmpq *%r8 112 103 104 + /* code below belongs to the image kernel */ 105 + .align PAGE_SIZE 113 106 ENTRY(restore_registers) 114 107 FRAME_BEGIN 115 108 /* go back to the original page tables */ 116 - movq %rbx, %cr3 109 + movq %r9, %cr3 117 110 118 111 /* Flush TLB, including "global" things (vmalloc) */ 119 112 movq mmu_cr4_features(%rip), %rax
+2
block/ioprio.c
··· 150 150 if (ret) 151 151 goto out; 152 152 ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); 153 + task_lock(p); 153 154 if (p->io_context) 154 155 ret = p->io_context->ioprio; 156 + task_unlock(p); 155 157 out: 156 158 return ret; 157 159 }
+2 -2
drivers/acpi/acpi_dbg.c
··· 602 602 crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1); 603 603 ret = n; 604 604 out: 605 - acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret); 605 + acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0); 606 606 return ret; 607 607 } 608 608 ··· 672 672 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); 673 673 ret = n; 674 674 out: 675 - acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret); 675 + acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0); 676 676 return n; 677 677 } 678 678
+6 -1
drivers/acpi/acpica/nsload.c
··· 46 46 #include "acnamesp.h" 47 47 #include "acdispat.h" 48 48 #include "actables.h" 49 + #include "acinterp.h" 49 50 50 51 #define _COMPONENT ACPI_NAMESPACE 51 52 ACPI_MODULE_NAME("nsload") ··· 79 78 80 79 ACPI_FUNCTION_TRACE(ns_load_table); 81 80 81 + acpi_ex_enter_interpreter(); 82 + 82 83 /* 83 84 * Parse the table and load the namespace with all named 84 85 * objects found within. Control methods are NOT parsed ··· 92 89 */ 93 90 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 94 91 if (ACPI_FAILURE(status)) { 95 - return_ACPI_STATUS(status); 92 + goto unlock_interp; 96 93 } 97 94 98 95 /* If table already loaded into namespace, just return */ ··· 133 130 134 131 unlock: 135 132 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 133 + unlock_interp: 134 + (void)acpi_ex_exit_interpreter(); 136 135 137 136 if (ACPI_FAILURE(status)) { 138 137 return_ACPI_STATUS(status);
+2 -7
drivers/acpi/acpica/nsparse.c
··· 47 47 #include "acparser.h" 48 48 #include "acdispat.h" 49 49 #include "actables.h" 50 - #include "acinterp.h" 51 50 52 51 #define _COMPONENT ACPI_NAMESPACE 53 52 ACPI_MODULE_NAME("nsparse") ··· 170 171 171 172 ACPI_FUNCTION_TRACE(ns_parse_table); 172 173 173 - acpi_ex_enter_interpreter(); 174 - 175 174 /* 176 175 * AML Parse, pass 1 177 176 * ··· 185 188 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, 186 189 table_index, start_node); 187 190 if (ACPI_FAILURE(status)) { 188 - goto error_exit; 191 + return_ACPI_STATUS(status); 189 192 } 190 193 191 194 /* ··· 201 204 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, 202 205 table_index, start_node); 203 206 if (ACPI_FAILURE(status)) { 204 - goto error_exit; 207 + return_ACPI_STATUS(status); 205 208 } 206 209 207 - error_exit: 208 - acpi_ex_exit_interpreter(); 209 210 return_ACPI_STATUS(status); 210 211 }
+47 -14
drivers/acpi/pci_link.c
··· 470 470 { 471 471 struct acpi_pci_link *link; 472 472 int penalty = 0; 473 + int i; 473 474 474 475 list_for_each_entry(link, &acpi_link_list, list) { 475 476 /* ··· 479 478 */ 480 479 if (link->irq.active && link->irq.active == irq) 481 480 penalty += PIRQ_PENALTY_PCI_USING; 482 - else { 483 - int i; 484 481 485 - /* 486 - * If a link is inactive, penalize the IRQs it 487 - * might use, but not as severely. 488 - */ 489 - for (i = 0; i < link->irq.possible_count; i++) 490 - if (link->irq.possible[i] == irq) 491 - penalty += PIRQ_PENALTY_PCI_POSSIBLE / 492 - link->irq.possible_count; 493 - } 482 + /* 483 + * penalize the IRQs PCI might use, but not as severely. 484 + */ 485 + for (i = 0; i < link->irq.possible_count; i++) 486 + if (link->irq.possible[i] == irq) 487 + penalty += PIRQ_PENALTY_PCI_POSSIBLE / 488 + link->irq.possible_count; 494 489 } 495 490 496 491 return penalty; ··· 495 498 static int acpi_irq_get_penalty(int irq) 496 499 { 497 500 int penalty = 0; 498 - 499 - if (irq < ACPI_MAX_ISA_IRQS) 500 - penalty += acpi_isa_irq_penalty[irq]; 501 501 502 502 /* 503 503 * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict ··· 510 516 penalty += PIRQ_PENALTY_PCI_USING; 511 517 } 512 518 519 + if (irq < ACPI_MAX_ISA_IRQS) 520 + return penalty + acpi_isa_irq_penalty[irq]; 521 + 513 522 penalty += acpi_irq_pci_sharing_penalty(irq); 514 523 return penalty; 524 + } 525 + 526 + int __init acpi_irq_penalty_init(void) 527 + { 528 + struct acpi_pci_link *link; 529 + int i; 530 + 531 + /* 532 + * Update penalties to facilitate IRQ balancing. 533 + */ 534 + list_for_each_entry(link, &acpi_link_list, list) { 535 + 536 + /* 537 + * reflect the possible and active irqs in the penalty table -- 538 + * useful for breaking ties. 539 + */ 540 + if (link->irq.possible_count) { 541 + int penalty = 542 + PIRQ_PENALTY_PCI_POSSIBLE / 543 + link->irq.possible_count; 544 + 545 + for (i = 0; i < link->irq.possible_count; i++) { 546 + if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS) 547 + acpi_isa_irq_penalty[link->irq. 548 + possible[i]] += 549 + penalty; 550 + } 551 + 552 + } else if (link->irq.active && 553 + (link->irq.active < ACPI_MAX_ISA_IRQS)) { 554 + acpi_isa_irq_penalty[link->irq.active] += 555 + PIRQ_PENALTY_PCI_POSSIBLE; 556 + } 557 + } 558 + 559 + return 0; 515 560 } 516 561 517 562 static int acpi_irq_balance = -1; /* 0: static, 1: balance */
+40 -51
drivers/block/xen-blkfront.c
··· 207 207 struct blk_mq_tag_set tag_set; 208 208 struct blkfront_ring_info *rinfo; 209 209 unsigned int nr_rings; 210 + /* Save uncomplete reqs and bios for migration. */ 211 + struct list_head requests; 212 + struct bio_list bio_list; 210 213 }; 211 214 212 215 static unsigned int nr_minors; ··· 2005 2002 { 2006 2003 unsigned int i, r_index; 2007 2004 struct request *req, *n; 2008 - struct blk_shadow *copy; 2009 2005 int rc; 2010 2006 struct bio *bio, *cloned_bio; 2011 - struct bio_list bio_list, merge_bio; 2012 2007 unsigned int segs, offset; 2013 2008 int pending, size; 2014 2009 struct split_bio *split_bio; 2015 - struct list_head requests; 2016 2010 2017 2011 blkfront_gather_backend_features(info); 2018 2012 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; 2019 2013 blk_queue_max_segments(info->rq, segs); 2020 - bio_list_init(&bio_list); 2021 - INIT_LIST_HEAD(&requests); 2022 2014 2023 2015 for (r_index = 0; r_index < info->nr_rings; r_index++) { 2024 - struct blkfront_ring_info *rinfo; 2025 - 2026 - rinfo = &info->rinfo[r_index]; 2027 - /* Stage 1: Make a safe copy of the shadow state. */ 2028 - copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow), 2029 - GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); 2030 - if (!copy) 2031 - return -ENOMEM; 2032 - 2033 - /* Stage 2: Set up free list. */ 2034 - memset(&rinfo->shadow, 0, sizeof(rinfo->shadow)); 2035 - for (i = 0; i < BLK_RING_SIZE(info); i++) 2036 - rinfo->shadow[i].req.u.rw.id = i+1; 2037 - rinfo->shadow_free = rinfo->ring.req_prod_pvt; 2038 - rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; 2016 + struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; 2039 2017 2040 2018 rc = blkfront_setup_indirect(rinfo); 2041 - if (rc) { 2042 - kfree(copy); 2019 + if (rc) 2043 2020 return rc; 2044 - } 2045 - 2046 - for (i = 0; i < BLK_RING_SIZE(info); i++) { 2047 - /* Not in use? */ 2048 - if (!copy[i].request) 2049 - continue; 2050 - 2051 - /* 2052 - * Get the bios in the request so we can re-queue them. 2053 - */ 2054 - if (copy[i].request->cmd_flags & 2055 - (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { 2056 - /* 2057 - * Flush operations don't contain bios, so 2058 - * we need to requeue the whole request 2059 - */ 2060 - list_add(&copy[i].request->queuelist, &requests); 2061 - continue; 2062 - } 2063 - merge_bio.head = copy[i].request->bio; 2064 - merge_bio.tail = copy[i].request->biotail; 2065 - bio_list_merge(&bio_list, &merge_bio); 2066 - copy[i].request->bio = NULL; 2067 - blk_end_request_all(copy[i].request, 0); 2068 - } 2069 - 2070 - kfree(copy); 2071 2021 } 2072 2022 xenbus_switch_state(info->xbdev, XenbusStateConnected); 2073 2023 ··· 2035 2079 kick_pending_request_queues(rinfo); 2036 2080 } 2037 2081 2038 2082 list_for_each_entry_safe(req, n, &info->requests, queuelist) { 2039 2083 /* Requeue pending requests (flush or discard) */ 2040 2084 list_del_init(&req->queuelist); 2041 2085 BUG_ON(req->nr_phys_segments > segs); ··· 2043 2087 } 2044 2088 blk_mq_kick_requeue_list(info->rq); 2045 2089 2046 2090 while ((bio = bio_list_pop(&info->bio_list)) != NULL) { 2047 2091 /* Traverse the list of pending bios and re-queue them */ 2048 2092 if (bio_segments(bio) > segs) { 2049 2093 /* ··· 2089 2133 { 2090 2134 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2091 2135 int err = 0; 2136 + unsigned int i, j; 2092 2137 2093 2138 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 2139 + 2140 + bio_list_init(&info->bio_list); 2141 + INIT_LIST_HEAD(&info->requests); 2142 + for (i = 0; i < info->nr_rings; i++) { 2143 + struct blkfront_ring_info *rinfo = &info->rinfo[i]; 2144 + struct bio_list merge_bio; 2145 + struct blk_shadow *shadow = rinfo->shadow; 2146 + 2147 + for (j = 0; j < BLK_RING_SIZE(info); j++) { 2148 + /* Not in use? */ 2149 + if (!shadow[j].request) 2150 + continue; 2151 + 2152 + /* 2153 + * Get the bios in the request so we can re-queue them. 2154 + */ 2155 + if (shadow[j].request->cmd_flags & 2156 + (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { 2157 + /* 2158 + * Flush operations don't contain bios, so 2159 + * we need to requeue the whole request 2160 + */ 2161 + list_add(&shadow[j].request->queuelist, &info->requests); 2162 + continue; 2163 + } 2164 + merge_bio.head = shadow[j].request->bio; 2165 + merge_bio.tail = shadow[j].request->biotail; 2166 + bio_list_merge(&info->bio_list, &merge_bio); 2167 + shadow[j].request->bio = NULL; 2168 + blk_mq_end_request(shadow[j].request, 0); 2169 + } 2170 + } 2094 2171 2095 2172 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2096 2173
+4 -8
drivers/cpuidle/cpuidle.c
··· 173 173 174 174 struct cpuidle_state *target_state = &drv->states[index]; 175 175 bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); 176 - u64 time_start, time_end; 176 + ktime_t time_start, time_end; 177 177 s64 diff; 178 178 179 179 /* ··· 195 195 sched_idle_set_state(target_state); 196 196 197 197 trace_cpu_idle_rcuidle(index, dev->cpu); 198 - time_start = local_clock(); 198 + time_start = ns_to_ktime(local_clock()); 199 199 200 200 stop_critical_timings(); 201 201 entered_state = target_state->enter(dev, drv, index); 202 202 start_critical_timings(); 203 203 204 - time_end = local_clock(); 204 + time_end = ns_to_ktime(local_clock()); 205 205 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 206 206 207 207 /* The cpu is no longer idle or about to enter idle. */ ··· 217 217 if (!cpuidle_state_is_coupled(drv, index)) 218 218 local_irq_enable(); 219 219 220 - /* 221 - * local_clock() returns the time in nanosecond, let's shift 222 - * by 10 (divide by 1024) to have microsecond based time. 223 - */ 224 - diff = (time_end - time_start) >> 10; 220 + diff = ktime_us_delta(time_end, time_start); 225 221 if (diff > INT_MAX) 226 222 diff = INT_MAX; 227 223
+1 -1
drivers/gpio/Kconfig
··· 49 49 50 50 config OF_GPIO 51 51 def_bool y 52 - depends on OF || COMPILE_TEST 52 + depends on OF 53 53 54 54 config GPIO_ACPI 55 55 def_bool y
+10 -11
drivers/gpio/gpio-sch.c
··· 61 61 return gpio % 8; 62 62 } 63 63 64 - static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg) 64 + static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg) 65 65 { 66 - struct sch_gpio *sch = gpiochip_get_data(gc); 67 66 unsigned short offset, bit; 68 67 u8 reg_val; 69 68 ··· 74 75 return reg_val; 75 76 } 76 77 77 - static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg, 78 + static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg, 78 79 int val) 79 80 { 80 - struct sch_gpio *sch = gpiochip_get_data(gc); 81 81 unsigned short offset, bit; 82 82 u8 reg_val; 83 83 ··· 96 98 struct sch_gpio *sch = gpiochip_get_data(gc); 97 99 98 100 spin_lock(&sch->lock); 99 - sch_gpio_reg_set(gc, gpio_num, GIO, 1); 101 + sch_gpio_reg_set(sch, gpio_num, GIO, 1); 100 102 spin_unlock(&sch->lock); 101 103 return 0; 102 104 } 103 105 104 106 static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num) 105 107 { 106 - return sch_gpio_reg_get(gc, gpio_num, GLV); 108 + struct sch_gpio *sch = gpiochip_get_data(gc); 109 + return sch_gpio_reg_get(sch, gpio_num, GLV); 107 110 } 108 111 109 112 static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val) ··· 112 113 struct sch_gpio *sch = gpiochip_get_data(gc); 113 114 114 115 spin_lock(&sch->lock); 115 - sch_gpio_reg_set(gc, gpio_num, GLV, val); 116 + sch_gpio_reg_set(sch, gpio_num, GLV, val); 116 117 spin_unlock(&sch->lock); 117 118 } 118 119 ··· 122 123 struct sch_gpio *sch = gpiochip_get_data(gc); 123 124 124 125 spin_lock(&sch->lock); 125 - sch_gpio_reg_set(gc, gpio_num, GIO, 0); 126 + sch_gpio_reg_set(sch, gpio_num, GIO, 0); 126 127 spin_unlock(&sch->lock); 127 128 128 129 /* ··· 181 182 * GPIO7 is configured by the CMC as SLPIOVR 182 183 * Enable GPIO[9:8] core powered gpios explicitly 183 184 */ 184 - sch_gpio_reg_set(&sch->chip, 8, GEN, 1); 185 - sch_gpio_reg_set(&sch->chip, 9, GEN, 1); 185 + sch_gpio_reg_set(sch, 8, GEN, 
1); 186 + sch_gpio_reg_set(sch, 9, GEN, 1); 186 187 /* 187 188 * SUS_GPIO[2:0] enabled by default 188 189 * Enable SUS_GPIO3 resume powered gpio explicitly 189 190 */ 190 - sch_gpio_reg_set(&sch->chip, 13, GEN, 1); 191 + sch_gpio_reg_set(sch, 13, GEN, 1); 191 192 break; 192 193 193 194 case PCI_DEVICE_ID_INTEL_ITC_LPC:
+4 -4
drivers/gpio/gpiolib-legacy.c
··· 28 28 if (!desc && gpio_is_valid(gpio)) 29 29 return -EPROBE_DEFER; 30 30 31 + err = gpiod_request(desc, label); 32 + if (err) 33 + return err; 34 + 31 35 if (flags & GPIOF_OPEN_DRAIN) 32 36 set_bit(FLAG_OPEN_DRAIN, &desc->flags); 33 37 ··· 40 36 41 37 if (flags & GPIOF_ACTIVE_LOW) 42 38 set_bit(FLAG_ACTIVE_LOW, &desc->flags); 43 - 44 - err = gpiod_request(desc, label); 45 - if (err) 46 - return err; 47 39 48 40 if (flags & GPIOF_DIR_IN) 49 41 err = gpiod_direction_input(desc);
+16 -36
drivers/gpio/gpiolib.c
··· 1352 1352 spin_lock_irqsave(&gpio_lock, flags); 1353 1353 } 1354 1354 done: 1355 - if (status < 0) { 1356 - /* Clear flags that might have been set by the caller before 1357 - * requesting the GPIO. 1358 - */ 1359 - clear_bit(FLAG_ACTIVE_LOW, &desc->flags); 1360 - clear_bit(FLAG_OPEN_DRAIN, &desc->flags); 1361 - clear_bit(FLAG_OPEN_SOURCE, &desc->flags); 1362 - } 1363 1355 spin_unlock_irqrestore(&gpio_lock, flags); 1364 1356 return status; 1365 1357 } ··· 2579 2587 } 2580 2588 EXPORT_SYMBOL_GPL(gpiod_get_optional); 2581 2589 2582 - /** 2583 - * gpiod_parse_flags - helper function to parse GPIO lookup flags 2584 - * @desc: gpio to be setup 2585 - * @lflags: gpio_lookup_flags - returned from of_find_gpio() or 2586 - * of_get_gpio_hog() 2587 - * 2588 - * Set the GPIO descriptor flags based on the given GPIO lookup flags. 2589 - */ 2590 - static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags) 2591 - { 2592 - if (lflags & GPIO_ACTIVE_LOW) 2593 - set_bit(FLAG_ACTIVE_LOW, &desc->flags); 2594 - if (lflags & GPIO_OPEN_DRAIN) 2595 - set_bit(FLAG_OPEN_DRAIN, &desc->flags); 2596 - if (lflags & GPIO_OPEN_SOURCE) 2597 - set_bit(FLAG_OPEN_SOURCE, &desc->flags); 2598 - } 2599 2590 2600 2591 /** 2601 2592 * gpiod_configure_flags - helper function to configure a given GPIO 2602 2593 * @desc: gpio whose value will be assigned 2603 2594 * @con_id: function within the GPIO consumer 2595 + * @lflags: gpio_lookup_flags - returned from of_find_gpio() or 2596 + * of_get_gpio_hog() 2604 2597 * @dflags: gpiod_flags - optional GPIO initialization flags 2605 2598 * 2606 2599 * Return 0 on success, -ENOENT if no GPIO has been assigned to the ··· 2593 2616 * occurred while trying to acquire the GPIO. 
2594 2617 */ 2595 2618 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, 2596 - enum gpiod_flags dflags) 2619 + unsigned long lflags, enum gpiod_flags dflags) 2597 2620 { 2598 2621 int status; 2622 + 2623 + if (lflags & GPIO_ACTIVE_LOW) 2624 + set_bit(FLAG_ACTIVE_LOW, &desc->flags); 2625 + if (lflags & GPIO_OPEN_DRAIN) 2626 + set_bit(FLAG_OPEN_DRAIN, &desc->flags); 2627 + if (lflags & GPIO_OPEN_SOURCE) 2628 + set_bit(FLAG_OPEN_SOURCE, &desc->flags); 2599 2629 2600 2630 /* No particular flag request, return here... */ 2601 2631 if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) { ··· 2670 2686 return desc; 2671 2687 } 2672 2688 2673 - gpiod_parse_flags(desc, lookupflags); 2674 - 2675 2689 status = gpiod_request(desc, con_id); 2676 2690 if (status < 0) 2677 2691 return ERR_PTR(status); 2678 2692 2679 - status = gpiod_configure_flags(desc, con_id, flags); 2693 + status = gpiod_configure_flags(desc, con_id, lookupflags, flags); 2680 2694 if (status < 0) { 2681 2695 dev_dbg(dev, "setup of GPIO %s failed\n", con_id); 2682 2696 gpiod_put(desc); ··· 2730 2748 if (IS_ERR(desc)) 2731 2749 return desc; 2732 2750 2751 + ret = gpiod_request(desc, NULL); 2752 + if (ret) 2753 + return ERR_PTR(ret); 2754 + 2733 2755 if (active_low) 2734 2756 set_bit(FLAG_ACTIVE_LOW, &desc->flags); 2735 2757 ··· 2743 2757 else 2744 2758 set_bit(FLAG_OPEN_SOURCE, &desc->flags); 2745 2759 } 2746 - 2747 - ret = gpiod_request(desc, NULL); 2748 - if (ret) 2749 - return ERR_PTR(ret); 2750 2760 2751 2761 return desc; 2752 2762 } ··· 2796 2814 chip = gpiod_to_chip(desc); 2797 2815 hwnum = gpio_chip_hwgpio(desc); 2798 2816 2799 - gpiod_parse_flags(desc, lflags); 2800 - 2801 2817 local_desc = gpiochip_request_own_desc(chip, hwnum, name); 2802 2818 if (IS_ERR(local_desc)) { 2803 2819 status = PTR_ERR(local_desc); ··· 2804 2824 return status; 2805 2825 } 2806 2826 2807 - status = gpiod_configure_flags(desc, name, dflags); 2827 + status = gpiod_configure_flags(desc, name, lflags, dflags); 2808 
2828 if (status < 0) { 2809 2829 pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n", 2810 2830 name, chip->label, hwnum, status);
+15 -17
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
··· 98 98 #define PCIE_BUS_CLK 10000 99 99 #define TCLK (PCIE_BUS_CLK / 10) 100 100 101 - #define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double)) 102 101 103 102 static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] = 104 103 { {600, 1050, 3, 0}, {600, 1050, 6, 1} }; ··· 732 733 table->Smio[level] |= 733 734 data->mvdd_voltage_table.entries[level].smio_low; 734 735 } 735 - table->SmioMask2 = data->vddci_voltage_table.mask_low; 736 + table->SmioMask2 = data->mvdd_voltage_table.mask_low; 736 737 737 738 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); 738 739 } ··· 1806 1807 1807 1808 ro = efuse * (max -min)/255 + min; 1808 1809 1809 - /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset 1810 - * there is a little difference in calculating 1811 - * volt_with_cks with windows */ 1810 + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ 1812 1811 for (i = 0; i < sclk_table->count; i++) { 1813 1812 data->smc_state_table.Sclk_CKS_masterEn0_7 |= 1814 1813 sclk_table->entries[i].cks_enable << i; 1815 1814 if (hwmgr->chip_id == CHIP_POLARIS10) { 1816 - volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \ 1815 + volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \ 1817 1816 (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); 1818 - volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \ 1819 - (252248000 - sclk_table->entries[i].clk/100 * 115764)); 1817 + volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \ 1818 + (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); 1820 1819 } else { 1821 - volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \ 1822 - (2625416 - (sclk_table->entries[i].clk/100) * 
12586807/10000)); 1823 - volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \ 1824 - (3422454 - sclk_table->entries[i].clk/100 * 18886376/10000)); 1820 + volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \ 1821 + (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000))); 1822 + volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \ 1823 + (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000))); 1825 1824 } 1826 1825 1827 1826 if (volt_without_cks >= volt_with_cks) 1828 - volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks + 1829 - sclk_table->entries[i].cks_voffset) * 100 / 625); 1827 + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + 1828 + sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); 1830 1829 1831 1830 data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; 1832 1831 } ··· 2682 2685 { 2683 2686 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 2684 2687 uint16_t vv_id; 2685 - uint16_t vddc = 0; 2688 + uint32_t vddc = 0; 2686 2689 uint16_t i, j; 2687 2690 uint32_t sclk = 0; 2688 2691 struct phm_ppt_v1_information *table_info = ··· 2713 2716 continue); 2714 2717 2715 2718 2716 - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */ 2717 - PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0), 2719 + /* need to make sure vddc is less than 2v or else, it could burn the ASIC. 2720 + * real voltage level in unit of 0.01mv */ 2721 + PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0), 2718 2722 "Invalid VDDC value", result = -EINVAL;); 2719 2723 2720 2724 /* the voltage should not be zero nor equal to leakage ID */
+2 -2
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
··· 1256 1256 } 1257 1257 1258 1258 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 1259 - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage) 1259 + uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage) 1260 1260 { 1261 1261 1262 1262 int result; ··· 1274 1274 if (0 != result) 1275 1275 return result; 1276 1276 1277 - *voltage = get_voltage_info_param_space.usVoltageLevel; 1277 + *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel; 1278 1278 1279 1279 return result; 1280 1280 }
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
··· 305 305 extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, 306 306 uint8_t level); 307 307 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 308 - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); 308 + uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage); 309 309 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); 310 310 311 311 extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
··· 1302 1302 table->Smio[count] |= 1303 1303 data->mvdd_voltage_table.entries[count].smio_low; 1304 1304 } 1305 - table->SmioMask2 = data->vddci_voltage_table.mask_low; 1305 + table->SmioMask2 = data->mvdd_voltage_table.mask_low; 1306 1306 1307 1307 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); 1308 1308 }
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
··· 302 302 (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset)); 303 303 304 304 if (0 != powerplay_table->usPPMTableOffset) { 305 - if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) { 305 + if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) { 306 306 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 307 307 PHM_PlatformCaps_EnablePlatformPowerManagement); 308 308 }
+2 -1
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
··· 40 40 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) 41 41 { 42 42 struct nvkm_device *device = outp->base.disp->engine.subdev.device; 43 - nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern); 43 + const u32 soff = gf119_sor_soff(outp); 44 + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern); 44 45 return 0; 45 46 } 46 47
+8
drivers/gpu/drm/sun4i/sun4i_crtc.c
··· 65 65 DRM_DEBUG_DRIVER("Disabling the CRTC\n"); 66 66 67 67 sun4i_tcon_disable(drv->tcon); 68 + 69 + if (crtc->state->event && !crtc->state->active) { 70 + spin_lock_irq(&crtc->dev->event_lock); 71 + drm_crtc_send_vblank_event(crtc, crtc->state->event); 72 + spin_unlock_irq(&crtc->dev->event_lock); 73 + 74 + crtc->state->event = NULL; 75 + } 68 76 } 69 77 70 78 static void sun4i_crtc_enable(struct drm_crtc *crtc)
+2 -1
drivers/gpu/drm/sun4i/sun4i_drv.c
··· 92 92 /* Frame Buffer Operations */ 93 93 94 94 /* VBlank Operations */ 95 - .get_vblank_counter = drm_vblank_count, 95 + .get_vblank_counter = drm_vblank_no_hw_counter, 96 96 .enable_vblank = sun4i_drv_enable_vblank, 97 97 .disable_vblank = sun4i_drv_disable_vblank, 98 98 }; ··· 310 310 311 311 count += sun4i_drv_add_endpoints(&pdev->dev, &match, 312 312 pipeline); 313 + of_node_put(pipeline); 313 314 314 315 DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n", 315 316 count, i);
+12 -2
drivers/iommu/amd_iommu_init.c
··· 1568 1568 break; 1569 1569 } 1570 1570 1571 + /* 1572 + * Order is important here to make sure any unity map requirements are 1573 + * fulfilled. The unity mappings are created and written to the device 1574 + * table during the amd_iommu_init_api() call. 1575 + * 1576 + * After that we call init_device_table_dma() to make sure any 1577 + * uninitialized DTE will block DMA, and in the end we flush the caches 1578 + * of all IOMMUs to make sure the changes to the device table are 1579 + * active. 1580 + */ 1581 + ret = amd_iommu_init_api(); 1582 + 1571 1583 init_device_table_dma(); 1572 1584 1573 1585 for_each_iommu(iommu) 1574 1586 iommu_flush_all_caches(iommu); 1575 - 1576 - ret = amd_iommu_init_api(); 1577 1587 1578 1588 if (!ret) 1579 1589 print_iommu_info();
+2 -2
drivers/iommu/intel-iommu.c
··· 4602 4602 for (i = 0; i < g_num_of_iommus; i++) { 4603 4603 struct intel_iommu *iommu = g_iommus[i]; 4604 4604 struct dmar_domain *domain; 4605 - u16 did; 4605 + int did; 4606 4606 4607 4607 if (!iommu) 4608 4608 continue; 4609 4609 4610 4610 for (did = 0; did < cap_ndoms(iommu->cap); did++) { 4611 - domain = get_iommu_domain(iommu, did); 4611 + domain = get_iommu_domain(iommu, (u16)did); 4612 4612 4613 4613 if (!domain) 4614 4614 continue;
+2 -2
drivers/irqchip/irq-mips-gic.c
··· 718 718 719 719 spin_lock_irqsave(&gic_lock, flags); 720 720 gic_map_to_pin(intr, gic_cpu_pin); 721 - gic_map_to_vpe(intr, vpe); 721 + gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); 722 722 for (i = 0; i < min(gic_vpes, NR_CPUS); i++) 723 723 clear_bit(intr, pcpu_masks[i].pcpu_mask); 724 724 set_bit(intr, pcpu_masks[vpe].pcpu_mask); ··· 959 959 switch (bus_token) { 960 960 case DOMAIN_BUS_IPI: 961 961 is_ipi = d->bus_token == bus_token; 962 - return to_of_node(d->fwnode) == node && is_ipi; 962 + return (!node || to_of_node(d->fwnode) == node) && is_ipi; 963 963 break; 964 964 default: 965 965 return 0;
+7 -4
drivers/net/bonding/bond_3ad.c
··· 101 101 #define MAC_ADDRESS_EQUAL(A, B) \ 102 102 ether_addr_equal_64bits((const u8 *)A, (const u8 *)B) 103 103 104 - static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } }; 104 + static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { 105 + 0, 0, 0, 0, 0, 0 106 + }; 105 107 static u16 ad_ticks_per_sec; 106 108 static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; 107 109 108 - static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 110 + static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = 111 + MULTICAST_LACPDU_ADDR; 109 112 110 113 /* ================= main 802.3ad protocol functions ================== */ 111 114 static int ad_lacpdu_send(struct port *port); ··· 1742 1739 aggregator->is_individual = false; 1743 1740 aggregator->actor_admin_aggregator_key = 0; 1744 1741 aggregator->actor_oper_aggregator_key = 0; 1745 - aggregator->partner_system = null_mac_addr; 1742 + eth_zero_addr(aggregator->partner_system.mac_addr_value); 1746 1743 aggregator->partner_system_priority = 0; 1747 1744 aggregator->partner_oper_aggregator_key = 0; 1748 1745 aggregator->receive_state = 0; ··· 1764 1761 if (aggregator) { 1765 1762 ad_clear_agg(aggregator); 1766 1763 1767 - aggregator->aggregator_mac_address = null_mac_addr; 1764 + eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value); 1768 1765 aggregator->aggregator_identifier = 0; 1769 1766 aggregator->slave = NULL; 1770 1767 }
+2 -5
drivers/net/bonding/bond_alb.c
··· 42 42 43 43 44 44 45 - #ifndef __long_aligned 46 - #define __long_aligned __attribute__((aligned((sizeof(long))))) 47 - #endif 48 - static const u8 mac_bcast[ETH_ALEN] __long_aligned = { 45 + static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = { 49 46 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 50 47 }; 51 - static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = { 48 + static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = { 52 49 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 53 50 }; 54 51 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
+1
drivers/net/bonding/bond_main.c
··· 1584 1584 } 1585 1585 1586 1586 /* check for initial state */ 1587 + new_slave->link = BOND_LINK_NOCHANGE; 1587 1588 if (bond->params.miimon) { 1588 1589 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { 1589 1590 if (bond->params.updelay) {
+1 -1
drivers/net/ethernet/broadcom/bcmsysport.c
··· 392 392 else 393 393 p = (char *)priv; 394 394 p += s->stat_offset; 395 - data[i] = *(u32 *)p; 395 + data[i] = *(unsigned long *)p; 396 396 } 397 397 } 398 398
+6 -6
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
··· 36 36 #define __T4FW_VERSION_H__ 37 37 38 38 #define T4FW_VERSION_MAJOR 0x01 39 - #define T4FW_VERSION_MINOR 0x0E 40 - #define T4FW_VERSION_MICRO 0x04 39 + #define T4FW_VERSION_MINOR 0x0F 40 + #define T4FW_VERSION_MICRO 0x25 41 41 #define T4FW_VERSION_BUILD 0x00 42 42 43 43 #define T4FW_MIN_VERSION_MAJOR 0x01 ··· 45 45 #define T4FW_MIN_VERSION_MICRO 0x00 46 46 47 47 #define T5FW_VERSION_MAJOR 0x01 48 - #define T5FW_VERSION_MINOR 0x0E 49 - #define T5FW_VERSION_MICRO 0x04 48 + #define T5FW_VERSION_MINOR 0x0F 49 + #define T5FW_VERSION_MICRO 0x25 50 50 #define T5FW_VERSION_BUILD 0x00 51 51 52 52 #define T5FW_MIN_VERSION_MAJOR 0x00 ··· 54 54 #define T5FW_MIN_VERSION_MICRO 0x00 55 55 56 56 #define T6FW_VERSION_MAJOR 0x01 57 - #define T6FW_VERSION_MINOR 0x0E 58 - #define T6FW_VERSION_MICRO 0x04 57 + #define T6FW_VERSION_MINOR 0x0F 58 + #define T6FW_VERSION_MICRO 0x25 59 59 #define T6FW_VERSION_BUILD 0x00 60 60 61 61 #define T6FW_MIN_VERSION_MAJOR 0x00
+9 -12
drivers/net/ethernet/intel/e1000e/netdev.c
··· 154 154 writel(val, hw->hw_addr + reg); 155 155 } 156 156 157 - static bool e1000e_vlan_used(struct e1000_adapter *adapter) 158 - { 159 - u16 vid; 160 - 161 - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 162 - return true; 163 - 164 - return false; 165 - } 166 - 167 157 /** 168 158 * e1000_regdump - register printout routine 169 159 * @hw: pointer to the HW structure ··· 3443 3453 3444 3454 ew32(RCTL, rctl); 3445 3455 3446 - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX || 3447 - e1000e_vlan_used(adapter)) 3456 + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 3448 3457 e1000e_vlan_strip_enable(adapter); 3449 3458 else 3450 3459 e1000e_vlan_strip_disable(adapter); ··· 6914 6925 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ 6915 6926 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) 6916 6927 features &= ~NETIF_F_RXFCS; 6928 + 6929 + /* Since there is no support for separate Rx/Tx vlan accel 6930 + * enable/disable make sure Tx flag is always in same state as Rx. 6931 + */ 6932 + if (features & NETIF_F_HW_VLAN_CTAG_RX) 6933 + features |= NETIF_F_HW_VLAN_CTAG_TX; 6934 + else 6935 + features &= ~NETIF_F_HW_VLAN_CTAG_TX; 6917 6936 6918 6937 return features; 6919 6938 }
+2 -2
drivers/net/ethernet/intel/ixgbevf/mbx.c
··· 85 85 static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 86 86 { 87 87 struct ixgbe_mbx_info *mbx = &hw->mbx; 88 - s32 ret_val = -IXGBE_ERR_MBX; 88 + s32 ret_val = IXGBE_ERR_MBX; 89 89 90 90 if (!mbx->ops.read) 91 91 goto out; ··· 111 111 static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 112 112 { 113 113 struct ixgbe_mbx_info *mbx = &hw->mbx; 114 - s32 ret_val = -IXGBE_ERR_MBX; 114 + s32 ret_val = IXGBE_ERR_MBX; 115 115 116 116 /* exit if either we can't write or there isn't a defined timeout */ 117 117 if (!mbx->ops.write || !mbx->timeout)
+2
drivers/net/ethernet/marvell/mvneta.c
··· 3458 3458 return 0; 3459 3459 3460 3460 err_free_irq: 3461 + unregister_cpu_notifier(&pp->cpu_notifier); 3462 + on_each_cpu(mvneta_percpu_disable, pp, true); 3461 3463 free_percpu_irq(pp->dev->irq, pp->ports); 3462 3464 err_cleanup_txqs: 3463 3465 mvneta_cleanup_txqs(pp);
+71 -56
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 295 295 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 296 296 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 297 297 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: 298 + case MLX5_CMD_OP_2ERR_QP: 299 + case MLX5_CMD_OP_2RST_QP: 300 + case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 301 + case MLX5_CMD_OP_MODIFY_FLOW_TABLE: 302 + case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 303 + case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 298 304 return MLX5_CMD_STAT_OK; 299 305 300 306 case MLX5_CMD_OP_QUERY_HCA_CAP: ··· 327 321 case MLX5_CMD_OP_RTR2RTS_QP: 328 322 case MLX5_CMD_OP_RTS2RTS_QP: 329 323 case MLX5_CMD_OP_SQERR2RTS_QP: 330 - case MLX5_CMD_OP_2ERR_QP: 331 - case MLX5_CMD_OP_2RST_QP: 332 324 case MLX5_CMD_OP_QUERY_QP: 333 325 case MLX5_CMD_OP_SQD_RTS_QP: 334 326 case MLX5_CMD_OP_INIT2INIT_QP: ··· 346 342 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 347 343 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 348 344 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 349 - case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 350 345 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 351 346 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 352 347 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: ··· 393 390 case MLX5_CMD_OP_CREATE_RQT: 394 391 case MLX5_CMD_OP_MODIFY_RQT: 395 392 case MLX5_CMD_OP_QUERY_RQT: 393 + 396 394 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 397 395 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 398 396 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 399 397 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 400 - case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 398 + 401 399 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 402 400 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: 403 401 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: ··· 606 602 pr_debug("\n"); 607 603 } 608 604 605 + static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 606 + { 607 + struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); 608 + 609 + return be16_to_cpu(hdr->opcode); 610 + } 611 + 612 + static void cb_timeout_handler(struct work_struct *work) 613 + { 614 + struct delayed_work *dwork = container_of(work, struct delayed_work, 615 + work); 616 
+ struct mlx5_cmd_work_ent *ent = container_of(dwork, 617 + struct mlx5_cmd_work_ent, 618 + cb_timeout_work); 619 + struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, 620 + cmd); 621 + 622 + ent->ret = -ETIMEDOUT; 623 + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 624 + mlx5_command_str(msg_to_opcode(ent->in)), 625 + msg_to_opcode(ent->in)); 626 + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 627 + } 628 + 609 629 static void cmd_work_handler(struct work_struct *work) 610 630 { 611 631 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 612 632 struct mlx5_cmd *cmd = ent->cmd; 613 633 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 634 + unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 614 635 struct mlx5_cmd_layout *lay; 615 636 struct semaphore *sem; 616 637 unsigned long flags; ··· 675 646 set_signature(ent, !cmd->checksum_disabled); 676 647 dump_command(dev, ent, 1); 677 648 ent->ts1 = ktime_get_ns(); 649 + 650 + if (ent->callback) 651 + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); 678 652 679 653 /* ring doorbell after the descriptor is valid */ 680 654 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); ··· 723 691 } 724 692 } 725 693 726 - static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 727 - { 728 - struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); 729 - 730 - return be16_to_cpu(hdr->opcode); 731 - } 732 - 733 694 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 734 695 { 735 696 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); ··· 731 706 732 707 if (cmd->mode == CMD_MODE_POLLING) { 733 708 wait_for_completion(&ent->done); 734 - err = ent->ret; 735 - } else { 736 - if (!wait_for_completion_timeout(&ent->done, timeout)) 737 - err = -ETIMEDOUT; 738 - else 739 - err = 0; 709 + } else if 
(!wait_for_completion_timeout(&ent->done, timeout)) { 710 + ent->ret = -ETIMEDOUT; 711 + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 740 712 } 713 + 714 + err = ent->ret; 715 + 741 716 if (err == -ETIMEDOUT) { 742 717 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 743 718 mlx5_command_str(msg_to_opcode(ent->in)), ··· 786 761 if (!callback) 787 762 init_completion(&ent->done); 788 763 764 + INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); 789 765 INIT_WORK(&ent->work, cmd_work_handler); 790 766 if (page_queue) { 791 767 cmd_work_handler(&ent->work); ··· 796 770 goto out_free; 797 771 } 798 772 799 - if (!callback) { 800 - err = wait_func(dev, ent); 801 - if (err == -ETIMEDOUT) 802 - goto out; 773 + if (callback) 774 + goto out; 803 775 804 - ds = ent->ts2 - ent->ts1; 805 - op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 806 - if (op < ARRAY_SIZE(cmd->stats)) { 807 - stats = &cmd->stats[op]; 808 - spin_lock_irq(&stats->lock); 809 - stats->sum += ds; 810 - ++stats->n; 811 - spin_unlock_irq(&stats->lock); 812 - } 813 - mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 814 - "fw exec time for %s is %lld nsec\n", 815 - mlx5_command_str(op), ds); 816 - *status = ent->status; 817 - free_cmd(ent); 776 + err = wait_func(dev, ent); 777 + if (err == -ETIMEDOUT) 778 + goto out_free; 779 + 780 + ds = ent->ts2 - ent->ts1; 781 + op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 782 + if (op < ARRAY_SIZE(cmd->stats)) { 783 + stats = &cmd->stats[op]; 784 + spin_lock_irq(&stats->lock); 785 + stats->sum += ds; 786 + ++stats->n; 787 + spin_unlock_irq(&stats->lock); 818 788 } 819 - 820 - return err; 789 + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 790 + "fw exec time for %s is %lld nsec\n", 791 + mlx5_command_str(op), ds); 792 + *status = ent->status; 821 793 822 794 out_free: 823 795 free_cmd(ent); ··· 1205 1181 return err; 1206 1182 } 1207 1183 1208 - void mlx5_cmd_use_events(struct mlx5_core_dev 
*dev) 1184 + static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1209 1185 { 1210 1186 struct mlx5_cmd *cmd = &dev->cmd; 1211 1187 int i; 1212 1188 1213 1189 for (i = 0; i < cmd->max_reg_cmds; i++) 1214 1190 down(&cmd->sem); 1215 - 1216 1191 down(&cmd->pages_sem); 1217 1192 1218 - flush_workqueue(cmd->wq); 1219 - 1220 - cmd->mode = CMD_MODE_EVENTS; 1193 + cmd->mode = mode; 1221 1194 1222 1195 up(&cmd->pages_sem); 1223 1196 for (i = 0; i < cmd->max_reg_cmds; i++) 1224 1197 up(&cmd->sem); 1225 1198 } 1226 1199 1200 + void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1201 + { 1202 + mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); 1203 + } 1204 + 1227 1205 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1228 1206 { 1229 - struct mlx5_cmd *cmd = &dev->cmd; 1230 - int i; 1231 - 1232 - for (i = 0; i < cmd->max_reg_cmds; i++) 1233 - down(&cmd->sem); 1234 - 1235 - down(&cmd->pages_sem); 1236 - 1237 - flush_workqueue(cmd->wq); 1238 - cmd->mode = CMD_MODE_POLLING; 1239 - 1240 - up(&cmd->pages_sem); 1241 - for (i = 0; i < cmd->max_reg_cmds; i++) 1242 - up(&cmd->sem); 1207 + mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); 1243 1208 } 1244 1209 1245 1210 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) ··· 1264 1251 struct semaphore *sem; 1265 1252 1266 1253 ent = cmd->ent_arr[i]; 1254 + if (ent->callback) 1255 + cancel_delayed_work(&ent->cb_timeout_work); 1267 1256 if (ent->page_queue) 1268 1257 sem = &cmd->pages_sem; 1269 1258 else
+10 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 145 145 146 146 #ifdef CONFIG_MLX5_CORE_EN_DCB 147 147 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ 148 - #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ 149 148 #endif 150 149 151 150 struct mlx5e_params { ··· 190 191 enum { 191 192 MLX5E_RQ_STATE_POST_WQES_ENABLE, 192 193 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, 194 + MLX5E_RQ_STATE_FLUSH_TIMEOUT, 193 195 }; 194 196 195 197 struct mlx5e_cq { ··· 220 220 typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, 221 221 u16 ix); 222 222 223 + typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix); 224 + 223 225 struct mlx5e_dma_info { 224 226 struct page *page; 225 227 dma_addr_t addr; ··· 243 241 struct mlx5e_cq cq; 244 242 mlx5e_fp_handle_rx_cqe handle_rx_cqe; 245 243 mlx5e_fp_alloc_wqe alloc_wqe; 244 + mlx5e_fp_dealloc_wqe dealloc_wqe; 246 245 247 246 unsigned long state; 248 247 int ix; ··· 308 305 enum { 309 306 MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, 310 307 MLX5E_SQ_STATE_BF_ENABLE, 308 + MLX5E_SQ_STATE_TX_TIMEOUT, 311 309 }; 312 310 313 311 struct mlx5e_ico_wqe_info { ··· 542 538 struct workqueue_struct *wq; 543 539 struct work_struct update_carrier_work; 544 540 struct work_struct set_rx_mode_work; 541 + struct work_struct tx_timeout_work; 545 542 struct delayed_work update_stats_work; 546 543 547 544 struct mlx5_core_dev *mdev; ··· 594 589 int mlx5e_napi_poll(struct napi_struct *napi, int budget); 595 590 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); 596 591 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 592 + void mlx5e_free_tx_descs(struct mlx5e_sq *sq); 593 + void mlx5e_free_rx_descs(struct mlx5e_rq *rq); 597 594 598 595 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 599 596 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 600 597 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); 601 598 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); 
602 599 int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); 600 + void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); 601 + void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); 603 602 void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq); 604 603 void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq, 605 604 struct mlx5_cqe64 *cqe,
+6 -2
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 96 96 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 97 97 break; 98 98 case IEEE_8021QAZ_TSA_ETS: 99 - tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC; 99 + tc_tx_bw[i] = ets->tc_tx_bw[i]; 100 100 break; 101 101 } 102 102 } ··· 140 140 141 141 /* Validate Bandwidth Sum */ 142 142 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 143 - if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 143 + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { 144 + if (!ets->tc_tx_bw[i]) 145 + return -EINVAL; 146 + 144 147 bw_sum += ets->tc_tx_bw[i]; 148 + } 145 149 } 146 150 147 151 if (bw_sum != 0 && bw_sum != 100)
+90 -9
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 39 39 #include "eswitch.h" 40 40 #include "vxlan.h" 41 41 42 + enum { 43 + MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000, 44 + MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20, 45 + MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS / 46 + MLX5_EN_QP_FLUSH_MSLEEP_QUANT, 47 + }; 48 + 42 49 struct mlx5e_rq_param { 43 50 u32 rqc[MLX5_ST_SZ_DW(rqc)]; 44 51 struct mlx5_wq_param wq; ··· 81 74 port_state = mlx5_query_vport_state(mdev, 82 75 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); 83 76 84 - if (port_state == VPORT_STATE_UP) 77 + if (port_state == VPORT_STATE_UP) { 78 + netdev_info(priv->netdev, "Link up\n"); 85 79 netif_carrier_on(priv->netdev); 86 - else 80 + } else { 81 + netdev_info(priv->netdev, "Link down\n"); 87 82 netif_carrier_off(priv->netdev); 83 + } 88 84 } 89 85 90 86 static void mlx5e_update_carrier_work(struct work_struct *work) ··· 99 89 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 100 90 mlx5e_update_carrier(priv); 101 91 mutex_unlock(&priv->state_lock); 92 + } 93 + 94 + static void mlx5e_tx_timeout_work(struct work_struct *work) 95 + { 96 + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, 97 + tx_timeout_work); 98 + int err; 99 + 100 + rtnl_lock(); 101 + mutex_lock(&priv->state_lock); 102 + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 103 + goto unlock; 104 + mlx5e_close_locked(priv->netdev); 105 + err = mlx5e_open_locked(priv->netdev); 106 + if (err) 107 + netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", 108 + err); 109 + unlock: 110 + mutex_unlock(&priv->state_lock); 111 + rtnl_unlock(); 102 112 } 103 113 104 114 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) ··· 335 305 } 336 306 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; 337 307 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; 308 + rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; 338 309 339 310 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); 340 311 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); ··· 351 320 } 
352 321 rq->handle_rx_cqe = mlx5e_handle_rx_cqe; 353 322 rq->alloc_wqe = mlx5e_alloc_rx_wqe; 323 + rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; 354 324 355 325 rq->wqe_sz = (priv->params.lro_en) ? 356 326 priv->params.lro_wqe_sz : ··· 557 525 558 526 static void mlx5e_close_rq(struct mlx5e_rq *rq) 559 527 { 528 + int tout = 0; 529 + int err; 530 + 560 531 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); 561 532 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ 562 533 563 - mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 564 - while (!mlx5_wq_ll_is_empty(&rq->wq)) 565 - msleep(20); 534 + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 535 + while (!mlx5_wq_ll_is_empty(&rq->wq) && !err && 536 + tout++ < MLX5_EN_QP_FLUSH_MAX_ITER) 537 + msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); 538 + 539 + if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER) 540 + set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state); 566 541 567 542 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ 568 543 napi_synchronize(&rq->channel->napi); 569 544 570 545 mlx5e_disable_rq(rq); 546 + mlx5e_free_rx_descs(rq); 571 547 mlx5e_destroy_rq(rq); 572 548 } 573 549 ··· 822 782 823 783 static void mlx5e_close_sq(struct mlx5e_sq *sq) 824 784 { 785 + int tout = 0; 786 + int err; 787 + 825 788 if (sq->txq) { 826 789 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); 827 790 /* prevent netif_tx_wake_queue */ ··· 835 792 if (mlx5e_sq_has_room_for(sq, 1)) 836 793 mlx5e_send_nop(sq, true); 837 794 838 - mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); 795 + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, 796 + MLX5_SQC_STATE_ERR); 797 + if (err) 798 + set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); 839 799 } 840 800 841 - while (sq->cc != sq->pc) /* wait till sq is empty */ 842 - msleep(20); 801 + /* wait till sq is empty, unless a TX timeout occurred on this SQ */ 802 + while (sq->cc != sq->pc && 803 + 
!test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) { 804 + msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); 805 + if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER) 806 + set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); 807 + } 843 808 844 809 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ 845 810 napi_synchronize(&sq->channel->napi); 846 811 812 + mlx5e_free_tx_descs(sq); 847 813 mlx5e_disable_sq(sq); 848 814 mlx5e_destroy_sq(sq); 849 815 } ··· 1710 1658 1711 1659 netdev_set_num_tc(netdev, ntc); 1712 1660 1661 + /* Map netdev TCs to offset 0 1662 + * We have our own UP to TXQ mapping for QoS 1663 + */ 1713 1664 for (tc = 0; tc < ntc; tc++) 1714 - netdev_set_tc_queue(netdev, tc, nch, tc * nch); 1665 + netdev_set_tc_queue(netdev, tc, nch, 0); 1715 1666 } 1716 1667 1717 1668 int mlx5e_open_locked(struct net_device *netdev) ··· 2645 2590 return features; 2646 2591 } 2647 2592 2593 + static void mlx5e_tx_timeout(struct net_device *dev) 2594 + { 2595 + struct mlx5e_priv *priv = netdev_priv(dev); 2596 + bool sched_work = false; 2597 + int i; 2598 + 2599 + netdev_err(dev, "TX timeout detected\n"); 2600 + 2601 + for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) { 2602 + struct mlx5e_sq *sq = priv->txq_to_sq_map[i]; 2603 + 2604 + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) 2605 + continue; 2606 + sched_work = true; 2607 + set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); 2608 + netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", 2609 + i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); 2610 + } 2611 + 2612 + if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state)) 2613 + schedule_work(&priv->tx_timeout_work); 2614 + } 2615 + 2648 2616 static const struct net_device_ops mlx5e_netdev_ops_basic = { 2649 2617 .ndo_open = mlx5e_open, 2650 2618 .ndo_stop = mlx5e_close, ··· 2685 2607 #ifdef CONFIG_RFS_ACCEL 2686 2608 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 2687 2609 #endif 2610 + .ndo_tx_timeout = 
mlx5e_tx_timeout, 2688 2611 }; 2689 2612 2690 2613 static const struct net_device_ops mlx5e_netdev_ops_sriov = { ··· 2715 2636 .ndo_get_vf_config = mlx5e_get_vf_config, 2716 2637 .ndo_set_vf_link_state = mlx5e_set_vf_link_state, 2717 2638 .ndo_get_vf_stats = mlx5e_get_vf_stats, 2639 + .ndo_tx_timeout = mlx5e_tx_timeout, 2718 2640 }; 2719 2641 2720 2642 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) ··· 2918 2838 2919 2839 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 2920 2840 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 2841 + INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); 2921 2842 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 2922 2843 } 2923 2844
+41
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 212 212 return -ENOMEM; 213 213 } 214 214 215 + void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) 216 + { 217 + struct sk_buff *skb = rq->skb[ix]; 218 + 219 + if (skb) { 220 + rq->skb[ix] = NULL; 221 + dma_unmap_single(rq->pdev, 222 + *((dma_addr_t *)skb->cb), 223 + rq->wqe_sz, 224 + DMA_FROM_DEVICE); 225 + dev_kfree_skb(skb); 226 + } 227 + } 228 + 215 229 static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) 216 230 { 217 231 return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; ··· 588 574 return 0; 589 575 } 590 576 577 + void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) 578 + { 579 + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; 580 + 581 + wi->free_wqe(rq, wi); 582 + } 583 + 584 + void mlx5e_free_rx_descs(struct mlx5e_rq *rq) 585 + { 586 + struct mlx5_wq_ll *wq = &rq->wq; 587 + struct mlx5e_rx_wqe *wqe; 588 + __be16 wqe_ix_be; 589 + u16 wqe_ix; 590 + 591 + while (!mlx5_wq_ll_is_empty(wq)) { 592 + wqe_ix_be = *wq->tail_next; 593 + wqe_ix = be16_to_cpu(wqe_ix_be); 594 + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); 595 + rq->dealloc_wqe(rq, wqe_ix); 596 + mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, 597 + &wqe->next.next_wqe_index); 598 + } 599 + } 600 + 591 601 #define RQ_CANNOT_POST(rq) \ 592 602 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ 593 603 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) ··· 915 877 { 916 878 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 917 879 int work_done = 0; 880 + 881 + if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state))) 882 + return 0; 918 883 919 884 if (cq->decmprs_left) 920 885 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
+48 -4
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 110 110 { 111 111 struct mlx5e_priv *priv = netdev_priv(dev); 112 112 int channel_ix = fallback(dev, skb); 113 - int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ? 114 - skb->vlan_tci >> VLAN_PRIO_SHIFT : 0; 113 + int up = 0; 114 + 115 + if (!netdev_get_num_tc(dev)) 116 + return channel_ix; 117 + 118 + if (skb_vlan_tag_present(skb)) 119 + up = skb->vlan_tci >> VLAN_PRIO_SHIFT; 120 + 121 + /* channel_ix can be larger than num_channels since 122 + * dev->num_real_tx_queues = num_channels * num_tc 123 + */ 124 + if (channel_ix >= priv->params.num_channels) 125 + channel_ix = reciprocal_scale(channel_ix, 126 + priv->params.num_channels); 115 127 116 128 return priv->channeltc_to_txq_map[channel_ix][up]; 117 129 } ··· 135 123 * headers and occur before the data gather. 136 124 * Therefore these headers must be copied into the WQE 137 125 */ 138 - #define MLX5E_MIN_INLINE ETH_HLEN 126 + #define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) 139 127 140 128 if (bf) { 141 129 u16 ihs = skb_headlen(skb); ··· 147 135 return skb_headlen(skb); 148 136 } 149 137 150 - return MLX5E_MIN_INLINE; 138 + return max(skb_network_offset(skb), MLX5E_MIN_INLINE); 151 139 } 152 140 153 141 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, ··· 353 341 return mlx5e_sq_xmit(sq, skb); 354 342 } 355 343 344 + void mlx5e_free_tx_descs(struct mlx5e_sq *sq) 345 + { 346 + struct mlx5e_tx_wqe_info *wi; 347 + struct sk_buff *skb; 348 + u16 ci; 349 + int i; 350 + 351 + while (sq->cc != sq->pc) { 352 + ci = sq->cc & sq->wq.sz_m1; 353 + skb = sq->skb[ci]; 354 + wi = &sq->wqe_info[ci]; 355 + 356 + if (!skb) { /* nop */ 357 + sq->cc++; 358 + continue; 359 + } 360 + 361 + for (i = 0; i < wi->num_dma; i++) { 362 + struct mlx5e_sq_dma *dma = 363 + mlx5e_dma_get(sq, sq->dma_fifo_cc++); 364 + 365 + mlx5e_tx_dma_unmap(sq->pdev, dma); 366 + } 367 + 368 + dev_kfree_skb_any(skb); 369 + sq->cc += wi->num_wqebbs; 370 + } 371 + } 372 + 356 373 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, 
int napi_budget) 357 374 { 358 375 struct mlx5e_sq *sq; ··· 392 351 int i; 393 352 394 353 sq = container_of(cq, struct mlx5e_sq, cq); 354 + 355 + if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state))) 356 + return false; 395 357 396 358 npkts = 0; 397 359 nbytes = 0;
+8 -3
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 108 108 109 109 void mlx5_enter_error_state(struct mlx5_core_dev *dev) 110 110 { 111 + mutex_lock(&dev->intf_state_mutex); 111 112 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 112 - return; 113 + goto unlock; 113 114 114 115 mlx5_core_err(dev, "start\n"); 115 - if (pci_channel_offline(dev->pdev) || in_fatal(dev)) 116 + if (pci_channel_offline(dev->pdev) || in_fatal(dev)) { 116 117 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 118 + trigger_cmd_completions(dev); 119 + } 117 120 118 121 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); 119 122 mlx5_core_err(dev, "end\n"); 123 + 124 + unlock: 125 + mutex_unlock(&dev->intf_state_mutex); 120 126 } 121 127 122 128 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) ··· 251 245 u32 count; 252 246 253 247 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 254 - trigger_cmd_completions(dev); 255 248 mod_timer(&health->timer, get_next_poll_jiffies()); 256 249 return; 257 250 }
+15 -26
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1422 1422 mlx5_pci_err_detected(dev->pdev, 0); 1423 1423 } 1424 1424 1425 - /* wait for the device to show vital signs. For now we check 1426 - * that we can read the device ID and that the health buffer 1427 - * shows a non zero value which is different than 0xffffffff 1425 + /* wait for the device to show vital signs by waiting 1426 + * for the health counter to start counting. 1428 1427 */ 1429 - static void wait_vital(struct pci_dev *pdev) 1428 + static int wait_vital(struct pci_dev *pdev) 1430 1429 { 1431 1430 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1432 1431 struct mlx5_core_health *health = &dev->priv.health; 1433 1432 const int niter = 100; 1433 + u32 last_count = 0; 1434 1434 u32 count; 1435 - u16 did; 1436 1435 int i; 1437 - 1438 - /* Wait for firmware to be ready after reset */ 1439 - msleep(1000); 1440 - for (i = 0; i < niter; i++) { 1441 - if (pci_read_config_word(pdev, 2, &did)) { 1442 - dev_warn(&pdev->dev, "failed reading config word\n"); 1443 - break; 1444 - } 1445 - if (did == pdev->device) { 1446 - dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i); 1447 - break; 1448 - } 1449 - msleep(50); 1450 - } 1451 - if (i == niter) 1452 - dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); 1453 1436 1454 1437 for (i = 0; i < niter; i++) { 1455 1438 count = ioread32be(health->health_counter); 1456 1439 if (count && count != 0xffffffff) { 1457 - dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); 1458 - break; 1440 + if (last_count && last_count != count) { 1441 + dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); 1442 + return 0; 1443 + } 1444 + last_count = count; 1459 1445 } 1460 1446 msleep(50); 1461 1447 } 1462 1448 1463 - if (i == niter) 1464 - dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); 1449 + return -ETIMEDOUT; 1465 1450 } 1466 1451 1467 1452 static void mlx5_pci_resume(struct pci_dev *pdev) ··· 1458 
1473 dev_info(&pdev->dev, "%s was called\n", __func__); 1459 1474 1460 1475 pci_save_state(pdev); 1461 - wait_vital(pdev); 1476 + err = wait_vital(pdev); 1477 + if (err) { 1478 + dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__); 1479 + return; 1480 + } 1462 1481 1463 1482 err = mlx5_load_one(dev, priv); 1464 1483 if (err)
+44 -19
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
··· 345 345 func_id, npages, err); 346 346 goto out_4k; 347 347 } 348 - dev->priv.fw_pages += npages; 349 348 350 349 err = mlx5_cmd_status_to_err(&out.hdr); 351 350 if (err) { ··· 372 373 return err; 373 374 } 374 375 376 + static int reclaim_pages_cmd(struct mlx5_core_dev *dev, 377 + struct mlx5_manage_pages_inbox *in, int in_size, 378 + struct mlx5_manage_pages_outbox *out, int out_size) 379 + { 380 + struct fw_page *fwp; 381 + struct rb_node *p; 382 + u32 npages; 383 + u32 i = 0; 384 + 385 + if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) 386 + return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size, 387 + (u32 *)out, out_size); 388 + 389 + npages = be32_to_cpu(in->num_entries); 390 + 391 + p = rb_first(&dev->priv.page_root); 392 + while (p && i < npages) { 393 + fwp = rb_entry(p, struct fw_page, rb_node); 394 + out->pas[i] = cpu_to_be64(fwp->addr); 395 + p = rb_next(p); 396 + i++; 397 + } 398 + 399 + out->num_entries = cpu_to_be32(i); 400 + return 0; 401 + } 402 + 375 403 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, 376 404 int *nclaimed) 377 405 { ··· 424 398 in.func_id = cpu_to_be16(func_id); 425 399 in.num_entries = cpu_to_be32(npages); 426 400 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 427 - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 401 + err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen); 428 402 if (err) { 429 - mlx5_core_err(dev, "failed reclaiming pages\n"); 430 - goto out_free; 431 - } 432 - dev->priv.fw_pages -= npages; 433 - 434 - if (out->hdr.status) { 435 - err = mlx5_cmd_status_to_err(&out->hdr); 403 + mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); 436 404 goto out_free; 437 405 } 438 406 ··· 437 417 err = -EINVAL; 438 418 goto out_free; 439 419 } 440 - if (nclaimed) 441 - *nclaimed = num_claimed; 442 420 443 421 for (i = 0; i < num_claimed; i++) { 444 422 addr = be64_to_cpu(out->pas[i]); 445 423 free_4k(dev, addr); 446 424 } 425 + 426 + if (nclaimed) 
427 + *nclaimed = num_claimed; 428 + 447 429 dev->priv.fw_pages -= num_claimed; 448 430 if (func_id) 449 431 dev->priv.vfs_pages -= num_claimed; ··· 536 514 p = rb_first(&dev->priv.page_root); 537 515 if (p) { 538 516 fwp = rb_entry(p, struct fw_page, rb_node); 539 - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 540 - free_4k(dev, fwp->addr); 541 - nclaimed = 1; 542 - } else { 543 - err = reclaim_pages(dev, fwp->func_id, 544 - optimal_reclaimed_pages(), 545 - &nclaimed); 546 - } 517 + err = reclaim_pages(dev, fwp->func_id, 518 + optimal_reclaimed_pages(), 519 + &nclaimed); 520 + 547 521 if (err) { 548 522 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", 549 523 err); ··· 553 535 break; 554 536 } 555 537 } while (p); 538 + 539 + WARN(dev->priv.fw_pages, 540 + "FW pages counter is %d after reclaiming all pages\n", 541 + dev->priv.fw_pages); 542 + WARN(dev->priv.vfs_pages, 543 + "VFs FW pages counter is %d after reclaiming all pages\n", 544 + dev->priv.vfs_pages); 556 545 557 546 return 0; 558 547 }
-3
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 513 513 { 514 514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); 515 515 void *nic_vport_context; 516 - u8 *guid; 517 516 void *in; 518 517 int err; 519 518 ··· 534 535 535 536 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, 536 537 in, nic_vport_context); 537 - guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context, 538 - node_guid); 539 538 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); 540 539 541 540 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+5 -2
drivers/net/ethernet/microchip/enc28j60.c
··· 1151 1151 enc28j60_phy_read(priv, PHIR); 1152 1152 } 1153 1153 /* TX complete handler */ 1154 - if ((intflags & EIR_TXIF) != 0) { 1154 + if (((intflags & EIR_TXIF) != 0) && 1155 + ((intflags & EIR_TXERIF) == 0)) { 1155 1156 bool err = false; 1156 1157 loop++; 1157 1158 if (netif_msg_intr(priv)) ··· 1204 1203 enc28j60_tx_clear(ndev, true); 1205 1204 } else 1206 1205 enc28j60_tx_clear(ndev, true); 1207 - locked_reg_bfclr(priv, EIR, EIR_TXERIF); 1206 + locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF); 1208 1207 } 1209 1208 /* RX Error handler */ 1210 1209 if ((intflags & EIR_RXERIF) != 0) { ··· 1239 1238 */ 1240 1239 static void enc28j60_hw_tx(struct enc28j60_net *priv) 1241 1240 { 1241 + BUG_ON(!priv->tx_skb); 1242 + 1242 1243 if (netif_msg_tx_queued(priv)) 1243 1244 printk(KERN_DEBUG DRV_NAME 1244 1245 ": Tx Packet Len:%d\n", priv->tx_skb->len);
+2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
··· 772 772 tx_ring->tx_stats.tx_bytes += skb->len; 773 773 tx_ring->tx_stats.xmit_called++; 774 774 775 + /* Ensure writes are complete before HW fetches Tx descriptors */ 776 + wmb(); 775 777 qlcnic_update_cmd_producer(tx_ring); 776 778 777 779 return NETDEV_TX_OK;
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2804 2804 priv->tx_path_in_lpi_mode = true; 2805 2805 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 2806 2806 priv->tx_path_in_lpi_mode = false; 2807 - if (status & CORE_IRQ_MTL_RX_OVERFLOW) 2807 + if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr) 2808 2808 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, 2809 2809 priv->rx_tail_addr, 2810 2810 STMMAC_CHAN0);
+7 -2
drivers/net/geneve.c
··· 1072 1072 1073 1073 static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict) 1074 1074 { 1075 + struct geneve_dev *geneve = netdev_priv(dev); 1075 1076 /* The max_mtu calculation does not take account of GENEVE 1076 1077 * options, to avoid excluding potentially valid 1077 1078 * configurations. 1078 1079 */ 1079 - int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr) 1080 - - dev->hard_header_len; 1080 + int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len; 1081 + 1082 + if (geneve->remote.sa.sa_family == AF_INET6) 1083 + max_mtu -= sizeof(struct ipv6hdr); 1084 + else 1085 + max_mtu -= sizeof(struct iphdr); 1081 1086 1082 1087 if (new_mtu < 68) 1083 1088 return -EINVAL;
+1
drivers/net/macsec.c
··· 2640 2640 u64_stats_update_begin(&secy_stats->syncp); 2641 2641 secy_stats->stats.OutPktsUntagged++; 2642 2642 u64_stats_update_end(&secy_stats->syncp); 2643 + skb->dev = macsec->real_dev; 2643 2644 len = skb->len; 2644 2645 ret = dev_queue_xmit(skb); 2645 2646 count_tx(dev, ret, len);
+9 -4
drivers/net/phy/dp83867.c
··· 57 57 58 58 /* PHY CTRL bits */ 59 59 #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 60 + #define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14) 60 61 61 62 /* RGMIIDCTL bits */ 62 63 #define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 ··· 134 133 static int dp83867_config_init(struct phy_device *phydev) 135 134 { 136 135 struct dp83867_private *dp83867; 137 - int ret; 138 - u16 val, delay; 136 + int ret, val; 137 + u16 delay; 139 138 140 139 if (!phydev->priv) { 141 140 dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867), ··· 152 151 } 153 152 154 153 if (phy_interface_is_rgmii(phydev)) { 155 - ret = phy_write(phydev, MII_DP83867_PHYCTRL, 156 - (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); 154 + val = phy_read(phydev, MII_DP83867_PHYCTRL); 155 + if (val < 0) 156 + return val; 157 + val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK; 158 + val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT); 159 + ret = phy_write(phydev, MII_DP83867_PHYCTRL, val); 157 160 if (ret) 158 161 return ret; 159 162 }
+7
drivers/net/usb/cdc_ncm.c
··· 854 854 if (cdc_ncm_init(dev)) 855 855 goto error2; 856 856 857 + /* Some firmwares need a pause here or they will silently fail 858 + * to set up the interface properly. This value was decided 859 + * empirically on a Sierra Wireless MC7455 running 02.08.02.00 860 + * firmware. 861 + */ 862 + usleep_range(10000, 20000); 863 + 857 864 /* configure data interface */ 858 865 temp = usb_set_interface(dev->udev, iface_no, data_altsetting); 859 866 if (temp) {
+28 -7
drivers/net/usb/r8152.c
··· 31 31 #define NETNEXT_VERSION "08" 32 32 33 33 /* Information for net */ 34 - #define NET_VERSION "4" 34 + #define NET_VERSION "5" 35 35 36 36 #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 37 37 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" ··· 624 624 int (*eee_get)(struct r8152 *, struct ethtool_eee *); 625 625 int (*eee_set)(struct r8152 *, struct ethtool_eee *); 626 626 bool (*in_nway)(struct r8152 *); 627 + void (*autosuspend_en)(struct r8152 *tp, bool enable); 627 628 } rtl_ops; 628 629 629 630 int intr_interval; ··· 2409 2408 if (enable) { 2410 2409 u32 ocp_data; 2411 2410 2412 - r8153_u1u2en(tp, false); 2413 - r8153_u2p3en(tp, false); 2414 - 2415 2411 __rtl_set_wol(tp, WAKE_ANY); 2416 2412 2417 2413 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ··· 2419 2421 2420 2422 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2421 2423 } else { 2424 + u32 ocp_data; 2425 + 2422 2426 __rtl_set_wol(tp, tp->saved_wolopts); 2427 + 2428 + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2429 + 2430 + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); 2431 + ocp_data &= ~LINK_OFF_WAKE_EN; 2432 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); 2433 + 2434 + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2435 + } 2436 + } 2437 + 2438 + static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) 2439 + { 2440 + rtl_runtime_suspend_enable(tp, enable); 2441 + 2442 + if (enable) { 2443 + r8153_u1u2en(tp, false); 2444 + r8153_u2p3en(tp, false); 2445 + } else { 2423 2446 r8153_u2p3en(tp, true); 2424 2447 r8153_u1u2en(tp, true); 2425 2448 } ··· 3531 3512 napi_disable(&tp->napi); 3532 3513 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3533 3514 rtl_stop_rx(tp); 3534 - rtl_runtime_suspend_enable(tp, true); 3515 + tp->rtl_ops.autosuspend_en(tp, true); 3535 3516 } else { 3536 3517 cancel_delayed_work_sync(&tp->schedule); 3537 3518 tp->rtl_ops.down(tp); ··· 3557 
3538 3558 3539 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3559 3540 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3560 - rtl_runtime_suspend_enable(tp, false); 3541 + tp->rtl_ops.autosuspend_en(tp, false); 3561 3542 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3562 3543 napi_disable(&tp->napi); 3563 3544 set_bit(WORK_ENABLE, &tp->flags); ··· 3576 3557 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3577 3558 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3578 3559 if (tp->netdev->flags & IFF_UP) 3579 - rtl_runtime_suspend_enable(tp, false); 3560 + tp->rtl_ops.autosuspend_en(tp, false); 3580 3561 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3581 3562 } 3582 3563 ··· 4156 4137 ops->eee_get = r8152_get_eee; 4157 4138 ops->eee_set = r8152_set_eee; 4158 4139 ops->in_nway = rtl8152_in_nway; 4140 + ops->autosuspend_en = rtl_runtime_suspend_enable; 4159 4141 break; 4160 4142 4161 4143 case RTL_VER_03: ··· 4172 4152 ops->eee_get = r8153_get_eee; 4173 4153 ops->eee_set = r8153_set_eee; 4174 4154 ops->in_nway = rtl8153_in_nway; 4155 + ops->autosuspend_en = rtl8153_runtime_enable; 4175 4156 break; 4176 4157 4177 4158 default:
+7 -3
drivers/net/usb/usbnet.c
··· 395 395 dev->hard_mtu = net->mtu + net->hard_header_len; 396 396 if (dev->rx_urb_size == old_hard_mtu) { 397 397 dev->rx_urb_size = dev->hard_mtu; 398 - if (dev->rx_urb_size > old_rx_urb_size) 398 + if (dev->rx_urb_size > old_rx_urb_size) { 399 + usbnet_pause_rx(dev); 399 400 usbnet_unlink_rx_urbs(dev); 401 + usbnet_resume_rx(dev); 402 + } 400 403 } 401 404 402 405 /* max qlen depend on hard_mtu and rx_urb_size */ ··· 1511 1508 } else if (netif_running (dev->net) && 1512 1509 netif_device_present (dev->net) && 1513 1510 netif_carrier_ok(dev->net) && 1514 - !timer_pending (&dev->delay) && 1515 - !test_bit (EVENT_RX_HALT, &dev->flags)) { 1511 + !timer_pending(&dev->delay) && 1512 + !test_bit(EVENT_RX_PAUSED, &dev->flags) && 1513 + !test_bit(EVENT_RX_HALT, &dev->flags)) { 1516 1514 int temp = dev->rxq.qlen; 1517 1515 1518 1516 if (temp < RX_QLEN(dev)) {
+7 -1
drivers/platform/chrome/cros_ec_dev.c
··· 151 151 goto exit; 152 152 } 153 153 154 + if (u_cmd.outsize != s_cmd->outsize || 155 + u_cmd.insize != s_cmd->insize) { 156 + ret = -EINVAL; 157 + goto exit; 158 + } 159 + 154 160 s_cmd->command += ec->cmd_offset; 155 161 ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); 156 162 /* Only copy data to userland if data was received. */ 157 163 if (ret < 0) 158 164 goto exit; 159 165 160 - if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize)) 166 + if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize)) 161 167 ret = -EFAULT; 162 168 exit: 163 169 kfree(s_cmd);
+1
drivers/s390/net/qeth_l2_main.c
··· 1051 1051 qeth_l2_set_offline(cgdev); 1052 1052 1053 1053 if (card->dev) { 1054 + netif_napi_del(&card->napi); 1054 1055 unregister_netdev(card->dev); 1055 1056 card->dev = NULL; 1056 1057 }
+1
drivers/s390/net/qeth_l3_main.c
··· 3226 3226 qeth_l3_set_offline(cgdev); 3227 3227 3228 3228 if (card->dev) { 3229 + netif_napi_del(&card->napi); 3229 3230 unregister_netdev(card->dev); 3230 3231 card->dev = NULL; 3231 3232 }
+1
drivers/scsi/ipr.c
··· 10093 10093 ioa_cfg->intr_flag = IPR_USE_MSI; 10094 10094 else { 10095 10095 ioa_cfg->intr_flag = IPR_USE_LSI; 10096 + ioa_cfg->clear_isr = 1; 10096 10097 ioa_cfg->nvectors = 1; 10097 10098 dev_info(&pdev->dev, "Cannot enable MSI.\n"); 10098 10099 }
+1 -1
drivers/scsi/qla2xxx/qla_isr.c
··· 2548 2548 if (!vha->flags.online) 2549 2549 return; 2550 2550 2551 - if (rsp->msix->cpuid != smp_processor_id()) { 2551 + if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) { 2552 2552 /* if kernel does not notify qla of IRQ's CPU change, 2553 2553 * then set it here. 2554 2554 */
+6 -4
drivers/scsi/scsi_devinfo.c
··· 429 429 * here, and we don't know what device it is 430 430 * trying to work with, leave it as-is. 431 431 */ 432 - vmax = 8; /* max length of vendor */ 432 + vmax = sizeof(devinfo->vendor); 433 433 vskip = vendor; 434 434 while (vmax > 0 && *vskip == ' ') { 435 435 vmax--; ··· 439 439 while (vmax > 0 && vskip[vmax - 1] == ' ') 440 440 --vmax; 441 441 442 - mmax = 16; /* max length of model */ 442 + mmax = sizeof(devinfo->model); 443 443 mskip = model; 444 444 while (mmax > 0 && *mskip == ' ') { 445 445 mmax--; ··· 455 455 * Behave like the older version of get_device_flags. 456 456 */ 457 457 if (memcmp(devinfo->vendor, vskip, vmax) || 458 - devinfo->vendor[vmax]) 458 + (vmax < sizeof(devinfo->vendor) && 459 + devinfo->vendor[vmax])) 459 460 continue; 460 461 if (memcmp(devinfo->model, mskip, mmax) || 461 - devinfo->model[mmax]) 462 + (mmax < sizeof(devinfo->model) && 463 + devinfo->model[mmax])) 462 464 continue; 463 465 return devinfo; 464 466 } else {
+3 -32
drivers/xen/xen-acpi-processor.c
··· 423 423 424 424 return 0; 425 425 } 426 - static int __init check_prereq(void) 427 - { 428 - struct cpuinfo_x86 *c = &cpu_data(0); 429 426 430 - if (!xen_initial_domain()) 431 - return -ENODEV; 432 - 433 - if (!acpi_gbl_FADT.smi_command) 434 - return -ENODEV; 435 - 436 - if (c->x86_vendor == X86_VENDOR_INTEL) { 437 - if (!cpu_has(c, X86_FEATURE_EST)) 438 - return -ENODEV; 439 - 440 - return 0; 441 - } 442 - if (c->x86_vendor == X86_VENDOR_AMD) { 443 - /* Copied from powernow-k8.h, can't include ../cpufreq/powernow 444 - * as we get compile warnings for the static functions. 445 - */ 446 - #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 447 - #define USE_HW_PSTATE 0x00000080 448 - u32 eax, ebx, ecx, edx; 449 - cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); 450 - if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE) 451 - return -ENODEV; 452 - return 0; 453 - } 454 - return -ENODEV; 455 - } 456 427 /* acpi_perf_data is a pointer to percpu data. */ 457 428 static struct acpi_processor_performance __percpu *acpi_perf_data; 458 429 ··· 480 509 static int __init xen_acpi_processor_init(void) 481 510 { 482 511 unsigned int i; 483 - int rc = check_prereq(); 512 + int rc; 484 513 485 - if (rc) 486 - return rc; 514 + if (!xen_initial_domain()) 515 + return -ENODEV; 487 516 488 517 nr_acpi_bits = get_max_acpi_id() + 1; 489 518 acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+8 -6
drivers/xen/xenbus/xenbus_dev_frontend.c
··· 316 316 rc = -ENOMEM; 317 317 goto out; 318 318 } 319 + } else { 320 + list_for_each_entry(trans, &u->transactions, list) 321 + if (trans->handle.id == u->u.msg.tx_id) 322 + break; 323 + if (&trans->list == &u->transactions) 324 + return -ESRCH; 319 325 } 320 326 321 327 reply = xenbus_dev_request_and_reply(&u->u.msg); 322 328 if (IS_ERR(reply)) { 323 - kfree(trans); 329 + if (msg_type == XS_TRANSACTION_START) 330 + kfree(trans); 324 331 rc = PTR_ERR(reply); 325 332 goto out; 326 333 } ··· 340 333 list_add(&trans->list, &u->transactions); 341 334 } 342 335 } else if (u->u.msg.type == XS_TRANSACTION_END) { 343 - list_for_each_entry(trans, &u->transactions, list) 344 - if (trans->handle.id == u->u.msg.tx_id) 345 - break; 346 - BUG_ON(&trans->list == &u->transactions); 347 336 list_del(&trans->list); 348 - 349 337 kfree(trans); 350 338 } 351 339
+3 -7
drivers/xen/xenbus/xenbus_xs.c
··· 232 232 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) 233 233 { 234 234 void *ret; 235 - struct xsd_sockmsg req_msg = *msg; 235 + enum xsd_sockmsg_type type = msg->type; 236 236 int err; 237 237 238 - if (req_msg.type == XS_TRANSACTION_START) 238 + if (type == XS_TRANSACTION_START) 239 239 transaction_start(); 240 240 241 241 mutex_lock(&xs_state.request_mutex); ··· 249 249 250 250 mutex_unlock(&xs_state.request_mutex); 251 251 252 - if (IS_ERR(ret)) 253 - return ret; 254 - 255 252 if ((msg->type == XS_TRANSACTION_END) || 256 - ((req_msg.type == XS_TRANSACTION_START) && 257 - (msg->type == XS_ERROR))) 253 + ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) 258 254 transaction_end(); 259 255 260 256 return ret;
-2
fs/configfs/file.c
··· 357 357 358 358 len = simple_write_to_buffer(buffer->bin_buffer, 359 359 buffer->bin_buffer_size, ppos, buf, count); 360 - if (len > 0) 361 - *ppos += len; 362 360 out: 363 361 mutex_unlock(&buffer->mutex); 364 362 return len;
+4 -4
fs/ecryptfs/crypto.c
··· 45 45 * ecryptfs_to_hex 46 46 * @dst: Buffer to take hex character representation of contents of 47 47 * src; must be at least of size (src_size * 2) 48 - * @src: Buffer to be converted to a hex string respresentation 48 + * @src: Buffer to be converted to a hex string representation 49 49 * @src_size: number of bytes to convert 50 50 */ 51 51 void ecryptfs_to_hex(char *dst, char *src, size_t src_size) ··· 60 60 * ecryptfs_from_hex 61 61 * @dst: Buffer to take the bytes from src hex; must be at least of 62 62 * size (src_size / 2) 63 - * @src: Buffer to be converted from a hex string respresentation to raw value 63 + * @src: Buffer to be converted from a hex string representation to raw value 64 64 * @dst_size: size of dst buffer, or number of hex characters pairs to convert 65 65 */ 66 66 void ecryptfs_from_hex(char *dst, char *src, int dst_size) ··· 953 953 }; 954 954 955 955 /* Add support for additional ciphers by adding elements here. The 956 - * cipher_code is whatever OpenPGP applicatoins use to identify the 956 + * cipher_code is whatever OpenPGP applications use to identify the 957 957 * ciphers. List in order of probability. */ 958 958 static struct ecryptfs_cipher_code_str_map_elem 959 959 ecryptfs_cipher_code_str_map[] = { ··· 1410 1410 * 1411 1411 * Common entry point for reading file metadata. From here, we could 1412 1412 * retrieve the header information from the header region of the file, 1413 - * the xattr region of the file, or some other repostory that is 1413 + * the xattr region of the file, or some other repository that is 1414 1414 * stored separately from the file itself. The current implementation 1415 1415 * supports retrieving the metadata information from the file contents 1416 1416 * and from the xattr region.
+16 -3
fs/ecryptfs/file.c
··· 169 169 return rc; 170 170 } 171 171 172 + static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma) 173 + { 174 + struct file *lower_file = ecryptfs_file_to_lower(file); 175 + /* 176 + * Don't allow mmap on top of file systems that don't support it 177 + * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs 178 + * allows recursive mounting, this will need to be extended. 179 + */ 180 + if (!lower_file->f_op->mmap) 181 + return -ENODEV; 182 + return generic_file_mmap(file, vma); 183 + } 184 + 172 185 /** 173 186 * ecryptfs_open 174 - * @inode: inode speciying file to open 187 + * @inode: inode specifying file to open 175 188 * @file: Structure to return filled in 176 189 * 177 190 * Opens the file specified by inode. ··· 253 240 254 241 /** 255 242 * ecryptfs_dir_open 256 - * @inode: inode speciying file to open 243 + * @inode: inode specifying file to open 257 244 * @file: Structure to return filled in 258 245 * 259 246 * Opens the file specified by inode. ··· 416 403 #ifdef CONFIG_COMPAT 417 404 .compat_ioctl = ecryptfs_compat_ioctl, 418 405 #endif 419 - .mmap = generic_file_mmap, 406 + .mmap = ecryptfs_mmap, 420 407 .open = ecryptfs_open, 421 408 .flush = ecryptfs_flush, 422 409 .release = ecryptfs_release,
+2 -11
fs/ecryptfs/kthread.c
··· 25 25 #include <linux/slab.h> 26 26 #include <linux/wait.h> 27 27 #include <linux/mount.h> 28 - #include <linux/file.h> 29 28 #include "ecryptfs_kernel.h" 30 29 31 30 struct ecryptfs_open_req { ··· 147 148 flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; 148 149 (*lower_file) = dentry_open(&req.path, flags, cred); 149 150 if (!IS_ERR(*lower_file)) 150 - goto have_file; 151 + goto out; 151 152 if ((flags & O_ACCMODE) == O_RDONLY) { 152 153 rc = PTR_ERR((*lower_file)); 153 154 goto out; ··· 165 166 mutex_unlock(&ecryptfs_kthread_ctl.mux); 166 167 wake_up(&ecryptfs_kthread_ctl.wait); 167 168 wait_for_completion(&req.done); 168 - if (IS_ERR(*lower_file)) { 169 + if (IS_ERR(*lower_file)) 169 170 rc = PTR_ERR(*lower_file); 170 - goto out; 171 - } 172 - have_file: 173 - if ((*lower_file)->f_op->mmap == NULL) { 174 - fput(*lower_file); 175 - *lower_file = NULL; 176 - rc = -EMEDIUMTYPE; 177 - } 178 171 out: 179 172 return rc; 180 173 }
+1 -2
fs/ecryptfs/main.c
··· 738 738 struct ecryptfs_cache_info *info; 739 739 740 740 info = &ecryptfs_cache_infos[i]; 741 - if (*(info->cache)) 742 - kmem_cache_destroy(*(info->cache)); 741 + kmem_cache_destroy(*(info->cache)); 743 742 } 744 743 } 745 744
+1 -1
fs/fs-writeback.c
··· 483 483 goto out_free; 484 484 } 485 485 inode->i_state |= I_WB_SWITCH; 486 + __iget(inode); 486 487 spin_unlock(&inode->i_lock); 487 488 488 - ihold(inode); 489 489 isw->inode = inode; 490 490 491 491 atomic_inc(&isw_nr_in_flight);
+1
include/acpi/acpi_drivers.h
··· 78 78 79 79 /* ACPI PCI Interrupt Link (pci_link.c) */ 80 80 81 + int acpi_irq_penalty_init(void); 81 82 int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, 82 83 int *polarity, char **name); 83 84 int acpi_pci_link_free_irq(acpi_handle handle);
+1
include/linux/mlx5/driver.h
··· 629 629 void *uout; 630 630 int uout_size; 631 631 mlx5_cmd_cbk_t callback; 632 + struct delayed_work cb_timeout_work; 632 633 void *context; 633 634 int idx; 634 635 struct completion done;
+20
include/linux/skbuff.h
··· 1062 1062 } 1063 1063 1064 1064 void __skb_get_hash(struct sk_buff *skb); 1065 + u32 __skb_get_hash_symmetric(struct sk_buff *skb); 1065 1066 u32 skb_get_poff(const struct sk_buff *skb); 1066 1067 u32 __skb_get_poff(const struct sk_buff *skb, void *data, 1067 1068 const struct flow_keys *keys, int hlen); ··· 2868 2867 */ 2869 2868 if (skb->ip_summed == CHECKSUM_COMPLETE) 2870 2869 skb->csum = csum_partial(start, len, skb->csum); 2870 + } 2871 + 2872 + /** 2873 + * skb_push_rcsum - push skb and update receive checksum 2874 + * @skb: buffer to update 2875 + * @len: length of data pulled 2876 + * 2877 + * This function performs an skb_push on the packet and updates 2878 + * the CHECKSUM_COMPLETE checksum. It should be used on 2879 + * receive path processing instead of skb_push unless you know 2880 + * that the checksum difference is zero (e.g., a valid IP header) 2881 + * or you are setting ip_summed to CHECKSUM_NONE. 2882 + */ 2883 + static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, 2884 + unsigned int len) 2885 + { 2886 + skb_push(skb, len); 2887 + skb_postpush_rcsum(skb, skb->data, len); 2888 + return skb->data; 2871 2889 } 2872 2890 2873 2891 /**
+6 -1
include/net/bonding.h
··· 34 34 35 35 #define BOND_DEFAULT_MIIMON 100 36 36 37 + #ifndef __long_aligned 38 + #define __long_aligned __attribute__((aligned((sizeof(long))))) 39 + #endif 37 40 /* 38 41 * Less bad way to call ioctl from within the kernel; this needs to be 39 42 * done some other way to get the call out of interrupt context. ··· 141 138 struct reciprocal_value reciprocal_packets_per_slave; 142 139 u16 ad_actor_sys_prio; 143 140 u16 ad_user_port_key; 144 - u8 ad_actor_system[ETH_ALEN]; 141 + 142 + /* 2 bytes of padding : see ether_addr_equal_64bits() */ 143 + u8 ad_actor_system[ETH_ALEN + 2]; 145 144 }; 146 145 147 146 struct bond_parm_tbl {
+2 -3
include/net/ip.h
··· 313 313 return min(dst->dev->mtu, IP_MAX_MTU); 314 314 } 315 315 316 - static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) 316 + static inline unsigned int ip_skb_dst_mtu(struct sock *sk, 317 + const struct sk_buff *skb) 317 318 { 318 - struct sock *sk = skb->sk; 319 - 320 319 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { 321 320 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; 322 321
+1
init/Kconfig
··· 1458 1458 1459 1459 config KALLSYMS_ABSOLUTE_PERCPU 1460 1460 bool 1461 + depends on KALLSYMS 1461 1462 default X86_64 && SMP 1462 1463 1463 1464 config KALLSYMS_BASE_RELATIVE
+22 -1
kernel/events/core.c
··· 1678 1678 return event->state == PERF_EVENT_STATE_DEAD; 1679 1679 } 1680 1680 1681 - static inline int pmu_filter_match(struct perf_event *event) 1681 + static inline int __pmu_filter_match(struct perf_event *event) 1682 1682 { 1683 1683 struct pmu *pmu = event->pmu; 1684 1684 return pmu->filter_match ? pmu->filter_match(event) : 1; 1685 + } 1686 + 1687 + /* 1688 + * Check whether we should attempt to schedule an event group based on 1689 + * PMU-specific filtering. An event group can consist of HW and SW events, 1690 + * potentially with a SW leader, so we must check all the filters, to 1691 + * determine whether a group is schedulable: 1692 + */ 1693 + static inline int pmu_filter_match(struct perf_event *event) 1694 + { 1695 + struct perf_event *child; 1696 + 1697 + if (!__pmu_filter_match(event)) 1698 + return 0; 1699 + 1700 + list_for_each_entry(child, &event->sibling_list, group_entry) { 1701 + if (!__pmu_filter_match(child)) 1702 + return 0; 1703 + } 1704 + 1705 + return 1; 1685 1706 } 1686 1707 1687 1708 static inline int
+22 -24
kernel/sched/fair.c
··· 735 735 } 736 736 } 737 737 738 - static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq); 739 - static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq); 740 738 #else 741 739 void init_entity_runnable_average(struct sched_entity *se) 742 740 { ··· 2497 2499 2498 2500 #ifdef CONFIG_FAIR_GROUP_SCHED 2499 2501 # ifdef CONFIG_SMP 2500 - static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) 2501 - { 2502 - long tg_weight; 2503 - 2504 - /* 2505 - * Use this CPU's real-time load instead of the last load contribution 2506 - * as the updating of the contribution is delayed, and we will use the 2507 - * the real-time load to calc the share. See update_tg_load_avg(). 2508 - */ 2509 - tg_weight = atomic_long_read(&tg->load_avg); 2510 - tg_weight -= cfs_rq->tg_load_avg_contrib; 2511 - tg_weight += cfs_rq->load.weight; 2512 - 2513 - return tg_weight; 2514 - } 2515 - 2516 2502 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) 2517 2503 { 2518 2504 long tg_weight, load, shares; 2519 2505 2520 - tg_weight = calc_tg_weight(tg, cfs_rq); 2521 - load = cfs_rq->load.weight; 2506 + /* 2507 + * This really should be: cfs_rq->avg.load_avg, but instead we use 2508 + * cfs_rq->load.weight, which is its upper bound. This helps ramp up 2509 + * the shares for small weight interactive tasks. 
2510 + */ 2511 + load = scale_load_down(cfs_rq->load.weight); 2512 + 2513 + tg_weight = atomic_long_read(&tg->load_avg); 2514 + 2515 + /* Ensure tg_weight >= load */ 2516 + tg_weight -= cfs_rq->tg_load_avg_contrib; 2517 + tg_weight += load; 2522 2518 2523 2519 shares = (tg->shares * load); 2524 2520 if (tg_weight) ··· 2531 2539 return tg->shares; 2532 2540 } 2533 2541 # endif /* CONFIG_SMP */ 2542 + 2534 2543 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 2535 2544 unsigned long weight) 2536 2545 { ··· 4939 4946 return wl; 4940 4947 4941 4948 for_each_sched_entity(se) { 4942 - long w, W; 4949 + struct cfs_rq *cfs_rq = se->my_q; 4950 + long W, w = cfs_rq_load_avg(cfs_rq); 4943 4951 4944 - tg = se->my_q->tg; 4952 + tg = cfs_rq->tg; 4945 4953 4946 4954 /* 4947 4955 * W = @wg + \Sum rw_j 4948 4956 */ 4949 - W = wg + calc_tg_weight(tg, se->my_q); 4957 + W = wg + atomic_long_read(&tg->load_avg); 4958 + 4959 + /* Ensure \Sum rw_j >= rw_i */ 4960 + W -= cfs_rq->tg_load_avg_contrib; 4961 + W += w; 4950 4962 4951 4963 /* 4952 4964 * w = rw_i + @wl 4953 4965 */ 4954 - w = cfs_rq_load_avg(se->my_q) + wl; 4966 + w += wl; 4955 4967 4956 4968 /* 4957 4969 * wl = S * s'_i; see (2)
+5 -3
mm/shmem.c
··· 2225 2225 error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2226 2226 if (error) { 2227 2227 /* Remove the !PageUptodate pages we added */ 2228 - shmem_undo_range(inode, 2229 - (loff_t)start << PAGE_SHIFT, 2230 - ((loff_t)index << PAGE_SHIFT) - 1, true); 2228 + if (index > start) { 2229 + shmem_undo_range(inode, 2230 + (loff_t)start << PAGE_SHIFT, 2231 + ((loff_t)index << PAGE_SHIFT) - 1, true); 2232 + } 2231 2233 goto undone; 2232 2234 } 2233 2235
+1 -1
net/bridge/br_netfilter_hooks.c
··· 700 700 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 701 701 int (*output)(struct net *, struct sock *, struct sk_buff *)) 702 702 { 703 - unsigned int mtu = ip_skb_dst_mtu(skb); 703 + unsigned int mtu = ip_skb_dst_mtu(sk, skb); 704 704 struct iphdr *iph = ip_hdr(skb); 705 705 706 706 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
+43
net/core/flow_dissector.c
··· 651 651 } 652 652 EXPORT_SYMBOL(make_flow_keys_digest); 653 653 654 + static struct flow_dissector flow_keys_dissector_symmetric __read_mostly; 655 + 656 + u32 __skb_get_hash_symmetric(struct sk_buff *skb) 657 + { 658 + struct flow_keys keys; 659 + 660 + __flow_hash_secret_init(); 661 + 662 + memset(&keys, 0, sizeof(keys)); 663 + __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys, 664 + NULL, 0, 0, 0, 665 + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); 666 + 667 + return __flow_hash_from_keys(&keys, hashrnd); 668 + } 669 + EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); 670 + 654 671 /** 655 672 * __skb_get_hash: calculate a flow hash 656 673 * @skb: sk_buff to calculate flow hash from ··· 885 868 }, 886 869 }; 887 870 871 + static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = { 872 + { 873 + .key_id = FLOW_DISSECTOR_KEY_CONTROL, 874 + .offset = offsetof(struct flow_keys, control), 875 + }, 876 + { 877 + .key_id = FLOW_DISSECTOR_KEY_BASIC, 878 + .offset = offsetof(struct flow_keys, basic), 879 + }, 880 + { 881 + .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, 882 + .offset = offsetof(struct flow_keys, addrs.v4addrs), 883 + }, 884 + { 885 + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, 886 + .offset = offsetof(struct flow_keys, addrs.v6addrs), 887 + }, 888 + { 889 + .key_id = FLOW_DISSECTOR_KEY_PORTS, 890 + .offset = offsetof(struct flow_keys, ports), 891 + }, 892 + }; 893 + 888 894 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { 889 895 { 890 896 .key_id = FLOW_DISSECTOR_KEY_CONTROL, ··· 929 889 skb_flow_dissector_init(&flow_keys_dissector, 930 890 flow_keys_dissector_keys, 931 891 ARRAY_SIZE(flow_keys_dissector_keys)); 892 + skb_flow_dissector_init(&flow_keys_dissector_symmetric, 893 + flow_keys_dissector_symmetric_keys, 894 + ARRAY_SIZE(flow_keys_dissector_symmetric_keys)); 932 895 skb_flow_dissector_init(&flow_keys_buf_dissector, 933 896 flow_keys_buf_dissector_keys, 934 897 ARRAY_SIZE(flow_keys_buf_dissector_keys));
-18
net/core/skbuff.c
··· 3016 3016 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3017 3017 3018 3018 /** 3019 - * skb_push_rcsum - push skb and update receive checksum 3020 - * @skb: buffer to update 3021 - * @len: length of data pulled 3022 - * 3023 - * This function performs an skb_push on the packet and updates 3024 - * the CHECKSUM_COMPLETE checksum. It should be used on 3025 - * receive path processing instead of skb_push unless you know 3026 - * that the checksum difference is zero (e.g., a valid IP header) 3027 - * or you are setting ip_summed to CHECKSUM_NONE. 3028 - */ 3029 - static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len) 3030 - { 3031 - skb_push(skb, len); 3032 - skb_postpush_rcsum(skb, skb->data, len); 3033 - return skb->data; 3034 - } 3035 - 3036 - /** 3037 3019 * skb_pull_rcsum - pull skb and update receive checksum 3038 3020 * @skb: buffer to update 3039 3021 * @len: length of data pulled
+12 -9
net/decnet/dn_fib.c
··· 41 41 #include <net/dn_fib.h> 42 42 #include <net/dn_neigh.h> 43 43 #include <net/dn_dev.h> 44 + #include <net/nexthop.h> 44 45 45 46 #define RT_MIN_TABLE 1 46 47 ··· 151 150 struct rtnexthop *nhp = nla_data(attr); 152 151 int nhs = 0, nhlen = nla_len(attr); 153 152 154 - while(nhlen >= (int)sizeof(struct rtnexthop)) { 155 - if ((nhlen -= nhp->rtnh_len) < 0) 156 - return 0; 153 + while (rtnh_ok(nhp, nhlen)) { 157 154 nhs++; 158 - nhp = RTNH_NEXT(nhp); 155 + nhp = rtnh_next(nhp, &nhlen); 159 156 } 160 157 161 - return nhs; 158 + /* leftover implies invalid nexthop configuration, discard it */ 159 + return nhlen > 0 ? 0 : nhs; 162 160 } 163 161 164 162 static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr, ··· 167 167 int nhlen = nla_len(attr); 168 168 169 169 change_nexthops(fi) { 170 - int attrlen = nhlen - sizeof(struct rtnexthop); 171 - if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0) 170 + int attrlen; 171 + 172 + if (!rtnh_ok(nhp, nhlen)) 172 173 return -EINVAL; 173 174 174 175 nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags; 175 176 nh->nh_oif = nhp->rtnh_ifindex; 176 177 nh->nh_weight = nhp->rtnh_hops + 1; 177 178 178 - if (attrlen) { 179 + attrlen = rtnh_attrlen(nhp); 180 + if (attrlen > 0) { 179 181 struct nlattr *gw_attr; 180 182 181 183 gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY); 182 184 nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0; 183 185 } 184 - nhp = RTNH_NEXT(nhp); 186 + 187 + nhp = rtnh_next(nhp, &nhlen); 185 188 } endfor_nexthops(fi); 186 189 187 190 return 0;
+2 -2
net/ipv4/ip_output.c
··· 271 271 return dst_output(net, sk, skb); 272 272 } 273 273 #endif 274 - mtu = ip_skb_dst_mtu(skb); 274 + mtu = ip_skb_dst_mtu(sk, skb); 275 275 if (skb_is_gso(skb)) 276 276 return ip_finish_output_gso(net, sk, skb, mtu); 277 277 ··· 541 541 542 542 iph = ip_hdr(skb); 543 543 544 - mtu = ip_skb_dst_mtu(skb); 544 + mtu = ip_skb_dst_mtu(sk, skb); 545 545 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) 546 546 mtu = IPCB(skb)->frag_max_size; 547 547
+1
net/ipv6/ip6_fib.c
··· 177 177 } 178 178 } 179 179 180 + free_percpu(non_pcpu_rt->rt6i_pcpu); 180 181 non_pcpu_rt->rt6i_pcpu = NULL; 181 182 } 182 183
+1 -1
net/packet/af_packet.c
··· 1341 1341 struct sk_buff *skb, 1342 1342 unsigned int num) 1343 1343 { 1344 - return reciprocal_scale(skb_get_hash(skb), num); 1344 + return reciprocal_scale(__skb_get_hash_symmetric(skb), num); 1345 1345 } 1346 1346 1347 1347 static unsigned int fanout_demux_lb(struct packet_fanout *f,
+3 -2
net/rds/tcp.c
··· 616 616 617 617 ret = rds_tcp_recv_init(); 618 618 if (ret) 619 - goto out_slab; 619 + goto out_pernet; 620 620 621 621 ret = rds_trans_register(&rds_tcp_transport); 622 622 if (ret) ··· 628 628 629 629 out_recv: 630 630 rds_tcp_recv_exit(); 631 - out_slab: 631 + out_pernet: 632 632 unregister_pernet_subsys(&rds_tcp_net_ops); 633 + out_slab: 633 634 kmem_cache_destroy(rds_tcp_conn_slab); 634 635 out: 635 636 return ret;
+1 -1
net/sched/act_mirred.c
··· 181 181 182 182 if (!(at & AT_EGRESS)) { 183 183 if (m->tcfm_ok_push) 184 - skb_push(skb2, skb->mac_len); 184 + skb_push_rcsum(skb2, skb->mac_len); 185 185 } 186 186 187 187 /* mirror is always swallowed */
+19 -17
security/apparmor/lsm.c
··· 500 500 { 501 501 struct common_audit_data sa; 502 502 struct apparmor_audit_data aad = {0,}; 503 - char *command, *args = value; 503 + char *command, *largs = NULL, *args = value; 504 504 size_t arg_size; 505 505 int error; 506 506 507 507 if (size == 0) 508 508 return -EINVAL; 509 - /* args points to a PAGE_SIZE buffer, AppArmor requires that 510 - * the buffer must be null terminated or have size <= PAGE_SIZE -1 511 - * so that AppArmor can null terminate them 512 - */ 513 - if (args[size - 1] != '\0') { 514 - if (size == PAGE_SIZE) 515 - return -EINVAL; 516 - args[size] = '\0'; 517 - } 518 - 519 509 /* task can only write its own attributes */ 520 510 if (current != task) 521 511 return -EACCES; 522 512 523 - args = value; 513 + /* AppArmor requires that the buffer must be null terminated atm */ 514 + if (args[size - 1] != '\0') { 515 + /* null terminate */ 516 + largs = args = kmalloc(size + 1, GFP_KERNEL); 517 + if (!args) 518 + return -ENOMEM; 519 + memcpy(args, value, size); 520 + args[size] = '\0'; 521 + } 522 + 523 + error = -EINVAL; 524 524 args = strim(args); 525 525 command = strsep(&args, " "); 526 526 if (!args) 527 - return -EINVAL; 527 + goto out; 528 528 args = skip_spaces(args); 529 529 if (!*args) 530 - return -EINVAL; 530 + goto out; 531 531 532 532 arg_size = size - (args - (char *) value); 533 533 if (strcmp(name, "current") == 0) { ··· 553 553 goto fail; 554 554 } else 555 555 /* only support the "current" and "exec" process attributes */ 556 - return -EINVAL; 556 + goto fail; 557 557 558 558 if (!error) 559 559 error = size; 560 + out: 561 + kfree(largs); 560 562 return error; 561 563 562 564 fail: ··· 567 565 aad.profile = aa_current_profile(); 568 566 aad.op = OP_SETPROCATTR; 569 567 aad.info = name; 570 - aad.error = -EINVAL; 568 + aad.error = error = -EINVAL; 571 569 aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL); 572 - return -EINVAL; 570 + goto out; 573 571 } 574 572 575 573 static int apparmor_task_setrlimit(struct task_struct 
*task,
+1 -1
sound/core/timer.c
··· 1955 1955 1956 1956 qhead = tu->qhead++; 1957 1957 tu->qhead %= tu->queue_size; 1958 + tu->qused--; 1958 1959 spin_unlock_irq(&tu->qlock); 1959 1960 1960 1961 if (tu->tread) { ··· 1969 1968 } 1970 1969 1971 1970 spin_lock_irq(&tu->qlock); 1972 - tu->qused--; 1973 1971 if (err < 0) 1974 1972 goto _error; 1975 1973 result += unit;
+2 -3
sound/pci/au88x0/au88x0_core.c
··· 1444 1444 int page, p, pp, delta, i; 1445 1445 1446 1446 page = 1447 - (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) & 1448 - WT_SUBBUF_MASK) 1449 - >> WT_SUBBUF_SHIFT; 1447 + (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) 1448 + >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK; 1450 1449 if (dma->nr_periods >= 4) 1451 1450 delta = (page - dma->period_real) & 3; 1452 1451 else {
+2 -2
sound/pci/echoaudio/echoaudio.c
··· 2200 2200 u32 pipe_alloc_mask; 2201 2201 int err; 2202 2202 2203 - commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL); 2203 + commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL); 2204 2204 if (commpage_bak == NULL) 2205 2205 return -ENOMEM; 2206 2206 commpage = chip->comm_page; 2207 - memcpy(commpage_bak, commpage, sizeof(struct comm_page)); 2207 + memcpy(commpage_bak, commpage, sizeof(*commpage)); 2208 2208 2209 2209 err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device); 2210 2210 if (err < 0) {
+2
sound/pci/hda/hda_generic.c
··· 3977 3977 3978 3978 for (n = 0; n < spec->paths.used; n++) { 3979 3979 path = snd_array_elem(&spec->paths, n); 3980 + if (!path->depth) 3981 + continue; 3980 3982 if (path->path[0] == nid || 3981 3983 path->path[path->depth - 1] == nid) { 3982 3984 bool pin_old = path->pin_enabled;
+5 -1
sound/pci/hda/hda_intel.c
··· 367 367 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) 368 368 #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171) 369 369 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) 370 + #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) 370 371 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) 371 372 #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ 372 - IS_KBL(pci) || IS_KBL_LP(pci) 373 + IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) 373 374 374 375 static char *driver_short_names[] = { 375 376 [AZX_DRIVER_ICH] = "HDA Intel", ··· 2190 2189 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2191 2190 /* Kabylake-LP */ 2192 2191 { PCI_DEVICE(0x8086, 0x9d71), 2192 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2193 + /* Kabylake-H */ 2194 + { PCI_DEVICE(0x8086, 0xa2f0), 2193 2195 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2194 2196 /* Broxton-P(Apollolake) */ 2195 2197 { PCI_DEVICE(0x8086, 0x5a98),
+1
sound/pci/hda/patch_realtek.c
··· 5651 5651 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), 5652 5652 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5653 5653 SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), 5654 + SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460), 5654 5655 SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), 5655 5656 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5656 5657 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+4 -3
sound/soc/codecs/Kconfig
··· 483 483 tristate 484 484 485 485 config SND_SOC_HDMI_CODEC 486 - tristate 487 - select SND_PCM_ELD 488 - select SND_PCM_IEC958 486 + tristate 487 + select SND_PCM_ELD 488 + select SND_PCM_IEC958 489 + select HDMI 489 490 490 491 config SND_SOC_ES8328 491 492 tristate "Everest Semi ES8328 CODEC"
+1 -1
sound/soc/codecs/ak4613.c
··· 146 146 .max_register = 0x16, 147 147 .reg_defaults = ak4613_reg, 148 148 .num_reg_defaults = ARRAY_SIZE(ak4613_reg), 149 + .cache_type = REGCACHE_RBTREE, 149 150 }; 150 151 151 152 static const struct of_device_id ak4613_of_match[] = { ··· 531 530 static struct i2c_driver ak4613_i2c_driver = { 532 531 .driver = { 533 532 .name = "ak4613-codec", 534 - .owner = THIS_MODULE, 535 533 .of_match_table = ak4613_of_match, 536 534 }, 537 535 .probe = ak4613_i2c_probe,
+1
sound/soc/codecs/cx20442.c
··· 226 226 if (!tty->disc_data) 227 227 return -ENODEV; 228 228 229 + tty->receive_room = 16; 229 230 if (tty->ops->write(tty, v253_init, len) != len) { 230 231 ret = -EIO; 231 232 goto err;
+20
sound/soc/codecs/hdac_hdmi.c
··· 1474 1474 * exit, we call pm_runtime_suspend() so that will do for us 1475 1475 */ 1476 1476 hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev)); 1477 + if (!hlink) { 1478 + dev_err(&edev->hdac.dev, "hdac link not found\n"); 1479 + return -EIO; 1480 + } 1481 + 1477 1482 snd_hdac_ext_bus_link_get(edev->ebus, hlink); 1478 1483 1479 1484 ret = create_fill_widget_route_map(dapm); ··· 1639 1634 1640 1635 /* hold the ref while we probe */ 1641 1636 hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev)); 1637 + if (!hlink) { 1638 + dev_err(&edev->hdac.dev, "hdac link not found\n"); 1639 + return -EIO; 1640 + } 1641 + 1642 1642 snd_hdac_ext_bus_link_get(edev->ebus, hlink); 1643 1643 1644 1644 hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL); ··· 1754 1744 } 1755 1745 1756 1746 hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev)); 1747 + if (!hlink) { 1748 + dev_err(dev, "hdac link not found\n"); 1749 + return -EIO; 1750 + } 1751 + 1757 1752 snd_hdac_ext_bus_link_put(ebus, hlink); 1758 1753 1759 1754 return 0; ··· 1780 1765 return 0; 1781 1766 1782 1767 hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev)); 1768 + if (!hlink) { 1769 + dev_err(dev, "hdac link not found\n"); 1770 + return -EIO; 1771 + } 1772 + 1783 1773 snd_hdac_ext_bus_link_get(ebus, hlink); 1784 1774 1785 1775 err = snd_hdac_display_power(bus, true);
+1 -1
sound/soc/codecs/rt5645.c
··· 253 253 { 0x2b, 0x5454 }, 254 254 { 0x2c, 0xaaa0 }, 255 255 { 0x2d, 0x0000 }, 256 - { 0x2f, 0x1002 }, 256 + { 0x2f, 0x5002 }, 257 257 { 0x31, 0x5000 }, 258 258 { 0x32, 0x0000 }, 259 259 { 0x33, 0x0000 },
+1 -1
sound/soc/codecs/rt5670.c
··· 619 619 RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1), 620 620 SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL, 621 621 RT5670_L_VOL_SFT, RT5670_R_VOL_SFT, 622 - 39, 0, out_vol_tlv), 622 + 39, 1, out_vol_tlv), 623 623 /* OUTPUT Control */ 624 624 SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1, 625 625 RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
+1 -1
sound/soc/codecs/wm5102.c
··· 1872 1872 .capture = { 1873 1873 .stream_name = "Audio Trace CPU", 1874 1874 .channels_min = 1, 1875 - .channels_max = 6, 1875 + .channels_max = 4, 1876 1876 .rates = WM5102_RATES, 1877 1877 .formats = WM5102_FORMATS, 1878 1878 },
+1
sound/soc/codecs/wm5110.c
··· 1723 1723 { "OUT2L", NULL, "SYSCLK" }, 1724 1724 { "OUT2R", NULL, "SYSCLK" }, 1725 1725 { "OUT3L", NULL, "SYSCLK" }, 1726 + { "OUT3R", NULL, "SYSCLK" }, 1726 1727 { "OUT4L", NULL, "SYSCLK" }, 1727 1728 { "OUT4R", NULL, "SYSCLK" }, 1728 1729 { "OUT5L", NULL, "SYSCLK" },
+1
sound/soc/codecs/wm8940.c
··· 743 743 .max_register = WM8940_MONOMIX, 744 744 .reg_defaults = wm8940_reg_defaults, 745 745 .num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults), 746 + .cache_type = REGCACHE_RBTREE, 746 747 747 748 .readable_reg = wm8940_readable_register, 748 749 .volatile_reg = wm8940_volatile_register,
+52 -4
sound/soc/davinci/davinci-mcasp.c
··· 1513 1513 }; 1514 1514 1515 1515 static struct davinci_mcasp_pdata dra7_mcasp_pdata = { 1516 - .tx_dma_offset = 0x200, 1517 - .rx_dma_offset = 0x284, 1516 + /* The CFG port offset will be calculated if it is needed */ 1517 + .tx_dma_offset = 0, 1518 + .rx_dma_offset = 0, 1518 1519 .version = MCASP_VERSION_4, 1519 1520 }; 1520 1521 ··· 1735 1734 return PCM_EDMA; 1736 1735 } 1737 1736 1737 + static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata) 1738 + { 1739 + int i; 1740 + u32 offset = 0; 1741 + 1742 + if (pdata->version != MCASP_VERSION_4) 1743 + return pdata->tx_dma_offset; 1744 + 1745 + for (i = 0; i < pdata->num_serializer; i++) { 1746 + if (pdata->serial_dir[i] == TX_MODE) { 1747 + if (!offset) { 1748 + offset = DAVINCI_MCASP_TXBUF_REG(i); 1749 + } else { 1750 + pr_err("%s: Only one serializer allowed!\n", 1751 + __func__); 1752 + break; 1753 + } 1754 + } 1755 + } 1756 + 1757 + return offset; 1758 + } 1759 + 1760 + static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata) 1761 + { 1762 + int i; 1763 + u32 offset = 0; 1764 + 1765 + if (pdata->version != MCASP_VERSION_4) 1766 + return pdata->rx_dma_offset; 1767 + 1768 + for (i = 0; i < pdata->num_serializer; i++) { 1769 + if (pdata->serial_dir[i] == RX_MODE) { 1770 + if (!offset) { 1771 + offset = DAVINCI_MCASP_RXBUF_REG(i); 1772 + } else { 1773 + pr_err("%s: Only one serializer allowed!\n", 1774 + __func__); 1775 + break; 1776 + } 1777 + } 1778 + } 1779 + 1780 + return offset; 1781 + } 1782 + 1738 1783 static int davinci_mcasp_probe(struct platform_device *pdev) 1739 1784 { 1740 1785 struct snd_dmaengine_dai_dma_data *dma_data; ··· 1909 1862 if (dat) 1910 1863 dma_data->addr = dat->start; 1911 1864 else 1912 - dma_data->addr = mem->start + pdata->tx_dma_offset; 1865 + dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata); 1913 1866 1914 1867 dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK]; 1915 1868 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); ··· 
1930 1883 if (dat) 1931 1884 dma_data->addr = dat->start; 1932 1885 else 1933 - dma_data->addr = mem->start + pdata->rx_dma_offset; 1886 + dma_data->addr = 1887 + mem->start + davinci_mcasp_rxdma_offset(pdata); 1934 1888 1935 1889 dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE]; 1936 1890 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+2 -2
sound/soc/davinci/davinci-mcasp.h
··· 85 85 (n << 2)) 86 86 87 87 /* Transmit Buffer for Serializer n */ 88 - #define DAVINCI_MCASP_TXBUF_REG 0x200 88 + #define DAVINCI_MCASP_TXBUF_REG(n) (0x200 + (n << 2)) 89 89 /* Receive Buffer for Serializer n */ 90 - #define DAVINCI_MCASP_RXBUF_REG 0x280 90 + #define DAVINCI_MCASP_RXBUF_REG(n) (0x280 + (n << 2)) 91 91 92 92 /* McASP FIFO Registers */ 93 93 #define DAVINCI_MCASP_V2_AFIFO_BASE (0x1010)
+6 -6
sound/soc/fsl/fsl_ssi.c
··· 952 952 ssi_private->i2s_mode = CCSR_SSI_SCR_NET; 953 953 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 954 954 case SND_SOC_DAIFMT_I2S: 955 + regmap_update_bits(regs, CCSR_SSI_STCCR, 956 + CCSR_SSI_SxCCR_DC_MASK, 957 + CCSR_SSI_SxCCR_DC(2)); 958 + regmap_update_bits(regs, CCSR_SSI_SRCCR, 959 + CCSR_SSI_SxCCR_DC_MASK, 960 + CCSR_SSI_SxCCR_DC(2)); 955 961 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 956 962 case SND_SOC_DAIFMT_CBM_CFS: 957 963 case SND_SOC_DAIFMT_CBS_CFS: 958 964 ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER; 959 - regmap_update_bits(regs, CCSR_SSI_STCCR, 960 - CCSR_SSI_SxCCR_DC_MASK, 961 - CCSR_SSI_SxCCR_DC(2)); 962 - regmap_update_bits(regs, CCSR_SSI_SRCCR, 963 - CCSR_SSI_SxCCR_DC_MASK, 964 - CCSR_SSI_SxCCR_DC(2)); 965 965 break; 966 966 case SND_SOC_DAIFMT_CBM_CFM: 967 967 ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
+7 -2
sound/soc/intel/atom/sst-mfld-platform-compress.c
··· 182 182 case SNDRV_PCM_TRIGGER_START: 183 183 if (stream->compr_ops->stream_start) 184 184 return stream->compr_ops->stream_start(sst->dev, stream->id); 185 + break; 185 186 case SNDRV_PCM_TRIGGER_STOP: 186 187 if (stream->compr_ops->stream_drop) 187 188 return stream->compr_ops->stream_drop(sst->dev, stream->id); 189 + break; 188 190 case SND_COMPR_TRIGGER_DRAIN: 189 191 if (stream->compr_ops->stream_drain) 190 192 return stream->compr_ops->stream_drain(sst->dev, stream->id); 193 + break; 191 194 case SND_COMPR_TRIGGER_PARTIAL_DRAIN: 192 195 if (stream->compr_ops->stream_partial_drain) 193 196 return stream->compr_ops->stream_partial_drain(sst->dev, stream->id); 197 + break; 194 198 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 195 199 if (stream->compr_ops->stream_pause) 196 200 return stream->compr_ops->stream_pause(sst->dev, stream->id); 201 + break; 197 202 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 198 203 if (stream->compr_ops->stream_pause_release) 199 204 return stream->compr_ops->stream_pause_release(sst->dev, stream->id); 200 - default: 201 - return -EINVAL; 205 + break; 202 206 } 207 + return -EINVAL; 203 208 } 204 209 205 210 static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
+1
sound/soc/intel/skylake/bxt-sst.c
··· 291 291 sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), 292 292 SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); 293 293 294 + INIT_LIST_HEAD(&sst->module_list); 294 295 ret = skl_ipc_init(dev, skl); 295 296 if (ret) 296 297 return ret;
+1 -1
sound/soc/sh/rcar/adg.c
··· 518 518 } 519 519 } 520 520 521 - rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr); 521 + rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr); 522 522 rsnd_mod_write(adg_mod, BRRA, rbga); 523 523 rsnd_mod_write(adg_mod, BRRB, rbgb); 524 524