Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' into perf/core

Pick up the latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+2783 -1369
+1 -1
Documentation/kernel-parameters.txt
··· 2953 2953 improve throughput, but will also increase the 2954 2954 amount of memory reserved for use by the client. 2955 2955 2956 - swapaccount[=0|1] 2956 + swapaccount=[0|1] 2957 2957 [KNL] Enable accounting of swap in memory resource 2958 2958 controller if no parameter or 1 is given or disable 2959 2959 it if 0 is given (See Documentation/cgroups/memory.txt)
+7 -7
MAINTAINERS
··· 5581 5581 F: drivers/media/tuners/mxl5007t.* 5582 5582 5583 5583 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) 5584 - M: Andrew Gallatin <gallatin@myri.com> 5584 + M: Hyong-Youb Kim <hykim@myri.com> 5585 5585 L: netdev@vger.kernel.org 5586 - W: http://www.myri.com/scs/download-Myri10GE.html 5586 + W: https://www.myricom.com/support/downloads/myri10ge.html 5587 5587 S: Supported 5588 5588 F: drivers/net/ethernet/myricom/myri10ge/ 5589 5589 ··· 5884 5884 F: include/linux/i2c-omap.h 5885 5885 5886 5886 OMAP DEVICE TREE SUPPORT 5887 - M: Benoît Cousson <b-cousson@ti.com> 5887 + M: Benoît Cousson <bcousson@baylibre.com> 5888 5888 M: Tony Lindgren <tony@atomide.com> 5889 5889 L: linux-omap@vger.kernel.org 5890 5890 L: devicetree@vger.kernel.org ··· 5964 5964 F: drivers/char/hw_random/omap-rng.c 5965 5965 5966 5966 OMAP HWMOD SUPPORT 5967 - M: Benoît Cousson <b-cousson@ti.com> 5967 + M: Benoît Cousson <bcousson@baylibre.com> 5968 5968 M: Paul Walmsley <paul@pwsan.com> 5969 5969 L: linux-omap@vger.kernel.org 5970 5970 S: Maintained 5971 5971 F: arch/arm/mach-omap2/omap_hwmod.* 5972 5972 5973 5973 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES 5974 - M: Benoît Cousson <b-cousson@ti.com> 5974 + M: Benoît Cousson <bcousson@baylibre.com> 5975 5975 L: linux-omap@vger.kernel.org 5976 5976 S: Maintained 5977 5977 F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c ··· 7366 7366 7367 7367 SGI GRU DRIVER 7368 7368 M: Dimitri Sivanich <sivanich@sgi.com> 7369 - M: Robin Holt <holt@sgi.com> 7370 7369 S: Maintained 7371 7370 F: drivers/misc/sgi-gru/ 7372 7371 ··· 7385 7386 F: Documentation/sgi-visws.txt 7386 7387 7387 7388 SGI XP/XPC/XPNET DRIVER 7388 - M: Robin Holt <holt@sgi.com> 7389 + M: Cliff Whickman <cpw@sgi.com> 7390 + M: Robin Holt <robinmholt@gmail.com> 7389 7391 S: Maintained 7390 7392 F: drivers/misc/sgi-xp/ 7391 7393
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 11 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc7 5 5 NAME = Linux for Workgroups 6 6 7 7 # *DOCUMENTATION*
+6
arch/Kconfig
··· 407 407 help 408 408 Architecture has the first two arguments of clone(2) swapped. 409 409 410 + config CLONE_BACKWARDS3 411 + bool 412 + help 413 + Architecture has tls passed as the 3rd argument of clone(2), 414 + not the 5th one. 415 + 410 416 config ODD_RT_SIGACTION 411 417 bool 412 418 help
+10
arch/arc/lib/strchr-700.S
··· 39 39 ld.a r2,[r0,4] 40 40 sub r12,r6,r7 41 41 bic r12,r12,r6 42 + #ifdef __LITTLE_ENDIAN__ 42 43 and r7,r12,r4 43 44 breq r7,0,.Loop ; For speed, we want this branch to be unaligned. 44 45 b .Lfound_char ; Likewise this one. 46 + #else 47 + and r12,r12,r4 48 + breq r12,0,.Loop ; For speed, we want this branch to be unaligned. 49 + lsr_s r12,r12,7 50 + bic r2,r7,r6 51 + b.d .Lfound_char_b 52 + and_s r2,r2,r12 53 + #endif 45 54 ; /* We require this code address to be unaligned for speed... */ 46 55 .Laligned: 47 56 ld_s r2,[r0] ··· 104 95 lsr r7,r7,7 105 96 106 97 bic r2,r7,r6 98 + .Lfound_char_b: 107 99 norm r2,r2 108 100 sub_s r0,r0,4 109 101 asr_s r2,r2,3
+2 -2
arch/arm/boot/dts/at91sam9n12ek.dts
··· 14 14 compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; 15 15 16 16 chosen { 17 - bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; 17 + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; 18 18 }; 19 19 20 20 memory { 21 - reg = <0x20000000 0x10000000>; 21 + reg = <0x20000000 0x8000000>; 22 22 }; 23 23 24 24 clocks {
+3 -2
arch/arm/boot/dts/at91sam9x5ek.dtsi
··· 94 94 95 95 usb0: ohci@00600000 { 96 96 status = "okay"; 97 - num-ports = <2>; 98 - atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW 97 + num-ports = <3>; 98 + atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */ 99 + &pioD 19 GPIO_ACTIVE_LOW 99 100 &pioD 20 GPIO_ACTIVE_LOW 100 101 >; 101 102 };
+2
arch/arm/boot/dts/tegra20-seaboard.dts
··· 830 830 regulator-max-microvolt = <5000000>; 831 831 enable-active-high; 832 832 gpio = <&gpio 24 0>; /* PD0 */ 833 + regulator-always-on; 834 + regulator-boot-on; 833 835 }; 834 836 }; 835 837
+2
arch/arm/boot/dts/tegra20-trimslice.dts
··· 412 412 regulator-max-microvolt = <5000000>; 413 413 enable-active-high; 414 414 gpio = <&gpio 170 0>; /* PV2 */ 415 + regulator-always-on; 416 + regulator-boot-on; 415 417 }; 416 418 }; 417 419
+4
arch/arm/boot/dts/tegra20-whistler.dts
··· 588 588 regulator-max-microvolt = <5000000>; 589 589 enable-active-high; 590 590 gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ 591 + regulator-always-on; 592 + regulator-boot-on; 591 593 }; 592 594 593 595 vbus3_reg: regulator@3 { ··· 600 598 regulator-max-microvolt = <5000000>; 601 599 enable-active-high; 602 600 gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ 601 + regulator-always-on; 602 + regulator-boot-on; 603 603 }; 604 604 }; 605 605
+3
arch/arm/include/asm/smp_plat.h
··· 88 88 { 89 89 return 1 << mpidr_hash.bits; 90 90 } 91 + 92 + extern int platform_can_cpu_hotplug(void); 93 + 91 94 #endif
+31 -20
arch/arm/include/asm/spinlock.h
··· 107 107 " subs %1, %0, %0, ror #16\n" 108 108 " addeq %0, %0, %4\n" 109 109 " strexeq %2, %0, [%3]" 110 - : "=&r" (slock), "=&r" (contended), "=r" (res) 110 + : "=&r" (slock), "=&r" (contended), "=&r" (res) 111 111 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) 112 112 : "cc"); 113 113 } while (res); ··· 168 168 169 169 static inline int arch_write_trylock(arch_rwlock_t *rw) 170 170 { 171 - unsigned long tmp; 171 + unsigned long contended, res; 172 172 173 - __asm__ __volatile__( 174 - " ldrex %0, [%1]\n" 175 - " teq %0, #0\n" 176 - " strexeq %0, %2, [%1]" 177 - : "=&r" (tmp) 178 - : "r" (&rw->lock), "r" (0x80000000) 179 - : "cc"); 173 + do { 174 + __asm__ __volatile__( 175 + " ldrex %0, [%2]\n" 176 + " mov %1, #0\n" 177 + " teq %0, #0\n" 178 + " strexeq %1, %3, [%2]" 179 + : "=&r" (contended), "=&r" (res) 180 + : "r" (&rw->lock), "r" (0x80000000) 181 + : "cc"); 182 + } while (res); 180 183 181 - if (tmp == 0) { 184 + if (!contended) { 182 185 smp_mb(); 183 186 return 1; 184 187 } else { ··· 257 254 258 255 static inline int arch_read_trylock(arch_rwlock_t *rw) 259 256 { 260 - unsigned long tmp, tmp2 = 1; 257 + unsigned long contended, res; 261 258 262 - __asm__ __volatile__( 263 - " ldrex %0, [%2]\n" 264 - " adds %0, %0, #1\n" 265 - " strexpl %1, %0, [%2]\n" 266 - : "=&r" (tmp), "+r" (tmp2) 267 - : "r" (&rw->lock) 268 - : "cc"); 259 + do { 260 + __asm__ __volatile__( 261 + " ldrex %0, [%2]\n" 262 + " mov %1, #0\n" 263 + " adds %0, %0, #1\n" 264 + " strexpl %1, %0, [%2]" 265 + : "=&r" (contended), "=&r" (res) 266 + : "r" (&rw->lock) 267 + : "cc"); 268 + } while (res); 269 269 270 - smp_mb(); 271 - return tmp2 == 0; 270 + /* If the lock is negative, then it is already held for write. */ 271 + if (contended < 0x80000000) { 272 + smp_mb(); 273 + return 1; 274 + } else { 275 + return 0; 276 + } 272 277 } 273 278 274 279 /* read_can_lock - would read_trylock() succeed? */
+5 -2
arch/arm/include/asm/tlb.h
··· 43 43 struct mm_struct *mm; 44 44 unsigned int fullmm; 45 45 struct vm_area_struct *vma; 46 + unsigned long start, end; 46 47 unsigned long range_start; 47 48 unsigned long range_end; 48 49 unsigned int nr; ··· 108 107 } 109 108 110 109 static inline void 111 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) 110 + tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 112 111 { 113 112 tlb->mm = mm; 114 - tlb->fullmm = fullmm; 113 + tlb->fullmm = !(start | (end+1)); 114 + tlb->start = start; 115 + tlb->end = end; 115 116 tlb->vma = NULL; 116 117 tlb->max = ARRAY_SIZE(tlb->local); 117 118 tlb->pages = tlb->local;
+2 -1
arch/arm/kernel/entry-armv.S
··· 357 357 .endm 358 358 359 359 .macro kuser_cmpxchg_check 360 - #if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 360 + #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ 361 + !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 361 362 #ifndef CONFIG_MMU 362 363 #warning "NPTL on non MMU needs fixing" 363 364 #else
+3 -6
arch/arm/kernel/fiq.c
··· 84 84 85 85 void set_fiq_handler(void *start, unsigned int length) 86 86 { 87 - #if defined(CONFIG_CPU_USE_DOMAINS) 88 - void *base = (void *)0xffff0000; 89 - #else 90 87 void *base = vectors_page; 91 - #endif 92 88 unsigned offset = FIQ_OFFSET; 93 89 94 90 memcpy(base + offset, start, length); 91 + if (!cache_is_vipt_nonaliasing()) 92 + flush_icache_range((unsigned long)base + offset, offset + 93 + length); 95 94 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); 96 - if (!vectors_high()) 97 - flush_icache_range(offset, offset + length); 98 95 } 99 96 100 97 int claim_fiq(struct fiq_handler *f)
+17 -4
arch/arm/kernel/machine_kexec.c
··· 15 15 #include <asm/mmu_context.h> 16 16 #include <asm/cacheflush.h> 17 17 #include <asm/mach-types.h> 18 + #include <asm/smp_plat.h> 18 19 #include <asm/system_misc.h> 19 20 20 21 extern const unsigned char relocate_new_kernel[]; ··· 38 37 struct kexec_segment *current_segment; 39 38 __be32 header; 40 39 int i, err; 40 + 41 + /* 42 + * Validate that if the current HW supports SMP, then the SW supports 43 + * and implements CPU hotplug for the current HW. If not, we won't be 44 + * able to kexec reliably, so fail the prepare operation. 45 + */ 46 + if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug()) 47 + return -EINVAL; 41 48 42 49 /* 43 50 * No segment at default ATAGs address. try to locate ··· 82 73 crash_save_cpu(&regs, smp_processor_id()); 83 74 flush_cache_all(); 84 75 76 + set_cpu_online(smp_processor_id(), false); 85 77 atomic_dec(&waiting_for_crash_ipi); 86 78 while (1) 87 79 cpu_relax(); ··· 144 134 unsigned long reboot_code_buffer_phys; 145 135 void *reboot_code_buffer; 146 136 147 - if (num_online_cpus() > 1) { 148 - pr_err("kexec: error: multiple CPUs still online\n"); 149 - return; 150 - } 137 + /* 138 + * This can only happen if machine_shutdown() failed to disable some 139 + * CPU, and that can only happen if the checks in 140 + * machine_kexec_prepare() were not correct. If this fails, we can't 141 + * reliably kexec anyway, so BUG_ON is appropriate. 142 + */ 143 + BUG_ON(num_online_cpus() > 1); 151 144 152 145 page_list = image->head & PAGE_MASK; 153 146
+9 -1
arch/arm/kernel/perf_event.c
··· 53 53 static int 54 54 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) 55 55 { 56 - int mapping = (*event_map)[config]; 56 + int mapping; 57 + 58 + if (config >= PERF_COUNT_HW_MAX) 59 + return -EINVAL; 60 + 61 + mapping = (*event_map)[config]; 57 62 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; 58 63 } 59 64 ··· 257 252 { 258 253 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 259 254 struct pmu *leader_pmu = event->group_leader->pmu; 255 + 256 + if (is_software_event(event)) 257 + return 1; 260 258 261 259 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) 262 260 return 1;
+1 -1
arch/arm/kernel/process.c
··· 462 462 { 463 463 return in_gate_area(NULL, addr); 464 464 } 465 - #define is_gate_vma(vma) ((vma) = &gate_vma) 465 + #define is_gate_vma(vma) ((vma) == &gate_vma) 466 466 #else 467 467 #define is_gate_vma(vma) 0 468 468 #endif
+10
arch/arm/kernel/smp.c
··· 145 145 return -ENOSYS; 146 146 } 147 147 148 + int platform_can_cpu_hotplug(void) 149 + { 150 + #ifdef CONFIG_HOTPLUG_CPU 151 + if (smp_ops.cpu_kill) 152 + return 1; 153 + #endif 154 + 155 + return 0; 156 + } 157 + 148 158 #ifdef CONFIG_HOTPLUG_CPU 149 159 static void percpu_timer_stop(void); 150 160
+19 -7
arch/arm/kvm/coproc.c
··· 146 146 #define access_pmintenclr pm_fake 147 147 148 148 /* Architected CP15 registers. 149 - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 149 + * CRn denotes the primary register number, but is copied to the CRm in the 150 + * user space API for 64-bit register access in line with the terminology used 151 + * in the ARM ARM. 152 + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit 153 + * registers preceding 32-bit ones. 150 154 */ 151 155 static const struct coproc_reg cp15_regs[] = { 152 156 /* CSSELR: swapped by interrupt.S. */ ··· 158 154 NULL, reset_unknown, c0_CSSELR }, 159 155 160 156 /* TTBR0/TTBR1: swapped by interrupt.S. */ 161 - { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, 162 - { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, 157 + { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, 158 + { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, 163 159 164 160 /* TTBCR: swapped by interrupt.S.
*/ 165 161 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, ··· 186 182 NULL, reset_unknown, c6_IFAR }, 187 183 188 184 /* PAR swapped by interrupt.S */ 189 - { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, 185 + { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, 190 186 191 187 /* 192 188 * DC{C,I,CI}SW operations: ··· 403 399 | KVM_REG_ARM_OPC1_MASK)) 404 400 return false; 405 401 params->is_64bit = true; 406 - params->CRm = ((id & KVM_REG_ARM_CRM_MASK) 402 + /* CRm to CRn: see cp15_to_index for details */ 403 + params->CRn = ((id & KVM_REG_ARM_CRM_MASK) 407 404 >> KVM_REG_ARM_CRM_SHIFT); 408 405 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) 409 406 >> KVM_REG_ARM_OPC1_SHIFT); 410 407 params->Op2 = 0; 411 - params->CRn = 0; 408 + params->CRm = 0; 412 409 return true; 413 410 default: 414 411 return false; ··· 903 898 if (reg->is_64) { 904 899 val |= KVM_REG_SIZE_U64; 905 900 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 906 - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); 901 + /* 902 + * CRn always denotes the primary coproc. reg. nr. for the 903 + * in-kernel representation, but the user space API uses the 904 + * CRm for the encoding, because it is modelled after the 905 + * MRRC/MCRR instructions: see the ARM ARM rev. c page 906 + * B3-1445 907 + */ 908 + val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); 907 909 } else { 908 910 val |= KVM_REG_SIZE_U32; 909 911 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
+3
arch/arm/kvm/coproc.h
··· 135 135 return -1; 136 136 if (i1->CRn != i2->CRn) 137 137 return i1->CRn - i2->CRn; 138 + if (i1->is_64 != i2->is_64) 139 + return i2->is_64 - i1->is_64; 138 140 if (i1->CRm != i2->CRm) 139 141 return i1->CRm - i2->CRm; 140 142 if (i1->Op1 != i2->Op1) ··· 147 145 148 146 #define CRn(_x) .CRn = _x 149 147 #define CRm(_x) .CRm = _x 148 + #define CRm64(_x) .CRn = _x, .CRm = 0 150 149 #define Op1(_x) .Op1 = _x 151 150 #define Op2(_x) .Op2 = _x 152 151 #define is64 .is_64 = true
+5 -1
arch/arm/kvm/coproc_a15.c
··· 114 114 115 115 /* 116 116 * A15-specific CP15 registers. 117 - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 117 + * CRn denotes the primary register number, but is copied to the CRm in the 118 + * user space API for 64-bit register access in line with the terminology used 119 + * in the ARM ARM. 120 + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit 121 + * registers preceding 32-bit ones. 118 122 */ 119 123 static const struct coproc_reg a15_regs[] = { 120 124 /* MPIDR: we use VMPIDR for guest access. */
+2 -1
arch/arm/kvm/mmio.c
··· 63 63 static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 64 64 struct kvm_exit_mmio *mmio) 65 65 { 66 - unsigned long rt, len; 66 + unsigned long rt; 67 + int len; 67 68 bool is_write, sign_extend; 68 69 69 70 if (kvm_vcpu_dabt_isextabt(vcpu)) {
+15 -21
arch/arm/kvm/mmu.c
··· 85 85 return p; 86 86 } 87 87 88 + static bool page_empty(void *ptr) 89 + { 90 + struct page *ptr_page = virt_to_page(ptr); 91 + return page_count(ptr_page) == 1; 92 + } 93 + 88 94 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 89 95 { 90 96 pmd_t *pmd_table = pmd_offset(pud, 0); ··· 109 103 put_page(virt_to_page(pmd)); 110 104 } 111 105 112 - static bool pmd_empty(pmd_t *pmd) 113 - { 114 - struct page *pmd_page = virt_to_page(pmd); 115 - return page_count(pmd_page) == 1; 116 - } 117 - 118 106 static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) 119 107 { 120 108 if (pte_present(*pte)) { ··· 116 116 put_page(virt_to_page(pte)); 117 117 kvm_tlb_flush_vmid_ipa(kvm, addr); 118 118 } 119 - } 120 - 121 - static bool pte_empty(pte_t *pte) 122 - { 123 - struct page *pte_page = virt_to_page(pte); 124 - return page_count(pte_page) == 1; 125 - } 126 - 127 121 static void unmap_range(struct kvm *kvm, pgd_t *pgdp, ··· 126 132 pmd_t *pmd; 127 133 pte_t *pte; 128 134 unsigned long long addr = start, end = start + size; 129 - u64 range; 135 + u64 next; 130 136 131 137 while (addr < end) { 132 138 pgd = pgdp + pgd_index(addr); 133 139 pud = pud_offset(pgd, addr); 134 140 if (pud_none(*pud)) { 135 - addr += PUD_SIZE; 141 + addr = pud_addr_end(addr, end); 136 142 continue; 137 143 } 138 144 139 145 pmd = pmd_offset(pud, addr); 140 146 if (pmd_none(*pmd)) { 141 - addr += PMD_SIZE; 147 + addr = pmd_addr_end(addr, end); 142 148 continue; 143 149 } 144 150 145 151 pte = pte_offset_kernel(pmd, addr); 146 152 clear_pte_entry(kvm, pte, addr); 147 - range = PAGE_SIZE; 153 + next = addr + PAGE_SIZE; 148 154 149 155 /* If we emptied the pte, walk back up the ladder */ 150 - if (pte_empty(pte)) { 156 + if (page_empty(pte)) { 151 157 clear_pmd_entry(kvm, pmd, addr); 152 - range = PMD_SIZE; 153 - if (pmd_empty(pmd)) { 158 + next = pmd_addr_end(addr, end); 159 + if (page_empty(pmd) && !page_empty(pud)) { 154 160 clear_pud_entry(kvm, pud,
addr); 155 - range = PUD_SIZE; 161 + next = pud_addr_end(addr, end); 156 162 } 157 163 } 158 164 159 - addr += range; 165 + addr = next; 160 166 } 161 167 } 162 168
+2
arch/arm/mach-at91/at91sam9x5.c
··· 227 227 CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), 228 228 CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), 229 229 CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), 230 + CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk), 231 + CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk), 230 232 CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), 231 233 CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), 232 234 CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
+1
arch/arm/mach-davinci/board-dm355-leopard.c
··· 75 75 .parts = davinci_nand_partitions, 76 76 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 77 77 .ecc_mode = NAND_ECC_HW_SYNDROME, 78 + .ecc_bits = 4, 78 79 .bbt_options = NAND_BBT_USE_FLASH, 79 80 }; 80 81
+1
arch/arm/mach-davinci/board-dm644x-evm.c
··· 153 153 .parts = davinci_evm_nandflash_partition, 154 154 .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), 155 155 .ecc_mode = NAND_ECC_HW, 156 + .ecc_bits = 1, 156 157 .bbt_options = NAND_BBT_USE_FLASH, 157 158 .timing = &davinci_evm_nandflash_timing, 158 159 };
+1
arch/arm/mach-davinci/board-dm646x-evm.c
··· 90 90 .parts = davinci_nand_partitions, 91 91 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 92 92 .ecc_mode = NAND_ECC_HW, 93 + .ecc_bits = 1, 93 94 .options = 0, 94 95 }; 95 96
+1
arch/arm/mach-davinci/board-neuros-osd2.c
··· 88 88 .parts = davinci_ntosd2_nandflash_partition, 89 89 .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), 90 90 .ecc_mode = NAND_ECC_HW, 91 + .ecc_bits = 1, 91 92 .bbt_options = NAND_BBT_USE_FLASH, 92 93 }; 93 94
-4
arch/arm/mach-omap2/board-n8x0.c
··· 122 122 }; 123 123 124 124 static struct musb_hdrc_platform_data tusb_data = { 125 - #ifdef CONFIG_USB_GADGET_MUSB_HDRC 126 125 .mode = MUSB_OTG, 127 - #else 128 - .mode = MUSB_HOST, 129 - #endif 130 126 .set_power = tusb_set_power, 131 127 .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ 132 128 .power = 100, /* Max 100 mA VBUS for host mode */
+1 -1
arch/arm/mach-omap2/board-rx51.c
··· 85 85 86 86 static struct omap_musb_board_data musb_board_data = { 87 87 .interface_type = MUSB_INTERFACE_ULPI, 88 - .mode = MUSB_PERIPHERAL, 88 + .mode = MUSB_OTG, 89 89 .power = 0, 90 90 }; 91 91
+1 -4
arch/arm/mach-omap2/usb-musb.c
··· 38 38 }; 39 39 40 40 static struct musb_hdrc_platform_data musb_plat = { 41 - #ifdef CONFIG_USB_GADGET_MUSB_HDRC 42 41 .mode = MUSB_OTG, 43 - #else 44 - .mode = MUSB_HOST, 45 - #endif 42 + 46 43 /* .clock is set dynamically */ 47 44 .config = &musb_config, 48 45
+6 -3
arch/arm/mm/Kconfig
··· 809 809 the CPU type fitted to the system. This permits binaries to be 810 810 run on ARMv4 through to ARMv7 without modification. 811 811 812 + See Documentation/arm/kernel_user_helpers.txt for details. 813 + 812 814 However, the fixed address nature of these helpers can be used 813 815 by ROP (return orientated programming) authors when creating 814 816 exploits. 815 817 816 818 If all of the binaries and libraries which run on your platform 817 819 are built specifically for your platform, and make no use of 818 - these helpers, then you can turn this option off. However, 819 - when such an binary or library is run, it will receive a SIGILL 820 - signal, which will terminate the program. 820 + these helpers, then you can turn this option off to hinder 821 + such exploits. However, in that case, if a binary or library 822 + relying on those helpers is run, it will receive a SIGILL signal, 823 + which will terminate the program. 821 824 822 825 Say N here only if you are absolutely certain that you do not 823 826 need these helpers; otherwise, the safe option is to say Y.
+3 -2
arch/arm/plat-samsung/init.c
··· 55 55 56 56 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); 57 57 58 - if (cpu->map_io == NULL || cpu->init == NULL) { 58 + if (cpu->init == NULL) { 59 59 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); 60 60 panic("Unsupported Samsung CPU"); 61 61 } 62 62 63 - cpu->map_io(); 63 + if (cpu->map_io) 64 + cpu->map_io(); 64 65 } 65 66 66 67 /* s3c24xx_init_clocks
+1
arch/arm/xen/enlighten.c
··· 170 170 per_cpu(xen_vcpu, cpu) = vcpup; 171 171 172 172 enable_percpu_irq(xen_events_irq, 0); 173 + put_cpu(); 173 174 } 174 175 175 176 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
+10 -7
arch/arm64/include/asm/kvm_asm.h
··· 42 42 #define TPIDR_EL1 18 /* Thread ID, Privileged */ 43 43 #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ 44 44 #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ 45 + #define PAR_EL1 21 /* Physical Address Register */ 45 46 /* 32bit specific registers. Keep them at the end of the range */ 46 - #define DACR32_EL2 21 /* Domain Access Control Register */ 47 - #define IFSR32_EL2 22 /* Instruction Fault Status Register */ 48 - #define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ 49 - #define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ 50 - #define TEECR32_EL1 25 /* ThumbEE Configuration Register */ 51 - #define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ 52 - #define NR_SYS_REGS 27 47 + #define DACR32_EL2 22 /* Domain Access Control Register */ 48 + #define IFSR32_EL2 23 /* Instruction Fault Status Register */ 49 + #define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ 50 + #define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ 51 + #define TEECR32_EL1 26 /* ThumbEE Configuration Register */ 52 + #define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ 53 + #define NR_SYS_REGS 28 53 54 54 55 /* 32bit mapping */ 55 56 #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ ··· 70 69 #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ 71 70 #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ 72 71 #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ 72 + #define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ 73 + #define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ 73 74 #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ 74 75 #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ 75 76 #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
+1 -1
arch/arm64/include/asm/kvm_host.h
··· 129 129 struct kvm_mmu_memory_cache mmu_page_cache; 130 130 131 131 /* Target CPU and feature flags */ 132 - u32 target; 132 + int target; 133 133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); 134 134 135 135 /* Detect first run of a vcpu */
+5 -2
arch/arm64/include/asm/tlb.h
··· 35 35 struct mm_struct *mm; 36 36 unsigned int fullmm; 37 37 struct vm_area_struct *vma; 38 + unsigned long start, end; 38 39 unsigned long range_start; 39 40 unsigned long range_end; 40 41 unsigned int nr; ··· 98 97 } 99 98 100 99 static inline void 101 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) 100 + tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 102 101 { 103 102 tlb->mm = mm; 104 - tlb->fullmm = fullmm; 103 + tlb->fullmm = !(start | (end+1)); 104 + tlb->start = start; 105 + tlb->end = end; 105 106 tlb->vma = NULL; 106 107 tlb->max = ARRAY_SIZE(tlb->local); 107 108 tlb->pages = tlb->local;
+9 -1
arch/arm64/kernel/perf_event.c
··· 107 107 static int 108 108 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) 109 109 { 110 - int mapping = (*event_map)[config]; 110 + int mapping; 111 + 112 + if (config >= PERF_COUNT_HW_MAX) 113 + return -EINVAL; 114 + 115 + mapping = (*event_map)[config]; 111 116 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; 112 117 } 113 118 ··· 321 316 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 322 317 struct hw_perf_event fake_event = event->hw; 323 318 struct pmu *leader_pmu = event->group_leader->pmu; 319 + 320 + if (is_software_event(event)) 321 + return 1; 324 322 325 323 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) 326 324 return 1;
+13
arch/arm64/kvm/hyp.S
··· 214 214 mrs x21, tpidr_el1 215 215 mrs x22, amair_el1 216 216 mrs x23, cntkctl_el1 217 + mrs x24, par_el1 217 218 218 219 stp x4, x5, [x3] 219 220 stp x6, x7, [x3, #16] ··· 226 225 stp x18, x19, [x3, #112] 227 226 stp x20, x21, [x3, #128] 228 227 stp x22, x23, [x3, #144] 228 + str x24, [x3, #160] 229 229 .endm 230 230 231 231 .macro restore_sysregs ··· 245 243 ldp x18, x19, [x3, #112] 246 244 ldp x20, x21, [x3, #128] 247 245 ldp x22, x23, [x3, #144] 246 + ldr x24, [x3, #160] 248 247 249 248 msr vmpidr_el2, x4 250 249 msr csselr_el1, x5 ··· 267 264 msr tpidr_el1, x21 268 265 msr amair_el1, x22 269 266 msr cntkctl_el1, x23 267 + msr par_el1, x24 270 268 .endm 271 269 272 270 .macro skip_32bit_state tmp, target ··· 604 600 605 601 // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 606 602 ENTRY(__kvm_tlb_flush_vmid_ipa) 603 + dsb ishst 604 + 607 605 kern_hyp_va x0 608 606 ldr x2, [x0, #KVM_VTTBR] 609 607 msr vttbr_el2, x2 ··· 627 621 ENDPROC(__kvm_tlb_flush_vmid_ipa) 628 622 629 623 ENTRY(__kvm_flush_vm_context) 624 + dsb ishst 630 625 tlbi alle1is 631 626 ic ialluis 632 627 dsb sy ··· 760 753 */ 761 754 tbnz x1, #7, 1f // S1PTW is set 762 755 756 + /* Preserve PAR_EL1 */ 757 + mrs x3, par_el1 758 + push x3, xzr 759 + 763 760 /* 764 761 * Permission fault, HPFAR_EL2 is invalid. 765 762 * Resolve the IPA the hard way using the guest VA. ··· 777 766 778 767 /* Read result */ 779 768 mrs x3, par_el1 769 + pop x0, xzr // Restore PAR_EL1 from the stack 770 + msr par_el1, x0 780 771 tbnz x3, #0, 3f // Bail out if we failed the translation 781 772 ubfx x3, x3, #12, #36 // Extract IPA 782 773 lsl x3, x3, #4 // and present it like HPFAR
+3
arch/arm64/kvm/sys_regs.c
··· 211 211 /* FAR_EL1 */ 212 212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), 213 213 NULL, reset_unknown, FAR_EL1 }, 214 + /* PAR_EL1 */ 215 + { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), 216 + NULL, reset_unknown, PAR_EL1 }, 214 217 215 218 /* PMINTENSET_EL1 */ 216 219 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
+1
arch/hexagon/Kconfig
··· 158 158 endmenu 159 159 160 160 source "init/Kconfig" 161 + source "kernel/Kconfig.freezer" 161 162 source "drivers/Kconfig" 162 163 source "fs/Kconfig" 163 164
+6 -3
arch/ia64/include/asm/tlb.h
··· 22 22 * unmapping a portion of the virtual address space, these hooks are called according to 23 23 * the following template: 24 24 * 25 - * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM 25 + * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM 26 26 * { 27 27 * for each vma that needs a shootdown do { 28 28 * tlb_start_vma(tlb, vma); ··· 58 58 unsigned int max; 59 59 unsigned char fullmm; /* non-zero means full mm flush */ 60 60 unsigned char need_flush; /* really unmapped some PTEs? */ 61 + unsigned long start, end; 61 62 unsigned long start_addr; 62 63 unsigned long end_addr; 63 64 struct page **pages; ··· 156 155 157 156 158 157 static inline void 159 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 158 + tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 160 159 { 161 160 tlb->mm = mm; 162 161 tlb->max = ARRAY_SIZE(tlb->local); 163 162 tlb->pages = tlb->local; 164 163 tlb->nr = 0; 165 - tlb->fullmm = full_mm_flush; 164 + tlb->fullmm = !(start | (end+1)); 165 + tlb->start = start; 166 + tlb->end = end; 166 167 tlb->start_addr = ~0UL; 167 168 } 168 169
+19 -4
arch/m68k/emu/natfeat.c
··· 18 18 #include <asm/machdep.h> 19 19 #include <asm/natfeat.h> 20 20 21 + extern long nf_get_id2(const char *feature_name); 22 + 21 23 asm("\n" 22 - " .global nf_get_id,nf_call\n" 23 - "nf_get_id:\n" 24 + " .global nf_get_id2,nf_call\n" 25 + "nf_get_id2:\n" 24 26 " .short 0x7300\n" 25 27 " rts\n" 26 28 "nf_call:\n" ··· 31 29 "1: moveq.l #0,%d0\n" 32 30 " rts\n" 33 31 " .section __ex_table,\"a\"\n" 34 - " .long nf_get_id,1b\n" 32 + " .long nf_get_id2,1b\n" 35 33 " .long nf_call,1b\n" 36 34 " .previous"); 37 - EXPORT_SYMBOL_GPL(nf_get_id); 38 35 EXPORT_SYMBOL_GPL(nf_call); 36 + 37 + long nf_get_id(const char *feature_name) 38 + { 39 + /* feature_name may be in vmalloc()ed memory, so make a copy */ 40 + char name_copy[32]; 41 + size_t n; 42 + 43 + n = strlcpy(name_copy, feature_name, sizeof(name_copy)); 44 + if (n >= sizeof(name_copy)) 45 + return 0; 46 + 47 + return nf_get_id2(name_copy); 48 + } 49 + EXPORT_SYMBOL_GPL(nf_get_id); 39 50 40 51 void nfprint(const char *fmt, ...) 41 52 {
+5 -4
arch/m68k/include/asm/div64.h
··· 15 15 unsigned long long n64; \ 16 16 } __n; \ 17 17 unsigned long __rem, __upper; \ 18 + unsigned long __base = (base); \ 18 19 \ 19 20 __n.n64 = (n); \ 20 21 if ((__upper = __n.n32[0])) { \ 21 22 asm ("divul.l %2,%1:%0" \ 22 - : "=d" (__n.n32[0]), "=d" (__upper) \ 23 - : "d" (base), "0" (__n.n32[0])); \ 23 + : "=d" (__n.n32[0]), "=d" (__upper) \ 24 + : "d" (__base), "0" (__n.n32[0])); \ 24 25 } \ 25 26 asm ("divu.l %2,%1:%0" \ 26 - : "=d" (__n.n32[1]), "=d" (__rem) \ 27 - : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ 27 + : "=d" (__n.n32[1]), "=d" (__rem) \ 28 + : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \ 28 29 (n) = __n.n64; \ 29 30 __rem; \ 30 31 })
+1 -1
arch/microblaze/Kconfig
··· 28 28 select GENERIC_CLOCKEVENTS 29 29 select GENERIC_IDLE_POLL_SETUP 30 30 select MODULES_USE_ELF_RELA 31 - select CLONE_BACKWARDS 31 + select CLONE_BACKWARDS3 32 32 33 33 config SWAP 34 34 def_bool n
+26
arch/mips/math-emu/cp1emu.c
··· 803 803 dec_insn.next_pc_inc; 804 804 return 1; 805 805 break; 806 + #ifdef CONFIG_CPU_CAVIUM_OCTEON 807 + case lwc2_op: /* This is bbit0 on Octeon */ 808 + if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) 809 + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); 810 + else 811 + *contpc = regs->cp0_epc + 8; 812 + return 1; 813 + case ldc2_op: /* This is bbit032 on Octeon */ 814 + if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) 815 + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); 816 + else 817 + *contpc = regs->cp0_epc + 8; 818 + return 1; 819 + case swc2_op: /* This is bbit1 on Octeon */ 820 + if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) 821 + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); 822 + else 823 + *contpc = regs->cp0_epc + 8; 824 + return 1; 825 + case sdc2_op: /* This is bbit132 on Octeon */ 826 + if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) 827 + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); 828 + else 829 + *contpc = regs->cp0_epc + 8; 830 + return 1; 831 + #endif 806 832 case cop0_op: 807 833 case cop1_op: 808 834 case cop2_op:
+1
arch/openrisc/Kconfig
··· 55 55 56 56 source "init/Kconfig" 57 57 58 + source "kernel/Kconfig.freezer" 58 59 59 60 menu "Processor type and features" 60 61
+1
arch/powerpc/Kconfig
··· 979 979 must live at a different physical address than the primary 980 980 kernel. 981 981 982 + # This value must have zeroes in the bottom 60 bits otherwise lots will break 982 983 config PAGE_OFFSET 983 984 hex 984 985 default "0xc000000000000000"
+10
arch/powerpc/include/asm/page.h
··· 211 211 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) 212 212 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) 213 213 #else 214 + #ifdef CONFIG_PPC64 215 + /* 216 + * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET 217 + * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. 218 + */ 219 + #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET)) 220 + #define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL) 221 + 222 + #else /* 32-bit, non book E */ 214 223 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) 215 224 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) 225 + #endif 216 226 #endif 217 227 218 228 /*
+9 -13
arch/powerpc/kernel/lparcfg.c
··· 35 35 #include <asm/vdso_datapage.h> 36 36 #include <asm/vio.h> 37 37 #include <asm/mmu.h> 38 + #include <asm/machdep.h> 38 39 40 + 41 + /* 42 + * This isn't a module but we expose that to userspace 43 + * via /proc so leave the definitions here 44 + */ 39 45 #define MODULE_VERS "1.9" 40 46 #define MODULE_NAME "lparcfg" 41 47 ··· 424 418 { 425 419 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 426 420 427 - if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) 421 + if (firmware_has_feature(FW_FEATURE_LPAR) && 422 + plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) 428 423 seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); 429 424 } 430 425 ··· 684 677 } 685 678 686 679 static const struct file_operations lparcfg_fops = { 687 - .owner = THIS_MODULE, 688 680 .read = seq_read, 689 681 .write = lparcfg_write, 690 682 .open = lparcfg_open, ··· 705 699 } 706 700 return 0; 707 701 } 708 - 709 - static void __exit lparcfg_cleanup(void) 710 - { 711 - remove_proc_subtree("powerpc/lparcfg", NULL); 712 - } 713 - 714 - module_init(lparcfg_init); 715 - module_exit(lparcfg_cleanup); 716 - MODULE_DESCRIPTION("Interface for LPAR configuration data"); 717 - MODULE_AUTHOR("Dave Engebretsen"); 718 - MODULE_LICENSE("GPL"); 702 + machine_device_initcall(pseries, lparcfg_init);
+6 -2
arch/s390/include/asm/tlb.h
··· 32 32 struct mm_struct *mm; 33 33 struct mmu_table_batch *batch; 34 34 unsigned int fullmm; 35 + unsigned long start, end; 35 36 }; 36 37 37 38 struct mmu_table_batch { ··· 49 48 50 49 static inline void tlb_gather_mmu(struct mmu_gather *tlb, 51 50 struct mm_struct *mm, 52 - unsigned int full_mm_flush) 51 + unsigned long start, 52 + unsigned long end) 53 53 { 54 54 tlb->mm = mm; 55 - tlb->fullmm = full_mm_flush; 55 + tlb->start = start; 56 + tlb->end = end; 57 + tlb->fullmm = !(start | (end+1)); 56 58 tlb->batch = NULL; 57 59 if (tlb->fullmm) 58 60 __tlb_flush_mm(mm);
+2
arch/score/Kconfig
··· 87 87 88 88 source "init/Kconfig" 89 89 90 + source "kernel/Kconfig.freezer" 91 + 90 92 config MMU 91 93 def_bool y 92 94
+4 -2
arch/sh/include/asm/tlb.h
··· 36 36 } 37 37 38 38 static inline void 39 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 39 + tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 40 40 { 41 41 tlb->mm = mm; 42 - tlb->fullmm = full_mm_flush; 42 + tlb->start = start; 43 + tlb->end = end; 44 + tlb->fullmm = !(start | (end+1)); 43 45 44 46 init_tlb_gather(tlb); 45 47 }
+4 -2
arch/um/include/asm/tlb.h
··· 45 45 } 46 46 47 47 static inline void 48 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 48 + tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 49 49 { 50 50 tlb->mm = mm; 51 - tlb->fullmm = full_mm_flush; 51 + tlb->start = start; 52 + tlb->end = end; 53 + tlb->fullmm = !(start | (end+1)); 52 54 53 55 init_tlb_gather(tlb); 54 56 }
+2 -2
arch/x86/include/asm/bootparam_utils.h
··· 35 35 */ 36 36 if (boot_params->sentinel) { 37 37 /* fields in boot_params are left uninitialized, clear them */ 38 - memset(&boot_params->olpc_ofw_header, 0, 38 + memset(&boot_params->ext_ramdisk_image, 0, 39 39 (char *)&boot_params->efi_info - 40 - (char *)&boot_params->olpc_ofw_header); 40 + (char *)&boot_params->ext_ramdisk_image); 41 41 memset(&boot_params->kbd_status, 0, 42 42 (char *)&boot_params->hdr - 43 43 (char *)&boot_params->kbd_status);
+1 -1
arch/x86/include/asm/microcode_amd.h
··· 59 59 60 60 extern int __apply_microcode_amd(struct microcode_amd *mc_amd); 61 61 extern int apply_microcode_amd(int cpu); 62 - extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); 62 + extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); 63 63 64 64 #ifdef CONFIG_MICROCODE_AMD_EARLY 65 65 #ifdef CONFIG_X86_32
+47 -1
arch/x86/include/asm/pgtable-2level.h
··· 55 55 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) 56 56 #endif 57 57 58 + #ifdef CONFIG_MEM_SOFT_DIRTY 59 + 60 + /* 61 + * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and 62 + * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset 63 + * into this range. 64 + */ 65 + #define PTE_FILE_MAX_BITS 28 66 + #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) 67 + #define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) 68 + #define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) 69 + #define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1) 70 + #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) 71 + #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) 72 + #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) 73 + 74 + #define pte_to_pgoff(pte) \ 75 + ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ 76 + & ((1U << PTE_FILE_BITS1) - 1))) \ 77 + + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ 78 + & ((1U << PTE_FILE_BITS2) - 1)) \ 79 + << (PTE_FILE_BITS1)) \ 80 + + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ 81 + & ((1U << PTE_FILE_BITS3) - 1)) \ 82 + << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ 83 + + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ 84 + << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) 85 + 86 + #define pgoff_to_pte(off) \ 87 + ((pte_t) { .pte_low = \ 88 + ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ 89 + + ((((off) >> PTE_FILE_BITS1) \ 90 + & ((1U << PTE_FILE_BITS2) - 1)) \ 91 + << PTE_FILE_SHIFT2) \ 92 + + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ 93 + & ((1U << PTE_FILE_BITS3) - 1)) \ 94 + << PTE_FILE_SHIFT3) \ 95 + + ((((off) >> \ 96 + (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ 97 + << PTE_FILE_SHIFT4) \ 98 + + _PAGE_FILE }) 99 + 100 + #else /* CONFIG_MEM_SOFT_DIRTY */ 101 + 58 102 /* 59 103 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, 60 - * split up the 29 bits of offset into this range: 104 + * split up the 29 bits of offset into this range. 
61 105 */ 62 106 #define PTE_FILE_MAX_BITS 29 63 107 #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) ··· 131 87 + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ 132 88 << PTE_FILE_SHIFT3) \ 133 89 + _PAGE_FILE }) 90 + 91 + #endif /* CONFIG_MEM_SOFT_DIRTY */ 134 92 135 93 /* Encode and de-code a swap entry */ 136 94 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
+3
arch/x86/include/asm/pgtable-3level.h
··· 179 179 /* 180 180 * Bits 0, 6 and 7 are taken in the low part of the pte, 181 181 * put the 32 bits of offset into the high part. 182 + * 183 + * For soft-dirty tracking 11 bit is taken from 184 + * the low part of pte as well. 182 185 */ 183 186 #define pte_to_pgoff(pte) ((pte).pte_high) 184 187 #define pgoff_to_pte(off) \
+30
arch/x86/include/asm/pgtable.h
··· 314 314 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); 315 315 } 316 316 317 + static inline pte_t pte_swp_mksoft_dirty(pte_t pte) 318 + { 319 + return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); 320 + } 321 + 322 + static inline int pte_swp_soft_dirty(pte_t pte) 323 + { 324 + return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; 325 + } 326 + 327 + static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) 328 + { 329 + return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); 330 + } 331 + 332 + static inline pte_t pte_file_clear_soft_dirty(pte_t pte) 333 + { 334 + return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); 335 + } 336 + 337 + static inline pte_t pte_file_mksoft_dirty(pte_t pte) 338 + { 339 + return pte_set_flags(pte, _PAGE_SOFT_DIRTY); 340 + } 341 + 342 + static inline int pte_file_soft_dirty(pte_t pte) 343 + { 344 + return pte_flags(pte) & _PAGE_SOFT_DIRTY; 345 + } 346 + 317 347 /* 318 348 * Mask out unsupported bits in a present pgprot. Non-present pgprots 319 349 * can use those bits for other purposes, so leave them be.
+16 -1
arch/x86/include/asm/pgtable_types.h
··· 61 61 * they do not conflict with each other. 62 62 */ 63 63 64 + #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN 65 + 64 66 #ifdef CONFIG_MEM_SOFT_DIRTY 65 - #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) 67 + #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) 66 68 #else 67 69 #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) 70 + #endif 71 + 72 + /* 73 + * Tracking soft dirty bit when a page goes to a swap is tricky. 74 + * We need a bit which can be stored in pte _and_ not conflict 75 + * with swap entry format. On x86 bits 6 and 7 are *not* involved 76 + * into swap entry computation, but bit 6 is used for nonlinear 77 + * file mapping, so we borrow bit 7 for soft dirty tracking. 78 + */ 79 + #ifdef CONFIG_MEM_SOFT_DIRTY 80 + #define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE 81 + #else 82 + #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) 68 83 #endif 69 84 70 85 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-4
arch/x86/include/asm/spinlock.h
··· 233 233 #define arch_read_relax(lock) cpu_relax() 234 234 #define arch_write_relax(lock) cpu_relax() 235 235 236 - /* The {read|write|spin}_lock() on x86 are full memory barriers. */ 237 - static inline void smp_mb__after_lock(void) { } 238 - #define ARCH_HAS_SMP_MB_AFTER_LOCK 239 - 240 236 #endif /* _ASM_X86_SPINLOCK_H */
+5 -15
arch/x86/kernel/cpu/amd.c
··· 512 512 513 513 static const int amd_erratum_383[]; 514 514 static const int amd_erratum_400[]; 515 - static bool cpu_has_amd_erratum(const int *erratum); 515 + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); 516 516 517 517 static void init_amd(struct cpuinfo_x86 *c) 518 518 { ··· 729 729 value &= ~(1ULL << 24); 730 730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value); 731 731 732 - if (cpu_has_amd_erratum(amd_erratum_383)) 732 + if (cpu_has_amd_erratum(c, amd_erratum_383)) 733 733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 734 734 } 735 735 736 - if (cpu_has_amd_erratum(amd_erratum_400)) 736 + if (cpu_has_amd_erratum(c, amd_erratum_400)) 737 737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); 738 738 739 739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); ··· 878 878 static const int amd_erratum_383[] = 879 879 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 880 880 881 - static bool cpu_has_amd_erratum(const int *erratum) 881 + 882 + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) 882 883 { 883 - struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); 884 884 int osvw_id = *erratum++; 885 885 u32 range; 886 886 u32 ms; 887 - 888 - /* 889 - * If called early enough that current_cpu_data hasn't been initialized 890 - * yet, fall back to boot_cpu_data. 891 - */ 892 - if (cpu->x86 == 0) 893 - cpu = &boot_cpu_data; 894 - 895 - if (cpu->x86_vendor != X86_VENDOR_AMD) 896 - return false; 897 887 898 888 if (osvw_id >= 0 && osvw_id < 65536 && 899 889 cpu_has(cpu, X86_FEATURE_OSVW)) {
+1
arch/x86/kernel/cpu/perf_event_intel.c
··· 2270 2270 case 70: 2271 2271 case 71: 2272 2272 case 63: 2273 + case 69: 2273 2274 x86_pmu.late_ack = true; 2274 2275 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 2275 2276 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+2 -2
arch/x86/kernel/cpu/perf_event_intel_uncore.c
··· 352 352 static struct uncore_event_desc snbep_uncore_qpi_events[] = { 353 353 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), 354 354 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), 355 - INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), 356 - INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), 355 + INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), 356 + INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), 357 357 { /* end: all zeroes */ }, 358 358 }; 359 359
+13 -14
arch/x86/kernel/microcode_amd.c
··· 145 145 return 0; 146 146 } 147 147 148 - static unsigned int verify_patch_size(int cpu, u32 patch_size, 148 + static unsigned int verify_patch_size(u8 family, u32 patch_size, 149 149 unsigned int size) 150 150 { 151 - struct cpuinfo_x86 *c = &cpu_data(cpu); 152 151 u32 max_size; 153 152 154 153 #define F1XH_MPB_MAX_SIZE 2048 ··· 155 156 #define F15H_MPB_MAX_SIZE 4096 156 157 #define F16H_MPB_MAX_SIZE 3458 157 158 158 - switch (c->x86) { 159 + switch (family) { 159 160 case 0x14: 160 161 max_size = F14H_MPB_MAX_SIZE; 161 162 break; ··· 276 277 * driver cannot continue functioning normally. In such cases, we tear 277 278 * down everything we've used up so far and exit. 278 279 */ 279 - static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) 280 + static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) 280 281 { 281 - struct cpuinfo_x86 *c = &cpu_data(cpu); 282 282 struct microcode_header_amd *mc_hdr; 283 283 struct ucode_patch *patch; 284 284 unsigned int patch_size, crnt_size, ret; ··· 297 299 298 300 /* check if patch is for the current family */ 299 301 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); 300 - if (proc_fam != c->x86) 302 + if (proc_fam != family) 301 303 return crnt_size; 302 304 303 305 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { ··· 306 308 return crnt_size; 307 309 } 308 310 309 - ret = verify_patch_size(cpu, patch_size, leftover); 311 + ret = verify_patch_size(family, patch_size, leftover); 310 312 if (!ret) { 311 313 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); 312 314 return crnt_size; ··· 337 339 return crnt_size; 338 340 } 339 341 340 - static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) 342 + static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, 343 + size_t size) 341 344 { 342 345 enum ucode_state ret = UCODE_ERROR; 343 346 unsigned int leftover; ··· 361 362 } 362 363 363 364 while (leftover) { 364 - crnt_size 
= verify_and_add_patch(cpu, fw, leftover); 365 + crnt_size = verify_and_add_patch(family, fw, leftover); 365 366 if (crnt_size < 0) 366 367 return ret; 367 368 ··· 372 373 return UCODE_OK; 373 374 } 374 375 375 - enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) 376 + enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) 376 377 { 377 378 enum ucode_state ret; 378 379 379 380 /* free old equiv table */ 380 381 free_equiv_cpu_table(); 381 382 382 - ret = __load_microcode_amd(cpu, data, size); 383 + ret = __load_microcode_amd(family, data, size); 383 384 384 385 if (ret != UCODE_OK) 385 386 cleanup(); 386 387 387 388 #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) 388 389 /* save BSP's matching patch for early load */ 389 - if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 390 - struct ucode_patch *p = find_patch(cpu); 390 + if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { 391 + struct ucode_patch *p = find_patch(smp_processor_id()); 391 392 if (p) { 392 393 memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); 393 394 memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), ··· 440 441 goto fw_release; 441 442 } 442 443 443 - ret = load_microcode_amd(cpu, fw->data, fw->size); 444 + ret = load_microcode_amd(c->x86, fw->data, fw->size); 444 445 445 446 fw_release: 446 447 release_firmware(fw);
+13 -14
arch/x86/kernel/microcode_amd_early.c
··· 238 238 uci->cpu_sig.sig = cpuid_eax(0x00000001); 239 239 } 240 240 #else 241 - static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 242 - struct ucode_cpu_info *uci) 241 + void load_ucode_amd_ap(void) 243 242 { 243 + unsigned int cpu = smp_processor_id(); 244 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 244 245 u32 rev, eax; 245 246 246 247 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); 247 248 eax = cpuid_eax(0x00000001); 248 249 249 - uci->cpu_sig.sig = eax; 250 250 uci->cpu_sig.rev = rev; 251 - c->microcode = rev; 252 - c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 253 - } 254 - 255 - void load_ucode_amd_ap(void) 256 - { 257 - unsigned int cpu = smp_processor_id(); 258 - 259 - collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); 251 + uci->cpu_sig.sig = eax; 260 252 261 253 if (cpu && !ucode_loaded) { 262 254 void *ucode; ··· 257 265 return; 258 266 259 267 ucode = (void *)(initrd_start + ucode_offset); 260 - if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) 268 + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 269 + if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) 261 270 return; 271 + 262 272 ucode_loaded = true; 263 273 } 264 274 ··· 272 278 { 273 279 enum ucode_state ret; 274 280 void *ucode; 281 + u32 eax; 282 + 275 283 #ifdef CONFIG_X86_32 276 284 unsigned int bsp = boot_cpu_data.cpu_index; 277 285 struct ucode_cpu_info *uci = ucode_cpu_info + bsp; ··· 289 293 return 0; 290 294 291 295 ucode = (void *)(initrd_start + ucode_offset); 292 - ret = load_microcode_amd(0, ucode, ucode_size); 296 + eax = cpuid_eax(0x00000001); 297 + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 298 + 299 + ret = load_microcode_amd(eax, ucode, ucode_size); 293 300 if (ret != UCODE_OK) 294 301 return -EINVAL; 295 302
+1 -1
arch/x86/kernel/sys_x86_64.c
··· 101 101 *begin = new_begin; 102 102 } 103 103 } else { 104 - *begin = TASK_UNMAPPED_BASE; 104 + *begin = current->mm->mmap_legacy_base; 105 105 *end = TASK_SIZE; 106 106 } 107 107 }
+4 -2
arch/x86/mm/mmap.c
··· 112 112 */ 113 113 void arch_pick_mmap_layout(struct mm_struct *mm) 114 114 { 115 + mm->mmap_legacy_base = mmap_legacy_base(); 116 + mm->mmap_base = mmap_base(); 117 + 115 118 if (mmap_is_legacy()) { 116 - mm->mmap_base = mmap_legacy_base(); 119 + mm->mmap_base = mm->mmap_legacy_base; 117 120 mm->get_unmapped_area = arch_get_unmapped_area; 118 121 } else { 119 - mm->mmap_base = mmap_base(); 120 122 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 121 123 } 122 124 }
+22
arch/x86/xen/setup.c
··· 313 313 e820_add_region(start, end - start, type); 314 314 } 315 315 316 + void xen_ignore_unusable(struct e820entry *list, size_t map_size) 317 + { 318 + struct e820entry *entry; 319 + unsigned int i; 320 + 321 + for (i = 0, entry = list; i < map_size; i++, entry++) { 322 + if (entry->type == E820_UNUSABLE) 323 + entry->type = E820_RAM; 324 + } 325 + } 326 + 316 327 /** 317 328 * machine_specific_memory_setup - Hook for machine specific memory setup. 318 329 **/ ··· 363 352 rc = 0; 364 353 } 365 354 BUG_ON(rc); 355 + 356 + /* 357 + * Xen won't allow a 1:1 mapping to be created to UNUSABLE 358 + * regions, so if we're using the machine memory map leave the 359 + * region as RAM as it is in the pseudo-physical map. 360 + * 361 + * UNUSABLE regions in domUs are not handled and will need 362 + * a patch in the future. 363 + */ 364 + if (xen_initial_domain()) 365 + xen_ignore_unusable(map, memmap.nr_entries); 366 366 367 367 /* Make sure the Xen-supplied memory map is well-ordered. */ 368 368 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+9 -2
arch/x86/xen/smp.c
··· 694 694 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 695 695 { 696 696 int rc; 697 - rc = native_cpu_up(cpu, tidle); 698 - WARN_ON (xen_smp_intr_init(cpu)); 697 + /* 698 + * xen_smp_intr_init() needs to run before native_cpu_up() 699 + * so that IPI vectors are set up on the booting CPU before 700 + * it is marked online in native_cpu_up(). 701 + */ 702 + rc = xen_smp_intr_init(cpu); 703 + WARN_ON(rc); 704 + if (!rc) 705 + rc = native_cpu_up(cpu, tidle); 699 706 return rc; 700 707 } 701 708
+3 -8
drivers/acpi/video.c
··· 908 908 device->cap._DDC = 1; 909 909 } 910 910 911 - if (acpi_video_init_brightness(device)) 912 - return; 913 - 914 911 if (acpi_video_backlight_support()) { 915 912 struct backlight_properties props; 916 913 struct pci_dev *pdev; ··· 917 920 static int count = 0; 918 921 char *name; 919 922 923 + result = acpi_video_init_brightness(device); 924 + if (result) 925 + return; 920 926 name = kasprintf(GFP_KERNEL, "acpi_video%d", count); 921 927 if (!name) 922 928 return; ··· 979 979 if (result) 980 980 printk(KERN_ERR PREFIX "Create sysfs link\n"); 981 981 982 - } else { 983 - /* Remove the brightness object. */ 984 - kfree(device->brightness->levels); 985 - kfree(device->brightness); 986 - device->brightness = NULL; 987 982 } 988 983 } 989 984
+6 -6
drivers/ata/libata-pmp.c
··· 289 289 290 290 /* Disable sending Early R_OK. 291 291 * With "cached read" HDD testing and multiple ports busy on a SATA 292 - * host controller, 3726 PMP will very rarely drop a deferred 292 + * host controller, 3x26 PMP will very rarely drop a deferred 293 293 * R_OK that was intended for the host. Symptom will be all 294 294 * 5 drives under test will timeout, get reset, and recover. 295 295 */ 296 - if (vendor == 0x1095 && devid == 0x3726) { 296 + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { 297 297 u32 reg; 298 298 299 299 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg); 300 300 if (err_mask) { 301 301 rc = -EIO; 302 - reason = "failed to read Sil3726 Private Register"; 302 + reason = "failed to read Sil3x26 Private Register"; 303 303 goto fail; 304 304 } 305 305 reg &= ~0x1; 306 306 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); 307 307 if (err_mask) { 308 308 rc = -EIO; 309 - reason = "failed to write Sil3726 Private Register"; 309 + reason = "failed to write Sil3x26 Private Register"; 310 310 goto fail; 311 311 } 312 312 } ··· 383 383 u16 devid = sata_pmp_gscr_devid(gscr); 384 384 struct ata_link *link; 385 385 386 - if (vendor == 0x1095 && devid == 0x3726) { 387 - /* sil3726 quirks */ 386 + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { 387 + /* sil3x26 quirks */ 388 388 ata_for_each_link(link, ap, EDGE) { 389 389 /* link reports offline after LPM */ 390 390 link->flags |= ATA_LFLAG_NO_LPM;
+3 -2
drivers/ata/sata_fsl.c
··· 293 293 { 294 294 struct sata_fsl_host_priv *host_priv = host->private_data; 295 295 void __iomem *hcr_base = host_priv->hcr_base; 296 + unsigned long flags; 296 297 297 298 if (count > ICC_MAX_INT_COUNT_THRESHOLD) 298 299 count = ICC_MAX_INT_COUNT_THRESHOLD; ··· 306 305 (count > ICC_MIN_INT_COUNT_THRESHOLD)) 307 306 ticks = ICC_SAFE_INT_TICKS; 308 307 309 - spin_lock(&host->lock); 308 + spin_lock_irqsave(&host->lock, flags); 310 309 iowrite32((count << 24 | ticks), hcr_base + ICC); 311 310 312 311 intr_coalescing_count = count; 313 312 intr_coalescing_ticks = ticks; 314 - spin_unlock(&host->lock); 313 + spin_unlock_irqrestore(&host->lock, flags); 315 314 316 315 DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", 317 316 intr_coalescing_count, intr_coalescing_ticks);
+2 -2
drivers/ata/sata_highbank.c
··· 86 86 87 87 #define SGPIO_SIGNALS 3 88 88 #define ECX_ACTIVITY_BITS 0x300000 89 - #define ECX_ACTIVITY_SHIFT 2 89 + #define ECX_ACTIVITY_SHIFT 0 90 90 #define ECX_LOCATE_BITS 0x80000 91 91 #define ECX_LOCATE_SHIFT 1 92 92 #define ECX_FAULT_BITS 0x400000 93 - #define ECX_FAULT_SHIFT 0 93 + #define ECX_FAULT_SHIFT 2 94 94 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, 95 95 u32 shift) 96 96 {
+2
drivers/base/memory.c
··· 141 141 container_of(dev, struct memory_block, dev); 142 142 143 143 for (i = 0; i < sections_per_block; i++) { 144 + if (!present_section_nr(mem->start_section_nr + i)) 145 + continue; 144 146 pfn = section_nr_to_pfn(mem->start_section_nr + i); 145 147 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); 146 148 }
+1 -1
drivers/base/regmap/regcache-rbtree.c
··· 332 332 } 333 333 334 334 if (!rbnode->blklen) { 335 - rbnode->blklen = sizeof(*rbnode); 335 + rbnode->blklen = 1; 336 336 rbnode->base_reg = reg; 337 337 } 338 338
+7 -10
drivers/block/aoe/aoecmd.c
··· 906 906 int i; 907 907 908 908 bio_for_each_segment(bv, bio, i) { 909 - page = bv->bv_page; 910 909 /* Non-zero page count for non-head members of 911 - * compound pages is no longer allowed by the kernel, 912 - * but this has never been seen here. 910 + * compound pages is no longer allowed by the kernel. 913 911 */ 914 - if (unlikely(PageCompound(page))) 915 - if (compound_trans_head(page) != page) { 916 - pr_crit("page tail used for block I/O\n"); 917 - BUG(); 918 - } 912 + page = compound_trans_head(bv->bv_page); 919 913 atomic_inc(&page->_count); 920 914 } 921 915 } ··· 918 924 bio_pagedec(struct bio *bio) 919 925 { 920 926 struct bio_vec *bv; 927 + struct page *page; 921 928 int i; 922 929 923 - bio_for_each_segment(bv, bio, i) 924 - atomic_dec(&bv->bv_page->_count); 930 + bio_for_each_segment(bv, bio, i) { 931 + page = compound_trans_head(bv->bv_page); 932 + atomic_dec(&page->_count); 933 + } 925 934 } 926 935 927 936 static void
+34 -30
drivers/clk/samsung/clk-exynos4.c
··· 581 581 DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), 582 582 DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), 583 583 DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), 584 - DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), 585 - DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), 584 + DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3, 585 + CLK_GET_RATE_NOCACHE, 0), 586 + DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3, 587 + CLK_GET_RATE_NOCACHE, 0), 586 588 DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), 587 - DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), 588 - DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), 589 + DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 590 + 4, 3, CLK_GET_RATE_NOCACHE, 0), 591 + DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 592 + 8, 3, CLK_GET_RATE_NOCACHE, 0), 589 593 DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), 590 594 }; 591 595 ··· 867 863 GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", 868 864 E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), 869 865 GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, 870 - CLK_IGNORE_UNUSED, 0), 866 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 871 867 GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, 872 - CLK_IGNORE_UNUSED, 0), 868 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 873 869 GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, 874 - CLK_IGNORE_UNUSED, 0), 870 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 875 871 GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, 876 - CLK_IGNORE_UNUSED, 0), 872 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 877 873 GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, 878 - CLK_IGNORE_UNUSED, 0), 874 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 879 875 GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, 880 - CLK_IGNORE_UNUSED, 
0), 876 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 881 877 GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, 882 - CLK_IGNORE_UNUSED, 0), 878 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 883 879 GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, 884 - CLK_IGNORE_UNUSED, 0), 880 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 885 881 GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, 886 - CLK_IGNORE_UNUSED, 0), 882 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 887 883 GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, 888 - CLK_IGNORE_UNUSED, 0), 884 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 889 885 GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, 890 - CLK_IGNORE_UNUSED, 0), 886 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 891 887 GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, 892 - CLK_IGNORE_UNUSED, 0), 888 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 893 889 GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, 894 - CLK_IGNORE_UNUSED, 0), 890 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 895 891 GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, 896 - CLK_IGNORE_UNUSED, 0), 892 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 897 893 GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, 898 - CLK_IGNORE_UNUSED, 0), 894 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 899 895 GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, 900 - CLK_IGNORE_UNUSED, 0), 896 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 901 897 GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, 902 - CLK_IGNORE_UNUSED, 0), 898 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 903 899 GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, 904 - CLK_IGNORE_UNUSED, 0), 900 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 905 901 GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, 906 - CLK_IGNORE_UNUSED, 0), 902 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 907 903 
GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, 908 - CLK_IGNORE_UNUSED, 0), 904 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 909 905 GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, 910 - CLK_IGNORE_UNUSED, 0), 906 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 911 907 GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, 912 - CLK_IGNORE_UNUSED, 0), 908 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 913 909 GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, 914 - CLK_IGNORE_UNUSED, 0), 910 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 915 911 GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, 916 - CLK_IGNORE_UNUSED, 0), 912 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 917 913 GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, 918 - CLK_IGNORE_UNUSED, 0), 914 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 919 915 GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, 920 - CLK_IGNORE_UNUSED, 0), 916 + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), 921 917 GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), 922 918 }; 923 919
+8 -5
drivers/clk/zynq/clkc.c
··· 71 71 static DEFINE_SPINLOCK(ddrpll_lock); 72 72 static DEFINE_SPINLOCK(iopll_lock); 73 73 static DEFINE_SPINLOCK(armclk_lock); 74 + static DEFINE_SPINLOCK(swdtclk_lock); 74 75 static DEFINE_SPINLOCK(ddrclk_lock); 75 76 static DEFINE_SPINLOCK(dciclk_lock); 76 77 static DEFINE_SPINLOCK(gem0clk_lock); ··· 294 293 } 295 294 clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], 296 295 swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, 297 - SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); 296 + SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock); 298 297 299 298 /* DDR clocks */ 300 299 clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, ··· 365 364 CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, 366 365 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, 367 366 &gem0clk_lock); 368 - clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, 369 - SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); 367 + clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 368 + CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0, 369 + &gem0clk_lock); 370 370 clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], 371 371 "gem0_emio_mux", CLK_SET_RATE_PARENT, 372 372 SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); ··· 388 386 CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, 389 387 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, 390 388 &gem1clk_lock); 391 - clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0, 392 - SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); 389 + clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 390 + CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0, 391 + &gem1clk_lock); 393 392 clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], 394 393 "gem1_emio_mux", CLK_SET_RATE_PARENT, 395 394 SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
+2 -1
drivers/gpu/drm/gma500/psb_intel_sdvo.c
··· 500 500 &status)) 501 501 goto log_fail; 502 502 503 - while (status == SDVO_CMD_STATUS_PENDING && retry--) { 503 + while ((status == SDVO_CMD_STATUS_PENDING || 504 + status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) { 504 505 udelay(15); 505 506 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, 506 507 SDVO_I2C_CMD_STATUS,
+8
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 85 85 struct sg_table *sg, 86 86 enum dma_data_direction dir) 87 87 { 88 + struct drm_i915_gem_object *obj = attachment->dmabuf->priv; 89 + 90 + mutex_lock(&obj->base.dev->struct_mutex); 91 + 88 92 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); 89 93 sg_free_table(sg); 90 94 kfree(sg); 95 + 96 + i915_gem_object_unpin_pages(obj); 97 + 98 + mutex_unlock(&obj->base.dev->struct_mutex); 91 99 } 92 100 93 101 static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+2
drivers/gpu/drm/i915/i915_reg.h
··· 752 752 will not assert AGPBUSY# and will only 753 753 be delivered when out of C3. */ 754 754 #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ 755 + #define INSTPM_TLB_INVALIDATE (1<<9) 756 + #define INSTPM_SYNC_FLUSH (1<<5) 755 757 #define ACTHD 0x020c8 756 758 #define FW_BLC 0x020d8 757 759 #define FW_BLC2 0x020dc
+57 -29
drivers/gpu/drm/i915/intel_display.c
··· 10042 10042 10043 10043 u32 power_well_driver; 10044 10044 10045 + int num_transcoders; 10046 + 10045 10047 struct intel_cursor_error_state { 10046 10048 u32 control; 10047 10049 u32 position; ··· 10052 10050 } cursor[I915_MAX_PIPES]; 10053 10051 10054 10052 struct intel_pipe_error_state { 10055 - enum transcoder cpu_transcoder; 10056 - u32 conf; 10057 10053 u32 source; 10058 - 10059 - u32 htotal; 10060 - u32 hblank; 10061 - u32 hsync; 10062 - u32 vtotal; 10063 - u32 vblank; 10064 - u32 vsync; 10065 10054 } pipe[I915_MAX_PIPES]; 10066 10055 10067 10056 struct intel_plane_error_state { ··· 10064 10071 u32 surface; 10065 10072 u32 tile_offset; 10066 10073 } plane[I915_MAX_PIPES]; 10074 + 10075 + struct intel_transcoder_error_state { 10076 + enum transcoder cpu_transcoder; 10077 + 10078 + u32 conf; 10079 + 10080 + u32 htotal; 10081 + u32 hblank; 10082 + u32 hsync; 10083 + u32 vtotal; 10084 + u32 vblank; 10085 + u32 vsync; 10086 + } transcoder[4]; 10067 10087 }; 10068 10088 10069 10089 struct intel_display_error_state * ··· 10084 10078 { 10085 10079 drm_i915_private_t *dev_priv = dev->dev_private; 10086 10080 struct intel_display_error_state *error; 10087 - enum transcoder cpu_transcoder; 10081 + int transcoders[] = { 10082 + TRANSCODER_A, 10083 + TRANSCODER_B, 10084 + TRANSCODER_C, 10085 + TRANSCODER_EDP, 10086 + }; 10088 10087 int i; 10088 + 10089 + if (INTEL_INFO(dev)->num_pipes == 0) 10090 + return NULL; 10089 10091 10090 10092 error = kmalloc(sizeof(*error), GFP_ATOMIC); 10091 10093 if (error == NULL) ··· 10103 10089 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 10104 10090 10105 10091 for_each_pipe(i) { 10106 - cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); 10107 - error->pipe[i].cpu_transcoder = cpu_transcoder; 10108 - 10109 10092 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 10110 10093 error->cursor[i].control = I915_READ(CURCNTR(i)); 10111 10094 error->cursor[i].position = I915_READ(CURPOS(i)); ··· 10126 10115 
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 10127 10116 } 10128 10117 10129 - error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 10130 10118 error->pipe[i].source = I915_READ(PIPESRC(i)); 10131 - error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 10132 - error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 10133 - error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 10134 - error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 10135 - error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 10136 - error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 10119 + } 10120 + 10121 + error->num_transcoders = INTEL_INFO(dev)->num_pipes; 10122 + if (HAS_DDI(dev_priv->dev)) 10123 + error->num_transcoders++; /* Account for eDP. */ 10124 + 10125 + for (i = 0; i < error->num_transcoders; i++) { 10126 + enum transcoder cpu_transcoder = transcoders[i]; 10127 + 10128 + error->transcoder[i].cpu_transcoder = cpu_transcoder; 10129 + 10130 + error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 10131 + error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 10132 + error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 10133 + error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 10134 + error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 10135 + error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 10136 + error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 10137 10137 } 10138 10138 10139 10139 /* In the code above we read the registers without checking if the power ··· 10166 10144 { 10167 10145 int i; 10168 10146 10147 + if (!error) 10148 + return; 10149 + 10169 10150 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 10170 10151 if (HAS_POWER_WELL(dev)) 10171 10152 err_printf(m, "PWR_WELL_CTL2: %08x\n", 10172 10153 error->power_well_driver); 10173 10154 for_each_pipe(i) { 10174 10155 err_printf(m, "Pipe [%d]:\n", i); 10175 - err_printf(m, " CPU 
transcoder: %c\n", 10176 - transcoder_name(error->pipe[i].cpu_transcoder)); 10177 - err_printf(m, " CONF: %08x\n", error->pipe[i].conf); 10178 10156 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 10179 - err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); 10180 - err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); 10181 - err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); 10182 - err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); 10183 - err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); 10184 - err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); 10185 10157 10186 10158 err_printf(m, "Plane [%d]:\n", i); 10187 10159 err_printf(m, " CNTR: %08x\n", error->plane[i].control); ··· 10195 10179 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 10196 10180 err_printf(m, " POS: %08x\n", error->cursor[i].position); 10197 10181 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 10182 + } 10183 + 10184 + for (i = 0; i < error->num_transcoders; i++) { 10185 + err_printf(m, " CPU transcoder: %c\n", 10186 + transcoder_name(error->transcoder[i].cpu_transcoder)); 10187 + err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 10188 + err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 10189 + err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 10190 + err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 10191 + err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 10192 + err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 10193 + err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 10198 10194 } 10199 10195 } 10200 10196 #endif
+12
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 968 968 969 969 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 970 970 POSTING_READ(mmio); 971 + 972 + /* Flush the TLB for this page */ 973 + if (INTEL_INFO(dev)->gen >= 6) { 974 + u32 reg = RING_INSTPM(ring->mmio_base); 975 + I915_WRITE(reg, 976 + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 977 + INSTPM_SYNC_FLUSH)); 978 + if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 979 + 1000)) 980 + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 981 + ring->name); 982 + } 971 983 } 972 984 973 985 static int
+4
drivers/gpu/drm/nouveau/core/core/mm.c
··· 98 98 u32 splitoff; 99 99 u32 s, e; 100 100 101 + BUG_ON(!type); 102 + 101 103 list_for_each_entry(this, &mm->free, fl_entry) { 102 104 e = this->offset + this->length; 103 105 s = this->offset; ··· 163 161 { 164 162 struct nouveau_mm_node *prev, *this, *next; 165 163 u32 mask = align - 1; 164 + 165 + BUG_ON(!type); 166 166 167 167 list_for_each_entry_reverse(this, &mm->free, fl_entry) { 168 168 u32 e = this->offset + this->length;
+4 -3
drivers/gpu/drm/nouveau/core/include/subdev/mc.h
··· 20 20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; 21 21 } 22 22 23 - #define nouveau_mc_create(p,e,o,d) \ 24 - nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) 23 + #define nouveau_mc_create(p,e,o,m,d) \ 24 + nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) 25 25 #define nouveau_mc_destroy(p) ({ \ 26 26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ 27 27 }) ··· 33 33 }) 34 34 35 35 int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, 36 - struct nouveau_oclass *, int, void **); 36 + struct nouveau_oclass *, const struct nouveau_mc_intr *, 37 + int, void **); 37 38 void _nouveau_mc_dtor(struct nouveau_object *); 38 39 int _nouveau_mc_init(struct nouveau_object *); 39 40 int _nouveau_mc_fini(struct nouveau_object *, bool);
+6 -6
drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
··· 40 40 return ret; 41 41 42 42 switch (pfb914 & 0x00000003) { 43 - case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break; 44 - case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break; 45 - case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break; 43 + case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break; 44 + case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break; 45 + case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break; 46 46 case 0x00000003: break; 47 47 } 48 48 49 - pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 50 - pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 51 - pfb->ram->tags = nv_rd32(pfb, 0x100320); 49 + ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 50 + ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 51 + ram->tags = nv_rd32(pfb, 0x100320); 52 52 return 0; 53 53 } 54 54
+2 -2
drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
··· 38 38 if (ret) 39 39 return ret; 40 40 41 - pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 42 - pfb->ram->type = NV_MEM_TYPE_STOLEN; 41 + ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 42 + ram->type = NV_MEM_TYPE_STOLEN; 43 43 return 0; 44 44 } 45 45
+24 -10
drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
··· 30 30 struct nouveau_ltcg base; 31 31 u32 part_nr; 32 32 u32 subp_nr; 33 - struct nouveau_mm tags; 34 33 u32 num_tags; 34 + u32 tag_base; 35 + struct nouveau_mm tags; 35 36 struct nouveau_mm_node *tag_ram; 36 37 }; 37 38 ··· 118 117 u32 tag_size, tag_margin, tag_align; 119 118 int ret; 120 119 121 - nv_wr32(priv, 0x17e8d8, priv->part_nr); 122 - if (nv_device(pfb)->card_type >= NV_E0) 123 - nv_wr32(priv, 0x17e000, priv->part_nr); 124 - 125 120 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ 126 121 priv->num_tags = (pfb->ram->size >> 17) / 4; 127 122 if (priv->num_tags > (1 << 17)) ··· 139 142 tag_size += tag_align; 140 143 tag_size = (tag_size + 0xfff) >> 12; /* round up */ 141 144 142 - ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, 145 + ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1, 143 146 &priv->tag_ram); 144 147 if (ret) { 145 148 priv->num_tags = 0; ··· 149 152 tag_base += tag_align - 1; 150 153 ret = do_div(tag_base, tag_align); 151 154 152 - nv_wr32(priv, 0x17e8d4, tag_base); 155 + priv->tag_base = tag_base; 153 156 } 154 157 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); 155 158 ··· 179 182 } 180 183 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; 181 184 182 - nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ 183 - 184 185 ret = nvc0_ltcg_init_tag_ram(pfb, priv); 185 186 if (ret) 186 187 return ret; ··· 204 209 nouveau_ltcg_destroy(ltcg); 205 210 } 206 211 212 + static int 213 + nvc0_ltcg_init(struct nouveau_object *object) 214 + { 215 + struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; 216 + struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; 217 + int ret; 218 + 219 + ret = nouveau_ltcg_init(ltcg); 220 + if (ret) 221 + return ret; 222 + 223 + nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ 224 + nv_wr32(priv, 0x17e8d8, priv->part_nr); 225 + if (nv_device(ltcg)->card_type >= NV_E0) 226 + nv_wr32(priv, 0x17e000, priv->part_nr); 227 
+ nv_wr32(priv, 0x17e8d4, priv->tag_base); 228 + return 0; 229 + } 230 + 207 231 struct nouveau_oclass 208 232 nvc0_ltcg_oclass = { 209 233 .handle = NV_SUBDEV(LTCG, 0xc0), 210 234 .ofuncs = &(struct nouveau_ofuncs) { 211 235 .ctor = nvc0_ltcg_ctor, 212 236 .dtor = nvc0_ltcg_dtor, 213 - .init = _nouveau_ltcg_init, 237 + .init = nvc0_ltcg_init, 214 238 .fini = _nouveau_ltcg_fini, 215 239 }, 216 240 };
+5 -1
drivers/gpu/drm/nouveau/core/subdev/mc/base.c
··· 80 80 81 81 int 82 82 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, 83 - struct nouveau_oclass *oclass, int length, void **pobject) 83 + struct nouveau_oclass *oclass, 84 + const struct nouveau_mc_intr *intr_map, 85 + int length, void **pobject) 84 86 { 85 87 struct nouveau_device *device = nv_device(parent); 86 88 struct nouveau_mc *pmc; ··· 93 91 pmc = *pobject; 94 92 if (ret) 95 93 return ret; 94 + 95 + pmc->intr_map = intr_map; 96 96 97 97 ret = request_irq(device->pdev->irq, nouveau_mc_intr, 98 98 IRQF_SHARED, "nouveau", pmc);
+1 -2
drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
··· 50 50 struct nv04_mc_priv *priv; 51 51 int ret; 52 52 53 - ret = nouveau_mc_create(parent, engine, oclass, &priv); 53 + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); 54 54 *pobject = nv_object(priv); 55 55 if (ret) 56 56 return ret; 57 57 58 - priv->base.intr_map = nv04_mc_intr; 59 58 return 0; 60 59 } 61 60
+1 -2
drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
··· 36 36 struct nv44_mc_priv *priv; 37 37 int ret; 38 38 39 - ret = nouveau_mc_create(parent, engine, oclass, &priv); 39 + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); 40 40 *pobject = nv_object(priv); 41 41 if (ret) 42 42 return ret; 43 43 44 - priv->base.intr_map = nv04_mc_intr; 45 44 return 0; 46 45 } 47 46
+1 -2
drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
··· 53 53 struct nv50_mc_priv *priv; 54 54 int ret; 55 55 56 - ret = nouveau_mc_create(parent, engine, oclass, &priv); 56 + ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv); 57 57 *pobject = nv_object(priv); 58 58 if (ret) 59 59 return ret; 60 60 61 - priv->base.intr_map = nv50_mc_intr; 62 61 return 0; 63 62 } 64 63
+1 -2
drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
··· 54 54 struct nv98_mc_priv *priv; 55 55 int ret; 56 56 57 - ret = nouveau_mc_create(parent, engine, oclass, &priv); 57 + ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv); 58 58 *pobject = nv_object(priv); 59 59 if (ret) 60 60 return ret; 61 61 62 - priv->base.intr_map = nv98_mc_intr; 63 62 return 0; 64 63 } 65 64
+1 -2
drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
··· 57 57 struct nvc0_mc_priv *priv; 58 58 int ret; 59 59 60 - ret = nouveau_mc_create(parent, engine, oclass, &priv); 60 + ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); 61 61 *pobject = nv_object(priv); 62 62 if (ret) 63 63 return ret; 64 64 65 - priv->base.intr_map = nvc0_mc_intr; 66 65 return 0; 67 66 } 68 67
+43 -15
drivers/gpu/drm/nouveau/dispnv04/crtc.c
··· 606 606 regp->ramdac_a34 = 0x1; 607 607 } 608 608 609 + static int 610 + nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) 611 + { 612 + struct nv04_display *disp = nv04_display(crtc->dev); 613 + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); 614 + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 615 + int ret; 616 + 617 + ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); 618 + if (ret == 0) { 619 + if (disp->image[nv_crtc->index]) 620 + nouveau_bo_unpin(disp->image[nv_crtc->index]); 621 + nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]); 622 + } 623 + 624 + return ret; 625 + } 626 + 609 627 /** 610 628 * Sets up registers for the given mode/adjusted_mode pair. 611 629 * ··· 640 622 struct drm_device *dev = crtc->dev; 641 623 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 642 624 struct nouveau_drm *drm = nouveau_drm(dev); 625 + int ret; 643 626 644 627 NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); 645 628 drm_mode_debug_printmodeline(adjusted_mode); 629 + 630 + ret = nv_crtc_swap_fbs(crtc, old_fb); 631 + if (ret) 632 + return ret; 646 633 647 634 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ 648 635 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); ··· 745 722 746 723 static void nv_crtc_destroy(struct drm_crtc *crtc) 747 724 { 725 + struct nv04_display *disp = nv04_display(crtc->dev); 748 726 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 749 727 750 728 if (!nv_crtc) 751 729 return; 752 730 753 731 drm_crtc_cleanup(crtc); 732 + 733 + if (disp->image[nv_crtc->index]) 734 + nouveau_bo_unpin(disp->image[nv_crtc->index]); 735 + nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); 754 736 755 737 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 756 738 nouveau_bo_unpin(nv_crtc->cursor.nvbo); ··· 779 751 } 780 752 781 753 nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg); 754 + } 755 + 756 + static void 757 + nv_crtc_disable(struct drm_crtc 
*crtc) 758 + { 759 + struct nv04_display *disp = nv04_display(crtc->dev); 760 + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 761 + if (disp->image[nv_crtc->index]) 762 + nouveau_bo_unpin(disp->image[nv_crtc->index]); 763 + nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); 782 764 } 783 765 784 766 static void ··· 829 791 struct drm_framebuffer *drm_fb; 830 792 struct nouveau_framebuffer *fb; 831 793 int arb_burst, arb_lwm; 832 - int ret; 833 794 834 795 NV_DEBUG(drm, "index %d\n", nv_crtc->index); 835 796 ··· 838 801 return 0; 839 802 } 840 803 841 - 842 804 /* If atomic, we want to switch to the fb we were passed, so 843 - * now we update pointers to do that. (We don't pin; just 844 - * assume we're already pinned and update the base address.) 805 + * now we update pointers to do that. 845 806 */ 846 807 if (atomic) { 847 808 drm_fb = passed_fb; ··· 847 812 } else { 848 813 drm_fb = crtc->fb; 849 814 fb = nouveau_framebuffer(crtc->fb); 850 - /* If not atomic, we can go ahead and pin, and unpin the 851 - * old fb we were passed. 852 - */ 853 - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); 854 - if (ret) 855 - return ret; 856 - 857 - if (passed_fb) { 858 - struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); 859 - nouveau_bo_unpin(ofb->nvbo); 860 - } 861 815 } 862 816 863 817 nv_crtc->fb.offset = fb->nvbo->bo.offset; ··· 901 877 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 902 878 struct drm_framebuffer *old_fb) 903 879 { 880 + int ret = nv_crtc_swap_fbs(crtc, old_fb); 881 + if (ret) 882 + return ret; 904 883 return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); 905 884 } 906 885 ··· 1054 1027 .mode_set_base = nv04_crtc_mode_set_base, 1055 1028 .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, 1056 1029 .load_lut = nv_crtc_gamma_load, 1030 + .disable = nv_crtc_disable, 1057 1031 }; 1058 1032 1059 1033 int
+1
drivers/gpu/drm/nouveau/dispnv04/disp.h
··· 81 81 uint32_t saved_vga_font[4][16384]; 82 82 uint32_t dac_users[4]; 83 83 struct nouveau_object *core; 84 + struct nouveau_bo *image[2]; 84 85 }; 85 86 86 87 static inline struct nv04_display *
+3
drivers/gpu/drm/nouveau/nouveau_display.c
··· 577 577 ret = nv50_display_flip_next(crtc, fb, chan, 0); 578 578 if (ret) 579 579 goto fail_unreserve; 580 + } else { 581 + struct nv04_display *dispnv04 = nv04_display(dev); 582 + nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]); 580 583 } 581 584 582 585 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+1 -1
drivers/gpu/drm/nouveau/nv40_pm.c
··· 131 131 if (clk < pll->vco1.max_freq) 132 132 pll->vco2.max_freq = 0; 133 133 134 - pclk->pll_calc(pclk, pll, clk, &coef); 134 + ret = pclk->pll_calc(pclk, pll, clk, &coef); 135 135 if (ret == 0) 136 136 return -ERANGE; 137 137
+1 -1
drivers/gpu/drm/radeon/radeon.h
··· 2163 2163 WREG32(reg, tmp_); \ 2164 2164 } while (0) 2165 2165 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) 2166 - #define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) 2166 + #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) 2167 2167 #define WREG32_PLL_P(reg, val, mask) \ 2168 2168 do { \ 2169 2169 uint32_t tmp_ = RREG32_PLL(reg); \
+8
drivers/gpu/drm/radeon/radeon_uvd.c
··· 356 356 return -EINVAL; 357 357 } 358 358 359 + if (bo->tbo.sync_obj) { 360 + r = radeon_fence_wait(bo->tbo.sync_obj, false); 361 + if (r) { 362 + DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); 363 + return r; 364 + } 365 + } 366 + 359 367 r = radeon_bo_kmap(bo, &ptr); 360 368 if (r) { 361 369 DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+6 -6
drivers/gpu/drm/radeon/rv770.c
··· 744 744 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); 745 745 radeon_program_register_sequence(rdev, 746 746 rv730_golden_registers, 747 - (const u32)ARRAY_SIZE(rv770_golden_registers)); 747 + (const u32)ARRAY_SIZE(rv730_golden_registers)); 748 748 radeon_program_register_sequence(rdev, 749 749 rv730_mgcg_init, 750 - (const u32)ARRAY_SIZE(rv770_mgcg_init)); 750 + (const u32)ARRAY_SIZE(rv730_mgcg_init)); 751 751 break; 752 752 case CHIP_RV710: 753 753 radeon_program_register_sequence(rdev, ··· 758 758 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); 759 759 radeon_program_register_sequence(rdev, 760 760 rv710_golden_registers, 761 - (const u32)ARRAY_SIZE(rv770_golden_registers)); 761 + (const u32)ARRAY_SIZE(rv710_golden_registers)); 762 762 radeon_program_register_sequence(rdev, 763 763 rv710_mgcg_init, 764 - (const u32)ARRAY_SIZE(rv770_mgcg_init)); 764 + (const u32)ARRAY_SIZE(rv710_mgcg_init)); 765 765 break; 766 766 case CHIP_RV740: 767 767 radeon_program_register_sequence(rdev, 768 768 rv740_golden_registers, 769 - (const u32)ARRAY_SIZE(rv770_golden_registers)); 769 + (const u32)ARRAY_SIZE(rv740_golden_registers)); 770 770 radeon_program_register_sequence(rdev, 771 771 rv740_mgcg_init, 772 - (const u32)ARRAY_SIZE(rv770_mgcg_init)); 772 + (const u32)ARRAY_SIZE(rv740_mgcg_init)); 773 773 break; 774 774 default: 775 775 break;
+2 -1
drivers/iio/light/adjd_s311.c
··· 232 232 233 233 switch (mask) { 234 234 case IIO_CHAN_INFO_RAW: 235 - ret = adjd_s311_read_data(indio_dev, chan->address, val); 235 + ret = adjd_s311_read_data(indio_dev, 236 + ADJD_S311_DATA_REG(chan->address), val); 236 237 if (ret < 0) 237 238 return ret; 238 239 return IIO_VAL_INT;
+7 -9
drivers/md/dm-cache-policy-mq.c
··· 959 959 return r; 960 960 } 961 961 962 - static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) 962 + static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) 963 963 { 964 - struct entry *e = hash_lookup(mq, oblock); 964 + struct mq_policy *mq = to_mq_policy(p); 965 + struct entry *e; 966 + 967 + mutex_lock(&mq->lock); 968 + 969 + e = hash_lookup(mq, oblock); 965 970 966 971 BUG_ON(!e || !e->in_cache); 967 972 968 973 del(mq, e); 969 974 e->in_cache = false; 970 975 push(mq, e); 971 - } 972 976 973 - static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) 974 - { 975 - struct mq_policy *mq = to_mq_policy(p); 976 - 977 - mutex_lock(&mq->lock); 978 - remove_mapping(mq, oblock); 979 977 mutex_unlock(&mq->lock); 980 978 } 981 979
+7 -1
drivers/net/bonding/bond_main.c
··· 3714 3714 * The bonding ndo_neigh_setup is called at init time beofre any 3715 3715 * slave exists. So we must declare proxy setup function which will 3716 3716 * be used at run time to resolve the actual slave neigh param setup. 3717 + * 3718 + * It's also called by master devices (such as vlans) to setup their 3719 + * underlying devices. In that case - do nothing, we're already set up from 3720 + * our init. 3717 3721 */ 3718 3722 static int bond_neigh_setup(struct net_device *dev, 3719 3723 struct neigh_parms *parms) 3720 3724 { 3721 - parms->neigh_setup = bond_neigh_init; 3725 + /* modify only our neigh_parms */ 3726 + if (parms->dev == dev) 3727 + parms->neigh_setup = bond_neigh_init; 3722 3728 3723 3729 return 0; 3724 3730 }
+1 -1
drivers/net/can/usb/peak_usb/pcan_usb.c
··· 649 649 if ((mc->ptr + rec_len) > mc->end) 650 650 goto decode_failed; 651 651 652 - memcpy(cf->data, mc->ptr, rec_len); 652 + memcpy(cf->data, mc->ptr, cf->can_dlc); 653 653 mc->ptr += rec_len; 654 654 } 655 655
+1 -1
drivers/net/ethernet/arc/emac_main.c
··· 199 199 struct arc_emac_priv *priv = netdev_priv(ndev); 200 200 unsigned int work_done; 201 201 202 - for (work_done = 0; work_done <= budget; work_done++) { 202 + for (work_done = 0; work_done < budget; work_done++) { 203 203 unsigned int *last_rx_bd = &priv->last_rx_bd; 204 204 struct net_device_stats *stats = &priv->stats; 205 205 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
+7
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 1333 1333 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 1334 1334 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 1335 1335 BNX2X_SP_RTNL_HYPERVISOR_VLAN, 1336 + BNX2X_SP_RTNL_TX_STOP, 1337 + BNX2X_SP_RTNL_TX_RESUME, 1336 1338 }; 1337 1339 1338 1340 struct bnx2x_prev_path_list { ··· 1504 1502 #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) 1505 1503 #define IS_VF_FLAG (1 << 22) 1506 1504 #define INTERRUPTS_ENABLED_FLAG (1 << 23) 1505 + #define BC_SUPPORTS_RMMOD_CMD (1 << 24) 1507 1506 1508 1507 #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) 1509 1508 ··· 1833 1830 1834 1831 int fp_array_size; 1835 1832 u32 dump_preset_idx; 1833 + bool stats_started; 1834 + struct semaphore stats_sema; 1836 1835 }; 1837 1836 1838 1837 /* Tx queues may be less or equal to Rx queues */ ··· 2456 2451 BNX2X_PCI_LINK_SPEED_5000 = 5000, 2457 2452 BNX2X_PCI_LINK_SPEED_8000 = 8000 2458 2453 }; 2454 + 2455 + void bnx2x_set_local_cmng(struct bnx2x *bp); 2459 2456 #endif /* bnx2x.h */
+42 -11
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
··· 30 30 #include "bnx2x_dcb.h" 31 31 32 32 /* forward declarations of dcbx related functions */ 33 - static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); 34 33 static void bnx2x_pfc_set_pfc(struct bnx2x *bp); 35 34 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); 36 - static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); 37 35 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, 38 36 u32 *set_configuration_ets_pg, 39 37 u32 *pri_pg_tbl); ··· 423 425 bnx2x_pfc_clear(bp); 424 426 } 425 427 426 - static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) 428 + int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) 427 429 { 428 430 struct bnx2x_func_state_params func_params = {NULL}; 431 + int rc; 429 432 430 433 func_params.f_obj = &bp->func_obj; 431 434 func_params.cmd = BNX2X_F_CMD_TX_STOP; 432 435 436 + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 437 + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 438 + 433 439 DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); 434 - return bnx2x_func_state_change(bp, &func_params); 440 + 441 + rc = bnx2x_func_state_change(bp, &func_params); 442 + if (rc) { 443 + BNX2X_ERR("Unable to hold traffic for HW configuration\n"); 444 + bnx2x_panic(); 445 + } 446 + 447 + return rc; 435 448 } 436 449 437 - static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) 450 + int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) 438 451 { 439 452 struct bnx2x_func_state_params func_params = {NULL}; 440 453 struct bnx2x_func_tx_start_params *tx_params = 441 454 &func_params.params.tx_start; 455 + int rc; 442 456 443 457 func_params.f_obj = &bp->func_obj; 444 458 func_params.cmd = BNX2X_F_CMD_TX_START; 445 459 460 + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 461 + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 462 + 446 463 bnx2x_dcbx_fw_struct(bp, tx_params); 447 464 448 465 DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); 449 - return bnx2x_func_state_change(bp, &func_params); 466 + 467 + rc = bnx2x_func_state_change(bp, &func_params); 468 + if 
(rc) { 469 + BNX2X_ERR("Unable to resume traffic after HW configuration\n"); 470 + bnx2x_panic(); 471 + } 472 + 473 + return rc; 450 474 } 451 475 452 476 static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) ··· 764 744 if (IS_MF(bp)) 765 745 bnx2x_link_sync_notify(bp); 766 746 767 - bnx2x_dcbx_stop_hw_tx(bp); 747 + set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); 748 + 749 + schedule_delayed_work(&bp->sp_rtnl_task, 0); 768 750 769 751 return; 770 752 } ··· 775 753 bnx2x_pfc_set_pfc(bp); 776 754 777 755 bnx2x_dcbx_update_ets_params(bp); 778 - bnx2x_dcbx_resume_hw_tx(bp); 756 + 757 + /* ets may affect cmng configuration: reinit it in hw */ 758 + bnx2x_set_local_cmng(bp); 759 + 760 + set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); 761 + 762 + schedule_delayed_work(&bp->sp_rtnl_task, 0); 779 763 780 764 return; 781 765 case BNX2X_DCBX_STATE_TX_RELEASED: ··· 2391 2363 case DCB_FEATCFG_ATTR_PG: 2392 2364 if (bp->dcbx_local_feat.ets.enabled) 2393 2365 *flags |= DCB_FEATCFG_ENABLE; 2394 - if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) 2366 + if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | 2367 + DCBX_REMOTE_MIB_ERROR)) 2395 2368 *flags |= DCB_FEATCFG_ERROR; 2396 2369 break; 2397 2370 case DCB_FEATCFG_ATTR_PFC: 2398 2371 if (bp->dcbx_local_feat.pfc.enabled) 2399 2372 *flags |= DCB_FEATCFG_ENABLE; 2400 2373 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | 2401 - DCBX_LOCAL_PFC_MISMATCH)) 2374 + DCBX_LOCAL_PFC_MISMATCH | 2375 + DCBX_REMOTE_MIB_ERROR)) 2402 2376 *flags |= DCB_FEATCFG_ERROR; 2403 2377 break; 2404 2378 case DCB_FEATCFG_ATTR_APP: 2405 2379 if (bp->dcbx_local_feat.app.enabled) 2406 2380 *flags |= DCB_FEATCFG_ENABLE; 2407 2381 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | 2408 - DCBX_LOCAL_APP_MISMATCH)) 2382 + DCBX_LOCAL_APP_MISMATCH | 2383 + DCBX_REMOTE_MIB_ERROR)) 2409 2384 *flags |= DCB_FEATCFG_ERROR; 2410 2385 break; 2411 2386 default:
+3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
··· 199 199 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); 200 200 #endif /* BCM_DCBNL */ 201 201 202 + int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); 203 + int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); 204 + 202 205 #endif /* BNX2X_DCB_H */
+5
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
··· 1300 1300 1301 1301 #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 1302 1302 1303 + #define DRV_MSG_CODE_RMMOD 0xdb000000 1304 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f 1305 + 1303 1306 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 1304 1307 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 1305 1308 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 ··· 1374 1371 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 1375 1372 1376 1373 #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 1374 + 1375 + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 1377 1376 1378 1377 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 1379 1378 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
+61 -27
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 2261 2261 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2262 2262 } 2263 2263 2264 + static void bnx2x_init_dropless_fc(struct bnx2x *bp) 2265 + { 2266 + u32 pause_enabled = 0; 2267 + 2268 + if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { 2269 + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 2270 + pause_enabled = 1; 2271 + 2272 + REG_WR(bp, BAR_USTRORM_INTMEM + 2273 + USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), 2274 + pause_enabled); 2275 + } 2276 + 2277 + DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", 2278 + pause_enabled ? "enabled" : "disabled"); 2279 + } 2280 + 2264 2281 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2265 2282 { 2266 2283 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); ··· 2311 2294 2312 2295 bnx2x_release_phy_lock(bp); 2313 2296 2297 + bnx2x_init_dropless_fc(bp); 2298 + 2314 2299 bnx2x_calc_fc_adv(bp); 2315 2300 2316 2301 if (bp->link_vars.link_up) { ··· 2333 2314 bnx2x_acquire_phy_lock(bp); 2334 2315 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2335 2316 bnx2x_release_phy_lock(bp); 2317 + 2318 + bnx2x_init_dropless_fc(bp); 2336 2319 2337 2320 bnx2x_calc_fc_adv(bp); 2338 2321 } else ··· 2497 2476 2498 2477 input.port_rate = bp->link_vars.line_speed; 2499 2478 2500 - if (cmng_type == CMNG_FNS_MINMAX) { 2479 + if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { 2501 2480 int vn; 2502 2481 2503 2482 /* read mf conf from shmem */ ··· 2554 2533 } 2555 2534 } 2556 2535 2536 + /* init cmng mode in HW according to local configuration */ 2537 + void bnx2x_set_local_cmng(struct bnx2x *bp) 2538 + { 2539 + int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2540 + 2541 + if (cmng_fns != CMNG_FNS_NONE) { 2542 + bnx2x_cmng_fns_init(bp, false, cmng_fns); 2543 + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 2544 + } else { 2545 + /* rate shaping and fairness are disabled */ 2546 + DP(NETIF_MSG_IFUP, 2547 + "single function mode without fairness\n"); 2548 + } 2549 + } 2550 + 2557 2551 /* This 
function is called upon link interrupt */ 2558 2552 static void bnx2x_link_attn(struct bnx2x *bp) 2559 2553 { ··· 2577 2541 2578 2542 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2579 2543 2544 + bnx2x_init_dropless_fc(bp); 2545 + 2580 2546 if (bp->link_vars.link_up) { 2581 - 2582 - /* dropless flow control */ 2583 - if (!CHIP_IS_E1(bp) && bp->dropless_fc) { 2584 - int port = BP_PORT(bp); 2585 - u32 pause_enabled = 0; 2586 - 2587 - if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 2588 - pause_enabled = 1; 2589 - 2590 - REG_WR(bp, BAR_USTRORM_INTMEM + 2591 - USTORM_ETH_PAUSE_ENABLED_OFFSET(port), 2592 - pause_enabled); 2593 - } 2594 2547 2595 2548 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { 2596 2549 struct host_port_stats *pstats; ··· 2593 2568 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2594 2569 } 2595 2570 2596 - if (bp->link_vars.link_up && bp->link_vars.line_speed) { 2597 - int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2598 - 2599 - if (cmng_fns != CMNG_FNS_NONE) { 2600 - bnx2x_cmng_fns_init(bp, false, cmng_fns); 2601 - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 2602 - } else 2603 - /* rate shaping and fairness are disabled */ 2604 - DP(NETIF_MSG_IFUP, 2605 - "single function mode without fairness\n"); 2606 - } 2571 + if (bp->link_vars.link_up && bp->link_vars.line_speed) 2572 + bnx2x_set_local_cmng(bp); 2607 2573 2608 2574 __bnx2x_link_report(bp); 2609 2575 ··· 9655 9639 &bp->sp_rtnl_state)) 9656 9640 bnx2x_pf_set_vfs_vlan(bp); 9657 9641 9642 + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) 9643 + bnx2x_dcbx_stop_hw_tx(bp); 9644 + 9645 + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) 9646 + bnx2x_dcbx_resume_hw_tx(bp); 9647 + 9658 9648 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9659 9649 * can be called from other contexts as well) 9660 9650 */ ··· 10384 10362 10385 10363 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 
10386 10364 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; 10365 + 10366 + bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 10367 + BC_SUPPORTS_RMMOD_CMD : 0; 10368 + 10387 10369 boot_mode = SHMEM_RD(bp, 10388 10370 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 10389 10371 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; ··· 11163 11137 int tmp; 11164 11138 u32 cfg; 11165 11139 11140 + if (IS_VF(bp)) 11141 + return 0; 11142 + 11166 11143 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { 11167 11144 /* Take function: tmp = func */ 11168 11145 tmp = BP_ABS_FUNC(bp); ··· 11553 11524 mutex_init(&bp->port.phy_mutex); 11554 11525 mutex_init(&bp->fw_mb_mutex); 11555 11526 spin_lock_init(&bp->stats_lock); 11527 + sema_init(&bp->stats_sema, 1); 11556 11528 11557 11529 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11558 11530 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); ··· 12847 12817 bnx2x_dcbnl_update_applist(bp, true); 12848 12818 #endif 12849 12819 12820 + if (IS_PF(bp) && 12821 + !BP_NOMCP(bp) && 12822 + (bp->flags & BC_SUPPORTS_RMMOD_CMD)) 12823 + bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); 12824 + 12850 12825 /* Close the interface - either directly or implicitly */ 12851 12826 if (remove_netdev) { 12852 12827 unregister_netdev(dev); 12853 12828 } else { 12854 12829 rtnl_lock(); 12855 - if (netif_running(dev)) 12856 - bnx2x_close(dev); 12830 + dev_close(dev); 12857 12831 rtnl_unlock(); 12858 12832 } 12859 12833
+34 -24
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 1747 1747 1748 1748 void bnx2x_iov_init_dmae(struct bnx2x *bp) 1749 1749 { 1750 - DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); 1751 - if (!IS_SRIOV(bp)) 1752 - return; 1753 - 1754 - REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1750 + if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) 1751 + REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1755 1752 } 1756 1753 1757 1754 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) ··· 3081 3084 pci_disable_sriov(bp->pdev); 3082 3085 } 3083 3086 3084 - static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, 3085 - struct bnx2x_virtf *vf) 3087 + static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3088 + struct bnx2x_virtf **vf, 3089 + struct pf_vf_bulletin_content **bulletin) 3086 3090 { 3087 3091 if (bp->state != BNX2X_STATE_OPEN) { 3088 3092 BNX2X_ERR("vf ndo called though PF is down\n"); ··· 3101 3103 return -EINVAL; 3102 3104 } 3103 3105 3104 - if (!vf) { 3106 + /* init members */ 3107 + *vf = BP_VF(bp, vfidx); 3108 + *bulletin = BP_VF_BULLETIN(bp, vfidx); 3109 + 3110 + if (!*vf) { 3105 3111 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", 3112 + vfidx); 3113 + return -EINVAL; 3114 + } 3115 + 3116 + if (!*bulletin) { 3117 + BNX2X_ERR("vf ndo called but Bulletin Board struct is null. 
vfidx was %d\n", 3106 3118 vfidx); 3107 3119 return -EINVAL; 3108 3120 } ··· 3124 3116 struct ifla_vf_info *ivi) 3125 3117 { 3126 3118 struct bnx2x *bp = netdev_priv(dev); 3127 - struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3128 - struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3129 - struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3130 - struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3119 + struct bnx2x_virtf *vf = NULL; 3120 + struct pf_vf_bulletin_content *bulletin = NULL; 3121 + struct bnx2x_vlan_mac_obj *mac_obj; 3122 + struct bnx2x_vlan_mac_obj *vlan_obj; 3131 3123 int rc; 3132 3124 3133 - /* sanity */ 3134 - rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3125 + /* sanity and init */ 3126 + rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3135 3127 if (rc) 3136 3128 return rc; 3137 - if (!mac_obj || !vlan_obj || !bulletin) { 3129 + mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3130 + vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3131 + if (!mac_obj || !vlan_obj) { 3138 3132 BNX2X_ERR("VF partially initialized\n"); 3139 3133 return -EINVAL; 3140 3134 } ··· 3193 3183 { 3194 3184 struct bnx2x *bp = netdev_priv(dev); 3195 3185 int rc, q_logical_state; 3196 - struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3197 - struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3186 + struct bnx2x_virtf *vf = NULL; 3187 + struct pf_vf_bulletin_content *bulletin = NULL; 3198 3188 3199 - /* sanity */ 3200 - rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3189 + /* sanity and init */ 3190 + rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3201 3191 if (rc) 3202 3192 return rc; 3203 3193 if (!is_valid_ether_addr(mac)) { ··· 3259 3249 { 3260 3250 struct bnx2x *bp = netdev_priv(dev); 3261 3251 int rc, q_logical_state; 3262 - struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3263 - struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3252 + struct bnx2x_virtf *vf = NULL; 3253 + struct pf_vf_bulletin_content *bulletin = NULL; 3264 
3254 3265 - /* sanity */ 3266 - rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3255 + /* sanity and init */ 3256 + rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3267 3257 if (rc) 3268 3258 return rc; 3269 3259 ··· 3473 3463 alloc_mem_err: 3474 3464 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3475 3465 sizeof(struct bnx2x_vf_mbx_msg)); 3476 - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3466 + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 3477 3467 sizeof(union pf_vf_bulletin)); 3478 3468 return -ENOMEM; 3479 3469 }
+54 -12
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
··· 221 221 * Statistics service functions 222 222 */ 223 223 224 - static void bnx2x_stats_pmf_update(struct bnx2x *bp) 224 + /* should be called under stats_sema */ 225 + static void __bnx2x_stats_pmf_update(struct bnx2x *bp) 225 226 { 226 227 struct dmae_command *dmae; 227 228 u32 opcode; ··· 519 518 *stats_comp = 0; 520 519 } 521 520 522 - static void bnx2x_stats_start(struct bnx2x *bp) 521 + /* should be called under stats_sema */ 522 + static void __bnx2x_stats_start(struct bnx2x *bp) 523 523 { 524 524 /* vfs travel through here as part of the statistics FSM, but no action 525 525 * is required ··· 536 534 537 535 bnx2x_hw_stats_post(bp); 538 536 bnx2x_storm_stats_post(bp); 537 + 538 + bp->stats_started = true; 539 + } 540 + 541 + static void bnx2x_stats_start(struct bnx2x *bp) 542 + { 543 + if (down_timeout(&bp->stats_sema, HZ/10)) 544 + BNX2X_ERR("Unable to acquire stats lock\n"); 545 + __bnx2x_stats_start(bp); 546 + up(&bp->stats_sema); 539 547 } 540 548 541 549 static void bnx2x_stats_pmf_start(struct bnx2x *bp) 542 550 { 551 + if (down_timeout(&bp->stats_sema, HZ/10)) 552 + BNX2X_ERR("Unable to acquire stats lock\n"); 543 553 bnx2x_stats_comp(bp); 544 - bnx2x_stats_pmf_update(bp); 545 - bnx2x_stats_start(bp); 554 + __bnx2x_stats_pmf_update(bp); 555 + __bnx2x_stats_start(bp); 556 + up(&bp->stats_sema); 557 + } 558 + 559 + static void bnx2x_stats_pmf_update(struct bnx2x *bp) 560 + { 561 + if (down_timeout(&bp->stats_sema, HZ/10)) 562 + BNX2X_ERR("Unable to acquire stats lock\n"); 563 + __bnx2x_stats_pmf_update(bp); 564 + up(&bp->stats_sema); 546 565 } 547 566 548 567 static void bnx2x_stats_restart(struct bnx2x *bp) ··· 573 550 */ 574 551 if (IS_VF(bp)) 575 552 return; 553 + if (down_timeout(&bp->stats_sema, HZ/10)) 554 + BNX2X_ERR("Unable to acquire stats lock\n"); 576 555 bnx2x_stats_comp(bp); 577 - bnx2x_stats_start(bp); 556 + __bnx2x_stats_start(bp); 557 + up(&bp->stats_sema); 578 558 } 579 559 580 560 static void bnx2x_bmac_stats_update(struct bnx2x 
*bp) ··· 914 888 /* Make sure we use the value of the counter 915 889 * used for sending the last stats ramrod. 916 890 */ 917 - spin_lock_bh(&bp->stats_lock); 918 891 cur_stats_counter = bp->stats_counter - 1; 919 - spin_unlock_bh(&bp->stats_lock); 920 892 921 893 /* are storm stats valid? */ 922 894 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { ··· 1251 1227 { 1252 1228 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 1253 1229 1254 - if (bnx2x_edebug_stats_stopped(bp)) 1230 + /* we run update from timer context, so give up 1231 + * if somebody is in the middle of transition 1232 + */ 1233 + if (down_trylock(&bp->stats_sema)) 1255 1234 return; 1235 + 1236 + if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) 1237 + goto out; 1256 1238 1257 1239 if (IS_PF(bp)) { 1258 1240 if (*stats_comp != DMAE_COMP_VAL) 1259 - return; 1241 + goto out; 1260 1242 1261 1243 if (bp->port.pmf) 1262 1244 bnx2x_hw_stats_update(bp); ··· 1272 1242 BNX2X_ERR("storm stats were not updated for 3 times\n"); 1273 1243 bnx2x_panic(); 1274 1244 } 1275 - return; 1245 + goto out; 1276 1246 } 1277 1247 } else { 1278 1248 /* vf doesn't collect HW statistics, and doesn't get completions ··· 1286 1256 1287 1257 /* vf is done */ 1288 1258 if (IS_VF(bp)) 1289 - return; 1259 + goto out; 1290 1260 1291 1261 if (netif_msg_timer(bp)) { 1292 1262 struct bnx2x_eth_stats *estats = &bp->eth_stats; ··· 1297 1267 1298 1268 bnx2x_hw_stats_post(bp); 1299 1269 bnx2x_storm_stats_post(bp); 1270 + 1271 + out: 1272 + up(&bp->stats_sema); 1300 1273 } 1301 1274 1302 1275 static void bnx2x_port_stats_stop(struct bnx2x *bp) ··· 1365 1332 { 1366 1333 int update = 0; 1367 1334 1335 + if (down_timeout(&bp->stats_sema, HZ/10)) 1336 + BNX2X_ERR("Unable to acquire stats lock\n"); 1337 + 1338 + bp->stats_started = false; 1339 + 1368 1340 bnx2x_stats_comp(bp); 1369 1341 1370 1342 if (bp->port.pmf) ··· 1386 1348 bnx2x_hw_stats_post(bp); 1387 1349 bnx2x_stats_comp(bp); 1388 1350 } 1351 + 1352 + 
up(&bp->stats_sema); 1389 1353 } 1390 1354 1391 1355 static void bnx2x_stats_do_nothing(struct bnx2x *bp) ··· 1416 1376 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) 1417 1377 { 1418 1378 enum bnx2x_stats_state state; 1379 + void (*action)(struct bnx2x *bp); 1419 1380 if (unlikely(bp->panic)) 1420 1381 return; 1421 1382 1422 1383 spin_lock_bh(&bp->stats_lock); 1423 1384 state = bp->stats_state; 1424 1385 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1386 + action = bnx2x_stats_stm[state][event].action; 1425 1387 spin_unlock_bh(&bp->stats_lock); 1426 1388 1427 - bnx2x_stats_stm[state][event].action(bp); 1389 + action(bp); 1428 1390 1429 1391 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1430 1392 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+8 -5
drivers/net/ethernet/broadcom/tg3.c
··· 17796 17796 17797 17797 done: 17798 17798 if (state == pci_channel_io_perm_failure) { 17799 - tg3_napi_enable(tp); 17800 - dev_close(netdev); 17799 + if (netdev) { 17800 + tg3_napi_enable(tp); 17801 + dev_close(netdev); 17802 + } 17801 17803 err = PCI_ERS_RESULT_DISCONNECT; 17802 17804 } else { 17803 17805 pci_disable_device(pdev); ··· 17829 17827 rtnl_lock(); 17830 17828 17831 17829 if (pci_enable_device(pdev)) { 17832 - netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); 17830 + dev_err(&pdev->dev, 17831 + "Cannot re-enable PCI device after reset.\n"); 17833 17832 goto done; 17834 17833 } 17835 17834 ··· 17838 17835 pci_restore_state(pdev); 17839 17836 pci_save_state(pdev); 17840 17837 17841 - if (!netif_running(netdev)) { 17838 + if (!netdev || !netif_running(netdev)) { 17842 17839 rc = PCI_ERS_RESULT_RECOVERED; 17843 17840 goto done; 17844 17841 } ··· 17850 17847 rc = PCI_ERS_RESULT_RECOVERED; 17851 17848 17852 17849 done: 17853 - if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { 17850 + if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { 17854 17851 tg3_napi_enable(tp); 17855 17852 dev_close(netdev); 17856 17853 }
+24 -83
drivers/net/ethernet/chelsio/cxgb3/sge.c
··· 455 455 q->pg_chunk.offset = 0; 456 456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 457 457 0, q->alloc_size, PCI_DMA_FROMDEVICE); 458 - if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { 459 - __free_pages(q->pg_chunk.page, order); 460 - q->pg_chunk.page = NULL; 461 - return -EIO; 462 - } 463 458 q->pg_chunk.mapping = mapping; 464 459 } 465 460 sd->pg_chunk = q->pg_chunk; ··· 949 954 return flits_to_desc(flits); 950 955 } 951 956 952 - 953 - /* map_skb - map a packet main body and its page fragments 954 - * @pdev: the PCI device 955 - * @skb: the packet 956 - * @addr: placeholder to save the mapped addresses 957 - * 958 - * map the main body of an sk_buff and its page fragments, if any. 959 - */ 960 - static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, 961 - dma_addr_t *addr) 962 - { 963 - const skb_frag_t *fp, *end; 964 - const struct skb_shared_info *si; 965 - 966 - *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), 967 - PCI_DMA_TODEVICE); 968 - if (pci_dma_mapping_error(pdev, *addr)) 969 - goto out_err; 970 - 971 - si = skb_shinfo(skb); 972 - end = &si->frags[si->nr_frags]; 973 - 974 - for (fp = si->frags; fp < end; fp++) { 975 - *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), 976 - DMA_TO_DEVICE); 977 - if (pci_dma_mapping_error(pdev, *addr)) 978 - goto unwind; 979 - } 980 - return 0; 981 - 982 - unwind: 983 - while (fp-- > si->frags) 984 - dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), 985 - DMA_TO_DEVICE); 986 - 987 - pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); 988 - out_err: 989 - return -ENOMEM; 990 - } 991 - 992 957 /** 993 - * write_sgl - populate a scatter/gather list for a packet 958 + * make_sgl - populate a scatter/gather list for a packet 994 959 * @skb: the packet 995 960 * @sgp: the SGL to populate 996 961 * @start: start address of skb main body data to include in the SGL 997 962 * @len: length of skb main body data to include in the SGL 998 - * 
@addr: the list of the mapped addresses 963 + * @pdev: the PCI device 999 964 * 1000 - * Copies the scatter/gather list for the buffers that make up a packet 965 + * Generates a scatter/gather list for the buffers that make up a packet 1001 966 * and returns the SGL size in 8-byte words. The caller must size the SGL 1002 967 * appropriately. 1003 968 */ 1004 - static inline unsigned int write_sgl(const struct sk_buff *skb, 969 + static inline unsigned int make_sgl(const struct sk_buff *skb, 1005 970 struct sg_ent *sgp, unsigned char *start, 1006 - unsigned int len, const dma_addr_t *addr) 971 + unsigned int len, struct pci_dev *pdev) 1007 972 { 1008 - unsigned int i, j = 0, k = 0, nfrags; 973 + dma_addr_t mapping; 974 + unsigned int i, j = 0, nfrags; 1009 975 1010 976 if (len) { 977 + mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); 1011 978 sgp->len[0] = cpu_to_be32(len); 1012 - sgp->addr[j++] = cpu_to_be64(addr[k++]); 979 + sgp->addr[0] = cpu_to_be64(mapping); 980 + j = 1; 1013 981 } 1014 982 1015 983 nfrags = skb_shinfo(skb)->nr_frags; 1016 984 for (i = 0; i < nfrags; i++) { 1017 985 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1018 986 987 + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), 988 + DMA_TO_DEVICE); 1019 989 sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); 1020 - sgp->addr[j] = cpu_to_be64(addr[k++]); 990 + sgp->addr[j] = cpu_to_be64(mapping); 1021 991 j ^= 1; 1022 992 if (j == 0) 1023 993 ++sgp; ··· 1138 1178 const struct port_info *pi, 1139 1179 unsigned int pidx, unsigned int gen, 1140 1180 struct sge_txq *q, unsigned int ndesc, 1141 - unsigned int compl, const dma_addr_t *addr) 1181 + unsigned int compl) 1142 1182 { 1143 1183 unsigned int flits, sgl_flits, cntrl, tso_info; 1144 1184 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; ··· 1196 1236 } 1197 1237 1198 1238 sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; 1199 - sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); 1239 + sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); 1200 1240 1201 1241 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, 1202 1242 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), ··· 1227 1267 struct netdev_queue *txq; 1228 1268 struct sge_qset *qs; 1229 1269 struct sge_txq *q; 1230 - dma_addr_t addr[MAX_SKB_FRAGS + 1]; 1231 1270 1232 1271 /* 1233 1272 * The chip min packet length is 9 octets but play safe and reject ··· 1253 1294 "%s: Tx ring %u full while queue awake!\n", 1254 1295 dev->name, q->cntxt_id & 7); 1255 1296 return NETDEV_TX_BUSY; 1256 - } 1257 - 1258 - if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { 1259 - dev_kfree_skb(skb); 1260 - return NETDEV_TX_OK; 1261 1297 } 1262 1298 1263 1299 q->in_use += ndesc; ··· 1312 1358 if (likely(!skb_shared(skb))) 1313 1359 skb_orphan(skb); 1314 1360 1315 - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); 1361 + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); 1316 1362 check_ring_tx_db(adap, q); 1317 1363 return NETDEV_TX_OK; 1318 1364 } ··· 1577 1623 */ 1578 1624 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, 1579 1625 struct sge_txq *q, unsigned int pidx, 1580 - unsigned int gen, unsigned int ndesc, 1581 - const dma_addr_t *addr) 1626 + unsigned int gen, unsigned int ndesc) 1582 1627 { 1583 1628 unsigned int sgl_flits, flits; 1584 1629 struct work_request_hdr *from; ··· 1598 1645 1599 1646 flits = skb_transport_offset(skb) / 8; 1600 1647 sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; 1601 - sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), 1602 - skb_tail_pointer(skb) - 1603 - skb_transport_header(skb), addr); 1648 + sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), 1649 + skb->tail - skb->transport_header, 1650 + adap->pdev); 1604 1651 if (need_skb_unmap()) { 1605 1652 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1606 1653 skb->destructor = deferred_unmap_destructor; ··· 1658 1705 goto again; 1659 1706 } 1660 1707 1661 - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { 1662 - spin_unlock(&q->lock); 1663 - return NET_XMIT_SUCCESS; 1664 - } 1665 - 1666 1708 gen = q->gen; 1667 1709 q->in_use += ndesc; 1668 1710 pidx = q->pidx; ··· 1668 1720 } 1669 1721 spin_unlock(&q->lock); 1670 1722 1671 - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); 1723 + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1672 1724 check_ring_tx_db(adap, q); 1673 1725 return NET_XMIT_SUCCESS; 1674 1726 } ··· 1686 1738 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 1687 1739 const struct port_info *pi = netdev_priv(qs->netdev); 1688 1740 struct adapter *adap = pi->adapter; 1689 - unsigned int written = 0; 1690 1741 1691 1742 spin_lock(&q->lock); 1692 1743 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); ··· 1705 1758 break; 1706 1759 } 1707 1760 1708 - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) 1709 - break; 1710 - 1711 1761 gen = q->gen; 1712 1762 q->in_use += ndesc; 1713 1763 pidx = q->pidx; 1714 1764 q->pidx += ndesc; 1715 - written += ndesc; 1716 1765 if (q->pidx >= q->size) { 1717 1766 q->pidx -= q->size; 1718 1767 q->gen ^= 1; ··· 1716 1773 __skb_unlink(skb, &q->sendq); 1717 1774 spin_unlock(&q->lock); 1718 1775 1719 - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, 1720 - (dma_addr_t *)skb->head); 1776 + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1721 1777 spin_lock(&q->lock); 1722 1778 } 1723 1779 spin_unlock(&q->lock); ··· 1726 1784 
set_bit(TXQ_LAST_PKT_DB, &q->flags); 1727 1785 #endif 1728 1786 wmb(); 1729 - if (likely(written)) 1730 - t3_write_reg(adap, A_SG_KDOORBELL, 1731 - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1787 + t3_write_reg(adap, A_SG_KDOORBELL, 1788 + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1732 1789 } 1733 1790 1734 1791 /**
+3
drivers/net/ethernet/emulex/benet/be_cmds.c
··· 3048 3048 3049 3049 adapter->max_event_queues = le16_to_cpu(desc->eq_count); 3050 3050 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); 3051 + 3052 + /* Clear flags that driver is not interested in */ 3053 + adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; 3051 3054 } 3052 3055 err: 3053 3056 mutex_unlock(&adapter->mbox_lock);
+6
drivers/net/ethernet/emulex/benet/be_cmds.h
··· 563 563 BE_IF_FLAGS_MULTICAST = 0x1000 564 564 }; 565 565 566 + #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ 567 + BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 568 + BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ 569 + BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ 570 + BE_IF_FLAGS_UNTAGGED) 571 + 566 572 /* An RX interface is an object with one or more MAC addresses and 567 573 * filtering capabilities. */ 568 574 struct be_cmd_req_if_create {
+1 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 2563 2563 /* Wait for all pending tx completions to arrive so that 2564 2564 * all tx skbs are freed. 2565 2565 */ 2566 - be_tx_compl_clean(adapter); 2567 2566 netif_tx_disable(netdev); 2567 + be_tx_compl_clean(adapter); 2568 2568 2569 2569 be_rx_qs_destroy(adapter); 2570 2570
+53 -15
drivers/net/ethernet/marvell/skge.c
··· 931 931 } 932 932 933 933 /* Allocate and setup a new buffer for receiving */ 934 - static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 935 - struct sk_buff *skb, unsigned int bufsize) 934 + static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, 935 + struct sk_buff *skb, unsigned int bufsize) 936 936 { 937 937 struct skge_rx_desc *rd = e->desc; 938 - u64 map; 938 + dma_addr_t map; 939 939 940 940 map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 941 941 PCI_DMA_FROMDEVICE); 942 942 943 - rd->dma_lo = map; 944 - rd->dma_hi = map >> 32; 943 + if (pci_dma_mapping_error(skge->hw->pdev, map)) 944 + return -1; 945 + 946 + rd->dma_lo = lower_32_bits(map); 947 + rd->dma_hi = upper_32_bits(map); 945 948 e->skb = skb; 946 949 rd->csum1_start = ETH_HLEN; 947 950 rd->csum2_start = ETH_HLEN; ··· 956 953 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 957 954 dma_unmap_addr_set(e, mapaddr, map); 958 955 dma_unmap_len_set(e, maplen, bufsize); 956 + return 0; 959 957 } 960 958 961 959 /* Resume receiving using existing skb, ··· 1018 1014 return -ENOMEM; 1019 1015 1020 1016 skb_reserve(skb, NET_IP_ALIGN); 1021 - skge_rx_setup(skge, e, skb, skge->rx_buf_size); 1017 + if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { 1018 + dev_kfree_skb(skb); 1019 + return -EIO; 1020 + } 1022 1021 } while ((e = e->next) != ring->start); 1023 1022 1024 1023 ring->to_clean = ring->start; ··· 2551 2544 2552 2545 BUG_ON(skge->dma & 7); 2553 2546 2554 - if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { 2547 + if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { 2555 2548 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); 2556 2549 err = -EINVAL; 2557 2550 goto free_pci_mem; ··· 2736 2729 struct skge_tx_desc *td; 2737 2730 int i; 2738 2731 u32 control, len; 2739 - u64 map; 2732 + dma_addr_t map; 2740 2733 2741 2734 if (skb_padto(skb, ETH_ZLEN)) 2742 
2735 return NETDEV_TX_OK; ··· 2750 2743 e->skb = skb; 2751 2744 len = skb_headlen(skb); 2752 2745 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2746 + if (pci_dma_mapping_error(hw->pdev, map)) 2747 + goto mapping_error; 2748 + 2753 2749 dma_unmap_addr_set(e, mapaddr, map); 2754 2750 dma_unmap_len_set(e, maplen, len); 2755 2751 2756 - td->dma_lo = map; 2757 - td->dma_hi = map >> 32; 2752 + td->dma_lo = lower_32_bits(map); 2753 + td->dma_hi = upper_32_bits(map); 2758 2754 2759 2755 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2760 2756 const int offset = skb_checksum_start_offset(skb); ··· 2788 2778 2789 2779 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 2790 2780 skb_frag_size(frag), DMA_TO_DEVICE); 2781 + if (dma_mapping_error(&hw->pdev->dev, map)) 2782 + goto mapping_unwind; 2791 2783 2792 2784 e = e->next; 2793 2785 e->skb = skb; 2794 2786 tf = e->desc; 2795 2787 BUG_ON(tf->control & BMU_OWN); 2796 2788 2797 - tf->dma_lo = map; 2798 - tf->dma_hi = (u64) map >> 32; 2789 + tf->dma_lo = lower_32_bits(map); 2790 + tf->dma_hi = upper_32_bits(map); 2799 2791 dma_unmap_addr_set(e, mapaddr, map); 2800 2792 dma_unmap_len_set(e, maplen, skb_frag_size(frag)); 2801 2793 ··· 2826 2814 netif_stop_queue(dev); 2827 2815 } 2828 2816 2817 + return NETDEV_TX_OK; 2818 + 2819 + mapping_unwind: 2820 + e = skge->tx_ring.to_use; 2821 + pci_unmap_single(hw->pdev, 2822 + dma_unmap_addr(e, mapaddr), 2823 + dma_unmap_len(e, maplen), 2824 + PCI_DMA_TODEVICE); 2825 + while (i-- > 0) { 2826 + e = e->next; 2827 + pci_unmap_page(hw->pdev, 2828 + dma_unmap_addr(e, mapaddr), 2829 + dma_unmap_len(e, maplen), 2830 + PCI_DMA_TODEVICE); 2831 + } 2832 + 2833 + mapping_error: 2834 + if (net_ratelimit()) 2835 + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); 2836 + dev_kfree_skb(skb); 2829 2837 return NETDEV_TX_OK; 2830 2838 } 2831 2839 ··· 3077 3045 3078 3046 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3079 3047 dma_unmap_addr(e, mapaddr), 3080 - len, 
PCI_DMA_FROMDEVICE); 3048 + dma_unmap_len(e, maplen), 3049 + PCI_DMA_FROMDEVICE); 3081 3050 skb_copy_from_linear_data(e->skb, skb->data, len); 3082 3051 pci_dma_sync_single_for_device(skge->hw->pdev, 3083 3052 dma_unmap_addr(e, mapaddr), 3084 - len, PCI_DMA_FROMDEVICE); 3053 + dma_unmap_len(e, maplen), 3054 + PCI_DMA_FROMDEVICE); 3085 3055 skge_rx_reuse(e, skge->rx_buf_size); 3086 3056 } else { 3087 3057 struct sk_buff *nskb; ··· 3092 3058 if (!nskb) 3093 3059 goto resubmit; 3094 3060 3061 + if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3062 + dev_kfree_skb(nskb); 3063 + goto resubmit; 3064 + } 3065 + 3095 3066 pci_unmap_single(skge->hw->pdev, 3096 3067 dma_unmap_addr(e, mapaddr), 3097 3068 dma_unmap_len(e, maplen), 3098 3069 PCI_DMA_FROMDEVICE); 3099 3070 skb = e->skb; 3100 3071 prefetch(skb->data); 3101 - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 3102 3072 } 3103 3073 3104 3074 skb_put(skb, len);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 46 46 #include "mlx5_core.h" 47 47 48 48 enum { 49 - CMD_IF_REV = 4, 49 + CMD_IF_REV = 5, 50 50 }; 51 51 52 52 enum {
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 268 268 case MLX5_EVENT_TYPE_PAGE_REQUEST: 269 269 { 270 270 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); 271 - s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); 271 + s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); 272 272 273 273 mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); 274 274 mlx5_core_req_pages_handler(dev, func_id, npages);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fw.c
··· 113 113 caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; 114 114 caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; 115 115 caps->log_max_mcg = out->hca_cap.log_max_mcg; 116 - caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); 116 + caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; 117 117 caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); 118 118 caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); 119 119 caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
+1 -28
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 55 55 }; 56 56 57 57 static DEFINE_SPINLOCK(health_lock); 58 - 59 58 static LIST_HEAD(health_list); 60 59 static struct work_struct health_work; 61 - 62 - static health_handler_t reg_handler; 63 - int mlx5_register_health_report_handler(health_handler_t handler) 64 - { 65 - spin_lock_irq(&health_lock); 66 - if (reg_handler) { 67 - spin_unlock_irq(&health_lock); 68 - return -EEXIST; 69 - } 70 - reg_handler = handler; 71 - spin_unlock_irq(&health_lock); 72 - 73 - return 0; 74 - } 75 - EXPORT_SYMBOL(mlx5_register_health_report_handler); 76 - 77 - void mlx5_unregister_health_report_handler(void) 78 - { 79 - spin_lock_irq(&health_lock); 80 - reg_handler = NULL; 81 - spin_unlock_irq(&health_lock); 82 - } 83 - EXPORT_SYMBOL(mlx5_unregister_health_report_handler); 84 60 85 61 static void health_care(struct work_struct *work) 86 62 { ··· 74 98 priv = container_of(health, struct mlx5_priv, health); 75 99 dev = container_of(priv, struct mlx5_core_dev, priv); 76 100 mlx5_core_warn(dev, "handling bad device here\n"); 101 + /* nothing yet */ 77 102 spin_lock_irq(&health_lock); 78 - if (reg_handler) 79 - reg_handler(dev->pdev, health->health, 80 - sizeof(health->health)); 81 - 82 103 list_del_init(&health->list); 83 104 spin_unlock_irq(&health_lock); 84 105 }
+26 -32
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
··· 43 43 MLX5_PAGES_TAKE = 2 44 44 }; 45 45 46 + enum { 47 + MLX5_BOOT_PAGES = 1, 48 + MLX5_INIT_PAGES = 2, 49 + MLX5_POST_INIT_PAGES = 3 50 + }; 51 + 46 52 struct mlx5_pages_req { 47 53 struct mlx5_core_dev *dev; 48 54 u32 func_id; 49 - s16 npages; 55 + s32 npages; 50 56 struct work_struct work; 51 57 }; 52 58 ··· 70 64 71 65 struct mlx5_query_pages_outbox { 72 66 struct mlx5_outbox_hdr hdr; 73 - __be16 num_boot_pages; 67 + __be16 rsvd; 74 68 __be16 func_id; 75 - __be16 init_pages; 76 - __be16 num_pages; 69 + __be32 num_pages; 77 70 }; 78 71 79 72 struct mlx5_manage_pages_inbox { 80 73 struct mlx5_inbox_hdr hdr; 81 - __be16 rsvd0; 74 + __be16 rsvd; 82 75 __be16 func_id; 83 - __be16 rsvd1; 84 - __be16 num_entries; 85 - u8 rsvd2[16]; 76 + __be32 num_entries; 86 77 __be64 pas[0]; 87 78 }; 88 79 89 80 struct mlx5_manage_pages_outbox { 90 81 struct mlx5_outbox_hdr hdr; 91 - u8 rsvd0[2]; 92 - __be16 num_entries; 93 - u8 rsvd1[20]; 82 + __be32 num_entries; 83 + u8 rsvd[4]; 94 84 __be64 pas[0]; 95 85 }; 96 86 ··· 148 146 } 149 147 150 148 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, 151 - s16 *pages, s16 *init_pages, u16 *boot_pages) 149 + s32 *npages, int boot) 152 150 { 153 151 struct mlx5_query_pages_inbox in; 154 152 struct mlx5_query_pages_outbox out; ··· 157 155 memset(&in, 0, sizeof(in)); 158 156 memset(&out, 0, sizeof(out)); 159 157 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); 158 + in.hdr.opmod = boot ? 
cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); 159 + 160 160 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 161 161 if (err) 162 162 return err; ··· 166 162 if (out.hdr.status) 167 163 return mlx5_cmd_status_to_err(&out.hdr); 168 164 169 - if (pages) 170 - *pages = be16_to_cpu(out.num_pages); 171 - 172 - if (init_pages) 173 - *init_pages = be16_to_cpu(out.init_pages); 174 - 175 - if (boot_pages) 176 - *boot_pages = be16_to_cpu(out.num_boot_pages); 177 - 165 + *npages = be32_to_cpu(out.num_pages); 178 166 *func_id = be16_to_cpu(out.func_id); 179 167 180 168 return err; ··· 220 224 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); 221 225 in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); 222 226 in->func_id = cpu_to_be16(func_id); 223 - in->num_entries = cpu_to_be16(npages); 227 + in->num_entries = cpu_to_be32(npages); 224 228 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 225 229 mlx5_core_dbg(dev, "err %d\n", err); 226 230 if (err) { ··· 288 292 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); 289 293 in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); 290 294 in.func_id = cpu_to_be16(func_id); 291 - in.num_entries = cpu_to_be16(npages); 295 + in.num_entries = cpu_to_be32(npages); 292 296 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 293 297 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 294 298 if (err) { ··· 302 306 goto out_free; 303 307 } 304 308 305 - num_claimed = be16_to_cpu(out->num_entries); 309 + num_claimed = be32_to_cpu(out->num_entries); 306 310 if (nclaimed) 307 311 *nclaimed = num_claimed; 308 312 ··· 341 345 } 342 346 343 347 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, 344 - s16 npages) 348 + s32 npages) 345 349 { 346 350 struct mlx5_pages_req *req; 347 351 ··· 360 364 361 365 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) 362 366 { 363 - u16 uninitialized_var(boot_pages); 364 - s16 uninitialized_var(init_pages); 365 367 u16 
uninitialized_var(func_id); 368 + s32 uninitialized_var(npages); 366 369 int err; 367 370 368 - err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, 369 - &boot_pages); 371 + err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); 370 372 if (err) 371 373 return err; 372 374 375 + mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", 376 + npages, boot ? "boot" : "init", func_id); 373 377 374 - mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", 375 - init_pages, boot_pages, func_id); 376 - return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0); 378 + return give_pages(dev, func_id, npages, 0); 377 379 } 378 380 379 381 static int optimal_reclaimed_pages(void)
+5
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 3266 3266 u8 val; 3267 3267 int ret, max_sds_rings = adapter->max_sds_rings; 3268 3268 3269 + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { 3270 + netdev_info(netdev, "Device is resetting\n"); 3271 + return -EBUSY; 3272 + } 3273 + 3269 3274 if (qlcnic_get_diag_lock(adapter)) { 3270 3275 netdev_info(netdev, "Device in diagnostics mode\n"); 3271 3276 return -EBUSY;
+2 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
··· 629 629 return -EIO; 630 630 } 631 631 632 - qlcnic_set_drv_version(adapter); 632 + if (adapter->portnum == 0) 633 + qlcnic_set_drv_version(adapter); 633 634 qlcnic_83xx_idc_attach_driver(adapter); 634 635 635 636 return 0;
+4 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 2165 2165 if (err) 2166 2166 goto err_out_disable_mbx_intr; 2167 2167 2168 - qlcnic_set_drv_version(adapter); 2168 + if (adapter->portnum == 0) 2169 + qlcnic_set_drv_version(adapter); 2169 2170 2170 2171 pci_set_drvdata(pdev, adapter); 2171 2172 ··· 3086 3085 adapter->fw_fail_cnt = 0; 3087 3086 adapter->flags &= ~QLCNIC_FW_HANG; 3088 3087 clear_bit(__QLCNIC_RESETTING, &adapter->state); 3089 - qlcnic_set_drv_version(adapter); 3088 + if (adapter->portnum == 0) 3089 + qlcnic_set_drv_version(adapter); 3090 3090 3091 3091 if (!qlcnic_clr_drv_state(adapter)) 3092 3092 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+3 -3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
··· 170 170 171 171 if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { 172 172 err = qlcnic_get_beacon_state(adapter, &h_beacon_state); 173 - if (!err) { 174 - dev_info(&adapter->pdev->dev, 175 - "Failed to get current beacon state\n"); 173 + if (err) { 174 + netdev_err(adapter->netdev, 175 + "Failed to get current beacon state\n"); 176 176 } else { 177 177 if (h_beacon_state == QLCNIC_BEACON_DISABLE) 178 178 ahw->beacon_state = 0;
+1
drivers/net/ethernet/realtek/8139cp.c
··· 524 524 PCI_DMA_FROMDEVICE); 525 525 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { 526 526 dev->stats.rx_dropped++; 527 + kfree_skb(new_skb); 527 528 goto rx_next; 528 529 } 529 530
+1 -1
drivers/net/ethernet/realtek/r8169.c
··· 7088 7088 7089 7089 RTL_W8(Cfg9346, Cfg9346_Unlock); 7090 7090 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 7091 - RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); 7091 + RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); 7092 7092 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) 7093 7093 tp->features |= RTL_FEATURE_WOL; 7094 7094 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
+1 -1
drivers/net/ethernet/sfc/filter.c
··· 675 675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); 676 676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != 677 677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); 678 - rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; 678 + rep_index = spec->type - EFX_FILTER_UC_DEF; 679 679 ins_index = rep_index; 680 680 681 681 spin_lock_bh(&state->lock);
+11 -2
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
··· 33 33 struct stmmac_priv *priv = (struct stmmac_priv *)p; 34 34 unsigned int txsize = priv->dma_tx_size; 35 35 unsigned int entry = priv->cur_tx % txsize; 36 - struct dma_desc *desc = priv->dma_tx + entry; 36 + struct dma_desc *desc; 37 37 unsigned int nopaged_len = skb_headlen(skb); 38 38 unsigned int bmax, len; 39 + 40 + if (priv->extend_desc) 41 + desc = (struct dma_desc *)(priv->dma_etx + entry); 42 + else 43 + desc = priv->dma_tx + entry; 39 44 40 45 if (priv->plat->enh_desc) 41 46 bmax = BUF_SIZE_8KiB; ··· 59 54 STMMAC_RING_MODE); 60 55 wmb(); 61 56 entry = (++priv->cur_tx) % txsize; 62 - desc = priv->dma_tx + entry; 57 + 58 + if (priv->extend_desc) 59 + desc = (struct dma_desc *)(priv->dma_etx + entry); 60 + else 61 + desc = priv->dma_tx + entry; 63 62 64 63 desc->des2 = dma_map_single(priv->device, skb->data + bmax, 65 64 len, DMA_TO_DEVICE);
+92 -19
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 939 939 940 940 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, 941 941 GFP_KERNEL); 942 - if (unlikely(skb == NULL)) { 942 + if (!skb) { 943 943 pr_err("%s: Rx init fails; skb is NULL\n", __func__); 944 - return 1; 944 + return -ENOMEM; 945 945 } 946 946 skb_reserve(skb, NET_IP_ALIGN); 947 947 priv->rx_skbuff[i] = skb; 948 948 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, 949 949 priv->dma_buf_sz, 950 950 DMA_FROM_DEVICE); 951 + if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { 952 + pr_err("%s: DMA mapping error\n", __func__); 953 + dev_kfree_skb_any(skb); 954 + return -EINVAL; 955 + } 951 956 952 957 p->des2 = priv->rx_skbuff_dma[i]; 953 958 ··· 963 958 return 0; 964 959 } 965 960 961 + static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) 962 + { 963 + if (priv->rx_skbuff[i]) { 964 + dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], 965 + priv->dma_buf_sz, DMA_FROM_DEVICE); 966 + dev_kfree_skb_any(priv->rx_skbuff[i]); 967 + } 968 + priv->rx_skbuff[i] = NULL; 969 + } 970 + 966 971 /** 967 972 * init_dma_desc_rings - init the RX/TX descriptor rings 968 973 * @dev: net device structure ··· 980 965 * and allocates the socket buffers. It suppors the chained and ring 981 966 * modes. 982 967 */ 983 - static void init_dma_desc_rings(struct net_device *dev) 968 + static int init_dma_desc_rings(struct net_device *dev) 984 969 { 985 970 int i; 986 971 struct stmmac_priv *priv = netdev_priv(dev); 987 972 unsigned int txsize = priv->dma_tx_size; 988 973 unsigned int rxsize = priv->dma_rx_size; 989 974 unsigned int bfsize = 0; 975 + int ret = -ENOMEM; 990 976 991 977 /* Set the max buffer size according to the DESC mode 992 978 * and the MTU. Note that RING mode allows 16KiB bsize. 
··· 1008 992 dma_extended_desc), 1009 993 &priv->dma_rx_phy, 1010 994 GFP_KERNEL); 995 + if (!priv->dma_erx) 996 + goto err_dma; 997 + 1011 998 priv->dma_etx = dma_alloc_coherent(priv->device, txsize * 1012 999 sizeof(struct 1013 1000 dma_extended_desc), 1014 1001 &priv->dma_tx_phy, 1015 1002 GFP_KERNEL); 1016 - if ((!priv->dma_erx) || (!priv->dma_etx)) 1017 - return; 1003 + if (!priv->dma_etx) { 1004 + dma_free_coherent(priv->device, priv->dma_rx_size * 1005 + sizeof(struct dma_extended_desc), 1006 + priv->dma_erx, priv->dma_rx_phy); 1007 + goto err_dma; 1008 + } 1018 1009 } else { 1019 1010 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * 1020 1011 sizeof(struct dma_desc), 1021 1012 &priv->dma_rx_phy, 1022 1013 GFP_KERNEL); 1014 + if (!priv->dma_rx) 1015 + goto err_dma; 1016 + 1023 1017 priv->dma_tx = dma_alloc_coherent(priv->device, txsize * 1024 1018 sizeof(struct dma_desc), 1025 1019 &priv->dma_tx_phy, 1026 1020 GFP_KERNEL); 1027 - if ((!priv->dma_rx) || (!priv->dma_tx)) 1028 - return; 1021 + if (!priv->dma_tx) { 1022 + dma_free_coherent(priv->device, priv->dma_rx_size * 1023 + sizeof(struct dma_desc), 1024 + priv->dma_rx, priv->dma_rx_phy); 1025 + goto err_dma; 1026 + } 1029 1027 } 1030 1028 1031 1029 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), 1032 1030 GFP_KERNEL); 1031 + if (!priv->rx_skbuff_dma) 1032 + goto err_rx_skbuff_dma; 1033 + 1033 1034 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), 1034 1035 GFP_KERNEL); 1036 + if (!priv->rx_skbuff) 1037 + goto err_rx_skbuff; 1038 + 1035 1039 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), 1036 1040 GFP_KERNEL); 1041 + if (!priv->tx_skbuff_dma) 1042 + goto err_tx_skbuff_dma; 1043 + 1037 1044 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), 1038 1045 GFP_KERNEL); 1046 + if (!priv->tx_skbuff) 1047 + goto err_tx_skbuff; 1048 + 1039 1049 if (netif_msg_probe(priv)) { 1040 1050 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, 
1041 1051 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); ··· 1076 1034 else 1077 1035 p = priv->dma_rx + i; 1078 1036 1079 - if (stmmac_init_rx_buffers(priv, p, i)) 1080 - break; 1037 + ret = stmmac_init_rx_buffers(priv, p, i); 1038 + if (ret) 1039 + goto err_init_rx_buffers; 1081 1040 1082 1041 if (netif_msg_probe(priv)) 1083 1042 pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], ··· 1124 1081 1125 1082 if (netif_msg_hw(priv)) 1126 1083 stmmac_display_rings(priv); 1084 + 1085 + return 0; 1086 + err_init_rx_buffers: 1087 + while (--i >= 0) 1088 + stmmac_free_rx_buffers(priv, i); 1089 + kfree(priv->tx_skbuff); 1090 + err_tx_skbuff: 1091 + kfree(priv->tx_skbuff_dma); 1092 + err_tx_skbuff_dma: 1093 + kfree(priv->rx_skbuff); 1094 + err_rx_skbuff: 1095 + kfree(priv->rx_skbuff_dma); 1096 + err_rx_skbuff_dma: 1097 + if (priv->extend_desc) { 1098 + dma_free_coherent(priv->device, priv->dma_tx_size * 1099 + sizeof(struct dma_extended_desc), 1100 + priv->dma_etx, priv->dma_tx_phy); 1101 + dma_free_coherent(priv->device, priv->dma_rx_size * 1102 + sizeof(struct dma_extended_desc), 1103 + priv->dma_erx, priv->dma_rx_phy); 1104 + } else { 1105 + dma_free_coherent(priv->device, 1106 + priv->dma_tx_size * sizeof(struct dma_desc), 1107 + priv->dma_tx, priv->dma_tx_phy); 1108 + dma_free_coherent(priv->device, 1109 + priv->dma_rx_size * sizeof(struct dma_desc), 1110 + priv->dma_rx, priv->dma_rx_phy); 1111 + } 1112 + err_dma: 1113 + return ret; 1127 1114 } 1128 1115 1129 1116 static void dma_free_rx_skbufs(struct stmmac_priv *priv) 1130 1117 { 1131 1118 int i; 1132 1119 1133 - for (i = 0; i < priv->dma_rx_size; i++) { 1134 - if (priv->rx_skbuff[i]) { 1135 - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], 1136 - priv->dma_buf_sz, DMA_FROM_DEVICE); 1137 - dev_kfree_skb_any(priv->rx_skbuff[i]); 1138 - } 1139 - priv->rx_skbuff[i] = NULL; 1140 - } 1120 + for (i = 0; i < priv->dma_rx_size; i++) 1121 + stmmac_free_rx_buffers(priv, i); 1141 1122 } 1142 1123 1143 1124 static void 
dma_free_tx_skbufs(struct stmmac_priv *priv) ··· 1627 1560 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 1628 1561 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); 1629 1562 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 1630 - init_dma_desc_rings(dev); 1563 + 1564 + ret = init_dma_desc_rings(dev); 1565 + if (ret < 0) { 1566 + pr_err("%s: DMA descriptors initialization failed\n", __func__); 1567 + goto dma_desc_error; 1568 + } 1631 1569 1632 1570 /* DMA initialization and SW reset */ 1633 1571 ret = stmmac_init_dma_engine(priv); 1634 1572 if (ret < 0) { 1635 - pr_err("%s: DMA initialization failed\n", __func__); 1573 + pr_err("%s: DMA engine initialization failed\n", __func__); 1636 1574 goto init_error; 1637 1575 } 1638 1576 ··· 1744 1672 1745 1673 init_error: 1746 1674 free_dma_desc_resources(priv); 1675 + dma_desc_error: 1747 1676 if (priv->phydev) 1748 1677 phy_disconnect(priv->phydev); 1749 1678 phy_error:
+3 -1
drivers/net/ethernet/via/via-velocity.c
··· 2100 2100 2101 2101 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 2102 2102 } 2103 - netif_rx(skb); 2103 + netif_receive_skb(skb); 2104 2104 2105 2105 stats->rx_bytes += pkt_len; 2106 2106 stats->rx_packets++; ··· 2884 2884 return ret; 2885 2885 2886 2886 err_iounmap: 2887 + netif_napi_del(&vptr->napi); 2887 2888 iounmap(regs); 2888 2889 err_free_dev: 2889 2890 free_netdev(netdev); ··· 2905 2904 struct velocity_info *vptr = netdev_priv(netdev); 2906 2905 2907 2906 unregister_netdev(netdev); 2907 + netif_napi_del(&vptr->napi); 2908 2908 iounmap(vptr->mac_regs); 2909 2909 free_netdev(netdev); 2910 2910 velocity_nics--;
+2 -4
drivers/net/irda/via-ircc.c
··· 210 210 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); 211 211 pci_write_config_byte(pcidev,0x5a,0xc0); 212 212 WriteLPCReg(0x28, 0x70 ); 213 - if (via_ircc_open(pcidev, &info, 0x3076) == 0) 214 - rc=0; 213 + rc = via_ircc_open(pcidev, &info, 0x3076); 215 214 } else 216 215 rc = -ENODEV; //IR not turn on 217 216 } else { //Not VT1211 ··· 248 249 info.irq=FirIRQ; 249 250 info.dma=FirDRQ1; 250 251 info.dma2=FirDRQ0; 251 - if (via_ircc_open(pcidev, &info, 0x3096) == 0) 252 - rc=0; 252 + rc = via_ircc_open(pcidev, &info, 0x3096); 253 253 } else 254 254 rc = -ENODEV; //IR not turn on !!!!! 255 255 }//Not VT1211
+4
drivers/net/macvlan.c
··· 739 739 return -EADDRNOTAVAIL; 740 740 } 741 741 742 + if (data && data[IFLA_MACVLAN_FLAGS] && 743 + nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) 744 + return -EINVAL; 745 + 742 746 if (data && data[IFLA_MACVLAN_MODE]) { 743 747 switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { 744 748 case MACVLAN_MODE_PRIVATE:
+18 -12
drivers/net/macvtap.c
··· 68 68 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 69 69 NETIF_F_TSO6 | NETIF_F_UFO) 70 70 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 71 + #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 72 + 71 73 /* 72 74 * RCU usage: 73 75 * The macvtap_queue and the macvlan_dev are loosely coupled, the ··· 280 278 { 281 279 struct macvlan_dev *vlan = netdev_priv(dev); 282 280 struct macvtap_queue *q = macvtap_get_queue(dev, skb); 283 - netdev_features_t features; 281 + netdev_features_t features = TAP_FEATURES; 282 + 284 283 if (!q) 285 284 goto drop; 286 285 ··· 290 287 291 288 skb->dev = dev; 292 289 /* Apply the forward feature mask so that we perform segmentation 293 - * according to users wishes. 290 + * according to users wishes. This only works if VNET_HDR is 291 + * enabled. 294 292 */ 295 - features = netif_skb_features(skb) & vlan->tap_features; 293 + if (q->flags & IFF_VNET_HDR) 294 + features |= vlan->tap_features; 296 295 if (netif_needs_gso(skb, features)) { 297 296 struct sk_buff *segs = __skb_gso_segment(skb, features, false); 298 297 ··· 823 818 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 824 819 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 825 820 } 826 - if (vlan) 821 + if (vlan) { 822 + local_bh_disable(); 827 823 macvlan_start_xmit(skb, vlan->dev); 828 - else 824 + local_bh_enable(); 825 + } else { 829 826 kfree_skb(skb); 827 + } 830 828 rcu_read_unlock(); 831 829 832 830 return total_len; ··· 920 912 done: 921 913 rcu_read_lock(); 922 914 vlan = rcu_dereference(q->vlan); 923 - if (vlan) 915 + if (vlan) { 916 + preempt_disable(); 924 917 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); 918 + preempt_enable(); 919 + } 925 920 rcu_read_unlock(); 926 921 927 922 return ret ? ret : copied; ··· 1069 1058 /* tap_features are the same as features on tun/tap and 1070 1059 * reflect user expectations. 
1071 1060 */ 1072 - vlan->tap_features = vlan->dev->features & 1073 - (feature_mask | ~TUN_OFFLOADS); 1061 + vlan->tap_features = feature_mask; 1074 1062 vlan->set_features = features; 1075 1063 netdev_update_features(vlan->dev); 1076 1064 ··· 1165 1155 TUN_F_TSO_ECN | TUN_F_UFO)) 1166 1156 return -EINVAL; 1167 1157 1168 - /* TODO: only accept frames with the features that 1169 - got enabled for forwarded frames */ 1170 - if (!(q->flags & IFF_VNET_HDR)) 1171 - return -EINVAL; 1172 1158 rtnl_lock(); 1173 1159 ret = set_offload(q, arg); 1174 1160 rtnl_unlock();
+2 -2
drivers/net/phy/realtek.c
··· 23 23 #define RTL821x_INER_INIT 0x6400 24 24 #define RTL821x_INSR 0x13 25 25 26 - #define RTL8211E_INER_LINK_STAT 0x10 26 + #define RTL8211E_INER_LINK_STATUS 0x400 27 27 28 28 MODULE_DESCRIPTION("Realtek PHY driver"); 29 29 MODULE_AUTHOR("Johnson Leung"); ··· 57 57 58 58 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 59 59 err = phy_write(phydev, RTL821x_INER, 60 - RTL8211E_INER_LINK_STAT); 60 + RTL8211E_INER_LINK_STATUS); 61 61 else 62 62 err = phy_write(phydev, RTL821x_INER, 0); 63 63
+4 -2
drivers/net/tun.c
··· 1074 1074 u32 rxhash; 1075 1075 1076 1076 if (!(tun->flags & TUN_NO_PI)) { 1077 - if ((len -= sizeof(pi)) > total_len) 1077 + if (len < sizeof(pi)) 1078 1078 return -EINVAL; 1079 + len -= sizeof(pi); 1079 1080 1080 1081 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) 1081 1082 return -EFAULT; ··· 1084 1083 } 1085 1084 1086 1085 if (tun->flags & TUN_VNET_HDR) { 1087 - if ((len -= tun->vnet_hdr_sz) > total_len) 1086 + if (len < tun->vnet_hdr_sz) 1088 1087 return -EINVAL; 1088 + len -= tun->vnet_hdr_sz; 1089 1089 1090 1090 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) 1091 1091 return -EFAULT;
+10 -5
drivers/net/usb/hso.c
··· 2816 2816 static int hso_get_config_data(struct usb_interface *interface) 2817 2817 { 2818 2818 struct usb_device *usbdev = interface_to_usbdev(interface); 2819 - u8 config_data[17]; 2819 + u8 *config_data = kmalloc(17, GFP_KERNEL); 2820 2820 u32 if_num = interface->altsetting->desc.bInterfaceNumber; 2821 2821 s32 result; 2822 2822 2823 + if (!config_data) 2824 + return -ENOMEM; 2823 2825 if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 2824 2826 0x86, 0xC0, 0, 0, config_data, 17, 2825 2827 USB_CTRL_SET_TIMEOUT) != 0x11) { 2828 + kfree(config_data); 2826 2829 return -EIO; 2827 2830 } 2828 2831 ··· 2876 2873 if (config_data[16] & 0x1) 2877 2874 result |= HSO_INFO_CRC_BUG; 2878 2875 2876 + kfree(config_data); 2879 2877 return result; 2880 2878 } 2881 2879 ··· 2890 2886 struct hso_shared_int *shared_int; 2891 2887 struct hso_device *tmp_dev = NULL; 2892 2888 2889 + if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { 2890 + dev_err(&interface->dev, "Not our interface\n"); 2891 + return -ENODEV; 2892 + } 2893 + 2893 2894 if_num = interface->altsetting->desc.bInterfaceNumber; 2894 2895 2895 2896 /* Get the interface/port specification from either driver_info or from ··· 2904 2895 else 2905 2896 port_spec = hso_get_config_data(interface); 2906 2897 2907 - if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { 2908 - dev_err(&interface->dev, "Not our interface\n"); 2909 - return -ENODEV; 2910 - } 2911 2898 /* Check if we need to switch to alt interfaces prior to port 2912 2899 * configuration */ 2913 2900 if (interface->num_altsetting > 1)
+1 -3
drivers/net/vxlan.c
··· 1386 1386 return -ENOTCONN; 1387 1387 1388 1388 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && 1389 - ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { 1389 + vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { 1390 1390 vxlan_sock_hold(vs); 1391 1391 dev_hold(dev); 1392 1392 queue_work(vxlan_wq, &vxlan->igmp_join); ··· 1792 1792 { 1793 1793 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 1794 1794 struct vxlan_dev *vxlan = netdev_priv(dev); 1795 - 1796 - flush_workqueue(vxlan_wq); 1797 1795 1798 1796 spin_lock(&vn->sock_lock); 1799 1797 hlist_del_rcu(&vxlan->hlist);
+2 -5
drivers/net/wireless/cw1200/sta.c
··· 1406 1406 if (!priv->join_status) 1407 1407 goto done; 1408 1408 1409 - if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { 1410 - wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", 1411 - priv->join_status); 1412 - BUG_ON(1); 1413 - } 1409 + if (priv->join_status == CW1200_JOIN_STATUS_AP) 1410 + goto done; 1414 1411 1415 1412 cancel_work_sync(&priv->update_filtering_work); 1416 1413 cancel_work_sync(&priv->set_beacon_wakeup_period_work);
+2 -2
drivers/net/wireless/hostap/hostap_ioctl.c
··· 523 523 524 524 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); 525 525 526 - memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); 526 + memcpy(extra, addr, sizeof(struct sockaddr) * data->length); 527 527 data->flags = 1; /* has quality information */ 528 - memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, 528 + memcpy(extra + sizeof(struct sockaddr) * data->length, qual, 529 529 sizeof(struct iw_quality) * data->length); 530 530 531 531 kfree(addr);
+8 -8
drivers/net/wireless/iwlegacy/4965-mac.c
··· 4460 4460 * is killed. Hence update the killswitch state here. The 4461 4461 * rfkill handler will care about restarting if needed. 4462 4462 */ 4463 - if (!test_bit(S_ALIVE, &il->status)) { 4464 - if (hw_rf_kill) 4465 - set_bit(S_RFKILL, &il->status); 4466 - else 4467 - clear_bit(S_RFKILL, &il->status); 4463 + if (hw_rf_kill) { 4464 + set_bit(S_RFKILL, &il->status); 4465 + } else { 4466 + clear_bit(S_RFKILL, &il->status); 4468 4467 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); 4468 + il_force_reset(il, true); 4469 4469 } 4470 4470 4471 4471 handled |= CSR_INT_BIT_RF_KILL; ··· 5334 5334 5335 5335 il->active_rate = RATES_MASK; 5336 5336 5337 + il_power_update_mode(il, true); 5338 + D_INFO("Updated power mode\n"); 5339 + 5337 5340 if (il_is_associated(il)) { 5338 5341 struct il_rxon_cmd *active_rxon = 5339 5342 (struct il_rxon_cmd *)&il->active; ··· 5366 5363 5367 5364 D_INFO("ALIVE processing complete.\n"); 5368 5365 wake_up(&il->wait_command_queue); 5369 - 5370 - il_power_update_mode(il, true); 5371 - D_INFO("Updated power mode\n"); 5372 5366 5373 5367 return; 5374 5368
+1
drivers/net/wireless/iwlegacy/common.c
··· 4660 4660 4661 4661 return 0; 4662 4662 } 4663 + EXPORT_SYMBOL(il_force_reset); 4663 4664 4664 4665 int 4665 4666 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+4 -1
drivers/net/wireless/iwlwifi/dvm/mac80211.c
··· 1068 1068 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1069 1069 return; 1070 1070 1071 - if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 1071 + if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 1072 + return; 1073 + 1074 + if (ctx->vif) 1072 1075 ieee80211_chswitch_done(ctx->vif, is_success); 1073 1076 } 1074 1077
-2
drivers/net/wireless/iwlwifi/iwl-prph.h
··· 97 97 98 98 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 99 99 100 - #define APMG_RTC_INT_STT_RFKILL (0x10000000) 101 - 102 100 /* Device system time */ 103 101 #define DEVICE_SYSTEM_TIME_REG 0xA0206C 104 102
+23 -10
drivers/net/wireless/iwlwifi/mvm/time-event.c
··· 138 138 schedule_work(&mvm->roc_done_wk); 139 139 } 140 140 141 + static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, 142 + struct ieee80211_vif *vif, 143 + const char *errmsg) 144 + { 145 + if (vif->type != NL80211_IFTYPE_STATION) 146 + return false; 147 + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) 148 + return false; 149 + if (errmsg) 150 + IWL_ERR(mvm, "%s\n", errmsg); 151 + ieee80211_connection_loss(vif); 152 + return true; 153 + } 154 + 141 155 /* 142 156 * Handles a FW notification for an event that is known to the driver. 143 157 * ··· 177 163 * P2P Device discoveribility, while there are other higher priority 178 164 * events in the system). 179 165 */ 180 - WARN_ONCE(!le32_to_cpu(notif->status), 181 - "Failed to schedule time event\n"); 166 + if (WARN_ONCE(!le32_to_cpu(notif->status), 167 + "Failed to schedule time event\n")) { 168 + if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { 169 + iwl_mvm_te_clear_data(mvm, te_data); 170 + return; 171 + } 172 + } 182 173 183 174 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { 184 175 IWL_DEBUG_TE(mvm, ··· 199 180 * By now, we should have finished association 200 181 * and know the dtim period. 201 182 */ 202 - if (te_data->vif->type == NL80211_IFTYPE_STATION && 203 - (!te_data->vif->bss_conf.assoc || 204 - !te_data->vif->bss_conf.dtim_period)) { 205 - IWL_ERR(mvm, 206 - "No assocation and the time event is over already...\n"); 207 - ieee80211_connection_loss(te_data->vif); 208 - } 209 - 183 + iwl_mvm_te_check_disconnect(mvm, te_data->vif, 184 + "No assocation and the time event is over already..."); 210 185 iwl_mvm_te_clear_data(mvm, te_data); 211 186 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { 212 187 te_data->running = true;
-8
drivers/net/wireless/iwlwifi/pcie/rx.c
··· 888 888 889 889 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 890 890 if (hw_rfkill) { 891 - /* 892 - * Clear the interrupt in APMG if the NIC is going down. 893 - * Note that when the NIC exits RFkill (else branch), we 894 - * can't access prph and the NIC will be reset in 895 - * start_hw anyway. 896 - */ 897 - iwl_write_prph(trans, APMG_RTC_INT_STT_REG, 898 - APMG_RTC_INT_STT_RFKILL); 899 891 set_bit(STATUS_RFKILL, &trans_pcie->status); 900 892 if (test_and_clear_bit(STATUS_HCMD_ACTIVE, 901 893 &trans_pcie->status))
+5 -5
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 1502 1502 spin_lock_init(&trans_pcie->reg_lock); 1503 1503 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 1504 1504 1505 - /* W/A - seems to solve weird behavior. We need to remove this if we 1506 - * don't want to stay in L1 all the time. This wastes a lot of power */ 1507 - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | 1508 - PCIE_LINK_STATE_CLKPM); 1509 - 1510 1505 if (pci_enable_device(pdev)) { 1511 1506 err = -ENODEV; 1512 1507 goto out_no_pci; 1513 1508 } 1509 + 1510 + /* W/A - seems to solve weird behavior. We need to remove this if we 1511 + * don't want to stay in L1 all the time. This wastes a lot of power */ 1512 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | 1513 + PCIE_LINK_STATE_CLKPM); 1514 1514 1515 1515 pci_set_master(pdev); 1516 1516
+3 -1
drivers/net/wireless/zd1201.c
··· 98 98 goto exit; 99 99 100 100 err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, 101 - USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); 101 + USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); 102 102 if (err < 0) 103 103 goto exit; 104 + 105 + memcpy(&ret, buf, sizeof(ret)); 104 106 105 107 if (ret & 0x80) { 106 108 err = -EIO;
+2
drivers/of/fdt.c
··· 392 392 mem = (unsigned long) 393 393 dt_alloc(size + 4, __alignof__(struct device_node)); 394 394 395 + memset((void *)mem, 0, size); 396 + 395 397 ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); 396 398 397 399 pr_debug(" unflattening %lx...\n", mem);
+62 -4
drivers/pinctrl/pinctrl-sunxi.c
··· 278 278 { 279 279 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 280 280 struct sunxi_pinctrl_group *g = &pctl->groups[group]; 281 + unsigned long flags; 281 282 u32 val, mask; 282 283 u16 strength; 283 284 u8 dlevel; ··· 296 295 * 3: 40mA 297 296 */ 298 297 dlevel = strength / 10 - 1; 298 + 299 + spin_lock_irqsave(&pctl->lock, flags); 300 + 299 301 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); 300 302 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); 301 303 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), 302 304 pctl->membase + sunxi_dlevel_reg(g->pin)); 305 + 306 + spin_unlock_irqrestore(&pctl->lock, flags); 303 307 break; 304 308 case PIN_CONFIG_BIAS_PULL_UP: 309 + spin_lock_irqsave(&pctl->lock, flags); 310 + 305 311 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 306 312 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 307 313 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), 308 314 pctl->membase + sunxi_pull_reg(g->pin)); 315 + 316 + spin_unlock_irqrestore(&pctl->lock, flags); 309 317 break; 310 318 case PIN_CONFIG_BIAS_PULL_DOWN: 319 + spin_lock_irqsave(&pctl->lock, flags); 320 + 311 321 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 312 322 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 313 323 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), 314 324 pctl->membase + sunxi_pull_reg(g->pin)); 325 + 326 + spin_unlock_irqrestore(&pctl->lock, flags); 315 327 break; 316 328 default: 317 329 break; ··· 374 360 u8 config) 375 361 { 376 362 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 363 + unsigned long flags; 364 + u32 val, mask; 377 365 378 - u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); 379 - u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); 366 + spin_lock_irqsave(&pctl->lock, flags); 367 + 368 + val = readl(pctl->membase + sunxi_mux_reg(pin)); 369 + mask = MUX_PINS_MASK << sunxi_mux_offset(pin); 380 370 writel((val & ~mask) | config << 
sunxi_mux_offset(pin), 381 371 pctl->membase + sunxi_mux_reg(pin)); 372 + 373 + spin_unlock_irqrestore(&pctl->lock, flags); 382 374 } 383 375 384 376 static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, ··· 484 464 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); 485 465 u32 reg = sunxi_data_reg(offset); 486 466 u8 index = sunxi_data_offset(offset); 467 + unsigned long flags; 468 + u32 regval; 487 469 488 - writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); 470 + spin_lock_irqsave(&pctl->lock, flags); 471 + 472 + regval = readl(pctl->membase + reg); 473 + 474 + if (value) 475 + regval |= BIT(index); 476 + else 477 + regval &= ~(BIT(index)); 478 + 479 + writel(regval, pctl->membase + reg); 480 + 481 + spin_unlock_irqrestore(&pctl->lock, flags); 489 482 } 490 483 491 484 static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, ··· 559 526 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 560 527 u32 reg = sunxi_irq_cfg_reg(d->hwirq); 561 528 u8 index = sunxi_irq_cfg_offset(d->hwirq); 529 + unsigned long flags; 530 + u32 regval; 562 531 u8 mode; 563 532 564 533 switch (type) { ··· 583 548 return -EINVAL; 584 549 } 585 550 586 - writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); 551 + spin_lock_irqsave(&pctl->lock, flags); 552 + 553 + regval = readl(pctl->membase + reg); 554 + regval &= ~IRQ_CFG_IRQ_MASK; 555 + writel(regval | (mode << index), pctl->membase + reg); 556 + 557 + spin_unlock_irqrestore(&pctl->lock, flags); 587 558 588 559 return 0; 589 560 } ··· 601 560 u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); 602 561 u32 status_reg = sunxi_irq_status_reg(d->hwirq); 603 562 u8 status_idx = sunxi_irq_status_offset(d->hwirq); 563 + unsigned long flags; 604 564 u32 val; 565 + 566 + spin_lock_irqsave(&pctl->lock, flags); 605 567 606 568 /* Mask the IRQ */ 607 569 val = readl(pctl->membase + ctrl_reg); ··· 612 568 613 569 /* Clear the IRQ */ 614 570 writel(1 << status_idx, pctl->membase + status_reg); 571 + 572 + 
spin_unlock_irqrestore(&pctl->lock, flags); 615 573 } 616 574 617 575 static void sunxi_pinctrl_irq_mask(struct irq_data *d) ··· 621 575 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 622 576 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 623 577 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 578 + unsigned long flags; 624 579 u32 val; 580 + 581 + spin_lock_irqsave(&pctl->lock, flags); 625 582 626 583 /* Mask the IRQ */ 627 584 val = readl(pctl->membase + reg); 628 585 writel(val & ~(1 << idx), pctl->membase + reg); 586 + 587 + spin_unlock_irqrestore(&pctl->lock, flags); 629 588 } 630 589 631 590 static void sunxi_pinctrl_irq_unmask(struct irq_data *d) ··· 639 588 struct sunxi_desc_function *func; 640 589 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 641 590 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 591 + unsigned long flags; 642 592 u32 val; 643 593 644 594 func = sunxi_pinctrl_desc_find_function_by_pin(pctl, ··· 649 597 /* Change muxing to INT mode */ 650 598 sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); 651 599 600 + spin_lock_irqsave(&pctl->lock, flags); 601 + 652 602 /* Unmask the IRQ */ 653 603 val = readl(pctl->membase + reg); 654 604 writel(val | (1 << idx), pctl->membase + reg); 605 + 606 + spin_unlock_irqrestore(&pctl->lock, flags); 655 607 } 656 608 657 609 static struct irq_chip sunxi_pinctrl_irq_chip = { ··· 807 751 if (!pctl) 808 752 return -ENOMEM; 809 753 platform_set_drvdata(pdev, pctl); 754 + 755 + spin_lock_init(&pctl->lock); 810 756 811 757 pctl->membase = of_iomap(node, 0); 812 758 if (!pctl->membase)
+2
drivers/pinctrl/pinctrl-sunxi.h
··· 14 14 #define __PINCTRL_SUNXI_H 15 15 16 16 #include <linux/kernel.h> 17 + #include <linux/spinlock.h> 17 18 18 19 #define PA_BASE 0 19 20 #define PB_BASE 32 ··· 408 407 unsigned ngroups; 409 408 int irq; 410 409 int irq_array[SUNXI_IRQ_NUMBER]; 410 + spinlock_t lock; 411 411 struct pinctrl_dev *pctl_dev; 412 412 }; 413 413
+1 -1
drivers/platform/olpc/olpc-ec.c
··· 330 330 return platform_driver_register(&olpc_ec_plat_driver); 331 331 } 332 332 333 - module_init(olpc_ec_init_module); 333 + arch_initcall(olpc_ec_init_module); 334 334 335 335 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); 336 336 MODULE_LICENSE("GPL");
-16
drivers/platform/x86/hp-wmi.c
··· 53 53 #define HPWMI_ALS_QUERY 0x3 54 54 #define HPWMI_HARDWARE_QUERY 0x4 55 55 #define HPWMI_WIRELESS_QUERY 0x5 56 - #define HPWMI_BIOS_QUERY 0x9 57 56 #define HPWMI_HOTKEY_QUERY 0xc 58 57 #define HPWMI_WIRELESS2_QUERY 0x1b 59 58 #define HPWMI_POSTCODEERROR_QUERY 0x2a ··· 290 291 return ret; 291 292 292 293 return (state & 0x4) ? 1 : 0; 293 - } 294 - 295 - static int hp_wmi_enable_hotkeys(void) 296 - { 297 - int ret; 298 - int query = 0x6e; 299 - 300 - ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), 301 - 0); 302 - 303 - if (ret) 304 - return -EINVAL; 305 - return 0; 306 294 } 307 295 308 296 static int hp_wmi_set_block(void *data, bool blocked) ··· 995 1009 err = hp_wmi_input_setup(); 996 1010 if (err) 997 1011 return err; 998 - 999 - hp_wmi_enable_hotkeys(); 1000 1012 } 1001 1013 1002 1014 if (bios_capable) {
+6 -2
drivers/platform/x86/sony-laptop.c
··· 2440 2440 if (pos < 0) 2441 2441 return pos; 2442 2442 2443 - return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina"); 2443 + return snprintf(buffer, PAGE_SIZE, "%s\n", 2444 + pos == SPEED ? "speed" : 2445 + pos == STAMINA ? "stamina" : 2446 + pos == AUTO ? "auto" : "unknown"); 2444 2447 } 2445 2448 2446 2449 static int sony_nc_gfx_switch_setup(struct platform_device *pd, ··· 4323 4320 goto err_free_resources; 4324 4321 } 4325 4322 4326 - if (sonypi_compat_init()) 4323 + result = sonypi_compat_init(); 4324 + if (result) 4327 4325 goto err_remove_input; 4328 4326 4329 4327 /* request io port */
+25 -10
drivers/rtc/rtc-stmp3xxx.c
··· 23 23 #include <linux/init.h> 24 24 #include <linux/platform_device.h> 25 25 #include <linux/interrupt.h> 26 + #include <linux/delay.h> 26 27 #include <linux/rtc.h> 27 28 #include <linux/slab.h> 28 29 #include <linux/of_device.h> ··· 120 119 } 121 120 #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ 122 121 123 - static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) 122 + static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) 124 123 { 124 + int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */ 125 125 /* 126 - * The datasheet doesn't say which way round the 127 - * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0, 128 - * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS 126 + * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010 127 + * states: 128 + * | The order in which registers are updated is 129 + * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds. 130 + * | (This list is in bitfield order, from LSB to MSB, as they would 131 + * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT 132 + * | register. For example, the Seconds register corresponds to 133 + * | STALE_REGS or NEW_REGS containing 0x80.) 129 134 */ 130 - while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & 131 - (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) 132 - cpu_relax(); 135 + do { 136 + if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & 137 + (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))) 138 + return 0; 139 + udelay(1); 140 + } while (--timeout > 0); 141 + return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & 142 + (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? 
-ETIME : 0; 133 143 } 134 144 135 145 /* Time read/write */ 136 146 static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 137 147 { 148 + int ret; 138 149 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); 139 150 140 - stmp3xxx_wait_time(rtc_data); 151 + ret = stmp3xxx_wait_time(rtc_data); 152 + if (ret) 153 + return ret; 154 + 141 155 rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); 142 156 return 0; 143 157 } ··· 162 146 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); 163 147 164 148 writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); 165 - stmp3xxx_wait_time(rtc_data); 166 - return 0; 149 + return stmp3xxx_wait_time(rtc_data); 167 150 } 168 151 169 152 /* interrupt(s) handler */
+22 -7
drivers/s390/scsi/zfcp_erp.c
··· 102 102 103 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 104 104 zfcp_erp_action_dismiss(&port->erp_action); 105 - else 106 - shost_for_each_device(sdev, port->adapter->scsi_host) 105 + else { 106 + spin_lock(port->adapter->scsi_host->host_lock); 107 + __shost_for_each_device(sdev, port->adapter->scsi_host) 107 108 if (sdev_to_zfcp(sdev)->port == port) 108 109 zfcp_erp_action_dismiss_lun(sdev); 110 + spin_unlock(port->adapter->scsi_host->host_lock); 111 + } 109 112 } 110 113 111 114 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) ··· 595 592 { 596 593 struct scsi_device *sdev; 597 594 598 - shost_for_each_device(sdev, port->adapter->scsi_host) 595 + spin_lock(port->adapter->scsi_host->host_lock); 596 + __shost_for_each_device(sdev, port->adapter->scsi_host) 599 597 if (sdev_to_zfcp(sdev)->port == port) 600 598 _zfcp_erp_lun_reopen(sdev, clear, id, 0); 599 + spin_unlock(port->adapter->scsi_host->host_lock); 601 600 } 602 601 603 602 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) ··· 1439 1434 atomic_set_mask(common_mask, &port->status); 1440 1435 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1441 1436 1442 - shost_for_each_device(sdev, adapter->scsi_host) 1437 + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); 1438 + __shost_for_each_device(sdev, adapter->scsi_host) 1443 1439 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1440 + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); 1444 1441 } 1445 1442 1446 1443 /** ··· 1476 1469 } 1477 1470 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1478 1471 1479 - shost_for_each_device(sdev, adapter->scsi_host) { 1472 + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); 1473 + __shost_for_each_device(sdev, adapter->scsi_host) { 1480 1474 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1481 1475 if (clear_counter) 1482 1476 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1483 
1477 } 1478 + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); 1484 1479 } 1485 1480 1486 1481 /** ··· 1496 1487 { 1497 1488 struct scsi_device *sdev; 1498 1489 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1490 + unsigned long flags; 1499 1491 1500 1492 atomic_set_mask(mask, &port->status); 1501 1493 1502 1494 if (!common_mask) 1503 1495 return; 1504 1496 1505 - shost_for_each_device(sdev, port->adapter->scsi_host) 1497 + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); 1498 + __shost_for_each_device(sdev, port->adapter->scsi_host) 1506 1499 if (sdev_to_zfcp(sdev)->port == port) 1507 1500 atomic_set_mask(common_mask, 1508 1501 &sdev_to_zfcp(sdev)->status); 1502 + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); 1509 1503 } 1510 1504 1511 1505 /** ··· 1523 1511 struct scsi_device *sdev; 1524 1512 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1525 1513 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; 1514 + unsigned long flags; 1526 1515 1527 1516 atomic_clear_mask(mask, &port->status); 1528 1517 ··· 1533 1520 if (clear_counter) 1534 1521 atomic_set(&port->erp_counter, 0); 1535 1522 1536 - shost_for_each_device(sdev, port->adapter->scsi_host) 1523 + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); 1524 + __shost_for_each_device(sdev, port->adapter->scsi_host) 1537 1525 if (sdev_to_zfcp(sdev)->port == port) { 1538 1526 atomic_clear_mask(common_mask, 1539 1527 &sdev_to_zfcp(sdev)->status); 1540 1528 if (clear_counter) 1541 1529 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1542 1530 } 1531 + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); 1543 1532 } 1544 1533 1545 1534 /**
+2 -6
drivers/s390/scsi/zfcp_qdio.c
··· 224 224 225 225 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 226 226 { 227 - spin_lock_irq(&qdio->req_q_lock); 228 227 if (atomic_read(&qdio->req_q_free) || 229 228 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 230 229 return 1; 231 - spin_unlock_irq(&qdio->req_q_lock); 232 230 return 0; 233 231 } 234 232 ··· 244 246 { 245 247 long ret; 246 248 247 - spin_unlock_irq(&qdio->req_q_lock); 248 - ret = wait_event_interruptible_timeout(qdio->req_q_wq, 249 - zfcp_qdio_sbal_check(qdio), 5 * HZ); 249 + ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, 250 + zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); 250 251 251 252 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 252 253 return -EIO; ··· 259 262 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); 260 263 } 261 264 262 - spin_lock_irq(&qdio->req_q_lock); 263 265 return -EIO; 264 266 } 265 267
+14
drivers/s390/scsi/zfcp_sysfs.c
··· 27 27 static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 28 28 zfcp_sysfs_##_feat##_##_name##_show, NULL); 29 29 30 + #define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \ 31 + static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ 32 + struct device_attribute *at,\ 33 + char *buf) \ 34 + { \ 35 + return sprintf(buf, _format, _value); \ 36 + } \ 37 + static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 38 + zfcp_sysfs_##_feat##_##_name##_show, NULL); 39 + 30 40 #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ 31 41 static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ 32 42 struct device_attribute *at,\ ··· 85 75 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", 86 76 (zfcp_unit_sdev_status(unit) & 87 77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 78 + ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0); 79 + ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0); 88 80 89 81 static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, 90 82 struct device_attribute *attr, ··· 359 347 &dev_attr_unit_in_recovery.attr, 360 348 &dev_attr_unit_status.attr, 361 349 &dev_attr_unit_access_denied.attr, 350 + &dev_attr_unit_access_shared.attr, 351 + &dev_attr_unit_access_readonly.attr, 362 352 NULL 363 353 }; 364 354 static struct attribute_group zfcp_unit_attr_group = {
-1
drivers/scsi/Kconfig
··· 1353 1353 tristate "Emulex LightPulse Fibre Channel Support" 1354 1354 depends on PCI && SCSI 1355 1355 select SCSI_FC_ATTRS 1356 - select GENERIC_CSUM 1357 1356 select CRC_T10DIF 1358 1357 help 1359 1358 This lpfc driver supports the Emulex LightPulse
+1 -1
drivers/staging/comedi/drivers.c
··· 482 482 ret = comedi_device_postconfig(dev); 483 483 if (ret < 0) { 484 484 comedi_device_detach(dev); 485 - module_put(dev->driver->module); 485 + module_put(driv->module); 486 486 } 487 487 /* On success, the driver module count has been incremented. */ 488 488 return ret;
+2 -2
drivers/tty/hvc/hvsi_lib.c
··· 341 341 342 342 pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); 343 343 344 - /* Try for up to 200s */ 345 - for (timeout = 0; timeout < 20; timeout++) { 344 + /* Try for up to 400ms */ 345 + for (timeout = 0; timeout < 40; timeout++) { 346 346 if (pv->established) 347 347 goto established; 348 348 if (!hvsi_get_packet(pv))
+4 -4
drivers/usb/class/usbtmc.c
··· 1119 1119 /* Determine if it is a Rigol or not */ 1120 1120 data->rigol_quirk = 0; 1121 1121 dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", 1122 - data->usb_dev->descriptor.idVendor, 1123 - data->usb_dev->descriptor.idProduct); 1122 + le16_to_cpu(data->usb_dev->descriptor.idVendor), 1123 + le16_to_cpu(data->usb_dev->descriptor.idProduct)); 1124 1124 for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { 1125 - if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && 1126 - (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { 1125 + if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) && 1126 + (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) { 1127 1127 dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); 1128 1128 data->rigol_quirk = 1; 1129 1129 break;
+6
drivers/usb/core/quirks.c
··· 78 78 { USB_DEVICE(0x04d8, 0x000c), .driver_info = 79 79 USB_QUIRK_CONFIG_INTF_STRINGS }, 80 80 81 + /* CarrolTouch 4000U */ 82 + { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, 83 + 84 + /* CarrolTouch 4500U */ 85 + { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, 86 + 81 87 /* Samsung Android phone modem - ID conflict with SPH-I500 */ 82 88 { USB_DEVICE(0x04e8, 0x6601), .driver_info = 83 89 USB_QUIRK_CONFIG_INTF_STRINGS },
+6 -7
drivers/usb/host/ehci-sched.c
··· 1391 1391 1392 1392 /* Behind the scheduling threshold? */ 1393 1393 if (unlikely(start < next)) { 1394 + unsigned now2 = (now - base) & (mod - 1); 1394 1395 1395 1396 /* USB_ISO_ASAP: Round up to the first available slot */ 1396 1397 if (urb->transfer_flags & URB_ISO_ASAP) 1397 1398 start += (next - start + period - 1) & -period; 1398 1399 1399 1400 /* 1400 - * Not ASAP: Use the next slot in the stream. If 1401 - * the entire URB falls before the threshold, fail. 1401 + * Not ASAP: Use the next slot in the stream, 1402 + * no matter what. 1402 1403 */ 1403 - else if (start + span - period < next) { 1404 - ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", 1404 + else if (start + span - period < now2) { 1405 + ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", 1405 1406 urb, start + base, 1406 - span - period, next + base); 1407 - status = -EXDEV; 1408 - goto fail; 1407 + span - period, now2 + base); 1409 1408 } 1410 1409 } 1411 1410
+7
drivers/usb/host/ohci-pci.c
··· 304 304 pr_info("%s: " DRIVER_DESC "\n", hcd_name); 305 305 306 306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); 307 + 308 + #ifdef CONFIG_PM 309 + /* Entries for the PCI suspend/resume callbacks are special */ 310 + ohci_pci_hc_driver.pci_suspend = ohci_suspend; 311 + ohci_pci_hc_driver.pci_resume = ohci_resume; 312 + #endif 313 + 307 314 return pci_register_driver(&ohci_pci_driver); 308 315 } 309 316 module_init(ohci_pci_init);
+1 -1
drivers/usb/misc/adutux.c
··· 830 830 831 831 /* let the user know what node this device is now attached to */ 832 832 dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", 833 - udev->descriptor.idProduct, dev->serial_number, 833 + le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, 834 834 (dev->minor - ADU_MINOR_BASE)); 835 835 exit: 836 836 dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
+1 -1
drivers/usb/phy/phy-fsl-usb.h
··· 15 15 * 675 Mass Ave, Cambridge, MA 02139, USA. 16 16 */ 17 17 18 - #include "otg_fsm.h" 18 + #include "phy-fsm-usb.h" 19 19 #include <linux/usb/otg.h> 20 20 #include <linux/ioctl.h> 21 21
+1 -1
drivers/usb/phy/phy-fsm-usb.c
··· 29 29 #include <linux/usb/gadget.h> 30 30 #include <linux/usb/otg.h> 31 31 32 - #include "phy-otg-fsm.h" 32 + #include "phy-fsm-usb.h" 33 33 34 34 /* Change USB protocol when there is a protocol change */ 35 35 static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
+1 -1
drivers/usb/serial/keyspan.c
··· 2303 2303 if (d_details == NULL) { 2304 2304 dev_err(&serial->dev->dev, "%s - unknown product id %x\n", 2305 2305 __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); 2306 - return 1; 2306 + return -ENODEV; 2307 2307 } 2308 2308 2309 2309 /* Setup private data for serial driver */
+14 -7
drivers/usb/serial/mos7720.c
··· 90 90 struct list_head urblist_entry; 91 91 struct kref ref_count; 92 92 struct urb *urb; 93 + struct usb_ctrlrequest *setup; 93 94 }; 94 95 95 96 enum mos7715_pp_modes { ··· 272 271 struct mos7715_parport *mos_parport = urbtrack->mos_parport; 273 272 274 273 usb_free_urb(urbtrack->urb); 274 + kfree(urbtrack->setup); 275 275 kfree(urbtrack); 276 276 kref_put(&mos_parport->ref_count, destroy_mos_parport); 277 277 } ··· 357 355 struct urbtracker *urbtrack; 358 356 int ret_val; 359 357 unsigned long flags; 360 - struct usb_ctrlrequest setup; 361 358 struct usb_serial *serial = mos_parport->serial; 362 359 struct usb_device *usbdev = serial->dev; 363 360 ··· 374 373 kfree(urbtrack); 375 374 return -ENOMEM; 376 375 } 377 - setup.bRequestType = (__u8)0x40; 378 - setup.bRequest = (__u8)0x0e; 379 - setup.wValue = get_reg_value(reg, dummy); 380 - setup.wIndex = get_reg_index(reg); 381 - setup.wLength = 0; 376 + urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); 377 + if (!urbtrack->setup) { 378 + usb_free_urb(urbtrack->urb); 379 + kfree(urbtrack); 380 + return -ENOMEM; 381 + } 382 + urbtrack->setup->bRequestType = (__u8)0x40; 383 + urbtrack->setup->bRequest = (__u8)0x0e; 384 + urbtrack->setup->wValue = get_reg_value(reg, dummy); 385 + urbtrack->setup->wIndex = get_reg_index(reg); 386 + urbtrack->setup->wLength = 0; 382 387 usb_fill_control_urb(urbtrack->urb, usbdev, 383 388 usb_sndctrlpipe(usbdev, 0), 384 - (unsigned char *)&setup, 389 + (unsigned char *)urbtrack->setup, 385 390 NULL, 0, async_complete, urbtrack); 386 391 kref_init(&urbtrack->ref_count); 387 392 INIT_LIST_HEAD(&urbtrack->urblist_entry);
+1 -1
drivers/usb/serial/mos7840.c
··· 2193 2193 static int mos7840_probe(struct usb_serial *serial, 2194 2194 const struct usb_device_id *id) 2195 2195 { 2196 - u16 product = serial->dev->descriptor.idProduct; 2196 + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); 2197 2197 u8 *buf; 2198 2198 int device_type; 2199 2199
+5 -4
drivers/usb/serial/ti_usb_3410_5052.c
··· 1536 1536 char buf[32]; 1537 1537 1538 1538 /* try ID specific firmware first, then try generic firmware */ 1539 - sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, 1540 - dev->descriptor.idProduct); 1539 + sprintf(buf, "ti_usb-v%04x-p%04x.fw", 1540 + le16_to_cpu(dev->descriptor.idVendor), 1541 + le16_to_cpu(dev->descriptor.idProduct)); 1541 1542 status = request_firmware(&fw_p, buf, &dev->dev); 1542 1543 1543 1544 if (status != 0) { 1544 1545 buf[0] = '\0'; 1545 - if (dev->descriptor.idVendor == MTS_VENDOR_ID) { 1546 - switch (dev->descriptor.idProduct) { 1546 + if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { 1547 + switch (le16_to_cpu(dev->descriptor.idProduct)) { 1547 1548 case MTS_CDMA_PRODUCT_ID: 1548 1549 strcpy(buf, "mts_cdma.fw"); 1549 1550 break;
+10 -10
drivers/usb/serial/usb_wwan.c
··· 291 291 tty_flip_buffer_push(&port->port); 292 292 } else 293 293 dev_dbg(dev, "%s: empty read urb received\n", __func__); 294 - 295 - /* Resubmit urb so we continue receiving */ 296 - err = usb_submit_urb(urb, GFP_ATOMIC); 297 - if (err) { 298 - if (err != -EPERM) { 299 - dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); 300 - /* busy also in error unless we are killed */ 301 - usb_mark_last_busy(port->serial->dev); 302 - } 303 - } else { 294 + } 295 + /* Resubmit urb so we continue receiving */ 296 + err = usb_submit_urb(urb, GFP_ATOMIC); 297 + if (err) { 298 + if (err != -EPERM) { 299 + dev_err(dev, "%s: resubmit read urb failed. (%d)\n", 300 + __func__, err); 301 + /* busy also in error unless we are killed */ 304 302 usb_mark_last_busy(port->serial->dev); 305 303 } 304 + } else { 305 + usb_mark_last_busy(port->serial->dev); 306 306 } 307 307 } 308 308
+7 -2
drivers/usb/wusbcore/wa-xfer.c
··· 1226 1226 } 1227 1227 spin_lock_irqsave(&xfer->lock, flags); 1228 1228 rpipe = xfer->ep->hcpriv; 1229 + if (rpipe == NULL) { 1230 + pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", 1231 + __func__, wa_xfer_id(xfer), 1232 + "Probably already aborted.\n" ); 1233 + goto out_unlock; 1234 + } 1229 1235 /* Check the delayed list -> if there, release and complete */ 1230 1236 spin_lock_irqsave(&wa->xfer_list_lock, flags2); 1231 1237 if (!list_empty(&xfer->list_node) && xfer->seg == NULL) ··· 1650 1644 break; 1651 1645 } 1652 1646 usb_status = xfer_result->bTransferStatus & 0x3f; 1653 - if (usb_status == WA_XFER_STATUS_ABORTED 1654 - || usb_status == WA_XFER_STATUS_NOT_FOUND) 1647 + if (usb_status == WA_XFER_STATUS_NOT_FOUND) 1655 1648 /* taken care of already */ 1656 1649 break; 1657 1650 xfer_id = xfer_result->dwTransferID;
+12 -1
drivers/xen/events.c
··· 348 348 349 349 for_each_possible_cpu(i) 350 350 memset(per_cpu(cpu_evtchn_mask, i), 351 - (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); 351 + (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); 352 352 } 353 353 354 354 static inline void clear_evtchn(int port) ··· 1493 1493 /* Rebind an evtchn so that it gets delivered to a specific cpu */ 1494 1494 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 1495 1495 { 1496 + struct shared_info *s = HYPERVISOR_shared_info; 1496 1497 struct evtchn_bind_vcpu bind_vcpu; 1497 1498 int evtchn = evtchn_from_irq(irq); 1499 + int masked; 1498 1500 1499 1501 if (!VALID_EVTCHN(evtchn)) 1500 1502 return -1; ··· 1513 1511 bind_vcpu.vcpu = tcpu; 1514 1512 1515 1513 /* 1514 + * Mask the event while changing the VCPU binding to prevent 1515 + * it being delivered on an unexpected VCPU. 1516 + */ 1517 + masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); 1518 + 1519 + /* 1516 1520 * If this fails, it usually just indicates that we're dealing with a 1517 1521 * virq or IPI channel, which don't actually need to be rebound. Ignore 1518 1522 * it, but don't do the xenlinux-level rebind in that case. 1519 1523 */ 1520 1524 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 1521 1525 bind_evtchn_to_cpu(evtchn, tcpu); 1526 + 1527 + if (!masked) 1528 + unmask_evtchn(evtchn); 1522 1529 1523 1530 return 0; 1524 1531 }
+1 -1
fs/bfs/inode.c
··· 40 40 int block, off; 41 41 42 42 inode = iget_locked(sb, ino); 43 - if (IS_ERR(inode)) 43 + if (!inode) 44 44 return ERR_PTR(-ENOMEM); 45 45 if (!(inode->i_state & I_NEW)) 46 46 return inode;
+15 -5
fs/bio.c
··· 1045 1045 int bio_uncopy_user(struct bio *bio) 1046 1046 { 1047 1047 struct bio_map_data *bmd = bio->bi_private; 1048 - int ret = 0; 1048 + struct bio_vec *bvec; 1049 + int ret = 0, i; 1049 1050 1050 - if (!bio_flagged(bio, BIO_NULL_MAPPED)) 1051 - ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, 1052 - bmd->nr_sgvecs, bio_data_dir(bio) == READ, 1053 - 0, bmd->is_our_pages); 1051 + if (!bio_flagged(bio, BIO_NULL_MAPPED)) { 1052 + /* 1053 + * if we're in a workqueue, the request is orphaned, so 1054 + * don't copy into a random user address space, just free. 1055 + */ 1056 + if (current->mm) 1057 + ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, 1058 + bmd->nr_sgvecs, bio_data_dir(bio) == READ, 1059 + 0, bmd->is_our_pages); 1060 + else if (bmd->is_our_pages) 1061 + bio_for_each_segment_all(bvec, bio, i) 1062 + __free_page(bvec->bv_page); 1063 + } 1054 1064 bio_free_map_data(bmd); 1055 1065 bio_put(bio); 1056 1066 return ret;
+9 -5
fs/cifs/cifsencrypt.c
··· 43 43 server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); 44 44 if (IS_ERR(server->secmech.md5)) { 45 45 cifs_dbg(VFS, "could not allocate crypto md5\n"); 46 - return PTR_ERR(server->secmech.md5); 46 + rc = PTR_ERR(server->secmech.md5); 47 + server->secmech.md5 = NULL; 48 + return rc; 47 49 } 48 50 49 51 size = sizeof(struct shash_desc) + 50 52 crypto_shash_descsize(server->secmech.md5); 51 53 server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); 52 54 if (!server->secmech.sdescmd5) { 53 - rc = -ENOMEM; 54 55 crypto_free_shash(server->secmech.md5); 55 56 server->secmech.md5 = NULL; 56 - return rc; 57 + return -ENOMEM; 57 58 } 58 59 server->secmech.sdescmd5->shash.tfm = server->secmech.md5; 59 60 server->secmech.sdescmd5->shash.flags = 0x0; ··· 422 421 if (blobptr + attrsize > blobend) 423 422 break; 424 423 if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { 425 - if (!attrsize) 424 + if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN) 426 425 break; 427 426 if (!ses->domainName) { 428 427 ses->domainName = ··· 592 591 593 592 static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) 594 593 { 594 + int rc; 595 595 unsigned int size; 596 596 597 597 /* check if already allocated */ ··· 602 600 server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); 603 601 if (IS_ERR(server->secmech.hmacmd5)) { 604 602 cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); 605 - return PTR_ERR(server->secmech.hmacmd5); 603 + rc = PTR_ERR(server->secmech.hmacmd5); 604 + server->secmech.hmacmd5 = NULL; 605 + return rc; 606 606 } 607 607 608 608 size = sizeof(struct shash_desc) +
+5 -6
fs/cifs/cifsfs.c
··· 147 147 goto out_no_root; 148 148 } 149 149 150 + if (cifs_sb_master_tcon(cifs_sb)->nocase) 151 + sb->s_d_op = &cifs_ci_dentry_ops; 152 + else 153 + sb->s_d_op = &cifs_dentry_ops; 154 + 150 155 sb->s_root = d_make_root(inode); 151 156 if (!sb->s_root) { 152 157 rc = -ENOMEM; 153 158 goto out_no_root; 154 159 } 155 - 156 - /* do that *after* d_make_root() - we want NULL ->d_op for root here */ 157 - if (cifs_sb_master_tcon(cifs_sb)->nocase) 158 - sb->s_d_op = &cifs_ci_dentry_ops; 159 - else 160 - sb->s_d_op = &cifs_dentry_ops; 161 160 162 161 #ifdef CONFIG_CIFS_NFSD_EXPORT 163 162 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+4
fs/cifs/cifsglob.h
··· 44 44 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) 45 45 #define MAX_SERVER_SIZE 15 46 46 #define MAX_SHARE_SIZE 80 47 + #define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */ 47 48 #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ 48 49 #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ 49 50 ··· 370 369 void (*generate_signingkey)(struct TCP_Server_Info *server); 371 370 int (*calc_signature)(struct smb_rqst *rqst, 372 371 struct TCP_Server_Info *server); 372 + int (*query_mf_symlink)(const unsigned char *path, char *pbuf, 373 + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, 374 + unsigned int xid); 373 375 }; 374 376 375 377 struct smb_version_values {
+3 -1
fs/cifs/cifsproto.h
··· 497 497 struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, 498 498 work_func_t complete); 499 499 void cifs_writedata_release(struct kref *refcount); 500 - 500 + int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, 501 + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, 502 + unsigned int xid); 501 503 #endif /* _CIFSPROTO_H */
+4 -3
fs/cifs/connect.c
··· 1675 1675 if (string == NULL) 1676 1676 goto out_nomem; 1677 1677 1678 - if (strnlen(string, 256) == 256) { 1678 + if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN) 1679 + == CIFS_MAX_DOMAINNAME_LEN) { 1679 1680 printk(KERN_WARNING "CIFS: domain name too" 1680 1681 " long\n"); 1681 1682 goto cifs_parse_mount_err; ··· 2277 2276 2278 2277 #ifdef CONFIG_KEYS 2279 2278 2280 - /* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ 2281 - #define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) 2279 + /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ 2280 + #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) 2282 2281 2283 2282 /* Populate username and pw fields from keyring if possible */ 2284 2283 static int
+1
fs/cifs/file.c
··· 647 647 oflags, &oplock, &cfile->fid.netfid, xid); 648 648 if (rc == 0) { 649 649 cifs_dbg(FYI, "posix reopen succeeded\n"); 650 + oparms.reconnect = true; 650 651 goto reopen_success; 651 652 } 652 653 /*
+53 -31
fs/cifs/link.c
··· 305 305 } 306 306 307 307 int 308 - CIFSCheckMFSymlink(struct cifs_fattr *fattr, 309 - const unsigned char *path, 310 - struct cifs_sb_info *cifs_sb, unsigned int xid) 308 + open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, 309 + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, 310 + unsigned int xid) 311 311 { 312 312 int rc; 313 313 int oplock = 0; 314 314 __u16 netfid = 0; 315 315 struct tcon_link *tlink; 316 - struct cifs_tcon *pTcon; 316 + struct cifs_tcon *ptcon; 317 317 struct cifs_io_parms io_parms; 318 - u8 *buf; 319 - char *pbuf; 320 - unsigned int bytes_read = 0; 321 318 int buf_type = CIFS_NO_BUFFER; 322 - unsigned int link_len = 0; 323 319 FILE_ALL_INFO file_info; 324 - 325 - if (!CIFSCouldBeMFSymlink(fattr)) 326 - /* it's not a symlink */ 327 - return 0; 328 320 329 321 tlink = cifs_sb_tlink(cifs_sb); 330 322 if (IS_ERR(tlink)) 331 323 return PTR_ERR(tlink); 332 - pTcon = tlink_tcon(tlink); 324 + ptcon = tlink_tcon(tlink); 333 325 334 - rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, 326 + rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ, 335 327 CREATE_NOT_DIR, &netfid, &oplock, &file_info, 336 328 cifs_sb->local_nls, 337 329 cifs_sb->mnt_cifs_flags & 338 330 CIFS_MOUNT_MAP_SPECIAL_CHR); 339 - if (rc != 0) 340 - goto out; 331 + if (rc != 0) { 332 + cifs_put_tlink(tlink); 333 + return rc; 334 + } 341 335 342 336 if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { 343 - CIFSSMBClose(xid, pTcon, netfid); 337 + CIFSSMBClose(xid, ptcon, netfid); 338 + cifs_put_tlink(tlink); 344 339 /* it's not a symlink */ 345 - goto out; 340 + return rc; 346 341 } 342 + 343 + io_parms.netfid = netfid; 344 + io_parms.pid = current->tgid; 345 + io_parms.tcon = ptcon; 346 + io_parms.offset = 0; 347 + io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; 348 + 349 + rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type); 350 + CIFSSMBClose(xid, ptcon, netfid); 351 + cifs_put_tlink(tlink); 352 + return rc; 
353 + } 354 + 355 + 356 + int 357 + CIFSCheckMFSymlink(struct cifs_fattr *fattr, 358 + const unsigned char *path, 359 + struct cifs_sb_info *cifs_sb, unsigned int xid) 360 + { 361 + int rc = 0; 362 + u8 *buf = NULL; 363 + unsigned int link_len = 0; 364 + unsigned int bytes_read = 0; 365 + struct cifs_tcon *ptcon; 366 + 367 + if (!CIFSCouldBeMFSymlink(fattr)) 368 + /* it's not a symlink */ 369 + return 0; 347 370 348 371 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); 349 372 if (!buf) { 350 373 rc = -ENOMEM; 351 374 goto out; 352 375 } 353 - pbuf = buf; 354 - io_parms.netfid = netfid; 355 - io_parms.pid = current->tgid; 356 - io_parms.tcon = pTcon; 357 - io_parms.offset = 0; 358 - io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; 359 376 360 - rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); 361 - CIFSSMBClose(xid, pTcon, netfid); 362 - if (rc != 0) { 363 - kfree(buf); 377 + ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); 378 + if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) 379 + rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, 380 + &bytes_read, cifs_sb, xid); 381 + else 364 382 goto out; 365 - } 383 + 384 + if (rc != 0) 385 + goto out; 386 + 387 + if (bytes_read == 0) /* not a symlink */ 388 + goto out; 366 389 367 390 rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); 368 - kfree(buf); 369 391 if (rc == -EINVAL) { 370 392 /* it's not a symlink */ 371 393 rc = 0; ··· 403 381 fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; 404 382 fattr->cf_dtype = DT_LNK; 405 383 out: 406 - cifs_put_tlink(tlink); 384 + kfree(buf); 407 385 return rc; 408 386 } 409 387
+8
fs/cifs/readdir.c
··· 111 111 return; 112 112 } 113 113 114 + /* 115 + * If we know that the inode will need to be revalidated immediately, 116 + * then don't create a new dentry for it. We'll end up doing an on 117 + * the wire call either way and this spares us an invalidation. 118 + */ 119 + if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) 120 + return; 121 + 114 122 dentry = d_alloc(parent, name); 115 123 if (!dentry) 116 124 return;
+3 -3
fs/cifs/sess.c
··· 197 197 bytes_ret = 0; 198 198 } else 199 199 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, 200 - 256, nls_cp); 200 + CIFS_MAX_DOMAINNAME_LEN, nls_cp); 201 201 bcc_ptr += 2 * bytes_ret; 202 202 bcc_ptr += 2; /* account for null terminator */ 203 203 ··· 255 255 256 256 /* copy domain */ 257 257 if (ses->domainName != NULL) { 258 - strncpy(bcc_ptr, ses->domainName, 256); 259 - bcc_ptr += strnlen(ses->domainName, 256); 258 + strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 259 + bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 260 260 } /* else we will send a null domain name 261 261 so the server will default to its own domain */ 262 262 *bcc_ptr = 0;
+1
fs/cifs/smb1ops.c
··· 944 944 .mand_lock = cifs_mand_lock, 945 945 .mand_unlock_range = cifs_unlock_range, 946 946 .push_mand_locks = cifs_push_mandatory_locks, 947 + .query_mf_symlink = open_query_close_cifs_symlink, 947 948 }; 948 949 949 950 struct smb_version_values smb1_values = {
+7 -2
fs/cifs/smb2transport.c
··· 42 42 static int 43 43 smb2_crypto_shash_allocate(struct TCP_Server_Info *server) 44 44 { 45 + int rc; 45 46 unsigned int size; 46 47 47 48 if (server->secmech.sdeschmacsha256 != NULL) ··· 51 50 server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); 52 51 if (IS_ERR(server->secmech.hmacsha256)) { 53 52 cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); 54 - return PTR_ERR(server->secmech.hmacsha256); 53 + rc = PTR_ERR(server->secmech.hmacsha256); 54 + server->secmech.hmacsha256 = NULL; 55 + return rc; 55 56 } 56 57 57 58 size = sizeof(struct shash_desc) + ··· 90 87 server->secmech.sdeschmacsha256 = NULL; 91 88 crypto_free_shash(server->secmech.hmacsha256); 92 89 server->secmech.hmacsha256 = NULL; 93 - return PTR_ERR(server->secmech.cmacaes); 90 + rc = PTR_ERR(server->secmech.cmacaes); 91 + server->secmech.cmacaes = NULL; 92 + return rc; 94 93 } 95 94 96 95 size = sizeof(struct shash_desc) +
+34 -34
fs/dcache.c
··· 229 229 */ 230 230 static void d_free(struct dentry *dentry) 231 231 { 232 - BUG_ON(dentry->d_count); 232 + BUG_ON(dentry->d_lockref.count); 233 233 this_cpu_dec(nr_dentry); 234 234 if (dentry->d_op && dentry->d_op->d_release) 235 235 dentry->d_op->d_release(dentry); ··· 467 467 } 468 468 469 469 if (ref) 470 - dentry->d_count--; 470 + dentry->d_lockref.count--; 471 471 /* 472 472 * inform the fs via d_prune that this dentry is about to be 473 473 * unhashed and destroyed. ··· 513 513 return; 514 514 515 515 repeat: 516 - if (dentry->d_count == 1) 516 + if (dentry->d_lockref.count == 1) 517 517 might_sleep(); 518 - spin_lock(&dentry->d_lock); 519 - BUG_ON(!dentry->d_count); 520 - if (dentry->d_count > 1) { 521 - dentry->d_count--; 522 - spin_unlock(&dentry->d_lock); 518 + if (lockref_put_or_lock(&dentry->d_lockref)) 523 519 return; 524 - } 525 520 526 521 if (dentry->d_flags & DCACHE_OP_DELETE) { 527 522 if (dentry->d_op->d_delete(dentry)) ··· 530 535 dentry->d_flags |= DCACHE_REFERENCED; 531 536 dentry_lru_add(dentry); 532 537 533 - dentry->d_count--; 538 + dentry->d_lockref.count--; 534 539 spin_unlock(&dentry->d_lock); 535 540 return; 536 541 ··· 585 590 * We also need to leave mountpoints alone, 586 591 * directory or not. 
587 592 */ 588 - if (dentry->d_count > 1 && dentry->d_inode) { 593 + if (dentry->d_lockref.count > 1 && dentry->d_inode) { 589 594 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { 590 595 spin_unlock(&dentry->d_lock); 591 596 return -EBUSY; ··· 601 606 /* This must be called with d_lock held */ 602 607 static inline void __dget_dlock(struct dentry *dentry) 603 608 { 604 - dentry->d_count++; 609 + dentry->d_lockref.count++; 605 610 } 606 611 607 612 static inline void __dget(struct dentry *dentry) 608 613 { 609 - spin_lock(&dentry->d_lock); 610 - __dget_dlock(dentry); 611 - spin_unlock(&dentry->d_lock); 614 + lockref_get(&dentry->d_lockref); 612 615 } 613 616 614 617 struct dentry *dget_parent(struct dentry *dentry) ··· 627 634 goto repeat; 628 635 } 629 636 rcu_read_unlock(); 630 - BUG_ON(!ret->d_count); 631 - ret->d_count++; 637 + BUG_ON(!ret->d_lockref.count); 638 + ret->d_lockref.count++; 632 639 spin_unlock(&ret->d_lock); 633 640 return ret; 634 641 } ··· 711 718 spin_lock(&inode->i_lock); 712 719 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { 713 720 spin_lock(&dentry->d_lock); 714 - if (!dentry->d_count) { 721 + if (!dentry->d_lockref.count) { 715 722 __dget_dlock(dentry); 716 723 __d_drop(dentry); 717 724 spin_unlock(&dentry->d_lock); ··· 756 763 /* Prune ancestors. */ 757 764 dentry = parent; 758 765 while (dentry) { 759 - spin_lock(&dentry->d_lock); 760 - if (dentry->d_count > 1) { 761 - dentry->d_count--; 762 - spin_unlock(&dentry->d_lock); 766 + if (lockref_put_or_lock(&dentry->d_lockref)) 763 767 return; 764 - } 765 768 dentry = dentry_kill(dentry, 1); 766 769 } 767 770 } ··· 782 793 * the LRU because of laziness during lookup. Do not free 783 794 * it - just keep it off the LRU list. 
784 795 */ 785 - if (dentry->d_count) { 796 + if (dentry->d_lockref.count) { 786 797 dentry_lru_del(dentry); 787 798 spin_unlock(&dentry->d_lock); 788 799 continue; ··· 902 913 dentry_lru_del(dentry); 903 914 __d_shrink(dentry); 904 915 905 - if (dentry->d_count != 0) { 916 + if (dentry->d_lockref.count != 0) { 906 917 printk(KERN_ERR 907 918 "BUG: Dentry %p{i=%lx,n=%s}" 908 919 " still in use (%d)" ··· 911 922 dentry->d_inode ? 912 923 dentry->d_inode->i_ino : 0UL, 913 924 dentry->d_name.name, 914 - dentry->d_count, 925 + dentry->d_lockref.count, 915 926 dentry->d_sb->s_type->name, 916 927 dentry->d_sb->s_id); 917 928 BUG(); ··· 922 933 list_del(&dentry->d_u.d_child); 923 934 } else { 924 935 parent = dentry->d_parent; 925 - parent->d_count--; 936 + parent->d_lockref.count--; 926 937 list_del(&dentry->d_u.d_child); 927 938 } 928 939 ··· 970 981 971 982 dentry = sb->s_root; 972 983 sb->s_root = NULL; 973 - dentry->d_count--; 984 + dentry->d_lockref.count--; 974 985 shrink_dcache_for_umount_subtree(dentry); 975 986 976 987 while (!hlist_bl_empty(&sb->s_anon)) { ··· 1136 1147 * loop in shrink_dcache_parent() might not make any progress 1137 1148 * and loop forever. 
1138 1149 */ 1139 - if (dentry->d_count) { 1150 + if (dentry->d_lockref.count) { 1140 1151 dentry_lru_del(dentry); 1141 1152 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1142 1153 dentry_lru_move_list(dentry, dispose); ··· 1258 1269 smp_wmb(); 1259 1270 dentry->d_name.name = dname; 1260 1271 1261 - dentry->d_count = 1; 1272 + dentry->d_lockref.count = 1; 1262 1273 dentry->d_flags = 0; 1263 1274 spin_lock_init(&dentry->d_lock); 1264 1275 seqcount_init(&dentry->d_seq); ··· 1959 1970 goto next; 1960 1971 } 1961 1972 1962 - dentry->d_count++; 1973 + dentry->d_lockref.count++; 1963 1974 found = dentry; 1964 1975 spin_unlock(&dentry->d_lock); 1965 1976 break; ··· 2058 2069 spin_lock(&dentry->d_lock); 2059 2070 inode = dentry->d_inode; 2060 2071 isdir = S_ISDIR(inode->i_mode); 2061 - if (dentry->d_count == 1) { 2072 + if (dentry->d_lockref.count == 1) { 2062 2073 if (!spin_trylock(&inode->i_lock)) { 2063 2074 spin_unlock(&dentry->d_lock); 2064 2075 cpu_relax(); ··· 2713 2724 return memcpy(buffer, temp, sz); 2714 2725 } 2715 2726 2727 + char *simple_dname(struct dentry *dentry, char *buffer, int buflen) 2728 + { 2729 + char *end = buffer + buflen; 2730 + /* these dentries are never renamed, so d_lock is not needed */ 2731 + if (prepend(&end, &buflen, " (deleted)", 11) || 2732 + prepend_name(&end, &buflen, &dentry->d_name) || 2733 + prepend(&end, &buflen, "/", 1)) 2734 + end = ERR_PTR(-ENAMETOOLONG); 2735 + return end; 2736 + } 2737 + 2716 2738 /* 2717 2739 * Write full pathname from the root of the filesystem into the buffer. 
2718 2740 */ ··· 2937 2937 } 2938 2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 2939 2939 dentry->d_flags |= DCACHE_GENOCIDE; 2940 - dentry->d_count--; 2940 + dentry->d_lockref.count--; 2941 2941 } 2942 2942 spin_unlock(&dentry->d_lock); 2943 2943 } ··· 2945 2945 struct dentry *child = this_parent; 2946 2946 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 2947 2947 this_parent->d_flags |= DCACHE_GENOCIDE; 2948 - this_parent->d_count--; 2948 + this_parent->d_lockref.count--; 2949 2949 } 2950 2950 this_parent = try_to_ascend(this_parent, locked, seq); 2951 2951 if (!this_parent)
+1 -1
fs/efs/inode.c
··· 57 57 struct inode *inode; 58 58 59 59 inode = iget_locked(super, ino); 60 - if (IS_ERR(inode)) 60 + if (!inode) 61 61 return ERR_PTR(-ENOMEM); 62 62 if (!(inode->i_state & I_NEW)) 63 63 return inode;
+2 -2
fs/exec.c
··· 608 608 return -ENOMEM; 609 609 610 610 lru_add_drain(); 611 - tlb_gather_mmu(&tlb, mm, 0); 611 + tlb_gather_mmu(&tlb, mm, old_start, old_end); 612 612 if (new_end > old_start) { 613 613 /* 614 614 * when the old and new regions overlap clear from new_end. ··· 625 625 free_pgd_range(&tlb, old_start, old_end, new_end, 626 626 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); 627 627 } 628 - tlb_finish_mmu(&tlb, new_end, old_end); 628 + tlb_finish_mmu(&tlb, old_start, old_end); 629 629 630 630 /* 631 631 * Shrink the vma to just the new range. Always succeeds.
+1
fs/ext4/ext4.h
··· 2086 2086 extern void ext4_dirty_inode(struct inode *, int); 2087 2087 extern int ext4_change_inode_journal_flag(struct inode *, int); 2088 2088 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); 2089 + extern int ext4_inode_attach_jinode(struct inode *inode); 2089 2090 extern int ext4_can_truncate(struct inode *inode); 2090 2091 extern void ext4_truncate(struct inode *); 2091 2092 extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
+4 -4
fs/ext4/ext4_jbd2.c
··· 255 255 set_buffer_prio(bh); 256 256 if (ext4_handle_valid(handle)) { 257 257 err = jbd2_journal_dirty_metadata(handle, bh); 258 - if (err) { 259 - /* Errors can only happen if there is a bug */ 260 - handle->h_err = err; 261 - __ext4_journal_stop(where, line, handle); 258 + /* Errors can only happen if there is a bug */ 259 + if (WARN_ON_ONCE(err)) { 260 + ext4_journal_abort_handle(where, line, __func__, bh, 261 + handle, err); 262 262 } 263 263 } else { 264 264 if (inode)
+4 -17
fs/ext4/file.c
··· 219 219 { 220 220 struct super_block *sb = inode->i_sb; 221 221 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 222 - struct ext4_inode_info *ei = EXT4_I(inode); 223 222 struct vfsmount *mnt = filp->f_path.mnt; 224 223 struct path path; 225 224 char buf[64], *cp; ··· 258 259 * Set up the jbd2_inode if we are opening the inode for 259 260 * writing and the journal is present 260 261 */ 261 - if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) { 262 - struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL); 263 - 264 - spin_lock(&inode->i_lock); 265 - if (!ei->jinode) { 266 - if (!jinode) { 267 - spin_unlock(&inode->i_lock); 268 - return -ENOMEM; 269 - } 270 - ei->jinode = jinode; 271 - jbd2_journal_init_jbd_inode(ei->jinode, inode); 272 - jinode = NULL; 273 - } 274 - spin_unlock(&inode->i_lock); 275 - if (unlikely(jinode != NULL)) 276 - jbd2_free_inode(jinode); 262 + if (filp->f_mode & FMODE_WRITE) { 263 + int ret = ext4_inode_attach_jinode(inode); 264 + if (ret < 0) 265 + return ret; 277 266 } 278 267 return dquot_file_open(inode, filp); 279 268 }
+43
fs/ext4/inode.c
··· 3533 3533 offset; 3534 3534 } 3535 3535 3536 + if (offset & (sb->s_blocksize - 1) || 3537 + (offset + length) & (sb->s_blocksize - 1)) { 3538 + /* 3539 + * Attach jinode to inode for jbd2 if we do any zeroing of 3540 + * partial block 3541 + */ 3542 + ret = ext4_inode_attach_jinode(inode); 3543 + if (ret < 0) 3544 + goto out_mutex; 3545 + 3546 + } 3547 + 3536 3548 first_block_offset = round_up(offset, sb->s_blocksize); 3537 3549 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 3538 3550 ··· 3613 3601 return ret; 3614 3602 } 3615 3603 3604 + int ext4_inode_attach_jinode(struct inode *inode) 3605 + { 3606 + struct ext4_inode_info *ei = EXT4_I(inode); 3607 + struct jbd2_inode *jinode; 3608 + 3609 + if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) 3610 + return 0; 3611 + 3612 + jinode = jbd2_alloc_inode(GFP_KERNEL); 3613 + spin_lock(&inode->i_lock); 3614 + if (!ei->jinode) { 3615 + if (!jinode) { 3616 + spin_unlock(&inode->i_lock); 3617 + return -ENOMEM; 3618 + } 3619 + ei->jinode = jinode; 3620 + jbd2_journal_init_jbd_inode(ei->jinode, inode); 3621 + jinode = NULL; 3622 + } 3623 + spin_unlock(&inode->i_lock); 3624 + if (unlikely(jinode != NULL)) 3625 + jbd2_free_inode(jinode); 3626 + return 0; 3627 + } 3628 + 3616 3629 /* 3617 3630 * ext4_truncate() 3618 3631 * ··· 3695 3658 3696 3659 ext4_inline_data_truncate(inode, &has_inline); 3697 3660 if (has_inline) 3661 + return; 3662 + } 3663 + 3664 + /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 3665 + if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 3666 + if (ext4_inode_attach_jinode(inode) < 0) 3698 3667 return; 3699 3668 } 3700 3669
+4 -2
fs/ext4/ioctl.c
··· 77 77 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); 78 78 memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); 79 79 memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); 80 - memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree)); 81 - memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr)); 80 + ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); 81 + ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); 82 + ext4_es_lru_del(inode1); 83 + ext4_es_lru_del(inode2); 82 84 83 85 isize = i_size_read(inode1); 84 86 i_size_write(inode1, i_size_read(inode2));
+17 -2
fs/ext4/super.c
··· 1359 1359 {Opt_delalloc, EXT4_MOUNT_DELALLOC, 1360 1360 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1361 1361 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, 1362 - MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT}, 1362 + MOPT_EXT4_ONLY | MOPT_CLEAR}, 1363 1363 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1364 1364 MOPT_EXT4_ONLY | MOPT_SET}, 1365 1365 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | ··· 3483 3483 } 3484 3484 if (test_opt(sb, DIOREAD_NOLOCK)) { 3485 3485 ext4_msg(sb, KERN_ERR, "can't mount with " 3486 - "both data=journal and delalloc"); 3486 + "both data=journal and dioread_nolock"); 3487 3487 goto failed_mount; 3488 3488 } 3489 3489 if (test_opt(sb, DELALLOC)) ··· 4725 4725 if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { 4726 4726 err = -EINVAL; 4727 4727 goto restore_opts; 4728 + } 4729 + 4730 + if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 4731 + if (test_opt2(sb, EXPLICIT_DELALLOC)) { 4732 + ext4_msg(sb, KERN_ERR, "can't mount with " 4733 + "both data=journal and delalloc"); 4734 + err = -EINVAL; 4735 + goto restore_opts; 4736 + } 4737 + if (test_opt(sb, DIOREAD_NOLOCK)) { 4738 + ext4_msg(sb, KERN_ERR, "can't mount with " 4739 + "both data=journal and dioread_nolock"); 4740 + err = -EINVAL; 4741 + goto restore_opts; 4742 + } 4728 4743 } 4729 4744 4730 4745 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
+4 -4
fs/gfs2/glock.c
··· 1838 1838 1839 1839 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 1840 1840 WQ_HIGHPRI | WQ_FREEZABLE, 0); 1841 - if (IS_ERR(glock_workqueue)) 1842 - return PTR_ERR(glock_workqueue); 1841 + if (!glock_workqueue) 1842 + return -ENOMEM; 1843 1843 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 1844 1844 WQ_MEM_RECLAIM | WQ_FREEZABLE, 1845 1845 0); 1846 - if (IS_ERR(gfs2_delete_workqueue)) { 1846 + if (!gfs2_delete_workqueue) { 1847 1847 destroy_workqueue(glock_workqueue); 1848 - return PTR_ERR(gfs2_delete_workqueue); 1848 + return -ENOMEM; 1849 1849 } 1850 1850 1851 1851 register_shrinker(&glock_shrinker);
+13 -5
fs/gfs2/glops.c
··· 47 47 * None of the buffers should be dirty, locked, or pinned. 48 48 */ 49 49 50 - static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) 50 + static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, 51 + unsigned int nr_revokes) 51 52 { 52 53 struct gfs2_sbd *sdp = gl->gl_sbd; 53 54 struct list_head *head = &gl->gl_ail_list; ··· 58 57 59 58 gfs2_log_lock(sdp); 60 59 spin_lock(&sdp->sd_ail_lock); 61 - list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) { 60 + list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) { 61 + if (nr_revokes == 0) 62 + break; 62 63 bh = bd->bd_bh; 63 64 if (bh->b_state & b_state) { 64 65 if (fsync) ··· 68 65 gfs2_ail_error(gl, bh); 69 66 } 70 67 gfs2_trans_add_revoke(sdp, bd); 68 + nr_revokes--; 71 69 } 72 70 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); 73 71 spin_unlock(&sdp->sd_ail_lock); ··· 95 91 WARN_ON_ONCE(current->journal_info); 96 92 current->journal_info = &tr; 97 93 98 - __gfs2_ail_flush(gl, 0); 94 + __gfs2_ail_flush(gl, 0, tr.tr_revokes); 99 95 100 96 gfs2_trans_end(sdp); 101 97 gfs2_log_flush(sdp, NULL); ··· 105 101 { 106 102 struct gfs2_sbd *sdp = gl->gl_sbd; 107 103 unsigned int revokes = atomic_read(&gl->gl_ail_count); 104 + unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); 108 105 int ret; 109 106 110 107 if (!revokes) 111 108 return; 112 109 113 - ret = gfs2_trans_begin(sdp, 0, revokes); 110 + while (revokes > max_revokes) 111 + max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); 112 + 113 + ret = gfs2_trans_begin(sdp, 0, max_revokes); 114 114 if (ret) 115 115 return; 116 - __gfs2_ail_flush(gl, fsync); 116 + __gfs2_ail_flush(gl, fsync, max_revokes); 117 117 gfs2_trans_end(sdp); 118 118 gfs2_log_flush(sdp, NULL); 119 119 }
+5 -1
fs/gfs2/inode.c
··· 594 594 } 595 595 gfs2_glock_dq_uninit(ghs); 596 596 if (IS_ERR(d)) 597 - return PTR_RET(d); 597 + return PTR_ERR(d); 598 598 return error; 599 599 } else if (error != -ENOENT) { 600 600 goto fail_gunlock; ··· 1749 1749 struct gfs2_inode *ip = GFS2_I(inode); 1750 1750 struct gfs2_holder gh; 1751 1751 int ret; 1752 + 1753 + /* For selinux during lookup */ 1754 + if (gfs2_glock_is_locked_by_me(ip->i_gl)) 1755 + return generic_getxattr(dentry, name, data, size); 1752 1756 1753 1757 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 1754 1758 ret = gfs2_glock_nq(&gh);
+1 -1
fs/gfs2/main.c
··· 155 155 goto fail_wq; 156 156 157 157 gfs2_control_wq = alloc_workqueue("gfs2_control", 158 - WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0); 158 + WQ_UNBOUND | WQ_FREEZABLE, 0); 159 159 if (!gfs2_control_wq) 160 160 goto fail_recovery; 161 161
+11 -7
fs/hugetlbfs/inode.c
··· 463 463 return inode; 464 464 } 465 465 466 + /* 467 + * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never 468 + * be taken from reclaim -- unlike regular filesystems. This needs an 469 + * annotation because huge_pmd_share() does an allocation under 470 + * i_mmap_mutex. 471 + */ 472 + struct lock_class_key hugetlbfs_i_mmap_mutex_key; 473 + 466 474 static struct inode *hugetlbfs_get_inode(struct super_block *sb, 467 475 struct inode *dir, 468 476 umode_t mode, dev_t dev) ··· 482 474 struct hugetlbfs_inode_info *info; 483 475 inode->i_ino = get_next_ino(); 484 476 inode_init_owner(inode, dir, mode); 477 + lockdep_set_class(&inode->i_mapping->i_mmap_mutex, 478 + &hugetlbfs_i_mmap_mutex_key); 485 479 inode->i_mapping->a_ops = &hugetlbfs_aops; 486 480 inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; 487 481 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ··· 926 916 return h - hstates; 927 917 } 928 918 929 - static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen) 930 - { 931 - return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)", 932 - dentry->d_name.name); 933 - } 934 - 935 919 static struct dentry_operations anon_ops = { 936 - .d_dname = hugetlb_dname 920 + .d_dname = simple_dname 937 921 }; 938 922 939 923 /*
+23 -8
fs/jfs/jfs_dtree.c
··· 3047 3047 3048 3048 dir_index = (u32) ctx->pos; 3049 3049 3050 + /* 3051 + * NFSv4 reserves cookies 1 and 2 for . and .. so the value 3052 + * we return to the vfs is one greater than the one we use 3053 + * internally. 3054 + */ 3055 + if (dir_index) 3056 + dir_index--; 3057 + 3050 3058 if (dir_index > 1) { 3051 3059 struct dir_table_slot dirtab_slot; 3052 3060 ··· 3094 3086 if (p->header.flag & BT_INTERNAL) { 3095 3087 jfs_err("jfs_readdir: bad index table"); 3096 3088 DT_PUTPAGE(mp); 3097 - ctx->pos = -1; 3089 + ctx->pos = DIREND; 3098 3090 return 0; 3099 3091 } 3100 3092 } else { ··· 3102 3094 /* 3103 3095 * self "." 3104 3096 */ 3105 - ctx->pos = 0; 3097 + ctx->pos = 1; 3106 3098 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 3107 3099 return 0; 3108 3100 } 3109 3101 /* 3110 3102 * parent ".." 3111 3103 */ 3112 - ctx->pos = 1; 3104 + ctx->pos = 2; 3113 3105 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 3114 3106 return 0; 3115 3107 ··· 3130 3122 /* 3131 3123 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6 3132 3124 * 3133 - * pn = index = 0: First entry "." 3134 - * pn = 0; index = 1: Second entry ".." 3125 + * pn = 0; index = 1: First entry "." 3126 + * pn = 0; index = 2: Second entry ".." 3135 3127 * pn > 0: Real entries, pn=1 -> leftmost page 3136 3128 * pn = index = -1: No more entries 3137 3129 */ 3138 3130 dtpos = ctx->pos; 3139 - if (dtpos == 0) { 3131 + if (dtpos < 2) { 3140 3132 /* build "." entry */ 3133 + ctx->pos = 1; 3141 3134 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 3142 3135 return 0; 3143 - dtoffset->index = 1; 3136 + dtoffset->index = 2; 3144 3137 ctx->pos = dtpos; 3145 3138 } 3146 3139 3147 3140 if (dtoffset->pn == 0) { 3148 - if (dtoffset->index == 1) { 3141 + if (dtoffset->index == 2) { 3149 3142 /* build ".." 
entry */ 3150 3143 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 3151 3144 return 0; ··· 3237 3228 } 3238 3229 jfs_dirent->position = unique_pos++; 3239 3230 } 3231 + /* 3232 + * We add 1 to the index because we may 3233 + * use a value of 2 internally, and NFSv4 3234 + * doesn't like that. 3235 + */ 3236 + jfs_dirent->position++; 3240 3237 } else { 3241 3238 jfs_dirent->position = dtpos; 3242 3239 len = min(d_namleft, DTLHDRDATALEN_LEGACY);
+10 -6
fs/namei.c
··· 536 536 * a reference at this point. 537 537 */ 538 538 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); 539 - BUG_ON(!parent->d_count); 540 - parent->d_count++; 539 + BUG_ON(!parent->d_lockref.count); 540 + parent->d_lockref.count++; 541 541 spin_unlock(&dentry->d_lock); 542 542 } 543 543 spin_unlock(&parent->d_lock); ··· 3327 3327 { 3328 3328 shrink_dcache_parent(dentry); 3329 3329 spin_lock(&dentry->d_lock); 3330 - if (dentry->d_count == 1) 3330 + if (dentry->d_lockref.count == 1) 3331 3331 __d_drop(dentry); 3332 3332 spin_unlock(&dentry->d_lock); 3333 3333 } ··· 3671 3671 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) 3672 3672 return -EINVAL; 3673 3673 /* 3674 - * Using empty names is equivalent to using AT_SYMLINK_FOLLOW 3675 - * on /proc/self/fd/<fd>. 3674 + * To use null names we require CAP_DAC_READ_SEARCH 3675 + * This ensures that not everyone will be able to create 3676 + * handlink using the passed filedescriptor. 3676 3677 */ 3677 - if (flags & AT_EMPTY_PATH) 3678 + if (flags & AT_EMPTY_PATH) { 3679 + if (!capable(CAP_DAC_READ_SEARCH)) 3680 + return -ENOENT; 3678 3681 how = LOOKUP_EMPTY; 3682 + } 3679 3683 3680 3684 if (flags & AT_SYMLINK_FOLLOW) 3681 3685 how |= LOOKUP_FOLLOW;
+1 -1
fs/namespace.c
··· 1429 1429 CL_COPY_ALL | CL_PRIVATE); 1430 1430 namespace_unlock(); 1431 1431 if (IS_ERR(tree)) 1432 - return NULL; 1432 + return ERR_CAST(tree); 1433 1433 return &tree->mnt; 1434 1434 } 1435 1435
+2 -3
fs/nilfs2/segbuf.c
··· 345 345 346 346 if (err == -EOPNOTSUPP) { 347 347 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 348 - bio_put(bio); 349 - /* to be detected by submit_seg_bio() */ 348 + /* to be detected by nilfs_segbuf_submit_bio() */ 350 349 } 351 350 352 351 if (!uptodate) ··· 376 377 bio->bi_private = segbuf; 377 378 bio_get(bio); 378 379 submit_bio(mode, bio); 380 + segbuf->sb_nbio++; 379 381 if (bio_flagged(bio, BIO_EOPNOTSUPP)) { 380 382 bio_put(bio); 381 383 err = -EOPNOTSUPP; 382 384 goto failed; 383 385 } 384 - segbuf->sb_nbio++; 385 386 bio_put(bio); 386 387 387 388 wi->bio = NULL;
+1 -1
fs/ocfs2/aops.c
··· 1757 1757 goto out; 1758 1758 } else if (ret == 1) { 1759 1759 clusters_need = wc->w_clen; 1760 - ret = ocfs2_refcount_cow(inode, filp, di_bh, 1760 + ret = ocfs2_refcount_cow(inode, di_bh, 1761 1761 wc->w_cpos, wc->w_clen, UINT_MAX); 1762 1762 if (ret) { 1763 1763 mlog_errno(ret);
+1 -3
fs/ocfs2/dir.c
··· 2153 2153 { 2154 2154 int ret; 2155 2155 struct ocfs2_empty_dir_priv priv = { 2156 - .ctx.actor = ocfs2_empty_dir_filldir 2156 + .ctx.actor = ocfs2_empty_dir_filldir, 2157 2157 }; 2158 - 2159 - memset(&priv, 0, sizeof(priv)); 2160 2158 2161 2159 if (ocfs2_dir_indexed(inode)) { 2162 2160 ret = ocfs2_empty_dir_dx(inode, &priv);
+3 -3
fs/ocfs2/file.c
··· 370 370 if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) 371 371 goto out; 372 372 373 - return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1); 373 + return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1); 374 374 375 375 out: 376 376 return status; ··· 899 899 zero_clusters = last_cpos - zero_cpos; 900 900 901 901 if (needs_cow) { 902 - rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos, 902 + rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, 903 903 zero_clusters, UINT_MAX); 904 904 if (rc) { 905 905 mlog_errno(rc); ··· 2078 2078 2079 2079 *meta_level = 1; 2080 2080 2081 - ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX); 2081 + ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); 2082 2082 if (ret) 2083 2083 mlog_errno(ret); 2084 2084 out:
+1 -1
fs/ocfs2/journal.h
··· 537 537 extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); 538 538 539 539 return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + 540 - ocfs2_quota_trans_credits(sb) + bits_wanted; 540 + ocfs2_quota_trans_credits(sb); 541 541 } 542 542 543 543 static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
+1 -1
fs/ocfs2/move_extents.c
··· 69 69 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); 70 70 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); 71 71 72 - ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, 72 + ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos, 73 73 p_cpos, new_p_cpos, len); 74 74 if (ret) { 75 75 mlog_errno(ret);
+8 -45
fs/ocfs2/refcounttree.c
··· 49 49 50 50 struct ocfs2_cow_context { 51 51 struct inode *inode; 52 - struct file *file; 53 52 u32 cow_start; 54 53 u32 cow_len; 55 54 struct ocfs2_extent_tree data_et; ··· 65 66 u32 *num_clusters, 66 67 unsigned int *extent_flags); 67 68 int (*cow_duplicate_clusters)(handle_t *handle, 68 - struct file *file, 69 + struct inode *inode, 69 70 u32 cpos, u32 old_cluster, 70 71 u32 new_cluster, u32 new_len); 71 72 }; ··· 2921 2922 } 2922 2923 2923 2924 int ocfs2_duplicate_clusters_by_page(handle_t *handle, 2924 - struct file *file, 2925 + struct inode *inode, 2925 2926 u32 cpos, u32 old_cluster, 2926 2927 u32 new_cluster, u32 new_len) 2927 2928 { 2928 2929 int ret = 0, partial; 2929 - struct inode *inode = file_inode(file); 2930 - struct ocfs2_caching_info *ci = INODE_CACHE(inode); 2931 - struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2930 + struct super_block *sb = inode->i_sb; 2932 2931 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 2933 2932 struct page *page; 2934 2933 pgoff_t page_index; ··· 2975 2978 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) 2976 2979 BUG_ON(PageDirty(page)); 2977 2980 2978 - if (PageReadahead(page)) { 2979 - page_cache_async_readahead(mapping, 2980 - &file->f_ra, file, 2981 - page, page_index, 2982 - readahead_pages); 2983 - } 2984 - 2985 2981 if (!PageUptodate(page)) { 2986 2982 ret = block_read_full_page(page, ocfs2_get_block); 2987 2983 if (ret) { ··· 2994 3004 } 2995 3005 } 2996 3006 2997 - ocfs2_map_and_dirty_page(inode, handle, from, to, 3007 + ocfs2_map_and_dirty_page(inode, 3008 + handle, from, to, 2998 3009 page, 0, &new_block); 2999 3010 mark_page_accessed(page); 3000 3011 unlock: ··· 3011 3020 } 3012 3021 3013 3022 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 3014 - struct file *file, 3023 + struct inode *inode, 3015 3024 u32 cpos, u32 old_cluster, 3016 3025 u32 new_cluster, u32 new_len) 3017 3026 { 3018 3027 int ret = 0; 3019 - struct inode *inode = file_inode(file); 3020 3028 struct 
super_block *sb = inode->i_sb; 3021 3029 struct ocfs2_caching_info *ci = INODE_CACHE(inode); 3022 3030 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); ··· 3140 3150 3141 3151 /*If the old clusters is unwritten, no need to duplicate. */ 3142 3152 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3143 - ret = context->cow_duplicate_clusters(handle, context->file, 3153 + ret = context->cow_duplicate_clusters(handle, context->inode, 3144 3154 cpos, old, new, len); 3145 3155 if (ret) { 3146 3156 mlog_errno(ret); ··· 3418 3428 return ret; 3419 3429 } 3420 3430 3421 - static void ocfs2_readahead_for_cow(struct inode *inode, 3422 - struct file *file, 3423 - u32 start, u32 len) 3424 - { 3425 - struct address_space *mapping; 3426 - pgoff_t index; 3427 - unsigned long num_pages; 3428 - int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; 3429 - 3430 - if (!file) 3431 - return; 3432 - 3433 - mapping = file->f_mapping; 3434 - num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT; 3435 - if (!num_pages) 3436 - num_pages = 1; 3437 - 3438 - index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT; 3439 - page_cache_sync_readahead(mapping, &file->f_ra, file, 3440 - index, num_pages); 3441 - } 3442 - 3443 3431 /* 3444 3432 * Starting at cpos, try to CoW write_len clusters. Don't CoW 3445 3433 * past max_cpos. This will stop when it runs into a hole or an 3446 3434 * unrefcounted extent. 
3447 3435 */ 3448 3436 static int ocfs2_refcount_cow_hunk(struct inode *inode, 3449 - struct file *file, 3450 3437 struct buffer_head *di_bh, 3451 3438 u32 cpos, u32 write_len, u32 max_cpos) 3452 3439 { ··· 3452 3485 3453 3486 BUG_ON(cow_len == 0); 3454 3487 3455 - ocfs2_readahead_for_cow(inode, file, cow_start, cow_len); 3456 - 3457 3488 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3458 3489 if (!context) { 3459 3490 ret = -ENOMEM; ··· 3473 3508 context->ref_root_bh = ref_root_bh; 3474 3509 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; 3475 3510 context->get_clusters = ocfs2_di_get_clusters; 3476 - context->file = file; 3477 3511 3478 3512 ocfs2_init_dinode_extent_tree(&context->data_et, 3479 3513 INODE_CACHE(inode), di_bh); ··· 3501 3537 * clusters between cpos and cpos+write_len are safe to modify. 3502 3538 */ 3503 3539 int ocfs2_refcount_cow(struct inode *inode, 3504 - struct file *file, 3505 3540 struct buffer_head *di_bh, 3506 3541 u32 cpos, u32 write_len, u32 max_cpos) 3507 3542 { ··· 3520 3557 num_clusters = write_len; 3521 3558 3522 3559 if (ext_flags & OCFS2_EXT_REFCOUNTED) { 3523 - ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos, 3560 + ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, 3524 3561 num_clusters, max_cpos); 3525 3562 if (ret) { 3526 3563 mlog_errno(ret);
+3 -3
fs/ocfs2/refcounttree.h
··· 53 53 int *credits, 54 54 int *ref_blocks); 55 55 int ocfs2_refcount_cow(struct inode *inode, 56 - struct file *filep, struct buffer_head *di_bh, 56 + struct buffer_head *di_bh, 57 57 u32 cpos, u32 write_len, u32 max_cpos); 58 58 59 59 typedef int (ocfs2_post_refcount_func)(struct inode *inode, ··· 85 85 u32 cpos, u32 write_len, 86 86 struct ocfs2_post_refcount *post); 87 87 int ocfs2_duplicate_clusters_by_page(handle_t *handle, 88 - struct file *file, 88 + struct inode *inode, 89 89 u32 cpos, u32 old_cluster, 90 90 u32 new_cluster, u32 new_len); 91 91 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 92 - struct file *file, 92 + struct inode *inode, 93 93 u32 cpos, u32 old_cluster, 94 94 u32 new_cluster, u32 new_len); 95 95 int ocfs2_cow_sync_writeback(struct super_block *sb,
+1 -1
fs/ocfs2/super.c
··· 1022 1022 struct inode *inode = NULL; 1023 1023 struct ocfs2_super *osb = NULL; 1024 1024 struct buffer_head *bh = NULL; 1025 - char nodestr[8]; 1025 + char nodestr[12]; 1026 1026 struct ocfs2_blockcheck_stats stats; 1027 1027 1028 1028 trace_ocfs2_fill_super(sb, data, silent);
-2
fs/proc/fd.c
··· 230 230 231 231 if (!dir_emit_dots(file, ctx)) 232 232 goto out; 233 - if (!dir_emit_dots(file, ctx)) 234 - goto out; 235 233 files = get_files_struct(p); 236 234 if (!files) 237 235 goto out;
+1 -1
fs/proc/generic.c
··· 271 271 de = next; 272 272 } while (de); 273 273 spin_unlock(&proc_subdir_lock); 274 - return 0; 274 + return 1; 275 275 } 276 276 277 277 int proc_readdir(struct file *file, struct dir_context *ctx)
+3 -1
fs/proc/root.c
··· 205 205 static int proc_root_readdir(struct file *file, struct dir_context *ctx) 206 206 { 207 207 if (ctx->pos < FIRST_PROCESS_ENTRY) { 208 - proc_readdir(file, ctx); 208 + int error = proc_readdir(file, ctx); 209 + if (unlikely(error <= 0)) 210 + return error; 209 211 ctx->pos = FIRST_PROCESS_ENTRY; 210 212 } 211 213
+21 -10
fs/proc/task_mmu.c
··· 730 730 * of how soft-dirty works. 731 731 */ 732 732 pte_t ptent = *pte; 733 - ptent = pte_wrprotect(ptent); 734 - ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); 733 + 734 + if (pte_present(ptent)) { 735 + ptent = pte_wrprotect(ptent); 736 + ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); 737 + } else if (is_swap_pte(ptent)) { 738 + ptent = pte_swp_clear_soft_dirty(ptent); 739 + } else if (pte_file(ptent)) { 740 + ptent = pte_file_clear_soft_dirty(ptent); 741 + } 742 + 735 743 set_pte_at(vma->vm_mm, addr, pte, ptent); 736 744 #endif 737 745 } ··· 760 752 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 761 753 for (; addr != end; pte++, addr += PAGE_SIZE) { 762 754 ptent = *pte; 763 - if (!pte_present(ptent)) 764 - continue; 765 755 766 756 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { 767 757 clear_soft_dirty(vma, addr, pte); 768 758 continue; 769 759 } 760 + 761 + if (!pte_present(ptent)) 762 + continue; 770 763 771 764 page = vm_normal_page(vma, addr, ptent); 772 765 if (!page) ··· 868 859 } pagemap_entry_t; 869 860 870 861 struct pagemapread { 871 - int pos, len; 862 + int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ 872 863 pagemap_entry_t *buffer; 873 864 bool v2; 874 865 }; ··· 876 867 #define PAGEMAP_WALK_SIZE (PMD_SIZE) 877 868 #define PAGEMAP_WALK_MASK (PMD_MASK) 878 869 879 - #define PM_ENTRY_BYTES sizeof(u64) 870 + #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) 880 871 #define PM_STATUS_BITS 3 881 872 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) 882 873 #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) ··· 939 930 flags = PM_PRESENT; 940 931 page = vm_normal_page(vma, addr, pte); 941 932 } else if (is_swap_pte(pte)) { 942 - swp_entry_t entry = pte_to_swp_entry(pte); 943 - 933 + swp_entry_t entry; 934 + if (pte_swp_soft_dirty(pte)) 935 + flags2 |= __PM_SOFT_DIRTY; 936 + entry = pte_to_swp_entry(pte); 944 937 frame = swp_type(entry) | 945 938 (swp_offset(entry) << MAX_SWAPFILES_SHIFT); 946 939 flags = PM_SWAP; 
··· 1127 1116 goto out_task; 1128 1117 1129 1118 pm.v2 = soft_dirty_cleared; 1130 - pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1131 - pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); 1119 + pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1120 + pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); 1132 1121 ret = -ENOMEM; 1133 1122 if (!pm.buffer) 1134 1123 goto out_task;
+30
include/asm-generic/pgtable.h
··· 417 417 { 418 418 return pmd; 419 419 } 420 + 421 + static inline pte_t pte_swp_mksoft_dirty(pte_t pte) 422 + { 423 + return pte; 424 + } 425 + 426 + static inline int pte_swp_soft_dirty(pte_t pte) 427 + { 428 + return 0; 429 + } 430 + 431 + static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) 432 + { 433 + return pte; 434 + } 435 + 436 + static inline pte_t pte_file_clear_soft_dirty(pte_t pte) 437 + { 438 + return pte; 439 + } 440 + 441 + static inline pte_t pte_file_mksoft_dirty(pte_t pte) 442 + { 443 + return pte; 444 + } 445 + 446 + static inline int pte_file_soft_dirty(pte_t pte) 447 + { 448 + return 0; 449 + } 420 450 #endif 421 451 422 452 #ifndef __HAVE_PFNMAP_TRACKING
+1 -1
include/asm-generic/tlb.h
··· 112 112 113 113 #define HAVE_GENERIC_MMU_GATHER 114 114 115 - void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); 115 + void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); 116 116 void tlb_flush_mmu(struct mmu_gather *tlb); 117 117 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, 118 118 unsigned long end);
+10 -10
include/linux/dcache.h
··· 9 9 #include <linux/seqlock.h> 10 10 #include <linux/cache.h> 11 11 #include <linux/rcupdate.h> 12 + #include <linux/lockref.h> 12 13 13 14 struct nameidata; 14 15 struct path; ··· 101 100 # endif 102 101 #endif 103 102 103 + #define d_lock d_lockref.lock 104 + 104 105 struct dentry { 105 106 /* RCU lookup touched fields */ 106 107 unsigned int d_flags; /* protected by d_lock */ ··· 115 112 unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ 116 113 117 114 /* Ref lookup also touches following */ 118 - unsigned int d_count; /* protected by d_lock */ 119 - spinlock_t d_lock; /* per dentry lock */ 115 + struct lockref d_lockref; /* per-dentry lock and refcount */ 120 116 const struct dentry_operations *d_op; 121 117 struct super_block *d_sb; /* The root of the dentry tree */ 122 118 unsigned long d_time; /* used by d_revalidate */ ··· 320 318 assert_spin_locked(&dentry->d_lock); 321 319 if (!read_seqcount_retry(&dentry->d_seq, seq)) { 322 320 ret = 1; 323 - dentry->d_count++; 321 + dentry->d_lockref.count++; 324 322 } 325 323 326 324 return ret; ··· 328 326 329 327 static inline unsigned d_count(const struct dentry *dentry) 330 328 { 331 - return dentry->d_count; 329 + return dentry->d_lockref.count; 332 330 } 333 331 334 332 /* validate "insecure" dentry pointer */ ··· 338 336 * helper function for dentry_operations.d_dname() members 339 337 */ 340 338 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 339 + extern char *simple_dname(struct dentry *, char *, int); 341 340 342 341 extern char *__d_path(const struct path *, const struct path *, char *, int); 343 342 extern char *d_absolute_path(const struct path *, char *, int); ··· 359 356 static inline struct dentry *dget_dlock(struct dentry *dentry) 360 357 { 361 358 if (dentry) 362 - dentry->d_count++; 359 + dentry->d_lockref.count++; 363 360 return dentry; 364 361 } 365 362 366 363 static inline struct dentry *dget(struct dentry *dentry) 367 364 { 368 - if (dentry) { 369 - 
spin_lock(&dentry->d_lock); 370 - dget_dlock(dentry); 371 - spin_unlock(&dentry->d_lock); 372 - } 365 + if (dentry) 366 + lockref_get(&dentry->d_lockref); 373 367 return dentry; 374 368 } 375 369
+1 -33
include/linux/inetdevice.h
··· 5 5 6 6 #include <linux/bitmap.h> 7 7 #include <linux/if.h> 8 + #include <linux/ip.h> 8 9 #include <linux/netdevice.h> 9 10 #include <linux/rcupdate.h> 10 11 #include <linux/timer.h> 11 12 #include <linux/sysctl.h> 12 13 #include <linux/rtnetlink.h> 13 - 14 - enum 15 - { 16 - IPV4_DEVCONF_FORWARDING=1, 17 - IPV4_DEVCONF_MC_FORWARDING, 18 - IPV4_DEVCONF_PROXY_ARP, 19 - IPV4_DEVCONF_ACCEPT_REDIRECTS, 20 - IPV4_DEVCONF_SECURE_REDIRECTS, 21 - IPV4_DEVCONF_SEND_REDIRECTS, 22 - IPV4_DEVCONF_SHARED_MEDIA, 23 - IPV4_DEVCONF_RP_FILTER, 24 - IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE, 25 - IPV4_DEVCONF_BOOTP_RELAY, 26 - IPV4_DEVCONF_LOG_MARTIANS, 27 - IPV4_DEVCONF_TAG, 28 - IPV4_DEVCONF_ARPFILTER, 29 - IPV4_DEVCONF_MEDIUM_ID, 30 - IPV4_DEVCONF_NOXFRM, 31 - IPV4_DEVCONF_NOPOLICY, 32 - IPV4_DEVCONF_FORCE_IGMP_VERSION, 33 - IPV4_DEVCONF_ARP_ANNOUNCE, 34 - IPV4_DEVCONF_ARP_IGNORE, 35 - IPV4_DEVCONF_PROMOTE_SECONDARIES, 36 - IPV4_DEVCONF_ARP_ACCEPT, 37 - IPV4_DEVCONF_ARP_NOTIFY, 38 - IPV4_DEVCONF_ACCEPT_LOCAL, 39 - IPV4_DEVCONF_SRC_VMARK, 40 - IPV4_DEVCONF_PROXY_ARP_PVLAN, 41 - IPV4_DEVCONF_ROUTE_LOCALNET, 42 - __IPV4_DEVCONF_MAX 43 - }; 44 - 45 - #define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1) 46 14 47 15 struct ipv4_devconf { 48 16 void *sysctl;
+1
include/linux/ipv6.h
··· 101 101 #define IP6SKB_FORWARDED 2 102 102 #define IP6SKB_REROUTED 4 103 103 #define IP6SKB_ROUTERALERT 8 104 + #define IP6SKB_FRAGMENTED 16 104 105 }; 105 106 106 107 #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
+71
include/linux/lockref.h
··· 1 + #ifndef __LINUX_LOCKREF_H 2 + #define __LINUX_LOCKREF_H 3 + 4 + /* 5 + * Locked reference counts. 6 + * 7 + * These are different from just plain atomic refcounts in that they 8 + * are atomic with respect to the spinlock that goes with them. In 9 + * particular, there can be implementations that don't actually get 10 + * the spinlock for the common decrement/increment operations, but they 11 + * still have to check that the operation is done semantically as if 12 + * the spinlock had been taken (using a cmpxchg operation that covers 13 + * both the lock and the count word, or using memory transactions, for 14 + * example). 15 + */ 16 + 17 + #include <linux/spinlock.h> 18 + 19 + struct lockref { 20 + spinlock_t lock; 21 + unsigned int count; 22 + }; 23 + 24 + /** 25 + * lockref_get - Increments reference count unconditionally 26 + * @lockcnt: pointer to lockref structure 27 + * 28 + * This operation is only valid if you already hold a reference 29 + * to the object, so you know the count cannot be zero. 
30 + */ 31 + static inline void lockref_get(struct lockref *lockref) 32 + { 33 + spin_lock(&lockref->lock); 34 + lockref->count++; 35 + spin_unlock(&lockref->lock); 36 + } 37 + 38 + /** 39 + * lockref_get_not_zero - Increments count unless the count is 0 40 + * @lockcnt: pointer to lockref structure 41 + * Return: 1 if count updated successfully or 0 if count is 0 42 + */ 43 + static inline int lockref_get_not_zero(struct lockref *lockref) 44 + { 45 + int retval = 0; 46 + 47 + spin_lock(&lockref->lock); 48 + if (lockref->count) { 49 + lockref->count++; 50 + retval = 1; 51 + } 52 + spin_unlock(&lockref->lock); 53 + return retval; 54 + } 55 + 56 + /** 57 + * lockref_put_or_lock - decrements count unless count <= 1 before decrement 58 + * @lockcnt: pointer to lockref structure 59 + * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken 60 + */ 61 + static inline int lockref_put_or_lock(struct lockref *lockref) 62 + { 63 + spin_lock(&lockref->lock); 64 + if (lockref->count <= 1) 65 + return 0; 66 + lockref->count--; 67 + spin_unlock(&lockref->lock); 68 + return 1; 69 + } 70 + 71 + #endif /* __LINUX_LOCKREF_H */
+10 -12
include/linux/mlx5/device.h
··· 309 309 __be16 max_desc_sz_rq; 310 310 u8 rsvd21[2]; 311 311 __be16 max_desc_sz_sq_dc; 312 - u8 rsvd22[4]; 313 - __be16 max_qp_mcg; 314 - u8 rsvd23; 312 + __be32 max_qp_mcg; 313 + u8 rsvd22[3]; 315 314 u8 log_max_mcg; 316 - u8 rsvd24; 315 + u8 rsvd23; 317 316 u8 log_max_pd; 318 - u8 rsvd25; 317 + u8 rsvd24; 319 318 u8 log_max_xrcd; 320 - u8 rsvd26[42]; 319 + u8 rsvd25[42]; 321 320 __be16 log_uar_page_sz; 322 - u8 rsvd27[28]; 321 + u8 rsvd26[28]; 323 322 u8 log_msx_atomic_size_qp; 324 - u8 rsvd28[2]; 323 + u8 rsvd27[2]; 325 324 u8 log_msx_atomic_size_dc; 326 - u8 rsvd29[76]; 325 + u8 rsvd28[76]; 327 326 }; 328 327 329 328 ··· 471 472 struct mlx5_eqe_page_req { 472 473 u8 rsvd0[2]; 473 474 __be16 func_id; 474 - u8 rsvd1[2]; 475 - __be16 num_pages; 476 - __be32 rsvd2[5]; 475 + __be32 num_pages; 476 + __be32 rsvd1[5]; 477 477 }; 478 478 479 479 union ev_data {
+2 -5
include/linux/mlx5/driver.h
··· 358 358 u32 reserved_lkey; 359 359 u8 local_ca_ack_delay; 360 360 u8 log_max_mcg; 361 - u16 max_qp_mcg; 361 + u32 max_qp_mcg; 362 362 int min_page_sz; 363 363 }; 364 364 ··· 691 691 int mlx5_pagealloc_start(struct mlx5_core_dev *dev); 692 692 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); 693 693 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, 694 - s16 npages); 694 + s32 npages); 695 695 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); 696 696 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); 697 697 void mlx5_register_debugfs(void); ··· 731 731 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); 732 732 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); 733 733 734 - typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size); 735 - int mlx5_register_health_report_handler(health_handler_t handler); 736 - void mlx5_unregister_health_report_handler(void); 737 734 const char *mlx5_command_str(int command); 738 735 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); 739 736 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
+1
include/linux/mm_types.h
··· 332 332 unsigned long pgoff, unsigned long flags); 333 333 #endif 334 334 unsigned long mmap_base; /* base of mmap area */ 335 + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ 335 336 unsigned long task_size; /* size of task vm space */ 336 337 unsigned long highest_vm_end; /* highest vma end address */ 337 338 pgd_t * pgd;
+1
include/linux/regmap.h
··· 16 16 #include <linux/list.h> 17 17 #include <linux/rbtree.h> 18 18 #include <linux/err.h> 19 + #include <linux/bug.h> 19 20 20 21 struct module; 21 22 struct device;
+6
include/linux/sched.h
··· 1535 1535 * Test if a process is not yet dead (at most zombie state) 1536 1536 * If pid_alive fails, then pointers within the task structure 1537 1537 * can be stale and must not be dereferenced. 1538 + * 1539 + * Return: 1 if the process is alive. 0 otherwise. 1538 1540 */ 1539 1541 static inline int pid_alive(struct task_struct *p) 1540 1542 { ··· 1548 1546 * @tsk: Task structure to be checked. 1549 1547 * 1550 1548 * Check if a task structure is the first user space task the kernel created. 1549 + * 1550 + * Return: 1 if the task structure is init. 0 otherwise. 1551 1551 */ 1552 1552 static inline int is_global_init(struct task_struct *tsk) 1553 1553 { ··· 1901 1897 /** 1902 1898 * is_idle_task - is the specified task an idle task? 1903 1899 * @p: the task in question. 1900 + * 1901 + * Return: 1 if @p is an idle task. 0 otherwise. 1904 1902 */ 1905 1903 static inline bool is_idle_task(const struct task_struct *p) 1906 1904 {
+11 -3
include/linux/spinlock.h
··· 117 117 #endif /*arch_spin_is_contended*/ 118 118 #endif 119 119 120 - /* The lock does not imply full memory barrier. */ 121 - #ifndef ARCH_HAS_SMP_MB_AFTER_LOCK 122 - static inline void smp_mb__after_lock(void) { smp_mb(); } 120 + /* 121 + * Despite its name it doesn't necessarily has to be a full barrier. 122 + * It should only guarantee that a STORE before the critical section 123 + * can not be reordered with a LOAD inside this section. 124 + * spin_lock() is the one-way barrier, this LOAD can not escape out 125 + * of the region. So the default implementation simply ensures that 126 + * a STORE can not move into the critical section, smp_wmb() should 127 + * serialize it with another STORE done by spin_lock(). 128 + */ 129 + #ifndef smp_mb__before_spinlock 130 + #define smp_mb__before_spinlock() smp_wmb() 123 131 #endif 124 132 125 133 /**
+2
include/linux/swapops.h
··· 67 67 swp_entry_t arch_entry; 68 68 69 69 BUG_ON(pte_file(pte)); 70 + if (pte_swp_soft_dirty(pte)) 71 + pte = pte_swp_clear_soft_dirty(pte); 70 72 arch_entry = __pte_to_swp_entry(pte); 71 73 return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); 72 74 }
+5
include/linux/syscalls.h
··· 802 802 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, 803 803 int __user *); 804 804 #else 805 + #ifdef CONFIG_CLONE_BACKWARDS3 806 + asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, 807 + int __user *, int); 808 + #else 805 809 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, 806 810 int __user *, int); 811 + #endif 807 812 #endif 808 813 809 814 asmlinkage long sys_execve(const char __user *filename,
+57
include/linux/wait.h
··· 811 811 __ret; \ 812 812 }) 813 813 814 + #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ 815 + lock, ret) \ 816 + do { \ 817 + DEFINE_WAIT(__wait); \ 818 + \ 819 + for (;;) { \ 820 + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ 821 + if (condition) \ 822 + break; \ 823 + if (signal_pending(current)) { \ 824 + ret = -ERESTARTSYS; \ 825 + break; \ 826 + } \ 827 + spin_unlock_irq(&lock); \ 828 + ret = schedule_timeout(ret); \ 829 + spin_lock_irq(&lock); \ 830 + if (!ret) \ 831 + break; \ 832 + } \ 833 + finish_wait(&wq, &__wait); \ 834 + } while (0) 835 + 836 + /** 837 + * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses. 838 + * The condition is checked under the lock. This is expected 839 + * to be called with the lock taken. 840 + * @wq: the waitqueue to wait on 841 + * @condition: a C expression for the event to wait for 842 + * @lock: a locked spinlock_t, which will be released before schedule() 843 + * and reacquired afterwards. 844 + * @timeout: timeout, in jiffies 845 + * 846 + * The process is put to sleep (TASK_INTERRUPTIBLE) until the 847 + * @condition evaluates to true or signal is received. The @condition is 848 + * checked each time the waitqueue @wq is woken up. 849 + * 850 + * wake_up() has to be called after changing any variable that could 851 + * change the result of the wait condition. 852 + * 853 + * This is supposed to be called while holding the lock. The lock is 854 + * dropped before going to sleep and is reacquired afterwards. 855 + * 856 + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it 857 + * was interrupted by a signal, and the remaining jiffies otherwise 858 + * if the condition evaluated to true before the timeout elapsed. 
859 + */ 860 + #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ 861 + timeout) \ 862 + ({ \ 863 + int __ret = timeout; \ 864 + \ 865 + if (!(condition)) \ 866 + __wait_event_interruptible_lock_irq_timeout( \ 867 + wq, condition, lock, __ret); \ 868 + __ret; \ 869 + }) 870 + 814 871 815 872 /* 816 873 * These are the old interfaces to sleep waiting for an event.
+1 -6
include/net/busy_poll.h
··· 122 122 if (rc > 0) 123 123 /* local bh are disabled so it is ok to use _BH */ 124 124 NET_ADD_STATS_BH(sock_net(sk), 125 - LINUX_MIB_LOWLATENCYRXPACKETS, rc); 125 + LINUX_MIB_BUSYPOLLRXPACKETS, rc); 126 126 127 127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && 128 128 !need_resched() && !busy_loop_timeout(end_time)); ··· 158 158 } 159 159 160 160 static inline bool sk_can_busy_loop(struct sock *sk) 161 - { 162 - return false; 163 - } 164 - 165 - static inline bool sk_busy_poll(struct sock *sk, int nonblock) 166 161 { 167 162 return false; 168 163 }
+2
include/net/ip6_route.h
··· 135 135 extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, 136 136 __be32 mtu); 137 137 extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark); 138 + extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, 139 + u32 mark); 138 140 extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); 139 141 140 142 struct netlink_callback;
-14
include/net/ip_tunnels.h
··· 145 145 return INET_ECN_encapsulate(tos, inner); 146 146 } 147 147 148 - static inline void tunnel_ip_select_ident(struct sk_buff *skb, 149 - const struct iphdr *old_iph, 150 - struct dst_entry *dst) 151 - { 152 - struct iphdr *iph = ip_hdr(skb); 153 - 154 - /* Use inner packet iph-id if possible. */ 155 - if (skb->protocol == htons(ETH_P_IP) && old_iph->id) 156 - iph->id = old_iph->id; 157 - else 158 - __ip_select_ident(iph, dst, 159 - (skb_shinfo(skb)->gso_segs ?: 1) - 1); 160 - } 161 - 162 148 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); 163 149 int iptunnel_xmit(struct net *net, struct rtable *rt, 164 150 struct sk_buff *skb,
+8 -1
include/net/sch_generic.h
··· 683 683 u64 rate_bytes_ps; /* bytes per second */ 684 684 u32 mult; 685 685 u16 overhead; 686 + u8 linklayer; 686 687 u8 shift; 687 688 }; 688 689 689 690 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, 690 691 unsigned int len) 691 692 { 692 - return ((u64)(len + r->overhead) * r->mult) >> r->shift; 693 + len += r->overhead; 694 + 695 + if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) 696 + return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; 697 + 698 + return ((u64)len * r->mult) >> r->shift; 693 699 } 694 700 695 701 extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); ··· 706 700 memset(res, 0, sizeof(*res)); 707 701 res->rate = r->rate_bytes_ps; 708 702 res->overhead = r->overhead; 703 + res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); 709 704 } 710 705 711 706 #endif
+1
include/uapi/linux/cm4000_cs.h
··· 2 2 #define _UAPI_CM4000_H_ 3 3 4 4 #include <linux/types.h> 5 + #include <linux/ioctl.h> 5 6 6 7 #define MAX_ATR 33 7 8
+34
include/uapi/linux/ip.h
··· 133 133 __u8 reserved; 134 134 }; 135 135 136 + /* index values for the variables in ipv4_devconf */ 137 + enum 138 + { 139 + IPV4_DEVCONF_FORWARDING=1, 140 + IPV4_DEVCONF_MC_FORWARDING, 141 + IPV4_DEVCONF_PROXY_ARP, 142 + IPV4_DEVCONF_ACCEPT_REDIRECTS, 143 + IPV4_DEVCONF_SECURE_REDIRECTS, 144 + IPV4_DEVCONF_SEND_REDIRECTS, 145 + IPV4_DEVCONF_SHARED_MEDIA, 146 + IPV4_DEVCONF_RP_FILTER, 147 + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE, 148 + IPV4_DEVCONF_BOOTP_RELAY, 149 + IPV4_DEVCONF_LOG_MARTIANS, 150 + IPV4_DEVCONF_TAG, 151 + IPV4_DEVCONF_ARPFILTER, 152 + IPV4_DEVCONF_MEDIUM_ID, 153 + IPV4_DEVCONF_NOXFRM, 154 + IPV4_DEVCONF_NOPOLICY, 155 + IPV4_DEVCONF_FORCE_IGMP_VERSION, 156 + IPV4_DEVCONF_ARP_ANNOUNCE, 157 + IPV4_DEVCONF_ARP_IGNORE, 158 + IPV4_DEVCONF_PROMOTE_SECONDARIES, 159 + IPV4_DEVCONF_ARP_ACCEPT, 160 + IPV4_DEVCONF_ARP_NOTIFY, 161 + IPV4_DEVCONF_ACCEPT_LOCAL, 162 + IPV4_DEVCONF_SRC_VMARK, 163 + IPV4_DEVCONF_PROXY_ARP_PVLAN, 164 + IPV4_DEVCONF_ROUTE_LOCALNET, 165 + __IPV4_DEVCONF_MAX 166 + }; 167 + 168 + #define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1) 169 + 136 170 #endif /* _UAPI_LINUX_IP_H */
+9 -1
include/uapi/linux/pkt_sched.h
··· 73 73 #define TC_H_ROOT (0xFFFFFFFFU) 74 74 #define TC_H_INGRESS (0xFFFFFFF1U) 75 75 76 + /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ 77 + enum tc_link_layer { 78 + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ 79 + TC_LINKLAYER_ETHERNET, 80 + TC_LINKLAYER_ATM, 81 + }; 82 + #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ 83 + 76 84 struct tc_ratespec { 77 85 unsigned char cell_log; 78 - unsigned char __reserved; 86 + __u8 linklayer; /* lower 4 bits */ 79 87 unsigned short overhead; 80 88 short cell_align; 81 89 unsigned short mpu;
+1 -1
include/uapi/linux/snmp.h
··· 253 253 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ 254 254 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ 255 255 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ 256 - LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ 256 + LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */ 257 257 __LINUX_MIB_MAX 258 258 }; 259 259
+1 -1
init/Kconfig
··· 955 955 Memory Resource Controller Swap Extension comes with its price in 956 956 a bigger memory consumption. General purpose distribution kernels 957 957 which want to enable the feature but keep it disabled by default 958 - and let the user enable it by swapaccount boot command line 958 + and let the user enable it by swapaccount=1 boot command line 959 959 parameter should have this option unselected. 960 960 For those who want to have the feature enabled by default should 961 961 select this option (if, for some reason, they need to disable it
+3 -2
ipc/msg.c
··· 839 839 840 840 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) 841 841 { 842 - struct msg_msg *msg; 842 + struct msg_msg *msg, *found = NULL; 843 843 long count = 0; 844 844 845 845 list_for_each_entry(msg, &msq->q_messages, m_list) { ··· 848 848 *msgtyp, mode)) { 849 849 if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { 850 850 *msgtyp = msg->m_type - 1; 851 + found = msg; 851 852 } else if (mode == SEARCH_NUMBER) { 852 853 if (*msgtyp == count) 853 854 return msg; ··· 858 857 } 859 858 } 860 859 861 - return ERR_PTR(-EAGAIN); 860 + return found ?: ERR_PTR(-EAGAIN); 862 861 } 863 862 864 863 long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
+13 -7
kernel/cpuset.c
··· 475 475 476 476 /* 477 477 * Cpusets with tasks - existing or newly being attached - can't 478 - * have empty cpus_allowed or mems_allowed. 478 + * be changed to have empty cpus_allowed or mems_allowed. 479 479 */ 480 480 ret = -ENOSPC; 481 - if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) && 482 - (cpumask_empty(trial->cpus_allowed) && 483 - nodes_empty(trial->mems_allowed))) 484 - goto out; 481 + if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) { 482 + if (!cpumask_empty(cur->cpus_allowed) && 483 + cpumask_empty(trial->cpus_allowed)) 484 + goto out; 485 + if (!nodes_empty(cur->mems_allowed) && 486 + nodes_empty(trial->mems_allowed)) 487 + goto out; 488 + } 485 489 486 490 ret = 0; 487 491 out: ··· 1612 1608 { 1613 1609 struct cpuset *cs = cgroup_cs(cgrp); 1614 1610 cpuset_filetype_t type = cft->private; 1615 - int retval = -ENODEV; 1611 + int retval = 0; 1616 1612 1617 1613 mutex_lock(&cpuset_mutex); 1618 - if (!is_cpuset_online(cs)) 1614 + if (!is_cpuset_online(cs)) { 1615 + retval = -ENODEV; 1619 1616 goto out_unlock; 1617 + } 1620 1618 1621 1619 switch (type) { 1622 1620 case FILE_CPU_EXCLUSIVE:
+6
kernel/fork.c
··· 1679 1679 int __user *, parent_tidptr, 1680 1680 int __user *, child_tidptr, 1681 1681 int, tls_val) 1682 + #elif defined(CONFIG_CLONE_BACKWARDS3) 1683 + SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 1684 + int, stack_size, 1685 + int __user *, parent_tidptr, 1686 + int __user *, child_tidptr, 1687 + int, tls_val) 1682 1688 #else 1683 1689 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 1684 1690 int __user *, parent_tidptr,
+2 -2
kernel/mutex.c
··· 686 686 might_sleep(); 687 687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 688 688 0, &ctx->dep_map, _RET_IP_, ctx); 689 - if (!ret && ctx->acquired > 0) 689 + if (!ret && ctx->acquired > 1) 690 690 return ww_mutex_deadlock_injection(lock, ctx); 691 691 692 692 return ret; ··· 702 702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 703 703 0, &ctx->dep_map, _RET_IP_, ctx); 704 704 705 - if (!ret && ctx->acquired > 0) 705 + if (!ret && ctx->acquired > 1) 706 706 return ww_mutex_deadlock_injection(lock, ctx); 707 707 708 708 return ret;
+13 -7
kernel/power/qos.c
··· 296 296 } 297 297 EXPORT_SYMBOL_GPL(pm_qos_request_active); 298 298 299 + static void __pm_qos_update_request(struct pm_qos_request *req, 300 + s32 new_value) 301 + { 302 + trace_pm_qos_update_request(req->pm_qos_class, new_value); 303 + 304 + if (new_value != req->node.prio) 305 + pm_qos_update_target( 306 + pm_qos_array[req->pm_qos_class]->constraints, 307 + &req->node, PM_QOS_UPDATE_REQ, new_value); 308 + } 309 + 299 310 /** 300 311 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout 301 312 * @work: work struct for the delayed work (timeout) ··· 319 308 struct pm_qos_request, 320 309 work); 321 310 322 - pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); 311 + __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); 323 312 } 324 313 325 314 /** ··· 375 364 } 376 365 377 366 cancel_delayed_work_sync(&req->work); 378 - 379 - trace_pm_qos_update_request(req->pm_qos_class, new_value); 380 - if (new_value != req->node.prio) 381 - pm_qos_update_target( 382 - pm_qos_array[req->pm_qos_class]->constraints, 383 - &req->node, PM_QOS_UPDATE_REQ, new_value); 367 + __pm_qos_update_request(req, new_value); 384 368 } 385 369 EXPORT_SYMBOL_GPL(pm_qos_update_request); 386 370
+74 -22
kernel/sched/core.c
··· 933 933 /** 934 934 * task_curr - is this task currently executing on a CPU? 935 935 * @p: the task in question. 936 + * 937 + * Return: 1 if the task is currently executing. 0 otherwise. 936 938 */ 937 939 inline int task_curr(const struct task_struct *p) 938 940 { ··· 1484 1482 * the simpler "current->state = TASK_RUNNING" to mark yourself 1485 1483 * runnable without the overhead of this. 1486 1484 * 1487 - * Returns %true if @p was woken up, %false if it was already running 1485 + * Return: %true if @p was woken up, %false if it was already running. 1488 1486 * or @state didn't match @p's state. 1489 1487 */ 1490 1488 static int ··· 1493 1491 unsigned long flags; 1494 1492 int cpu, success = 0; 1495 1493 1496 - smp_wmb(); 1494 + /* 1495 + * If we are going to wake up a thread waiting for CONDITION we 1496 + * need to ensure that CONDITION=1 done by the caller can not be 1497 + * reordered with p->state check below. This pairs with mb() in 1498 + * set_current_state() the waiting thread does. 1499 + */ 1500 + smp_mb__before_spinlock(); 1497 1501 raw_spin_lock_irqsave(&p->pi_lock, flags); 1498 1502 if (!(p->state & state)) 1499 1503 goto out; ··· 1585 1577 * @p: The process to be woken up. 1586 1578 * 1587 1579 * Attempt to wake up the nominated process and move it to the set of runnable 1588 - * processes. Returns 1 if the process was woken up, 0 if it was already 1589 - * running. 1580 + * processes. 1581 + * 1582 + * Return: 1 if the process was woken up, 0 if it was already running. 1590 1583 * 1591 1584 * It may be assumed that this function implies a write memory barrier before 1592 1585 * changing the task state if and only if any tasks are woken up. ··· 2200 2191 * This makes sure that uptime, CFS vruntime, load 2201 2192 * balancing, etc... continue to move forward, even 2202 2193 * with a very low granularity. 2194 + * 2195 + * Return: Maximum deferment in nanoseconds. 
2203 2196 */ 2204 2197 u64 scheduler_tick_max_deferment(void) 2205 2198 { ··· 2405 2394 if (sched_feat(HRTICK)) 2406 2395 hrtick_clear(rq); 2407 2396 2397 + /* 2398 + * Make sure that signal_pending_state()->signal_pending() below 2399 + * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 2400 + * done by the caller to avoid the race with signal_wake_up(). 2401 + */ 2402 + smp_mb__before_spinlock(); 2408 2403 raw_spin_lock_irq(&rq->lock); 2409 2404 2410 2405 switch_count = &prev->nivcsw; ··· 2813 2796 * specified timeout to expire. The timeout is in jiffies. It is not 2814 2797 * interruptible. 2815 2798 * 2816 - * The return value is 0 if timed out, and positive (at least 1, or number of 2817 - * jiffies left till timeout) if completed. 2799 + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left 2800 + * till timeout) if completed. 2818 2801 */ 2819 2802 unsigned long __sched 2820 2803 wait_for_completion_timeout(struct completion *x, unsigned long timeout) ··· 2846 2829 * specified timeout to expire. The timeout is in jiffies. It is not 2847 2830 * interruptible. The caller is accounted as waiting for IO. 2848 2831 * 2849 - * The return value is 0 if timed out, and positive (at least 1, or number of 2850 - * jiffies left till timeout) if completed. 2832 + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left 2833 + * till timeout) if completed. 2851 2834 */ 2852 2835 unsigned long __sched 2853 2836 wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) ··· 2863 2846 * This waits for completion of a specific task to be signaled. It is 2864 2847 * interruptible. 2865 2848 * 2866 - * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2849 + * Return: -ERESTARTSYS if interrupted, 0 if completed. 
2867 2850 */ 2868 2851 int __sched wait_for_completion_interruptible(struct completion *x) 2869 2852 { ··· 2882 2865 * This waits for either a completion of a specific task to be signaled or for a 2883 2866 * specified timeout to expire. It is interruptible. The timeout is in jiffies. 2884 2867 * 2885 - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2886 - * positive (at least 1, or number of jiffies left till timeout) if completed. 2868 + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, 2869 + * or number of jiffies left till timeout) if completed. 2887 2870 */ 2888 2871 long __sched 2889 2872 wait_for_completion_interruptible_timeout(struct completion *x, ··· 2900 2883 * This waits to be signaled for completion of a specific task. It can be 2901 2884 * interrupted by a kill signal. 2902 2885 * 2903 - * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2886 + * Return: -ERESTARTSYS if interrupted, 0 if completed. 2904 2887 */ 2905 2888 int __sched wait_for_completion_killable(struct completion *x) 2906 2889 { ··· 2920 2903 * signaled or for a specified timeout to expire. It can be 2921 2904 * interrupted by a kill signal. The timeout is in jiffies. 2922 2905 * 2923 - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2924 - * positive (at least 1, or number of jiffies left till timeout) if completed. 2906 + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, 2907 + * or number of jiffies left till timeout) if completed. 2925 2908 */ 2926 2909 long __sched 2927 2910 wait_for_completion_killable_timeout(struct completion *x, ··· 2935 2918 * try_wait_for_completion - try to decrement a completion without blocking 2936 2919 * @x: completion structure 2937 2920 * 2938 - * Returns: 0 if a decrement cannot be done without blocking 2921 + * Return: 0 if a decrement cannot be done without blocking 2939 2922 * 1 if a decrement succeeded. 
2940 2923 * 2941 2924 * If a completion is being used as a counting completion, ··· 2962 2945 * completion_done - Test to see if a completion has any waiters 2963 2946 * @x: completion structure 2964 2947 * 2965 - * Returns: 0 if there are waiters (wait_for_completion() in progress) 2948 + * Return: 0 if there are waiters (wait_for_completion() in progress) 2966 2949 * 1 if there are no waiters. 2967 2950 * 2968 2951 */ ··· 3199 3182 * task_prio - return the priority value of a given task. 3200 3183 * @p: the task in question. 3201 3184 * 3202 - * This is the priority value as seen by users in /proc. 3185 + * Return: The priority value as seen by users in /proc. 3203 3186 * RT tasks are offset by -200. Normal tasks are centered 3204 3187 * around 0, value goes from -16 to +15. 3205 3188 */ ··· 3211 3194 /** 3212 3195 * task_nice - return the nice value of a given task. 3213 3196 * @p: the task in question. 3197 + * 3198 + * Return: The nice value [ -20 ... 0 ... 19 ]. 3214 3199 */ 3215 3200 int task_nice(const struct task_struct *p) 3216 3201 { ··· 3223 3204 /** 3224 3205 * idle_cpu - is a given cpu idle currently? 3225 3206 * @cpu: the processor in question. 3207 + * 3208 + * Return: 1 if the CPU is currently idle. 0 otherwise. 3226 3209 */ 3227 3210 int idle_cpu(int cpu) 3228 3211 { ··· 3247 3226 /** 3248 3227 * idle_task - return the idle task for a given cpu. 3249 3228 * @cpu: the processor in question. 3229 + * 3230 + * Return: The idle task for the cpu @cpu. 3250 3231 */ 3251 3232 struct task_struct *idle_task(int cpu) 3252 3233 { ··· 3258 3235 /** 3259 3236 * find_process_by_pid - find a process with a matching PID value. 3260 3237 * @pid: the pid in question. 3238 + * 3239 + * The task of @pid, if found. %NULL otherwise. 3261 3240 */ 3262 3241 static struct task_struct *find_process_by_pid(pid_t pid) 3263 3242 { ··· 3457 3432 * @policy: new policy. 3458 3433 * @param: structure containing the new RT priority. 3459 3434 * 3435 + * Return: 0 on success. 
An error code otherwise. 3436 + * 3460 3437 * NOTE that the task may be already dead. 3461 3438 */ 3462 3439 int sched_setscheduler(struct task_struct *p, int policy, ··· 3478 3451 * current context has permission. For example, this is needed in 3479 3452 * stop_machine(): we create temporary high priority worker threads, 3480 3453 * but our caller might not have that capability. 3454 + * 3455 + * Return: 0 on success. An error code otherwise. 3481 3456 */ 3482 3457 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 3483 3458 const struct sched_param *param) ··· 3514 3485 * @pid: the pid in question. 3515 3486 * @policy: new policy. 3516 3487 * @param: structure containing the new RT priority. 3488 + * 3489 + * Return: 0 on success. An error code otherwise. 3517 3490 */ 3518 3491 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 3519 3492 struct sched_param __user *, param) ··· 3531 3500 * sys_sched_setparam - set/change the RT priority of a thread 3532 3501 * @pid: the pid in question. 3533 3502 * @param: structure containing the new RT priority. 3503 + * 3504 + * Return: 0 on success. An error code otherwise. 3534 3505 */ 3535 3506 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 3536 3507 { ··· 3542 3509 /** 3543 3510 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 3544 3511 * @pid: the pid in question. 3512 + * 3513 + * Return: On success, the policy of the thread. Otherwise, a negative error 3514 + * code. 3545 3515 */ 3546 3516 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 3547 3517 { ··· 3571 3535 * sys_sched_getparam - get the RT priority of a thread 3572 3536 * @pid: the pid in question. 3573 3537 * @param: structure containing the RT priority. 3538 + * 3539 + * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 3540 + * code. 
3574 3541 */ 3575 3542 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 3576 3543 { ··· 3698 3659 * @pid: pid of the process 3699 3660 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3700 3661 * @user_mask_ptr: user-space pointer to the new cpu mask 3662 + * 3663 + * Return: 0 on success. An error code otherwise. 3701 3664 */ 3702 3665 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 3703 3666 unsigned long __user *, user_mask_ptr) ··· 3751 3710 * @pid: pid of the process 3752 3711 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3753 3712 * @user_mask_ptr: user-space pointer to hold the current cpu mask 3713 + * 3714 + * Return: 0 on success. An error code otherwise. 3754 3715 */ 3755 3716 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 3756 3717 unsigned long __user *, user_mask_ptr) ··· 3787 3744 * 3788 3745 * This function yields the current CPU to other tasks. If there are no 3789 3746 * other threads running on this CPU then this function will return. 3747 + * 3748 + * Return: 0. 3790 3749 */ 3791 3750 SYSCALL_DEFINE0(sched_yield) 3792 3751 { ··· 3914 3869 * It's the caller's job to ensure that the target task struct 3915 3870 * can't go away on us before we can do any checks. 3916 3871 * 3917 - * Returns: 3872 + * Return: 3918 3873 * true (>0) if we indeed boosted the target task. 3919 3874 * false (0) if we failed to boost the target. 3920 3875 * -ESRCH if there's no task to yield to. ··· 4017 3972 * sys_sched_get_priority_max - return maximum RT priority. 4018 3973 * @policy: scheduling class. 4019 3974 * 4020 - * this syscall returns the maximum rt_priority that can be used 4021 - * by a given scheduling class. 3975 + * Return: On success, this syscall returns the maximum 3976 + * rt_priority that can be used by a given scheduling class. 3977 + * On failure, a negative error code is returned. 
4022 3978 */ 4023 3979 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4024 3980 { ··· 4043 3997 * sys_sched_get_priority_min - return minimum RT priority. 4044 3998 * @policy: scheduling class. 4045 3999 * 4046 - * this syscall returns the minimum rt_priority that can be used 4047 - * by a given scheduling class. 4000 + * Return: On success, this syscall returns the minimum 4001 + * rt_priority that can be used by a given scheduling class. 4002 + * On failure, a negative error code is returned. 4048 4003 */ 4049 4004 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4050 4005 { ··· 4071 4024 * 4072 4025 * this syscall writes the default timeslice value of a given process 4073 4026 * into the user-space timespec buffer. A value of '0' means infinity. 4027 + * 4028 + * Return: On success, 0 and the timeslice is in @interval. Otherwise, 4029 + * an error code. 4074 4030 */ 4075 4031 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4076 4032 struct timespec __user *, interval) ··· 6687 6637 * @cpu: the processor in question. 6688 6638 * 6689 6639 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6640 + * 6641 + * Return: The current task for @cpu. 6690 6642 */ 6691 6643 struct task_struct *curr_task(int cpu) 6692 6644 {
+2 -2
kernel/sched/cpupri.c
··· 62 62 * any discrepancies created by racing against the uncertainty of the current 63 63 * priority configuration. 64 64 * 65 - * Returns: (int)bool - CPUs were found 65 + * Return: (int)bool - CPUs were found 66 66 */ 67 67 int cpupri_find(struct cpupri *cp, struct task_struct *p, 68 68 struct cpumask *lowest_mask) ··· 203 203 * cpupri_init - initialize the cpupri structure 204 204 * @cp: The cpupri context 205 205 * 206 - * Returns: -ENOMEM if memory fails. 206 + * Return: -ENOMEM on memory allocation failure. 207 207 */ 208 208 int cpupri_init(struct cpupri *cp) 209 209 {
+8 -2
kernel/sched/fair.c
··· 2032 2032 */ 2033 2033 update_entity_load_avg(curr, 1); 2034 2034 update_cfs_rq_blocked_load(cfs_rq, 1); 2035 + update_cfs_shares(cfs_rq); 2035 2036 2036 2037 #ifdef CONFIG_SCHED_HRTICK 2037 2038 /* ··· 4325 4324 * get_sd_load_idx - Obtain the load index for a given sched domain. 4326 4325 * @sd: The sched_domain whose load_idx is to be obtained. 4327 4326 * @idle: The Idle status of the CPU for whose sd load_icx is obtained. 4327 + * 4328 + * Return: The load index. 4328 4329 */ 4329 4330 static inline int get_sd_load_idx(struct sched_domain *sd, 4330 4331 enum cpu_idle_type idle) ··· 4621 4618 * 4622 4619 * Determine if @sg is a busier group than the previously selected 4623 4620 * busiest group. 4621 + * 4622 + * Return: %true if @sg is a busier group than the previously selected 4623 + * busiest group. %false otherwise. 4624 4624 */ 4625 4625 static bool update_sd_pick_busiest(struct lb_env *env, 4626 4626 struct sd_lb_stats *sds, ··· 4741 4735 * assuming lower CPU number will be equivalent to lower a SMT thread 4742 4736 * number. 4743 4737 * 4744 - * Returns 1 when packing is required and a task should be moved to 4738 + * Return: 1 when packing is required and a task should be moved to 4745 4739 * this CPU. The amount of the imbalance is returned in *imbalance. 4746 4740 * 4747 4741 * @env: The load balancing environment. ··· 4919 4913 * @balance: Pointer to a variable indicating if this_cpu 4920 4914 * is the appropriate cpu to perform load balancing at this_level. 4921 4915 * 4922 - * Returns: - the busiest group if imbalance exists. 4916 + * Return: - The busiest group if imbalance exists. 4923 4917 * - If no imbalance and user has opted for power-savings balance, 4924 4918 * return the least loaded group whose CPUs can be 4925 4919 * put to idle by rebalancing its tasks onto our group.
+1 -1
kernel/time/sched_clock.c
··· 121 121 BUG_ON(bits > 32); 122 122 WARN_ON(!irqs_disabled()); 123 123 read_sched_clock = read; 124 - sched_clock_mask = (1 << bits) - 1; 124 + sched_clock_mask = (1ULL << bits) - 1; 125 125 cd.rate = rate; 126 126 127 127 /* calculate the mult/shift to convert counter ticks to ns. */
+2 -3
kernel/time/tick-sched.c
··· 182 182 * Don't allow the user to think they can get 183 183 * full NO_HZ with this machine. 184 184 */ 185 - WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); 185 + WARN_ONCE(have_nohz_full_mask, 186 + "NO_HZ FULL will not work with unstable sched clock"); 186 187 return false; 187 188 } 188 189 #endif ··· 344 343 345 344 void __init tick_nohz_init(void) 346 345 { 347 - int cpu; 348 - 349 346 if (!have_nohz_full_mask) { 350 347 if (tick_nohz_init_all() < 0) 351 348 return;
+25 -18
kernel/time/timer_list.c
··· 265 265 static int timer_list_show(struct seq_file *m, void *v) 266 266 { 267 267 struct timer_list_iter *iter = v; 268 - u64 now = ktime_to_ns(ktime_get()); 269 268 270 269 if (iter->cpu == -1 && !iter->second_pass) 271 - timer_list_header(m, now); 270 + timer_list_header(m, iter->now); 272 271 else if (!iter->second_pass) 273 272 print_cpu(m, iter->cpu, iter->now); 274 273 #ifdef CONFIG_GENERIC_CLOCKEVENTS ··· 297 298 return; 298 299 } 299 300 301 + static void *move_iter(struct timer_list_iter *iter, loff_t offset) 302 + { 303 + for (; offset; offset--) { 304 + iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); 305 + if (iter->cpu >= nr_cpu_ids) { 306 + #ifdef CONFIG_GENERIC_CLOCKEVENTS 307 + if (!iter->second_pass) { 308 + iter->cpu = -1; 309 + iter->second_pass = true; 310 + } else 311 + return NULL; 312 + #else 313 + return NULL; 314 + #endif 315 + } 316 + } 317 + return iter; 318 + } 319 + 300 320 static void *timer_list_start(struct seq_file *file, loff_t *offset) 301 321 { 302 322 struct timer_list_iter *iter = file->private; 303 323 304 - if (!*offset) { 305 - iter->cpu = -1; 324 + if (!*offset) 306 325 iter->now = ktime_to_ns(ktime_get()); 307 - } else if (iter->cpu >= nr_cpu_ids) { 308 - #ifdef CONFIG_GENERIC_CLOCKEVENTS 309 - if (!iter->second_pass) { 310 - iter->cpu = -1; 311 - iter->second_pass = true; 312 - } else 313 - return NULL; 314 - #else 315 - return NULL; 316 - #endif 317 - } 318 - return iter; 326 + iter->cpu = -1; 327 + iter->second_pass = false; 328 + return move_iter(iter, *offset); 319 329 } 320 330 321 331 static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset) 322 332 { 323 333 struct timer_list_iter *iter = file->private; 324 - iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); 325 334 ++*offset; 326 - return timer_list_start(file, offset); 335 + return move_iter(iter, 1); 327 336 } 328 337 329 338 static void timer_list_stop(struct seq_file *seq, void *v)
+1 -2
kernel/wait.c
··· 363 363 364 364 /** 365 365 * wake_up_atomic_t - Wake up a waiter on a atomic_t 366 - * @word: The word being waited on, a kernel virtual address 367 - * @bit: The bit of the word being waited on 366 + * @p: The atomic_t being waited on, a kernel virtual address 368 367 * 369 368 * Wake up anyone waiting for the atomic_t to go to zero. 370 369 *
+2 -2
lib/lz4/lz4_compress.c
··· 437 437 exit: 438 438 return ret; 439 439 } 440 - EXPORT_SYMBOL_GPL(lz4_compress); 440 + EXPORT_SYMBOL(lz4_compress); 441 441 442 - MODULE_LICENSE("GPL"); 442 + MODULE_LICENSE("Dual BSD/GPL"); 443 443 MODULE_DESCRIPTION("LZ4 compressor");
+3 -3
lib/lz4/lz4_decompress.c
··· 299 299 return ret; 300 300 } 301 301 #ifndef STATIC 302 - EXPORT_SYMBOL_GPL(lz4_decompress); 302 + EXPORT_SYMBOL(lz4_decompress); 303 303 #endif 304 304 305 305 int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, ··· 319 319 return ret; 320 320 } 321 321 #ifndef STATIC 322 - EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize); 322 + EXPORT_SYMBOL(lz4_decompress_unknownoutputsize); 323 323 324 - MODULE_LICENSE("GPL"); 324 + MODULE_LICENSE("Dual BSD/GPL"); 325 325 MODULE_DESCRIPTION("LZ4 Decompressor"); 326 326 #endif
+2 -2
lib/lz4/lz4hc_compress.c
··· 533 533 exit: 534 534 return ret; 535 535 } 536 - EXPORT_SYMBOL_GPL(lz4hc_compress); 536 + EXPORT_SYMBOL(lz4hc_compress); 537 537 538 - MODULE_LICENSE("GPL"); 538 + MODULE_LICENSE("Dual BSD/GPL"); 539 539 MODULE_DESCRIPTION("LZ4HC compressor");
+9 -4
mm/fremap.c
··· 57 57 unsigned long addr, unsigned long pgoff, pgprot_t prot) 58 58 { 59 59 int err = -ENOMEM; 60 - pte_t *pte; 60 + pte_t *pte, ptfile; 61 61 spinlock_t *ptl; 62 62 63 63 pte = get_locked_pte(mm, addr, &ptl); 64 64 if (!pte) 65 65 goto out; 66 66 67 - if (!pte_none(*pte)) 68 - zap_pte(mm, vma, addr, pte); 67 + ptfile = pgoff_to_pte(pgoff); 69 68 70 - set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); 69 + if (!pte_none(*pte)) { 70 + if (pte_present(*pte) && pte_soft_dirty(*pte)) 71 + pte_file_mksoft_dirty(ptfile); 72 + zap_pte(mm, vma, addr, pte); 73 + } 74 + 75 + set_pte_at(mm, addr, pte, ptfile); 71 76 /* 72 77 * We don't need to run update_mmu_cache() here because the "file pte" 73 78 * being installed by install_file_pte() is not a real pte - it's a
+1 -1
mm/hugetlb.c
··· 2490 2490 2491 2491 mm = vma->vm_mm; 2492 2492 2493 - tlb_gather_mmu(&tlb, mm, 0); 2493 + tlb_gather_mmu(&tlb, mm, start, end); 2494 2494 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 2495 2495 tlb_finish_mmu(&tlb, start, end); 2496 2496 }
+2 -3
mm/memcontrol.c
··· 3195 3195 if (!s->memcg_params) 3196 3196 return -ENOMEM; 3197 3197 3198 - INIT_WORK(&s->memcg_params->destroy, 3199 - kmem_cache_destroy_work_func); 3200 3198 if (memcg) { 3201 3199 s->memcg_params->memcg = memcg; 3202 3200 s->memcg_params->root_cache = root_cache; 3201 + INIT_WORK(&s->memcg_params->destroy, 3202 + kmem_cache_destroy_work_func); 3203 3203 } else 3204 3204 s->memcg_params->is_root_cache = true; 3205 3205 ··· 6969 6969 #ifdef CONFIG_MEMCG_SWAP 6970 6970 static int __init enable_swap_account(char *s) 6971 6971 { 6972 - /* consider enabled if no parameter or 1 is given */ 6973 6972 if (!strcmp(s, "1")) 6974 6973 really_do_swap_account = 1; 6975 6974 else if (!strcmp(s, "0"))
+31 -18
mm/memory.c
··· 209 209 * tear-down from @mm. The @fullmm argument is used when @mm is without 210 210 * users and we're going to destroy the full address space (exit/execve). 211 211 */ 212 - void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) 212 + void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 213 213 { 214 214 tlb->mm = mm; 215 215 216 - tlb->fullmm = fullmm; 216 + /* Is it from 0 to ~0? */ 217 + tlb->fullmm = !(start | (end+1)); 217 218 tlb->need_flush_all = 0; 218 - tlb->start = -1UL; 219 - tlb->end = 0; 219 + tlb->start = start; 220 + tlb->end = end; 220 221 tlb->need_flush = 0; 221 222 tlb->local.next = NULL; 222 223 tlb->local.nr = 0; ··· 257 256 { 258 257 struct mmu_gather_batch *batch, *next; 259 258 260 - tlb->start = start; 261 - tlb->end = end; 262 259 tlb_flush_mmu(tlb); 263 260 264 261 /* keep the page table cache within bounds */ ··· 1098 1099 spinlock_t *ptl; 1099 1100 pte_t *start_pte; 1100 1101 pte_t *pte; 1101 - unsigned long range_start = addr; 1102 1102 1103 1103 again: 1104 1104 init_rss_vec(rss); ··· 1139 1141 continue; 1140 1142 if (unlikely(details) && details->nonlinear_vma 1141 1143 && linear_page_index(details->nonlinear_vma, 1142 - addr) != page->index) 1143 - set_pte_at(mm, addr, pte, 1144 - pgoff_to_pte(page->index)); 1144 + addr) != page->index) { 1145 + pte_t ptfile = pgoff_to_pte(page->index); 1146 + if (pte_soft_dirty(ptent)) 1147 + pte_file_mksoft_dirty(ptfile); 1148 + set_pte_at(mm, addr, pte, ptfile); 1149 + } 1145 1150 if (PageAnon(page)) 1146 1151 rss[MM_ANONPAGES]--; 1147 1152 else { ··· 1203 1202 * and page-free while holding it. 1204 1203 */ 1205 1204 if (force_flush) { 1205 + unsigned long old_end; 1206 + 1206 1207 force_flush = 0; 1207 1208 1208 - #ifdef HAVE_GENERIC_MMU_GATHER 1209 - tlb->start = range_start; 1209 + /* 1210 + * Flush the TLB just for the previous segment, 1211 + * then update the range to be the remaining 1212 + * TLB range. 
1213 + */ 1214 + old_end = tlb->end; 1210 1215 tlb->end = addr; 1211 - #endif 1216 + 1212 1217 tlb_flush_mmu(tlb); 1213 - if (addr != end) { 1214 - range_start = addr; 1218 + 1219 + tlb->start = addr; 1220 + tlb->end = old_end; 1221 + 1222 + if (addr != end) 1215 1223 goto again; 1216 - } 1217 1224 } 1218 1225 1219 1226 return addr; ··· 1406 1397 unsigned long end = start + size; 1407 1398 1408 1399 lru_add_drain(); 1409 - tlb_gather_mmu(&tlb, mm, 0); 1400 + tlb_gather_mmu(&tlb, mm, start, end); 1410 1401 update_hiwater_rss(mm); 1411 1402 mmu_notifier_invalidate_range_start(mm, start, end); 1412 1403 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) ··· 1432 1423 unsigned long end = address + size; 1433 1424 1434 1425 lru_add_drain(); 1435 - tlb_gather_mmu(&tlb, mm, 0); 1426 + tlb_gather_mmu(&tlb, mm, address, end); 1436 1427 update_hiwater_rss(mm); 1437 1428 mmu_notifier_invalidate_range_start(mm, address, end); 1438 1429 unmap_single_vma(&tlb, vma, address, end, details); ··· 3124 3115 exclusive = 1; 3125 3116 } 3126 3117 flush_icache_page(vma, page); 3118 + if (pte_swp_soft_dirty(orig_pte)) 3119 + pte = pte_mksoft_dirty(pte); 3127 3120 set_pte_at(mm, address, page_table, pte); 3128 3121 if (page == swapcache) 3129 3122 do_page_add_anon_rmap(page, vma, address, exclusive); ··· 3419 3408 entry = mk_pte(page, vma->vm_page_prot); 3420 3409 if (flags & FAULT_FLAG_WRITE) 3421 3410 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3411 + else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte)) 3412 + pte_mksoft_dirty(entry); 3422 3413 if (anon) { 3423 3414 inc_mm_counter_fast(mm, MM_ANONPAGES); 3424 3415 page_add_new_anon_rmap(page, vma, address);
+2 -2
mm/mmap.c
··· 2336 2336 struct mmu_gather tlb; 2337 2337 2338 2338 lru_add_drain(); 2339 - tlb_gather_mmu(&tlb, mm, 0); 2339 + tlb_gather_mmu(&tlb, mm, start, end); 2340 2340 update_hiwater_rss(mm); 2341 2341 unmap_vmas(&tlb, vma, start, end); 2342 2342 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, ··· 2709 2709 2710 2710 lru_add_drain(); 2711 2711 flush_cache_mm(mm); 2712 - tlb_gather_mmu(&tlb, mm, 1); 2712 + tlb_gather_mmu(&tlb, mm, 0, -1); 2713 2713 /* update_hiwater_rss(mm) here? but nobody should be looking */ 2714 2714 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2715 2715 unmap_vmas(&tlb, vma, 0, -1);
+20 -1
mm/mremap.c
··· 15 15 #include <linux/swap.h> 16 16 #include <linux/capability.h> 17 17 #include <linux/fs.h> 18 + #include <linux/swapops.h> 18 19 #include <linux/highmem.h> 19 20 #include <linux/security.h> 20 21 #include <linux/syscalls.h> ··· 68 67 VM_BUG_ON(pmd_trans_huge(*pmd)); 69 68 70 69 return pmd; 70 + } 71 + 72 + static pte_t move_soft_dirty_pte(pte_t pte) 73 + { 74 + /* 75 + * Set soft dirty bit so we can notice 76 + * in userspace the ptes were moved. 77 + */ 78 + #ifdef CONFIG_MEM_SOFT_DIRTY 79 + if (pte_present(pte)) 80 + pte = pte_mksoft_dirty(pte); 81 + else if (is_swap_pte(pte)) 82 + pte = pte_swp_mksoft_dirty(pte); 83 + else if (pte_file(pte)) 84 + pte = pte_file_mksoft_dirty(pte); 85 + #endif 86 + return pte; 71 87 } 72 88 73 89 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, ··· 144 126 continue; 145 127 pte = ptep_get_and_clear(mm, old_addr, old_pte); 146 128 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 147 - set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte)); 129 + pte = move_soft_dirty_pte(pte); 130 + set_pte_at(mm, new_addr, new_pte, pte); 148 131 } 149 132 150 133 arch_leave_lazy_mmu_mode();
+11 -3
mm/rmap.c
··· 1236 1236 swp_entry_to_pte(make_hwpoison_entry(page))); 1237 1237 } else if (PageAnon(page)) { 1238 1238 swp_entry_t entry = { .val = page_private(page) }; 1239 + pte_t swp_pte; 1239 1240 1240 1241 if (PageSwapCache(page)) { 1241 1242 /* ··· 1265 1264 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 1266 1265 entry = make_migration_entry(page, pte_write(pteval)); 1267 1266 } 1268 - set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 1267 + swp_pte = swp_entry_to_pte(entry); 1268 + if (pte_soft_dirty(pteval)) 1269 + swp_pte = pte_swp_mksoft_dirty(swp_pte); 1270 + set_pte_at(mm, address, pte, swp_pte); 1269 1271 BUG_ON(pte_file(*pte)); 1270 1272 } else if (IS_ENABLED(CONFIG_MIGRATION) && 1271 1273 (TTU_ACTION(flags) == TTU_MIGRATION)) { ··· 1405 1401 pteval = ptep_clear_flush(vma, address, pte); 1406 1402 1407 1403 /* If nonlinear, store the file page offset in the pte. */ 1408 - if (page->index != linear_page_index(vma, address)) 1409 - set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 1404 + if (page->index != linear_page_index(vma, address)) { 1405 + pte_t ptfile = pgoff_to_pte(page->index); 1406 + if (pte_soft_dirty(pteval)) 1407 + pte_file_mksoft_dirty(ptfile); 1408 + set_pte_at(mm, address, pte, ptfile); 1409 + } 1410 1410 1411 1411 /* Move the dirty bit to the physical page now the pte is gone. */ 1412 1412 if (pte_dirty(pteval))
+1 -7
mm/shmem.c
··· 2909 2909 2910 2910 /* common code */ 2911 2911 2912 - static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen) 2913 - { 2914 - return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)", 2915 - dentry->d_name.name); 2916 - } 2917 - 2918 2912 static struct dentry_operations anon_ops = { 2919 - .d_dname = shmem_dname 2913 + .d_dname = simple_dname 2920 2914 }; 2921 2915 2922 2916 /**
+2
mm/slab.h
··· 162 162 163 163 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) 164 164 { 165 + if (!s->memcg_params) 166 + return NULL; 165 167 return s->memcg_params->memcg_caches[idx]; 166 168 } 167 169
+17 -2
mm/swapfile.c
··· 866 866 } 867 867 #endif /* CONFIG_HIBERNATION */ 868 868 869 + static inline int maybe_same_pte(pte_t pte, pte_t swp_pte) 870 + { 871 + #ifdef CONFIG_MEM_SOFT_DIRTY 872 + /* 873 + * When pte keeps soft dirty bit the pte generated 874 + * from swap entry does not has it, still it's same 875 + * pte from logical point of view. 876 + */ 877 + pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte); 878 + return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty); 879 + #else 880 + return pte_same(pte, swp_pte); 881 + #endif 882 + } 883 + 869 884 /* 870 885 * No need to decide whether this PTE shares the swap entry with others, 871 886 * just let do_wp_page work it out if a write is requested later - to ··· 907 892 } 908 893 909 894 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 910 - if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { 895 + if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) { 911 896 mem_cgroup_cancel_charge_swapin(memcg); 912 897 ret = 0; 913 898 goto out; ··· 962 947 * swapoff spends a _lot_ of time in this loop! 963 948 * Test inline before going to call unuse_pte. 964 949 */ 965 - if (unlikely(pte_same(*pte, swp_pte))) { 950 + if (unlikely(maybe_same_pte(*pte, swp_pte))) { 966 951 pte_unmap(pte); 967 952 ret = unuse_pte(vma, pmd, addr, entry, page); 968 953 if (ret)
+6 -1
net/8021q/vlan_core.c
··· 91 91 92 92 struct net_device *vlan_dev_real_dev(const struct net_device *dev) 93 93 { 94 - return vlan_dev_priv(dev)->real_dev; 94 + struct net_device *ret = vlan_dev_priv(dev)->real_dev; 95 + 96 + while (is_vlan_dev(ret)) 97 + ret = vlan_dev_priv(ret)->real_dev; 98 + 99 + return ret; 95 100 } 96 101 EXPORT_SYMBOL(vlan_dev_real_dev); 97 102
+2
net/batman-adv/bridge_loop_avoidance.c
··· 1529 1529 * in these cases, the skb is further handled by this function and 1530 1530 * returns 1, otherwise it returns 0 and the caller shall further 1531 1531 * process the skb. 1532 + * 1533 + * This call might reallocate skb data. 1532 1534 */ 1533 1535 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1534 1536 unsigned short vid)
+12 -1
net/batman-adv/gateway_client.c
··· 508 508 return 0; 509 509 } 510 510 511 + /* this call might reallocate skb data */ 511 512 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) 512 513 { 513 514 int ret = false; ··· 569 568 return ret; 570 569 } 571 570 571 + /* this call might reallocate skb data */ 572 572 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) 573 573 { 574 574 struct ethhdr *ethhdr; ··· 621 619 622 620 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) 623 621 return false; 622 + 623 + /* skb->data might have been reallocated by pskb_may_pull() */ 624 + ethhdr = (struct ethhdr *)skb->data; 625 + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) 626 + ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); 627 + 624 628 udphdr = (struct udphdr *)(skb->data + *header_len); 625 629 *header_len += sizeof(*udphdr); 626 630 ··· 642 634 return true; 643 635 } 644 636 637 + /* this call might reallocate skb data */ 645 638 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 646 - struct sk_buff *skb, struct ethhdr *ethhdr) 639 + struct sk_buff *skb) 647 640 { 648 641 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; 649 642 struct batadv_orig_node *orig_dst_node = NULL; 650 643 struct batadv_gw_node *curr_gw = NULL; 644 + struct ethhdr *ethhdr; 651 645 bool ret, out_of_range = false; 652 646 unsigned int header_len = 0; 653 647 uint8_t curr_tq_avg; ··· 658 648 if (!ret) 659 649 goto out; 660 650 651 + ethhdr = (struct ethhdr *)skb->data; 661 652 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, 662 653 ethhdr->h_dest); 663 654 if (!orig_dst_node)
+1 -2
net/batman-adv/gateway_client.h
··· 34 34 void batadv_gw_node_purge(struct batadv_priv *bat_priv); 35 35 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); 36 36 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); 37 - bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 38 - struct sk_buff *skb, struct ethhdr *ethhdr); 37 + bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb); 39 38 40 39 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
+8 -1
net/batman-adv/soft-interface.c
··· 180 180 if (batadv_bla_tx(bat_priv, skb, vid)) 181 181 goto dropped; 182 182 183 + /* skb->data might have been reallocated by batadv_bla_tx() */ 184 + ethhdr = (struct ethhdr *)skb->data; 185 + 183 186 /* Register the client MAC in the transtable */ 184 187 if (!is_multicast_ether_addr(ethhdr->h_source)) 185 188 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); ··· 223 220 default: 224 221 break; 225 222 } 223 + 224 + /* reminder: ethhdr might have become unusable from here on 225 + * (batadv_gw_is_dhcp_target() might have reallocated skb data) 226 + */ 226 227 } 227 228 228 229 /* ethernet packet should be broadcasted */ ··· 273 266 /* unicast packet */ 274 267 } else { 275 268 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { 276 - ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); 269 + ret = batadv_gw_out_of_range(bat_priv, skb); 277 270 if (ret) 278 271 goto dropped; 279 272 }
+17 -6
net/batman-adv/unicast.c
··· 326 326 * @skb: the skb containing the payload to encapsulate 327 327 * @orig_node: the destination node 328 328 * 329 - * Returns false if the payload could not be encapsulated or true otherwise 329 + * Returns false if the payload could not be encapsulated or true otherwise. 330 + * 331 + * This call might reallocate skb data. 330 332 */ 331 333 static bool batadv_unicast_prepare_skb(struct sk_buff *skb, 332 334 struct batadv_orig_node *orig_node) ··· 345 343 * @orig_node: the destination node 346 344 * @packet_subtype: the batman 4addr packet subtype to use 347 345 * 348 - * Returns false if the payload could not be encapsulated or true otherwise 346 + * Returns false if the payload could not be encapsulated or true otherwise. 347 + * 348 + * This call might reallocate skb data. 349 349 */ 350 350 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, 351 351 struct sk_buff *skb, ··· 405 401 struct batadv_neigh_node *neigh_node; 406 402 int data_len = skb->len; 407 403 int ret = NET_RX_DROP; 408 - unsigned int dev_mtu; 404 + unsigned int dev_mtu, header_len; 409 405 410 406 /* get routing information */ 411 407 if (is_multicast_ether_addr(ethhdr->h_dest)) { ··· 432 428 433 429 switch (packet_type) { 434 430 case BATADV_UNICAST: 435 - batadv_unicast_prepare_skb(skb, orig_node); 431 + if (!batadv_unicast_prepare_skb(skb, orig_node)) 432 + goto out; 433 + 434 + header_len = sizeof(struct batadv_unicast_packet); 436 435 break; 437 436 case BATADV_UNICAST_4ADDR: 438 - batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, 439 - packet_subtype); 437 + if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, 438 + packet_subtype)) 439 + goto out; 440 + 441 + header_len = sizeof(struct batadv_unicast_4addr_packet); 440 442 break; 441 443 default: 442 444 /* this function supports UNICAST and UNICAST_4ADDR only. 
It ··· 451 441 goto out; 452 442 } 453 443 444 + ethhdr = (struct ethhdr *)(skb->data + header_len); 454 445 unicast_packet = (struct batadv_unicast_packet *)skb->data; 455 446 456 447 /* inform the destination node that we are still missing a correct route
+5 -5
net/bridge/br_fdb.c
··· 161 161 if (!pv) 162 162 return; 163 163 164 - for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 164 + for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) { 165 165 f = __br_fdb_get(br, br->dev->dev_addr, vid); 166 166 if (f && f->is_local && !f->dst) 167 167 fdb_delete(br, f); ··· 730 730 /* VID was specified, so use it. */ 731 731 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 732 732 } else { 733 - if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 733 + if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) { 734 734 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); 735 735 goto out; 736 736 } ··· 739 739 * specify a VLAN. To be nice, add/update entry for every 740 740 * vlan on this port. 741 741 */ 742 - for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 742 + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { 743 743 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 744 744 if (err) 745 745 goto out; ··· 817 817 818 818 err = __br_fdb_delete(p, addr, vid); 819 819 } else { 820 - if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 820 + if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) { 821 821 err = __br_fdb_delete(p, addr, 0); 822 822 goto out; 823 823 } ··· 827 827 * vlan on this port. 828 828 */ 829 829 err = -ENOENT; 830 - for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 830 + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { 831 831 err &= __br_fdb_delete(p, addr, vid); 832 832 } 833 833 }
+1 -1
net/bridge/br_multicast.c
··· 1195 1195 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 1196 1196 if (max_delay) 1197 1197 group = &mld->mld_mca; 1198 - } else if (skb->len >= sizeof(*mld2q)) { 1198 + } else { 1199 1199 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1200 1200 err = -EINVAL; 1201 1201 goto out;
+2 -2
net/bridge/br_netlink.c
··· 132 132 else 133 133 pv = br_get_vlan_info(br); 134 134 135 - if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) 135 + if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) 136 136 goto done; 137 137 138 138 af = nla_nest_start(skb, IFLA_AF_SPEC); ··· 140 140 goto nla_put_failure; 141 141 142 142 pvid = br_get_pvid(pv); 143 - for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 143 + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { 144 144 vinfo.vid = vid; 145 145 vinfo.flags = 0; 146 146 if (vid == pvid)
+1 -1
net/bridge/br_sysfs_br.c
··· 1 1 /* 2 - * Sysfs attributes of bridge ports 2 + * Sysfs attributes of bridge 3 3 * Linux ethernet bridge 4 4 * 5 5 * Authors:
+2 -2
net/bridge/br_vlan.c
··· 108 108 109 109 clear_bit(vid, v->vlan_bitmap); 110 110 v->num_vlans--; 111 - if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 111 + if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) { 112 112 if (v->port_idx) 113 113 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 114 114 else ··· 122 122 { 123 123 smp_wmb(); 124 124 v->pvid = 0; 125 - bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN); 125 + bitmap_zero(v->vlan_bitmap, VLAN_N_VID); 126 126 if (v->port_idx) 127 127 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 128 128 else
+1
net/core/flow_dissector.c
··· 65 65 nhoff += sizeof(struct ipv6hdr); 66 66 break; 67 67 } 68 + case __constant_htons(ETH_P_8021AD): 68 69 case __constant_htons(ETH_P_8021Q): { 69 70 const struct vlan_hdr *vlan; 70 71 struct vlan_hdr _vlan;
+8 -6
net/core/neighbour.c
··· 1441 1441 atomic_set(&p->refcnt, 1); 1442 1442 p->reachable_time = 1443 1443 neigh_rand_reach_time(p->base_reachable_time); 1444 - 1445 - if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1446 - kfree(p); 1447 - return NULL; 1448 - } 1449 - 1450 1444 dev_hold(dev); 1451 1445 p->dev = dev; 1452 1446 write_pnet(&p->net, hold_net(net)); 1453 1447 p->sysctl_table = NULL; 1448 + 1449 + if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1450 + release_net(net); 1451 + dev_put(dev); 1452 + kfree(p); 1453 + return NULL; 1454 + } 1455 + 1454 1456 write_lock_bh(&tbl->lock); 1455 1457 p->next = tbl->parms.next; 1456 1458 tbl->parms.next = p;
+2 -2
net/core/rtnetlink.c
··· 2156 2156 /* If aging addresses are supported device will need to 2157 2157 * implement its own handler for this. 2158 2158 */ 2159 - if (ndm->ndm_state & NUD_PERMANENT) { 2159 + if (!(ndm->ndm_state & NUD_PERMANENT)) { 2160 2160 pr_info("%s: FDB only supports static addresses\n", dev->name); 2161 2161 return -EINVAL; 2162 2162 } ··· 2384 2384 struct nlattr *extfilt; 2385 2385 u32 filter_mask = 0; 2386 2386 2387 - extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), 2387 + extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), 2388 2388 IFLA_EXT_MASK); 2389 2389 if (extfilt) 2390 2390 filter_mask = nla_get_u32(extfilt);
+1 -1
net/ipv4/esp4.c
··· 477 477 } 478 478 479 479 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - 480 - net_adj) & ~(align - 1)) + (net_adj - 2); 480 + net_adj) & ~(align - 1)) + net_adj - 2; 481 481 } 482 482 483 483 static void esp4_err(struct sk_buff *skb, u32 info)
+1 -4
net/ipv4/fib_trie.c
··· 71 71 #include <linux/init.h> 72 72 #include <linux/list.h> 73 73 #include <linux/slab.h> 74 - #include <linux/prefetch.h> 75 74 #include <linux/export.h> 76 75 #include <net/net_namespace.h> 77 76 #include <net/ip.h> ··· 1760 1761 if (!c) 1761 1762 continue; 1762 1763 1763 - if (IS_LEAF(c)) { 1764 - prefetch(rcu_dereference_rtnl(p->child[idx])); 1764 + if (IS_LEAF(c)) 1765 1765 return (struct leaf *) c; 1766 - } 1767 1766 1768 1767 /* Rescan start scanning in new node */ 1769 1768 p = (struct tnode *) c;
+1 -1
net/ipv4/ip_gre.c
··· 383 383 if (daddr) 384 384 memcpy(&iph->daddr, daddr, 4); 385 385 if (iph->daddr) 386 - return t->hlen; 386 + return t->hlen + sizeof(*iph); 387 387 388 388 return -(t->hlen + sizeof(*iph)); 389 389 }
+1 -3
net/ipv4/ip_tunnel_core.c
··· 76 76 iph->daddr = dst; 77 77 iph->saddr = src; 78 78 iph->ttl = ttl; 79 - tunnel_ip_select_ident(skb, 80 - (const struct iphdr *)skb_inner_network_header(skb), 81 - &rt->dst); 79 + __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); 82 80 83 81 err = ip_local_out(skb); 84 82 if (unlikely(net_xmit_eval(err)))
+1 -1
net/ipv4/proc.c
··· 273 273 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), 274 274 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), 275 275 SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), 276 - SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), 276 + SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), 277 277 SNMP_MIB_SENTINEL 278 278 }; 279 279
+7
net/ipv4/tcp.c
··· 1121 1121 goto wait_for_memory; 1122 1122 1123 1123 /* 1124 + * All packets are restored as if they have 1125 + * already been sent. 1126 + */ 1127 + if (tp->repair) 1128 + TCP_SKB_CB(skb)->when = tcp_time_stamp; 1129 + 1130 + /* 1124 1131 * Check whether we can use HW checksum. 1125 1132 */ 1126 1133 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
+7 -5
net/ipv4/tcp_cubic.c
··· 206 206 */ 207 207 static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 208 208 { 209 - u64 offs; 210 - u32 delta, t, bic_target, max_cnt; 209 + u32 delta, bic_target, max_cnt; 210 + u64 offs, t; 211 211 212 212 ca->ack_cnt++; /* count the number of ACKs */ 213 213 ··· 250 250 * if the cwnd < 1 million packets !!! 251 251 */ 252 252 253 + t = (s32)(tcp_time_stamp - ca->epoch_start); 254 + t += msecs_to_jiffies(ca->delay_min >> 3); 253 255 /* change the unit from HZ to bictcp_HZ */ 254 - t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) 255 - - ca->epoch_start) << BICTCP_HZ) / HZ; 256 + t <<= BICTCP_HZ; 257 + do_div(t, HZ); 256 258 257 259 if (t < ca->bic_K) /* t - K */ 258 260 offs = ca->bic_K - t; ··· 416 414 return; 417 415 418 416 /* Discard delay samples right after fast recovery */ 419 - if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) 417 + if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) 420 418 return; 421 419 422 420 delay = (rtt_us << 3) / USEC_PER_MSEC;
+4 -6
net/ipv6/addrconf.c
··· 1126 1126 if (ifp->flags & IFA_F_OPTIMISTIC) 1127 1127 addr_flags |= IFA_F_OPTIMISTIC; 1128 1128 1129 - ift = !max_addresses || 1130 - ipv6_count_addresses(idev) < max_addresses ? 1131 - ipv6_add_addr(idev, &addr, NULL, tmp_plen, 1132 - ipv6_addr_scope(&addr), addr_flags, 1133 - tmp_valid_lft, tmp_prefered_lft) : NULL; 1134 - if (IS_ERR_OR_NULL(ift)) { 1129 + ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen, 1130 + ipv6_addr_scope(&addr), addr_flags, 1131 + tmp_valid_lft, tmp_prefered_lft); 1132 + if (IS_ERR(ift)) { 1135 1133 in6_ifa_put(ifp); 1136 1134 in6_dev_put(idev); 1137 1135 pr_info("%s: retry temporary address regeneration\n", __func__);
+1 -1
net/ipv6/esp6.c
··· 425 425 net_adj = 0; 426 426 427 427 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - 428 - net_adj) & ~(align - 1)) + (net_adj - 2); 428 + net_adj) & ~(align - 1)) + net_adj - 2; 429 429 } 430 430 431 431 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+12 -4
net/ipv6/ip6_fib.c
··· 993 993 994 994 if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { 995 995 #ifdef CONFIG_IPV6_SUBTREES 996 - if (fn->subtree) 997 - fn = fib6_lookup_1(fn->subtree, args + 1); 996 + if (fn->subtree) { 997 + struct fib6_node *sfn; 998 + sfn = fib6_lookup_1(fn->subtree, 999 + args + 1); 1000 + if (!sfn) 1001 + goto backtrack; 1002 + fn = sfn; 1003 + } 998 1004 #endif 999 - if (!fn || fn->fn_flags & RTN_RTINFO) 1005 + if (fn->fn_flags & RTN_RTINFO) 1000 1006 return fn; 1001 1007 } 1002 1008 } 1003 - 1009 + #ifdef CONFIG_IPV6_SUBTREES 1010 + backtrack: 1011 + #endif 1004 1012 if (fn->fn_flags & RTN_ROOT) 1005 1013 break; 1006 1014
+3 -1
net/ipv6/ndisc.c
··· 1369 1369 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) 1370 1370 return; 1371 1371 1372 - if (!ndopts.nd_opts_rh) 1372 + if (!ndopts.nd_opts_rh) { 1373 + ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0); 1373 1374 return; 1375 + } 1374 1376 1375 1377 hdr = (u8 *)ndopts.nd_opts_rh; 1376 1378 hdr += 8;
+5
net/ipv6/reassembly.c
··· 490 490 ipv6_hdr(head)->payload_len = htons(payload_len); 491 491 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); 492 492 IP6CB(head)->nhoff = nhoff; 493 + IP6CB(head)->flags |= IP6SKB_FRAGMENTED; 493 494 494 495 /* Yes, and fold redundant checksum back. 8) */ 495 496 if (head->ip_summed == CHECKSUM_COMPLETE) ··· 525 524 struct net *net = dev_net(skb_dst(skb)->dev); 526 525 int evicted; 527 526 527 + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) 528 + goto fail_hdr; 529 + 528 530 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 529 531 530 532 /* Jumbo payload inhibits frag. header */ ··· 548 544 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); 549 545 550 546 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); 547 + IP6CB(skb)->flags |= IP6SKB_FRAGMENTED; 551 548 return 1; 552 549 } 553 550
+21
net/ipv6/route.c
··· 1178 1178 } 1179 1179 EXPORT_SYMBOL_GPL(ip6_redirect); 1180 1180 1181 + void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, 1182 + u32 mark) 1183 + { 1184 + const struct ipv6hdr *iph = ipv6_hdr(skb); 1185 + const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb); 1186 + struct dst_entry *dst; 1187 + struct flowi6 fl6; 1188 + 1189 + memset(&fl6, 0, sizeof(fl6)); 1190 + fl6.flowi6_oif = oif; 1191 + fl6.flowi6_mark = mark; 1192 + fl6.flowi6_flags = 0; 1193 + fl6.daddr = msg->dest; 1194 + fl6.saddr = iph->daddr; 1195 + 1196 + dst = ip6_route_output(net, NULL, &fl6); 1197 + if (!dst->error) 1198 + rt6_do_redirect(dst, NULL, skb); 1199 + dst_release(dst); 1200 + } 1201 + 1181 1202 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) 1182 1203 { 1183 1204 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
+36 -18
net/mac80211/mlme.c
··· 31 31 #include "led.h" 32 32 33 33 #define IEEE80211_AUTH_TIMEOUT (HZ / 5) 34 + #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) 34 35 #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) 35 36 #define IEEE80211_AUTH_MAX_TRIES 3 36 37 #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) 37 38 #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 39 + #define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) 38 40 #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) 39 41 #define IEEE80211_ASSOC_MAX_TRIES 3 40 42 ··· 211 209 struct ieee80211_channel *channel, 212 210 const struct ieee80211_ht_operation *ht_oper, 213 211 const struct ieee80211_vht_operation *vht_oper, 214 - struct cfg80211_chan_def *chandef, bool verbose) 212 + struct cfg80211_chan_def *chandef, bool tracking) 215 213 { 214 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 216 215 struct cfg80211_chan_def vht_chandef; 217 216 u32 ht_cfreq, ret; 218 217 ··· 232 229 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, 233 230 channel->band); 234 231 /* check that channel matches the right operating channel */ 235 - if (channel->center_freq != ht_cfreq) { 232 + if (!tracking && channel->center_freq != ht_cfreq) { 236 233 /* 237 234 * It's possible that some APs are confused here; 238 235 * Netgear WNDR3700 sometimes reports 4 higher than ··· 240 237 * since we look at probe response/beacon data here 241 238 * it should be OK. 
242 239 */ 243 - if (verbose) 244 - sdata_info(sdata, 245 - "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", 246 - channel->center_freq, ht_cfreq, 247 - ht_oper->primary_chan, channel->band); 240 + sdata_info(sdata, 241 + "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", 242 + channel->center_freq, ht_cfreq, 243 + ht_oper->primary_chan, channel->band); 248 244 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; 249 245 goto out; 250 246 } ··· 297 295 channel->band); 298 296 break; 299 297 default: 300 - if (verbose) 298 + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) 301 299 sdata_info(sdata, 302 300 "AP VHT operation IE has invalid channel width (%d), disable VHT\n", 303 301 vht_oper->chan_width); ··· 306 304 } 307 305 308 306 if (!cfg80211_chandef_valid(&vht_chandef)) { 309 - if (verbose) 307 + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) 310 308 sdata_info(sdata, 311 309 "AP VHT information is invalid, disable VHT\n"); 312 310 ret = IEEE80211_STA_DISABLE_VHT; ··· 319 317 } 320 318 321 319 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { 322 - if (verbose) 320 + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) 323 321 sdata_info(sdata, 324 322 "AP VHT information doesn't match HT, disable VHT\n"); 325 323 ret = IEEE80211_STA_DISABLE_VHT; ··· 335 333 if (ret & IEEE80211_STA_DISABLE_VHT) 336 334 vht_chandef = *chandef; 337 335 336 + /* 337 + * Ignore the DISABLED flag when we're already connected and only 338 + * tracking the APs beacon for bandwidth changes - otherwise we 339 + * might get disconnected here if we connect to an AP, update our 340 + * regulatory information based on the AP's country IE and the 341 + * information we have is wrong/outdated and disables the channel 342 + * that we're actually using for the connection to the AP. 
343 + */ 338 344 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, 339 - IEEE80211_CHAN_DISABLED)) { 345 + tracking ? 0 : 346 + IEEE80211_CHAN_DISABLED)) { 340 347 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { 341 348 ret = IEEE80211_STA_DISABLE_HT | 342 349 IEEE80211_STA_DISABLE_VHT; 343 - goto out; 350 + break; 344 351 } 345 352 346 353 ret |= chandef_downgrade(chandef); 347 354 } 348 355 349 - if (chandef->width != vht_chandef.width && verbose) 356 + if (chandef->width != vht_chandef.width && !tracking) 350 357 sdata_info(sdata, 351 358 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); 352 359 ··· 395 384 396 385 /* calculate new channel (type) based on HT/VHT operation IEs */ 397 386 flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, 398 - vht_oper, &chandef, false); 387 + vht_oper, &chandef, true); 399 388 400 389 /* 401 390 * Downgrade the new channel if we associated with restricted ··· 3405 3394 3406 3395 if (tx_flags == 0) { 3407 3396 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 3408 - ifmgd->auth_data->timeout_started = true; 3397 + auth_data->timeout_started = true; 3409 3398 run_again(sdata, auth_data->timeout); 3410 3399 } else { 3411 - auth_data->timeout_started = false; 3400 + auth_data->timeout = 3401 + round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); 3402 + auth_data->timeout_started = true; 3403 + run_again(sdata, auth_data->timeout); 3412 3404 } 3413 3405 3414 3406 return 0; ··· 3448 3434 assoc_data->timeout_started = true; 3449 3435 run_again(sdata, assoc_data->timeout); 3450 3436 } else { 3451 - assoc_data->timeout_started = false; 3437 + assoc_data->timeout = 3438 + round_jiffies_up(jiffies + 3439 + IEEE80211_ASSOC_TIMEOUT_LONG); 3440 + assoc_data->timeout_started = true; 3441 + run_again(sdata, assoc_data->timeout); 3452 3442 } 3453 3443 3454 3444 return 0; ··· 3847 3829 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, 3848 3830 
cbss->channel, 3849 3831 ht_oper, vht_oper, 3850 - &chandef, true); 3832 + &chandef, false); 3851 3833 3852 3834 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), 3853 3835 local->rx_chains);
+8 -4
net/netfilter/nf_conntrack_proto_tcp.c
··· 526 526 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 527 527 __u32 seq, ack, sack, end, win, swin; 528 528 s16 receiver_offset; 529 - bool res; 529 + bool res, in_recv_win; 530 530 531 531 /* 532 532 * Get the required data from the packet. ··· 649 649 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 650 650 receiver->td_scale); 651 651 652 + /* Is the ending sequence in the receive window (if available)? */ 653 + in_recv_win = !receiver->td_maxwin || 654 + after(end, sender->td_end - receiver->td_maxwin - 1); 655 + 652 656 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", 653 657 before(seq, sender->td_maxend + 1), 654 - after(end, sender->td_end - receiver->td_maxwin - 1), 658 + (in_recv_win ? 1 : 0), 655 659 before(sack, receiver->td_end + 1), 656 660 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); 657 661 658 662 if (before(seq, sender->td_maxend + 1) && 659 - after(end, sender->td_end - receiver->td_maxwin - 1) && 663 + in_recv_win && 660 664 before(sack, receiver->td_end + 1) && 661 665 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { 662 666 /* ··· 729 725 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 730 726 "nf_ct_tcp: %s ", 731 727 before(seq, sender->td_maxend + 1) ? 732 - after(end, sender->td_end - receiver->td_maxwin - 1) ? 728 + in_recv_win ? 733 729 before(sack, receiver->td_end + 1) ? 734 730 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" 735 731 : "ACK is under the lower bound (possible overly delayed ACK)"
+17 -13
net/netfilter/xt_TCPMSS.c
··· 52 52 { 53 53 const struct xt_tcpmss_info *info = par->targinfo; 54 54 struct tcphdr *tcph; 55 - unsigned int tcplen, i; 55 + int len, tcp_hdrlen; 56 + unsigned int i; 56 57 __be16 oldval; 57 58 u16 newmss; 58 59 u8 *opt; ··· 65 64 if (!skb_make_writable(skb, skb->len)) 66 65 return -1; 67 66 68 - tcplen = skb->len - tcphoff; 69 - tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 67 + len = skb->len - tcphoff; 68 + if (len < (int)sizeof(struct tcphdr)) 69 + return -1; 70 70 71 - /* Header cannot be larger than the packet */ 72 - if (tcplen < tcph->doff*4) 71 + tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 72 + tcp_hdrlen = tcph->doff * 4; 73 + 74 + if (len < tcp_hdrlen) 73 75 return -1; 74 76 75 77 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { ··· 91 87 newmss = info->mss; 92 88 93 89 opt = (u_int8_t *)tcph; 94 - for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { 95 - if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && 96 - opt[i+1] == TCPOLEN_MSS) { 90 + for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { 91 + if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { 97 92 u_int16_t oldmss; 98 93 99 94 oldmss = (opt[i+2] << 8) | opt[i+3]; ··· 115 112 } 116 113 117 114 /* There is data after the header so the option can't be added 118 - without moving it, and doing so may make the SYN packet 119 - itself too large. Accept the packet unmodified instead. */ 120 - if (tcplen > tcph->doff*4) 115 + * without moving it, and doing so may make the SYN packet 116 + * itself too large. Accept the packet unmodified instead. 
117 + */ 118 + if (len > tcp_hdrlen) 121 119 return 0; 122 120 123 121 /* ··· 147 143 newmss = min(newmss, (u16)1220); 148 144 149 145 opt = (u_int8_t *)tcph + sizeof(struct tcphdr); 150 - memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); 146 + memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); 151 147 152 148 inet_proto_csum_replace2(&tcph->check, skb, 153 - htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); 149 + htons(len), htons(len + TCPOLEN_MSS), 1); 154 150 opt[0] = TCPOPT_MSS; 155 151 opt[1] = TCPOLEN_MSS; 156 152 opt[2] = (newmss & 0xff00) >> 8;
+6 -4
net/netfilter/xt_TCPOPTSTRIP.c
··· 38 38 struct tcphdr *tcph; 39 39 u_int16_t n, o; 40 40 u_int8_t *opt; 41 - int len; 41 + int len, tcp_hdrlen; 42 42 43 43 /* This is a fragment, no TCP header is available */ 44 44 if (par->fragoff != 0) ··· 52 52 return NF_DROP; 53 53 54 54 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 55 - if (tcph->doff * 4 > len) 55 + tcp_hdrlen = tcph->doff * 4; 56 + 57 + if (len < tcp_hdrlen) 56 58 return NF_DROP; 57 59 58 60 opt = (u_int8_t *)tcph; ··· 63 61 * Walk through all TCP options - if we find some option to remove, 64 62 * set all octets to %TCPOPT_NOP and adjust checksum. 65 63 */ 66 - for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { 64 + for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) { 67 65 optl = optlen(opt, i); 68 66 69 - if (i + optl > tcp_hdrlen(skb)) 67 + if (i + optl > tcp_hdrlen) 70 68 break; 71 69 72 70 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
+1
net/openvswitch/actions.c
··· 535 535 { 536 536 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); 537 537 538 + OVS_CB(skb)->tun_key = NULL; 538 539 return do_execute_actions(dp, skb, acts->actions, 539 540 acts->actions_len, false); 540 541 }
-3
net/openvswitch/datapath.c
··· 2076 2076 ovs_notify(reply, info, &ovs_dp_vport_multicast_group); 2077 2077 return 0; 2078 2078 2079 - rtnl_unlock(); 2080 - return 0; 2081 - 2082 2079 exit_free: 2083 2080 kfree_skb(reply); 2084 2081 exit_unlock:
+1 -1
net/openvswitch/flow.c
··· 240 240 struct flex_array *buckets; 241 241 int i, err; 242 242 243 - buckets = flex_array_alloc(sizeof(struct hlist_head *), 243 + buckets = flex_array_alloc(sizeof(struct hlist_head), 244 244 n_buckets, GFP_KERNEL); 245 245 if (!buckets) 246 246 return NULL;
+2
net/packet/af_packet.c
··· 3259 3259 3260 3260 if (po->tp_version == TPACKET_V3) { 3261 3261 lv = sizeof(struct tpacket_stats_v3); 3262 + st.stats3.tp_packets += st.stats3.tp_drops; 3262 3263 data = &st.stats3; 3263 3264 } else { 3264 3265 lv = sizeof(struct tpacket_stats); 3266 + st.stats1.tp_packets += st.stats1.tp_drops; 3265 3267 data = &st.stats1; 3266 3268 } 3267 3269
+41
net/sched/sch_api.c
··· 285 285 return q; 286 286 } 287 287 288 + /* The linklayer setting were not transferred from iproute2, in older 289 + * versions, and the rate tables lookup systems have been dropped in 290 + * the kernel. To keep backward compatible with older iproute2 tc 291 + * utils, we detect the linklayer setting by detecting if the rate 292 + * table were modified. 293 + * 294 + * For linklayer ATM table entries, the rate table will be aligned to 295 + * 48 bytes, thus some table entries will contain the same value. The 296 + * mpu (min packet unit) is also encoded into the old rate table, thus 297 + * starting from the mpu, we find low and high table entries for 298 + * mapping this cell. If these entries contain the same value, when 299 + * the rate tables have been modified for linklayer ATM. 300 + * 301 + * This is done by rounding mpu to the nearest 48 bytes cell/entry, 302 + * and then roundup to the next cell, calc the table entry one below, 303 + * and compare. 304 + */ 305 + static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab) 306 + { 307 + int low = roundup(r->mpu, 48); 308 + int high = roundup(low+1, 48); 309 + int cell_low = low >> r->cell_log; 310 + int cell_high = (high >> r->cell_log) - 1; 311 + 312 + /* rtab is too inaccurate at rates > 100Mbit/s */ 313 + if ((r->rate > (100000000/8)) || (rtab[0] == 0)) { 314 + pr_debug("TC linklayer: Giving up ATM detection\n"); 315 + return TC_LINKLAYER_ETHERNET; 316 + } 317 + 318 + if ((cell_high > cell_low) && (cell_high < 256) 319 + && (rtab[cell_low] == rtab[cell_high])) { 320 + pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n", 321 + cell_low, cell_high, rtab[cell_high]); 322 + return TC_LINKLAYER_ATM; 323 + } 324 + return TC_LINKLAYER_ETHERNET; 325 + } 326 + 288 327 static struct qdisc_rate_table *qdisc_rtab_list; 289 328 290 329 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) ··· 347 308 rtab->rate = *r; 348 309 rtab->refcnt = 1; 349 310 
memcpy(rtab->data, nla_data(tab), 1024); 311 + if (r->linklayer == TC_LINKLAYER_UNAWARE) 312 + r->linklayer = __detect_linklayer(r, rtab->data); 350 313 rtab->next = qdisc_rtab_list; 351 314 qdisc_rtab_list = rtab; 352 315 }
+7 -1
net/sched/sch_generic.c
··· 25 25 #include <linux/rcupdate.h> 26 26 #include <linux/list.h> 27 27 #include <linux/slab.h> 28 + #include <linux/if_vlan.h> 28 29 #include <net/sch_generic.h> 29 30 #include <net/pkt_sched.h> 30 31 #include <net/dst.h> ··· 208 207 209 208 unsigned long dev_trans_start(struct net_device *dev) 210 209 { 211 - unsigned long val, res = dev->trans_start; 210 + unsigned long val, res; 212 211 unsigned int i; 213 212 213 + if (is_vlan_dev(dev)) 214 + dev = vlan_dev_real_dev(dev); 215 + res = dev->trans_start; 214 216 for (i = 0; i < dev->num_tx_queues; i++) { 215 217 val = netdev_get_tx_queue(dev, i)->trans_start; 216 218 if (val && time_after(val, res)) 217 219 res = val; 218 220 } 219 221 dev->trans_start = res; 222 + 220 223 return res; 221 224 } 222 225 EXPORT_SYMBOL(dev_trans_start); ··· 909 904 memset(r, 0, sizeof(*r)); 910 905 r->overhead = conf->overhead; 911 906 r->rate_bytes_ps = conf->rate; 907 + r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); 912 908 r->mult = 1; 913 909 /* 914 910 * The deal here is to replace a divide by a reciprocal one
+13
net/sched/sch_htb.c
··· 1329 1329 struct htb_sched *q = qdisc_priv(sch); 1330 1330 struct htb_class *cl = (struct htb_class *)*arg, *parent; 1331 1331 struct nlattr *opt = tca[TCA_OPTIONS]; 1332 + struct qdisc_rate_table *rtab = NULL, *ctab = NULL; 1332 1333 struct nlattr *tb[TCA_HTB_MAX + 1]; 1333 1334 struct tc_htb_opt *hopt; 1334 1335 ··· 1350 1349 hopt = nla_data(tb[TCA_HTB_PARMS]); 1351 1350 if (!hopt->rate.rate || !hopt->ceil.rate) 1352 1351 goto failure; 1352 + 1353 + /* Keeping backward compatible with rate_table based iproute2 tc */ 1354 + if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) { 1355 + rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); 1356 + if (rtab) 1357 + qdisc_put_rtab(rtab); 1358 + } 1359 + if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) { 1360 + ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); 1361 + if (ctab) 1362 + qdisc_put_rtab(ctab); 1363 + } 1353 1364 1354 1365 if (!cl) { /* new class */ 1355 1366 struct Qdisc *new_q;
+2 -2
net/sctp/associola.c
··· 846 846 else 847 847 spc_state = SCTP_ADDR_AVAILABLE; 848 848 /* Don't inform ULP about transition from PF to 849 - * active state and set cwnd to 1, see SCTP 849 + * active state and set cwnd to 1 MTU, see SCTP 850 850 * Quick failover draft section 5.1, point 5 851 851 */ 852 852 if (transport->state == SCTP_PF) { 853 853 ulp_notify = false; 854 - transport->cwnd = 1; 854 + transport->cwnd = asoc->pathmtu; 855 855 } 856 856 transport->state = SCTP_ACTIVE; 857 857 break;
+2 -2
net/sctp/transport.c
··· 181 181 return; 182 182 } 183 183 184 - call_rcu(&transport->rcu, sctp_transport_destroy_rcu); 185 - 186 184 sctp_packet_free(&transport->packet); 187 185 188 186 if (transport->asoc) 189 187 sctp_association_put(transport->asoc); 188 + 189 + call_rcu(&transport->rcu, sctp_transport_destroy_rcu); 190 190 } 191 191 192 192 /* Start T3_rtx timer if it is not already running and update the heartbeat
+7 -2
net/tipc/bearer.c
··· 460 460 { 461 461 struct tipc_link *l_ptr; 462 462 struct tipc_link *temp_l_ptr; 463 + struct tipc_link_req *temp_req; 463 464 464 465 pr_info("Disabling bearer <%s>\n", b_ptr->name); 465 466 spin_lock_bh(&b_ptr->lock); ··· 469 468 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 470 469 tipc_link_delete(l_ptr); 471 470 } 472 - if (b_ptr->link_req) 473 - tipc_disc_delete(b_ptr->link_req); 471 + temp_req = b_ptr->link_req; 472 + b_ptr->link_req = NULL; 474 473 spin_unlock_bh(&b_ptr->lock); 474 + 475 + if (temp_req) 476 + tipc_disc_delete(temp_req); 477 + 475 478 memset(b_ptr, 0, sizeof(struct tipc_bearer)); 476 479 } 477 480
+1 -1
net/vmw_vsock/af_vsock.c
··· 347 347 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { 348 348 struct vsock_sock *vsk; 349 349 list_for_each_entry(vsk, &vsock_connected_table[i], 350 - connected_table); 350 + connected_table) 351 351 fn(sk_vsock(vsk)); 352 352 } 353 353
+1
net/wireless/core.c
··· 765 765 cfg80211_leave_mesh(rdev, dev); 766 766 break; 767 767 case NL80211_IFTYPE_AP: 768 + case NL80211_IFTYPE_P2P_GO: 768 769 cfg80211_stop_ap(rdev, dev); 769 770 break; 770 771 default:
+15 -13
net/wireless/nl80211.c
··· 441 441 goto out_unlock; 442 442 } 443 443 *rdev = wiphy_to_dev((*wdev)->wiphy); 444 - cb->args[0] = (*rdev)->wiphy_idx; 444 + /* 0 is the first index - add 1 to parse only once */ 445 + cb->args[0] = (*rdev)->wiphy_idx + 1; 445 446 cb->args[1] = (*wdev)->identifier; 446 447 } else { 447 - struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); 448 + /* subtract the 1 again here */ 449 + struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 448 450 struct wireless_dev *tmp; 449 451 450 452 if (!wiphy) { ··· 2622 2620 2623 2621 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 2624 2622 NL80211_CMD_NEW_KEY); 2625 - if (IS_ERR(hdr)) 2626 - return PTR_ERR(hdr); 2623 + if (!hdr) 2624 + return -ENOBUFS; 2627 2625 2628 2626 cookie.msg = msg; 2629 2627 cookie.idx = key_idx; ··· 6507 6505 NL80211_CMD_TESTMODE); 6508 6506 struct nlattr *tmdata; 6509 6507 6508 + if (!hdr) 6509 + break; 6510 + 6510 6511 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { 6511 6512 genlmsg_cancel(skb, hdr); 6512 6513 break; ··· 6954 6949 6955 6950 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 6956 6951 NL80211_CMD_REMAIN_ON_CHANNEL); 6957 - 6958 - if (IS_ERR(hdr)) { 6959 - err = PTR_ERR(hdr); 6952 + if (!hdr) { 6953 + err = -ENOBUFS; 6960 6954 goto free_msg; 6961 6955 } 6962 6956 ··· 7253 7249 7254 7250 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 7255 7251 NL80211_CMD_FRAME); 7256 - 7257 - if (IS_ERR(hdr)) { 7258 - err = PTR_ERR(hdr); 7252 + if (!hdr) { 7253 + err = -ENOBUFS; 7259 7254 goto free_msg; 7260 7255 } 7261 7256 } ··· 8133 8130 8134 8131 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 8135 8132 NL80211_CMD_PROBE_CLIENT); 8136 - 8137 - if (IS_ERR(hdr)) { 8138 - err = PTR_ERR(hdr); 8133 + if (!hdr) { 8134 + err = -ENOBUFS; 8139 8135 goto free_msg; 8140 8136 } 8141 8137
+4 -6
net/wireless/sme.c
··· 976 976 struct net_device *dev, u16 reason, bool wextev) 977 977 { 978 978 struct wireless_dev *wdev = dev->ieee80211_ptr; 979 - int err; 979 + int err = 0; 980 980 981 981 ASSERT_WDEV_LOCK(wdev); 982 982 983 983 kfree(wdev->connect_keys); 984 984 wdev->connect_keys = NULL; 985 985 986 - if (wdev->conn) { 986 + if (wdev->conn) 987 987 err = cfg80211_sme_disconnect(wdev, reason); 988 - } else if (!rdev->ops->disconnect) { 988 + else if (!rdev->ops->disconnect) 989 989 cfg80211_mlme_down(rdev, dev); 990 - err = 0; 991 - } else { 990 + else if (wdev->current_bss) 992 991 err = rdev_disconnect(rdev, dev, reason); 993 - } 994 992 995 993 return err; 996 994 }
+3 -3
sound/pci/hda/hda_generic.c
··· 522 522 } 523 523 524 524 #define nid_has_mute(codec, nid, dir) \ 525 - check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE) 525 + check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) 526 526 #define nid_has_volume(codec, nid, dir) \ 527 527 check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) 528 528 ··· 624 624 if (enable) 625 625 val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; 626 626 } 627 - if (caps & AC_AMPCAP_MUTE) { 627 + if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { 628 628 if (!enable) 629 629 val |= HDA_AMP_MUTE; 630 630 } ··· 648 648 { 649 649 unsigned int mask = 0xff; 650 650 651 - if (caps & AC_AMPCAP_MUTE) { 651 + if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { 652 652 if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) 653 653 mask &= ~0x80; 654 654 }
+11
sound/pci/hda/patch_realtek.c
··· 1031 1031 ALC880_FIXUP_GPIO2, 1032 1032 ALC880_FIXUP_MEDION_RIM, 1033 1033 ALC880_FIXUP_LG, 1034 + ALC880_FIXUP_LG_LW25, 1034 1035 ALC880_FIXUP_W810, 1035 1036 ALC880_FIXUP_EAPD_COEF, 1036 1037 ALC880_FIXUP_TCL_S700, ··· 1087 1086 { 0x16, 0x411111f0 }, 1088 1087 { 0x18, 0x411111f0 }, 1089 1088 { 0x1a, 0x411111f0 }, 1089 + { } 1090 + } 1091 + }, 1092 + [ALC880_FIXUP_LG_LW25] = { 1093 + .type = HDA_FIXUP_PINS, 1094 + .v.pins = (const struct hda_pintbl[]) { 1095 + { 0x1a, 0x0181344f }, /* line-in */ 1096 + { 0x1b, 0x0321403f }, /* headphone */ 1090 1097 { } 1091 1098 } 1092 1099 }, ··· 1350 1341 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), 1351 1342 SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), 1352 1343 SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), 1344 + SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25), 1353 1345 SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), 1354 1346 1355 1347 /* Below is the copied entries from alc880_quirks.c. ··· 4339 4329 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), 4340 4330 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), 4341 4331 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), 4332 + SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC), 4342 4333 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), 4343 4334 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4344 4335 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+4 -1
sound/soc/codecs/cs42l52.c
··· 195 195 196 196 static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); 197 197 198 + static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0); 199 + 198 200 static const unsigned int limiter_tlv[] = { 199 201 TLV_DB_RANGE_HEAD(2), 200 202 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), ··· 453 451 SOC_ENUM("Beep Pitch", beep_pitch_enum), 454 452 SOC_ENUM("Beep on Time", beep_ontime_enum), 455 453 SOC_ENUM("Beep off Time", beep_offtime_enum), 456 - SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv), 454 + SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL, 455 + 0, 0x07, 0x1f, beep_tlv), 457 456 SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), 458 457 SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), 459 458 SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
+14 -4
sound/soc/codecs/sgtl5000.c
··· 153 153 static int power_vag_event(struct snd_soc_dapm_widget *w, 154 154 struct snd_kcontrol *kcontrol, int event) 155 155 { 156 + const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP; 157 + 156 158 switch (event) { 157 159 case SND_SOC_DAPM_POST_PMU: 158 160 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, ··· 162 160 break; 163 161 164 162 case SND_SOC_DAPM_PRE_PMD: 165 - snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, 166 - SGTL5000_VAG_POWERUP, 0); 167 - msleep(400); 163 + /* 164 + * Don't clear VAG_POWERUP, when both DAC and ADC are 165 + * operational to prevent inadvertently starving the 166 + * other one of them. 167 + */ 168 + if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) & 169 + mask) != mask) { 170 + snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, 171 + SGTL5000_VAG_POWERUP, 0); 172 + msleep(400); 173 + } 168 174 break; 169 175 default: 170 176 break; ··· 398 388 SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), 399 389 SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", 400 390 SGTL5000_CHIP_ANA_ADC_CTRL, 401 - 8, 2, 0, capture_6db_attenuate), 391 + 8, 1, 0, capture_6db_attenuate), 402 392 SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), 403 393 404 394 SOC_DOUBLE_TLV("Headphone Playback Volume",
+4 -3
sound/soc/soc-dapm.c
··· 679 679 return -EINVAL; 680 680 } 681 681 682 - path = list_first_entry(&w->sources, struct snd_soc_dapm_path, 683 - list_sink); 684 - if (!path) { 682 + if (list_empty(&w->sources)) { 685 683 dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); 686 684 return -EINVAL; 687 685 } 686 + 687 + path = list_first_entry(&w->sources, struct snd_soc_dapm_path, 688 + list_sink); 688 689 689 690 ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); 690 691 if (ret < 0)
+1 -1
sound/soc/tegra/tegra30_i2s.c
··· 228 228 reg = TEGRA30_I2S_CIF_RX_CTRL; 229 229 } else { 230 230 val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; 231 - reg = TEGRA30_I2S_CIF_RX_CTRL; 231 + reg = TEGRA30_I2S_CIF_TX_CTRL; 232 232 } 233 233 234 234 regmap_write(i2s->regmap, reg, val);
+15 -1
sound/usb/6fire/midi.c
··· 19 19 #include "chip.h" 20 20 #include "comm.h" 21 21 22 + enum { 23 + MIDI_BUFSIZE = 64 24 + }; 25 + 22 26 static void usb6fire_midi_out_handler(struct urb *urb) 23 27 { 24 28 struct midi_runtime *rt = urb->context; ··· 160 156 if (!rt) 161 157 return -ENOMEM; 162 158 159 + rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL); 160 + if (!rt->out_buffer) { 161 + kfree(rt); 162 + return -ENOMEM; 163 + } 164 + 163 165 rt->chip = chip; 164 166 rt->in_received = usb6fire_midi_in_received; 165 167 rt->out_buffer[0] = 0x80; /* 'send midi' command */ ··· 179 169 180 170 ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); 181 171 if (ret < 0) { 172 + kfree(rt->out_buffer); 182 173 kfree(rt); 183 174 snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); 184 175 return ret; ··· 208 197 209 198 void usb6fire_midi_destroy(struct sfire_chip *chip) 210 199 { 211 - kfree(chip->midi); 200 + struct midi_runtime *rt = chip->midi; 201 + 202 + kfree(rt->out_buffer); 203 + kfree(rt); 212 204 chip->midi = NULL; 213 205 }
+1 -5
sound/usb/6fire/midi.h
··· 16 16 17 17 #include "common.h" 18 18 19 - enum { 20 - MIDI_BUFSIZE = 64 21 - }; 22 - 23 19 struct midi_runtime { 24 20 struct sfire_chip *chip; 25 21 struct snd_rawmidi *instance; ··· 28 32 struct snd_rawmidi_substream *out; 29 33 struct urb out_urb; 30 34 u8 out_serial; /* serial number of out packet */ 31 - u8 out_buffer[MIDI_BUFSIZE]; 35 + u8 *out_buffer; 32 36 int buffer_offset; 33 37 34 38 void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
+40 -1
sound/usb/6fire/pcm.c
··· 582 582 urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB; 583 583 } 584 584 585 + static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt) 586 + { 587 + int i; 588 + 589 + for (i = 0; i < PCM_N_URBS; i++) { 590 + rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB 591 + * PCM_MAX_PACKET_SIZE, GFP_KERNEL); 592 + if (!rt->out_urbs[i].buffer) 593 + return -ENOMEM; 594 + rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB 595 + * PCM_MAX_PACKET_SIZE, GFP_KERNEL); 596 + if (!rt->in_urbs[i].buffer) 597 + return -ENOMEM; 598 + } 599 + return 0; 600 + } 601 + 602 + static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt) 603 + { 604 + int i; 605 + 606 + for (i = 0; i < PCM_N_URBS; i++) { 607 + kfree(rt->out_urbs[i].buffer); 608 + kfree(rt->in_urbs[i].buffer); 609 + } 610 + } 611 + 585 612 int usb6fire_pcm_init(struct sfire_chip *chip) 586 613 { 587 614 int i; ··· 619 592 620 593 if (!rt) 621 594 return -ENOMEM; 595 + 596 + ret = usb6fire_pcm_buffers_init(rt); 597 + if (ret) { 598 + usb6fire_pcm_buffers_destroy(rt); 599 + kfree(rt); 600 + return ret; 601 + } 622 602 623 603 rt->chip = chip; 624 604 rt->stream_state = STREAM_DISABLED; ··· 648 614 649 615 ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm); 650 616 if (ret < 0) { 617 + usb6fire_pcm_buffers_destroy(rt); 651 618 kfree(rt); 652 619 snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n"); 653 620 return ret; ··· 660 625 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops); 661 626 662 627 if (ret) { 628 + usb6fire_pcm_buffers_destroy(rt); 663 629 kfree(rt); 664 630 snd_printk(KERN_ERR PREFIX 665 631 "error preallocating pcm buffers.\n"); ··· 705 669 706 670 void usb6fire_pcm_destroy(struct sfire_chip *chip) 707 671 { 708 - kfree(chip->pcm); 672 + struct pcm_runtime *rt = chip->pcm; 673 + 674 + usb6fire_pcm_buffers_destroy(rt); 675 + kfree(rt); 709 676 chip->pcm = NULL; 710 677 }
+1 -1
sound/usb/6fire/pcm.h
··· 32 32 struct urb instance; 33 33 struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB]; 34 34 /* END DO NOT SEPARATE */ 35 - u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE]; 35 + u8 *buffer; 36 36 37 37 struct pcm_urb *peer; 38 38 };
+1
sound/usb/mixer.c
··· 888 888 case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ 889 889 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ 890 890 case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ 891 + case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ 891 892 case USB_ID(0x046d, 0x0991): 892 893 /* Most audio usb devices lie about volume resolution. 893 894 * Most Logitech webcams have res = 384.
+3 -3
sound/usb/quirks.c
··· 319 319 if (altsd->bNumEndpoints < 1) 320 320 return -ENODEV; 321 321 epd = get_endpoint(alts, 0); 322 - if (!usb_endpoint_xfer_bulk(epd) || 322 + if (!usb_endpoint_xfer_bulk(epd) && 323 323 !usb_endpoint_xfer_int(epd)) 324 324 return -ENODEV; 325 325 326 326 switch (USB_ID_VENDOR(chip->usb_id)) { 327 327 case 0x0499: /* Yamaha */ 328 328 err = create_yamaha_midi_quirk(chip, iface, driver, alts); 329 - if (err < 0 && err != -ENODEV) 329 + if (err != -ENODEV) 330 330 return err; 331 331 break; 332 332 case 0x0582: /* Roland */ 333 333 err = create_roland_midi_quirk(chip, iface, driver, alts); 334 - if (err < 0 && err != -ENODEV) 334 + if (err != -ENODEV) 335 335 return err; 336 336 break; 337 337 }