Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'asoc-v3.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next

ASoC: Final updates for v3.13

A few final updates for v3.13, all driver updates apart from some DPCM
and Coverity fixes which should have minor impact on practical systems.

+2459 -1241
+1 -1
Documentation/connector/ucon.c
···
 	nlh->nlmsg_seq = seq++;
 	nlh->nlmsg_pid = getpid();
 	nlh->nlmsg_type = NLMSG_DONE;
-	nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+	nlh->nlmsg_len = size;
 	nlh->nlmsg_flags = 0;
 
 	m = NLMSG_DATA(nlh);
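The ucon.c hunk sets nlmsg_len directly to the total message size instead of reconstructing it through NLMSG_LENGTH(); either way, the field must hold the header plus the payload. A minimal userspace sketch of the usual sizing, using only the standard <linux/netlink.h> macros (illustration only, not the ucon.c test code):

/* Sizing a netlink message: nlmsg_len is the TOTAL length, header + payload. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/netlink.h>

int main(void)
{
	char buf[NLMSG_SPACE(64)];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	const char payload[] = "hello";

	memset(buf, 0, sizeof(buf));
	nlh->nlmsg_len   = NLMSG_LENGTH(sizeof(payload));	/* header + payload */
	nlh->nlmsg_type  = NLMSG_DONE;
	nlh->nlmsg_flags = 0;
	nlh->nlmsg_seq   = 1;
	nlh->nlmsg_pid   = getpid();
	memcpy(NLMSG_DATA(nlh), payload, sizeof(payload));

	printf("total=%u payload=%zu\n", nlh->nlmsg_len, sizeof(payload));
	return 0;
}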
+76 -3
MAINTAINERS
··· 1009 1009 M: Jason Cooper <jason@lakedaemon.net> 1010 1010 M: Andrew Lunn <andrew@lunn.ch> 1011 1011 M: Gregory Clement <gregory.clement@free-electrons.com> 1012 + M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1012 1013 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1013 1014 S: Maintained 1014 1015 F: arch/arm/mach-mvebu/ ··· 1017 1016 ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support 1018 1017 M: Jason Cooper <jason@lakedaemon.net> 1019 1018 M: Andrew Lunn <andrew@lunn.ch> 1019 + M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1020 1020 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1021 1021 S: Maintained 1022 1022 F: arch/arm/mach-dove/ ··· 1149 1147 F: drivers/net/ethernet/i825xx/ether1* 1150 1148 F: drivers/net/ethernet/seeq/ether3* 1151 1149 F: drivers/scsi/arm/ 1150 + 1151 + ARM/Rockchip SoC support 1152 + M: Heiko Stuebner <heiko@sntech.de> 1153 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1154 + S: Maintained 1155 + F: arch/arm/mach-rockchip/ 1156 + F: drivers/*/*rockchip* 1152 1157 1153 1158 ARM/SHARK MACHINE SUPPORT 1154 1159 M: Alexander Schulz <alex@shark-linux.de> ··· 1800 1791 1801 1792 BONDING DRIVER 1802 1793 M: Jay Vosburgh <fubar@us.ibm.com> 1794 + M: Veaceslav Falico <vfalico@redhat.com> 1803 1795 M: Andy Gospodarek <andy@greyhouse.net> 1804 1796 L: netdev@vger.kernel.org 1805 1797 W: http://sourceforge.net/projects/bonding/ ··· 2728 2718 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM 2729 2719 M: Vinod Koul <vinod.koul@intel.com> 2730 2720 M: Dan Williams <dan.j.williams@intel.com> 2721 + L: dmaengine@vger.kernel.org 2722 + Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ 2731 2723 S: Supported 2732 2724 F: drivers/dma/ 2733 2725 F: include/linux/dma* ··· 2833 2821 L: dri-devel@lists.freedesktop.org 2834 2822 L: linux-tegra@vger.kernel.org 2835 2823 T: git git://anongit.freedesktop.org/tegra/linux.git 2836 - S: Maintained 2824 + S: Supported 2837 2825 F: drivers/gpu/host1x/ 2838 2826 F: include/uapi/drm/tegra_drm.h 2839 2827 F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt ··· 4369 4357 4370 4358 INTEL I/OAT DMA DRIVER 4371 4359 M: Dan Williams <dan.j.williams@intel.com> 4372 - S: Maintained 4360 + M: Dave Jiang <dave.jiang@intel.com> 4361 + L: dmaengine@vger.kernel.org 4362 + Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ 4363 + S: Supported 4373 4364 F: drivers/dma/ioat* 4374 4365 4375 4366 INTEL IOMMU (VT-d) ··· 8316 8301 S: Maintained 8317 8302 F: drivers/media/rc/ttusbir.c 8318 8303 8319 - TEGRA SUPPORT 8304 + TEGRA ARCHITECTURE SUPPORT 8320 8305 M: Stephen Warren <swarren@wwwdotorg.org> 8306 + M: Thierry Reding <thierry.reding@gmail.com> 8321 8307 L: linux-tegra@vger.kernel.org 8322 8308 Q: http://patchwork.ozlabs.org/project/linux-tegra/list/ 8323 8309 T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git 8324 8310 S: Supported 8325 8311 N: [^a-z]tegra 8312 + 8313 + TEGRA ASOC DRIVER 8314 + M: Stephen Warren <swarren@wwwdotorg.org> 8315 + S: Supported 8316 + F: sound/soc/tegra/ 8317 + 8318 + TEGRA CLOCK DRIVER 8319 + M: Peter De Schrijver <pdeschrijver@nvidia.com> 8320 + M: Prashant Gaikwad <pgaikwad@nvidia.com> 8321 + S: Supported 8322 + F: drivers/clk/tegra/ 8323 + 8324 + TEGRA DMA DRIVER 8325 + M: Laxman Dewangan <ldewangan@nvidia.com> 8326 + S: Supported 8327 + F: drivers/dma/tegra20-apb-dma.c 8328 + 8329 + TEGRA GPIO DRIVER 8330 + M: Stephen Warren <swarren@wwwdotorg.org> 8331 + S: Supported 
8332 + F: drivers/gpio/gpio-tegra.c 8333 + 8334 + TEGRA I2C DRIVER 8335 + M: Laxman Dewangan <ldewangan@nvidia.com> 8336 + S: Supported 8337 + F: drivers/i2c/busses/i2c-tegra.c 8338 + 8339 + TEGRA IOMMU DRIVERS 8340 + M: Hiroshi Doyu <hdoyu@nvidia.com> 8341 + S: Supported 8342 + F: drivers/iommu/tegra* 8343 + 8344 + TEGRA KBC DRIVER 8345 + M: Rakesh Iyer <riyer@nvidia.com> 8346 + M: Laxman Dewangan <ldewangan@nvidia.com> 8347 + S: Supported 8348 + F: drivers/input/keyboard/tegra-kbc.c 8349 + 8350 + TEGRA PINCTRL DRIVER 8351 + M: Stephen Warren <swarren@wwwdotorg.org> 8352 + S: Supported 8353 + F: drivers/pinctrl/pinctrl-tegra* 8354 + 8355 + TEGRA PWM DRIVER 8356 + M: Thierry Reding <thierry.reding@gmail.com> 8357 + S: Supported 8358 + F: drivers/pwm/pwm-tegra.c 8359 + 8360 + TEGRA SERIAL DRIVER 8361 + M: Laxman Dewangan <ldewangan@nvidia.com> 8362 + S: Supported 8363 + F: drivers/tty/serial/serial-tegra.c 8364 + 8365 + TEGRA SPI DRIVER 8366 + M: Laxman Dewangan <ldewangan@nvidia.com> 8367 + S: Supported 8368 + F: drivers/spi/spi-tegra* 8326 8369 8327 8370 TEHUTI ETHERNET DRIVER 8328 8371 M: Andy Gospodarek <andy@greyhouse.net>
+1 -1
Makefile
···
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
+4 -5
arch/arm/boot/dts/integratorcp.dts
···
 	model = "ARM Integrator/CP";
 	compatible = "arm,integrator-cp";
 
-	aliases {
-		arm,timer-primary = &timer2;
-		arm,timer-secondary = &timer1;
-	};
-
 	chosen {
 		bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
 	};
···
 	};
 
 	timer0: timer@13000000 {
+		/* TIMER0 runs @ 25MHz */
 		compatible = "arm,integrator-cp-timer";
+		status = "disabled";
 	};
 
 	timer1: timer@13000100 {
+		/* TIMER1 runs @ 1MHz */
 		compatible = "arm,integrator-cp-timer";
 	};
 
 	timer2: timer@13000200 {
+		/* TIMER2 runs @ 1MHz */
 		compatible = "arm,integrator-cp-timer";
 	};
 
+1
arch/arm/net/bpf_jit_32.c
···
 {
 	if (fp->bpf_func != sk_run_filter)
 		module_free(NULL, fp->bpf_func);
+	kfree(fp);
 }
+2
arch/parisc/configs/712_defconfig
··· 40 40 CONFIG_LLC2=m 41 41 CONFIG_NET_PKTGEN=m 42 42 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43 + CONFIG_DEVTMPFS=y 44 + CONFIG_DEVTMPFS_MOUNT=y 43 45 # CONFIG_STANDALONE is not set 44 46 # CONFIG_PREVENT_FIRMWARE_BUILD is not set 45 47 CONFIG_PARPORT=y
+2
arch/parisc/configs/a500_defconfig
··· 79 79 CONFIG_LLC2=m 80 80 CONFIG_NET_PKTGEN=m 81 81 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 82 + CONFIG_DEVTMPFS=y 83 + CONFIG_DEVTMPFS_MOUNT=y 82 84 # CONFIG_STANDALONE is not set 83 85 # CONFIG_PREVENT_FIRMWARE_BUILD is not set 84 86 CONFIG_BLK_DEV_UMEM=m
+3
arch/parisc/configs/b180_defconfig
··· 4 4 CONFIG_IKCONFIG_PROC=y 5 5 CONFIG_LOG_BUF_SHIFT=16 6 6 CONFIG_SYSFS_DEPRECATED_V2=y 7 + CONFIG_BLK_DEV_INITRD=y 7 8 CONFIG_SLAB=y 8 9 CONFIG_MODULES=y 9 10 CONFIG_MODVERSIONS=y ··· 28 27 # CONFIG_INET_LRO is not set 29 28 CONFIG_IPV6=y 30 29 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 30 + CONFIG_DEVTMPFS=y 31 + CONFIG_DEVTMPFS_MOUNT=y 31 32 # CONFIG_PREVENT_FIRMWARE_BUILD is not set 32 33 CONFIG_PARPORT=y 33 34 CONFIG_PARPORT_PC=y
+3
arch/parisc/configs/c3000_defconfig
··· 5 5 CONFIG_IKCONFIG_PROC=y 6 6 CONFIG_LOG_BUF_SHIFT=16 7 7 CONFIG_SYSFS_DEPRECATED_V2=y 8 + CONFIG_BLK_DEV_INITRD=y 8 9 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 9 10 CONFIG_EXPERT=y 10 11 CONFIG_KALLSYMS_ALL=y ··· 40 39 CONFIG_IP_NF_QUEUE=m 41 40 CONFIG_NET_PKTGEN=m 42 41 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 42 + CONFIG_DEVTMPFS=y 43 + CONFIG_DEVTMPFS_MOUNT=y 43 44 # CONFIG_STANDALONE is not set 44 45 # CONFIG_PREVENT_FIRMWARE_BUILD is not set 45 46 CONFIG_BLK_DEV_UMEM=m
+2
arch/parisc/configs/c8000_defconfig
··· 62 62 CONFIG_LLC2=m 63 63 CONFIG_DNS_RESOLVER=y 64 64 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 65 + CONFIG_DEVTMPFS=y 66 + CONFIG_DEVTMPFS_MOUNT=y 65 67 # CONFIG_STANDALONE is not set 66 68 CONFIG_PARPORT=y 67 69 CONFIG_PARPORT_PC=y
+2
arch/parisc/configs/default_defconfig
··· 49 49 CONFIG_INET6_IPCOMP=y 50 50 CONFIG_LLC2=m 51 51 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 52 + CONFIG_DEVTMPFS=y 53 + CONFIG_DEVTMPFS_MOUNT=y 52 54 # CONFIG_STANDALONE is not set 53 55 # CONFIG_PREVENT_FIRMWARE_BUILD is not set 54 56 CONFIG_PARPORT=y
-1
arch/parisc/kernel/cache.c
···
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
-EXPORT_SYMBOL_GPL(flush_cache_page);
 
 #ifdef CONFIG_PARISC_TMPALIAS
 
+4
arch/parisc/kernel/head.S
···
 	ldw		MEM_PDC_HI(%r0),%r6
 	depd		%r6, 31, 32, %r3	/* move to upper word */
 
+	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
+
 	ldo		PDC_PSW(%r0),%arg0		/* 21 */
 	ldo		PDC_PSW_SET_DEFAULTS(%r0),%arg1	/* 2 */
 	ldo		PDC_PSW_WIDE_BIT(%r0),%arg2	/* 2 */
···
 	copy		%r0,%arg3
 
 stext_pdc_ret:
+	mtctl		%r6,%cr30		/* restore task thread info */
+
 	/* restore rfi target address*/
 	ldd		TI_TASK-THREAD_SZ_ALGN(%sp), %r10
 	tophys_r1	%r10
+1
arch/powerpc/net/bpf_jit_comp.c
···
 {
 	if (fp->bpf_func != sk_run_filter)
 		module_free(NULL, fp->bpf_func);
+	kfree(fp);
 }
+3 -1
arch/s390/include/asm/pgtable.h
···
 
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
+	if (!MACHINE_HAS_ESOP &&
+	    (pte_val(entry) & _PAGE_PRESENT) &&
+	    (pte_val(entry) & _PAGE_WRITE)) {
 		/*
 		 * Without enhanced suppression-on-protection force
 		 * the dirty bit on for all writable ptes.
+14 -14
arch/s390/include/asm/timex.h
··· 71 71 72 72 typedef unsigned long long cycles_t; 73 73 74 - static inline unsigned long long get_tod_clock(void) 75 - { 76 - unsigned long long clk; 77 - 78 - #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 79 - asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc"); 80 - #else 81 - asm volatile("stck %0" : "=Q" (clk) : : "cc"); 82 - #endif 83 - return clk; 84 - } 85 - 86 74 static inline void get_tod_clock_ext(char *clk) 87 75 { 88 76 asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); 89 77 } 90 78 91 - static inline unsigned long long get_tod_clock_xt(void) 79 + static inline unsigned long long get_tod_clock(void) 92 80 { 93 81 unsigned char clk[16]; 94 82 get_tod_clock_ext(clk); 95 83 return *((unsigned long long *)&clk[1]); 84 + } 85 + 86 + static inline unsigned long long get_tod_clock_fast(void) 87 + { 88 + #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 89 + unsigned long long clk; 90 + 91 + asm volatile("stckf %0" : "=Q" (clk) : : "cc"); 92 + return clk; 93 + #else 94 + return get_tod_clock(); 95 + #endif 96 96 } 97 97 98 98 static inline cycles_t get_cycles(void) ··· 125 125 */ 126 126 static inline unsigned long long get_tod_clock_monotonic(void) 127 127 { 128 - return get_tod_clock_xt() - sched_clock_base_cc; 128 + return get_tod_clock() - sched_clock_base_cc; 129 129 } 130 130 131 131 /**
+2 -2
arch/s390/kernel/compat_signal.c
···
 			break;
 		}
 	}
-	return err;
+	return err ? -EFAULT : 0;
 }
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
···
 			break;
 		}
 	}
-	return err;
+	return err ? -EFAULT : 0;
 }
 
 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
+1 -1
arch/s390/kernel/debug.c
···
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
 		   int exception)
 {
-	active->id.stck = get_tod_clock();
+	active->id.stck = get_tod_clock_fast();
 	active->id.fields.cpuid = smp_processor_id();
 	active->caller = __builtin_return_address(0);
 	active->id.fields.exception = exception;
+3 -3
arch/s390/kvm/interrupt.c
··· 385 385 } 386 386 387 387 if ((!rc) && (vcpu->arch.sie_block->ckc < 388 - get_tod_clock() + vcpu->arch.sie_block->epoch)) { 388 + get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) { 389 389 if ((!psw_extint_disabled(vcpu)) && 390 390 (vcpu->arch.sie_block->gcr[0] & 0x800ul)) 391 391 rc = 1; ··· 425 425 goto no_timer; 426 426 } 427 427 428 - now = get_tod_clock() + vcpu->arch.sie_block->epoch; 428 + now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; 429 429 if (vcpu->arch.sie_block->ckc < now) { 430 430 __unset_cpu_idle(vcpu); 431 431 return 0; ··· 515 515 } 516 516 517 517 if ((vcpu->arch.sie_block->ckc < 518 - get_tod_clock() + vcpu->arch.sie_block->epoch)) 518 + get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) 519 519 __try_deliver_ckc_interrupt(vcpu); 520 520 521 521 if (atomic_read(&fi->active)) {
+7 -7
arch/s390/lib/delay.c
··· 44 44 do { 45 45 set_clock_comparator(end); 46 46 vtime_stop_cpu(); 47 - } while (get_tod_clock() < end); 47 + } while (get_tod_clock_fast() < end); 48 48 lockdep_on(); 49 49 __ctl_load(cr0, 0, 0); 50 50 __ctl_load(cr6, 6, 6); ··· 55 55 { 56 56 u64 clock_saved, end; 57 57 58 - end = get_tod_clock() + (usecs << 12); 58 + end = get_tod_clock_fast() + (usecs << 12); 59 59 do { 60 60 clock_saved = 0; 61 61 if (end < S390_lowcore.clock_comparator) { ··· 65 65 vtime_stop_cpu(); 66 66 if (clock_saved) 67 67 local_tick_enable(clock_saved); 68 - } while (get_tod_clock() < end); 68 + } while (get_tod_clock_fast() < end); 69 69 } 70 70 71 71 /* ··· 109 109 { 110 110 u64 end; 111 111 112 - end = get_tod_clock() + (usecs << 12); 113 - while (get_tod_clock() < end) 112 + end = get_tod_clock_fast() + (usecs << 12); 113 + while (get_tod_clock_fast() < end) 114 114 cpu_relax(); 115 115 } 116 116 ··· 120 120 121 121 nsecs <<= 9; 122 122 do_div(nsecs, 125); 123 - end = get_tod_clock() + nsecs; 123 + end = get_tod_clock_fast() + nsecs; 124 124 if (nsecs & ~0xfffUL) 125 125 __udelay(nsecs >> 12); 126 - while (get_tod_clock() < end) 126 + while (get_tod_clock_fast() < end) 127 127 barrier(); 128 128 } 129 129 EXPORT_SYMBOL(__ndelay);
+3 -1
arch/s390/net/bpf_jit_comp.c
···
 	struct bpf_binary_header *header = (void *)addr;
 
 	if (fp->bpf_func == sk_run_filter)
-		return;
+		goto free_filter;
 	set_memory_rw(addr, header->pages);
 	module_free(NULL, header);
+free_filter:
+	kfree(fp);
 }
+1
arch/sparc/net/bpf_jit_comp.c
···
 {
 	if (fp->bpf_func != sk_run_filter)
 		module_free(NULL, fp->bpf_func);
+	kfree(fp);
 }
+13 -5
arch/x86/net/bpf_jit_comp.c
···
 		return;
 	}
 
+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+	struct sk_filter *fp = container_of(work, struct sk_filter, work);
+	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
+
+	set_memory_rw(addr, header->pages);
+	module_free(NULL, header);
+	kfree(fp);
+}
+
 void bpf_jit_free(struct sk_filter *fp)
 {
 	if (fp->bpf_func != sk_run_filter) {
-		unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-		struct bpf_binary_header *header = (void *)addr;
-
-		set_memory_rw(addr, header->pages);
-		module_free(NULL, header);
+		INIT_WORK(&fp->work, bpf_jit_free_deferred);
+		schedule_work(&fp->work);
 	}
 }
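The x86 JIT now hands the generated image to a workqueue and frees it there, with the work item embedded in the filter and recovered via container_of(), apparently so the page-permission change and module_free() run in process context rather than directly from the path that calls bpf_jit_free(). A rough, illustrative module sketching that deferred-free pattern; the struct and names below are invented for the example (the real code embeds the work_struct in struct sk_filter):

/* Illustrative sketch of deferred freeing through the system workqueue. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_obj {
	int payload;
	struct work_struct work;
};

static void demo_free_deferred(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, work);

	pr_info("deferred free of payload %d\n", obj->payload);
	kfree(obj);
}

static int __init demo_init(void)
{
	struct demo_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	obj->payload = 42;

	/* Instead of freeing here, hand the object to a worker that runs
	 * later in process context. */
	INIT_WORK(&obj->work, demo_free_deferred);
	schedule_work(&obj->work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_scheduled_work();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");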
+1 -1
drivers/ata/ahci.c
···
 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
 		host->flags |= ATA_HOST_PARALLEL_SCAN;
 	else
-		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+		dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
 
 	if (pi.flags & ATA_FLAG_EM)
 		ahci_reset_em(host);
+1 -1
drivers/ata/ahci_platform.c
···
 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
 		host->flags |= ATA_HOST_PARALLEL_SCAN;
 	else
-		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+		dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
 
 	if (pi.flags & ATA_FLAG_EM)
 		ahci_reset_em(host);
+9 -1
drivers/ata/libahci.c
··· 778 778 rc = ap->ops->transmit_led_message(ap, 779 779 emp->led_state, 780 780 4); 781 + /* 782 + * If busy, give a breather but do not 783 + * release EH ownership by using msleep() 784 + * instead of ata_msleep(). EM Transmit 785 + * bit is busy for the whole host and 786 + * releasing ownership will cause other 787 + * ports to fail the same way. 788 + */ 781 789 if (rc == -EBUSY) 782 - ata_msleep(ap, 1); 790 + msleep(1); 783 791 else 784 792 break; 785 793 }
+3 -3
drivers/ata/libata-eh.c
···
  *	should be retried.  To be used from EH.
  *
  *	SCSI midlayer limits the number of retries to scmd->allowed.
- *	scmd->retries is decremented for commands which get retried
+ *	scmd->allowed is incremented for commands which get retried
  *	due to unrelated failures (qc->err_mask is zero).
  */
 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *scmd = qc->scsicmd;
-	if (!qc->err_mask && scmd->retries)
-		scmd->retries--;
+	if (!qc->err_mask)
+		scmd->allowed++;
 	__ata_eh_qc_complete(qc);
 }
 
+1 -1
drivers/ata/pata_isapnp.c
···
 
 	ap->ioaddr.cmd_addr = cmd_addr;
 
-	if (pnp_port_valid(idev, 1) == 0) {
+	if (pnp_port_valid(idev, 1)) {
 		ctl_addr = devm_ioport_map(&idev->dev,
 					   pnp_port_start(idev, 1), 1);
 		ap->ioaddr.altstatus_addr = ctl_addr;
+18
drivers/connector/cn_proc.c
··· 65 65 66 66 msg = (struct cn_msg *)buffer; 67 67 ev = (struct proc_event *)msg->data; 68 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 68 69 get_seq(&msg->seq, &ev->cpu); 69 70 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 70 71 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 81 80 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 82 81 msg->ack = 0; /* not used */ 83 82 msg->len = sizeof(*ev); 83 + msg->flags = 0; /* not used */ 84 84 /* If cn_netlink_send() failed, the data is not sent */ 85 85 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 86 86 } ··· 98 96 99 97 msg = (struct cn_msg *)buffer; 100 98 ev = (struct proc_event *)msg->data; 99 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 101 100 get_seq(&msg->seq, &ev->cpu); 102 101 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 103 102 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 109 106 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 110 107 msg->ack = 0; /* not used */ 111 108 msg->len = sizeof(*ev); 109 + msg->flags = 0; /* not used */ 112 110 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 113 111 } 114 112 ··· 126 122 127 123 msg = (struct cn_msg *)buffer; 128 124 ev = (struct proc_event *)msg->data; 125 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 129 126 ev->what = which_id; 130 127 ev->event_data.id.process_pid = task->pid; 131 128 ev->event_data.id.process_tgid = task->tgid; ··· 150 145 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 151 146 msg->ack = 0; /* not used */ 152 147 msg->len = sizeof(*ev); 148 + msg->flags = 0; /* not used */ 153 149 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 154 150 } 155 151 ··· 166 160 167 161 msg = (struct cn_msg *)buffer; 168 162 ev = (struct proc_event *)msg->data; 163 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 169 164 get_seq(&msg->seq, &ev->cpu); 170 165 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 171 166 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 177 170 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 178 171 msg->ack = 0; /* not used */ 179 172 msg->len = sizeof(*ev); 173 + msg->flags = 0; /* not used */ 180 174 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 181 175 } 182 176 ··· 193 185 194 186 msg = (struct cn_msg *)buffer; 195 187 ev = (struct proc_event *)msg->data; 188 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 196 189 get_seq(&msg->seq, &ev->cpu); 197 190 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 198 191 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 212 203 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 213 204 msg->ack = 0; /* not used */ 214 205 msg->len = sizeof(*ev); 206 + msg->flags = 0; /* not used */ 215 207 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 216 208 } 217 209 ··· 228 218 229 219 msg = (struct cn_msg *)buffer; 230 220 ev = (struct proc_event *)msg->data; 221 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 231 222 get_seq(&msg->seq, &ev->cpu); 232 223 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 233 224 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 240 229 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 241 230 msg->ack = 0; /* not used */ 242 231 msg->len = sizeof(*ev); 232 + msg->flags = 0; /* not used */ 243 233 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 244 234 } 245 235 ··· 256 244 257 245 msg = (struct cn_msg *)buffer; 258 246 ev = (struct proc_event *)msg->data; 247 + 
memset(&ev->event_data, 0, sizeof(ev->event_data)); 259 248 get_seq(&msg->seq, &ev->cpu); 260 249 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 261 250 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 267 254 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 268 255 msg->ack = 0; /* not used */ 269 256 msg->len = sizeof(*ev); 257 + msg->flags = 0; /* not used */ 270 258 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 271 259 } 272 260 ··· 283 269 284 270 msg = (struct cn_msg *)buffer; 285 271 ev = (struct proc_event *)msg->data; 272 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 286 273 get_seq(&msg->seq, &ev->cpu); 287 274 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 288 275 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 296 281 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 297 282 msg->ack = 0; /* not used */ 298 283 msg->len = sizeof(*ev); 284 + msg->flags = 0; /* not used */ 299 285 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 300 286 } 301 287 ··· 320 304 321 305 msg = (struct cn_msg *)buffer; 322 306 ev = (struct proc_event *)msg->data; 307 + memset(&ev->event_data, 0, sizeof(ev->event_data)); 323 308 msg->seq = rcvd_seq; 324 309 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 325 310 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ··· 330 313 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 331 314 msg->ack = rcvd_ack + 1; 332 315 msg->len = sizeof(*ev); 316 + msg->flags = 0; /* not used */ 333 317 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 334 318 } 335 319
+5 -4
drivers/connector/connector.c
···
 
 	data = nlmsg_data(nlh);
 
-	memcpy(data, msg, sizeof(*data) + msg->len);
+	memcpy(data, msg, size);
 
 	NETLINK_CB(skb).dst_group = group;
 
···
 static void cn_rx_skb(struct sk_buff *__skb)
 {
 	struct nlmsghdr *nlh;
-	int err;
 	struct sk_buff *skb;
+	int len, err;
 
 	skb = skb_get(__skb);
 
 	if (skb->len >= NLMSG_HDRLEN) {
 		nlh = nlmsg_hdr(skb);
+		len = nlmsg_len(nlh);
 
-		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+		if (len < (int)sizeof(struct cn_msg) ||
 		    skb->len < nlh->nlmsg_len ||
-		    nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+		    len > CONNECTOR_MAX_MSG_SIZE) {
 			kfree_skb(skb);
 			return;
 		}
+4 -4
drivers/cpufreq/acpi-cpufreq.c
···
 {
 	int ret;
 
+	if (acpi_disabled)
+		return -ENODEV;
+
 	/* don't keep reloading if cpufreq_driver exists */
 	if (cpufreq_get_current_driver())
-		return 0;
-
-	if (acpi_disabled)
-		return 0;
+		return -EEXIST;
 
 	pr_debug("acpi_cpufreq_init\n");
 
+18 -20
drivers/cpufreq/intel_pstate.c
··· 48 48 } 49 49 50 50 struct sample { 51 - int core_pct_busy; 51 + int32_t core_pct_busy; 52 52 u64 aperf; 53 53 u64 mperf; 54 54 int freq; ··· 68 68 int32_t i_gain; 69 69 int32_t d_gain; 70 70 int deadband; 71 - int last_err; 71 + int32_t last_err; 72 72 }; 73 73 74 74 struct cpudata { ··· 153 153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 154 154 } 155 155 156 - static signed int pid_calc(struct _pid *pid, int busy) 156 + static signed int pid_calc(struct _pid *pid, int32_t busy) 157 157 { 158 - signed int err, result; 158 + signed int result; 159 159 int32_t pterm, dterm, fp_error; 160 160 int32_t integral_limit; 161 161 162 - err = pid->setpoint - busy; 163 - fp_error = int_tofp(err); 162 + fp_error = int_tofp(pid->setpoint) - busy; 164 163 165 - if (abs(err) <= pid->deadband) 164 + if (abs(fp_error) <= int_tofp(pid->deadband)) 166 165 return 0; 167 166 168 167 pterm = mul_fp(pid->p_gain, fp_error); ··· 175 176 if (pid->integral < -integral_limit) 176 177 pid->integral = -integral_limit; 177 178 178 - dterm = mul_fp(pid->d_gain, (err - pid->last_err)); 179 - pid->last_err = err; 179 + dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); 180 + pid->last_err = fp_error; 180 181 181 182 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 182 183 ··· 366 367 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 367 368 { 368 369 int max_perf = cpu->pstate.turbo_pstate; 370 + int max_perf_adj; 369 371 int min_perf; 370 372 if (limits.no_turbo) 371 373 max_perf = cpu->pstate.max_pstate; 372 374 373 - max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 374 - *max = clamp_t(int, max_perf, 375 + max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 376 + *max = clamp_t(int, max_perf_adj, 375 377 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 376 378 377 379 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); ··· 436 436 struct sample *sample) 437 437 { 438 438 u64 core_pct; 439 - core_pct = div64_u64(sample->aperf * 100, sample->mperf); 440 - sample->freq = cpu->pstate.max_pstate * core_pct * 1000; 439 + core_pct = div64_u64(int_tofp(sample->aperf * 100), 440 + sample->mperf); 441 + sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000); 441 442 442 443 sample->core_pct_busy = core_pct; 443 444 } ··· 470 469 mod_timer_pinned(&cpu->timer, jiffies + delay); 471 470 } 472 471 473 - static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 472 + static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) 474 473 { 475 - int32_t busy_scaled; 476 474 int32_t core_busy, max_pstate, current_pstate; 477 475 478 - core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 476 + core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy; 479 477 max_pstate = int_tofp(cpu->pstate.max_pstate); 480 478 current_pstate = int_tofp(cpu->pstate.current_pstate); 481 - busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 482 - 483 - return fp_toint(busy_scaled); 479 + return mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 484 480 } 485 481 486 482 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 487 483 { 488 - int busy_scaled; 484 + int32_t busy_scaled; 489 485 struct _pid *pid; 490 486 signed int ctl = 0; 491 487 int steps;
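Several intel_pstate hunks above move the busy-percentage bookkeeping from plain integers to the driver's fixed-point helpers, so the PID controller sees fractional error instead of values truncated before the calculation. A standalone sketch of that style of arithmetic; FRAC_BITS and the helper names are reimplemented here for illustration and assumed to resemble the driver's of this era, not copied from it:

/* Fixed-point scaling in the shape of intel_pstate_get_scaled_busy(). */
#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int64_t mul_fp(int64_t x, int64_t y)
{
	return (x * y) >> FRAC_BITS;
}

static int64_t div_fp(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y;
}

int main(void)
{
	/* Scale "how busy the core was" by max_pstate/current_pstate. */
	int64_t core_busy = int_tofp(87);	/* 87% busy */
	int64_t max_pstate = int_tofp(32);
	int64_t current_pstate = int_tofp(24);
	int64_t scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	printf("scaled busy = %lld.%02lld%%\n",
	       (long long)fp_toint(scaled),
	       (long long)((scaled & ((1 << FRAC_BITS) - 1)) * 100 >> FRAC_BITS));
	return 0;
}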
+2
drivers/dma/edma.c
··· 305 305 edma_alloc_slot(EDMA_CTLR(echan->ch_num), 306 306 EDMA_SLOT_ANY); 307 307 if (echan->slot[i] < 0) { 308 + kfree(edesc); 308 309 dev_err(dev, "Failed to allocate slot\n"); 309 310 kfree(edesc); 310 311 return NULL; ··· 347 346 ccnt = sg_dma_len(sg) / (acnt * bcnt); 348 347 if (ccnt > (SZ_64K - 1)) { 349 348 dev_err(dev, "Exceeded max SG segment size\n"); 349 + kfree(edesc); 350 350 return NULL; 351 351 } 352 352 cidx = acnt * bcnt;
+8 -1
drivers/gpu/drm/drm_drv.c
···
 		cmd = ioctl->cmd_drv;
 	}
 	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+		u32 drv_size;
+
 		ioctl = &drm_ioctls[nr];
-		cmd = ioctl->cmd;
+
+		drv_size = _IOC_SIZE(ioctl->cmd);
 		usize = asize = _IOC_SIZE(cmd);
+		if (drv_size > asize)
+			asize = drv_size;
+
+		cmd = ioctl->cmd;
 	} else
 		goto err_i1;
 
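The drm_drv.c hunk makes the ioctl path size its kernel-side copy by the larger of the driver-declared argument size and the size encoded in the userspace ioctl number, so a short write from older userspace cannot leave the tail of the kernel's argument struct undefined. A contrived userspace illustration of that zero-then-copy idea; the struct and sizes are invented for the example:

/* Zero the full driver-sized struct, then copy only what userspace sent. */
#include <stdio.h>
#include <string.h>

struct drv_args {		/* what the driver expects */
	int handle;
	int flags;		/* field added in a newer driver version */
};

int main(void)
{
	struct drv_args kernel_copy;
	int user_handle = 7;			/* old userspace only knows 'handle' */
	size_t user_size = sizeof(int);
	size_t copy_size = sizeof(struct drv_args);	/* max(user, driver) */

	memset(&kernel_copy, 0, copy_size);
	memcpy(&kernel_copy, &user_handle, user_size);

	printf("handle=%d flags=%d (flags safely zeroed)\n",
	       kernel_copy.handle, kernel_copy.flags);
	return 0;
}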
+4 -1
drivers/gpu/drm/i915/i915_drv.c
··· 505 505 intel_modeset_suspend_hw(dev); 506 506 } 507 507 508 + i915_gem_suspend_gtt_mappings(dev); 509 + 508 510 i915_save_state(dev); 509 511 510 512 intel_opregion_fini(dev); ··· 650 648 mutex_lock(&dev->struct_mutex); 651 649 i915_gem_restore_gtt_mappings(dev); 652 650 mutex_unlock(&dev->struct_mutex); 653 - } 651 + } else if (drm_core_check_feature(dev, DRIVER_MODESET)) 652 + i915_check_and_clear_faults(dev); 654 653 655 654 __i915_drm_thaw(dev); 656 655
+6 -2
drivers/gpu/drm/i915/i915_drv.h
··· 497 497 498 498 /* FIXME: Need a more generic return type */ 499 499 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, 500 - enum i915_cache_level level); 500 + enum i915_cache_level level, 501 + bool valid); /* Create a valid PTE */ 501 502 void (*clear_range)(struct i915_address_space *vm, 502 503 unsigned int first_entry, 503 - unsigned int num_entries); 504 + unsigned int num_entries, 505 + bool use_scratch); 504 506 void (*insert_entries)(struct i915_address_space *vm, 505 507 struct sg_table *st, 506 508 unsigned int first_entry, ··· 2067 2065 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 2068 2066 struct drm_i915_gem_object *obj); 2069 2067 2068 + void i915_check_and_clear_faults(struct drm_device *dev); 2069 + void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 2070 2070 void i915_gem_restore_gtt_mappings(struct drm_device *dev); 2071 2071 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 2072 2072 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+85 -24
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 58 58 #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 59 59 60 60 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 61 - enum i915_cache_level level) 61 + enum i915_cache_level level, 62 + bool valid) 62 63 { 63 - gen6_gtt_pte_t pte = GEN6_PTE_VALID; 64 + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 64 65 pte |= GEN6_PTE_ADDR_ENCODE(addr); 65 66 66 67 switch (level) { ··· 80 79 } 81 80 82 81 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, 83 - enum i915_cache_level level) 82 + enum i915_cache_level level, 83 + bool valid) 84 84 { 85 - gen6_gtt_pte_t pte = GEN6_PTE_VALID; 85 + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 86 86 pte |= GEN6_PTE_ADDR_ENCODE(addr); 87 87 88 88 switch (level) { ··· 107 105 #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) 108 106 109 107 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, 110 - enum i915_cache_level level) 108 + enum i915_cache_level level, 109 + bool valid) 111 110 { 112 - gen6_gtt_pte_t pte = GEN6_PTE_VALID; 111 + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 113 112 pte |= GEN6_PTE_ADDR_ENCODE(addr); 114 113 115 114 /* Mark the page as writeable. Other platforms don't have a ··· 125 122 } 126 123 127 124 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, 128 - enum i915_cache_level level) 125 + enum i915_cache_level level, 126 + bool valid) 129 127 { 130 - gen6_gtt_pte_t pte = GEN6_PTE_VALID; 128 + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 131 129 pte |= HSW_PTE_ADDR_ENCODE(addr); 132 130 133 131 if (level != I915_CACHE_NONE) ··· 138 134 } 139 135 140 136 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, 141 - enum i915_cache_level level) 137 + enum i915_cache_level level, 138 + bool valid) 142 139 { 143 - gen6_gtt_pte_t pte = GEN6_PTE_VALID; 140 + gen6_gtt_pte_t pte = valid ? 
GEN6_PTE_VALID : 0; 144 141 pte |= HSW_PTE_ADDR_ENCODE(addr); 145 142 146 143 switch (level) { ··· 241 236 /* PPGTT support for Sandybdrige/Gen6 and later */ 242 237 static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 243 238 unsigned first_entry, 244 - unsigned num_entries) 239 + unsigned num_entries, 240 + bool use_scratch) 245 241 { 246 242 struct i915_hw_ppgtt *ppgtt = 247 243 container_of(vm, struct i915_hw_ppgtt, base); ··· 251 245 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 252 246 unsigned last_pte, i; 253 247 254 - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 248 + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); 255 249 256 250 while (num_entries) { 257 251 last_pte = first_pte + num_entries; ··· 288 282 dma_addr_t page_addr; 289 283 290 284 page_addr = sg_page_iter_dma_address(&sg_iter); 291 - pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); 285 + pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true); 292 286 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 293 287 kunmap_atomic(pt_vaddr); 294 288 act_pt++; ··· 373 367 } 374 368 375 369 ppgtt->base.clear_range(&ppgtt->base, 0, 376 - ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); 370 + ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); 377 371 378 372 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 379 373 ··· 450 444 { 451 445 ppgtt->base.clear_range(&ppgtt->base, 452 446 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, 453 - obj->base.size >> PAGE_SHIFT); 447 + obj->base.size >> PAGE_SHIFT, 448 + true); 454 449 } 455 450 456 451 extern int intel_iommu_gfx_mapped; ··· 492 485 dev_priv->mm.interruptible = interruptible; 493 486 } 494 487 488 + void i915_check_and_clear_faults(struct drm_device *dev) 489 + { 490 + struct drm_i915_private *dev_priv = dev->dev_private; 491 + struct intel_ring_buffer *ring; 492 + int i; 493 + 494 + if (INTEL_INFO(dev)->gen < 6) 495 + return; 496 + 497 + for_each_ring(ring, dev_priv, i) { 498 + u32 fault_reg; 499 + fault_reg = I915_READ(RING_FAULT_REG(ring)); 500 + if (fault_reg & RING_FAULT_VALID) { 501 + DRM_DEBUG_DRIVER("Unexpected fault\n" 502 + "\tAddr: 0x%08lx\\n" 503 + "\tAddress space: %s\n" 504 + "\tSource ID: %d\n" 505 + "\tType: %d\n", 506 + fault_reg & PAGE_MASK, 507 + fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", 508 + RING_FAULT_SRCID(fault_reg), 509 + RING_FAULT_FAULT_TYPE(fault_reg)); 510 + I915_WRITE(RING_FAULT_REG(ring), 511 + fault_reg & ~RING_FAULT_VALID); 512 + } 513 + } 514 + POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); 515 + } 516 + 517 + void i915_gem_suspend_gtt_mappings(struct drm_device *dev) 518 + { 519 + struct drm_i915_private *dev_priv = dev->dev_private; 520 + 521 + /* Don't bother messing with faults pre GEN6 as we have little 522 + * documentation supporting that it's a good idea. 
523 + */ 524 + if (INTEL_INFO(dev)->gen < 6) 525 + return; 526 + 527 + i915_check_and_clear_faults(dev); 528 + 529 + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 530 + dev_priv->gtt.base.start / PAGE_SIZE, 531 + dev_priv->gtt.base.total / PAGE_SIZE, 532 + false); 533 + } 534 + 495 535 void i915_gem_restore_gtt_mappings(struct drm_device *dev) 496 536 { 497 537 struct drm_i915_private *dev_priv = dev->dev_private; 498 538 struct drm_i915_gem_object *obj; 499 539 540 + i915_check_and_clear_faults(dev); 541 + 500 542 /* First fill our portion of the GTT with scratch pages */ 501 543 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 502 544 dev_priv->gtt.base.start / PAGE_SIZE, 503 - dev_priv->gtt.base.total / PAGE_SIZE); 545 + dev_priv->gtt.base.total / PAGE_SIZE, 546 + true); 504 547 505 548 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 506 549 i915_gem_clflush_object(obj, obj->pin_display); ··· 593 536 594 537 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 595 538 addr = sg_page_iter_dma_address(&sg_iter); 596 - iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); 539 + iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]); 597 540 i++; 598 541 } 599 542 ··· 605 548 */ 606 549 if (i != 0) 607 550 WARN_ON(readl(&gtt_entries[i-1]) != 608 - vm->pte_encode(addr, level)); 551 + vm->pte_encode(addr, level, true)); 609 552 610 553 /* This next bit makes the above posting read even more important. We 611 554 * want to flush the TLBs only after we're certain all the PTE updates ··· 617 560 618 561 static void gen6_ggtt_clear_range(struct i915_address_space *vm, 619 562 unsigned int first_entry, 620 - unsigned int num_entries) 563 + unsigned int num_entries, 564 + bool use_scratch) 621 565 { 622 566 struct drm_i915_private *dev_priv = vm->dev->dev_private; 623 567 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = ··· 631 573 first_entry, num_entries, max_entries)) 632 574 num_entries = max_entries; 633 575 634 - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 576 + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); 577 + 635 578 for (i = 0; i < num_entries; i++) 636 579 iowrite32(scratch_pte, &gtt_base[i]); 637 580 readl(gtt_base); ··· 653 594 654 595 static void i915_ggtt_clear_range(struct i915_address_space *vm, 655 596 unsigned int first_entry, 656 - unsigned int num_entries) 597 + unsigned int num_entries, 598 + bool unused) 657 599 { 658 600 intel_gtt_clear_range(first_entry, num_entries); 659 601 } ··· 682 622 683 623 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 684 624 entry, 685 - obj->base.size >> PAGE_SHIFT); 625 + obj->base.size >> PAGE_SHIFT, 626 + true); 686 627 687 628 obj->has_global_gtt_mapping = 0; 688 629 } ··· 770 709 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; 771 710 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 772 711 hole_start, hole_end); 773 - ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); 712 + ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true); 774 713 } 775 714 776 715 /* And finally clear the reserved guard page */ 777 - ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); 716 + ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); 778 717 } 779 718 780 719 static bool
+6
drivers/gpu/drm/i915/i915_reg.h
··· 604 604 #define ARB_MODE_SWIZZLE_IVB (1<<5) 605 605 #define RENDER_HWS_PGA_GEN7 (0x04080) 606 606 #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 607 + #define RING_FAULT_GTTSEL_MASK (1<<11) 608 + #define RING_FAULT_SRCID(x) ((x >> 3) & 0xff) 609 + #define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) 610 + #define RING_FAULT_VALID (1<<0) 607 611 #define DONE_REG 0x40b0 608 612 #define BSD_HWS_PGA_GEN7 (0x04180) 609 613 #define BLT_HWS_PGA_GEN7 (0x04280) ··· 4283 4279 #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) 4284 4280 4285 4281 #define SOUTH_DSPCLK_GATE_D 0xc2020 4282 + #define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30) 4286 4283 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 4284 + #define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14) 4287 4285 #define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) 4288 4286 4289 4287 /* CPU: FDI_TX */
+3 -1
drivers/gpu/drm/i915/intel_pm.c
···
 	 * gating for the panel power sequencer or it will fail to
 	 * start up when no ports are active.
 	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
 	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
 		   DPLS_EDP_PPS_FIX_DIS);
 	/* The below fixes the weird display corruption, a few pixels shifted
+36 -18
drivers/gpu/drm/radeon/atombios_encoders.c
··· 707 707 switch (connector->connector_type) { 708 708 case DRM_MODE_CONNECTOR_DVII: 709 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 710 - if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 711 - (drm_detect_hdmi_monitor(radeon_connector->edid) && 712 - (radeon_connector->audio == RADEON_AUDIO_AUTO))) 713 - return ATOM_ENCODER_MODE_HDMI; 714 - else if (radeon_connector->use_digital) 710 + if (radeon_audio != 0) { 711 + if (radeon_connector->use_digital && 712 + (radeon_connector->audio == RADEON_AUDIO_ENABLE)) 713 + return ATOM_ENCODER_MODE_HDMI; 714 + else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 715 + (radeon_connector->audio == RADEON_AUDIO_AUTO)) 716 + return ATOM_ENCODER_MODE_HDMI; 717 + else if (radeon_connector->use_digital) 718 + return ATOM_ENCODER_MODE_DVI; 719 + else 720 + return ATOM_ENCODER_MODE_CRT; 721 + } else if (radeon_connector->use_digital) { 715 722 return ATOM_ENCODER_MODE_DVI; 716 - else 723 + } else { 717 724 return ATOM_ENCODER_MODE_CRT; 725 + } 718 726 break; 719 727 case DRM_MODE_CONNECTOR_DVID: 720 728 case DRM_MODE_CONNECTOR_HDMIA: 721 729 default: 722 - if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 723 - (drm_detect_hdmi_monitor(radeon_connector->edid) && 724 - (radeon_connector->audio == RADEON_AUDIO_AUTO))) 725 - return ATOM_ENCODER_MODE_HDMI; 726 - else 730 + if (radeon_audio != 0) { 731 + if (radeon_connector->audio == RADEON_AUDIO_ENABLE) 732 + return ATOM_ENCODER_MODE_HDMI; 733 + else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 734 + (radeon_connector->audio == RADEON_AUDIO_AUTO)) 735 + return ATOM_ENCODER_MODE_HDMI; 736 + else 737 + return ATOM_ENCODER_MODE_DVI; 738 + } else { 727 739 return ATOM_ENCODER_MODE_DVI; 740 + } 728 741 break; 729 742 case DRM_MODE_CONNECTOR_LVDS: 730 743 return ATOM_ENCODER_MODE_LVDS; ··· 745 732 case DRM_MODE_CONNECTOR_DisplayPort: 746 733 dig_connector = radeon_connector->con_priv; 747 734 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 748 - (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 735 + (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { 749 736 return ATOM_ENCODER_MODE_DP; 750 - else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 751 - (drm_detect_hdmi_monitor(radeon_connector->edid) && 752 - (radeon_connector->audio == RADEON_AUDIO_AUTO))) 753 - return ATOM_ENCODER_MODE_HDMI; 754 - else 737 + } else if (radeon_audio != 0) { 738 + if (radeon_connector->audio == RADEON_AUDIO_ENABLE) 739 + return ATOM_ENCODER_MODE_HDMI; 740 + else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 741 + (radeon_connector->audio == RADEON_AUDIO_AUTO)) 742 + return ATOM_ENCODER_MODE_HDMI; 743 + else 744 + return ATOM_ENCODER_MODE_DVI; 745 + } else { 755 746 return ATOM_ENCODER_MODE_DVI; 747 + } 756 748 break; 757 749 case DRM_MODE_CONNECTOR_eDP: 758 750 return ATOM_ENCODER_MODE_DP; ··· 1673 1655 * does the same thing and more. 1674 1656 */ 1675 1657 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) && 1676 - (rdev->family != CHIP_RS880)) 1658 + (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880)) 1677 1659 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1678 1660 } 1679 1661 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+4
drivers/gpu/drm/radeon/cik.c
··· 1694 1694 fw_name); 1695 1695 release_firmware(rdev->smc_fw); 1696 1696 rdev->smc_fw = NULL; 1697 + err = 0; 1697 1698 } else if (rdev->smc_fw->size != smc_req_size) { 1698 1699 printk(KERN_ERR 1699 1700 "cik_smc: Bogus length %zu in firmware \"%s\"\n", ··· 3183 3182 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3184 3183 if (r) { 3185 3184 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3185 + radeon_scratch_free(rdev, scratch); 3186 3186 return r; 3187 3187 } 3188 3188 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); ··· 3200 3198 r = radeon_fence_wait(ib.fence, false); 3201 3199 if (r) { 3202 3200 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3201 + radeon_scratch_free(rdev, scratch); 3202 + radeon_ib_free(rdev, &ib); 3203 3203 return r; 3204 3204 } 3205 3205 for (i = 0; i < rdev->usec_timeout; i++) {
+3
drivers/gpu/drm/radeon/dce6_afmt.c
··· 113 113 u8 *sadb; 114 114 int sad_count; 115 115 116 + /* XXX: setting this register causes hangs on some asics */ 117 + return; 118 + 116 119 if (!dig->afmt->pin) 117 120 return; 118 121
+3
drivers/gpu/drm/radeon/evergreen_hdmi.c
··· 67 67 u8 *sadb; 68 68 int sad_count; 69 69 70 + /* XXX: setting this register causes hangs on some asics */ 71 + return; 72 + 70 73 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 71 74 if (connector->encoder == encoder) 72 75 radeon_connector = to_radeon_connector(connector);
+1
drivers/gpu/drm/radeon/ni.c
··· 804 804 fw_name); 805 805 release_firmware(rdev->smc_fw); 806 806 rdev->smc_fw = NULL; 807 + err = 0; 807 808 } else if (rdev->smc_fw->size != smc_req_size) { 808 809 printk(KERN_ERR 809 810 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+1
drivers/gpu/drm/radeon/r600.c
··· 2302 2302 fw_name); 2303 2303 release_firmware(rdev->smc_fw); 2304 2304 rdev->smc_fw = NULL; 2305 + err = 0; 2305 2306 } else if (rdev->smc_fw->size != smc_req_size) { 2306 2307 printk(KERN_ERR 2307 2308 "smc: Bogus length %zu in firmware \"%s\"\n",
+3
drivers/gpu/drm/radeon/r600_hdmi.c
··· 309 309 u8 *sadb; 310 310 int sad_count; 311 311 312 + /* XXX: setting this register causes hangs on some asics */ 313 + return; 314 + 312 315 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 313 316 if (connector->encoder == encoder) 314 317 radeon_connector = to_radeon_connector(connector);
+21 -12
drivers/gpu/drm/radeon/radeon_connectors.c
··· 1658 1658 drm_object_attach_property(&radeon_connector->base.base, 1659 1659 rdev->mode_info.underscan_vborder_property, 1660 1660 0); 1661 - drm_object_attach_property(&radeon_connector->base.base, 1662 - rdev->mode_info.audio_property, 1663 - RADEON_AUDIO_DISABLE); 1661 + if (radeon_audio != 0) 1662 + drm_object_attach_property(&radeon_connector->base.base, 1663 + rdev->mode_info.audio_property, 1664 + (radeon_audio == 1) ? 1665 + RADEON_AUDIO_AUTO : 1666 + RADEON_AUDIO_DISABLE); 1664 1667 subpixel_order = SubPixelHorizontalRGB; 1665 1668 connector->interlace_allowed = true; 1666 1669 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) ··· 1757 1754 rdev->mode_info.underscan_vborder_property, 1758 1755 0); 1759 1756 } 1760 - if (ASIC_IS_DCE2(rdev)) { 1757 + if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { 1761 1758 drm_object_attach_property(&radeon_connector->base.base, 1762 - rdev->mode_info.audio_property, 1763 - RADEON_AUDIO_DISABLE); 1759 + rdev->mode_info.audio_property, 1760 + (radeon_audio == 1) ? 1761 + RADEON_AUDIO_AUTO : 1762 + RADEON_AUDIO_DISABLE); 1764 1763 } 1765 1764 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1766 1765 radeon_connector->dac_load_detect = true; ··· 1804 1799 rdev->mode_info.underscan_vborder_property, 1805 1800 0); 1806 1801 } 1807 - if (ASIC_IS_DCE2(rdev)) { 1802 + if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { 1808 1803 drm_object_attach_property(&radeon_connector->base.base, 1809 - rdev->mode_info.audio_property, 1810 - RADEON_AUDIO_DISABLE); 1804 + rdev->mode_info.audio_property, 1805 + (radeon_audio == 1) ? 1806 + RADEON_AUDIO_AUTO : 1807 + RADEON_AUDIO_DISABLE); 1811 1808 } 1812 1809 subpixel_order = SubPixelHorizontalRGB; 1813 1810 connector->interlace_allowed = true; ··· 1850 1843 rdev->mode_info.underscan_vborder_property, 1851 1844 0); 1852 1845 } 1853 - if (ASIC_IS_DCE2(rdev)) { 1846 + if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { 1854 1847 drm_object_attach_property(&radeon_connector->base.base, 1855 - rdev->mode_info.audio_property, 1856 - RADEON_AUDIO_DISABLE); 1848 + rdev->mode_info.audio_property, 1849 + (radeon_audio == 1) ? 1850 + RADEON_AUDIO_AUTO : 1851 + RADEON_AUDIO_DISABLE); 1857 1852 } 1858 1853 connector->interlace_allowed = true; 1859 1854 /* in theory with a DP to VGA converter... */
+1 -2
drivers/gpu/drm/radeon/radeon_cs.c
··· 85 85 VRAM, also but everything into VRAM on AGP cards to avoid 86 86 image corruptions */ 87 87 if (p->ring == R600_RING_TYPE_UVD_INDEX && 88 - p->rdev->family < CHIP_PALM && 89 88 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { 90 - 89 + /* TODO: is this still needed for NI+ ? */ 91 90 p->relocs[i].lobj.domain = 92 91 RADEON_GEM_DOMAIN_VRAM; 93 92
+2 -2
drivers/gpu/drm/radeon/radeon_drv.c
···
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = -1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
···
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+2 -1
drivers/gpu/drm/radeon/radeon_uvd.c
··· 476 476 return -EINVAL; 477 477 } 478 478 479 - if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) && 479 + /* TODO: is this still necessary on NI+ ? */ 480 + if ((cmd == 0 || cmd == 0x3) && 480 481 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 481 482 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 482 483 start, end);
+1
drivers/gpu/drm/radeon/si.c
··· 1681 1681 fw_name); 1682 1682 release_firmware(rdev->smc_fw); 1683 1683 rdev->smc_fw = NULL; 1684 + err = 0; 1684 1685 } else if (rdev->smc_fw->size != smc_req_size) { 1685 1686 printk(KERN_ERR 1686 1687 "si_smc: Bogus length %zu in firmware \"%s\"\n",
+2 -2
drivers/gpu/drm/radeon/uvd_v1_0.c
···
 	/* enable VCPU clock */
 	WREG32(UVD_VCPU_CNTL,  1 << 9);
 
-	/* enable UMC and NC0 */
-	WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
+	/* enable UMC */
+	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
 
 	/* boot up the VCPU */
 	WREG32(UVD_SOFT_RESET, 0);
+12 -5
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 740 740 struct vmw_fpriv *vmw_fp; 741 741 742 742 vmw_fp = vmw_fpriv(file_priv); 743 - ttm_object_file_release(&vmw_fp->tfile); 744 - if (vmw_fp->locked_master) 743 + 744 + if (vmw_fp->locked_master) { 745 + struct vmw_master *vmaster = 746 + vmw_master(vmw_fp->locked_master); 747 + 748 + ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 749 + ttm_vt_unlock(&vmaster->lock); 745 750 drm_master_put(&vmw_fp->locked_master); 751 + } 752 + 753 + ttm_object_file_release(&vmw_fp->tfile); 746 754 kfree(vmw_fp); 747 755 } 748 756 ··· 933 925 934 926 vmw_fp->locked_master = drm_master_get(file_priv->master); 935 927 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 936 - vmw_execbuf_release_pinned_bo(dev_priv); 937 - 938 928 if (unlikely((ret != 0))) { 939 929 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 940 930 drm_master_put(&vmw_fp->locked_master); 941 931 } 942 932 943 - ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 933 + ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 934 + vmw_execbuf_release_pinned_bo(dev_priv); 944 935 945 936 if (!dev_priv->enable_fb) { 946 937 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 970 970 if (new_backup) 971 971 res->backup_offset = new_backup_offset; 972 972 973 - if (!res->func->may_evict) 973 + if (!res->func->may_evict || res->id == -1) 974 974 return; 975 975 976 976 write_lock(&dev_priv->resource_lock);
+7 -5
drivers/hid/hid-core.c
···
 
 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 {
-	__u32 raw_value;
+	__s32 raw_value;
 	switch (item->tag) {
 	case HID_GLOBAL_ITEM_TAG_PUSH:
 
···
 		return 0;
 
 	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
-		/* Units exponent negative numbers are given through a
-		 * two's complement.
-		 * See "6.2.2.7 Global Items" for more information. */
-		raw_value = item_udata(item);
+		/* Many devices provide unit exponent as a two's complement
+		 * nibble due to the common misunderstanding of HID
+		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
+		 * both this and the standard encoding. */
+		raw_value = item_sdata(item);
 		if (!(raw_value & 0xfffffff0))
 			parser->global.unit_exponent = hid_snto32(raw_value, 4);
 		else
···
 
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
 	{ }
 };
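The unit-exponent fix above depends on sign-extending a two's-complement nibble: 0xE read as a 4-bit signed value is -2 (an exponent of 10^-2), not 14. hid_snto32() performs that extension in the kernel; below is a standalone sketch of the same n-bit sign extension with a local stand-in helper, not the kernel function:

/* n-bit two's-complement sign extension. */
#include <stdio.h>
#include <stdint.h>

static int32_t snto32(uint32_t value, unsigned int n)
{
	uint32_t sign_bit = 1u << (n - 1);

	value &= (1u << n) - 1;		/* keep only the low n bits */
	return (int32_t)(value ^ sign_bit) - (int32_t)sign_bit;
}

int main(void)
{
	/* 0xE in a 4-bit field is -2: an exponent of 10^-2, not 10^14. */
	printf("snto32(0xE, 4) = %d\n", snto32(0xE, 4));
	printf("snto32(0x3, 4) = %d\n", snto32(0x3, 4));
	return 0;
}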
+6
drivers/hid/hid-ids.h
··· 633 633 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 634 634 635 635 #define USB_VENDOR_ID_NINTENDO 0x057e 636 + #define USB_VENDOR_ID_NINTENDO2 0x054c 636 637 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 637 638 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330 638 639 ··· 793 792 #define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 794 793 #define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 795 794 #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 795 + #define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8 796 + #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 796 797 797 798 #define USB_VENDOR_ID_THINGM 0x27b8 798 799 #define USB_DEVICE_ID_BLINK1 0x01ed ··· 921 918 922 919 #define USB_VENDOR_ID_PRIMAX 0x0461 923 920 #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 921 + 922 + #define USB_VENDOR_ID_SIS 0x0457 923 + #define USB_DEVICE_ID_SIS_TS 0x1013 924 924 925 925 #endif
+4 -9
drivers/hid/hid-input.c
··· 192 192 return -EINVAL; 193 193 } 194 194 195 + 195 196 /** 196 197 * hidinput_calc_abs_res - calculate an absolute axis resolution 197 198 * @field: the HID report field to calculate resolution for ··· 235 234 case ABS_MT_TOOL_Y: 236 235 case ABS_MT_TOUCH_MAJOR: 237 236 case ABS_MT_TOUCH_MINOR: 238 - if (field->unit & 0xffffff00) /* Not a length */ 239 - return 0; 240 - unit_exponent += hid_snto32(field->unit >> 4, 4) - 1; 241 - switch (field->unit & 0xf) { 242 - case 0x1: /* If centimeters */ 237 + if (field->unit == 0x11) { /* If centimeters */ 243 238 /* Convert to millimeters */ 244 239 unit_exponent += 1; 245 - break; 246 - case 0x3: /* If inches */ 240 + } else if (field->unit == 0x13) { /* If inches */ 247 241 /* Convert to millimeters */ 248 242 prev = physical_extents; 249 243 physical_extents *= 254; 250 244 if (physical_extents < prev) 251 245 return 0; 252 246 unit_exponent -= 1; 253 - break; 254 - default: 247 + } else { 255 248 return 0; 256 249 } 257 250 break;
+4 -1
drivers/hid/hid-wiimote-core.c
··· 834 834 goto done; 835 835 } 836 836 837 - if (vendor == USB_VENDOR_ID_NINTENDO) { 837 + if (vendor == USB_VENDOR_ID_NINTENDO || 838 + vendor == USB_VENDOR_ID_NINTENDO2) { 838 839 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { 839 840 devtype = WIIMOTE_DEV_GEN10; 840 841 goto done; ··· 1855 1854 1856 1855 static const struct hid_device_id wiimote_hid_devices[] = { 1857 1856 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1857 + USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1858 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, 1858 1859 USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1859 1860 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1860 1861 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+3
drivers/hid/usbhid/hid-quirks.c
···
 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
 
 	{ 0, 0 }
 };
+11
drivers/infiniband/Kconfig
···
 	  libibverbs, libibcm and a hardware driver library from
 	  <http://www.openfabrics.org/git/>.
 
+config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
+	bool "Experimental and unstable ABI for userspace access to flow steering verbs"
+	depends on INFINIBAND_USER_ACCESS
+	depends on STAGING
+	---help---
+	  The final ABI for userspace access to flow steering verbs
+	  has not been defined.  To use the current ABI, *WHICH WILL
+	  CHANGE IN THE FUTURE*, say Y here.
+
+	  If unsure, say N.
+
 config INFINIBAND_USER_MEM
 	bool
 	depends on INFINIBAND_USER_ACCESS != n
+2
drivers/infiniband/core/uverbs.h
··· 217 217 IB_UVERBS_DECLARE_CMD(create_xsrq); 218 218 IB_UVERBS_DECLARE_CMD(open_xrcd); 219 219 IB_UVERBS_DECLARE_CMD(close_xrcd); 220 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 220 221 IB_UVERBS_DECLARE_CMD(create_flow); 221 222 IB_UVERBS_DECLARE_CMD(destroy_flow); 223 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 222 224 223 225 #endif /* UVERBS_H */
+4
drivers/infiniband/core/uverbs_cmd.c
··· 54 54 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; 55 55 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; 56 56 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; 57 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 57 58 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; 59 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 58 60 59 61 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 60 62 do { \ ··· 2601 2599 return ret ? ret : in_len; 2602 2600 } 2603 2601 2602 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 2604 2603 static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, 2605 2604 union ib_flow_spec *ib_spec) 2606 2605 { ··· 2827 2824 2828 2825 return ret ? ret : in_len; 2829 2826 } 2827 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 2830 2828 2831 2829 static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 2832 2830 struct ib_uverbs_create_xsrq *cmd,
+6
drivers/infiniband/core/uverbs_main.c
··· 115 115 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, 116 116 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, 117 117 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp, 118 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 118 119 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow, 119 120 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow 121 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 120 122 }; 121 123 122 124 static void ib_uverbs_add_one(struct ib_device *device); ··· 607 605 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) 608 606 return -ENOSYS; 609 607 608 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 610 609 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) { 611 610 struct ib_uverbs_cmd_hdr_ex hdr_ex; 612 611 ··· 624 621 (hdr_ex.out_words + 625 622 hdr_ex.provider_out_words) * 4); 626 623 } else { 624 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 627 625 if (hdr.in_words * 4 != count) 628 626 return -EINVAL; 629 627 ··· 632 628 buf + sizeof(hdr), 633 629 hdr.in_words * 4, 634 630 hdr.out_words * 4); 631 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 635 632 } 633 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 636 634 } 637 635 638 636 static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
+2
drivers/infiniband/hw/mlx4/main.c
··· 1691 1691 ibdev->ib_dev.create_flow = mlx4_ib_create_flow; 1692 1692 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; 1693 1693 1694 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 1694 1695 ibdev->ib_dev.uverbs_cmd_mask |= 1695 1696 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) | 1696 1697 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW); 1698 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 1697 1699 } 1698 1700 1699 1701 mlx4_ib_alloc_eqs(dev, ibdev);
+1 -1
drivers/infiniband/ulp/isert/ib_isert.c
··· 594 594 595 595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 596 596 597 - if (device->use_frwr) 597 + if (device && device->use_frwr) 598 598 isert_conn_free_frwr_pool(isert_conn); 599 599 600 600 if (isert_conn->conn_qp) {
+1 -1
drivers/md/bcache/request.c
··· 1000 1000 1001 1001 if (bio->bi_rw & REQ_FLUSH) { 1002 1002 /* Also need to send a flush to the backing device */ 1003 - struct bio *flush = bio_alloc_bioset(0, GFP_NOIO, 1003 + struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, 1004 1004 dc->disk.bio_split); 1005 1005 1006 1006 flush->bi_rw = WRITE_FLUSH;
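Note: the two leading arguments were simply transposed; bio_alloc_bioset() takes the GFP mask first and the bvec count second, roughly:

    /* prototype from memory for this kernel generation; the point is the argument order */
    struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs);

Because gfp_t is only a __bitwise-annotated unsigned int, gcc accepts the swapped call without a warning, which is how the bug slipped through; sparse is what flags it.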
+3 -2
drivers/md/md.c
··· 8111 8111 u64 *p; 8112 8112 int lo, hi; 8113 8113 int rv = 1; 8114 + unsigned long flags; 8114 8115 8115 8116 if (bb->shift < 0) 8116 8117 /* badblocks are disabled */ ··· 8126 8125 sectors = next - s; 8127 8126 } 8128 8127 8129 - write_seqlock_irq(&bb->lock); 8128 + write_seqlock_irqsave(&bb->lock, flags); 8130 8129 8131 8130 p = bb->page; 8132 8131 lo = 0; ··· 8242 8241 bb->changed = 1; 8243 8242 if (!acknowledged) 8244 8243 bb->unacked_exist = 1; 8245 - write_sequnlock_irq(&bb->lock); 8244 + write_sequnlock_irqrestore(&bb->lock, flags); 8246 8245 8247 8246 return rv; 8248 8247 }
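Note: the plain _irq variants re-enable interrupts unconditionally on unlock, which is wrong if this path is ever entered with interrupts already disabled; the irqsave/irqrestore pair preserves whatever state the caller had. The idiom, reduced to its essentials (same lock as in the hunk):

    unsigned long flags;

    write_seqlock_irqsave(&bb->lock, flags);      /* records the current IRQ state */
    /* ... insert or merge the bad-block range ... */
    write_sequnlock_irqrestore(&bb->lock, flags); /* restores it rather than forcing IRQs back on */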
+1
drivers/md/raid1.c
··· 1479 1479 } 1480 1480 } 1481 1481 if (rdev 1482 + && rdev->recovery_offset == MaxSector 1482 1483 && !test_bit(Faulty, &rdev->flags) 1483 1484 && !test_and_set_bit(In_sync, &rdev->flags)) { 1484 1485 count++;
+1
drivers/md/raid10.c
··· 1782 1782 } 1783 1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1784 1784 } else if (tmp->rdev 1785 + && tmp->rdev->recovery_offset == MaxSector 1785 1786 && !test_bit(Faulty, &tmp->rdev->flags) 1786 1787 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1787 1788 count++;
+20
drivers/md/raid5.c
··· 778 778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 779 779 bi->bi_io_vec[0].bv_offset = 0; 780 780 bi->bi_size = STRIPE_SIZE; 781 + /* 782 + * If this is discard request, set bi_vcnt 0. We don't 783 + * want to confuse SCSI because SCSI will replace payload 784 + */ 785 + if (rw & REQ_DISCARD) 786 + bi->bi_vcnt = 0; 781 787 if (rrdev) 782 788 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 783 789 ··· 822 816 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 823 817 rbi->bi_io_vec[0].bv_offset = 0; 824 818 rbi->bi_size = STRIPE_SIZE; 819 + /* 820 + * If this is discard request, set bi_vcnt 0. We don't 821 + * want to confuse SCSI because SCSI will replace payload 822 + */ 823 + if (rw & REQ_DISCARD) 824 + rbi->bi_vcnt = 0; 825 825 if (conf->mddev->gendisk) 826 826 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 827 827 rbi, disk_devt(conf->mddev->gendisk), ··· 2922 2910 } 2923 2911 /* now that discard is done we can proceed with any sync */ 2924 2912 clear_bit(STRIPE_DISCARD, &sh->state); 2913 + /* 2914 + * SCSI discard will change some bio fields and the stripe has 2915 + * no updated data, so remove it from hash list and the stripe 2916 + * will be reinitialized 2917 + */ 2918 + spin_lock_irq(&conf->device_lock); 2919 + remove_hash(sh); 2920 + spin_unlock_irq(&conf->device_lock); 2925 2921 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 2926 2922 set_bit(STRIPE_HANDLE, &sh->state); 2927 2923
+1 -8
drivers/media/dvb-frontends/tda10071.c
··· 912 912 { 0xd5, 0x03, 0x03 }, 913 913 }; 914 914 915 - /* firmware status */ 916 - ret = tda10071_rd_reg(priv, 0x51, &tmp); 917 - if (ret) 918 - goto error; 919 - 920 - if (!tmp) { 915 + if (priv->warm) { 921 916 /* warm state - wake up device from sleep */ 922 - priv->warm = 1; 923 917 924 918 for (i = 0; i < ARRAY_SIZE(tab); i++) { 925 919 ret = tda10071_wr_reg_mask(priv, tab[i].reg, ··· 931 937 goto error; 932 938 } else { 933 939 /* cold state - try to download firmware */ 934 - priv->warm = 0; 935 940 936 941 /* request the firmware, this will block and timeout */ 937 942 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+6 -9
drivers/media/i2c/ad9389b.c
··· 628 628 629 629 static const struct v4l2_dv_timings_cap ad9389b_timings_cap = { 630 630 .type = V4L2_DV_BT_656_1120, 631 - .bt = { 632 - .max_width = 1920, 633 - .max_height = 1200, 634 - .min_pixelclock = 25000000, 635 - .max_pixelclock = 170000000, 636 - .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 631 + /* keep this initialization for compatibility with GCC < 4.4.6 */ 632 + .reserved = { 0 }, 633 + V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000, 634 + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 637 635 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 638 - .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 639 - V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 640 - }, 636 + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | 637 + V4L2_DV_BT_CAP_CUSTOM) 641 638 }; 642 639 643 640 static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
+9 -9
drivers/media/i2c/adv7511.c
··· 119 119 120 120 static const struct v4l2_dv_timings_cap adv7511_timings_cap = { 121 121 .type = V4L2_DV_BT_656_1120, 122 - .bt = { 123 - .max_width = ADV7511_MAX_WIDTH, 124 - .max_height = ADV7511_MAX_HEIGHT, 125 - .min_pixelclock = ADV7511_MIN_PIXELCLOCK, 126 - .max_pixelclock = ADV7511_MAX_PIXELCLOCK, 127 - .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 122 + /* keep this initialization for compatibility with GCC < 4.4.6 */ 123 + .reserved = { 0 }, 124 + V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT, 125 + ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK, 126 + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 128 127 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 129 - .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 130 - V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 131 - }, 128 + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | 129 + V4L2_DV_BT_CAP_CUSTOM) 132 130 }; 133 131 134 132 static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd) ··· 1124 1126 state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1); 1125 1127 if (state->i2c_edid == NULL) { 1126 1128 v4l2_err(sd, "failed to register edid i2c client\n"); 1129 + err = -ENOMEM; 1127 1130 goto err_entity; 1128 1131 } 1129 1132 ··· 1132 1133 state->work_queue = create_singlethread_workqueue(sd->name); 1133 1134 if (state->work_queue == NULL) { 1134 1135 v4l2_err(sd, "could not create workqueue\n"); 1136 + err = -ENOMEM; 1135 1137 goto err_unreg_cec; 1136 1138 } 1137 1139
+12 -18
drivers/media/i2c/adv7842.c
··· 546 546 547 547 static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = { 548 548 .type = V4L2_DV_BT_656_1120, 549 - .bt = { 550 - .max_width = 1920, 551 - .max_height = 1200, 552 - .min_pixelclock = 25000000, 553 - .max_pixelclock = 170000000, 554 - .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 549 + /* keep this initialization for compatibility with GCC < 4.4.6 */ 550 + .reserved = { 0 }, 551 + V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000, 552 + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 555 553 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 556 - .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 557 - V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 558 - }, 554 + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | 555 + V4L2_DV_BT_CAP_CUSTOM) 559 556 }; 560 557 561 558 static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = { 562 559 .type = V4L2_DV_BT_656_1120, 563 - .bt = { 564 - .max_width = 1920, 565 - .max_height = 1200, 566 - .min_pixelclock = 25000000, 567 - .max_pixelclock = 225000000, 568 - .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 560 + /* keep this initialization for compatibility with GCC < 4.4.6 */ 561 + .reserved = { 0 }, 562 + V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000, 563 + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | 569 564 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 570 - .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 571 - V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 572 - }, 565 + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | 566 + V4L2_DV_BT_CAP_CUSTOM) 573 567 }; 574 568 575 569 static inline const struct v4l2_dv_timings_cap *
+4 -8
drivers/media/i2c/ths8200.c
··· 46 46 47 47 static const struct v4l2_dv_timings_cap ths8200_timings_cap = { 48 48 .type = V4L2_DV_BT_656_1120, 49 - .bt = { 50 - .max_width = 1920, 51 - .max_height = 1080, 52 - .min_pixelclock = 25000000, 53 - .max_pixelclock = 148500000, 54 - .standards = V4L2_DV_BT_STD_CEA861, 55 - .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE, 56 - }, 49 + /* keep this initialization for compatibility with GCC < 4.4.6 */ 50 + .reserved = { 0 }, 51 + V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000, 52 + V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE) 57 53 }; 58 54 59 55 static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
+1
drivers/media/pci/saa7134/saa7134-video.c
··· 1455 1455 1456 1456 /* stop video capture */ 1457 1457 if (res_check(fh, RESOURCE_VIDEO)) { 1458 + pm_qos_remove_request(&dev->qos_request); 1458 1459 videobuf_streamoff(&fh->cap); 1459 1460 res_free(dev,fh,RESOURCE_VIDEO); 1460 1461 }
+1
drivers/media/platform/s5p-jpeg/jpeg-core.c
··· 1423 1423 jpeg->vfd_decoder->release = video_device_release; 1424 1424 jpeg->vfd_decoder->lock = &jpeg->lock; 1425 1425 jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev; 1426 + jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M; 1426 1427 1427 1428 ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1); 1428 1429 if (ret) {
+1 -1
drivers/media/platform/sh_vou.c
··· 776 776 v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1, 777 777 &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0); 778 778 779 - for (i = 0; ARRAY_SIZE(vou_fmt); i++) 779 + for (i = 0; i < ARRAY_SIZE(vou_fmt); i++) 780 780 if (vou_fmt[i].pfmt == pix->pixelformat) 781 781 return 0; 782 782
+2 -3
drivers/media/platform/soc_camera/mx3_camera.c
··· 266 266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; 267 267 struct idmac_video_param *video = &ichan->params.video; 268 268 const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt; 269 - unsigned long flags; 270 269 dma_cookie_t cookie; 271 270 size_t new_size; 272 271 ··· 327 328 memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); 328 329 #endif 329 330 330 - spin_lock_irqsave(&mx3_cam->lock, flags); 331 + spin_lock_irq(&mx3_cam->lock); 331 332 list_add_tail(&buf->queue, &mx3_cam->capture); 332 333 333 334 if (!mx3_cam->active) ··· 350 351 if (mx3_cam->active == buf) 351 352 mx3_cam->active = NULL; 352 353 353 - spin_unlock_irqrestore(&mx3_cam->lock, flags); 354 + spin_unlock_irq(&mx3_cam->lock); 354 355 error: 355 356 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); 356 357 }
+2 -1
drivers/media/tuners/e4000.c
··· 19 19 */ 20 20 21 21 #include "e4000_priv.h" 22 + #include <linux/math64.h> 22 23 23 24 /* write multiple registers */ 24 25 static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) ··· 234 233 * or more. 235 234 */ 236 235 f_vco = c->frequency * e4000_pll_lut[i].mul; 237 - sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock; 236 + sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock); 238 237 buf[0] = f_vco / priv->cfg->clock; 239 238 buf[1] = (sigma_delta >> 0) & 0xff; 240 239 buf[2] = (sigma_delta >> 8) & 0xff;
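Note: two 32-bit pitfalls are addressed at once here. A 64-bit '/' on a 32-bit kernel would need libgcc's __udivdi3, which the kernel does not link against, so div_u64() is required; and the old 32-bit product could overflow, since a remainder of roughly 10^7 (a clock in the tens of MHz) times 0x10000 is about 6.6e11, far past the ~4.3e9 ceiling of a u32. A minimal sketch with invented frequencies:

    #include <linux/math64.h>

    u32 clock = 26000000;                  /* illustrative reference clock, not the e4000's */
    u32 f_vco = 3600000000U;               /* illustrative VCO frequency */
    u32 remainder = f_vco % clock;         /* always < clock */
    u32 sigma_delta = div_u64(0x10000ULL * remainder, clock);  /* 64-bit product, no __udivdi3 */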
+7
drivers/media/usb/stkwebcam/stk-webcam.c
··· 111 111 DMI_MATCH(DMI_PRODUCT_NAME, "F3JC") 112 112 } 113 113 }, 114 + { 115 + .ident = "T12Rg-H", 116 + .matches = { 117 + DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"), 118 + DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H") 119 + } 120 + }, 114 121 {} 115 122 }; 116 123
+18
drivers/media/usb/uvc/uvc_driver.c
··· 2090 2090 .bInterfaceSubClass = 1, 2091 2091 .bInterfaceProtocol = 0, 2092 2092 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 2093 + /* Microsoft Lifecam NX-3000 */ 2094 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2095 + | USB_DEVICE_ID_MATCH_INT_INFO, 2096 + .idVendor = 0x045e, 2097 + .idProduct = 0x0721, 2098 + .bInterfaceClass = USB_CLASS_VIDEO, 2099 + .bInterfaceSubClass = 1, 2100 + .bInterfaceProtocol = 0, 2101 + .driver_info = UVC_QUIRK_PROBE_DEF }, 2093 2102 /* Microsoft Lifecam VX-7000 */ 2094 2103 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2095 2104 | USB_DEVICE_ID_MATCH_INT_INFO, ··· 2179 2170 | USB_DEVICE_ID_MATCH_INT_INFO, 2180 2171 .idVendor = 0x05a9, 2181 2172 .idProduct = 0x2640, 2173 + .bInterfaceClass = USB_CLASS_VIDEO, 2174 + .bInterfaceSubClass = 1, 2175 + .bInterfaceProtocol = 0, 2176 + .driver_info = UVC_QUIRK_PROBE_DEF }, 2177 + /* Dell SP2008WFP Monitor */ 2178 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2179 + | USB_DEVICE_ID_MATCH_INT_INFO, 2180 + .idVendor = 0x05a9, 2181 + .idProduct = 0x2641, 2182 2182 .bInterfaceClass = USB_CLASS_VIDEO, 2183 2183 .bInterfaceSubClass = 1, 2184 2184 .bInterfaceProtocol = 0,
+3 -1
drivers/media/v4l2-core/videobuf2-core.c
··· 353 353 354 354 if (b->m.planes[plane].bytesused > length) 355 355 return -EINVAL; 356 - if (b->m.planes[plane].data_offset >= 356 + 357 + if (b->m.planes[plane].data_offset > 0 && 358 + b->m.planes[plane].data_offset >= 357 359 b->m.planes[plane].bytesused) 358 360 return -EINVAL; 359 361 }
+82 -5
drivers/media/v4l2-core/videobuf2-dma-contig.c
··· 423 423 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 424 424 } 425 425 426 + static int vb2_dc_get_user_pfn(unsigned long start, int n_pages, 427 + struct vm_area_struct *vma, unsigned long *res) 428 + { 429 + unsigned long pfn, start_pfn, prev_pfn; 430 + unsigned int i; 431 + int ret; 432 + 433 + if (!vma_is_io(vma)) 434 + return -EFAULT; 435 + 436 + ret = follow_pfn(vma, start, &pfn); 437 + if (ret) 438 + return ret; 439 + 440 + start_pfn = pfn; 441 + start += PAGE_SIZE; 442 + 443 + for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) { 444 + prev_pfn = pfn; 445 + ret = follow_pfn(vma, start, &pfn); 446 + 447 + if (ret) { 448 + pr_err("no page for address %lu\n", start); 449 + return ret; 450 + } 451 + if (pfn != prev_pfn + 1) 452 + return -EINVAL; 453 + } 454 + 455 + *res = start_pfn; 456 + return 0; 457 + } 458 + 426 459 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, 427 460 int n_pages, struct vm_area_struct *vma, int write) 428 461 { ··· 465 432 for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) { 466 433 unsigned long pfn; 467 434 int ret = follow_pfn(vma, start, &pfn); 435 + 436 + if (!pfn_valid(pfn)) 437 + return -EINVAL; 468 438 469 439 if (ret) { 470 440 pr_err("no page for address %lu\n", start); ··· 504 468 struct vb2_dc_buf *buf = buf_priv; 505 469 struct sg_table *sgt = buf->dma_sgt; 506 470 507 - dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); 508 - if (!vma_is_io(buf->vma)) 509 - vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 471 + if (sgt) { 472 + dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); 473 + if (!vma_is_io(buf->vma)) 474 + vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 510 475 511 - sg_free_table(sgt); 512 - kfree(sgt); 476 + sg_free_table(sgt); 477 + kfree(sgt); 478 + } 513 479 vb2_put_vma(buf->vma); 514 480 kfree(buf); 515 481 } 482 + 483 + /* 484 + * For some kind of reserved memory there might be no struct page available, 485 + * so all that can be done to support such 'pages' is to try to convert 486 + * pfn to dma address or at the last resort just assume that 487 + * dma address == physical address (like it has been assumed in earlier version 488 + * of videobuf2-dma-contig 489 + */ 490 + 491 + #ifdef __arch_pfn_to_dma 492 + static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) 493 + { 494 + return (dma_addr_t)__arch_pfn_to_dma(dev, pfn); 495 + } 496 + #elif defined(__pfn_to_bus) 497 + static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) 498 + { 499 + return (dma_addr_t)__pfn_to_bus(pfn); 500 + } 501 + #elif defined(__pfn_to_phys) 502 + static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) 503 + { 504 + return (dma_addr_t)__pfn_to_phys(pfn); 505 + } 506 + #else 507 + static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) 508 + { 509 + /* really, we cannot do anything better at this point */ 510 + return (dma_addr_t)(pfn) << PAGE_SHIFT; 511 + } 512 + #endif 516 513 517 514 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, 518 515 unsigned long size, int write) ··· 617 548 /* extract page list from userspace mapping */ 618 549 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); 619 550 if (ret) { 551 + unsigned long pfn; 552 + if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { 553 + buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn); 554 + buf->size = size; 555 + kfree(pages); 556 + return buf; 557 + } 558 + 620 559 pr_err("failed to 
get user pages\n"); 621 560 goto fail_vma; 622 561 }
+1 -1
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
··· 349 349 350 350 int common_nfc_set_geometry(struct gpmi_nand_data *this) 351 351 { 352 - return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this); 352 + return legacy_set_geometry(this); 353 353 } 354 354 355 355 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+6 -1
drivers/mtd/nand/pxa3xx_nand.c
··· 1320 1320 for (cs = 0; cs < pdata->num_cs; cs++) { 1321 1321 struct mtd_info *mtd = info->host[cs]->mtd; 1322 1322 1323 - mtd->name = pdev->name; 1323 + /* 1324 + * The mtd name matches the one used in 'mtdparts' kernel 1325 + * parameter. This name cannot be changed or otherwise 1326 + * user's mtd partitions configuration would get broken. 1327 + */ 1328 + mtd->name = "pxa3xx_nand-0"; 1324 1329 info->cs = cs; 1325 1330 ret = pxa3xx_nand_scan(mtd); 1326 1331 if (ret) {
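Note: the name restored here is the <mtd-id> that the cmdlinepart parser matches against, so a hypothetical existing setup such as

    mtdparts=pxa3xx_nand-0:1m(bootloader),4m(kernel),-(rootfs)

(partition sizes invented for illustration) would silently stop applying if the id ever changed to the platform-device name.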
+2 -2
drivers/net/can/at91_can.c
··· 1405 1405 1406 1406 static const struct platform_device_id at91_can_id_table[] = { 1407 1407 { 1408 - .name = "at91_can", 1408 + .name = "at91sam9x5_can", 1409 1409 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data, 1410 1410 }, { 1411 - .name = "at91sam9x5_can", 1411 + .name = "at91_can", 1412 1412 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data, 1413 1413 }, { 1414 1414 /* sentinel */
+5 -5
drivers/net/can/dev.c
··· 705 705 size_t size; 706 706 707 707 size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ 708 - size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ 708 + size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ 709 709 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ 710 - size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ 711 - size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ 710 + size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */ 711 + size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ 712 712 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ 713 - size += sizeof(struct can_berr_counter); 713 + size += nla_total_size(sizeof(struct can_berr_counter)); 714 714 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ 715 - size += sizeof(struct can_bittiming_const); 715 + size += nla_total_size(sizeof(struct can_bittiming_const)); 716 716 717 717 return size; 718 718 }
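Note: each netlink attribute carries a 4-byte struct nlattr header and is padded out to a 4-byte boundary, so sizing the message with bare sizeof() under-reserves the skb. The arithmetic nla_total_size() performs, spelled out (constants as defined in include/net/netlink.h, quoted from memory):

    /* nla_total_size(payload) == NLA_ALIGN(NLA_HDRLEN + payload); NLA_ALIGN rounds up to 4 */
    size_t payload  = sizeof(struct can_bittiming);
    size_t reserved = ((4 /* NLA_HDRLEN */ + payload) + 3) & ~(size_t)3;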
+10 -4
drivers/net/can/flexcan.c
··· 62 62 #define FLEXCAN_MCR_BCC BIT(16) 63 63 #define FLEXCAN_MCR_LPRIO_EN BIT(13) 64 64 #define FLEXCAN_MCR_AEN BIT(12) 65 - #define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf) 65 + #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f) 66 66 #define FLEXCAN_MCR_IDAM_A (0 << 8) 67 67 #define FLEXCAN_MCR_IDAM_B (1 << 8) 68 68 #define FLEXCAN_MCR_IDAM_C (2 << 8) ··· 735 735 * 736 736 */ 737 737 reg_mcr = flexcan_read(&regs->mcr); 738 + reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); 738 739 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | 739 740 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | 740 - FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS; 741 + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS | 742 + FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID); 741 743 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); 742 744 flexcan_write(reg_mcr, &regs->mcr); 743 745 ··· 772 770 priv->reg_ctrl_default = reg_ctrl; 773 771 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); 774 772 flexcan_write(reg_ctrl, &regs->ctrl); 773 + 774 + /* Abort any pending TX, mark Mailbox as INACTIVE */ 775 + flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), 776 + &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); 775 777 776 778 /* acceptance mask/acceptance code (accept everything) */ 777 779 flexcan_write(0x0, &regs->rxgmask); ··· 985 979 } 986 980 987 981 static const struct of_device_id flexcan_of_match[] = { 988 - { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, 989 - { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, 990 982 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, 983 + { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, 984 + { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, 991 985 { /* sentinel */ }, 992 986 }; 993 987 MODULE_DEVICE_TABLE(of, flexcan_of_match);
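Note: besides widening the MAXMB mask from 4 to 5 bits, the MCR setup now programs the field explicitly instead of inheriting whatever the reset value was, and it clears the field first because ORing a value into a non-zero field can only ever raise it. The clear-then-set idiom on its own (names taken from the hunk):

    u32 mcr = flexcan_read(&regs->mcr);
    mcr &= ~FLEXCAN_MCR_MAXMB(0xff);              /* wipe the old mailbox count */
    mcr |= FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);  /* last mailbox actually used */
    flexcan_write(mcr, &regs->mcr);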
+10 -5
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 1197 1197 /* TM (timers) host DB constants */ 1198 1198 #define TM_ILT_PAGE_SZ_HW 0 1199 1199 #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ 1200 - /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ 1201 - #define TM_CONN_NUM 1024 1200 + #define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \ 1201 + BNX2X_VF_CIDS + \ 1202 + CNIC_ISCSI_CID_MAX) 1202 1203 #define TM_ILT_SZ (8 * TM_CONN_NUM) 1203 1204 #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) 1204 1205 ··· 1528 1527 #define PCI_32BIT_FLAG (1 << 1) 1529 1528 #define ONE_PORT_FLAG (1 << 2) 1530 1529 #define NO_WOL_FLAG (1 << 3) 1531 - #define USING_DAC_FLAG (1 << 4) 1532 1530 #define USING_MSIX_FLAG (1 << 5) 1533 1531 #define USING_MSI_FLAG (1 << 6) 1534 1532 #define DISABLE_MSI_FLAG (1 << 7) ··· 1621 1621 u16 rx_ticks_int; 1622 1622 u16 rx_ticks; 1623 1623 /* Maximal coalescing timeout in us */ 1624 - #define BNX2X_MAX_COALESCE_TOUT (0xf0*12) 1624 + #define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR) 1625 1625 1626 1626 u32 lin_cnt; 1627 1627 ··· 2072 2072 2073 2073 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2074 2074 u8 src_type, u8 dst_type); 2075 - int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); 2075 + int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2076 + u32 *comp); 2076 2077 2077 2078 /* FLR related routines */ 2078 2079 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); ··· 2499 2498 }; 2500 2499 2501 2500 void bnx2x_set_local_cmng(struct bnx2x *bp); 2501 + 2502 + #define MCPR_SCRATCH_BASE(bp) \ 2503 + (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 2504 + 2502 2505 #endif /* bnx2x.h */
+1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 681 681 } 682 682 } 683 683 #endif 684 + skb_record_rx_queue(skb, fp->rx_queue); 684 685 napi_gro_receive(&fp->napi, skb); 685 686 } 686 687
+2 -38
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
··· 891 891 * will re-enable parity attentions right after the dump. 892 892 */ 893 893 894 - /* Disable parity on path 0 */ 895 - bnx2x_pretend_func(bp, 0); 896 894 bnx2x_disable_blocks_parity(bp); 897 - 898 - /* Disable parity on path 1 */ 899 - bnx2x_pretend_func(bp, 1); 900 - bnx2x_disable_blocks_parity(bp); 901 - 902 - /* Return to current function */ 903 - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 904 895 905 896 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 906 897 dump_hdr.preset = DUMP_ALL_PRESETS; ··· 919 928 /* Actually read the registers */ 920 929 __bnx2x_get_regs(bp, p); 921 930 922 - /* Re-enable parity attentions on path 0 */ 923 - bnx2x_pretend_func(bp, 0); 931 + /* Re-enable parity attentions */ 924 932 bnx2x_clear_blocks_parity(bp); 925 933 bnx2x_enable_blocks_parity(bp); 926 - 927 - /* Re-enable parity attentions on path 1 */ 928 - bnx2x_pretend_func(bp, 1); 929 - bnx2x_clear_blocks_parity(bp); 930 - bnx2x_enable_blocks_parity(bp); 931 - 932 - /* Return to current function */ 933 - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 934 934 } 935 935 936 936 static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) ··· 975 993 * will re-enable parity attentions right after the dump. 976 994 */ 977 995 978 - /* Disable parity on path 0 */ 979 - bnx2x_pretend_func(bp, 0); 980 996 bnx2x_disable_blocks_parity(bp); 981 - 982 - /* Disable parity on path 1 */ 983 - bnx2x_pretend_func(bp, 1); 984 - bnx2x_disable_blocks_parity(bp); 985 - 986 - /* Return to current function */ 987 - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 988 997 989 998 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 990 999 dump_hdr.preset = bp->dump_preset_idx; ··· 1005 1032 /* Actually read the registers */ 1006 1033 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); 1007 1034 1008 - /* Re-enable parity attentions on path 0 */ 1009 - bnx2x_pretend_func(bp, 0); 1035 + /* Re-enable parity attentions */ 1010 1036 bnx2x_clear_blocks_parity(bp); 1011 1037 bnx2x_enable_blocks_parity(bp); 1012 - 1013 - /* Re-enable parity attentions on path 1 */ 1014 - bnx2x_pretend_func(bp, 1); 1015 - bnx2x_clear_blocks_parity(bp); 1016 - bnx2x_enable_blocks_parity(bp); 1017 - 1018 - /* Return to current function */ 1019 - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1020 1038 1021 1039 return 0; 1022 1040 }
+25 -13
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
··· 640 640 * [30] MCP Latched ump_tx_parity 641 641 * [31] MCP Latched scpad_parity 642 642 */ 643 - #define MISC_AEU_ENABLE_MCP_PRTY_BITS \ 643 + #define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \ 644 644 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 645 645 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 646 - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 646 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) 647 + 648 + #define MISC_AEU_ENABLE_MCP_PRTY_BITS \ 649 + (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \ 647 650 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 648 651 649 652 /* Below registers control the MCP parity attention output. When 650 653 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are 651 654 * enabled, when cleared - disabled. 652 655 */ 653 - static const u32 mcp_attn_ctl_regs[] = { 654 - MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 655 - MISC_REG_AEU_ENABLE4_NIG_0, 656 - MISC_REG_AEU_ENABLE4_PXP_0, 657 - MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 658 - MISC_REG_AEU_ENABLE4_NIG_1, 659 - MISC_REG_AEU_ENABLE4_PXP_1 656 + static const struct { 657 + u32 addr; 658 + u32 bits; 659 + } mcp_attn_ctl_regs[] = { 660 + { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 661 + MISC_AEU_ENABLE_MCP_PRTY_BITS }, 662 + { MISC_REG_AEU_ENABLE4_NIG_0, 663 + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, 664 + { MISC_REG_AEU_ENABLE4_PXP_0, 665 + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, 666 + { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 667 + MISC_AEU_ENABLE_MCP_PRTY_BITS }, 668 + { MISC_REG_AEU_ENABLE4_NIG_1, 669 + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, 670 + { MISC_REG_AEU_ENABLE4_PXP_1, 671 + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS } 660 672 }; 661 673 662 674 static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) ··· 677 665 u32 reg_val; 678 666 679 667 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { 680 - reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); 668 + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr); 681 669 682 670 if (enable) 683 - reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; 671 + reg_val |= mcp_attn_ctl_regs[i].bits; 684 672 else 685 - reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; 673 + reg_val &= ~mcp_attn_ctl_regs[i].bits; 686 674 687 - REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); 675 + REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val); 688 676 } 689 677 } 690 678
+212 -176
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 503 503 } 504 504 505 505 /* issue a dmae command over the init-channel and wait for completion */ 506 - int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) 506 + int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 507 + u32 *comp) 507 508 { 508 - u32 *wb_comp = bnx2x_sp(bp, wb_comp); 509 509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; 510 510 int rc = 0; 511 511 ··· 518 518 spin_lock_bh(&bp->dmae_lock); 519 519 520 520 /* reset completion */ 521 - *wb_comp = 0; 521 + *comp = 0; 522 522 523 523 /* post the command on the channel used for initializations */ 524 524 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 525 525 526 526 /* wait for completion */ 527 527 udelay(5); 528 - while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 528 + while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 529 529 530 530 if (!cnt || 531 531 (bp->recovery_state != BNX2X_RECOVERY_DONE && ··· 537 537 cnt--; 538 538 udelay(50); 539 539 } 540 - if (*wb_comp & DMAE_PCI_ERR_FLAG) { 540 + if (*comp & DMAE_PCI_ERR_FLAG) { 541 541 BNX2X_ERR("DMAE PCI error!\n"); 542 542 rc = DMAE_PCI_ERROR; 543 543 } ··· 574 574 dmae.len = len32; 575 575 576 576 /* issue the command and wait for completion */ 577 - rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 577 + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); 578 578 if (rc) { 579 579 BNX2X_ERR("DMAE returned failure %d\n", rc); 580 580 bnx2x_panic(); ··· 611 611 dmae.len = len32; 612 612 613 613 /* issue the command and wait for completion */ 614 - rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 614 + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); 615 615 if (rc) { 616 616 BNX2X_ERR("DMAE returned failure %d\n", rc); 617 617 bnx2x_panic(); ··· 751 751 return rc; 752 752 } 753 753 754 + #define MCPR_TRACE_BUFFER_SIZE (0x800) 755 + #define SCRATCH_BUFFER_SIZE(bp) \ 756 + (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000)) 757 + 754 758 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) 755 759 { 756 760 u32 addr, val; ··· 779 775 trace_shmem_base = bp->common.shmem_base; 780 776 else 781 777 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); 782 - addr = trace_shmem_base - 0x800; 778 + 779 + /* sanity */ 780 + if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || 781 + trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + 782 + SCRATCH_BUFFER_SIZE(bp)) { 783 + BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", 784 + trace_shmem_base); 785 + return; 786 + } 787 + 788 + addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; 783 789 784 790 /* validate TRCB signature */ 785 791 mark = REG_RD(bp, addr); ··· 801 787 /* read cyclic buffer pointer */ 802 788 addr += 4; 803 789 mark = REG_RD(bp, addr); 804 - mark = (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 805 - + ((mark + 0x3) & ~0x3) - 0x08000000; 790 + mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; 791 + if (mark >= trace_shmem_base || mark < addr + 4) { 792 + BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); 793 + return; 794 + } 806 795 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); 807 796 808 797 printk("%s", lvl); 809 798 810 799 /* dump buffer after the mark */ 811 - for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { 800 + for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) { 812 801 for (word = 0; word < 8; word++) 813 802 data[word] = htonl(REG_RD(bp, offset + 4*word)); 814 803 data[8] = 0x0; ··· 4297 4280 pr_cont("%s%s", idx ? ", " : "", blk); 4298 4281 } 4299 4282 4300 - static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4301 - int par_num, bool print) 4283 + static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4284 + int *par_num, bool print) 4302 4285 { 4303 - int i = 0; 4304 - u32 cur_bit = 0; 4286 + u32 cur_bit; 4287 + bool res; 4288 + int i; 4289 + 4290 + res = false; 4291 + 4305 4292 for (i = 0; sig; i++) { 4306 - cur_bit = ((u32)0x1 << i); 4293 + cur_bit = (0x1UL << i); 4307 4294 if (sig & cur_bit) { 4308 - switch (cur_bit) { 4309 - case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4310 - if (print) { 4311 - _print_next_block(par_num++, "BRB"); 4295 + res |= true; /* Each bit is real error! */ 4296 + 4297 + if (print) { 4298 + switch (cur_bit) { 4299 + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4300 + _print_next_block((*par_num)++, "BRB"); 4312 4301 _print_parity(bp, 4313 4302 BRB1_REG_BRB1_PRTY_STS); 4314 - } 4315 - break; 4316 - case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4317 - if (print) { 4318 - _print_next_block(par_num++, "PARSER"); 4303 + break; 4304 + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4305 + _print_next_block((*par_num)++, 4306 + "PARSER"); 4319 4307 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4320 - } 4321 - break; 4322 - case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4323 - if (print) { 4324 - _print_next_block(par_num++, "TSDM"); 4308 + break; 4309 + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4310 + _print_next_block((*par_num)++, "TSDM"); 4325 4311 _print_parity(bp, 4326 4312 TSDM_REG_TSDM_PRTY_STS); 4327 - } 4328 - break; 4329 - case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4330 - if (print) { 4331 - _print_next_block(par_num++, 4313 + break; 4314 + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4315 + _print_next_block((*par_num)++, 4332 4316 "SEARCHER"); 4333 4317 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4334 - } 4335 - break; 4336 - case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4337 - if (print) { 4338 - _print_next_block(par_num++, "TCM"); 4339 - _print_parity(bp, 4340 - TCM_REG_TCM_PRTY_STS); 4341 - } 4342 - break; 4343 - case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4344 - if (print) { 4345 - _print_next_block(par_num++, "TSEMI"); 4318 + break; 4319 + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4320 + _print_next_block((*par_num)++, "TCM"); 4321 + _print_parity(bp, TCM_REG_TCM_PRTY_STS); 4322 + break; 4323 + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4324 + _print_next_block((*par_num)++, 4325 + "TSEMI"); 4346 4326 _print_parity(bp, 4347 4327 TSEM_REG_TSEM_PRTY_STS_0); 4348 4328 _print_parity(bp, 4349 4329 TSEM_REG_TSEM_PRTY_STS_1); 4350 - } 4351 - break; 4352 - case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4353 - if (print) { 4354 - _print_next_block(par_num++, "XPB"); 4330 + break; 4331 + 
case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4332 + _print_next_block((*par_num)++, "XPB"); 4355 4333 _print_parity(bp, GRCBASE_XPB + 4356 4334 PB_REG_PB_PRTY_STS); 4335 + break; 4357 4336 } 4358 - break; 4359 4337 } 4360 4338 4361 4339 /* Clear the bit */ ··· 4358 4346 } 4359 4347 } 4360 4348 4361 - return par_num; 4349 + return res; 4362 4350 } 4363 4351 4364 - static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4365 - int par_num, bool *global, 4352 + static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4353 + int *par_num, bool *global, 4366 4354 bool print) 4367 4355 { 4368 - int i = 0; 4369 - u32 cur_bit = 0; 4356 + u32 cur_bit; 4357 + bool res; 4358 + int i; 4359 + 4360 + res = false; 4361 + 4370 4362 for (i = 0; sig; i++) { 4371 - cur_bit = ((u32)0x1 << i); 4363 + cur_bit = (0x1UL << i); 4372 4364 if (sig & cur_bit) { 4365 + res |= true; /* Each bit is real error! */ 4373 4366 switch (cur_bit) { 4374 4367 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4375 4368 if (print) { 4376 - _print_next_block(par_num++, "PBF"); 4369 + _print_next_block((*par_num)++, "PBF"); 4377 4370 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4378 4371 } 4379 4372 break; 4380 4373 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4381 4374 if (print) { 4382 - _print_next_block(par_num++, "QM"); 4375 + _print_next_block((*par_num)++, "QM"); 4383 4376 _print_parity(bp, QM_REG_QM_PRTY_STS); 4384 4377 } 4385 4378 break; 4386 4379 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4387 4380 if (print) { 4388 - _print_next_block(par_num++, "TM"); 4381 + _print_next_block((*par_num)++, "TM"); 4389 4382 _print_parity(bp, TM_REG_TM_PRTY_STS); 4390 4383 } 4391 4384 break; 4392 4385 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4393 4386 if (print) { 4394 - _print_next_block(par_num++, "XSDM"); 4387 + _print_next_block((*par_num)++, "XSDM"); 4395 4388 _print_parity(bp, 4396 4389 XSDM_REG_XSDM_PRTY_STS); 4397 4390 } 4398 4391 break; 4399 4392 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4400 4393 if (print) { 4401 - _print_next_block(par_num++, "XCM"); 4394 + _print_next_block((*par_num)++, "XCM"); 4402 4395 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4403 4396 } 4404 4397 break; 4405 4398 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4406 4399 if (print) { 4407 - _print_next_block(par_num++, "XSEMI"); 4400 + _print_next_block((*par_num)++, 4401 + "XSEMI"); 4408 4402 _print_parity(bp, 4409 4403 XSEM_REG_XSEM_PRTY_STS_0); 4410 4404 _print_parity(bp, ··· 4419 4401 break; 4420 4402 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4421 4403 if (print) { 4422 - _print_next_block(par_num++, 4404 + _print_next_block((*par_num)++, 4423 4405 "DOORBELLQ"); 4424 4406 _print_parity(bp, 4425 4407 DORQ_REG_DORQ_PRTY_STS); ··· 4427 4409 break; 4428 4410 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4429 4411 if (print) { 4430 - _print_next_block(par_num++, "NIG"); 4412 + _print_next_block((*par_num)++, "NIG"); 4431 4413 if (CHIP_IS_E1x(bp)) { 4432 4414 _print_parity(bp, 4433 4415 NIG_REG_NIG_PRTY_STS); ··· 4441 4423 break; 4442 4424 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4443 4425 if (print) 4444 - _print_next_block(par_num++, 4426 + _print_next_block((*par_num)++, 4445 4427 "VAUX PCI CORE"); 4446 4428 *global = true; 4447 4429 break; 4448 4430 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4449 4431 if (print) { 4450 - _print_next_block(par_num++, "DEBUG"); 4432 + _print_next_block((*par_num)++, 4433 + "DEBUG"); 4451 4434 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4452 4435 } 4453 4436 break; 4454 4437 
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4455 4438 if (print) { 4456 - _print_next_block(par_num++, "USDM"); 4439 + _print_next_block((*par_num)++, "USDM"); 4457 4440 _print_parity(bp, 4458 4441 USDM_REG_USDM_PRTY_STS); 4459 4442 } 4460 4443 break; 4461 4444 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4462 4445 if (print) { 4463 - _print_next_block(par_num++, "UCM"); 4446 + _print_next_block((*par_num)++, "UCM"); 4464 4447 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4465 4448 } 4466 4449 break; 4467 4450 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4468 4451 if (print) { 4469 - _print_next_block(par_num++, "USEMI"); 4452 + _print_next_block((*par_num)++, 4453 + "USEMI"); 4470 4454 _print_parity(bp, 4471 4455 USEM_REG_USEM_PRTY_STS_0); 4472 4456 _print_parity(bp, ··· 4477 4457 break; 4478 4458 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4479 4459 if (print) { 4480 - _print_next_block(par_num++, "UPB"); 4460 + _print_next_block((*par_num)++, "UPB"); 4481 4461 _print_parity(bp, GRCBASE_UPB + 4482 4462 PB_REG_PB_PRTY_STS); 4483 4463 } 4484 4464 break; 4485 4465 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4486 4466 if (print) { 4487 - _print_next_block(par_num++, "CSDM"); 4467 + _print_next_block((*par_num)++, "CSDM"); 4488 4468 _print_parity(bp, 4489 4469 CSDM_REG_CSDM_PRTY_STS); 4490 4470 } 4491 4471 break; 4492 4472 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4493 4473 if (print) { 4494 - _print_next_block(par_num++, "CCM"); 4474 + _print_next_block((*par_num)++, "CCM"); 4495 4475 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4496 4476 } 4497 4477 break; ··· 4502 4482 } 4503 4483 } 4504 4484 4505 - return par_num; 4485 + return res; 4506 4486 } 4507 4487 4508 - static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4509 - int par_num, bool print) 4488 + static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4489 + int *par_num, bool print) 4510 4490 { 4511 - int i = 0; 4512 - u32 cur_bit = 0; 4491 + u32 cur_bit; 4492 + bool res; 4493 + int i; 4494 + 4495 + res = false; 4496 + 4513 4497 for (i = 0; sig; i++) { 4514 - cur_bit = ((u32)0x1 << i); 4498 + cur_bit = (0x1UL << i); 4515 4499 if (sig & cur_bit) { 4516 - switch (cur_bit) { 4517 - case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4518 - if (print) { 4519 - _print_next_block(par_num++, "CSEMI"); 4500 + res |= true; /* Each bit is real error! 
*/ 4501 + if (print) { 4502 + switch (cur_bit) { 4503 + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4504 + _print_next_block((*par_num)++, 4505 + "CSEMI"); 4520 4506 _print_parity(bp, 4521 4507 CSEM_REG_CSEM_PRTY_STS_0); 4522 4508 _print_parity(bp, 4523 4509 CSEM_REG_CSEM_PRTY_STS_1); 4524 - } 4525 - break; 4526 - case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4527 - if (print) { 4528 - _print_next_block(par_num++, "PXP"); 4510 + break; 4511 + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4512 + _print_next_block((*par_num)++, "PXP"); 4529 4513 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4530 4514 _print_parity(bp, 4531 4515 PXP2_REG_PXP2_PRTY_STS_0); 4532 4516 _print_parity(bp, 4533 4517 PXP2_REG_PXP2_PRTY_STS_1); 4534 - } 4535 - break; 4536 - case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4537 - if (print) 4538 - _print_next_block(par_num++, 4539 - "PXPPCICLOCKCLIENT"); 4540 - break; 4541 - case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4542 - if (print) { 4543 - _print_next_block(par_num++, "CFC"); 4518 + break; 4519 + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4520 + _print_next_block((*par_num)++, 4521 + "PXPPCICLOCKCLIENT"); 4522 + break; 4523 + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4524 + _print_next_block((*par_num)++, "CFC"); 4544 4525 _print_parity(bp, 4545 4526 CFC_REG_CFC_PRTY_STS); 4546 - } 4547 - break; 4548 - case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4549 - if (print) { 4550 - _print_next_block(par_num++, "CDU"); 4527 + break; 4528 + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4529 + _print_next_block((*par_num)++, "CDU"); 4551 4530 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4552 - } 4553 - break; 4554 - case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4555 - if (print) { 4556 - _print_next_block(par_num++, "DMAE"); 4531 + break; 4532 + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4533 + _print_next_block((*par_num)++, "DMAE"); 4557 4534 _print_parity(bp, 4558 4535 DMAE_REG_DMAE_PRTY_STS); 4559 - } 4560 - break; 4561 - case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4562 - if (print) { 4563 - _print_next_block(par_num++, "IGU"); 4536 + break; 4537 + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4538 + _print_next_block((*par_num)++, "IGU"); 4564 4539 if (CHIP_IS_E1x(bp)) 4565 4540 _print_parity(bp, 4566 4541 HC_REG_HC_PRTY_STS); 4567 4542 else 4568 4543 _print_parity(bp, 4569 4544 IGU_REG_IGU_PRTY_STS); 4570 - } 4571 - break; 4572 - case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4573 - if (print) { 4574 - _print_next_block(par_num++, "MISC"); 4545 + break; 4546 + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4547 + _print_next_block((*par_num)++, "MISC"); 4575 4548 _print_parity(bp, 4576 4549 MISC_REG_MISC_PRTY_STS); 4550 + break; 4577 4551 } 4578 - break; 4579 4552 } 4580 4553 4581 4554 /* Clear the bit */ ··· 4576 4563 } 4577 4564 } 4578 4565 4579 - return par_num; 4566 + return res; 4580 4567 } 4581 4568 4582 - static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4583 - bool *global, bool print) 4569 + static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, 4570 + int *par_num, bool *global, 4571 + bool print) 4584 4572 { 4585 - int i = 0; 4586 - u32 cur_bit = 0; 4573 + bool res = false; 4574 + u32 cur_bit; 4575 + int i; 4576 + 4587 4577 for (i = 0; sig; i++) { 4588 - cur_bit = ((u32)0x1 << i); 4578 + cur_bit = (0x1UL << i); 4589 4579 if (sig & cur_bit) { 4590 4580 switch (cur_bit) { 4591 4581 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4592 4582 if (print) 4593 - _print_next_block(par_num++, "MCP ROM"); 4583 + 
_print_next_block((*par_num)++, 4584 + "MCP ROM"); 4594 4585 *global = true; 4586 + res |= true; 4595 4587 break; 4596 4588 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4597 4589 if (print) 4598 - _print_next_block(par_num++, 4590 + _print_next_block((*par_num)++, 4599 4591 "MCP UMP RX"); 4600 4592 *global = true; 4593 + res |= true; 4601 4594 break; 4602 4595 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4603 4596 if (print) 4604 - _print_next_block(par_num++, 4597 + _print_next_block((*par_num)++, 4605 4598 "MCP UMP TX"); 4606 4599 *global = true; 4600 + res |= true; 4607 4601 break; 4608 4602 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4609 4603 if (print) 4610 - _print_next_block(par_num++, 4604 + _print_next_block((*par_num)++, 4611 4605 "MCP SCPAD"); 4612 - *global = true; 4606 + /* clear latched SCPAD PATIRY from MCP */ 4607 + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 4608 + 1UL << 10); 4613 4609 break; 4614 4610 } 4615 4611 ··· 4627 4605 } 4628 4606 } 4629 4607 4630 - return par_num; 4608 + return res; 4631 4609 } 4632 4610 4633 - static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4634 - int par_num, bool print) 4611 + static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4612 + int *par_num, bool print) 4635 4613 { 4636 - int i = 0; 4637 - u32 cur_bit = 0; 4614 + u32 cur_bit; 4615 + bool res; 4616 + int i; 4617 + 4618 + res = false; 4619 + 4638 4620 for (i = 0; sig; i++) { 4639 - cur_bit = ((u32)0x1 << i); 4621 + cur_bit = (0x1UL << i); 4640 4622 if (sig & cur_bit) { 4641 - switch (cur_bit) { 4642 - case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4643 - if (print) { 4644 - _print_next_block(par_num++, "PGLUE_B"); 4623 + res |= true; /* Each bit is real error! */ 4624 + if (print) { 4625 + switch (cur_bit) { 4626 + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4627 + _print_next_block((*par_num)++, 4628 + "PGLUE_B"); 4645 4629 _print_parity(bp, 4646 - PGLUE_B_REG_PGLUE_B_PRTY_STS); 4647 - } 4648 - break; 4649 - case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4650 - if (print) { 4651 - _print_next_block(par_num++, "ATC"); 4630 + PGLUE_B_REG_PGLUE_B_PRTY_STS); 4631 + break; 4632 + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4633 + _print_next_block((*par_num)++, "ATC"); 4652 4634 _print_parity(bp, 4653 4635 ATC_REG_ATC_PRTY_STS); 4636 + break; 4654 4637 } 4655 - break; 4656 4638 } 4657 - 4658 4639 /* Clear the bit */ 4659 4640 sig &= ~cur_bit; 4660 4641 } 4661 4642 } 4662 4643 4663 - return par_num; 4644 + return res; 4664 4645 } 4665 4646 4666 4647 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4667 4648 u32 *sig) 4668 4649 { 4650 + bool res = false; 4651 + 4669 4652 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4670 4653 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4671 4654 (sig[2] & HW_PRTY_ASSERT_SET_2) || ··· 4687 4660 if (print) 4688 4661 netdev_err(bp->dev, 4689 4662 "Parity errors detected in blocks: "); 4690 - par_num = bnx2x_check_blocks_with_parity0(bp, 4691 - sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4692 - par_num = bnx2x_check_blocks_with_parity1(bp, 4693 - sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4694 - par_num = bnx2x_check_blocks_with_parity2(bp, 4695 - sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4696 - par_num = bnx2x_check_blocks_with_parity3( 4697 - sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4698 - par_num = bnx2x_check_blocks_with_parity4(bp, 4699 - sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4663 + res |= bnx2x_check_blocks_with_parity0(bp, 4664 + sig[0] & 
HW_PRTY_ASSERT_SET_0, &par_num, print); 4665 + res |= bnx2x_check_blocks_with_parity1(bp, 4666 + sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); 4667 + res |= bnx2x_check_blocks_with_parity2(bp, 4668 + sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); 4669 + res |= bnx2x_check_blocks_with_parity3(bp, 4670 + sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); 4671 + res |= bnx2x_check_blocks_with_parity4(bp, 4672 + sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); 4700 4673 4701 4674 if (print) 4702 4675 pr_cont("\n"); 4676 + } 4703 4677 4704 - return true; 4705 - } else 4706 - return false; 4678 + return res; 4707 4679 } 4708 4680 4709 4681 /** ··· 7152 7126 int port = BP_PORT(bp); 7153 7127 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7154 7128 u32 low, high; 7155 - u32 val; 7129 + u32 val, reg; 7156 7130 7157 7131 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7158 7132 ··· 7296 7270 /* Enable DCBX attention for all but E1 */ 7297 7271 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7298 7272 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7273 + 7274 + /* SCPAD_PARITY should NOT trigger close the gates */ 7275 + reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; 7276 + REG_WR(bp, reg, 7277 + REG_RD(bp, reg) & 7278 + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7279 + 7280 + reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; 7281 + REG_WR(bp, reg, 7282 + REG_RD(bp, reg) & 7283 + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7299 7284 7300 7285 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7301 7286 ··· 11722 11685 static int bnx2x_open(struct net_device *dev) 11723 11686 { 11724 11687 struct bnx2x *bp = netdev_priv(dev); 11725 - bool global = false; 11726 - int other_engine = BP_PATH(bp) ? 0 : 1; 11727 - bool other_load_status, load_status; 11728 11688 int rc; 11729 11689 11730 11690 bp->stats_init = true; ··· 11737 11703 * Parity recovery is only relevant for PF driver. 11738 11704 */ 11739 11705 if (IS_PF(bp)) { 11706 + int other_engine = BP_PATH(bp) ? 
0 : 1; 11707 + bool other_load_status, load_status; 11708 + bool global = false; 11709 + 11740 11710 other_load_status = bnx2x_get_load_status(bp, other_engine); 11741 11711 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 11742 11712 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || ··· 12118 12080 struct device *dev = &bp->pdev->dev; 12119 12081 12120 12082 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 12121 - bp->flags |= USING_DAC_FLAG; 12122 12083 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 12123 12084 dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); 12124 12085 return -EIO; ··· 12285 12248 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 12286 12249 12287 12250 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 12288 - if (bp->flags & USING_DAC_FLAG) 12289 - dev->features |= NETIF_F_HIGHDMA; 12251 + dev->features |= NETIF_F_HIGHDMA; 12290 12252 12291 12253 /* Add Loopback capability to the device */ 12292 12254 dev->hw_features |= NETIF_F_LOOPBACK; ··· 12648 12612 return BNX2X_MULTI_TX_COS_E1X; 12649 12613 case BCM57712: 12650 12614 case BCM57712_MF: 12651 - case BCM57712_VF: 12652 12615 return BNX2X_MULTI_TX_COS_E2_E3A0; 12653 12616 case BCM57800: 12654 12617 case BCM57800_MF: 12655 - case BCM57800_VF: 12656 12618 case BCM57810: 12657 12619 case BCM57810_MF: 12658 12620 case BCM57840_4_10: 12659 12621 case BCM57840_2_20: 12660 12622 case BCM57840_O: 12661 12623 case BCM57840_MFO: 12662 - case BCM57810_VF: 12663 12624 case BCM57840_MF: 12664 - case BCM57840_VF: 12665 12625 case BCM57811: 12666 12626 case BCM57811_MF: 12667 - case BCM57811_VF: 12668 12627 return BNX2X_MULTI_TX_COS_E3B0; 12628 + case BCM57712_VF: 12629 + case BCM57800_VF: 12630 + case BCM57810_VF: 12631 + case BCM57840_VF: 12632 + case BCM57811_VF: 12669 12633 return 1; 12670 12634 default: 12671 12635 pr_err("Unknown board_type (%d), aborting\n", chip_id);
+17 -12
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 470 470 bnx2x_vfop_qdtor, cmd->done); 471 471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, 472 472 cmd->block); 473 + } else { 474 + BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid); 475 + return -ENOMEM; 473 476 } 474 - DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n", 475 - vf->abs_vfid, vfop->rc); 476 - return -ENOMEM; 477 477 } 478 478 479 479 static void ··· 3390 3390 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3391 3391 if (rc) { 3392 3392 BNX2X_ERR("failed to delete eth macs\n"); 3393 - return -EINVAL; 3393 + rc = -EINVAL; 3394 + goto out; 3394 3395 } 3395 3396 3396 3397 /* remove existing uc list macs */ 3397 3398 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3398 3399 if (rc) { 3399 3400 BNX2X_ERR("failed to delete uc_list macs\n"); 3400 - return -EINVAL; 3401 + rc = -EINVAL; 3402 + goto out; 3401 3403 } 3402 3404 3403 3405 /* configure the new mac to device */ ··· 3407 3405 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3408 3406 BNX2X_ETH_MAC, &ramrod_flags); 3409 3407 3408 + out: 3410 3409 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3411 3410 } 3412 3411 ··· 3470 3467 &ramrod_flags); 3471 3468 if (rc) { 3472 3469 BNX2X_ERR("failed to delete vlans\n"); 3473 - return -EINVAL; 3470 + rc = -EINVAL; 3471 + goto out; 3474 3472 } 3475 3473 3476 3474 /* send queue update ramrod to configure default vlan and silent ··· 3505 3501 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3506 3502 if (rc) { 3507 3503 BNX2X_ERR("failed to configure vlan\n"); 3508 - return -EINVAL; 3504 + rc = -EINVAL; 3505 + goto out; 3509 3506 } 3510 3507 3511 3508 /* configure default vlan to vf queue and set silent ··· 3524 3519 rc = bnx2x_queue_state_change(bp, &q_params); 3525 3520 if (rc) { 3526 3521 BNX2X_ERR("Failed to configure default VLAN\n"); 3527 - return rc; 3522 + goto out; 3528 3523 } 3529 3524 3530 3525 /* clear the flag indicating that this VF needs its vlan 3531 - * (will only be set if the HV configured th Vlan before vf was 3532 - * and we were called because the VF came up later 3526 + * (will only be set if the HV configured the Vlan before vf was 3527 + * up and we were called because the VF came up later 3533 3528 */ 3529 + out: 3534 3530 vf->cfg_flags &= ~VF_CFG_VLAN; 3535 - 3536 3531 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3537 3532 } 3538 - return 0; 3533 + return rc; 3539 3534 } 3540 3535 3541 3536 /* crc is the first field in the bulletin board. Compute the crc over the
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
··· 196 196 197 197 } else if (bp->func_stx) { 198 198 *stats_comp = 0; 199 - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 199 + bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); 200 200 } 201 201 } 202 202
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
··· 980 980 dmae.len = len32; 981 981 982 982 /* issue the command and wait for completion */ 983 - return bnx2x_issue_dmae_with_comp(bp, &dmae); 983 + return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); 984 984 } 985 985 986 986 static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+14 -9
drivers/net/ethernet/calxeda/xgmac.c
··· 106 106 #define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ 107 107 108 108 #define XGMAC_ADDR_AE 0x80000000 109 - #define XGMAC_MAX_FILTER_ADDR 31 110 109 111 110 /* PMT Control and Status */ 112 111 #define XGMAC_PMT_POINTER_RESET 0x80000000 ··· 383 384 struct device *device; 384 385 struct napi_struct napi; 385 386 387 + int max_macs; 386 388 struct xgmac_extra_stats xstats; 387 389 388 390 spinlock_t stats_lock; ··· 1291 1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", 1292 1292 netdev_mc_count(dev), netdev_uc_count(dev)); 1293 1293 1294 - if (dev->flags & IFF_PROMISC) { 1295 - writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); 1296 - return; 1297 - } 1294 + if (dev->flags & IFF_PROMISC) 1295 + value |= XGMAC_FRAME_FILTER_PR; 1298 1296 1299 1297 memset(hash_filter, 0, sizeof(hash_filter)); 1300 1298 1301 - if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { 1299 + if (netdev_uc_count(dev) > priv->max_macs) { 1302 1300 use_hash = true; 1303 1301 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; 1304 1302 } ··· 1319 1321 goto out; 1320 1322 } 1321 1323 1322 - if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { 1324 + if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) { 1323 1325 use_hash = true; 1324 1326 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; 1325 1327 } else { ··· 1340 1342 } 1341 1343 1342 1344 out: 1343 - for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) 1344 - xgmac_set_mac_addr(ioaddr, NULL, reg); 1345 + for (i = reg; i <= priv->max_macs; i++) 1346 + xgmac_set_mac_addr(ioaddr, NULL, i); 1345 1347 for (i = 0; i < XGMAC_NUM_HASH; i++) 1346 1348 writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); 1347 1349 ··· 1758 1760 1759 1761 uid = readl(priv->base + XGMAC_VERSION); 1760 1762 netdev_info(ndev, "h/w version is 0x%x\n", uid); 1763 + 1764 + /* Figure out how many valid mac address filter registers we have */ 1765 + writel(1, priv->base + XGMAC_ADDR_HIGH(31)); 1766 + if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1) 1767 + priv->max_macs = 31; 1768 + else 1769 + priv->max_macs = 7; 1761 1770 1762 1771 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1763 1772 ndev->irq = platform_get_irq(pdev, 0);
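The xgmac hunk replaces the fixed XGMAC_MAX_FILTER_ADDR limit with a runtime probe: write to the highest MAC-address filter register, and if the value reads back the core has the larger filter bank (max_macs = 31), otherwise the smaller one (max_macs = 7). The filter loops are then bounded by priv->max_macs, which also fixes the clear loop that previously rewrote slot reg repeatedly instead of each remaining slot i. A minimal user-space sketch of the same write-then-read-back probe; reg_read/reg_write and slots_implemented are stand-ins for the driver's readl()/writel() on the mapped register block, not driver symbols:

    /* Probe how many address-filter slots exist by writing the highest
     * slot and checking whether the value sticks. */
    #include <stdio.h>

    #define NUM_SLOTS 32
    static unsigned int regs[NUM_SLOTS];    /* pretend register file */
    static int slots_implemented = 8;       /* set to 32 to see the other path */

    static void reg_write(int slot, unsigned int val)
    {
        if (slot < slots_implemented)
            regs[slot] = val;               /* unimplemented slots drop writes */
    }

    static unsigned int reg_read(int slot)
    {
        return slot < slots_implemented ? regs[slot] : 0;
    }

    int main(void)
    {
        int max_macs;

        reg_write(31, 1);
        max_macs = (reg_read(31) == 1) ? 31 : 7;
        printf("max_macs = %d\n", max_macs);
        return 0;
    }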
+38 -18
drivers/net/ethernet/davicom/dm9000.c
··· 158 158 159 159 /* DM9000 network board routine ---------------------------- */ 160 160 161 - static void 162 - dm9000_reset(board_info_t * db) 163 - { 164 - dev_dbg(db->dev, "resetting device\n"); 165 - 166 - /* RESET device */ 167 - writeb(DM9000_NCR, db->io_addr); 168 - udelay(200); 169 - writeb(NCR_RST, db->io_data); 170 - udelay(200); 171 - } 172 - 173 161 /* 174 162 * Read a byte from I/O port 175 163 */ ··· 177 189 { 178 190 writeb(reg, db->io_addr); 179 191 writeb(value, db->io_data); 192 + } 193 + 194 + static void 195 + dm9000_reset(board_info_t *db) 196 + { 197 + dev_dbg(db->dev, "resetting device\n"); 198 + 199 + /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29 200 + * The essential point is that we have to do a double reset, and the 201 + * instruction is to set LBK into MAC internal loopback mode. 202 + */ 203 + iow(db, DM9000_NCR, 0x03); 204 + udelay(100); /* Application note says at least 20 us */ 205 + if (ior(db, DM9000_NCR) & 1) 206 + dev_err(db->dev, "dm9000 did not respond to first reset\n"); 207 + 208 + iow(db, DM9000_NCR, 0); 209 + iow(db, DM9000_NCR, 0x03); 210 + udelay(100); 211 + if (ior(db, DM9000_NCR) & 1) 212 + dev_err(db->dev, "dm9000 did not respond to second reset\n"); 180 213 } 181 214 182 215 /* routines for sending block to chip */ ··· 753 744 static void dm9000_show_carrier(board_info_t *db, 754 745 unsigned carrier, unsigned nsr) 755 746 { 747 + int lpa; 756 748 struct net_device *ndev = db->ndev; 749 + struct mii_if_info *mii = &db->mii; 757 750 unsigned ncr = dm9000_read_locked(db, DM9000_NCR); 758 751 759 - if (carrier) 760 - dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n", 752 + if (carrier) { 753 + lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); 754 + dev_info(db->dev, 755 + "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n", 761 756 ndev->name, (nsr & NSR_SPEED) ? 10 : 100, 762 - (ncr & NCR_FDX) ? "full" : "half"); 763 - else 757 + (ncr & NCR_FDX) ? "full" : "half", lpa); 758 + } else { 764 759 dev_info(db->dev, "%s: link down\n", ndev->name); 760 + } 765 761 } 766 762 767 763 static void ··· 904 890 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); 905 891 906 892 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 893 + iow(db, DM9000_GPR, 0); 907 894 908 - dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ 909 - dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ 895 + /* If we are dealing with DM9000B, some extra steps are required: a 896 + * manual phy reset, and setting init params. 897 + */ 898 + if (db->type == TYPE_DM9000B) { 899 + dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); 900 + dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); 901 + } 910 902 911 903 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 912 904
+2 -1
drivers/net/ethernet/emulex/benet/be_cmds.c
··· 1198 1198 1199 1199 if (lancer_chip(adapter)) { 1200 1200 req->hdr.version = 1; 1201 - req->if_id = cpu_to_le16(adapter->if_handle); 1202 1201 } else if (BEx_chip(adapter)) { 1203 1202 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) 1204 1203 req->hdr.version = 2; ··· 1205 1206 req->hdr.version = 2; 1206 1207 } 1207 1208 1209 + if (req->hdr.version > 0) 1210 + req->if_id = cpu_to_le16(adapter->if_handle); 1208 1211 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1209 1212 req->ulp_num = BE_ULP1_NUM; 1210 1213 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
+29 -11
drivers/net/ethernet/freescale/gianfar.c
··· 88 88 89 89 #include <asm/io.h> 90 90 #include <asm/reg.h> 91 + #include <asm/mpc85xx.h> 91 92 #include <asm/irq.h> 92 93 #include <asm/uaccess.h> 93 94 #include <linux/module.h> ··· 940 939 } 941 940 } 942 941 943 - static void gfar_detect_errata(struct gfar_private *priv) 942 + static void __gfar_detect_errata_83xx(struct gfar_private *priv) 944 943 { 945 - struct device *dev = &priv->ofdev->dev; 946 944 unsigned int pvr = mfspr(SPRN_PVR); 947 945 unsigned int svr = mfspr(SPRN_SVR); 948 946 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ ··· 957 957 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 958 958 priv->errata |= GFAR_ERRATA_76; 959 959 960 - /* MPC8313 and MPC837x all rev */ 961 - if ((pvr == 0x80850010 && mod == 0x80b0) || 962 - (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 963 - priv->errata |= GFAR_ERRATA_A002; 964 - 965 - /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ 966 - if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || 967 - (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) 960 + /* MPC8313 Rev < 2.0 */ 961 + if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) 968 962 priv->errata |= GFAR_ERRATA_12; 963 + } 964 + 965 + static void __gfar_detect_errata_85xx(struct gfar_private *priv) 966 + { 967 + unsigned int svr = mfspr(SPRN_SVR); 968 + 969 + if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) 970 + priv->errata |= GFAR_ERRATA_12; 971 + if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || 972 + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) 973 + priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ 974 + } 975 + 976 + static void gfar_detect_errata(struct gfar_private *priv) 977 + { 978 + struct device *dev = &priv->ofdev->dev; 979 + 980 + /* no plans to fix */ 981 + priv->errata |= GFAR_ERRATA_A002; 982 + 983 + if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) 984 + __gfar_detect_errata_85xx(priv); 985 + else /* non-mpc85xx parts, i.e. e300 core based */ 986 + __gfar_detect_errata_83xx(priv); 969 987 970 988 if (priv->errata) 971 989 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", ··· 1617 1599 /* Normaly TSEC should not hang on GRS commands, so we should 1618 1600 * actually wait for IEVENT_GRSC flag. 1619 1601 */ 1620 - if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) 1602 + if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) 1621 1603 return 0; 1622 1604 1623 1605 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+2
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 2655 2655 (hw->phy.media_type != e1000_media_type_copper)) 2656 2656 return -EOPNOTSUPP; 2657 2657 2658 + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); 2659 + 2658 2660 ret_val = igb_get_eee(netdev, &eee_curr); 2659 2661 if (ret_val) 2660 2662 return ret_val;
+3 -4
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 1131 1131 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); 1132 1132 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); 1133 1133 spin_unlock_bh(&mp->mib_counters_lock); 1134 - 1135 - mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1136 1134 } 1137 1135 1138 1136 static void mib_counters_timer_wrapper(unsigned long _mp) 1139 1137 { 1140 1138 struct mv643xx_eth_private *mp = (void *)_mp; 1141 - 1142 1139 mib_counters_update(mp); 1140 + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1143 1141 } 1144 1142 1145 1143 ··· 2235 2237 mp->int_mask |= INT_TX_END_0 << i; 2236 2238 } 2237 2239 2240 + add_timer(&mp->mib_counters_timer); 2238 2241 port_start(mp); 2239 2242 2240 2243 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); ··· 2533 2534 if (!ppdev) 2534 2535 return -ENOMEM; 2535 2536 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 2537 + ppdev->dev.of_node = pnp; 2536 2538 2537 2539 ret = platform_device_add_resources(ppdev, &res, 1); 2538 2540 if (ret) ··· 2916 2916 mp->mib_counters_timer.data = (unsigned long)mp; 2917 2917 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2918 2918 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2919 - add_timer(&mp->mib_counters_timer); 2920 2919 2921 2920 spin_lock_init(&mp->mib_counters_lock); 2922 2921
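The mv643xx_eth change splits counter updating from timer re-arming: mib_counters_update() no longer calls mod_timer(), only the timer wrapper re-arms itself after each pass, and the timer is first armed from the open path instead of at probe, so a stats read on a port that is not up cannot restart polling. A rough sketch of that split using the same old-style timer API; my_dev and the function names are illustrative only, not driver code:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {
        struct timer_list stats_timer;
    };

    static void my_stats_update(struct my_dev *d)
    {
        /* read hardware MIB counters; deliberately no mod_timer() here,
         * so a direct call from the stats path never re-arms polling */
    }

    static void my_stats_timer_fn(unsigned long data)
    {
        struct my_dev *d = (struct my_dev *)data;

        my_stats_update(d);
        mod_timer(&d->stats_timer, jiffies + 30 * HZ);  /* wrapper re-arms */
    }

    static void my_probe_init(struct my_dev *d)
    {
        setup_timer(&d->stats_timer, my_stats_timer_fn, (unsigned long)d);
    }

    static void my_open(struct my_dev *d)
    {
        mod_timer(&d->stats_timer, jiffies + 30 * HZ);  /* first armed at open */
    }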
+24 -17
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 70 70 put_page(page); 71 71 return -ENOMEM; 72 72 } 73 - page_alloc->size = PAGE_SIZE << order; 73 + page_alloc->page_size = PAGE_SIZE << order; 74 74 page_alloc->page = page; 75 75 page_alloc->dma = dma; 76 - page_alloc->offset = frag_info->frag_align; 76 + page_alloc->page_offset = frag_info->frag_align; 77 77 /* Not doing get_page() for each frag is a big win 78 78 * on asymetric workloads. 79 79 */ 80 - atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); 80 + atomic_set(&page->_count, 81 + page_alloc->page_size / frag_info->frag_stride); 81 82 return 0; 82 83 } 83 84 ··· 97 96 for (i = 0; i < priv->num_frags; i++) { 98 97 frag_info = &priv->frag_info[i]; 99 98 page_alloc[i] = ring_alloc[i]; 100 - page_alloc[i].offset += frag_info->frag_stride; 101 - if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) 99 + page_alloc[i].page_offset += frag_info->frag_stride; 100 + 101 + if (page_alloc[i].page_offset + frag_info->frag_stride <= 102 + ring_alloc[i].page_size) 102 103 continue; 104 + 103 105 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) 104 106 goto out; 105 107 } 106 108 107 109 for (i = 0; i < priv->num_frags; i++) { 108 110 frags[i] = ring_alloc[i]; 109 - dma = ring_alloc[i].dma + ring_alloc[i].offset; 111 + dma = ring_alloc[i].dma + ring_alloc[i].page_offset; 110 112 ring_alloc[i] = page_alloc[i]; 111 113 rx_desc->data[i].addr = cpu_to_be64(dma); 112 114 } ··· 121 117 frag_info = &priv->frag_info[i]; 122 118 if (page_alloc[i].page != ring_alloc[i].page) { 123 119 dma_unmap_page(priv->ddev, page_alloc[i].dma, 124 - page_alloc[i].size, PCI_DMA_FROMDEVICE); 120 + page_alloc[i].page_size, PCI_DMA_FROMDEVICE); 125 121 page = page_alloc[i].page; 126 122 atomic_set(&page->_count, 1); 127 123 put_page(page); ··· 135 131 int i) 136 132 { 137 133 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; 134 + u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; 138 135 139 - if (frags[i].offset + frag_info->frag_stride > frags[i].size) 140 - dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, 141 - PCI_DMA_FROMDEVICE); 136 + 137 + if (next_frag_end > frags[i].page_size) 138 + dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, 139 + PCI_DMA_FROMDEVICE); 142 140 143 141 if (frags[i].page) 144 142 put_page(frags[i].page); ··· 167 161 168 162 page_alloc = &ring->page_alloc[i]; 169 163 dma_unmap_page(priv->ddev, page_alloc->dma, 170 - page_alloc->size, PCI_DMA_FROMDEVICE); 164 + page_alloc->page_size, PCI_DMA_FROMDEVICE); 171 165 page = page_alloc->page; 172 166 atomic_set(&page->_count, 1); 173 167 put_page(page); ··· 190 184 i, page_count(page_alloc->page)); 191 185 192 186 dma_unmap_page(priv->ddev, page_alloc->dma, 193 - page_alloc->size, PCI_DMA_FROMDEVICE); 194 - while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { 187 + page_alloc->page_size, PCI_DMA_FROMDEVICE); 188 + while (page_alloc->page_offset + frag_info->frag_stride < 189 + page_alloc->page_size) { 195 190 put_page(page_alloc->page); 196 - page_alloc->offset += frag_info->frag_stride; 191 + page_alloc->page_offset += frag_info->frag_stride; 197 192 } 198 193 page_alloc->page = NULL; 199 194 } ··· 485 478 /* Save page reference in skb */ 486 479 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); 487 480 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); 488 - skb_frags_rx[nr].page_offset = frags[nr].offset; 481 + skb_frags_rx[nr].page_offset = frags[nr].page_offset; 489 482 skb->truesize += 
frag_info->frag_stride; 490 483 frags[nr].page = NULL; 491 484 } ··· 524 517 525 518 /* Get pointer to first fragment so we could copy the headers into the 526 519 * (linear part of the) skb */ 527 - va = page_address(frags[0].page) + frags[0].offset; 520 + va = page_address(frags[0].page) + frags[0].page_offset; 528 521 529 522 if (length <= SMALL_PACKET_SIZE) { 530 523 /* We are copying all relevant data to the skb - temporarily ··· 652 645 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), 653 646 DMA_FROM_DEVICE); 654 647 ethh = (struct ethhdr *)(page_address(frags[0].page) + 655 - frags[0].offset); 648 + frags[0].page_offset); 656 649 657 650 if (is_multicast_ether_addr(ethh->h_dest)) { 658 651 struct mlx4_mac_entry *entry;
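Besides renaming offset/size to page_offset/page_size, the mlx4 receive allocator keeps the "one reference per fragment slice" accounting: a freshly mapped (possibly higher-order) page gets its refcount set to page_size / frag_stride up front so the driver avoids a get_page() per fragment, and the page is unmapped once page_offset + 2 * frag_stride would no longer fit. A quick arithmetic check of that sizing; the numbers are only an example, not what the driver necessarily picks:

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_size   = 4096 << 2;   /* PAGE_SIZE << order, e.g. order 2 */
        unsigned int frag_stride = 2048;        /* bytes reserved per RX fragment */
        unsigned int refs = page_size / frag_stride;

        /* one page reference per slice that can be handed to the stack */
        printf("slices per page (initial refcount): %u\n", refs);   /* 8 */
        return 0;
    }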
+2 -2
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 237 237 struct mlx4_en_rx_alloc { 238 238 struct page *page; 239 239 dma_addr_t dma; 240 - u32 offset; 241 - u32 size; 240 + u32 page_offset; 241 + u32 page_size; 242 242 }; 243 243 244 244 struct mlx4_en_tx_ring {
+16 -6
drivers/net/ethernet/moxa/moxart_ether.c
··· 448 448 irq = irq_of_parse_and_map(node, 0); 449 449 if (irq <= 0) { 450 450 netdev_err(ndev, "irq_of_parse_and_map failed\n"); 451 - return -EINVAL; 451 + ret = -EINVAL; 452 + goto irq_map_fail; 452 453 } 453 454 454 455 priv = netdev_priv(ndev); ··· 473 472 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * 474 473 TX_DESC_NUM, &priv->tx_base, 475 474 GFP_DMA | GFP_KERNEL); 476 - if (priv->tx_desc_base == NULL) 475 + if (priv->tx_desc_base == NULL) { 476 + ret = -ENOMEM; 477 477 goto init_fail; 478 + } 478 479 479 480 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * 480 481 RX_DESC_NUM, &priv->rx_base, 481 482 GFP_DMA | GFP_KERNEL); 482 - if (priv->rx_desc_base == NULL) 483 + if (priv->rx_desc_base == NULL) { 484 + ret = -ENOMEM; 483 485 goto init_fail; 486 + } 484 487 485 488 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, 486 489 GFP_ATOMIC); 487 - if (!priv->tx_buf_base) 490 + if (!priv->tx_buf_base) { 491 + ret = -ENOMEM; 488 492 goto init_fail; 493 + } 489 494 490 495 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, 491 496 GFP_ATOMIC); 492 - if (!priv->rx_buf_base) 497 + if (!priv->rx_buf_base) { 498 + ret = -ENOMEM; 493 499 goto init_fail; 500 + } 494 501 495 502 platform_set_drvdata(pdev, ndev); 496 503 ··· 531 522 init_fail: 532 523 netdev_err(ndev, "init failed\n"); 533 524 moxart_mac_free_memory(ndev); 534 - 525 + irq_map_fail: 526 + free_netdev(ndev); 535 527 return ret; 536 528 } 537 529
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
··· 665 665 return err; 666 666 } 667 667 668 - if (channel->tx_count) { 668 + if (qlcnic_82xx_check(adapter) && channel->tx_count) { 669 669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count); 670 670 if (err) 671 671 return err;
+5 -8
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 2257 2257 2258 2258 err = qlcnic_alloc_adapter_resources(adapter); 2259 2259 if (err) 2260 - goto err_out_free_netdev; 2260 + goto err_out_free_wq; 2261 2261 2262 2262 adapter->dev_rst_time = jiffies; 2263 2263 adapter->ahw->revision_id = pdev->revision; ··· 2395 2395 2396 2396 err_out_free_hw: 2397 2397 qlcnic_free_adapter_resources(adapter); 2398 + 2399 + err_out_free_wq: 2400 + destroy_workqueue(adapter->qlcnic_wq); 2398 2401 2399 2402 err_out_free_netdev: 2400 2403 free_netdev(netdev); ··· 3651 3648 u8 max_hw = QLCNIC_MAX_TX_RINGS; 3652 3649 u32 max_allowed; 3653 3650 3654 - if (!qlcnic_82xx_check(adapter)) { 3655 - netdev_err(netdev, "No Multi TX-Q support\n"); 3656 - return -EINVAL; 3657 - } 3658 - 3659 3651 if (!qlcnic_use_msi_x && !qlcnic_use_msi) { 3660 3652 netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n"); 3661 3653 return -EINVAL; ··· 3690 3692 u8 max_hw = adapter->ahw->max_rx_ques; 3691 3693 u32 max_allowed; 3692 3694 3693 - if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && 3694 - !qlcnic_use_msi) { 3695 + if (!qlcnic_use_msi_x && !qlcnic_use_msi) { 3695 3696 netdev_err(netdev, "No RSS support in INT-x mode\n"); 3696 3697 return -EINVAL; 3697 3698 }
+4
drivers/net/ethernet/renesas/sh_eth.c
··· 688 688 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 689 689 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 690 690 EESR_TDE | EESR_ECI, 691 + .fdr_value = 0x0000070f, 692 + .rmcr_value = 0x00000001, 691 693 692 694 .apr = 1, 693 695 .mpr = 1, 694 696 .tpauser = 1, 695 697 .bculr = 1, 696 698 .hw_swap = 1, 699 + .rpadir = 1, 700 + .rpadir_value = 2 << 16, 697 701 .no_trimd = 1, 698 702 .no_ade = 1, 699 703 .tsu = 1,
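The sh_eth additions turn on receive padding (.rpadir = 1) with .rpadir_value = 2 << 16, presumably a pad length of 2 encoded in the upper field of that register, alongside larger FDR and RMCR settings. A 2-byte pad in front of the 14-byte Ethernet header is the usual trick to land the IP header on a 4-byte boundary; the arithmetic, with the field placement assumed from the 2 << 16 constant:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pad     = 2;    /* bytes inserted before the frame */
        unsigned int eth_hdr = 14;   /* Ethernet header length */

        printf("IP header offset: %u (mod 4 = %u)\n",
               pad + eth_hdr, (pad + eth_hdr) % 4);
        printf("rpadir_value: %#x\n", pad << 16);   /* 0x20000, as in the patch */
        return 0;
    }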
+63 -20
drivers/net/ethernet/sfc/ef10.c
··· 444 444 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), 445 445 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), 446 446 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), 447 + EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), 448 + EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), 449 + EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), 450 + EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), 451 + EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), 452 + EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), 453 + EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), 454 + EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), 455 + EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), 456 + EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), 457 + EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), 458 + EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), 447 459 }; 448 460 449 461 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ ··· 510 498 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ 511 499 (1ULL << EF10_STAT_rx_length_error)) 512 500 513 - #if BITS_PER_LONG == 64 514 - #define STAT_MASK_BITMAP(bits) (bits) 515 - #else 516 - #define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 517 - #endif 501 + /* These statistics are only provided if the firmware supports the 502 + * capability PM_AND_RXDP_COUNTERS. 503 + */ 504 + #define HUNT_PM_AND_RXDP_STAT_MASK ( \ 505 + (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ 506 + (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ 507 + (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ 508 + (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ 509 + (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ 510 + (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ 511 + (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ 512 + (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ 513 + (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ 514 + (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ 515 + (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \ 516 + (1ULL << EF10_STAT_rx_dp_emerg_wait)) 518 517 519 - static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) 518 + static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 520 519 { 521 - static const unsigned long hunt_40g_stat_mask[] = { 522 - STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 523 - HUNT_40G_EXTRA_STAT_MASK) 524 - }; 525 - static const unsigned long hunt_10g_only_stat_mask[] = { 526 - STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 527 - HUNT_10G_ONLY_STAT_MASK) 528 - }; 520 + u64 raw_mask = HUNT_COMMON_STAT_MASK; 529 521 u32 port_caps = efx_mcdi_phy_get_caps(efx); 522 + struct efx_ef10_nic_data *nic_data = efx->nic_data; 530 523 531 524 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) 532 - return hunt_40g_stat_mask; 525 + raw_mask |= HUNT_40G_EXTRA_STAT_MASK; 533 526 else 534 - return hunt_10g_only_stat_mask; 527 + raw_mask |= HUNT_10G_ONLY_STAT_MASK; 528 + 529 + if (nic_data->datapath_caps & 530 + (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) 531 + raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; 532 + 533 + return raw_mask; 534 + } 535 + 536 + static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) 537 + { 538 + u64 raw_mask = efx_ef10_raw_stat_mask(efx); 539 + 540 + #if BITS_PER_LONG == 64 541 + mask[0] = raw_mask; 542 + #else 543 + mask[0] = raw_mask & 0xffffffff; 544 + mask[1] = raw_mask >> 32; 545 + #endif 535 546 } 536 547 537 548 static size_t 
efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 538 549 { 550 + DECLARE_BITMAP(mask, EF10_STAT_COUNT); 551 + 552 + efx_ef10_get_stat_mask(efx, mask); 539 553 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 540 - efx_ef10_stat_mask(efx), names); 554 + mask, names); 541 555 } 542 556 543 557 static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) 544 558 { 545 559 struct efx_ef10_nic_data *nic_data = efx->nic_data; 546 - const unsigned long *stats_mask = efx_ef10_stat_mask(efx); 560 + DECLARE_BITMAP(mask, EF10_STAT_COUNT); 547 561 __le64 generation_start, generation_end; 548 562 u64 *stats = nic_data->stats; 549 563 __le64 *dma_stats; 564 + 565 + efx_ef10_get_stat_mask(efx, mask); 550 566 551 567 dma_stats = efx->stats_buffer.addr; 552 568 nic_data = efx->nic_data; ··· 583 543 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) 584 544 return 0; 585 545 rmb(); 586 - efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, 546 + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 587 547 stats, efx->stats_buffer.addr, false); 548 + rmb(); 588 549 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 589 550 if (generation_end != generation_start) 590 551 return -EAGAIN; ··· 604 563 static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, 605 564 struct rtnl_link_stats64 *core_stats) 606 565 { 607 - const unsigned long *mask = efx_ef10_stat_mask(efx); 566 + DECLARE_BITMAP(mask, EF10_STAT_COUNT); 608 567 struct efx_ef10_nic_data *nic_data = efx->nic_data; 609 568 u64 *stats = nic_data->stats; 610 569 size_t stats_count = 0, index; 611 570 int retry; 571 + 572 + efx_ef10_get_stat_mask(efx, mask); 612 573 613 574 /* If we're unlucky enough to read statistics during the DMA, wait 614 575 * up to 10ms for it to finish (typically takes <500us)
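The ef10 statistics rework drops the two precompiled bitmaps in favour of a 64-bit raw mask assembled at run time: the common stats, plus either the 40G or 10G extras depending on port capabilities, plus the new PM/RXDP counters only when the firmware advertises PM_AND_RXDP_COUNTERS. The raw mask is then expanded into an unsigned-long bitmap whose layout depends on BITS_PER_LONG; that expansion on its own looks like this (plain user-space C, raw_mask value invented for illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    int main(void)
    {
        uint64_t raw_mask = 0x0000001f00f0ffffULL;   /* example capability mask */
        unsigned long mask[2] = { 0, 0 };

        if (CHAR_BIT * sizeof(long) == 64) {
            mask[0] = (unsigned long)raw_mask;       /* one word holds it all */
        } else {
            mask[0] = (unsigned long)(raw_mask & 0xffffffff);
            mask[1] = (unsigned long)(raw_mask >> 32);
        }
        printf("mask[0] = %#lx\n", mask[0]);
        return 0;
    }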
+17 -1
drivers/net/ethernet/sfc/mcdi.c
··· 963 963 bool *was_attached) 964 964 { 965 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 966 - MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); 966 + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); 967 967 size_t outlen; 968 968 int rc; 969 969 ··· 979 979 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { 980 980 rc = -EIO; 981 981 goto fail; 982 + } 983 + 984 + /* We currently assume we have control of the external link 985 + * and are completely trusted by firmware. Abort probing 986 + * if that's not true for this function. 987 + */ 988 + if (driver_operating && 989 + outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN && 990 + (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) & 991 + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | 992 + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) != 993 + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | 994 + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) { 995 + netif_err(efx, probe, efx->net_dev, 996 + "This driver version only supports one function per port\n"); 997 + return -ENODEV; 982 998 } 983 999 984 1000 if (was_attached != NULL)
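The MC_CMD_DRV_ATTACH change refuses to finish probing unless firmware granted this function both the LINKCTRL and TRUSTED flags, since the driver currently assumes it owns the external link and is fully trusted. The test is the standard "all required bits present" idiom, masking the returned flags with the required set and comparing against that set; a tiny standalone illustration with made-up bit positions rather than the real MCDI values:

    #include <stdio.h>

    #define FLAG_LINKCTRL (1u << 0)   /* illustrative positions only */
    #define FLAG_TRUSTED  (1u << 1)

    static int flags_ok(unsigned int flags)
    {
        unsigned int required = FLAG_LINKCTRL | FLAG_TRUSTED;

        return (flags & required) == required;
    }

    int main(void)
    {
        /* both set, only LINKCTRL, only TRUSTED */
        printf("%d %d %d\n", flags_ok(0x3), flags_ok(0x1), flags_ok(0x2)); /* 1 0 0 */
        return 0;
    }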
+54 -2
drivers/net/ethernet/sfc/mcdi_pcol.h
··· 2574 2574 #define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ 2575 2575 #define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ 2576 2576 #define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ 2577 - #define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */ 2578 - #define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */ 2577 + /* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS 2578 + * capability only. 2579 + */ 2580 + #define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c 2581 + /* enum: PM discard_bb_overflow counter. Valid for EF10 with 2582 + * PM_AND_RXDP_COUNTERS capability only. 2583 + */ 2584 + #define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d 2585 + /* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS 2586 + * capability only. 2587 + */ 2588 + #define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e 2589 + /* enum: PM discard_vfifo_full counter. Valid for EF10 with 2590 + * PM_AND_RXDP_COUNTERS capability only. 2591 + */ 2592 + #define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f 2593 + /* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS 2594 + * capability only. 2595 + */ 2596 + #define MC_CMD_MAC_PM_TRUNC_QBB 0x40 2597 + /* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS 2598 + * capability only. 2599 + */ 2600 + #define MC_CMD_MAC_PM_DISCARD_QBB 0x41 2601 + /* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS 2602 + * capability only. 2603 + */ 2604 + #define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 2605 + /* enum: RXDP counter: Number of packets dropped due to the queue being 2606 + * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 2607 + */ 2608 + #define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 2609 + /* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 2610 + * with PM_AND_RXDP_COUNTERS capability only. 2611 + */ 2612 + #define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 2613 + /* enum: RXDP counter: Number of non-host packets. Valid for EF10 with 2614 + * PM_AND_RXDP_COUNTERS capability only. 2615 + */ 2616 + #define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 2617 + /* enum: RXDP counter: Number of times an emergency descriptor fetch was 2618 + * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 2619 + */ 2620 + #define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47 2621 + /* enum: RXDP counter: Number of times the DPCPU waited for an existing 2622 + * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 2623 + */ 2624 + #define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48 2625 + /* enum: Start of GMAC stats buffer space, for Siena only. */ 2626 + #define MC_CMD_GMAC_DMABUF_START 0x40 2627 + /* enum: End of GMAC stats buffer space, for Siena only. */ 2628 + #define MC_CMD_GMAC_DMABUF_END 0x5f 2579 2629 #define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ 2580 2630 #define MC_CMD_MAC_NSTATS 0x61 /* enum */ 2581 2631 ··· 5115 5065 #define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 5116 5066 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 5117 5067 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 5068 + #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 5069 + #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 5118 5070 /* RxDPCPU firmware id. */ 5119 5071 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 5120 5072 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+3 -6
drivers/net/ethernet/sfc/nic.c
··· 469 469 * @count: Length of the @desc array 470 470 * @mask: Bitmask of which elements of @desc are enabled 471 471 * @stats: Buffer to update with the converted statistics. The length 472 - * of this array must be at least the number of set bits in the 473 - * first @count bits of @mask. 472 + * of this array must be at least @count. 474 473 * @dma_buf: DMA buffer containing hardware statistics 475 474 * @accumulate: If set, the converted values will be added rather than 476 475 * directly stored to the corresponding elements of @stats ··· 502 503 } 503 504 504 505 if (accumulate) 505 - *stats += val; 506 + stats[index] += val; 506 507 else 507 - *stats = val; 508 + stats[index] = val; 508 509 } 509 - 510 - ++stats; 511 510 } 512 511 }
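The nic.c hunk makes efx_nic_update_stats() store each converted value at stats[index], the slot matching the descriptor being processed, instead of through a separately advanced pointer, and the kernel-doc now asks the caller for an array of at least @count entries rather than one entry per set mask bit. Reduced to plain C (the real function walks hardware descriptors and DMA buffer offsets, which are elided here), the per-index accumulate looks like:

    #include <stdio.h>

    #define COUNT 8

    int main(void)
    {
        unsigned long mask = 0xA6;                 /* which counters are enabled */
        unsigned long dma_buf[COUNT] = { 10, 11, 12, 13, 14, 15, 16, 17 };
        unsigned long stats[COUNT] = { 0 };        /* one slot per descriptor */
        int index;

        for (index = 0; index < COUNT; index++) {
            if (!(mask & (1ul << index)))
                continue;
            stats[index] += dma_buf[index];        /* accumulate in place */
        }

        for (index = 0; index < COUNT; index++)
            printf("stat[%d] = %lu\n", index, stats[index]);
        return 0;
    }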
+12
drivers/net/ethernet/sfc/nic.h
··· 386 386 EF10_STAT_rx_align_error, 387 387 EF10_STAT_rx_length_error, 388 388 EF10_STAT_rx_nodesc_drops, 389 + EF10_STAT_rx_pm_trunc_bb_overflow, 390 + EF10_STAT_rx_pm_discard_bb_overflow, 391 + EF10_STAT_rx_pm_trunc_vfifo_full, 392 + EF10_STAT_rx_pm_discard_vfifo_full, 393 + EF10_STAT_rx_pm_trunc_qbb, 394 + EF10_STAT_rx_pm_discard_qbb, 395 + EF10_STAT_rx_pm_discard_mapping, 396 + EF10_STAT_rx_dp_q_disabled_packets, 397 + EF10_STAT_rx_dp_di_dropped_packets, 398 + EF10_STAT_rx_dp_streaming_packets, 399 + EF10_STAT_rx_dp_emerg_fetch, 400 + EF10_STAT_rx_dp_emerg_wait, 389 401 EF10_STAT_COUNT 390 402 }; 391 403
+2 -4
drivers/net/ethernet/smsc/smc91x.h
··· 1124 1124 void __iomem *__ioaddr = ioaddr; \ 1125 1125 if (__len >= 2 && (unsigned long)__ptr & 2) { \ 1126 1126 __len -= 2; \ 1127 - SMC_outw(*(u16 *)__ptr, ioaddr, \ 1128 - DATA_REG(lp)); \ 1127 + SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \ 1129 1128 __ptr += 2; \ 1130 1129 } \ 1131 1130 if (SMC_CAN_USE_DATACS && lp->datacs) \ ··· 1132 1133 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ 1133 1134 if (__len & 2) { \ 1134 1135 __ptr += (__len & ~3); \ 1135 - SMC_outw(*((u16 *)__ptr), ioaddr, \ 1136 - DATA_REG(lp)); \ 1136 + SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \ 1137 1137 } \ 1138 1138 } else if (SMC_16BIT(lp)) \ 1139 1139 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
+8 -11
drivers/net/ethernet/ti/cpsw.c
··· 639 639 static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 640 640 { 641 641 struct cpsw_priv *priv = dev_id; 642 - u32 rx, tx, rx_thresh; 643 - 644 - rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat); 645 - rx = __raw_readl(&priv->wr_regs->rx_stat); 646 - tx = __raw_readl(&priv->wr_regs->tx_stat); 647 - if (!rx_thresh && !rx && !tx) 648 - return IRQ_NONE; 649 642 650 643 cpsw_intr_disable(priv); 651 644 if (priv->irq_enabled == true) { ··· 1162 1169 } 1163 1170 } 1164 1171 1172 + napi_enable(&priv->napi); 1165 1173 cpdma_ctlr_start(priv->dma); 1166 1174 cpsw_intr_enable(priv); 1167 - napi_enable(&priv->napi); 1168 1175 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1169 1176 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1170 1177 ··· 1764 1771 } 1765 1772 data->mac_control = prop; 1766 1773 1767 - if (!of_property_read_u32(node, "dual_emac", &prop)) 1768 - data->dual_emac = prop; 1774 + if (of_property_read_bool(node, "dual_emac")) 1775 + data->dual_emac = 1; 1769 1776 1770 1777 /* 1771 1778 * Populate all the child nodes here... ··· 1775 1782 if (ret) 1776 1783 pr_warn("Doesn't have any child node\n"); 1777 1784 1778 - for_each_node_by_name(slave_node, "slave") { 1785 + for_each_child_of_node(node, slave_node) { 1779 1786 struct cpsw_slave_data *slave_data = data->slave_data + i; 1780 1787 const void *mac_addr = NULL; 1781 1788 u32 phyid; ··· 1783 1790 const __be32 *parp; 1784 1791 struct device_node *mdio_node; 1785 1792 struct platform_device *mdio; 1793 + 1794 + /* This is no slave child node, continue */ 1795 + if (strcmp(slave_node->name, "slave")) 1796 + continue; 1786 1797 1787 1798 parp = of_get_property(slave_node, "phy_id", &lenp); 1788 1799 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
+1 -2
drivers/net/ethernet/ti/davinci_emac.c
··· 876 876 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { 877 877 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 878 878 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); 879 - } 880 - if (!netdev_mc_empty(ndev)) { 879 + } else if (!netdev_mc_empty(ndev)) { 881 880 struct netdev_hw_addr *ha; 882 881 883 882 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
-1
drivers/net/hamradio/yam.c
··· 975 975 return -EINVAL; /* Cannot change this parameter when up */ 976 976 if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL) 977 977 return -ENOBUFS; 978 - ym->bitrate = 9600; 979 978 if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) { 980 979 kfree(ym); 981 980 return -EFAULT;
+9 -22
drivers/net/ieee802154/mrf24j40.c
··· 82 82 83 83 struct mutex buffer_mutex; /* only used to protect buf */ 84 84 struct completion tx_complete; 85 - struct work_struct irqwork; 86 85 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */ 87 86 }; 88 87 ··· 343 344 if (ret) 344 345 goto err; 345 346 347 + INIT_COMPLETION(devrec->tx_complete); 348 + 346 349 /* Set TXNTRIG bit of TXNCON to send packet */ 347 350 ret = read_short_reg(devrec, REG_TXNCON, &val); 348 351 if (ret) ··· 354 353 if (skb->data[0] & IEEE802154_FC_ACK_REQ) 355 354 val |= 0x4; 356 355 write_short_reg(devrec, REG_TXNCON, val); 357 - 358 - INIT_COMPLETION(devrec->tx_complete); 359 356 360 357 /* Wait for the device to send the TX complete interrupt. */ 361 358 ret = wait_for_completion_interruptible_timeout( ··· 589 590 static irqreturn_t mrf24j40_isr(int irq, void *data) 590 591 { 591 592 struct mrf24j40 *devrec = data; 592 - 593 - disable_irq_nosync(irq); 594 - 595 - schedule_work(&devrec->irqwork); 596 - 597 - return IRQ_HANDLED; 598 - } 599 - 600 - static void mrf24j40_isrwork(struct work_struct *work) 601 - { 602 - struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork); 603 593 u8 intstat; 604 594 int ret; 605 595 ··· 606 618 mrf24j40_handle_rx(devrec); 607 619 608 620 out: 609 - enable_irq(devrec->spi->irq); 621 + return IRQ_HANDLED; 610 622 } 611 623 612 624 static int mrf24j40_probe(struct spi_device *spi) ··· 630 642 631 643 mutex_init(&devrec->buffer_mutex); 632 644 init_completion(&devrec->tx_complete); 633 - INIT_WORK(&devrec->irqwork, mrf24j40_isrwork); 634 645 devrec->spi = spi; 635 646 spi_set_drvdata(spi, devrec); 636 647 ··· 675 688 val &= ~0x3; /* Clear RX mode (normal) */ 676 689 write_short_reg(devrec, REG_RXMCR, val); 677 690 678 - ret = request_irq(spi->irq, 679 - mrf24j40_isr, 680 - IRQF_TRIGGER_FALLING, 681 - dev_name(&spi->dev), 682 - devrec); 691 + ret = request_threaded_irq(spi->irq, 692 + NULL, 693 + mrf24j40_isr, 694 + IRQF_TRIGGER_LOW|IRQF_ONESHOT, 695 + dev_name(&spi->dev), 696 + devrec); 683 697 684 698 if (ret) { 685 699 dev_err(printdev(devrec), "Unable to get IRQ"); ··· 709 721 dev_dbg(printdev(devrec), "remove\n"); 710 722 711 723 free_irq(spi->irq, devrec); 712 - flush_work(&devrec->irqwork); /* TODO: Is this the right call? */ 713 724 ieee802154_unregister_device(devrec->dev); 714 725 ieee802154_free_device(devrec->dev); 715 726 /* TODO: Will ieee802154_free_device() wait until ->xmit() is
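The mrf24j40 rework drops the interrupt-to-workqueue bounce: the interrupt is now handled entirely in a threaded handler, registered with IRQF_TRIGGER_LOW | IRQF_ONESHOT so the level-triggered line stays masked until the thread has read and acknowledged INTSTAT over SPI. A bare-bones sketch of that registration pattern for a kernel driver; struct mydev and the helper names are placeholders, not symbols from this driver:

    #include <linux/interrupt.h>

    struct mydev {
        int dummy;                      /* placeholder device state */
    };

    static void my_read_and_clear_intstat(struct mydev *dev)
    {
        /* would be a (sleeping) spi_sync() register read in a real driver */
    }

    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
        struct mydev *dev = dev_id;

        my_read_and_clear_intstat(dev); /* sleeping is fine in a thread fn */
        return IRQ_HANDLED;
    }

    static int my_setup_irq(struct mydev *dev, int irq)
    {
        /* no hard-IRQ handler (NULL); ONESHOT keeps the line masked
         * until my_thread_fn() returns */
        return request_threaded_irq(irq, NULL, my_thread_fn,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "mydev", dev);
    }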
+5 -3
drivers/net/tun.c
··· 1293 1293 if (unlikely(!noblock)) 1294 1294 add_wait_queue(&tfile->wq.wait, &wait); 1295 1295 while (len) { 1296 - current->state = TASK_INTERRUPTIBLE; 1296 + if (unlikely(!noblock)) 1297 + current->state = TASK_INTERRUPTIBLE; 1297 1298 1298 1299 /* Read frames from the queue */ 1299 1300 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { ··· 1321 1320 break; 1322 1321 } 1323 1322 1324 - current->state = TASK_RUNNING; 1325 - if (unlikely(!noblock)) 1323 + if (unlikely(!noblock)) { 1324 + current->state = TASK_RUNNING; 1326 1325 remove_wait_queue(&tfile->wq.wait, &wait); 1326 + } 1327 1327 1328 1328 return ret; 1329 1329 }
+20 -3
drivers/net/usb/ax88179_178a.c
··· 36 36 #define AX_RXHDR_L4_TYPE_TCP 16 37 37 #define AX_RXHDR_L3CSUM_ERR 2 38 38 #define AX_RXHDR_L4CSUM_ERR 1 39 - #define AX_RXHDR_CRC_ERR ((u32)BIT(31)) 40 - #define AX_RXHDR_DROP_ERR ((u32)BIT(30)) 39 + #define AX_RXHDR_CRC_ERR ((u32)BIT(29)) 40 + #define AX_RXHDR_DROP_ERR ((u32)BIT(31)) 41 41 #define AX_ACCESS_MAC 0x01 42 42 #define AX_ACCESS_PHY 0x02 43 43 #define AX_ACCESS_EEPROM 0x04 ··· 1406 1406 .tx_fixup = ax88179_tx_fixup, 1407 1407 }; 1408 1408 1409 + static const struct driver_info samsung_info = { 1410 + .description = "Samsung USB Ethernet Adapter", 1411 + .bind = ax88179_bind, 1412 + .unbind = ax88179_unbind, 1413 + .status = ax88179_status, 1414 + .link_reset = ax88179_link_reset, 1415 + .reset = ax88179_reset, 1416 + .stop = ax88179_stop, 1417 + .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1418 + .rx_fixup = ax88179_rx_fixup, 1419 + .tx_fixup = ax88179_tx_fixup, 1420 + }; 1421 + 1409 1422 static const struct usb_device_id products[] = { 1410 1423 { 1411 1424 /* ASIX AX88179 10/100/1000 */ ··· 1431 1418 }, { 1432 1419 /* Sitecom USB 3.0 to Gigabit Adapter */ 1433 1420 USB_DEVICE(0x0df6, 0x0072), 1434 - .driver_info = (unsigned long) &sitecom_info, 1421 + .driver_info = (unsigned long)&sitecom_info, 1422 + }, { 1423 + /* Samsung USB Ethernet Adapter */ 1424 + USB_DEVICE(0x04e8, 0xa100), 1425 + .driver_info = (unsigned long)&samsung_info, 1435 1426 }, 1436 1427 { }, 1437 1428 };
+1
drivers/net/usb/qmi_wwan.c
··· 714 714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 715 715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 716 716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 717 + {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 717 718 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 718 719 719 720 /* 4. Gobi 1000 devices */
+3 -1
drivers/net/usb/usbnet.c
··· 1688 1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) && 1689 1689 !(info->flags & FLAG_MULTI_PACKET)) { 1690 1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL); 1691 - if (!dev->padding_pkt) 1691 + if (!dev->padding_pkt) { 1692 + status = -ENOMEM; 1692 1693 goto out4; 1694 + } 1693 1695 } 1694 1696 1695 1697 status = register_netdev (net);
+13 -1
drivers/net/virtio_net.c
··· 938 938 return -EINVAL; 939 939 } else { 940 940 vi->curr_queue_pairs = queue_pairs; 941 - schedule_delayed_work(&vi->refill, 0); 941 + /* virtnet_open() will refill when device is going to up. */ 942 + if (dev->flags & IFF_UP) 943 + schedule_delayed_work(&vi->refill, 0); 942 944 } 943 945 944 946 return 0; ··· 1118 1116 { 1119 1117 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); 1120 1118 1119 + mutex_lock(&vi->config_lock); 1120 + 1121 + if (!vi->config_enable) 1122 + goto done; 1123 + 1121 1124 switch(action & ~CPU_TASKS_FROZEN) { 1122 1125 case CPU_ONLINE: 1123 1126 case CPU_DOWN_FAILED: ··· 1135 1128 default: 1136 1129 break; 1137 1130 } 1131 + 1132 + done: 1133 + mutex_unlock(&vi->config_lock); 1138 1134 return NOTIFY_OK; 1139 1135 } 1140 1136 ··· 1743 1733 vi->config_enable = true; 1744 1734 mutex_unlock(&vi->config_lock); 1745 1735 1736 + rtnl_lock(); 1746 1737 virtnet_set_queues(vi, vi->curr_queue_pairs); 1738 + rtnl_unlock(); 1747 1739 1748 1740 return 0; 1749 1741 }
+1
drivers/net/wan/farsync.c
··· 1972 1972 } 1973 1973 1974 1974 i = port->index; 1975 + memset(&sync, 0, sizeof(sync)); 1975 1976 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed); 1976 1977 /* Lucky card and linux use same encoding here */ 1977 1978 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
+1
drivers/net/wan/wanxl.c
··· 355 355 ifr->ifr_settings.size = size; /* data size wanted */ 356 356 return -ENOBUFS; 357 357 } 358 + memset(&line, 0, sizeof(line)); 358 359 line.clock_type = get_status(port)->clocking; 359 360 line.clock_rate = 0; 360 361 line.loopback = 0;
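The farsync and wanxl hunks are the same class of fix: an on-stack ioctl reply structure is now zeroed before only a few of its fields are filled in and the whole thing is copied out, so members the driver never sets (and padding bytes) cannot leak kernel stack contents to user space. The general shape, with a hypothetical struct standing in for the real sync-serial settings:

    #include <stdio.h>
    #include <string.h>

    struct line_settings {               /* hypothetical reply layout */
        unsigned int clock_type;
        unsigned int clock_rate;
        unsigned int loopback;
        unsigned int reserved[4];        /* fields the driver never touches */
    };

    int main(void)
    {
        struct line_settings line;

        memset(&line, 0, sizeof(line));  /* zero everything first */
        line.clock_type = 1;
        line.clock_rate = 0;
        line.loopback   = 0;

        /* a driver would now copy the whole struct to user space;
         * reserved[] and padding are guaranteed zero, not stack junk */
        printf("reserved[0] = %u\n", line.reserved[0]);
        return 0;
    }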
+11 -12
drivers/net/wireless/ath/ath9k/main.c
··· 208 208 struct ath_hw *ah = sc->sc_ah; 209 209 struct ath_common *common = ath9k_hw_common(ah); 210 210 unsigned long flags; 211 + int i; 211 212 212 213 if (ath_startrecv(sc) != 0) { 213 214 ath_err(common, "Unable to restart recv logic\n"); ··· 236 235 } 237 236 work: 238 237 ath_restart_work(sc); 238 + 239 + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 240 + if (!ATH_TXQ_SETUP(sc, i)) 241 + continue; 242 + 243 + spin_lock_bh(&sc->tx.txq[i].axq_lock); 244 + ath_txq_schedule(sc, &sc->tx.txq[i]); 245 + spin_unlock_bh(&sc->tx.txq[i].axq_lock); 246 + } 239 247 } 240 248 241 249 ieee80211_wake_queues(sc->hw); ··· 549 539 550 540 static int ath_reset(struct ath_softc *sc) 551 541 { 552 - int i, r; 542 + int r; 553 543 554 544 ath9k_ps_wakeup(sc); 555 - 556 545 r = ath_reset_internal(sc, NULL); 557 - 558 - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 559 - if (!ATH_TXQ_SETUP(sc, i)) 560 - continue; 561 - 562 - spin_lock_bh(&sc->tx.txq[i].axq_lock); 563 - ath_txq_schedule(sc, &sc->tx.txq[i]); 564 - spin_unlock_bh(&sc->tx.txq[i].axq_lock); 565 - } 566 - 567 546 ath9k_ps_restore(sc); 568 547 569 548 return r;
+6 -3
drivers/net/wireless/ath/ath9k/xmit.c
··· 1969 1969 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1970 1970 struct ath_atx_tid *tid, struct sk_buff *skb) 1971 1971 { 1972 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1972 1973 struct ath_frame_info *fi = get_frame_info(skb); 1973 1974 struct list_head bf_head; 1974 - struct ath_buf *bf; 1975 - 1976 - bf = fi->bf; 1975 + struct ath_buf *bf = fi->bf; 1977 1976 1978 1977 INIT_LIST_HEAD(&bf_head); 1979 1978 list_add_tail(&bf->list, &bf_head); 1980 1979 bf->bf_state.bf_type = 0; 1980 + if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { 1981 + bf->bf_state.bf_type = BUF_AMPDU; 1982 + ath_tx_addto_baw(sc, tid, bf); 1983 + } 1981 1984 1982 1985 bf->bf_next = NULL; 1983 1986 bf->bf_lastbf = bf;
+2
drivers/net/wireless/cw1200/cw1200_spi.c
··· 237 237 struct hwbus_priv *self = dev_id; 238 238 239 239 if (self->core) { 240 + cw1200_spi_lock(self); 240 241 cw1200_irq_handler(self->core); 242 + cw1200_spi_unlock(self); 241 243 return IRQ_HANDLED; 242 244 } else { 243 245 return IRQ_NONE;
+6
drivers/net/wireless/iwlwifi/iwl-6000.c
··· 240 240 .ht_params = &iwl6000_ht_params, 241 241 }; 242 242 243 + const struct iwl_cfg iwl6035_2agn_sff_cfg = { 244 + .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN", 245 + IWL_DEVICE_6035, 246 + .ht_params = &iwl6000_ht_params, 247 + }; 248 + 243 249 const struct iwl_cfg iwl1030_bgn_cfg = { 244 250 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", 245 251 IWL_DEVICE_6030,
+1
drivers/net/wireless/iwlwifi/iwl-config.h
··· 280 280 extern const struct iwl_cfg iwl2000_2bgn_d_cfg; 281 281 extern const struct iwl_cfg iwl2030_2bgn_cfg; 282 282 extern const struct iwl_cfg iwl6035_2agn_cfg; 283 + extern const struct iwl_cfg iwl6035_2agn_sff_cfg; 283 284 extern const struct iwl_cfg iwl105_bgn_cfg; 284 285 extern const struct iwl_cfg iwl105_bgn_d_cfg; 285 286 extern const struct iwl_cfg iwl135_bgn_cfg;
+4 -2
drivers/net/wireless/iwlwifi/iwl-trans.h
··· 601 601 { 602 602 int ret; 603 603 604 - WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 605 - "%s bad state = %d", __func__, trans->state); 604 + if (trans->state != IWL_TRANS_FW_ALIVE) { 605 + IWL_ERR(trans, "%s bad state = %d", __func__, trans->state); 606 + return -EIO; 607 + } 606 608 607 609 if (!(cmd->flags & CMD_ASYNC)) 608 610 lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+4 -1
drivers/net/wireless/iwlwifi/mvm/power.c
··· 273 273 if (!mvmvif->queue_params[ac].uapsd) 274 274 continue; 275 275 276 - cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); 276 + if (mvm->cur_ucode != IWL_UCODE_WOWLAN) 277 + cmd->flags |= 278 + cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); 279 + 277 280 cmd->uapsd_ac_flags |= BIT(ac); 278 281 279 282 /* QNDP TID - the highest TID with no admission control */
+11 -1
drivers/net/wireless/iwlwifi/mvm/scan.c
··· 394 394 return false; 395 395 } 396 396 397 + /* 398 + * If scan cannot be aborted, it means that we had a 399 + * SCAN_COMPLETE_NOTIFICATION in the pipe and it called 400 + * ieee80211_scan_completed already. 401 + */ 397 402 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n", 398 403 *resp); 399 404 return true; ··· 422 417 SCAN_COMPLETE_NOTIFICATION }; 423 418 int ret; 424 419 420 + if (mvm->scan_status == IWL_MVM_SCAN_NONE) 421 + return; 422 + 425 423 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, 426 424 scan_abort_notif, 427 425 ARRAY_SIZE(scan_abort_notif), 428 426 iwl_mvm_scan_abort_notif, NULL); 429 427 430 - ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); 428 + ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 429 + CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL); 431 430 if (ret) { 432 431 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); 432 + /* mac80211's state will be cleaned in the fw_restart flow */ 433 433 goto out_remove_notif; 434 434 } 435 435
+42
drivers/net/wireless/iwlwifi/pcie/drv.c
··· 139 139 140 140 /* 6x00 Series */ 141 141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, 142 + {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, 142 143 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, 144 + {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, 143 145 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, 144 146 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, 145 147 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, 146 148 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, 147 149 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, 148 150 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, 151 + {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, 149 152 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 150 153 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 151 154 ··· 156 153 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, 157 154 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, 158 155 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, 156 + {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)}, 159 157 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, 160 158 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, 159 + {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, 161 160 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, 161 + {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)}, 162 162 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, 163 163 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, 164 164 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, 165 + {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, 165 166 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, 166 167 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ 167 168 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ ··· 247 240 248 241 /* 6x35 Series */ 249 242 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, 243 + {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, 250 244 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, 245 + {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, 251 246 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, 247 + {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, 252 248 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, 253 249 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, 254 250 ··· 270 260 #if IS_ENABLED(CONFIG_IWLMVM) 271 261 /* 7000 Series */ 272 262 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, 263 + {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, 273 264 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, 274 265 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, 266 + {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, 275 267 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, 276 268 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, 277 269 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, 278 270 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, 271 + {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, 279 272 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, 273 + {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, 280 274 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, 281 275 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, 276 + {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, 282 277 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, 278 + {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, 283 279 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, 284 280 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, 285 281 
{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, 286 282 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, 287 283 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, 288 284 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, 285 + {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, 286 + {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, 287 + {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, 288 + {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, 289 + {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, 289 290 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, 291 + {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, 290 292 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, 291 293 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, 292 294 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, 295 + {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, 293 296 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, 294 297 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, 298 + {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, 295 299 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, 296 300 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, 297 301 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, 302 + {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, 303 + {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, 298 304 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, 305 + {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, 299 306 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, 307 + {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, 300 308 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, 301 309 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, 310 + {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, 302 311 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, 303 312 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, 313 + {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, 314 + {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, 315 + {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, 316 + {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, 304 317 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, 318 + {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, 305 319 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, 306 320 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, 307 321 308 322 /* 3160 Series */ 309 323 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, 324 + {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, 310 325 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, 326 + {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, 311 327 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, 312 328 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, 313 329 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, 330 + {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, 314 331 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, 332 + {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, 333 + {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, 315 334 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, 335 + {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, 316 336 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, 337 + {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, 317 338 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, 318 339 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, 319 340 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, 320 341 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, 342 + {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, 321 343 #endif /* CONFIG_IWLMVM */ 322 344 
323 345 {0}
+4 -4
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 1401 1401 spin_lock_init(&trans_pcie->reg_lock); 1402 1402 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 1403 1403 1404 + err = pci_enable_device(pdev); 1405 + if (err) 1406 + goto out_no_pci; 1407 + 1404 1408 if (!cfg->base_params->pcie_l1_allowed) { 1405 1409 /* 1406 1410 * W/A - seems to solve weird behavior. We need to remove this ··· 1415 1411 PCIE_LINK_STATE_L1 | 1416 1412 PCIE_LINK_STATE_CLKPM); 1417 1413 } 1418 - 1419 - err = pci_enable_device(pdev); 1420 - if (err) 1421 - goto out_no_pci; 1422 1414 1423 1415 pci_set_master(pdev); 1424 1416
+2
drivers/net/wireless/iwlwifi/pcie/tx.c
··· 1102 1102 * non-AGG queue. 1103 1103 */ 1104 1104 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 1105 + 1106 + ssn = trans_pcie->txq[txq_id].q.read_ptr; 1105 1107 } 1106 1108 1107 1109 /* Place first TFD at index corresponding to start sequence number.
+8 -2
drivers/net/wireless/mwifiex/join.c
··· 1422 1422 */ 1423 1423 int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) 1424 1424 { 1425 + int ret = 0; 1426 + 1425 1427 if (!priv->media_connected) 1426 1428 return 0; 1427 1429 1428 1430 switch (priv->bss_mode) { 1429 1431 case NL80211_IFTYPE_STATION: 1430 1432 case NL80211_IFTYPE_P2P_CLIENT: 1431 - return mwifiex_deauthenticate_infra(priv, mac); 1433 + ret = mwifiex_deauthenticate_infra(priv, mac); 1434 + if (ret) 1435 + cfg80211_disconnected(priv->netdev, 0, NULL, 0, 1436 + GFP_KERNEL); 1437 + break; 1432 1438 case NL80211_IFTYPE_ADHOC: 1433 1439 return mwifiex_send_cmd_sync(priv, 1434 1440 HostCmd_CMD_802_11_AD_HOC_STOP, ··· 1446 1440 break; 1447 1441 } 1448 1442 1449 - return 0; 1443 + return ret; 1450 1444 } 1451 1445 EXPORT_SYMBOL_GPL(mwifiex_deauthenticate); 1452 1446
+5 -3
drivers/net/wireless/mwifiex/main.c
··· 358 358 } 359 359 } while (true); 360 360 361 - if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) 362 - goto process_start; 363 - 364 361 spin_lock_irqsave(&adapter->main_proc_lock, flags); 362 + if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) { 363 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 364 + goto process_start; 365 + } 366 + 365 367 adapter->mwifiex_processing = false; 366 368 spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 367 369
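The mwifiex main-process change closes a small race on exit from the work loop: the check for interrupt status or received data that arrived meanwhile is now done while holding main_proc_lock, the same lock under which mwifiex_processing is cleared, so an event landing between the check and the flag clear cannot be lost. The pattern in isolation, as a user-space pthread analogue (generic names, build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending;        /* analogue of adapter->int_status */
    static int processing;     /* analogue of adapter->mwifiex_processing */

    static void *irq_path(void *arg)
    {
        int i;

        (void)arg;
        for (i = 0; i < 100000; i++) {
            pthread_mutex_lock(&lock);
            pending = 1;                    /* "interrupt arrived" */
            pthread_mutex_unlock(&lock);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        long handled = 0;

        processing = 1;
        pthread_create(&t, NULL, irq_path, NULL);

    process_start:
        for (;;) {
            int work;

            pthread_mutex_lock(&lock);
            work = pending;
            pending = 0;
            pthread_mutex_unlock(&lock);

            if (!work)
                break;
            handled++;                      /* "process the interrupt" */
        }

        /* exit path: re-check for new work under the same lock that
         * clears the processing flag, so nothing slips through */
        pthread_mutex_lock(&lock);
        if (pending) {
            pthread_mutex_unlock(&lock);
            goto process_start;
        }
        processing = 0;
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        printf("handled %ld batches, processing=%d\n", handled, processing);
        return 0;
    }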
+2 -1
drivers/net/wireless/mwifiex/sta_event.c
··· 118 118 dev_dbg(adapter->dev, 119 119 "info: successfully disconnected from %pM: reason code %d\n", 120 120 priv->cfg_bssid, reason_code); 121 - if (priv->bss_mode == NL80211_IFTYPE_STATION) { 121 + if (priv->bss_mode == NL80211_IFTYPE_STATION || 122 + priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { 122 123 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, 123 124 GFP_KERNEL); 124 125 }
+1 -8
drivers/net/wireless/rt2x00/rt2x00pci.c
··· 105 105 goto exit_release_regions; 106 106 } 107 107 108 - pci_enable_msi(pci_dev); 109 - 110 108 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 111 109 if (!hw) { 112 110 rt2x00_probe_err("Failed to allocate hardware\n"); 113 111 retval = -ENOMEM; 114 - goto exit_disable_msi; 112 + goto exit_release_regions; 115 113 } 116 114 117 115 pci_set_drvdata(pci_dev, hw); ··· 150 152 exit_free_device: 151 153 ieee80211_free_hw(hw); 152 154 153 - exit_disable_msi: 154 - pci_disable_msi(pci_dev); 155 - 156 155 exit_release_regions: 157 156 pci_release_regions(pci_dev); 158 157 ··· 173 178 rt2x00lib_remove_dev(rt2x00dev); 174 179 rt2x00pci_free_reg(rt2x00dev); 175 180 ieee80211_free_hw(hw); 176 - 177 - pci_disable_msi(pci_dev); 178 181 179 182 /* 180 183 * Free the PCI device data.
+2 -1
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
··· 343 343 (bool)GET_RX_DESC_PAGGR(pdesc)); 344 344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 345 345 if (phystatus) { 346 - p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE); 346 + p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + 347 + stats->rx_bufshift); 347 348 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, 348 349 p_drvinfo); 349 350 }
+4
drivers/net/xen-netback/xenbus.c
··· 39 39 static void connect(struct backend_info *); 40 40 static void backend_create_xenvif(struct backend_info *be); 41 41 static void unregister_hotplug_status_watch(struct backend_info *be); 42 + static void set_backend_state(struct backend_info *be, 43 + enum xenbus_state state); 42 44 43 45 static int netback_remove(struct xenbus_device *dev) 44 46 { 45 47 struct backend_info *be = dev_get_drvdata(&dev->dev); 48 + 49 + set_backend_state(be, XenbusStateClosed); 46 50 47 51 unregister_hotplug_status_watch(be); 48 52 if (be->vif) {
+1
drivers/platform/x86/Kconfig
··· 504 504 depends on BACKLIGHT_CLASS_DEVICE 505 505 depends on RFKILL || RFKILL = n 506 506 depends on HOTPLUG_PCI 507 + depends on ACPI_VIDEO || ACPI_VIDEO = n 507 508 select INPUT_SPARSEKMAP 508 509 select LEDS_CLASS 509 510 select NEW_LEDS
+9 -17
drivers/platform/x86/sony-laptop.c
··· 127 127 "default is -1 (automatic)"); 128 128 #endif 129 129 130 - static int kbd_backlight = 1; 130 + static int kbd_backlight = -1; 131 131 module_param(kbd_backlight, int, 0444); 132 132 MODULE_PARM_DESC(kbd_backlight, 133 133 "set this to 0 to disable keyboard backlight, " 134 - "1 to enable it (default: 0)"); 134 + "1 to enable it (default: no change from current value)"); 135 135 136 - static int kbd_backlight_timeout; /* = 0 */ 136 + static int kbd_backlight_timeout = -1; 137 137 module_param(kbd_backlight_timeout, int, 0444); 138 138 MODULE_PARM_DESC(kbd_backlight_timeout, 139 - "set this to 0 to set the default 10 seconds timeout, " 140 - "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " 141 - "(default: 0)"); 139 + "meaningful values vary from 0 to 3 and their meaning depends " 140 + "on the model (default: no change from current value)"); 142 141 143 142 #ifdef CONFIG_PM_SLEEP 144 143 static void sony_nc_kbd_backlight_resume(void); ··· 1843 1844 if (!kbdbl_ctl) 1844 1845 return -ENOMEM; 1845 1846 1847 + kbdbl_ctl->mode = kbd_backlight; 1848 + kbdbl_ctl->timeout = kbd_backlight_timeout; 1846 1849 kbdbl_ctl->handle = handle; 1847 1850 if (handle == 0x0137) 1848 1851 kbdbl_ctl->base = 0x0C00; ··· 1871 1870 if (ret) 1872 1871 goto outmode; 1873 1872 1874 - __sony_nc_kbd_backlight_mode_set(kbd_backlight); 1875 - __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout); 1873 + __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode); 1874 + __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout); 1876 1875 1877 1876 return 0; 1878 1877 ··· 1887 1886 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1888 1887 { 1889 1888 if (kbdbl_ctl) { 1890 - int result; 1891 - 1892 1889 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); 1893 1890 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); 1894 - 1895 - /* restore the default hw behaviour */ 1896 - sony_call_snc_handle(kbdbl_ctl->handle, 1897 - kbdbl_ctl->base | 0x10000, &result); 1898 - sony_call_snc_handle(kbdbl_ctl->handle, 1899 - kbdbl_ctl->base + 0x200, &result); 1900 - 1901 1891 kfree(kbdbl_ctl); 1902 1892 kbdbl_ctl = NULL; 1903 1893 }
+71 -27
drivers/s390/block/dasd_eckd.c
··· 2077 2077 int intensity = 0; 2078 2078 int r0_perm; 2079 2079 int nr_tracks; 2080 + int use_prefix; 2080 2081 2081 2082 startdev = dasd_alias_get_start_dev(base); 2082 2083 if (!startdev) ··· 2107 2106 intensity = fdata->intensity; 2108 2107 } 2109 2108 2109 + use_prefix = base_priv->features.feature[8] & 0x01; 2110 + 2110 2111 switch (intensity) { 2111 2112 case 0x00: /* Normal format */ 2112 2113 case 0x08: /* Normal format, use cdl. */ 2113 2114 cplength = 2 + (rpt*nr_tracks); 2114 - datasize = sizeof(struct PFX_eckd_data) + 2115 - sizeof(struct LO_eckd_data) + 2116 - rpt * nr_tracks * sizeof(struct eckd_count); 2115 + if (use_prefix) 2116 + datasize = sizeof(struct PFX_eckd_data) + 2117 + sizeof(struct LO_eckd_data) + 2118 + rpt * nr_tracks * sizeof(struct eckd_count); 2119 + else 2120 + datasize = sizeof(struct DE_eckd_data) + 2121 + sizeof(struct LO_eckd_data) + 2122 + rpt * nr_tracks * sizeof(struct eckd_count); 2117 2123 break; 2118 2124 case 0x01: /* Write record zero and format track. */ 2119 2125 case 0x09: /* Write record zero and format track, use cdl. */ 2120 2126 cplength = 2 + rpt * nr_tracks; 2121 - datasize = sizeof(struct PFX_eckd_data) + 2122 - sizeof(struct LO_eckd_data) + 2123 - sizeof(struct eckd_count) + 2124 - rpt * nr_tracks * sizeof(struct eckd_count); 2127 + if (use_prefix) 2128 + datasize = sizeof(struct PFX_eckd_data) + 2129 + sizeof(struct LO_eckd_data) + 2130 + sizeof(struct eckd_count) + 2131 + rpt * nr_tracks * sizeof(struct eckd_count); 2132 + else 2133 + datasize = sizeof(struct DE_eckd_data) + 2134 + sizeof(struct LO_eckd_data) + 2135 + sizeof(struct eckd_count) + 2136 + rpt * nr_tracks * sizeof(struct eckd_count); 2125 2137 break; 2126 2138 case 0x04: /* Invalidate track. */ 2127 2139 case 0x0c: /* Invalidate track, use cdl. */ 2128 2140 cplength = 3; 2129 - datasize = sizeof(struct PFX_eckd_data) + 2130 - sizeof(struct LO_eckd_data) + 2131 - sizeof(struct eckd_count); 2141 + if (use_prefix) 2142 + datasize = sizeof(struct PFX_eckd_data) + 2143 + sizeof(struct LO_eckd_data) + 2144 + sizeof(struct eckd_count); 2145 + else 2146 + datasize = sizeof(struct DE_eckd_data) + 2147 + sizeof(struct LO_eckd_data) + 2148 + sizeof(struct eckd_count); 2132 2149 break; 2133 2150 default: 2134 2151 dev_warn(&startdev->cdev->dev, ··· 2166 2147 2167 2148 switch (intensity & ~0x08) { 2168 2149 case 0x00: /* Normal format. 
*/ 2169 - prefix(ccw++, (struct PFX_eckd_data *) data, 2170 - fdata->start_unit, fdata->stop_unit, 2171 - DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2172 - /* grant subsystem permission to format R0 */ 2173 - if (r0_perm) 2174 - ((struct PFX_eckd_data *)data) 2175 - ->define_extent.ga_extended |= 0x04; 2176 - data += sizeof(struct PFX_eckd_data); 2150 + if (use_prefix) { 2151 + prefix(ccw++, (struct PFX_eckd_data *) data, 2152 + fdata->start_unit, fdata->stop_unit, 2153 + DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2154 + /* grant subsystem permission to format R0 */ 2155 + if (r0_perm) 2156 + ((struct PFX_eckd_data *)data) 2157 + ->define_extent.ga_extended |= 0x04; 2158 + data += sizeof(struct PFX_eckd_data); 2159 + } else { 2160 + define_extent(ccw++, (struct DE_eckd_data *) data, 2161 + fdata->start_unit, fdata->stop_unit, 2162 + DASD_ECKD_CCW_WRITE_CKD, startdev); 2163 + /* grant subsystem permission to format R0 */ 2164 + if (r0_perm) 2165 + ((struct DE_eckd_data *) data) 2166 + ->ga_extended |= 0x04; 2167 + data += sizeof(struct DE_eckd_data); 2168 + } 2177 2169 ccw[-1].flags |= CCW_FLAG_CC; 2178 2170 locate_record(ccw++, (struct LO_eckd_data *) data, 2179 2171 fdata->start_unit, 0, rpt*nr_tracks, ··· 2193 2163 data += sizeof(struct LO_eckd_data); 2194 2164 break; 2195 2165 case 0x01: /* Write record zero + format track. */ 2196 - prefix(ccw++, (struct PFX_eckd_data *) data, 2197 - fdata->start_unit, fdata->stop_unit, 2198 - DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2199 - base, startdev); 2200 - data += sizeof(struct PFX_eckd_data); 2166 + if (use_prefix) { 2167 + prefix(ccw++, (struct PFX_eckd_data *) data, 2168 + fdata->start_unit, fdata->stop_unit, 2169 + DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2170 + base, startdev); 2171 + data += sizeof(struct PFX_eckd_data); 2172 + } else { 2173 + define_extent(ccw++, (struct DE_eckd_data *) data, 2174 + fdata->start_unit, fdata->stop_unit, 2175 + DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev); 2176 + data += sizeof(struct DE_eckd_data); 2177 + } 2201 2178 ccw[-1].flags |= CCW_FLAG_CC; 2202 2179 locate_record(ccw++, (struct LO_eckd_data *) data, 2203 2180 fdata->start_unit, 0, rpt * nr_tracks + 1, ··· 2213 2176 data += sizeof(struct LO_eckd_data); 2214 2177 break; 2215 2178 case 0x04: /* Invalidate track. */ 2216 - prefix(ccw++, (struct PFX_eckd_data *) data, 2217 - fdata->start_unit, fdata->stop_unit, 2218 - DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2219 - data += sizeof(struct PFX_eckd_data); 2179 + if (use_prefix) { 2180 + prefix(ccw++, (struct PFX_eckd_data *) data, 2181 + fdata->start_unit, fdata->stop_unit, 2182 + DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2183 + data += sizeof(struct PFX_eckd_data); 2184 + } else { 2185 + define_extent(ccw++, (struct DE_eckd_data *) data, 2186 + fdata->start_unit, fdata->stop_unit, 2187 + DASD_ECKD_CCW_WRITE_CKD, startdev); 2188 + data += sizeof(struct DE_eckd_data); 2189 + } 2220 2190 ccw[-1].flags |= CCW_FLAG_CC; 2221 2191 locate_record(ccw++, (struct LO_eckd_data *) data, 2222 2192 fdata->start_unit, 0, 1,
+2 -2
drivers/s390/char/sclp.c
··· 486 486 timeout = 0; 487 487 if (timer_pending(&sclp_request_timer)) { 488 488 /* Get timeout TOD value */ 489 - timeout = get_tod_clock() + 489 + timeout = get_tod_clock_fast() + 490 490 sclp_tod_from_jiffies(sclp_request_timer.expires - 491 491 jiffies); 492 492 } ··· 508 508 while (sclp_running_state != sclp_running_state_idle) { 509 509 /* Check for expired request timer */ 510 510 if (timer_pending(&sclp_request_timer) && 511 - get_tod_clock() > timeout && 511 + get_tod_clock_fast() > timeout && 512 512 del_timer(&sclp_request_timer)) 513 513 sclp_request_timer.function(sclp_request_timer.data); 514 514 cpu_relax();
+1 -1
drivers/s390/char/vmlogrdr.c
··· 313 313 int ret; 314 314 315 315 dev_num = iminor(inode); 316 - if (dev_num > MAXMINOR) 316 + if (dev_num >= MAXMINOR) 317 317 return -ENODEV; 318 318 logptr = &sys_ser[dev_num]; 319 319
+2 -2
drivers/s390/cio/cio.c
··· 878 878 atomic_inc(&chpid_reset_count); 879 879 } 880 880 /* Wait for machine check for all channel paths. */ 881 - timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); 881 + timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12); 882 882 while (atomic_read(&chpid_reset_count) != 0) { 883 - if (get_tod_clock() > timeout) 883 + if (get_tod_clock_fast() > timeout) 884 884 break; 885 885 cpu_relax(); 886 886 }
+5 -5
drivers/s390/cio/qdio_main.c
··· 338 338 retries++; 339 339 340 340 if (!start_time) { 341 - start_time = get_tod_clock(); 341 + start_time = get_tod_clock_fast(); 342 342 goto again; 343 343 } 344 - if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 344 + if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE) 345 345 goto again; 346 346 } 347 347 if (retries) { ··· 504 504 int count, stop; 505 505 unsigned char state = 0; 506 506 507 - q->timestamp = get_tod_clock(); 507 + q->timestamp = get_tod_clock_fast(); 508 508 509 509 /* 510 510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved ··· 595 595 * At this point we know, that inbound first_to_check 596 596 * has (probably) not moved (see qdio_inbound_processing). 597 597 */ 598 - if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 598 + if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 599 599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 600 600 q->first_to_check); 601 601 return 1; ··· 728 728 int count, stop; 729 729 unsigned char state = 0; 730 730 731 - q->timestamp = get_tod_clock(); 731 + q->timestamp = get_tod_clock_fast(); 732 732 733 733 if (need_siga_sync(q)) 734 734 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+8 -8
drivers/scsi/BusLogic.c
··· 696 696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, 697 697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, 698 698 pci_device)) != NULL) { 699 - struct blogic_adapter *adapter = adapter; 699 + struct blogic_adapter *host_adapter = adapter; 700 700 struct blogic_adapter_info adapter_info; 701 701 enum blogic_isa_ioport mod_ioaddr_req; 702 702 unsigned char bus; ··· 744 744 known and enabled, note that the particular Standard ISA I/O 745 745 Address should not be probed. 746 746 */ 747 - adapter->io_addr = io_addr; 748 - blogic_intreset(adapter); 749 - if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, 747 + host_adapter->io_addr = io_addr; 748 + blogic_intreset(host_adapter); 749 + if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, 750 750 &adapter_info, sizeof(adapter_info)) == 751 751 sizeof(adapter_info)) { 752 752 if (adapter_info.isa_port < 6) ··· 762 762 I/O Address assigned at system initialization. 763 763 */ 764 764 mod_ioaddr_req = BLOGIC_IO_DISABLE; 765 - blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, 765 + blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, 766 766 sizeof(mod_ioaddr_req), NULL, 0); 767 767 /* 768 768 For the first MultiMaster Host Adapter enumerated, ··· 779 779 780 780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45; 781 781 fetch_localram.count = sizeof(autoscsi_byte45); 782 - blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, 782 + blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM, 783 783 &fetch_localram, sizeof(fetch_localram), 784 784 &autoscsi_byte45, 785 785 sizeof(autoscsi_byte45)); 786 - blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, 787 - sizeof(id)); 786 + blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0, 787 + &id, sizeof(id)); 788 788 if (id.fw_ver_digit1 == '5') 789 789 force_scan_order = 790 790 autoscsi_byte45.force_scan_order;
+1 -1
drivers/scsi/qla2xxx/qla_dbg.c
··· 20 20 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 21 21 * | | | 0x2011-0x2012, | 22 22 * | | | 0x2016 | 23 - * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b | 23 + * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b | 24 24 * | | | 0x3027-0x3028 | 25 25 * | | | 0x303d-0x3041 | 26 26 * | | | 0x302d,0x3033 |
+9
drivers/scsi/qla2xxx/qla_isr.c
··· 1957 1957 que = MSW(sts->handle); 1958 1958 req = ha->req_q_map[que]; 1959 1959 1960 + /* Check for invalid queue pointer */ 1961 + if (req == NULL || 1962 + que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 1963 + ql_dbg(ql_dbg_io, vha, 0x3059, 1964 + "Invalid status handle (0x%x): Bad req pointer. req=%p, " 1965 + "que=%u.\n", sts->handle, req, que); 1966 + return; 1967 + } 1968 + 1960 1969 /* Validate handle. */ 1961 1970 if (handle < req->num_outstanding_cmds) 1962 1971 sp = req->outstanding_cmds[handle];
+1 -1
drivers/scsi/sd.c
··· 2854 2854 gd->events |= DISK_EVENT_MEDIA_CHANGE; 2855 2855 } 2856 2856 2857 + blk_pm_runtime_init(sdp->request_queue, dev); 2857 2858 add_disk(gd); 2858 2859 if (sdkp->capacity) 2859 2860 sd_dif_config_host(sdkp); ··· 2863 2862 2864 2863 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2865 2864 sdp->removable ? "removable " : ""); 2866 - blk_pm_runtime_init(sdp->request_queue, dev); 2867 2865 scsi_autopm_put_device(sdp); 2868 2866 put_device(&sdkp->dev); 2869 2867 }
+1
drivers/staging/media/msi3101/Kconfig
··· 1 1 config USB_MSI3101 2 2 tristate "Mirics MSi3101 SDR Dongle" 3 3 depends on USB && VIDEO_DEV && VIDEO_V4L2 4 + select VIDEOBUF2_VMALLOC
+8 -2
drivers/staging/media/msi3101/sdr-msi3101.c
··· 1131 1131 /* Absolute min and max number of buffers available for mmap() */ 1132 1132 *nbuffers = 32; 1133 1133 *nplanes = 1; 1134 - sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */ 1134 + /* 1135 + * 3, wMaxPacketSize 3x 1024 bytes 1136 + * 504, max IQ sample pairs per 1024 frame 1137 + * 2, two samples, I and Q 1138 + * 4, 32-bit float 1139 + */ 1140 + sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */ 1135 1141 dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n", 1136 1142 __func__, *nbuffers, sizes[0]); 1137 1143 return 0; ··· 1663 1657 f->frequency * 625UL / 10UL); 1664 1658 } 1665 1659 1666 - const struct v4l2_ioctl_ops msi3101_ioctl_ops = { 1660 + static const struct v4l2_ioctl_ops msi3101_ioctl_ops = { 1667 1661 .vidioc_querycap = msi3101_querycap, 1668 1662 1669 1663 .vidioc_enum_input = msi3101_enum_input,
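Per the comment added in the hunk, the new sizes[0] comes out just under three pages: three isochronous packets, each carrying up to 504 I/Q pairs of two 4-byte float samples. A quick userspace check of that arithmetic, assuming 4 KiB pages and using a local stand-in for PAGE_ALIGN rather than the kernel macro:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long raw = 3 * 504 * 2 * 4;	/* packets * IQ pairs * samples * sizeof(float) */

	printf("raw=%lu aligned=%lu\n", raw, PAGE_ALIGN(raw));	/* 12096 -> 12288 */
	return 0;
}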
+4 -4
drivers/target/target_core_pscsi.c
··· 134 134 * pSCSI Host ID and enable for phba mode 135 135 */ 136 136 sh = scsi_host_lookup(phv->phv_host_id); 137 - if (IS_ERR(sh)) { 137 + if (!sh) { 138 138 pr_err("pSCSI: Unable to locate SCSI Host for" 139 139 " phv_host_id: %d\n", phv->phv_host_id); 140 - return PTR_ERR(sh); 140 + return -EINVAL; 141 141 } 142 142 143 143 phv->phv_lld_host = sh; ··· 515 515 sh = phv->phv_lld_host; 516 516 } else { 517 517 sh = scsi_host_lookup(pdv->pdv_host_id); 518 - if (IS_ERR(sh)) { 518 + if (!sh) { 519 519 pr_err("pSCSI: Unable to locate" 520 520 " pdv_host_id: %d\n", pdv->pdv_host_id); 521 - return PTR_ERR(sh); 521 + return -EINVAL; 522 522 } 523 523 } 524 524 } else {
+5
drivers/target/target_core_sbc.c
··· 263 263 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 264 264 return TCM_INVALID_CDB_FIELD; 265 265 } 266 + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ 267 + if (flags[0] & 0x10) { 268 + pr_warn("WRITE SAME with ANCHOR not supported\n"); 269 + return TCM_INVALID_CDB_FIELD; 270 + } 266 271 /* 267 272 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 268 273 * translated into block discard requests within backend code.
+37 -16
drivers/target/target_core_xcopy.c
··· 82 82 mutex_lock(&g_device_mutex); 83 83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 84 84 85 + if (!se_dev->dev_attrib.emulate_3pc) 86 + continue; 87 + 85 88 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 86 89 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 87 90 ··· 360 357 struct se_cmd se_cmd; 361 358 struct xcopy_op *xcopy_op; 362 359 struct completion xpt_passthrough_sem; 360 + unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; 363 361 }; 364 362 365 363 static struct se_port xcopy_pt_port; ··· 679 675 680 676 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", 681 677 se_cmd->scsi_status); 682 - return 0; 678 + 679 + return (se_cmd->scsi_status) ? -EINVAL : 0; 683 680 } 684 681 685 682 static int target_xcopy_read_source( ··· 713 708 (unsigned long long)src_lba, src_sectors, length); 714 709 715 710 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 716 - DMA_FROM_DEVICE, 0, NULL); 711 + DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]); 717 712 xop->src_pt_cmd = xpt_cmd; 718 713 719 714 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], ··· 773 768 (unsigned long long)dst_lba, dst_sectors, length); 774 769 775 770 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 776 - DMA_TO_DEVICE, 0, NULL); 771 + DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]); 777 772 xop->dst_pt_cmd = xpt_cmd; 778 773 779 774 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], ··· 889 884 890 885 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 891 886 { 887 + struct se_device *dev = se_cmd->se_dev; 892 888 struct xcopy_op *xop = NULL; 893 889 unsigned char *p = NULL, *seg_desc; 894 890 unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 891 + sense_reason_t ret = TCM_INVALID_PARAMETER_LIST; 895 892 int rc; 896 893 unsigned short tdll; 894 + 895 + if (!dev->dev_attrib.emulate_3pc) { 896 + pr_err("EXTENDED_COPY operation explicitly disabled\n"); 897 + return TCM_UNSUPPORTED_SCSI_OPCODE; 898 + } 897 899 898 900 sa = se_cmd->t_task_cdb[1] & 0x1f; 899 901 if (sa != 0x00) { ··· 908 896 return TCM_UNSUPPORTED_SCSI_OPCODE; 909 897 } 910 898 899 + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); 900 + if (!xop) { 901 + pr_err("Unable to allocate xcopy_op\n"); 902 + return TCM_OUT_OF_RESOURCES; 903 + } 904 + xop->xop_se_cmd = se_cmd; 905 + 911 906 p = transport_kmap_data_sg(se_cmd); 912 907 if (!p) { 913 908 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 909 + kfree(xop); 914 910 return TCM_OUT_OF_RESOURCES; 915 911 } 916 912 917 913 list_id = p[0]; 918 - if (list_id != 0x00) { 919 - pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id); 920 - goto out; 921 - } 922 - list_id_usage = (p[1] & 0x18); 914 + list_id_usage = (p[1] & 0x18) >> 3; 915 + 923 916 /* 924 917 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH 925 918 */ ··· 937 920 goto out; 938 921 } 939 922 940 - xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); 941 - if (!xop) { 942 - pr_err("Unable to allocate xcopy_op\n"); 943 - goto out; 944 - } 945 - xop->xop_se_cmd = se_cmd; 946 - 947 923 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" 948 924 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 949 925 tdll, sdll, inline_dl); ··· 944 934 rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll); 945 935 if (rc <= 0) 946 936 goto out; 937 + 938 + if (xop->src_dev->dev_attrib.block_size != 939 + xop->dst_dev->dev_attrib.block_size) { 940 + pr_err("XCOPY: Non matching src_dev 
block_size: %u + dst_dev" 941 + " block_size: %u currently unsupported\n", 942 + xop->src_dev->dev_attrib.block_size, 943 + xop->dst_dev->dev_attrib.block_size); 944 + xcopy_pt_undepend_remotedev(xop); 945 + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 946 + goto out; 947 + } 947 948 948 949 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 949 950 rc * XCOPY_TARGET_DESC_LEN); ··· 978 957 if (p) 979 958 transport_kunmap_data_sg(se_cmd); 980 959 kfree(xop); 981 - return TCM_INVALID_CDB_FIELD; 960 + return ret; 982 961 } 983 962 984 963 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
-2
drivers/thermal/samsung/exynos_thermal_common.c
··· 310 310 } 311 311 312 312 th_zone = conf->pzone_data; 313 - if (th_zone->therm_dev) 314 - return; 315 313 316 314 if (th_zone->bind == false) { 317 315 for (i = 0; i < th_zone->cool_dev_size; i++) {
+8 -4
drivers/thermal/samsung/exynos_tmu.c
··· 317 317 318 318 con = readl(data->base + reg->tmu_ctrl); 319 319 320 + if (pdata->test_mux) 321 + con |= (pdata->test_mux << reg->test_mux_addr_shift); 322 + 320 323 if (pdata->reference_voltage) { 321 324 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift); 322 325 con |= pdata->reference_voltage << reg->buf_vref_sel_shift; ··· 491 488 }, 492 489 { 493 490 .compatible = "samsung,exynos4412-tmu", 494 - .data = (void *)EXYNOS5250_TMU_DRV_DATA, 491 + .data = (void *)EXYNOS4412_TMU_DRV_DATA, 495 492 }, 496 493 { 497 494 .compatible = "samsung,exynos5250-tmu", ··· 632 629 if (ret) 633 630 return ret; 634 631 635 - if (pdata->type == SOC_ARCH_EXYNOS || 636 - pdata->type == SOC_ARCH_EXYNOS4210 || 637 - pdata->type == SOC_ARCH_EXYNOS5440) 632 + if (pdata->type == SOC_ARCH_EXYNOS4210 || 633 + pdata->type == SOC_ARCH_EXYNOS4412 || 634 + pdata->type == SOC_ARCH_EXYNOS5250 || 635 + pdata->type == SOC_ARCH_EXYNOS5440) 638 636 data->soc = pdata->type; 639 637 else { 640 638 ret = -EINVAL;
+6 -1
drivers/thermal/samsung/exynos_tmu.h
··· 41 41 42 42 enum soc_type { 43 43 SOC_ARCH_EXYNOS4210 = 1, 44 - SOC_ARCH_EXYNOS, 44 + SOC_ARCH_EXYNOS4412, 45 + SOC_ARCH_EXYNOS5250, 45 46 SOC_ARCH_EXYNOS5440, 46 47 }; 47 48 ··· 85 84 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl 86 85 reg. 87 86 * @tmu_ctrl: TMU main controller register. 87 + * @test_mux_addr_shift: shift bits of test mux address. 88 88 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register. 89 89 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register. 90 90 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register. ··· 152 150 u32 triminfo_reload_shift; 153 151 154 152 u32 tmu_ctrl; 153 + u32 test_mux_addr_shift; 155 154 u32 buf_vref_sel_shift; 156 155 u32 buf_vref_sel_mask; 157 156 u32 therm_trip_mode_shift; ··· 260 257 * @first_point_trim: temp value of the first point trimming 261 258 * @second_point_trim: temp value of the second point trimming 262 259 * @default_temp_offset: default temperature offset in case of no trimming 260 + * @test_mux; information if SoC supports test MUX 263 261 * @cal_type: calibration type for temperature 264 262 * @cal_mode: calibration mode for temperature 265 263 * @freq_clip_table: Table representing frequency reduction percentage. ··· 290 286 u8 first_point_trim; 291 287 u8 second_point_trim; 292 288 u8 default_temp_offset; 289 + u8 test_mux; 293 290 294 291 enum calibration_type cal_type; 295 292 enum calibration_mode cal_mode;
+24 -6
drivers/thermal/samsung/exynos_tmu_data.c
··· 90 90 }; 91 91 #endif 92 92 93 - #if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) 94 - static const struct exynos_tmu_registers exynos5250_tmu_registers = { 93 + #if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250) 94 + static const struct exynos_tmu_registers exynos4412_tmu_registers = { 95 95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 96 96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, 97 97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, 98 98 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON, 99 99 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT, 100 100 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 101 + .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT, 101 102 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, 102 103 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, 103 104 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, ··· 129 128 .emul_time_mask = EXYNOS_EMUL_TIME_MASK, 130 129 }; 131 130 132 - #define EXYNOS5250_TMU_DATA \ 131 + #define EXYNOS4412_TMU_DATA \ 133 132 .threshold_falling = 10, \ 134 133 .trigger_levels[0] = 85, \ 135 134 .trigger_levels[1] = 103, \ ··· 163 162 .temp_level = 103, \ 164 163 }, \ 165 164 .freq_tab_count = 2, \ 166 - .type = SOC_ARCH_EXYNOS, \ 167 - .registers = &exynos5250_tmu_registers, \ 165 + .registers = &exynos4412_tmu_registers, \ 168 166 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 169 167 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 170 168 TMU_SUPPORT_EMUL_TIME) 169 + #endif 171 170 171 + #if defined(CONFIG_SOC_EXYNOS4412) 172 + struct exynos_tmu_init_data const exynos4412_default_tmu_data = { 173 + .tmu_data = { 174 + { 175 + EXYNOS4412_TMU_DATA, 176 + .type = SOC_ARCH_EXYNOS4412, 177 + .test_mux = EXYNOS4412_MUX_ADDR_VALUE, 178 + }, 179 + }, 180 + .tmu_count = 1, 181 + }; 182 + #endif 183 + 184 + #if defined(CONFIG_SOC_EXYNOS5250) 172 185 struct exynos_tmu_init_data const exynos5250_default_tmu_data = { 173 186 .tmu_data = { 174 - { EXYNOS5250_TMU_DATA }, 187 + { 188 + EXYNOS4412_TMU_DATA, 189 + .type = SOC_ARCH_EXYNOS5250, 190 + }, 175 191 }, 176 192 .tmu_count = 1, 177 193 };
+12 -1
drivers/thermal/samsung/exynos_tmu_data.h
··· 95 95 96 96 #define EXYNOS_MAX_TRIGGER_PER_REG 4 97 97 98 + /* Exynos4412 specific */ 99 + #define EXYNOS4412_MUX_ADDR_VALUE 6 100 + #define EXYNOS4412_MUX_ADDR_SHIFT 20 101 + 98 102 /*exynos5440 specific registers*/ 99 103 #define EXYNOS5440_TMU_S0_7_TRIM 0x000 100 104 #define EXYNOS5440_TMU_S0_7_CTRL 0x020 ··· 142 138 #define EXYNOS4210_TMU_DRV_DATA (NULL) 143 139 #endif 144 140 145 - #if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)) 141 + #if defined(CONFIG_SOC_EXYNOS4412) 142 + extern struct exynos_tmu_init_data const exynos4412_default_tmu_data; 143 + #define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data) 144 + #else 145 + #define EXYNOS4412_TMU_DRV_DATA (NULL) 146 + #endif 147 + 148 + #if defined(CONFIG_SOC_EXYNOS5250) 146 149 extern struct exynos_tmu_init_data const exynos5250_default_tmu_data; 147 150 #define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data) 148 151 #else
+1 -1
drivers/thermal/thermal_hwmon.c
··· 159 159 160 160 INIT_LIST_HEAD(&hwmon->tz_list); 161 161 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); 162 - hwmon->device = hwmon_device_register(&tz->device); 162 + hwmon->device = hwmon_device_register(NULL); 163 163 if (IS_ERR(hwmon->device)) { 164 164 result = PTR_ERR(hwmon->device); 165 165 goto free_mem;
+1
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
··· 110 110 } else { 111 111 dev_err(bgp->dev, 112 112 "Failed to read PCB state. Using defaults\n"); 113 + ret = 0; 113 114 } 114 115 } 115 116 *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
+8 -6
drivers/thermal/x86_pkg_temp_thermal.c
··· 316 316 int phy_id = topology_physical_package_id(cpu); 317 317 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); 318 318 bool notify = false; 319 + unsigned long flags; 319 320 320 321 if (!phdev) 321 322 return; 322 323 323 - spin_lock(&pkg_work_lock); 324 + spin_lock_irqsave(&pkg_work_lock, flags); 324 325 ++pkg_work_cnt; 325 326 if (unlikely(phy_id > max_phy_id)) { 326 - spin_unlock(&pkg_work_lock); 327 + spin_unlock_irqrestore(&pkg_work_lock, flags); 327 328 return; 328 329 } 329 330 pkg_work_scheduled[phy_id] = 0; 330 - spin_unlock(&pkg_work_lock); 331 + spin_unlock_irqrestore(&pkg_work_lock, flags); 331 332 332 333 enable_pkg_thres_interrupt(); 333 334 rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); ··· 398 397 int thres_count; 399 398 u32 eax, ebx, ecx, edx; 400 399 u8 *temp; 400 + unsigned long flags; 401 401 402 402 cpuid(6, &eax, &ebx, &ecx, &edx); 403 403 thres_count = ebx & 0x07; ··· 422 420 goto err_ret_unlock; 423 421 } 424 422 425 - spin_lock(&pkg_work_lock); 423 + spin_lock_irqsave(&pkg_work_lock, flags); 426 424 if (topology_physical_package_id(cpu) > max_phy_id) 427 425 max_phy_id = topology_physical_package_id(cpu); 428 426 temp = krealloc(pkg_work_scheduled, 429 427 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); 430 428 if (!temp) { 431 - spin_unlock(&pkg_work_lock); 429 + spin_unlock_irqrestore(&pkg_work_lock, flags); 432 430 err = -ENOMEM; 433 431 goto err_ret_free; 434 432 } 435 433 pkg_work_scheduled = temp; 436 434 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; 437 - spin_unlock(&pkg_work_lock); 435 + spin_unlock_irqrestore(&pkg_work_lock, flags); 438 436 439 437 phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu); 440 438 phy_dev_entry->first_cpu = cpu;
+1 -1
drivers/vhost/scsi.c
··· 1056 1056 if (data_direction != DMA_NONE) { 1057 1057 ret = vhost_scsi_map_iov_to_sgl(cmd, 1058 1058 &vq->iov[data_first], data_num, 1059 - data_direction == DMA_TO_DEVICE); 1059 + data_direction == DMA_FROM_DEVICE); 1060 1060 if (unlikely(ret)) { 1061 1061 vq_err(vq, "Failed to map iov to sgl\n"); 1062 1062 goto err_free;
+7 -8
fs/dcache.c
··· 1331 1331 * list is non-empty and continue searching. 1332 1332 */ 1333 1333 1334 - /** 1335 - * have_submounts - check for mounts over a dentry 1336 - * @parent: dentry to check. 1337 - * 1338 - * Return true if the parent or its subdirectories contain 1339 - * a mount point 1340 - */ 1341 - 1342 1334 static enum d_walk_ret check_mount(void *data, struct dentry *dentry) 1343 1335 { 1344 1336 int *ret = data; ··· 1341 1349 return D_WALK_CONTINUE; 1342 1350 } 1343 1351 1352 + /** 1353 + * have_submounts - check for mounts over a dentry 1354 + * @parent: dentry to check. 1355 + * 1356 + * Return true if the parent or its subdirectories contain 1357 + * a mount point 1358 + */ 1344 1359 int have_submounts(struct dentry *parent) 1345 1360 { 1346 1361 int ret = 0;
+1 -1
fs/ecryptfs/crypto.c
··· 408 408 struct page *page) 409 409 { 410 410 return ecryptfs_lower_header_size(crypt_stat) + 411 - (page->index << PAGE_CACHE_SHIFT); 411 + ((loff_t)page->index << PAGE_CACHE_SHIFT); 412 412 } 413 413 414 414 /**
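The (loff_t) cast matters on 32-bit builds: page->index is an unsigned long there, so the shift by PAGE_CACHE_SHIFT is performed in 32 bits and wraps for offsets at or beyond 4 GiB. A standalone illustration of the widen-before-shift rule (the 12-bit shift and the index value are made up for the demo):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t index = 0x00100000;		/* page 1M, i.e. byte offset 4 GiB */
	int64_t wrong = index << 12;		/* shift done in 32 bits: wraps to 0 */
	int64_t right = (int64_t)index << 12;	/* widen first, then shift */

	printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
	return 0;
}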
+2 -1
fs/ecryptfs/keystore.c
··· 1149 1149 struct ecryptfs_msg_ctx *msg_ctx; 1150 1150 struct ecryptfs_message *msg = NULL; 1151 1151 char *auth_tok_sig; 1152 - char *payload; 1152 + char *payload = NULL; 1153 1153 size_t payload_len = 0; 1154 1154 int rc; 1155 1155 ··· 1203 1203 } 1204 1204 out: 1205 1205 kfree(msg); 1206 + kfree(payload); 1206 1207 return rc; 1207 1208 } 1208 1209
+2 -2
fs/file_table.c
··· 297 297 delayed_fput(NULL); 298 298 } 299 299 300 - static DECLARE_WORK(delayed_fput_work, delayed_fput); 300 + static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); 301 301 302 302 void fput(struct file *file) 303 303 { ··· 317 317 } 318 318 319 319 if (llist_add(&file->f_u.fu_llist, &delayed_fput_list)) 320 - schedule_work(&delayed_fput_work); 320 + schedule_delayed_work(&delayed_fput_work, 1); 321 321 } 322 322 } 323 323
+1 -2
fs/jfs/jfs_inode.c
··· 95 95 96 96 if (insert_inode_locked(inode) < 0) { 97 97 rc = -EINVAL; 98 - goto fail_unlock; 98 + goto fail_put; 99 99 } 100 100 101 101 inode_init_owner(inode, parent, mode); ··· 156 156 fail_drop: 157 157 dquot_drop(inode); 158 158 inode->i_flags |= S_NOQUOTA; 159 - fail_unlock: 160 159 clear_nlink(inode); 161 160 unlock_new_inode(inode); 162 161 fail_put:
+2 -1
fs/namei.c
··· 2294 2294 * path_mountpoint - look up a path to be umounted 2295 2295 * @dfd: directory file descriptor to start walk from 2296 2296 * @name: full pathname to walk 2297 + * @path: pointer to container for result 2297 2298 * @flags: lookup flags 2298 2299 * 2299 2300 * Look up the given name, but don't attempt to revalidate the last component. 2300 - * Returns 0 and "path" will be valid on success; Retuns error otherwise. 2301 + * Returns 0 and "path" will be valid on success; Returns error otherwise. 2301 2302 */ 2302 2303 static int 2303 2304 path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
+2
fs/seq_file.c
··· 328 328 m->read_pos = offset; 329 329 retval = file->f_pos = offset; 330 330 } 331 + } else { 332 + file->f_pos = offset; 331 333 } 332 334 } 333 335 file->f_version = m->version;
+11 -4
include/linux/filter.h
··· 6 6 7 7 #include <linux/atomic.h> 8 8 #include <linux/compat.h> 9 + #include <linux/workqueue.h> 9 10 #include <uapi/linux/filter.h> 10 11 11 12 #ifdef CONFIG_COMPAT ··· 26 25 { 27 26 atomic_t refcnt; 28 27 unsigned int len; /* Number of filter blocks */ 28 + struct rcu_head rcu; 29 29 unsigned int (*bpf_func)(const struct sk_buff *skb, 30 30 const struct sock_filter *filter); 31 - struct rcu_head rcu; 32 - struct sock_filter insns[0]; 31 + union { 32 + struct sock_filter insns[0]; 33 + struct work_struct work; 34 + }; 33 35 }; 34 36 35 - static inline unsigned int sk_filter_len(const struct sk_filter *fp) 37 + static inline unsigned int sk_filter_size(unsigned int proglen) 36 38 { 37 - return fp->len * sizeof(struct sock_filter) + sizeof(*fp); 39 + return max(sizeof(struct sk_filter), 40 + offsetof(struct sk_filter, insns[proglen])); 38 41 } 39 42 40 43 extern int sk_filter(struct sock *sk, struct sk_buff *skb); ··· 72 67 } 73 68 #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) 74 69 #else 70 + #include <linux/slab.h> 75 71 static inline void bpf_jit_compile(struct sk_filter *fp) 76 72 { 77 73 } 78 74 static inline void bpf_jit_free(struct sk_filter *fp) 79 75 { 76 + kfree(fp); 80 77 } 81 78 #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns) 82 79 #endif
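sk_filter_size() has to cope with insns[] now sharing a union with the deferred-free work item, so even a one-instruction filter must be allocated with room for the union's other member; hence max(sizeof(struct), offsetof(..., insns[proglen])) instead of a plain header-plus-array sum. A hedged userspace sketch of that sizing idea, using plain types and relying on the same GCC zero-length-array and __builtin_offsetof behaviour the kernel does:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct prog {
	unsigned int len;
	union {
		unsigned int insns[0];				/* live filter body */
		struct { void *fn, *entry, *data; } work;	/* reused once the filter is dead */
	};
};

static size_t prog_size(unsigned int n)
{
	size_t by_insns = offsetof(struct prog, insns[n]);	/* runtime index: GCC builtin */
	size_t by_struct = sizeof(struct prog);

	return by_insns > by_struct ? by_insns : by_struct;
}

int main(void)
{
	printf("n=1 -> %zu bytes, n=16 -> %zu bytes\n", prog_size(1), prog_size(16));

	struct prog *p = malloc(prog_size(4));	/* always big enough for "work" too */
	free(p);
	return 0;
}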
+3 -2
include/linux/netdevice.h
··· 2264 2264 } 2265 2265 2266 2266 #ifdef CONFIG_XPS 2267 - extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, 2267 + extern int netif_set_xps_queue(struct net_device *dev, 2268 + const struct cpumask *mask, 2268 2269 u16 index); 2269 2270 #else 2270 2271 static inline int netif_set_xps_queue(struct net_device *dev, 2271 - struct cpumask *mask, 2272 + const struct cpumask *mask, 2272 2273 u16 index) 2273 2274 { 2274 2275 return 0;
+1 -1
include/linux/tc_act/tc_defact.h include/uapi/linux/tc_act/tc_defact.h
··· 6 6 struct tc_defact { 7 7 tc_gen; 8 8 }; 9 - 9 + 10 10 enum { 11 11 TCA_DEF_UNSPEC, 12 12 TCA_DEF_TM,
+1 -1
include/linux/yam.h
··· 77 77 78 78 struct yamdrv_ioctl_mcs { 79 79 int cmd; 80 - int bitrate; 80 + unsigned int bitrate; 81 81 unsigned char bits[YAM_FPGA_SIZE]; 82 82 };
+4 -2
include/net/cipso_ipv4.h
··· 290 290 unsigned char err_offset = 0; 291 291 u8 opt_len = opt[1]; 292 292 u8 opt_iter; 293 + u8 tag_len; 293 294 294 295 if (opt_len < 8) { 295 296 err_offset = 1; ··· 303 302 } 304 303 305 304 for (opt_iter = 6; opt_iter < opt_len;) { 306 - if (opt[opt_iter + 1] > (opt_len - opt_iter)) { 305 + tag_len = opt[opt_iter + 1]; 306 + if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) { 307 307 err_offset = opt_iter + 1; 308 308 goto out; 309 309 } 310 - opt_iter += opt[opt_iter + 1]; 310 + opt_iter += tag_len; 311 311 } 312 312 313 313 out:
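The added tag_len test closes a loop-termination hole: a corrupt tag with length 0 would leave opt_iter stuck and spin the validator forever. A minimal userspace walk over the same kind of type/length layout (buffer contents invented for the demo):

#include <stdio.h>

int main(void)
{
	/* [type][len][4-byte DOI][tag type][tag len][...]; the tag walk starts at offset 6 */
	unsigned char opt[] = { 134, 12, 0, 0, 0, 1,  1, 4, 0, 0,  7, 0 };
	unsigned char opt_len = opt[1];
	unsigned char opt_iter, tag_len;

	for (opt_iter = 6; opt_iter < opt_len;) {
		tag_len = opt[opt_iter + 1];
		if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
			printf("bad tag at offset %d\n", opt_iter);
			break;	/* without the tag_len == 0 test this loop never advances */
		}
		printf("tag type %d, len %d\n", opt[opt_iter], tag_len);
		opt_iter += tag_len;
	}
	return 0;
}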
+12
include/net/dst.h
··· 479 479 { 480 480 return dst_orig; 481 481 } 482 + 483 + static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) 484 + { 485 + return NULL; 486 + } 487 + 482 488 #else 483 489 extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 484 490 const struct flowi *fl, struct sock *sk, 485 491 int flags); 492 + 493 + /* skb attached with this dst needs transformation if dst->xfrm is valid */ 494 + static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) 495 + { 496 + return dst->xfrm; 497 + } 486 498 #endif 487 499 488 500 #endif /* _NET_DST_H */
+2 -4
include/net/ip6_route.h
··· 194 194 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); 195 195 } 196 196 197 - static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest) 197 + static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt) 198 198 { 199 - if (rt->rt6i_flags & RTF_GATEWAY) 200 - return &rt->rt6i_gateway; 201 - return dest; 199 + return &rt->rt6i_gateway; 202 200 } 203 201 204 202 #endif
+1 -1
include/net/mac802154.h
··· 133 133 134 134 /* Basic interface to register ieee802154 device */ 135 135 struct ieee802154_dev * 136 - ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops); 136 + ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops); 137 137 void ieee802154_free_device(struct ieee802154_dev *dev); 138 138 int ieee802154_register_device(struct ieee802154_dev *dev); 139 139 void ieee802154_unregister_device(struct ieee802154_dev *dev);
+2 -4
include/net/sock.h
··· 1630 1630 1631 1631 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) 1632 1632 { 1633 - unsigned int size = sk_filter_len(fp); 1634 - 1635 - atomic_sub(size, &sk->sk_omem_alloc); 1633 + atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc); 1636 1634 sk_filter_release(fp); 1637 1635 } 1638 1636 1639 1637 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) 1640 1638 { 1641 1639 atomic_inc(&fp->refcnt); 1642 - atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc); 1640 + atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc); 1643 1641 } 1644 1642 1645 1643 /*
+2
include/sound/cs42l52.h
··· 31 31 /* Charge Pump Freq. Check datasheet Pg73 */ 32 32 unsigned int chgfreq; 33 33 34 + /* Reset GPIO */ 35 + unsigned int reset_gpio; 34 36 }; 35 37 36 38 #endif /* __CS42L52_H */
+2 -2
include/trace/events/target.h
··· 144 144 ), 145 145 146 146 TP_fast_assign( 147 - __entry->unpacked_lun = cmd->se_lun->unpacked_lun; 147 + __entry->unpacked_lun = cmd->orig_fe_lun; 148 148 __entry->opcode = cmd->t_task_cdb[0]; 149 149 __entry->data_length = cmd->data_length; 150 150 __entry->task_attribute = cmd->sam_task_attr; ··· 182 182 ), 183 183 184 184 TP_fast_assign( 185 - __entry->unpacked_lun = cmd->se_lun->unpacked_lun; 185 + __entry->unpacked_lun = cmd->orig_fe_lun; 186 186 __entry->opcode = cmd->t_task_cdb[0]; 187 187 __entry->data_length = cmd->data_length; 188 188 __entry->task_attribute = cmd->sam_task_attr;
+2
include/uapi/drm/drm_mode.h
··· 223 223 __u32 connection; 224 224 __u32 mm_width, mm_height; /**< HxW in millimeters */ 225 225 __u32 subpixel; 226 + 227 + __u32 pad; 226 228 }; 227 229 228 230 #define DRM_MODE_PROP_PENDING (1<<0)
+1
include/uapi/linux/tc_act/Kbuild
··· 1 1 # UAPI Header export list 2 2 header-y += tc_csum.h 3 + header-y += tc_defact.h 3 4 header-y += tc_gact.h 4 5 header-y += tc_ipt.h 5 6 header-y += tc_mirred.h
+6
include/uapi/rdma/ib_user_verbs.h
··· 87 87 IB_USER_VERBS_CMD_CLOSE_XRCD, 88 88 IB_USER_VERBS_CMD_CREATE_XSRQ, 89 89 IB_USER_VERBS_CMD_OPEN_QP, 90 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 90 91 IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 91 92 IB_USER_VERBS_CMD_DESTROY_FLOW 93 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 92 94 }; 93 95 94 96 /* ··· 128 126 __u16 out_words; 129 127 }; 130 128 129 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 131 130 struct ib_uverbs_cmd_hdr_ex { 132 131 __u32 command; 133 132 __u16 in_words; ··· 137 134 __u16 provider_out_words; 138 135 __u32 cmd_hdr_reserved; 139 136 }; 137 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 140 138 141 139 struct ib_uverbs_get_context { 142 140 __u64 response; ··· 700 696 __u64 driver_data[0]; 701 697 }; 702 698 699 + #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 703 700 struct ib_kern_eth_filter { 704 701 __u8 dst_mac[6]; 705 702 __u8 src_mac[6]; ··· 785 780 __u32 comp_mask; 786 781 __u32 flow_handle; 787 782 }; 783 + #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 788 784 789 785 struct ib_uverbs_create_srq { 790 786 __u64 response;
+6 -8
kernel/cgroup.c
··· 2039 2039 2040 2040 /* @tsk either already exited or can't exit until the end */ 2041 2041 if (tsk->flags & PF_EXITING) 2042 - continue; 2042 + goto next; 2043 2043 2044 2044 /* as per above, nr_threads may decrease, but not increase. */ 2045 2045 BUG_ON(i >= group_size); ··· 2047 2047 ent.cgrp = task_cgroup_from_root(tsk, root); 2048 2048 /* nothing to do if this task is already in the cgroup */ 2049 2049 if (ent.cgrp == cgrp) 2050 - continue; 2050 + goto next; 2051 2051 /* 2052 2052 * saying GFP_ATOMIC has no effect here because we did prealloc 2053 2053 * earlier, but it's good form to communicate our expectations. ··· 2055 2055 retval = flex_array_put(group, i, &ent, GFP_ATOMIC); 2056 2056 BUG_ON(retval != 0); 2057 2057 i++; 2058 - 2058 + next: 2059 2059 if (!threadgroup) 2060 2060 break; 2061 2061 } while_each_thread(leader, tsk); ··· 3188 3188 3189 3189 WARN_ON_ONCE(!rcu_read_lock_held()); 3190 3190 3191 - /* if first iteration, visit the leftmost descendant */ 3192 - if (!pos) { 3193 - next = css_leftmost_descendant(root); 3194 - return next != root ? next : NULL; 3195 - } 3191 + /* if first iteration, visit leftmost descendant which may be @root */ 3192 + if (!pos) 3193 + return css_leftmost_descendant(root); 3196 3194 3197 3195 /* if we visited @root, we're done */ 3198 3196 if (pos == root)
+4
kernel/events/core.c
··· 6767 6767 if (ret) 6768 6768 return -EFAULT; 6769 6769 6770 + /* disabled for now */ 6771 + if (attr->mmap2) 6772 + return -EINVAL; 6773 + 6770 6774 if (attr->__reserved_1) 6771 6775 return -EINVAL; 6772 6776
+16 -16
kernel/mutex.c
··· 410 410 static __always_inline int __sched 411 411 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 412 412 struct lockdep_map *nest_lock, unsigned long ip, 413 - struct ww_acquire_ctx *ww_ctx) 413 + struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) 414 414 { 415 415 struct task_struct *task = current; 416 416 struct mutex_waiter waiter; ··· 450 450 struct task_struct *owner; 451 451 struct mspin_node node; 452 452 453 - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { 453 + if (use_ww_ctx && ww_ctx->acquired > 0) { 454 454 struct ww_mutex *ww; 455 455 456 456 ww = container_of(lock, struct ww_mutex, base); ··· 480 480 if ((atomic_read(&lock->count) == 1) && 481 481 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { 482 482 lock_acquired(&lock->dep_map, ip); 483 - if (!__builtin_constant_p(ww_ctx == NULL)) { 483 + if (use_ww_ctx) { 484 484 struct ww_mutex *ww; 485 485 ww = container_of(lock, struct ww_mutex, base); 486 486 ··· 551 551 goto err; 552 552 } 553 553 554 - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { 554 + if (use_ww_ctx && ww_ctx->acquired > 0) { 555 555 ret = __mutex_lock_check_stamp(lock, ww_ctx); 556 556 if (ret) 557 557 goto err; ··· 575 575 lock_acquired(&lock->dep_map, ip); 576 576 mutex_set_owner(lock); 577 577 578 - if (!__builtin_constant_p(ww_ctx == NULL)) { 578 + if (use_ww_ctx) { 579 579 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 580 580 struct mutex_waiter *cur; 581 581 ··· 615 615 { 616 616 might_sleep(); 617 617 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 618 - subclass, NULL, _RET_IP_, NULL); 618 + subclass, NULL, _RET_IP_, NULL, 0); 619 619 } 620 620 621 621 EXPORT_SYMBOL_GPL(mutex_lock_nested); ··· 625 625 { 626 626 might_sleep(); 627 627 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 628 - 0, nest, _RET_IP_, NULL); 628 + 0, nest, _RET_IP_, NULL, 0); 629 629 } 630 630 631 631 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); ··· 635 635 { 636 636 might_sleep(); 637 637 return __mutex_lock_common(lock, TASK_KILLABLE, 638 - subclass, NULL, _RET_IP_, NULL); 638 + subclass, NULL, _RET_IP_, NULL, 0); 639 639 } 640 640 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 641 641 ··· 644 644 { 645 645 might_sleep(); 646 646 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 647 - subclass, NULL, _RET_IP_, NULL); 647 + subclass, NULL, _RET_IP_, NULL, 0); 648 648 } 649 649 650 650 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); ··· 682 682 683 683 might_sleep(); 684 684 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 685 - 0, &ctx->dep_map, _RET_IP_, ctx); 685 + 0, &ctx->dep_map, _RET_IP_, ctx, 1); 686 686 if (!ret && ctx->acquired > 1) 687 687 return ww_mutex_deadlock_injection(lock, ctx); 688 688 ··· 697 697 698 698 might_sleep(); 699 699 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 700 - 0, &ctx->dep_map, _RET_IP_, ctx); 700 + 0, &ctx->dep_map, _RET_IP_, ctx, 1); 701 701 702 702 if (!ret && ctx->acquired > 1) 703 703 return ww_mutex_deadlock_injection(lock, ctx); ··· 809 809 struct mutex *lock = container_of(lock_count, struct mutex, count); 810 810 811 811 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, 812 - NULL, _RET_IP_, NULL); 812 + NULL, _RET_IP_, NULL, 0); 813 813 } 814 814 815 815 static noinline int __sched 816 816 __mutex_lock_killable_slowpath(struct mutex *lock) 817 817 { 818 818 return __mutex_lock_common(lock, TASK_KILLABLE, 0, 819 - NULL, _RET_IP_, NULL); 819 + NULL, _RET_IP_, NULL, 0); 820 820 } 821 821 822 822 static noinline 
int __sched 823 823 __mutex_lock_interruptible_slowpath(struct mutex *lock) 824 824 { 825 825 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, 826 - NULL, _RET_IP_, NULL); 826 + NULL, _RET_IP_, NULL, 0); 827 827 } 828 828 829 829 static noinline int __sched 830 830 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 831 831 { 832 832 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, 833 - NULL, _RET_IP_, ctx); 833 + NULL, _RET_IP_, ctx, 1); 834 834 } 835 835 836 836 static noinline int __sched ··· 838 838 struct ww_acquire_ctx *ctx) 839 839 { 840 840 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, 841 - NULL, _RET_IP_, ctx); 841 + NULL, _RET_IP_, ctx, 1); 842 842 } 843 843 844 844 #endif
+1 -1
kernel/power/hibernate.c
··· 846 846 goto Finish; 847 847 } 848 848 849 - late_initcall(software_resume); 849 + late_initcall_sync(software_resume); 850 850 851 851 852 852 static const char * const hibernation_modes[] = {
+51 -16
kernel/time/clockevents.c
··· 33 33 int res; 34 34 }; 35 35 36 + static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt, 37 + bool ismax) 38 + { 39 + u64 clc = (u64) latch << evt->shift; 40 + u64 rnd; 41 + 42 + if (unlikely(!evt->mult)) { 43 + evt->mult = 1; 44 + WARN_ON(1); 45 + } 46 + rnd = (u64) evt->mult - 1; 47 + 48 + /* 49 + * Upper bound sanity check. If the backwards conversion is 50 + * not equal latch, we know that the above shift overflowed. 51 + */ 52 + if ((clc >> evt->shift) != (u64)latch) 53 + clc = ~0ULL; 54 + 55 + /* 56 + * Scaled math oddities: 57 + * 58 + * For mult <= (1 << shift) we can safely add mult - 1 to 59 + * prevent integer rounding loss. So the backwards conversion 60 + * from nsec to device ticks will be correct. 61 + * 62 + * For mult > (1 << shift), i.e. device frequency is > 1GHz we 63 + * need to be careful. Adding mult - 1 will result in a value 64 + * which when converted back to device ticks can be larger 65 + * than latch by up to (mult - 1) >> shift. For the min_delta 66 + * calculation we still want to apply this in order to stay 67 + * above the minimum device ticks limit. For the upper limit 68 + * we would end up with a latch value larger than the upper 69 + * limit of the device, so we omit the add to stay below the 70 + * device upper boundary. 71 + * 72 + * Also omit the add if it would overflow the u64 boundary. 73 + */ 74 + if ((~0ULL - clc > rnd) && 75 + (!ismax || evt->mult <= (1U << evt->shift))) 76 + clc += rnd; 77 + 78 + do_div(clc, evt->mult); 79 + 80 + /* Deltas less than 1usec are pointless noise */ 81 + return clc > 1000 ? clc : 1000; 82 + } 83 + 36 84 /** 37 85 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 38 86 * @latch: value to convert ··· 90 42 */ 91 43 u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) 92 44 { 93 - u64 clc = (u64) latch << evt->shift; 94 - 95 - if (unlikely(!evt->mult)) { 96 - evt->mult = 1; 97 - WARN_ON(1); 98 - } 99 - 100 - do_div(clc, evt->mult); 101 - if (clc < 1000) 102 - clc = 1000; 103 - if (clc > KTIME_MAX) 104 - clc = KTIME_MAX; 105 - 106 - return clc; 45 + return cev_delta2ns(latch, evt, false); 107 46 } 108 47 EXPORT_SYMBOL_GPL(clockevent_delta2ns); 109 48 ··· 415 380 sec = 600; 416 381 417 382 clockevents_calc_mult_shift(dev, freq, sec); 418 - dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); 419 - dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); 383 + dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false); 384 + dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true); 420 385 } 421 386 422 387 /**
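cev_delta2ns() keeps the old shift-and-divide but bounds it: if shifting the latch up overflows 64 bits the value saturates, and the mult-1 rounding term is only added when it cannot push the result past the device's upper limit. A userspace sketch of the same conversion (the mult/shift pair is invented, roughly a 1 MHz device, not taken from any real clockevent):

#include <stdio.h>
#include <stdint.h>

static uint64_t delta2ns(unsigned long latch, uint32_t mult, uint32_t shift, int ismax)
{
	uint64_t clc = (uint64_t)latch << shift;
	uint64_t rnd = (uint64_t)mult - 1;

	if ((clc >> shift) != (uint64_t)latch)
		clc = ~0ULL;				/* the shift itself overflowed */

	if ((~0ULL - clc > rnd) && (!ismax || mult <= (1U << shift)))
		clc += rnd;				/* round up, except for the upper limit */

	clc /= mult;					/* stand-in for do_div() */
	return clc > 1000 ? clc : 1000;			/* sub-microsecond deltas are noise */
}

int main(void)
{
	printf("min=%llu ns max=%llu ns\n",
	       (unsigned long long)delta2ns(0xf, 16777, 24, 0),
	       (unsigned long long)delta2ns(0xffffffff, 16777, 24, 1));
	return 0;
}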
+1 -1
net/8021q/vlan_netlink.c
··· 171 171 172 172 return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ 173 173 nla_total_size(2) + /* IFLA_VLAN_ID */ 174 - sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ 174 + nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */ 175 175 vlan_qos_map_size(vlan->nr_ingress_mappings) + 176 176 vlan_qos_map_size(vlan->nr_egress_mappings); 177 177 }
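The size estimate now charges IFLA_VLAN_FLAGS as a full netlink attribute (header plus payload, padded to 4 bytes) instead of the bare payload, so the allocated skb cannot come up short. A quick userspace re-derivation of that accounting, with the netlink constants written out locally rather than pulled from the headers:

#include <stdio.h>

#define NLA_ALIGN(len)		(((len) + 3) & ~3)
#define NLA_HDRLEN		4			/* aligned sizeof(struct nlattr) */
#define nla_total_size(pl)	NLA_ALIGN(NLA_HDRLEN + (pl))

struct ifla_vlan_flags { unsigned int flags, mask; };	/* 8 bytes, mirroring the uapi struct */

int main(void)
{
	printf("payload %zu bytes, message cost %d bytes\n",
	       sizeof(struct ifla_vlan_flags),
	       nla_total_size((int)sizeof(struct ifla_vlan_flags)));	/* 8 vs 12 */
	return 0;
}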
+3 -2
net/batman-adv/main.c
··· 65 65 batadv_recv_handler_init(); 66 66 67 67 batadv_iv_init(); 68 + batadv_nc_init(); 68 69 69 70 batadv_event_workqueue = create_singlethread_workqueue("bat_events"); 70 71 ··· 143 142 if (ret < 0) 144 143 goto err; 145 144 146 - ret = batadv_nc_init(bat_priv); 145 + ret = batadv_nc_mesh_init(bat_priv); 147 146 if (ret < 0) 148 147 goto err; 149 148 ··· 168 167 batadv_vis_quit(bat_priv); 169 168 170 169 batadv_gw_node_purge(bat_priv); 171 - batadv_nc_free(bat_priv); 170 + batadv_nc_mesh_free(bat_priv); 172 171 batadv_dat_free(bat_priv); 173 172 batadv_bla_free(bat_priv); 174 173
+18 -10
net/batman-adv/network-coding.c
··· 35 35 struct batadv_hard_iface *recv_if); 36 36 37 37 /** 38 + * batadv_nc_init - one-time initialization for network coding 39 + */ 40 + int __init batadv_nc_init(void) 41 + { 42 + int ret; 43 + 44 + /* Register our packet type */ 45 + ret = batadv_recv_handler_register(BATADV_CODED, 46 + batadv_nc_recv_coded_packet); 47 + 48 + return ret; 49 + } 50 + 51 + /** 38 52 * batadv_nc_start_timer - initialise the nc periodic worker 39 53 * @bat_priv: the bat priv with all the soft interface information 40 54 */ ··· 59 45 } 60 46 61 47 /** 62 - * batadv_nc_init - initialise coding hash table and start house keeping 48 + * batadv_nc_mesh_init - initialise coding hash table and start house keeping 63 49 * @bat_priv: the bat priv with all the soft interface information 64 50 */ 65 - int batadv_nc_init(struct batadv_priv *bat_priv) 51 + int batadv_nc_mesh_init(struct batadv_priv *bat_priv) 66 52 { 67 53 bat_priv->nc.timestamp_fwd_flush = jiffies; 68 54 bat_priv->nc.timestamp_sniffed_purge = jiffies; ··· 83 69 84 70 batadv_hash_set_lock_class(bat_priv->nc.coding_hash, 85 71 &batadv_nc_decoding_hash_lock_class_key); 86 - 87 - /* Register our packet type */ 88 - if (batadv_recv_handler_register(BATADV_CODED, 89 - batadv_nc_recv_coded_packet) < 0) 90 - goto err; 91 72 92 73 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); 93 74 batadv_nc_start_timer(bat_priv); ··· 1730 1721 } 1731 1722 1732 1723 /** 1733 - * batadv_nc_free - clean up network coding memory 1724 + * batadv_nc_mesh_free - clean up network coding memory 1734 1725 * @bat_priv: the bat priv with all the soft interface information 1735 1726 */ 1736 - void batadv_nc_free(struct batadv_priv *bat_priv) 1727 + void batadv_nc_mesh_free(struct batadv_priv *bat_priv) 1737 1728 { 1738 - batadv_recv_handler_unregister(BATADV_CODED); 1739 1729 cancel_delayed_work_sync(&bat_priv->nc.work); 1740 1730 1741 1731 batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
+10 -4
net/batman-adv/network-coding.h
··· 22 22 23 23 #ifdef CONFIG_BATMAN_ADV_NC 24 24 25 - int batadv_nc_init(struct batadv_priv *bat_priv); 26 - void batadv_nc_free(struct batadv_priv *bat_priv); 25 + int batadv_nc_init(void); 26 + int batadv_nc_mesh_init(struct batadv_priv *bat_priv); 27 + void batadv_nc_mesh_free(struct batadv_priv *bat_priv); 27 28 void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, 28 29 struct batadv_orig_node *orig_node, 29 30 struct batadv_orig_node *orig_neigh_node, ··· 47 46 48 47 #else /* ifdef CONFIG_BATMAN_ADV_NC */ 49 48 50 - static inline int batadv_nc_init(struct batadv_priv *bat_priv) 49 + static inline int batadv_nc_init(void) 51 50 { 52 51 return 0; 53 52 } 54 53 55 - static inline void batadv_nc_free(struct batadv_priv *bat_priv) 54 + static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv) 55 + { 56 + return 0; 57 + } 58 + 59 + static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv) 56 60 { 57 61 return; 58 62 }
+2 -2
net/bridge/br_fdb.c
··· 700 700 701 701 vid = nla_get_u16(tb[NDA_VLAN]); 702 702 703 - if (vid >= VLAN_N_VID) { 703 + if (!vid || vid >= VLAN_VID_MASK) { 704 704 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", 705 705 vid); 706 706 return -EINVAL; ··· 794 794 795 795 vid = nla_get_u16(tb[NDA_VLAN]); 796 796 797 - if (vid >= VLAN_N_VID) { 797 + if (!vid || vid >= VLAN_VID_MASK) { 798 798 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", 799 799 vid); 800 800 return -EINVAL;
+1 -1
net/bridge/br_mdb.c
··· 453 453 call_rcu_bh(&p->rcu, br_multicast_free_pg); 454 454 err = 0; 455 455 456 - if (!mp->ports && !mp->mglist && mp->timer_armed && 456 + if (!mp->ports && !mp->mglist && 457 457 netif_running(br->dev)) 458 458 mod_timer(&mp->timer, jiffies); 459 459 break;
+26 -12
net/bridge/br_multicast.c
··· 272 272 del_timer(&p->timer); 273 273 call_rcu_bh(&p->rcu, br_multicast_free_pg); 274 274 275 - if (!mp->ports && !mp->mglist && mp->timer_armed && 275 + if (!mp->ports && !mp->mglist && 276 276 netif_running(br->dev)) 277 277 mod_timer(&mp->timer, jiffies); 278 278 ··· 620 620 621 621 mp->br = br; 622 622 mp->addr = *group; 623 - 624 623 setup_timer(&mp->timer, br_multicast_group_expired, 625 624 (unsigned long)mp); 626 625 ··· 659 660 struct net_bridge_mdb_entry *mp; 660 661 struct net_bridge_port_group *p; 661 662 struct net_bridge_port_group __rcu **pp; 663 + unsigned long now = jiffies; 662 664 int err; 663 665 664 666 spin_lock(&br->multicast_lock); ··· 674 674 675 675 if (!port) { 676 676 mp->mglist = true; 677 + mod_timer(&mp->timer, now + br->multicast_membership_interval); 677 678 goto out; 678 679 } 679 680 ··· 682 681 (p = mlock_dereference(*pp, br)) != NULL; 683 682 pp = &p->next) { 684 683 if (p->port == port) 685 - goto out; 684 + goto found; 686 685 if ((unsigned long)p->port < (unsigned long)port) 687 686 break; 688 687 } ··· 693 692 rcu_assign_pointer(*pp, p); 694 693 br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 695 694 695 + found: 696 + mod_timer(&p->timer, now + br->multicast_membership_interval); 696 697 out: 697 698 err = 0; 698 699 ··· 1194 1191 if (!mp) 1195 1192 goto out; 1196 1193 1197 - mod_timer(&mp->timer, now + br->multicast_membership_interval); 1198 - mp->timer_armed = true; 1199 - 1200 1194 max_delay *= br->multicast_last_member_count; 1201 1195 1202 1196 if (mp->mglist && ··· 1269 1269 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); 1270 1270 if (!mp) 1271 1271 goto out; 1272 - 1273 - mod_timer(&mp->timer, now + br->multicast_membership_interval); 1274 - mp->timer_armed = true; 1275 1272 1276 1273 max_delay *= br->multicast_last_member_count; 1277 1274 if (mp->mglist && ··· 1355 1358 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1356 1359 br_mdb_notify(br->dev, port, group, RTM_DELMDB); 1357 1360 1358 - if (!mp->ports && !mp->mglist && mp->timer_armed && 1361 + if (!mp->ports && !mp->mglist && 1359 1362 netif_running(br->dev)) 1360 1363 mod_timer(&mp->timer, jiffies); 1361 1364 } ··· 1367 1370 br->multicast_last_member_interval; 1368 1371 1369 1372 if (!port) { 1370 - if (mp->mglist && mp->timer_armed && 1373 + if (mp->mglist && 1371 1374 (timer_pending(&mp->timer) ? 1372 1375 time_after(mp->timer.expires, time) : 1373 1376 try_to_del_timer_sync(&mp->timer) >= 0)) { 1374 1377 mod_timer(&mp->timer, time); 1375 1378 } 1379 + 1380 + goto out; 1381 + } 1382 + 1383 + for (p = mlock_dereference(mp->ports, br); 1384 + p != NULL; 1385 + p = mlock_dereference(p->next, br)) { 1386 + if (p->port != port) 1387 + continue; 1388 + 1389 + if (!hlist_unhashed(&p->mglist) && 1390 + (timer_pending(&p->timer) ? 1391 + time_after(p->timer.expires, time) : 1392 + try_to_del_timer_sync(&p->timer) >= 0)) { 1393 + mod_timer(&p->timer, time); 1394 + } 1395 + 1396 + break; 1376 1397 } 1377 1398 out: 1378 1399 spin_unlock(&br->multicast_lock); ··· 1813 1798 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], 1814 1799 hlist[ver]) { 1815 1800 del_timer(&mp->timer); 1816 - mp->timer_armed = false; 1817 1801 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1818 1802 } 1819 1803 }
+1 -1
net/bridge/br_netlink.c
··· 243 243 244 244 vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); 245 245 246 - if (vinfo->vid >= VLAN_N_VID) 246 + if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) 247 247 return -EINVAL; 248 248 249 249 switch (cmd) {
+1 -4
net/bridge/br_private.h
··· 126 126 struct timer_list timer; 127 127 struct br_ip addr; 128 128 bool mglist; 129 - bool timer_armed; 130 129 }; 131 130 132 131 struct net_bridge_mdb_htable ··· 642 643 * vid wasn't set 643 644 */ 644 645 smp_rmb(); 645 - return (v->pvid & VLAN_TAG_PRESENT) ? 646 - (v->pvid & ~VLAN_TAG_PRESENT) : 647 - VLAN_N_VID; 646 + return v->pvid ?: VLAN_N_VID; 648 647 } 649 648 650 649 #else
+1 -1
net/bridge/br_stp_if.c
··· 134 134 135 135 if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) 136 136 __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); 137 - else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) 137 + else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY) 138 138 __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); 139 139 140 140 if (r == 0) {
+66 -57
net/bridge/br_vlan.c
··· 45 45 return 0; 46 46 } 47 47 48 - if (vid) { 49 - if (v->port_idx) { 50 - p = v->parent.port; 51 - br = p->br; 52 - dev = p->dev; 53 - } else { 54 - br = v->parent.br; 55 - dev = br->dev; 56 - } 57 - ops = dev->netdev_ops; 48 + if (v->port_idx) { 49 + p = v->parent.port; 50 + br = p->br; 51 + dev = p->dev; 52 + } else { 53 + br = v->parent.br; 54 + dev = br->dev; 55 + } 56 + ops = dev->netdev_ops; 58 57 59 - if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { 60 - /* Add VLAN to the device filter if it is supported. 61 - * Stricly speaking, this is not necessary now, since 62 - * devices are made promiscuous by the bridge, but if 63 - * that ever changes this code will allow tagged 64 - * traffic to enter the bridge. 65 - */ 66 - err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), 67 - vid); 68 - if (err) 69 - return err; 70 - } 58 + if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { 59 + /* Add VLAN to the device filter if it is supported. 60 + * Stricly speaking, this is not necessary now, since 61 + * devices are made promiscuous by the bridge, but if 62 + * that ever changes this code will allow tagged 63 + * traffic to enter the bridge. 64 + */ 65 + err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), 66 + vid); 67 + if (err) 68 + return err; 69 + } 71 70 72 - err = br_fdb_insert(br, p, dev->dev_addr, vid); 73 - if (err) { 74 - br_err(br, "failed insert local address into bridge " 75 - "forwarding table\n"); 76 - goto out_filt; 77 - } 78 - 71 + err = br_fdb_insert(br, p, dev->dev_addr, vid); 72 + if (err) { 73 + br_err(br, "failed insert local address into bridge " 74 + "forwarding table\n"); 75 + goto out_filt; 79 76 } 80 77 81 78 set_bit(vid, v->vlan_bitmap); ··· 95 98 __vlan_delete_pvid(v, vid); 96 99 clear_bit(vid, v->untagged_bitmap); 97 100 98 - if (v->port_idx && vid) { 101 + if (v->port_idx) { 99 102 struct net_device *dev = v->parent.port->dev; 100 103 const struct net_device_ops *ops = dev->netdev_ops; 101 104 ··· 189 192 bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, 190 193 struct sk_buff *skb, u16 *vid) 191 194 { 195 + int err; 196 + 192 197 /* If VLAN filtering is disabled on the bridge, all packets are 193 198 * permitted. 194 199 */ ··· 203 204 if (!v) 204 205 return false; 205 206 206 - if (br_vlan_get_tag(skb, vid)) { 207 + err = br_vlan_get_tag(skb, vid); 208 + if (!*vid) { 207 209 u16 pvid = br_get_pvid(v); 208 210 209 - /* Frame did not have a tag. See if pvid is set 210 - * on this port. That tells us which vlan untagged 211 - * traffic belongs to. 211 + /* Frame had a tag with VID 0 or did not have a tag. 212 + * See if pvid is set on this port. That tells us which 213 + * vlan untagged or priority-tagged traffic belongs to. 212 214 */ 213 215 if (pvid == VLAN_N_VID) 214 216 return false; 215 217 216 - /* PVID is set on this port. Any untagged ingress 217 - * frame is considered to belong to this vlan. 218 + /* PVID is set on this port. Any untagged or priority-tagged 219 + * ingress frame is considered to belong to this vlan. 218 220 */ 219 - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); 221 + *vid = pvid; 222 + if (likely(err)) 223 + /* Untagged Frame. */ 224 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); 225 + else 226 + /* Priority-tagged Frame. 227 + * At this point, We know that skb->vlan_tci had 228 + * VLAN_TAG_PRESENT bit and its VID field was 0x000. 229 + * We update only VID field and preserve PCP field. 230 + */ 231 + skb->vlan_tci |= pvid; 232 + 220 233 return true; 221 234 } 222 235 ··· 259 248 return false; 260 249 } 261 250 262 - /* Must be protected by RTNL */ 251 + /* Must be protected by RTNL. 252 + * Must be called with vid in range from 1 to 4094 inclusive. 253 + */ 263 254 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) 264 255 { 265 256 struct net_port_vlans *pv = NULL; ··· 291 278 return err; 292 279 } 293 280 294 - /* Must be protected by RTNL */ 281 + /* Must be protected by RTNL. 282 + * Must be called with vid in range from 1 to 4094 inclusive. 283 + */ 295 284 int br_vlan_delete(struct net_bridge *br, u16 vid) 296 285 { 297 286 struct net_port_vlans *pv; ··· 304 289 if (!pv) 305 290 return -EINVAL; 306 291 307 - if (vid) { 308 - /* If the VID !=0 remove fdb for this vid. VID 0 is special 309 - * in that it's the default and is always there in the fdb. 310 - */ 311 - spin_lock_bh(&br->hash_lock); 312 - fdb_delete_by_addr(br, br->dev->dev_addr, vid); 313 - spin_unlock_bh(&br->hash_lock); 314 - } 292 + spin_lock_bh(&br->hash_lock); 293 + fdb_delete_by_addr(br, br->dev->dev_addr, vid); 294 + spin_unlock_bh(&br->hash_lock); 315 295 316 296 __vlan_del(pv, vid); 317 297 return 0; ··· 339 329 return 0; 340 330 } 341 331 342 - /* Must be protected by RTNL */ 332 + /* Must be protected by RTNL. 333 + * Must be called with vid in range from 1 to 4094 inclusive. 334 + */ 343 335 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) 344 336 { 345 337 struct net_port_vlans *pv = NULL; ··· 375 363 return err; 376 364 } 377 365 378 - /* Must be protected by RTNL */ 366 + /* Must be protected by RTNL. 367 + * Must be called with vid in range from 1 to 4094 inclusive. 368 + */ 379 369 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) 380 370 { 381 371 struct net_port_vlans *pv; ··· 388 374 if (!pv) 389 375 return -EINVAL; 390 376 391 - if (vid) { 392 - /* If the VID !=0 remove fdb for this vid. VID 0 is special 393 - * in that it's the default and is always there in the fdb. 394 - */ 395 - spin_lock_bh(&port->br->hash_lock); 396 - fdb_delete_by_addr(port->br, port->dev->dev_addr, vid); 397 - spin_unlock_bh(&port->br->hash_lock); 398 - } 377 + spin_lock_bh(&port->br->hash_lock); 378 + fdb_delete_by_addr(port->br, port->dev->dev_addr, vid); 379 + spin_unlock_bh(&port->br->hash_lock); 399 380 400 381 return __vlan_del(pv, vid); 401 382 }
+2
net/compat.c
··· 71 71 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || 72 72 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 73 73 return -EFAULT; 74 + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 75 + return -EINVAL; 74 76 kmsg->msg_name = compat_ptr(tmp1); 75 77 kmsg->msg_iov = compat_ptr(tmp2); 76 78 kmsg->msg_control = compat_ptr(tmp3);
+2 -1
net/core/dev.c
··· 1917 1917 return new_map; 1918 1918 } 1919 1919 1920 - int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index) 1920 + int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 1921 + u16 index) 1921 1922 { 1922 1923 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 1923 1924 struct xps_map *map, *new_map;
+4 -4
net/core/filter.c
··· 644 644 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 645 645 646 646 bpf_jit_free(fp); 647 - kfree(fp); 648 647 } 649 648 EXPORT_SYMBOL(sk_filter_release_rcu); 650 649 ··· 682 683 if (fprog->filter == NULL) 683 684 return -EINVAL; 684 685 685 - fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); 686 + fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL); 686 687 if (!fp) 687 688 return -ENOMEM; 688 689 memcpy(fp->insns, fprog->filter, fsize); ··· 722 723 { 723 724 struct sk_filter *fp, *old_fp; 724 725 unsigned int fsize = sizeof(struct sock_filter) * fprog->len; 726 + unsigned int sk_fsize = sk_filter_size(fprog->len); 725 727 int err; 726 728 727 729 if (sock_flag(sk, SOCK_FILTER_LOCKED)) ··· 732 732 if (fprog->filter == NULL) 733 733 return -EINVAL; 734 734 735 - fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); 735 + fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL); 736 736 if (!fp) 737 737 return -ENOMEM; 738 738 if (copy_from_user(fp->insns, fprog->filter, fsize)) { 739 - sock_kfree_s(sk, fp, fsize+sizeof(*fp)); 739 + sock_kfree_s(sk, fp, sk_fsize); 740 740 return -EFAULT; 741 741 } 742 742
+2
net/core/secure_seq.c
··· 10 10 11 11 #include <net/secure_seq.h> 12 12 13 + #if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET) 13 14 #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) 14 15 15 16 static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; ··· 30 29 cmpxchg(&net_secret[--i], 0, tmp); 31 30 } 32 31 } 32 + #endif 33 33 34 34 #ifdef CONFIG_INET 35 35 static u32 seq_scale(u32 seq)
+1
net/core/sock.c
··· 2319 2319 sk->sk_ll_usec = sysctl_net_busy_read; 2320 2320 #endif 2321 2321 2322 + sk->sk_pacing_rate = ~0U; 2322 2323 /* 2323 2324 * Before updating sk_refcnt, we must commit prior changes to memory 2324 2325 * (Documentation/RCU/rculist_nulls.txt for details)
+5
net/ieee802154/6lowpan.c
··· 1372 1372 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); 1373 1373 if (!real_dev) 1374 1374 return -ENODEV; 1375 + if (real_dev->type != ARPHRD_IEEE802154) 1376 + return -EINVAL; 1375 1377 1376 1378 lowpan_dev_info(dev)->real_dev = real_dev; 1377 1379 lowpan_dev_info(dev)->fragment_tag = 0; ··· 1387 1385 } 1388 1386 1389 1387 entry->ldev = dev; 1388 + 1389 + /* Set the lowpan harware address to the wpan hardware address. */ 1390 + memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN); 1390 1391 1391 1392 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); 1392 1393 INIT_LIST_HEAD(&entry->list);
+1 -1
net/ipv4/inet_hashtables.c
··· 287 287 if (unlikely(!INET_TW_MATCH(sk, net, acookie, 288 288 saddr, daddr, ports, 289 289 dif))) { 290 - sock_put(sk); 290 + inet_twsk_put(inet_twsk(sk)); 291 291 goto begintw; 292 292 } 293 293 goto out;
+9 -4
net/ipv4/ip_output.c
··· 772 772 /* initialize protocol header pointer */ 773 773 skb->transport_header = skb->network_header + fragheaderlen; 774 774 775 - skb->ip_summed = CHECKSUM_PARTIAL; 776 775 skb->csum = 0; 777 776 778 - /* specify the length of each IP datagram fragment */ 779 - skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; 780 - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 777 + 781 778 __skb_queue_tail(queue, skb); 779 + } else if (skb_is_gso(skb)) { 780 + goto append; 782 781 } 783 782 783 + skb->ip_summed = CHECKSUM_PARTIAL; 784 + /* specify the length of each IP datagram fragment */ 785 + skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; 786 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 787 + 788 + append: 784 789 return skb_append_datato_frags(sk, skb, getfrag, from, 785 790 (length - transhdrlen)); 786 791 }
+11 -3
net/ipv4/ip_vti.c
··· 125 125 iph->saddr, iph->daddr, 0); 126 126 if (tunnel != NULL) { 127 127 struct pcpu_tstats *tstats; 128 + u32 oldmark = skb->mark; 129 + int ret; 128 130 129 - if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 131 + 132 + /* temporarily mark the skb with the tunnel o_key, to 133 + * only match policies with this mark. 134 + */ 135 + skb->mark = be32_to_cpu(tunnel->parms.o_key); 136 + ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb); 137 + skb->mark = oldmark; 138 + if (!ret) 130 139 return -1; 131 140 132 141 tstats = this_cpu_ptr(tunnel->dev->tstats); ··· 144 135 tstats->rx_bytes += skb->len; 145 136 u64_stats_update_end(&tstats->syncp); 146 137 147 - skb->mark = 0; 148 138 secpath_reset(skb); 149 139 skb->dev = tunnel->dev; 150 140 return 1; ··· 175 167 176 168 memset(&fl4, 0, sizeof(fl4)); 177 169 flowi4_init_output(&fl4, tunnel->parms.link, 178 - be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), 170 + be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos), 179 171 RT_SCOPE_UNIVERSE, 180 172 IPPROTO_IPIP, 0, 181 173 dst, tiph->saddr, 0, 0);
+1 -1
net/ipv4/route.c
··· 2072 2072 RT_SCOPE_LINK); 2073 2073 goto make_route; 2074 2074 } 2075 - if (fl4->saddr) { 2075 + if (!fl4->saddr) { 2076 2076 if (ipv4_is_multicast(fl4->daddr)) 2077 2077 fl4->saddr = inet_select_addr(dev_out, 0, 2078 2078 fl4->flowi4_scope);
+7 -2
net/ipv4/tcp_input.c
··· 1284 1284 tp->lost_cnt_hint -= tcp_skb_pcount(prev); 1285 1285 } 1286 1286 1287 - TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; 1287 + TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1288 + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1289 + TCP_SKB_CB(prev)->end_seq++; 1290 + 1288 1291 if (skb == tcp_highest_sack(sk)) 1289 1292 tcp_advance_highest_sack(sk, skb); 1290 1293 ··· 3291 3288 tcp_init_cwnd_reduction(sk, true); 3292 3289 tcp_set_ca_state(sk, TCP_CA_CWR); 3293 3290 tcp_end_cwnd_reduction(sk); 3294 - tcp_set_ca_state(sk, TCP_CA_Open); 3291 + tcp_try_keep_open(sk); 3295 3292 NET_INC_STATS_BH(sock_net(sk), 3296 3293 LINUX_MIB_TCPLOSSPROBERECOVERY); 3297 3294 } ··· 5711 5708 tcp_rearm_rto(sk); 5712 5709 } else 5713 5710 tcp_init_metrics(sk); 5711 + 5712 + tcp_update_pacing_rate(sk); 5714 5713 5715 5714 /* Prevent spurious tcp_cwnd_restart() on first data packet */ 5716 5715 tp->lsndtime = tcp_time_stamp;
+9 -5
net/ipv4/tcp_output.c
··· 637 637 unsigned int size = 0; 638 638 unsigned int eff_sacks; 639 639 640 + opts->options = 0; 641 + 640 642 #ifdef CONFIG_TCP_MD5SIG 641 643 *md5 = tp->af_specific->md5_lookup(sk, sk); 642 644 if (unlikely(*md5)) { ··· 986 984 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 987 985 unsigned int mss_now) 988 986 { 989 - if (skb->len <= mss_now || !sk_can_gso(sk) || 990 - skb->ip_summed == CHECKSUM_NONE) { 987 + /* Make sure we own this skb before messing gso_size/gso_segs */ 988 + WARN_ON_ONCE(skb_cloned(skb)); 989 + 990 + if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) { 991 991 /* Avoid the costly divide in the normal 992 992 * non-TSO case. 993 993 */ ··· 1069 1065 if (nsize < 0) 1070 1066 nsize = 0; 1071 1067 1072 - if (skb_cloned(skb) && 1073 - skb_is_nonlinear(skb) && 1074 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1068 + if (skb_unclone(skb, GFP_ATOMIC)) 1075 1069 return -ENOMEM; 1076 1070 1077 1071 /* Get a new skb... force flag on. */ ··· 2344 2342 int oldpcount = tcp_skb_pcount(skb); 2345 2343 2346 2344 if (unlikely(oldpcount > 1)) { 2345 + if (skb_unclone(skb, GFP_ATOMIC)) 2346 + return -ENOMEM; 2347 2347 tcp_init_tso_segs(sk, skb, cur_mss); 2348 2348 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 2349 2349 }
+1
net/ipv4/xfrm4_policy.c
··· 107 107 108 108 memset(fl4, 0, sizeof(struct flowi4)); 109 109 fl4->flowi4_mark = skb->mark; 110 + fl4->flowi4_oif = skb_dst(skb)->dev->ifindex; 110 111 111 112 if (!ip_is_fragment(iph)) { 112 113 switch (iph->protocol) {
+1 -2
net/ipv6/ah6.c
··· 618 618 struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); 619 619 struct xfrm_state *x; 620 620 621 - if (type != ICMPV6_DEST_UNREACH && 622 - type != ICMPV6_PKT_TOOBIG && 621 + if (type != ICMPV6_PKT_TOOBIG && 623 622 type != NDISC_REDIRECT) 624 623 return; 625 624
+1 -2
net/ipv6/esp6.c
··· 436 436 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); 437 437 struct xfrm_state *x; 438 438 439 - if (type != ICMPV6_DEST_UNREACH && 440 - type != ICMPV6_PKT_TOOBIG && 439 + if (type != ICMPV6_PKT_TOOBIG && 441 440 type != NDISC_REDIRECT) 442 441 return; 443 442
+1 -1
net/ipv6/inet6_hashtables.c
··· 116 116 } 117 117 if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, 118 118 ports, dif))) { 119 - sock_put(sk); 119 + inet_twsk_put(inet_twsk(sk)); 120 120 goto begintw; 121 121 } 122 122 goto out;
+2 -4
net/ipv6/ip6_gre.c
··· 976 976 if (t->parms.o_flags&GRE_SEQ) 977 977 addend += 4; 978 978 } 979 + t->hlen = addend; 979 980 980 981 if (p->flags & IP6_TNL_F_CAP_XMIT) { 981 982 int strict = (ipv6_addr_type(&p->raddr) & ··· 1003 1002 } 1004 1003 ip6_rt_put(rt); 1005 1004 } 1006 - 1007 - t->hlen = addend; 1008 1005 } 1009 1006 1010 1007 static int ip6gre_tnl_change(struct ip6_tnl *t, ··· 1172 1173 1173 1174 static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 1174 1175 { 1175 - struct ip6_tnl *tunnel = netdev_priv(dev); 1176 1176 if (new_mtu < 68 || 1177 - new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) 1177 + new_mtu > 0xFFF8 - dev->hard_header_len) 1178 1178 return -EINVAL; 1179 1179 dev->mtu = new_mtu; 1180 1180 return 0;
+16 -13
net/ipv6/ip6_output.c
··· 105 105 } 106 106 107 107 rcu_read_lock_bh(); 108 - nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); 108 + nexthop = rt6_nexthop((struct rt6_info *)dst); 109 109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); 110 110 if (unlikely(!neigh)) 111 111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); ··· 874 874 */ 875 875 rt = (struct rt6_info *) *dst; 876 876 rcu_read_lock_bh(); 877 - n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); 877 + n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt)); 878 878 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; 879 879 rcu_read_unlock_bh(); 880 880 ··· 1008 1008 1009 1009 { 1010 1010 struct sk_buff *skb; 1011 + struct frag_hdr fhdr; 1011 1012 int err; 1012 1013 1013 1014 /* There is support for UDP large send offload by network ··· 1016 1015 * udp datagram 1017 1016 */ 1018 1017 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { 1019 - struct frag_hdr fhdr; 1020 - 1021 1018 skb = sock_alloc_send_skb(sk, 1022 1019 hh_len + fragheaderlen + transhdrlen + 20, 1023 1020 (flags & MSG_DONTWAIT), &err); ··· 1035 1036 skb->transport_header = skb->network_header + fragheaderlen; 1036 1037 1037 1038 skb->protocol = htons(ETH_P_IPV6); 1038 - skb->ip_summed = CHECKSUM_PARTIAL; 1039 1039 skb->csum = 0; 1040 1040 1041 - /* Specify the length of each IPv6 datagram fragment. 1042 - * It has to be a multiple of 8. 1043 - */ 1044 - skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - 1045 - sizeof(struct frag_hdr)) & ~7; 1046 - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1047 - ipv6_select_ident(&fhdr, rt); 1048 - skb_shinfo(skb)->ip6_frag_id = fhdr.identification; 1049 1041 __skb_queue_tail(&sk->sk_write_queue, skb); 1042 + } else if (skb_is_gso(skb)) { 1043 + goto append; 1050 1044 } 1051 1045 1046 + skb->ip_summed = CHECKSUM_PARTIAL; 1047 + /* Specify the length of each IPv6 datagram fragment. 1048 + * It has to be a multiple of 8. 1049 + */ 1050 + skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - 1051 + sizeof(struct frag_hdr)) & ~7; 1052 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1053 + ipv6_select_ident(&fhdr, rt); 1054 + skb_shinfo(skb)->ip6_frag_id = fhdr.identification; 1055 + 1056 + append: 1052 1057 return skb_append_datato_frags(sk, skb, getfrag, from, 1053 1058 (length - transhdrlen)); 1054 1059 }
+10 -2
net/ipv6/ip6_tunnel.c
··· 1430 1430 static int 1431 1431 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) 1432 1432 { 1433 - if (new_mtu < IPV6_MIN_MTU) { 1434 - return -EINVAL; 1433 + struct ip6_tnl *tnl = netdev_priv(dev); 1434 + 1435 + if (tnl->parms.proto == IPPROTO_IPIP) { 1436 + if (new_mtu < 68) 1437 + return -EINVAL; 1438 + } else { 1439 + if (new_mtu < IPV6_MIN_MTU) 1440 + return -EINVAL; 1435 1441 } 1442 + if (new_mtu > 0xFFF8 - dev->hard_header_len) 1443 + return -EINVAL; 1436 1444 dev->mtu = new_mtu; 1437 1445 return 0; 1438 1446 }
+1 -2
net/ipv6/ipcomp6.c
··· 64 64 (struct ip_comp_hdr *)(skb->data + offset); 65 65 struct xfrm_state *x; 66 66 67 - if (type != ICMPV6_DEST_UNREACH && 68 - type != ICMPV6_PKT_TOOBIG && 67 + if (type != ICMPV6_PKT_TOOBIG && 69 68 type != NDISC_REDIRECT) 70 69 return; 71 70
+38 -10
net/ipv6/route.c
··· 476 476 } 477 477 478 478 #ifdef CONFIG_IPV6_ROUTER_PREF 479 + struct __rt6_probe_work { 480 + struct work_struct work; 481 + struct in6_addr target; 482 + struct net_device *dev; 483 + }; 484 + 485 + static void rt6_probe_deferred(struct work_struct *w) 486 + { 487 + struct in6_addr mcaddr; 488 + struct __rt6_probe_work *work = 489 + container_of(w, struct __rt6_probe_work, work); 490 + 491 + addrconf_addr_solict_mult(&work->target, &mcaddr); 492 + ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL); 493 + dev_put(work->dev); 494 + kfree(w); 495 + } 496 + 479 497 static void rt6_probe(struct rt6_info *rt) 480 498 { 481 499 struct neighbour *neigh; ··· 517 499 518 500 if (!neigh || 519 501 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { 520 - struct in6_addr mcaddr; 521 - struct in6_addr *target; 502 + struct __rt6_probe_work *work; 522 503 523 - if (neigh) { 504 + work = kmalloc(sizeof(*work), GFP_ATOMIC); 505 + 506 + if (neigh && work) 524 507 neigh->updated = jiffies; 525 - write_unlock(&neigh->lock); 526 - } 527 508 528 - target = (struct in6_addr *)&rt->rt6i_gateway; 529 - addrconf_addr_solict_mult(target, &mcaddr); 530 - ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); 509 + if (neigh) 510 + write_unlock(&neigh->lock); 511 + 512 + if (work) { 513 + INIT_WORK(&work->work, rt6_probe_deferred); 514 + work->target = rt->rt6i_gateway; 515 + dev_hold(rt->dst.dev); 516 + work->dev = rt->dst.dev; 517 + schedule_work(&work->work); 518 + } 531 519 } else { 532 520 out: 533 521 write_unlock(&neigh->lock); ··· 875 851 if (ort->rt6i_dst.plen != 128 && 876 852 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) 877 853 rt->rt6i_flags |= RTF_ANYCAST; 878 - rt->rt6i_gateway = *daddr; 879 854 } 880 855 881 856 rt->rt6i_flags |= RTF_CACHE; ··· 1361 1338 rt->dst.flags |= DST_HOST; 1362 1339 rt->dst.output = ip6_output; 1363 1340 atomic_set(&rt->dst.__refcnt, 1); 1341 + rt->rt6i_gateway = fl6->daddr; 1364 1342 rt->rt6i_dst.addr = fl6->daddr; 1365 1343 rt->rt6i_dst.plen = 128; 1366 1344 rt->rt6i_idev = idev; ··· 1897 1873 in6_dev_hold(rt->rt6i_idev); 1898 1874 rt->dst.lastuse = jiffies; 1899 1875 1900 - rt->rt6i_gateway = ort->rt6i_gateway; 1876 + if (ort->rt6i_flags & RTF_GATEWAY) 1877 + rt->rt6i_gateway = ort->rt6i_gateway; 1878 + else 1879 + rt->rt6i_gateway = *dest; 1901 1880 rt->rt6i_flags = ort->rt6i_flags; 1902 1881 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == 1903 1882 (RTF_DEFAULT | RTF_ADDRCONF)) ··· 2187 2160 else 2188 2161 rt->rt6i_flags |= RTF_LOCAL; 2189 2162 2163 + rt->rt6i_gateway = *addr; 2190 2164 rt->rt6i_dst.addr = *addr; 2191 2165 rt->rt6i_dst.plen = 128; 2192 2166 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
+2 -3
net/ipv6/udp.c
··· 1225 1225 if (tclass < 0) 1226 1226 tclass = np->tclass; 1227 1227 1228 - if (dontfrag < 0) 1229 - dontfrag = np->dontfrag; 1230 - 1231 1228 if (msg->msg_flags&MSG_CONFIRM) 1232 1229 goto do_confirm; 1233 1230 back_from_confirm: ··· 1243 1246 up->pending = AF_INET6; 1244 1247 1245 1248 do_append_data: 1249 + if (dontfrag < 0) 1250 + dontfrag = np->dontfrag; 1246 1251 up->len += ulen; 1247 1252 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 1248 1253 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
+1
net/ipv6/xfrm6_policy.c
··· 138 138 139 139 memset(fl6, 0, sizeof(struct flowi6)); 140 140 fl6->flowi6_mark = skb->mark; 141 + fl6->flowi6_oif = skb_dst(skb)->dev->ifindex; 141 142 142 143 fl6->daddr = reverse ? hdr->saddr : hdr->daddr; 143 144 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
+2 -1
net/key/af_key.c
··· 1098 1098 1099 1099 x->id.proto = proto; 1100 1100 x->id.spi = sa->sadb_sa_spi; 1101 - x->props.replay_window = sa->sadb_sa_replay; 1101 + x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay, 1102 + (sizeof(x->replay.bitmap) * 8)); 1102 1103 if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN) 1103 1104 x->props.flags |= XFRM_STATE_NOECN; 1104 1105 if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
+29 -7
net/l2tp/l2tp_core.c
··· 115 115 static void l2tp_session_set_header_len(struct l2tp_session *session, int version); 116 116 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 117 117 118 + static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) 119 + { 120 + return sk->sk_user_data; 121 + } 122 + 118 123 static inline struct l2tp_net *l2tp_pernet(struct net *net) 119 124 { 120 125 BUG_ON(!net); ··· 509 504 return 0; 510 505 511 506 #if IS_ENABLED(CONFIG_IPV6) 512 - if (sk->sk_family == PF_INET6) { 507 + if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) { 513 508 if (!uh->check) { 514 509 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); 515 510 return 1; ··· 1133 1128 /* Queue the packet to IP for output */ 1134 1129 skb->local_df = 1; 1135 1130 #if IS_ENABLED(CONFIG_IPV6) 1136 - if (skb->sk->sk_family == PF_INET6) 1131 + if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1137 1132 error = inet6_csk_xmit(skb, NULL); 1138 1133 else 1139 1134 #endif ··· 1260 1255 1261 1256 /* Calculate UDP checksum if configured to do so */ 1262 1257 #if IS_ENABLED(CONFIG_IPV6) 1263 - if (sk->sk_family == PF_INET6) 1258 + if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1264 1259 l2tp_xmit_ipv6_csum(sk, skb, udp_len); 1265 1260 else 1266 1261 #endif ··· 1309 1304 */ 1310 1305 static void l2tp_tunnel_destruct(struct sock *sk) 1311 1306 { 1312 - struct l2tp_tunnel *tunnel; 1307 + struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); 1313 1308 struct l2tp_net *pn; 1314 1309 1315 - tunnel = sk->sk_user_data; 1316 1310 if (tunnel == NULL) 1317 1311 goto end; 1318 1312 ··· 1679 1675 } 1680 1676 1681 1677 /* Check if this socket has already been prepped */ 1682 - tunnel = (struct l2tp_tunnel *)sk->sk_user_data; 1678 + tunnel = l2tp_tunnel(sk); 1683 1679 if (tunnel != NULL) { 1684 1680 /* This socket has already been prepped */ 1685 1681 err = -EBUSY; ··· 1708 1704 if (cfg != NULL) 1709 1705 tunnel->debug = cfg->debug; 1710 1706 1707 + #if IS_ENABLED(CONFIG_IPV6) 1708 + if (sk->sk_family == PF_INET6) { 1709 + struct ipv6_pinfo *np = inet6_sk(sk); 1710 + 1711 + if (ipv6_addr_v4mapped(&np->saddr) && 1712 + ipv6_addr_v4mapped(&np->daddr)) { 1713 + struct inet_sock *inet = inet_sk(sk); 1714 + 1715 + tunnel->v4mapped = true; 1716 + inet->inet_saddr = np->saddr.s6_addr32[3]; 1717 + inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3]; 1718 + inet->inet_daddr = np->daddr.s6_addr32[3]; 1719 + } else { 1720 + tunnel->v4mapped = false; 1721 + } 1722 + } 1723 + #endif 1724 + 1711 1725 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1712 1726 tunnel->encap = encap; 1713 1727 if (encap == L2TP_ENCAPTYPE_UDP) { ··· 1734 1712 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1735 1713 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; 1736 1714 #if IS_ENABLED(CONFIG_IPV6) 1737 - if (sk->sk_family == PF_INET6) 1715 + if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1738 1716 udpv6_encap_enable(); 1739 1717 else 1740 1718 #endif
+3
net/l2tp/l2tp_core.h
··· 194 194 struct sock *sock; /* Parent socket */ 195 195 int fd; /* Parent fd, if tunnel socket 196 196 * was created by userspace */ 197 + #if IS_ENABLED(CONFIG_IPV6) 198 + bool v4mapped; 199 + #endif 197 200 198 201 struct work_struct del_work; 199 202
+4
net/l2tp/l2tp_ppp.c
··· 353 353 goto error_put_sess_tun; 354 354 } 355 355 356 + local_bh_disable(); 356 357 l2tp_xmit_skb(session, skb, session->hdr_len); 358 + local_bh_enable(); 357 359 358 360 sock_put(ps->tunnel_sock); 359 361 sock_put(sk); ··· 424 422 skb->data[0] = ppph[0]; 425 423 skb->data[1] = ppph[1]; 426 424 425 + local_bh_disable(); 427 426 l2tp_xmit_skb(session, skb, session->hdr_len); 427 + local_bh_enable(); 428 428 429 429 sock_put(sk_tun); 430 430 sock_put(sk);
+1 -1
net/mac80211/cfg.c
··· 3518 3518 return -EINVAL; 3519 3519 } 3520 3520 band = chanctx_conf->def.chan->band; 3521 - sta = sta_info_get(sdata, peer); 3521 + sta = sta_info_get_bss(sdata, peer); 3522 3522 if (sta) { 3523 3523 qos = test_sta_flag(sta, WLAN_STA_WME); 3524 3524 } else {
+3
net/mac80211/ieee80211_i.h
··· 893 893 * that the scan completed. 894 894 * @SCAN_ABORTED: Set for our scan work function when the driver reported 895 895 * a scan complete for an aborted scan. 896 + * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being 897 + * cancelled. 896 898 */ 897 899 enum { 898 900 SCAN_SW_SCANNING, ··· 902 900 SCAN_ONCHANNEL_SCANNING, 903 901 SCAN_COMPLETED, 904 902 SCAN_ABORTED, 903 + SCAN_HW_CANCELLED, 905 904 }; 906 905 907 906 /**
+2
net/mac80211/offchannel.c
··· 394 394 395 395 if (started) 396 396 ieee80211_start_next_roc(local); 397 + else if (list_empty(&local->roc_list)) 398 + ieee80211_run_deferred_scan(local); 397 399 } 398 400 399 401 out_unlock:
+3
net/mac80211/rx.c
··· 3056 3056 case NL80211_IFTYPE_ADHOC: 3057 3057 if (!bssid) 3058 3058 return 0; 3059 + if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3060 + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3061 + return 0; 3059 3062 if (ieee80211_is_beacon(hdr->frame_control)) { 3060 3063 return 1; 3061 3064 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
+19
net/mac80211/scan.c
··· 238 238 enum ieee80211_band band; 239 239 int i, ielen, n_chans; 240 240 241 + if (test_bit(SCAN_HW_CANCELLED, &local->scanning)) 242 + return false; 243 + 241 244 do { 242 245 if (local->hw_scan_band == IEEE80211_NUM_BANDS) 243 246 return false; ··· 943 940 if (!local->scan_req) 944 941 goto out; 945 942 943 + /* 944 + * We have a scan running and the driver already reported completion, 945 + * but the worker hasn't run yet or is stuck on the mutex - mark it as 946 + * cancelled. 947 + */ 948 + if (test_bit(SCAN_HW_SCANNING, &local->scanning) && 949 + test_bit(SCAN_COMPLETED, &local->scanning)) { 950 + set_bit(SCAN_HW_CANCELLED, &local->scanning); 951 + goto out; 952 + } 953 + 946 954 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { 955 + /* 956 + * Make sure that __ieee80211_scan_completed doesn't trigger a 957 + * scan on another band. 958 + */ 959 + set_bit(SCAN_HW_CANCELLED, &local->scanning); 947 960 if (local->ops->cancel_hw_scan) 948 961 drv_cancel_hw_scan(local, 949 962 rcu_dereference_protected(local->scan_sdata,
+3
net/mac80211/status.c
··· 180 180 struct ieee80211_local *local = sta->local; 181 181 struct ieee80211_sub_if_data *sdata = sta->sdata; 182 182 183 + if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) 184 + sta->last_rx = jiffies; 185 + 183 186 if (ieee80211_is_data_qos(mgmt->frame_control)) { 184 187 struct ieee80211_hdr *hdr = (void *) skb->data; 185 188 u8 *qc = ieee80211_get_qos_ctl(hdr);
+2 -1
net/mac80211/tx.c
··· 1120 1120 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1121 1121 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) 1122 1122 return TX_DROP; 1123 - } else if (info->flags & IEEE80211_TX_CTL_INJECTED || 1123 + } else if (info->flags & (IEEE80211_TX_CTL_INJECTED | 1124 + IEEE80211_TX_INTFL_NL80211_FRAME_TX) || 1124 1125 tx->sdata->control_port_protocol == tx->skb->protocol) { 1125 1126 tx->sta = sta_info_get_bss(sdata, hdr->addr1); 1126 1127 }
+5 -4
net/mac80211/util.c
··· 2103 2103 { 2104 2104 struct ieee80211_local *local = sdata->local; 2105 2105 struct ieee80211_supported_band *sband; 2106 - int rate, skip, shift; 2106 + int rate, shift; 2107 2107 u8 i, exrates, *pos; 2108 2108 u32 basic_rates = sdata->vif.bss_conf.basic_rates; 2109 2109 u32 rate_flags; ··· 2131 2131 pos = skb_put(skb, exrates + 2); 2132 2132 *pos++ = WLAN_EID_EXT_SUPP_RATES; 2133 2133 *pos++ = exrates; 2134 - skip = 0; 2135 2134 for (i = 8; i < sband->n_bitrates; i++) { 2136 2135 u8 basic = 0; 2137 2136 if ((rate_flags & sband->bitrates[i].flags) 2138 2137 != rate_flags) 2139 - continue; 2140 - if (skip++ < 8) 2141 2138 continue; 2142 2139 if (need_basic && basic_rates & BIT(i)) 2143 2140 basic = 0x80; ··· 2238 2241 } 2239 2242 2240 2243 rate = cfg80211_calculate_bitrate(&ri); 2244 + if (WARN_ONCE(!rate, 2245 + "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n", 2246 + status->flag, status->rate_idx, status->vht_nss)) 2247 + return 0; 2241 2248 2242 2249 /* rewind from end of MPDU */ 2243 2250 if (status->flag & RX_FLAG_MACTIME_END)
+2 -2
net/netfilter/nf_conntrack_h323_main.c
··· 778 778 flowi6_to_flowi(&fl1), false)) { 779 779 if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, 780 780 flowi6_to_flowi(&fl2), false)) { 781 - if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, 782 - sizeof(rt1->rt6i_gateway)) && 781 + if (ipv6_addr_equal(rt6_nexthop(rt1), 782 + rt6_nexthop(rt2)) && 783 783 rt1->dst.dev == rt2->dst.dev) 784 784 ret = 1; 785 785 dst_release(&rt2->dst);
+10 -12
net/sched/sch_fq.c
··· 472 472 if (f->credit > 0 || !q->rate_enable) 473 473 goto out; 474 474 475 - if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) { 476 - rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate; 475 + rate = q->flow_max_rate; 476 + if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) 477 + rate = min(skb->sk->sk_pacing_rate, rate); 477 478 478 - rate = min(rate, q->flow_max_rate); 479 - } else { 480 - rate = q->flow_max_rate; 481 - if (rate == ~0U) 482 - goto out; 483 - } 484 - if (rate) { 479 + if (rate != ~0U) { 485 480 u32 plen = max(qdisc_pkt_len(skb), q->quantum); 486 481 u64 len = (u64)plen * NSEC_PER_SEC; 487 482 488 - do_div(len, rate); 483 + if (likely(rate)) 484 + do_div(len, rate); 489 485 /* Since socket rate can change later, 490 486 * clamp the delay to 125 ms. 491 487 * TODO: maybe segment the too big skb, as in commit ··· 652 656 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 653 657 654 658 if (tb[TCA_FQ_INITIAL_QUANTUM]) 655 - q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 659 + q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 656 660 657 661 if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) 658 662 q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); ··· 731 735 if (opts == NULL) 732 736 goto nla_put_failure; 733 737 738 + /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore, 739 + * do not bother giving its value 740 + */ 734 741 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || 735 742 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || 736 743 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || 737 744 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || 738 745 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || 739 - nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) || 740 746 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || 741 747 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) 742 748 goto nla_put_failure;
+17
net/sched/sch_netem.c
··· 358 358 return PSCHED_NS2TICKS(ticks); 359 359 } 360 360 361 + static void tfifo_reset(struct Qdisc *sch) 362 + { 363 + struct netem_sched_data *q = qdisc_priv(sch); 364 + struct rb_node *p; 365 + 366 + while ((p = rb_first(&q->t_root))) { 367 + struct sk_buff *skb = netem_rb_to_skb(p); 368 + 369 + rb_erase(p, &q->t_root); 370 + skb->next = NULL; 371 + skb->prev = NULL; 372 + kfree_skb(skb); 373 + } 374 + } 375 + 361 376 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) 362 377 { 363 378 struct netem_sched_data *q = qdisc_priv(sch); ··· 535 520 skb->next = NULL; 536 521 skb->prev = NULL; 537 522 len = qdisc_pkt_len(skb); 523 + sch->qstats.backlog -= len; 538 524 kfree_skb(skb); 539 525 } 540 526 } ··· 625 609 struct netem_sched_data *q = qdisc_priv(sch); 626 610 627 611 qdisc_reset_queue(sch); 612 + tfifo_reset(sch); 628 613 if (q->qdisc) 629 614 qdisc_reset(q->qdisc); 630 615 qdisc_watchdog_cancel(&q->watchdog);
+2 -1
net/sctp/output.c
··· 536 536 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 537 537 */ 538 538 if (!sctp_checksum_disable) { 539 - if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { 539 + if (!(dst->dev->features & NETIF_F_SCTP_CSUM) || 540 + (dst_xfrm(dst) != NULL) || packet->ipfragok) { 540 541 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 541 542 542 543 /* 3) Put the resultant value into the checksum field in the
+20 -4
net/socket.c
··· 1964 1964 unsigned int name_len; 1965 1965 }; 1966 1966 1967 + static int copy_msghdr_from_user(struct msghdr *kmsg, 1968 + struct msghdr __user *umsg) 1969 + { 1970 + if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) 1971 + return -EFAULT; 1972 + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 1973 + return -EINVAL; 1974 + return 0; 1975 + } 1976 + 1967 1977 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, 1968 1978 struct msghdr *msg_sys, unsigned int flags, 1969 1979 struct used_address *used_address) ··· 1992 1982 if (MSG_CMSG_COMPAT & flags) { 1993 1983 if (get_compat_msghdr(msg_sys, msg_compat)) 1994 1984 return -EFAULT; 1995 - } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 1996 - return -EFAULT; 1985 + } else { 1986 + err = copy_msghdr_from_user(msg_sys, msg); 1987 + if (err) 1988 + return err; 1989 + } 1997 1990 1998 1991 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 1999 1992 err = -EMSGSIZE; ··· 2204 2191 if (MSG_CMSG_COMPAT & flags) { 2205 2192 if (get_compat_msghdr(msg_sys, msg_compat)) 2206 2193 return -EFAULT; 2207 - } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 2208 - return -EFAULT; 2194 + } else { 2195 + err = copy_msghdr_from_user(msg_sys, msg); 2196 + if (err) 2197 + return err; 2198 + } 2209 2199 2210 2200 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2211 2201 err = -EMSGSIZE;
+10
net/unix/af_unix.c
··· 1246 1246 return 0; 1247 1247 } 1248 1248 1249 + static void unix_sock_inherit_flags(const struct socket *old, 1250 + struct socket *new) 1251 + { 1252 + if (test_bit(SOCK_PASSCRED, &old->flags)) 1253 + set_bit(SOCK_PASSCRED, &new->flags); 1254 + if (test_bit(SOCK_PASSSEC, &old->flags)) 1255 + set_bit(SOCK_PASSSEC, &new->flags); 1256 + } 1257 + 1249 1258 static int unix_accept(struct socket *sock, struct socket *newsock, int flags) 1250 1259 { 1251 1260 struct sock *sk = sock->sk; ··· 1289 1280 /* attach accepted sock to socket */ 1290 1281 unix_state_lock(tsk); 1291 1282 newsock->state = SS_CONNECTED; 1283 + unix_sock_inherit_flags(sock, newsock); 1292 1284 sock_graft(tsk, newsock); 1293 1285 unix_state_unlock(tsk); 1294 1286 return 0;
+1
net/unix/diag.c
··· 124 124 rep->udiag_family = AF_UNIX; 125 125 rep->udiag_type = sk->sk_type; 126 126 rep->udiag_state = sk->sk_state; 127 + rep->pad = 0; 127 128 rep->udiag_ino = sk_ino; 128 129 sock_diag_save_cookie(sk, rep->udiag_cookie); 129 130
+13 -10
net/wireless/core.c
··· 566 566 /* check and set up bitrates */ 567 567 ieee80211_set_bitrate_flags(wiphy); 568 568 569 - 569 + rtnl_lock(); 570 570 res = device_add(&rdev->wiphy.dev); 571 - if (res) 572 - return res; 573 - 574 - res = rfkill_register(rdev->rfkill); 575 571 if (res) { 576 - device_del(&rdev->wiphy.dev); 572 + rtnl_unlock(); 577 573 return res; 578 574 } 579 575 580 - rtnl_lock(); 581 576 /* set up regulatory info */ 582 577 wiphy_regulatory_register(wiphy); 583 578 ··· 601 606 602 607 rdev->wiphy.registered = true; 603 608 rtnl_unlock(); 609 + 610 + res = rfkill_register(rdev->rfkill); 611 + if (res) { 612 + rfkill_destroy(rdev->rfkill); 613 + rdev->rfkill = NULL; 614 + wiphy_unregister(&rdev->wiphy); 615 + return res; 616 + } 617 + 604 618 return 0; 605 619 } 606 620 EXPORT_SYMBOL(wiphy_register); ··· 644 640 rtnl_unlock(); 645 641 __count == 0; })); 646 642 647 - rfkill_unregister(rdev->rfkill); 643 + if (rdev->rfkill) 644 + rfkill_unregister(rdev->rfkill); 648 645 649 646 rtnl_lock(); 650 647 rdev->wiphy.registered = false; ··· 958 953 case NETDEV_PRE_UP: 959 954 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 960 955 return notifier_from_errno(-EOPNOTSUPP); 961 - if (rfkill_blocked(rdev->rfkill)) 962 - return notifier_from_errno(-ERFKILL); 963 956 ret = cfg80211_can_add_interface(rdev, wdev->iftype); 964 957 if (ret) 965 958 return notifier_from_errno(ret);
+3
net/wireless/core.h
··· 411 411 cfg80211_can_add_interface(struct cfg80211_registered_device *rdev, 412 412 enum nl80211_iftype iftype) 413 413 { 414 + if (rfkill_blocked(rdev->rfkill)) 415 + return -ERFKILL; 416 + 414 417 return cfg80211_can_change_interface(rdev, NULL, iftype); 415 418 } 416 419
+3
net/wireless/ibss.c
··· 263 263 if (chan->flags & IEEE80211_CHAN_DISABLED) 264 264 continue; 265 265 wdev->wext.ibss.chandef.chan = chan; 266 + wdev->wext.ibss.chandef.center_freq1 = 267 + chan->center_freq; 266 268 break; 267 269 } 268 270 ··· 349 347 if (chan) { 350 348 wdev->wext.ibss.chandef.chan = chan; 351 349 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; 350 + wdev->wext.ibss.chandef.center_freq1 = freq; 352 351 wdev->wext.ibss.channel_fixed = true; 353 352 } else { 354 353 /* cfg80211_ibss_wext_join will pick one if needed */
+2 -2
net/wireless/nl80211.c
··· 2421 2421 change = true; 2422 2422 } 2423 2423 2424 - if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) && 2424 + if (flags && (*flags & MONITOR_FLAG_ACTIVE) && 2425 2425 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2426 2426 return -EOPNOTSUPP; 2427 2427 ··· 2483 2483 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 2484 2484 &flags); 2485 2485 2486 - if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) && 2486 + if (!err && (flags & MONITOR_FLAG_ACTIVE) && 2487 2487 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2488 2488 return -EOPNOTSUPP; 2489 2489
+6 -1
net/wireless/radiotap.c
··· 97 97 struct ieee80211_radiotap_header *radiotap_header, 98 98 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) 99 99 { 100 + /* check the radiotap header can actually be present */ 101 + if (max_length < sizeof(struct ieee80211_radiotap_header)) 102 + return -EINVAL; 103 + 100 104 /* Linux only supports version 0 radiotap format */ 101 105 if (radiotap_header->it_version) 102 106 return -EINVAL; ··· 135 131 */ 136 132 137 133 if ((unsigned long)iterator->_arg - 138 - (unsigned long)iterator->_rtheader > 134 + (unsigned long)iterator->_rtheader + 135 + sizeof(uint32_t) > 139 136 (unsigned long)iterator->_max_length) 140 137 return -EINVAL; 141 138 }
+21 -7
net/xfrm/xfrm_policy.c
··· 334 334 335 335 atomic_inc(&policy->genid); 336 336 337 - del_timer(&policy->polq.hold_timer); 337 + if (del_timer(&policy->polq.hold_timer)) 338 + xfrm_pol_put(policy); 338 339 xfrm_queue_purge(&policy->polq.hold_queue); 339 340 340 341 if (del_timer(&policy->timer)) ··· 590 589 591 590 spin_lock_bh(&pq->hold_queue.lock); 592 591 skb_queue_splice_init(&pq->hold_queue, &list); 593 - del_timer(&pq->hold_timer); 592 + if (del_timer(&pq->hold_timer)) 593 + xfrm_pol_put(old); 594 594 spin_unlock_bh(&pq->hold_queue.lock); 595 595 596 596 if (skb_queue_empty(&list)) ··· 602 600 spin_lock_bh(&pq->hold_queue.lock); 603 601 skb_queue_splice(&list, &pq->hold_queue); 604 602 pq->timeout = XFRM_QUEUE_TMO_MIN; 605 - mod_timer(&pq->hold_timer, jiffies); 603 + if (!mod_timer(&pq->hold_timer, jiffies)) 604 + xfrm_pol_hold(new); 606 605 spin_unlock_bh(&pq->hold_queue.lock); 607 606 } 608 607 ··· 1772 1769 1773 1770 spin_lock(&pq->hold_queue.lock); 1774 1771 skb = skb_peek(&pq->hold_queue); 1772 + if (!skb) { 1773 + spin_unlock(&pq->hold_queue.lock); 1774 + goto out; 1775 + } 1775 1776 dst = skb_dst(skb); 1776 1777 sk = skb->sk; 1777 1778 xfrm_decode_session(skb, &fl, dst->ops->family); ··· 1794 1787 goto purge_queue; 1795 1788 1796 1789 pq->timeout = pq->timeout << 1; 1797 - mod_timer(&pq->hold_timer, jiffies + pq->timeout); 1798 - return; 1790 + if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout)) 1791 + xfrm_pol_hold(pol); 1792 + goto out; 1799 1793 } 1800 1794 1801 1795 dst_release(dst); ··· 1827 1819 err = dst_output(skb); 1828 1820 } 1829 1821 1822 + out: 1823 + xfrm_pol_put(pol); 1830 1824 return; 1831 1825 1832 1826 purge_queue: 1833 1827 pq->timeout = 0; 1834 1828 xfrm_queue_purge(&pq->hold_queue); 1829 + xfrm_pol_put(pol); 1835 1830 } 1836 1831 1837 1832 static int xdst_queue_output(struct sk_buff *skb) ··· 1842 1831 unsigned long sched_next; 1843 1832 struct dst_entry *dst = skb_dst(skb); 1844 1833 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 1845 - struct xfrm_policy_queue *pq = &xdst->pols[0]->polq; 1834 + struct xfrm_policy *pol = xdst->pols[0]; 1835 + struct xfrm_policy_queue *pq = &pol->polq; 1846 1836 1847 1837 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { 1848 1838 kfree_skb(skb); ··· 1862 1850 if (del_timer(&pq->hold_timer)) { 1863 1851 if (time_before(pq->hold_timer.expires, sched_next)) 1864 1852 sched_next = pq->hold_timer.expires; 1853 + xfrm_pol_put(pol); 1865 1854 } 1866 1855 1867 1856 __skb_queue_tail(&pq->hold_queue, skb); 1868 - mod_timer(&pq->hold_timer, sched_next); 1857 + if (!mod_timer(&pq->hold_timer, sched_next)) 1858 + xfrm_pol_hold(pol); 1869 1859 1870 1860 spin_unlock_bh(&pq->hold_queue.lock); 1871 1861
+29 -27
net/xfrm/xfrm_replay.c
··· 61 61 62 62 switch (event) { 63 63 case XFRM_REPLAY_UPDATE: 64 - if (x->replay_maxdiff && 65 - (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && 66 - (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) { 64 + if (!x->replay_maxdiff || 65 + ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) && 66 + (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) { 67 67 if (x->xflags & XFRM_TIME_DEFER) 68 68 event = XFRM_REPLAY_TIMEOUT; 69 69 else ··· 129 129 return 0; 130 130 131 131 diff = x->replay.seq - seq; 132 - if (diff >= min_t(unsigned int, x->props.replay_window, 133 - sizeof(x->replay.bitmap) * 8)) { 132 + if (diff >= x->props.replay_window) { 134 133 x->stats.replay_window++; 135 134 goto err; 136 135 } ··· 301 302 302 303 switch (event) { 303 304 case XFRM_REPLAY_UPDATE: 304 - if (x->replay_maxdiff && 305 - (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && 306 - (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) { 305 + if (!x->replay_maxdiff || 306 + ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && 307 + (replay_esn->oseq - preplay_esn->oseq 308 + < x->replay_maxdiff))) { 307 309 if (x->xflags & XFRM_TIME_DEFER) 308 310 event = XFRM_REPLAY_TIMEOUT; 309 311 else ··· 353 353 354 354 switch (event) { 355 355 case XFRM_REPLAY_UPDATE: 356 - if (!x->replay_maxdiff) 357 - break; 358 - 359 - if (replay_esn->seq_hi == preplay_esn->seq_hi) 360 - seq_diff = replay_esn->seq - preplay_esn->seq; 361 - else 362 - seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; 363 - 364 - if (replay_esn->oseq_hi == preplay_esn->oseq_hi) 365 - oseq_diff = replay_esn->oseq - preplay_esn->oseq; 366 - else 367 - oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; 368 - 369 - if (seq_diff < x->replay_maxdiff && 370 - oseq_diff < x->replay_maxdiff) { 371 - 372 - if (x->xflags & XFRM_TIME_DEFER) 373 - event = XFRM_REPLAY_TIMEOUT; 356 + if (x->replay_maxdiff) { 357 + if (replay_esn->seq_hi == preplay_esn->seq_hi) 358 + seq_diff = replay_esn->seq - preplay_esn->seq; 374 359 else 375 - return; 360 + seq_diff = ~preplay_esn->seq + replay_esn->seq 361 + + 1; 362 + 363 + if (replay_esn->oseq_hi == preplay_esn->oseq_hi) 364 + oseq_diff = replay_esn->oseq 365 + - preplay_esn->oseq; 366 + else 367 + oseq_diff = ~preplay_esn->oseq 368 + + replay_esn->oseq + 1; 369 + 370 + if (seq_diff >= x->replay_maxdiff || 371 + oseq_diff >= x->replay_maxdiff) 372 + break; 376 373 } 374 + 375 + if (x->xflags & XFRM_TIME_DEFER) 376 + event = XFRM_REPLAY_TIMEOUT; 377 + else 378 + return; 377 379 378 380 break; 379 381
+3 -2
net/xfrm/xfrm_user.c
··· 446 446 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 447 447 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 448 448 x->props.mode = p->mode; 449 - x->props.replay_window = p->replay_window; 449 + x->props.replay_window = min_t(unsigned int, p->replay_window, 450 + sizeof(x->replay.bitmap) * 8); 450 451 x->props.reqid = p->reqid; 451 452 x->props.family = p->family; 452 453 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); ··· 1857 1856 if (x->km.state != XFRM_STATE_VALID) 1858 1857 goto out; 1859 1858 1860 - err = xfrm_replay_verify_len(x->replay_esn, rp); 1859 + err = xfrm_replay_verify_len(x->replay_esn, re); 1861 1860 if (err) 1862 1861 goto out; 1863 1862
+8 -6
sound/soc/codecs/ab8500-codec.c
··· 2300 2300 case 0: 2301 2301 break; 2302 2302 case 1: 2303 - slot = find_first_bit((unsigned long *)&tx_mask, 32); 2303 + slot = ffs(tx_mask); 2304 2304 snd_soc_update_bits(codec, AB8500_DASLOTCONF1, mask, slot); 2305 2305 snd_soc_update_bits(codec, AB8500_DASLOTCONF3, mask, slot); 2306 2306 snd_soc_update_bits(codec, AB8500_DASLOTCONF2, mask, slot); 2307 2307 snd_soc_update_bits(codec, AB8500_DASLOTCONF4, mask, slot); 2308 2308 break; 2309 2309 case 2: 2310 - slot = find_first_bit((unsigned long *)&tx_mask, 32); 2310 + slot = ffs(tx_mask); 2311 2311 snd_soc_update_bits(codec, AB8500_DASLOTCONF1, mask, slot); 2312 2312 snd_soc_update_bits(codec, AB8500_DASLOTCONF3, mask, slot); 2313 - slot = find_next_bit((unsigned long *)&tx_mask, 32, slot + 1); 2313 + slot = fls(tx_mask); 2314 2314 snd_soc_update_bits(codec, AB8500_DASLOTCONF2, mask, slot); 2315 2315 snd_soc_update_bits(codec, AB8500_DASLOTCONF4, mask, slot); 2316 2316 break; ··· 2341 2341 case 0: 2342 2342 break; 2343 2343 case 1: 2344 - slot = find_first_bit((unsigned long *)&rx_mask, 32); 2344 + slot = ffs(rx_mask); 2345 2345 snd_soc_update_bits(codec, AB8500_ADSLOTSEL(slot), 2346 2346 AB8500_MASK_SLOT(slot), 2347 2347 AB8500_ADSLOTSELX_AD_OUT_TO_SLOT(AB8500_AD_OUT3, slot)); 2348 2348 break; 2349 2349 case 2: 2350 - slot = find_first_bit((unsigned long *)&rx_mask, 32); 2350 + slot = ffs(rx_mask); 2351 2351 snd_soc_update_bits(codec, 2352 2352 AB8500_ADSLOTSEL(slot), 2353 2353 AB8500_MASK_SLOT(slot), 2354 2354 AB8500_ADSLOTSELX_AD_OUT_TO_SLOT(AB8500_AD_OUT3, slot)); 2355 - slot = find_next_bit((unsigned long *)&rx_mask, 32, slot + 1); 2355 + slot = fls(rx_mask); 2356 2356 snd_soc_update_bits(codec, 2357 2357 AB8500_ADSLOTSEL(slot), 2358 2358 AB8500_MASK_SLOT(slot), ··· 2575 2575 /* Create driver private-data struct */ 2576 2576 drvdata = devm_kzalloc(&pdev->dev, sizeof(struct ab8500_codec_drvdata), 2577 2577 GFP_KERNEL); 2578 + if (!drvdata) 2579 + return -ENOMEM; 2578 2580 drvdata->sid_status = SID_UNCONFIGURED; 2579 2581 drvdata->anc_status = ANC_UNCONFIGURED; 2580 2582 dev_set_drvdata(&pdev->dev, drvdata);
+1 -1
sound/soc/codecs/ak4642.c
··· 257 257 * This operation came from example code of 258 258 * "ASAHI KASEI AK4642" (japanese) manual p94. 259 259 */ 260 - snd_soc_write(codec, SG_SL1, PMMP | MGAIN0); 260 + snd_soc_update_bits(codec, SG_SL1, PMMP | MGAIN0, PMMP | MGAIN0); 261 261 snd_soc_write(codec, TIMER, ZTM(0x3) | WTM(0x3)); 262 262 snd_soc_write(codec, ALC_CTL1, ALC | LMTH0); 263 263 snd_soc_update_bits(codec, PW_MGMT1, PMADL, PMADL);
+54 -39
sound/soc/codecs/cs42l52.c
··· 17 17 #include <linux/kernel.h> 18 18 #include <linux/init.h> 19 19 #include <linux/delay.h> 20 + #include <linux/gpio.h> 20 21 #include <linux/pm.h> 21 22 #include <linux/i2c.h> 22 23 #include <linux/input.h> ··· 1117 1116 cs42l52->sysclk = CS42L52_DEFAULT_CLK; 1118 1117 cs42l52->config.format = CS42L52_DEFAULT_FORMAT; 1119 1118 1120 - /* Set Platform MICx CFG */ 1121 - snd_soc_update_bits(codec, CS42L52_MICA_CTL, 1122 - CS42L52_MIC_CTL_TYPE_MASK, 1123 - cs42l52->pdata.mica_cfg << 1124 - CS42L52_MIC_CTL_TYPE_SHIFT); 1125 - 1126 - snd_soc_update_bits(codec, CS42L52_MICB_CTL, 1127 - CS42L52_MIC_CTL_TYPE_MASK, 1128 - cs42l52->pdata.micb_cfg << 1129 - CS42L52_MIC_CTL_TYPE_SHIFT); 1130 - 1131 - /* if Single Ended, Get Mic_Select */ 1132 - if (cs42l52->pdata.mica_cfg) 1133 - snd_soc_update_bits(codec, CS42L52_MICA_CTL, 1134 - CS42L52_MIC_CTL_MIC_SEL_MASK, 1135 - cs42l52->pdata.mica_sel << 1136 - CS42L52_MIC_CTL_MIC_SEL_SHIFT); 1137 - if (cs42l52->pdata.micb_cfg) 1138 - snd_soc_update_bits(codec, CS42L52_MICB_CTL, 1139 - CS42L52_MIC_CTL_MIC_SEL_MASK, 1140 - cs42l52->pdata.micb_sel << 1141 - CS42L52_MIC_CTL_MIC_SEL_SHIFT); 1142 - 1143 - /* Set Platform Charge Pump Freq */ 1144 - snd_soc_update_bits(codec, CS42L52_CHARGE_PUMP, 1145 - CS42L52_CHARGE_PUMP_MASK, 1146 - cs42l52->pdata.chgfreq << 1147 - CS42L52_CHARGE_PUMP_SHIFT); 1148 - 1149 - /* Set Platform Bias Level */ 1150 - snd_soc_update_bits(codec, CS42L52_IFACE_CTL2, 1151 - CS42L52_IFACE_CTL2_BIAS_LVL, 1152 - cs42l52->pdata.micbias_lvl); 1153 - 1154 1119 return ret; 1155 1120 } 1156 1121 ··· 1172 1205 const struct i2c_device_id *id) 1173 1206 { 1174 1207 struct cs42l52_private *cs42l52; 1208 + struct cs42l52_platform_data *pdata = dev_get_platdata(&i2c_client->dev); 1175 1209 int ret; 1176 1210 unsigned int devid = 0; 1177 1211 unsigned int reg; ··· 1190 1222 return ret; 1191 1223 } 1192 1224 1193 - i2c_set_clientdata(i2c_client, cs42l52); 1225 + if (pdata) 1226 + cs42l52->pdata = *pdata; 1194 1227 1195 - if (dev_get_platdata(&i2c_client->dev)) 1196 - memcpy(&cs42l52->pdata, dev_get_platdata(&i2c_client->dev), 1197 - sizeof(cs42l52->pdata)); 1228 + if (cs42l52->pdata.reset_gpio) { 1229 + ret = gpio_request_one(cs42l52->pdata.reset_gpio, 1230 + GPIOF_OUT_INIT_HIGH, "CS42L52 /RST"); 1231 + if (ret < 0) { 1232 + dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n", 1233 + cs42l52->pdata.reset_gpio, ret); 1234 + return ret; 1235 + } 1236 + gpio_set_value_cansleep(cs42l52->pdata.reset_gpio, 0); 1237 + gpio_set_value_cansleep(cs42l52->pdata.reset_gpio, 1); 1238 + } 1239 + 1240 + i2c_set_clientdata(i2c_client, cs42l52); 1198 1241 1199 1242 ret = regmap_register_patch(cs42l52->regmap, cs42l52_threshold_patch, 1200 1243 ARRAY_SIZE(cs42l52_threshold_patch)); ··· 1223 1244 return ret; 1224 1245 } 1225 1246 1226 - regcache_cache_only(cs42l52->regmap, true); 1247 + dev_info(&i2c_client->dev, "Cirrus Logic CS42L52, Revision: %02X\n", 1248 + reg & 0xFF); 1249 + 1250 + /* Set Platform Data */ 1251 + if (cs42l52->pdata.mica_cfg) 1252 + regmap_update_bits(cs42l52->regmap, CS42L52_MICA_CTL, 1253 + CS42L52_MIC_CTL_TYPE_MASK, 1254 + cs42l52->pdata.mica_cfg << 1255 + CS42L52_MIC_CTL_TYPE_SHIFT); 1256 + 1257 + if (cs42l52->pdata.micb_cfg) 1258 + regmap_update_bits(cs42l52->regmap, CS42L52_MICB_CTL, 1259 + CS42L52_MIC_CTL_TYPE_MASK, 1260 + cs42l52->pdata.micb_cfg << 1261 + CS42L52_MIC_CTL_TYPE_SHIFT); 1262 + 1263 + if (cs42l52->pdata.mica_sel) 1264 + regmap_update_bits(cs42l52->regmap, CS42L52_MICA_CTL, 1265 + CS42L52_MIC_CTL_MIC_SEL_MASK, 1266 + cs42l52->pdata.mica_sel << 1267 + CS42L52_MIC_CTL_MIC_SEL_SHIFT); 1268 + if (cs42l52->pdata.micb_sel) 1269 + regmap_update_bits(cs42l52->regmap, CS42L52_MICB_CTL, 1270 + CS42L52_MIC_CTL_MIC_SEL_MASK, 1271 + cs42l52->pdata.micb_sel << 1272 + CS42L52_MIC_CTL_MIC_SEL_SHIFT); 1273 + 1274 + if (cs42l52->pdata.chgfreq) 1275 + regmap_update_bits(cs42l52->regmap, CS42L52_CHARGE_PUMP, 1276 + CS42L52_CHARGE_PUMP_MASK, 1277 + cs42l52->pdata.chgfreq << 1278 + CS42L52_CHARGE_PUMP_SHIFT); 1279 + 1280 + if (cs42l52->pdata.micbias_lvl) 1281 + regmap_update_bits(cs42l52->regmap, CS42L52_IFACE_CTL2, 1282 + CS42L52_IFACE_CTL2_BIAS_LVL, 1283 + cs42l52->pdata.micbias_lvl); 1227 1284 1228 1285 ret = snd_soc_register_codec(&i2c_client->dev, 1229 1286 &soc_codec_dev_cs42l52, &cs42l52_dai, 1);
+1 -1
sound/soc/codecs/cs42l52.h
··· 269 269 #define CS42L52_FIX_BITS1 0x3E 270 270 #define CS42L52_FIX_BITS2 0x47 271 271 272 - #define CS42L52_MAX_REGISTER 0x34 272 + #define CS42L52_MAX_REGISTER 0x47 273 273 274 274 #endif
+2
sound/soc/codecs/ml26124.c
··· 342 342 struct ml26124_priv *priv = snd_soc_codec_get_drvdata(codec); 343 343 int i = get_coeff(priv->mclk, params_rate(hw_params)); 344 344 345 + if (i < 0) 346 + return i; 345 347 priv->substream = substream; 346 348 priv->rate = params_rate(hw_params); 347 349
+4 -3
sound/soc/codecs/rt5640.c
··· 1604 1604 struct snd_soc_pcm_runtime *rtd = substream->private_data; 1605 1605 struct snd_soc_codec *codec = rtd->codec; 1606 1606 struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec); 1607 - unsigned int val_len = 0, val_clk, mask_clk, dai_sel; 1608 - int pre_div, bclk_ms, frame_size; 1607 + unsigned int val_len = 0, val_clk, mask_clk; 1608 + int dai_sel, pre_div, bclk_ms, frame_size; 1609 1609 1610 1610 rt5640->lrck[dai->id] = params_rate(params); 1611 1611 pre_div = get_clk_info(rt5640->sysclk, rt5640->lrck[dai->id]); ··· 1675 1675 { 1676 1676 struct snd_soc_codec *codec = dai->codec; 1677 1677 struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec); 1678 - unsigned int reg_val = 0, dai_sel; 1678 + unsigned int reg_val = 0; 1679 + int dai_sel; 1679 1680 1680 1681 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 1681 1682 case SND_SOC_DAIFMT_CBM_CFM:
+4 -4
sound/soc/codecs/wm0010.c
··· 793 793 wm0010->max_spi_freq = 0; 794 794 } else { 795 795 for (i = 0; i < ARRAY_SIZE(pll_clock_map); i++) 796 - if (freq >= pll_clock_map[i].max_sysclk) 796 + if (freq >= pll_clock_map[i].max_sysclk) { 797 + wm0010->max_spi_freq = pll_clock_map[i].max_pll_spi_speed; 798 + wm0010->pll_clkctrl1 = pll_clock_map[i].pll_clkctrl1; 797 799 break; 798 - 799 - wm0010->max_spi_freq = pll_clock_map[i].max_pll_spi_speed; 800 - wm0010->pll_clkctrl1 = pll_clock_map[i].pll_clkctrl1; 800 + } 801 801 } 802 802 803 803 return 0;
+12
sound/soc/codecs/wm5110.c
··· 983 983 ARIZONA_MUX_ROUTES("ASRC2L", "ASRC2L"), 984 984 ARIZONA_MUX_ROUTES("ASRC2R", "ASRC2R"), 985 985 986 + { "AEC Loopback", "HPOUT1L", "OUT1L" }, 987 + { "AEC Loopback", "HPOUT1R", "OUT1R" }, 986 988 { "HPOUT1L", NULL, "OUT1L" }, 987 989 { "HPOUT1R", NULL, "OUT1R" }, 988 990 991 + { "AEC Loopback", "HPOUT2L", "OUT2L" }, 992 + { "AEC Loopback", "HPOUT2R", "OUT2R" }, 989 993 { "HPOUT2L", NULL, "OUT2L" }, 990 994 { "HPOUT2R", NULL, "OUT2R" }, 991 995 996 + { "AEC Loopback", "HPOUT3L", "OUT3L" }, 997 + { "AEC Loopback", "HPOUT3R", "OUT3R" }, 992 998 { "HPOUT3L", NULL, "OUT3L" }, 993 999 { "HPOUT3R", NULL, "OUT3L" }, 994 1000 1001 + { "AEC Loopback", "SPKOUTL", "OUT4L" }, 995 1002 { "SPKOUTLN", NULL, "OUT4L" }, 996 1003 { "SPKOUTLP", NULL, "OUT4L" }, 997 1004 1005 + { "AEC Loopback", "SPKOUTR", "OUT4R" }, 998 1006 { "SPKOUTRN", NULL, "OUT4R" }, 999 1007 { "SPKOUTRP", NULL, "OUT4R" }, 1000 1008 1009 + { "AEC Loopback", "SPKDAT1L", "OUT5L" }, 1010 + { "AEC Loopback", "SPKDAT1R", "OUT5R" }, 1001 1011 { "SPKDAT1L", NULL, "OUT5L" }, 1002 1012 { "SPKDAT1R", NULL, "OUT5R" }, 1003 1013 1014 + { "AEC Loopback", "SPKDAT2L", "OUT6L" }, 1015 + { "AEC Loopback", "SPKDAT2R", "OUT6R" }, 1004 1016 { "SPKDAT2L", NULL, "OUT6L" }, 1005 1017 { "SPKDAT2R", NULL, "OUT6R" }, 1006 1018
+15 -7
sound/soc/codecs/wm8962.c
··· 1758 1758 WM8962_EQL_B4_GAIN_SHIFT, 31, 0, eq_tlv), 1759 1759 SOC_DOUBLE_R_TLV("EQ5 Volume", WM8962_EQ3, WM8962_EQ23, 1760 1760 WM8962_EQL_B5_GAIN_SHIFT, 31, 0, eq_tlv), 1761 + SND_SOC_BYTES("EQL Coefficients", WM8962_EQ4, 18), 1762 + SND_SOC_BYTES("EQR Coefficients", WM8962_EQ24, 18), 1763 + 1761 1764 1762 1765 SOC_SINGLE("3D Switch", WM8962_THREED1, 0, 1, 0), 1763 1766 SND_SOC_BYTES_MASK("3D Coefficients", WM8962_THREED1, 4, WM8962_THREED_ENA), ··· 1778 1775 SND_SOC_BYTES("HPF Coefficients", WM8962_LHPF2, 1), 1779 1776 WM8962_DSP2_ENABLE("HD Bass Switch", WM8962_HDBASS_ENA_SHIFT), 1780 1777 SND_SOC_BYTES("HD Bass Coefficients", WM8962_HDBASS_AI_1, 30), 1778 + 1779 + SOC_DOUBLE("ALC Switch", WM8962_ALC1, WM8962_ALCL_ENA_SHIFT, 1780 + WM8962_ALCR_ENA_SHIFT, 1, 0), 1781 + SND_SOC_BYTES_MASK("ALC Coefficients", WM8962_ALC1, 4, 1782 + WM8962_ALCL_ENA_MASK | WM8962_ALCR_ENA_MASK), 1781 1783 }; 1782 1784 1783 1785 static const struct snd_kcontrol_new wm8962_spk_mono_controls[] = { ··· 3624 3616 0); 3625 3617 3626 3618 /* Apply static configuration for GPIOs */ 3627 - for (i = 0; i < ARRAY_SIZE(pdata->gpio_init); i++) 3628 - if (pdata->gpio_init[i]) { 3619 + for (i = 0; i < ARRAY_SIZE(wm8962->pdata.gpio_init); i++) 3620 + if (wm8962->pdata.gpio_init[i]) { 3629 3621 wm8962_set_gpio_mode(wm8962, i + 1); 3630 3622 regmap_write(wm8962->regmap, 0x200 + i, 3631 - pdata->gpio_init[i] & 0xffff); 3623 + wm8962->pdata.gpio_init[i] & 0xffff); 3632 3624 } 3633 3625 3634 3626 3635 3627 /* Put the speakers into mono mode? */ 3636 - if (pdata->spk_mono) 3628 + if (wm8962->pdata.spk_mono) 3637 3629 regmap_update_bits(wm8962->regmap, WM8962_CLASS_D_CONTROL_2, 3638 3630 WM8962_SPK_MONO_MASK, WM8962_SPK_MONO); 3639 3631 3640 3632 /* Micbias setup, detection enable and detection 3641 3633 * threasholds. */ 3642 - if (pdata->mic_cfg) 3634 + if (wm8962->pdata.mic_cfg) 3643 3635 regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4, 3644 3636 WM8962_MICDET_ENA | 3645 3637 WM8962_MICDET_THR_MASK | 3646 3638 WM8962_MICSHORT_THR_MASK | 3647 3639 WM8962_MICBIAS_LVL, 3648 - pdata->mic_cfg); 3640 + wm8962->pdata.mic_cfg); 3649 3641 3650 3642 /* Latch volume update bits */ 3651 3643 regmap_update_bits(wm8962->regmap, WM8962_LEFT_INPUT_VOLUME, ··· 3690 3682 } 3691 3683 3692 3684 if (wm8962->irq) { 3693 - if (pdata->irq_active_low) { 3685 + if (wm8962->pdata.irq_active_low) { 3694 3686 trigger = IRQF_TRIGGER_LOW; 3695 3687 irq_pol = WM8962_IRQ_POL; 3696 3688 } else {
+2
sound/soc/codecs/wm8996.c
··· 438 438 struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); 439 439 int block = wm8996_get_retune_mobile_block(kcontrol->id.name); 440 440 441 + if (block < 0) 442 + return block; 441 443 ucontrol->value.enumerated.item[0] = wm8996->retune_mobile_cfg[block]; 442 444 443 445 return 0;
+20 -7
sound/soc/codecs/wm_adsp.c
··· 396 396 ret = regmap_raw_write(adsp->regmap, reg, scratch, 397 397 ctl->len); 398 398 if (ret) { 399 - adsp_err(adsp, "Failed to write %zu bytes to %x\n", 400 - ctl->len, reg); 399 + adsp_err(adsp, "Failed to write %zu bytes to %x: %d\n", 400 + ctl->len, reg, ret); 401 401 kfree(scratch); 402 402 return ret; 403 403 } 404 + adsp_dbg(adsp, "Wrote %zu bytes to %x\n", ctl->len, reg); 404 405 405 406 kfree(scratch); 406 407 ··· 451 450 452 451 ret = regmap_raw_read(adsp->regmap, reg, scratch, ctl->len); 453 452 if (ret) { 454 - adsp_err(adsp, "Failed to read %zu bytes from %x\n", 455 - ctl->len, reg); 453 + adsp_err(adsp, "Failed to read %zu bytes from %x: %d\n", 454 + ctl->len, reg, ret); 456 455 kfree(scratch); 457 456 return ret; 458 457 } 458 + adsp_dbg(adsp, "Read %zu bytes from %x\n", ctl->len, reg); 459 459 460 460 memcpy(buf, scratch, ctl->len); 461 461 kfree(scratch); ··· 570 568 file, header->ver); 571 569 goto out_fw; 572 570 } 571 + adsp_info(dsp, "Firmware version: %d\n", header->ver); 573 572 574 573 if (header->core != dsp->type) { 575 574 adsp_err(dsp, "%s: invalid core %d != %d\n", ··· 692 689 &buf_list); 693 690 if (!buf) { 694 691 adsp_err(dsp, "Out of memory\n"); 695 - return -ENOMEM; 692 + ret = -ENOMEM; 693 + goto out_fw; 696 694 } 697 695 698 696 ret = regmap_raw_write_async(regmap, reg, buf->buf, ··· 1317 1313 le32_to_cpu(blk->len)); 1318 1314 if (ret != 0) { 1319 1315 adsp_err(dsp, 1320 - "%s.%d: Failed to write to %x in %s\n", 1321 - file, blocks, reg, region_name); 1316 + "%s.%d: Failed to write to %x in %s: %d\n", 1317 + file, blocks, reg, region_name, ret); 1322 1318 } 1323 1319 } 1324 1320 ··· 1362 1358 struct snd_soc_codec *codec = w->codec; 1363 1359 struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec); 1364 1360 struct wm_adsp *dsp = &dsps[w->shift]; 1361 + struct wm_adsp_alg_region *alg_region; 1365 1362 struct wm_coeff_ctl *ctl; 1366 1363 int ret; 1367 1364 int val; ··· 1440 1435 1441 1436 list_for_each_entry(ctl, &dsp->ctl_list, list) 1442 1437 ctl->enabled = 0; 1438 + 1439 + while (!list_empty(&dsp->alg_regions)) { 1440 + alg_region = list_first_entry(&dsp->alg_regions, 1441 + struct wm_adsp_alg_region, 1442 + list); 1443 + list_del(&alg_region->list); 1444 + kfree(alg_region); 1445 + } 1443 1446 break; 1444 1447 1445 1448 default:
+1
sound/soc/codecs/wm_hubs.c
··· 530 530 hubs->hp_startup_mode); 531 531 break; 532 532 } 533 + break; 533 534 534 535 case SND_SOC_DAPM_PRE_PMD: 535 536 snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
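Note: the wm_hubs.c one-liner adds the break that was missing after the SND_SOC_DAPM_POST_PMU case, so power-up handling no longer falls through into the SND_SOC_DAPM_PRE_PMD branch. A stripped-down sketch of the intended shape (the handler body is a placeholder):

#include <sound/soc-dapm.h>

/* Sketch: each DAPM event case ends with its own break; without it the
 * next case's code also executes.
 */
static int hp_event_sketch(int event)
{
	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* ... charge pump / headphone power-up handling ... */
		break;	/* the fix: stop here instead of falling through */
	case SND_SOC_DAPM_PRE_PMD:
		/* ... power-down handling ... */
		break;
	}

	return 0;
}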
+10 -5
sound/soc/fsl/eukrea-tlv320.c
··· 42 42 SND_SOC_DAIFMT_NB_NF | 43 43 SND_SOC_DAIFMT_CBM_CFM); 44 44 if (ret) { 45 - pr_err("%s: failed set cpu dai format\n", __func__); 45 + dev_err(cpu_dai->dev, 46 + "Failed to set the cpu dai format.\n"); 46 47 return ret; 47 48 } 48 49 ··· 51 50 SND_SOC_DAIFMT_NB_NF | 52 51 SND_SOC_DAIFMT_CBM_CFM); 53 52 if (ret) { 54 - pr_err("%s: failed set codec dai format\n", __func__); 53 + dev_err(cpu_dai->dev, 54 + "Failed to set the codec format.\n"); 55 55 return ret; 56 56 } 57 57 58 58 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 59 59 CODEC_CLOCK, SND_SOC_CLOCK_OUT); 60 60 if (ret) { 61 - pr_err("%s: failed setting codec sysclk\n", __func__); 61 + dev_err(cpu_dai->dev, 62 + "Failed to set the codec sysclk.\n"); 62 63 return ret; 63 64 } 64 65 snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0); ··· 68 65 ret = snd_soc_dai_set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0, 69 66 SND_SOC_CLOCK_IN); 70 67 if (ret) { 71 - pr_err("can't set CPU system clock IMX_SSP_SYS_CLK\n"); 68 + dev_err(cpu_dai->dev, 69 + "Can't set the IMX_SSP_SYS_CLK CPU system clock.\n"); 72 70 return ret; 73 71 } 74 72 ··· 159 155 .owner = THIS_MODULE, 160 156 }, 161 157 .probe = eukrea_tlv320_probe, 162 - .remove = eukrea_tlv320_remove,}; 158 + .remove = eukrea_tlv320_remove, 159 + }; 163 160 164 161 module_platform_driver(eukrea_tlv320_driver); 165 162
+2 -2
sound/soc/fsl/fsl_spdif.c
··· 1107 1107 1108 1108 /* Get the addresses and IRQ */ 1109 1109 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1110 - if (IS_ERR(res)) { 1110 + if (!res) { 1111 1111 dev_err(&pdev->dev, "could not determine device resources\n"); 1112 - return PTR_ERR(res); 1112 + return -ENXIO; 1113 1113 } 1114 1114 1115 1115 regs = devm_ioremap_resource(&pdev->dev, res);
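Note: the fsl_spdif.c fix matches the check to the API's error convention: platform_get_resource() returns NULL on failure and never an ERR_PTR, while devm_ioremap_resource() does return ERR_PTR values. A short sketch of the usual probe sequence, with a placeholder function name:

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>

/* Sketch: NULL-check the resource, ERR_PTR-check the mapping. */
static void __iomem *map_regs_sketch(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)	/* NULL on failure, so IS_ERR()/PTR_ERR() never trigger */
		return ERR_PTR(-ENXIO);

	/* devm_ioremap_resource() uses ERR_PTR; callers check with IS_ERR() */
	return devm_ioremap_resource(&pdev->dev, res);
}

The samsung/i2s.c hunk further down is the same class of fix for platform_device_alloc(), which also reports failure with NULL rather than an error pointer.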
+1 -1
sound/soc/kirkwood/kirkwood-i2s.c
··· 568 568 } else { 569 569 dev_info(&pdev->dev, "found external clock\n"); 570 570 clk_prepare_enable(priv->extclk); 571 - soc_dai = &kirkwood_i2s_dai_extclk; 571 + soc_dai = kirkwood_i2s_dai_extclk; 572 572 } 573 573 } 574 574
+1 -1
sound/soc/samsung/i2s.c
··· 1068 1068 dev_set_drvdata(&i2s->pdev->dev, i2s); 1069 1069 } else { /* Create a new platform_device for Secondary */ 1070 1070 i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1); 1071 - if (IS_ERR(i2s->pdev)) 1071 + if (!i2s->pdev) 1072 1072 return NULL; 1073 1073 1074 1074 i2s->pdev->dev.parent = &pdev->dev;
+20 -11
sound/soc/sh/rcar/core.c
··· 94 94 * 95 95 */ 96 96 #include <linux/pm_runtime.h> 97 + #include <linux/shdma-base.h> 97 98 #include "rsnd.h" 98 99 99 100 #define RSND_RATES SNDRV_PCM_RATE_8000_96000 ··· 210 209 return !!dma->chan; 211 210 } 212 211 213 - static bool rsnd_dma_filter(struct dma_chan *chan, void *param) 214 - { 215 - chan->private = param; 216 - 217 - return true; 218 - } 219 - 220 212 int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma, 221 213 int is_play, int id, 222 214 int (*inquiry)(struct rsnd_dma *dma, ··· 217 223 int (*complete)(struct rsnd_dma *dma)) 218 224 { 219 225 struct device *dev = rsnd_priv_to_dev(priv); 226 + struct dma_slave_config cfg; 220 227 dma_cap_mask_t mask; 228 + int ret; 221 229 222 230 if (dma->chan) { 223 231 dev_err(dev, "it already has dma channel\n"); ··· 229 233 dma_cap_zero(mask); 230 234 dma_cap_set(DMA_SLAVE, mask); 231 235 232 - dma->slave.shdma_slave.slave_id = id; 233 - 234 - dma->chan = dma_request_channel(mask, rsnd_dma_filter, 235 - &dma->slave.shdma_slave); 236 + dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, 237 + (void *)id, dev, 238 + is_play ? "tx" : "rx"); 236 239 if (!dma->chan) { 237 240 dev_err(dev, "can't get dma channel\n"); 238 241 return -EIO; 239 242 } 243 + 244 + cfg.slave_id = id; 245 + cfg.dst_addr = 0; /* use default addr when playback */ 246 + cfg.src_addr = 0; /* use default addr when capture */ 247 + cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 248 + 249 + ret = dmaengine_slave_config(dma->chan, &cfg); 250 + if (ret < 0) 251 + goto rsnd_dma_init_err; 240 252 241 253 dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 242 254 dma->priv = priv; ··· 253 249 INIT_WORK(&dma->work, rsnd_dma_do_work); 254 250 255 251 return 0; 252 + 253 + rsnd_dma_init_err: 254 + rsnd_dma_quit(priv, dma); 255 + 256 + return ret; 256 257 } 257 258 258 259 void rsnd_dma_quit(struct rsnd_priv *priv,
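Note: the rcar/core.c change replaces a private filter callback with dma_request_slave_channel_compat() plus an explicit dmaengine_slave_config(), so the same code path works whether the channel comes from a DT "tx"/"rx" binding or from the legacy shdma slave id. A condensed sketch of that request-then-configure sequence, with error handling trimmed and a made-up function name:

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/* Sketch: grab a slave channel by DT name (falling back to the shdma
 * filter), then describe the transfer direction before first use.
 */
static struct dma_chan *request_audio_dma(struct device *dev, int id,
					  bool is_play)
{
	struct dma_slave_config cfg = { };
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						(void *)(unsigned long)id,
						dev, is_play ? "tx" : "rx");
	if (!chan)
		return NULL;

	cfg.slave_id = id;
	cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(chan, &cfg) < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}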
+3 -2
sound/soc/soc-core.c
··· 2551 2551 2552 2552 if (uinfo->value.enumerated.item > e->max - 1) 2553 2553 uinfo->value.enumerated.item = e->max - 1; 2554 - strcpy(uinfo->value.enumerated.name, 2555 - e->texts[uinfo->value.enumerated.item]); 2554 + strlcpy(uinfo->value.enumerated.name, 2555 + e->texts[uinfo->value.enumerated.item], 2556 + sizeof(uinfo->value.enumerated.name)); 2556 2557 return 0; 2557 2558 } 2558 2559 EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
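Note: the soc-core.c hunk swaps strcpy() for strlcpy() because value.enumerated.name is a fixed-size array and enum texts supplied by drivers can be longer. A minimal sketch of the bounded copy; the struct here is a stand-in for the ALSA one:

#include <linux/string.h>

struct enum_info_sketch {
	char name[64];	/* fixed-size destination, like snd_ctl_elem_info */
};

/* Sketch: bound the copy to the destination; strlcpy() always
 * NUL-terminates and truncates over-long source strings.
 */
static void set_enum_name(struct enum_info_sketch *info, const char *text)
{
	strlcpy(info->name, text, sizeof(info->name));
}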
+3 -1
sound/soc/soc-dapm.c
··· 1974 1974 w->active ? "active" : "inactive"); 1975 1975 1976 1976 list_for_each_entry(p, &w->sources, list_sink) { 1977 - if (p->connected && !p->connected(w, p->sink)) 1977 + if (p->connected && !p->connected(w, p->source)) 1978 1978 continue; 1979 1979 1980 1980 if (p->connect) ··· 3525 3525 if (!w) { 3526 3526 dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", 3527 3527 dai->driver->playback.stream_name); 3528 + return -ENOMEM; 3528 3529 } 3529 3530 3530 3531 w->priv = dai; ··· 3544 3543 if (!w) { 3545 3544 dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", 3546 3545 dai->driver->capture.stream_name); 3546 + return -ENOMEM; 3547 3547 } 3548 3548 3549 3549 w->priv = dai;
+1 -1
sound/soc/soc-generic-dmaengine-pcm.c
··· 25 25 #include <sound/dmaengine_pcm.h> 26 26 27 27 struct dmaengine_pcm { 28 - struct dma_chan *chan[SNDRV_PCM_STREAM_CAPTURE + 1]; 28 + struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; 29 29 const struct snd_dmaengine_pcm_config *config; 30 30 struct snd_soc_platform platform; 31 31 unsigned int flags;
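Note: the soc-generic-dmaengine-pcm.c one-liner sizes chan[] with SNDRV_PCM_STREAM_LAST so the array is dimensioned by the enum's last value rather than by a hard-coded direction, since it is indexed directly by the substream's stream number. A tiny sketch of that indexing assumption (struct and function names are invented):

#include <linux/dmaengine.h>
#include <sound/pcm.h>

/* Sketch: any table indexed by substream->stream must have
 * SNDRV_PCM_STREAM_LAST + 1 entries.
 */
struct chan_table_sketch {
	struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
};

static struct dma_chan *chan_for(struct chan_table_sketch *t,
				 struct snd_pcm_substream *substream)
{
	return t->chan[substream->stream];	/* PLAYBACK or CAPTURE */
}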
+30 -22
sound/soc/soc-pcm.c
··· 190 190 mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass); 191 191 192 192 /* startup the audio subsystem */ 193 - if (cpu_dai->driver->ops->startup) { 193 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->startup) { 194 194 ret = cpu_dai->driver->ops->startup(substream, cpu_dai); 195 195 if (ret < 0) { 196 196 dev_err(cpu_dai->dev, "ASoC: can't open interface" ··· 208 208 } 209 209 } 210 210 211 - if (codec_dai->driver->ops->startup) { 211 + if (codec_dai->driver->ops && codec_dai->driver->ops->startup) { 212 212 ret = codec_dai->driver->ops->startup(substream, codec_dai); 213 213 if (ret < 0) { 214 214 dev_err(codec_dai->dev, "ASoC: can't open codec" ··· 463 463 } 464 464 } 465 465 466 - if (codec_dai->driver->ops->prepare) { 466 + if (codec_dai->driver->ops && codec_dai->driver->ops->prepare) { 467 467 ret = codec_dai->driver->ops->prepare(substream, codec_dai); 468 468 if (ret < 0) { 469 469 dev_err(codec_dai->dev, "ASoC: DAI prepare error: %d\n", ··· 472 472 } 473 473 } 474 474 475 - if (cpu_dai->driver->ops->prepare) { 475 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->prepare) { 476 476 ret = cpu_dai->driver->ops->prepare(substream, cpu_dai); 477 477 if (ret < 0) { 478 478 dev_err(cpu_dai->dev, "ASoC: DAI prepare error: %d\n", ··· 523 523 } 524 524 } 525 525 526 - if (codec_dai->driver->ops->hw_params) { 526 + if (codec_dai->driver->ops && codec_dai->driver->ops->hw_params) { 527 527 ret = codec_dai->driver->ops->hw_params(substream, params, codec_dai); 528 528 if (ret < 0) { 529 529 dev_err(codec_dai->dev, "ASoC: can't set %s hw params:" ··· 532 532 } 533 533 } 534 534 535 - if (cpu_dai->driver->ops->hw_params) { 535 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_params) { 536 536 ret = cpu_dai->driver->ops->hw_params(substream, params, cpu_dai); 537 537 if (ret < 0) { 538 538 dev_err(cpu_dai->dev, "ASoC: %s hw params failed: %d\n", ··· 559 559 return ret; 560 560 561 561 platform_err: 562 - if (cpu_dai->driver->ops->hw_free) 562 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free) 563 563 cpu_dai->driver->ops->hw_free(substream, cpu_dai); 564 564 565 565 interface_err: 566 - if (codec_dai->driver->ops->hw_free) 566 + if (codec_dai->driver->ops && codec_dai->driver->ops->hw_free) 567 567 codec_dai->driver->ops->hw_free(substream, codec_dai); 568 568 569 569 codec_err: ··· 600 600 platform->driver->ops->hw_free(substream); 601 601 602 602 /* now free hw params for the DAIs */ 603 - if (codec_dai->driver->ops->hw_free) 603 + if (codec_dai->driver->ops && codec_dai->driver->ops->hw_free) 604 604 codec_dai->driver->ops->hw_free(substream, codec_dai); 605 605 606 - if (cpu_dai->driver->ops->hw_free) 606 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free) 607 607 cpu_dai->driver->ops->hw_free(substream, cpu_dai); 608 608 609 609 mutex_unlock(&rtd->pcm_mutex); ··· 618 618 struct snd_soc_dai *codec_dai = rtd->codec_dai; 619 619 int ret; 620 620 621 - if (codec_dai->driver->ops->trigger) { 621 + if (codec_dai->driver->ops && codec_dai->driver->ops->trigger) { 622 622 ret = codec_dai->driver->ops->trigger(substream, cmd, codec_dai); 623 623 if (ret < 0) 624 624 return ret; ··· 630 630 return ret; 631 631 } 632 632 633 - if (cpu_dai->driver->ops->trigger) { 633 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->trigger) { 634 634 ret = cpu_dai->driver->ops->trigger(substream, cmd, cpu_dai); 635 635 if (ret < 0) 636 636 return ret; ··· 647 647 struct snd_soc_dai *codec_dai = rtd->codec_dai; 648 648 int ret; 649 649 650 - if 
(codec_dai->driver->ops->bespoke_trigger) { 650 + if (codec_dai->driver->ops && 651 + codec_dai->driver->ops->bespoke_trigger) { 651 652 ret = codec_dai->driver->ops->bespoke_trigger(substream, cmd, codec_dai); 652 653 if (ret < 0) 653 654 return ret; 654 655 } 655 656 656 - if (platform->driver->bespoke_trigger) { 657 + if (platform->driver->ops && platform->driver->bespoke_trigger) { 657 658 ret = platform->driver->bespoke_trigger(substream, cmd); 658 659 if (ret < 0) 659 660 return ret; 660 661 } 661 662 662 - if (cpu_dai->driver->ops->bespoke_trigger) { 663 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->bespoke_trigger) { 663 664 ret = cpu_dai->driver->ops->bespoke_trigger(substream, cmd, cpu_dai); 664 665 if (ret < 0) 665 666 return ret; ··· 685 684 if (platform->driver->ops && platform->driver->ops->pointer) 686 685 offset = platform->driver->ops->pointer(substream); 687 686 688 - if (cpu_dai->driver->ops->delay) 687 + if (cpu_dai->driver->ops && cpu_dai->driver->ops->delay) 689 688 delay += cpu_dai->driver->ops->delay(substream, cpu_dai); 690 689 691 - if (codec_dai->driver->ops->delay) 690 + if (codec_dai->driver->ops && codec_dai->driver->ops->delay) 692 691 delay += codec_dai->driver->ops->delay(substream, codec_dai); 693 692 694 693 if (platform->driver->delay) ··· 1038 1037 struct snd_pcm_substream *be_substream = 1039 1038 snd_soc_dpcm_get_substream(be, stream); 1040 1039 1040 + if (!be_substream) { 1041 + dev_err(be->dev, "ASoC: no backend %s stream\n", 1042 + stream ? "capture" : "playback"); 1043 + continue; 1044 + } 1045 + 1041 1046 /* is this op for this BE ? */ 1042 1047 if (!snd_soc_dpcm_be_can_update(fe, be, stream)) 1043 1048 continue; ··· 1061 1054 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE)) 1062 1055 continue; 1063 1056 1064 - dev_dbg(be->dev, "ASoC: open BE %s\n", be->dai_link->name); 1057 + dev_dbg(be->dev, "ASoC: open %s BE %s\n", 1058 + stream ? "capture" : "playback", be->dai_link->name); 1065 1059 1066 1060 be_substream->runtime = be->dpcm[stream].runtime; 1067 1061 err = soc_pcm_open(be_substream); ··· 1681 1673 struct snd_soc_pcm_runtime *rtd = substream->private_data; 1682 1674 struct snd_soc_platform *platform = rtd->platform; 1683 1675 1684 - if (platform->driver->ops->ioctl) 1676 + if (platform->driver->ops && platform->driver->ops->ioctl) 1685 1677 return platform->driver->ops->ioctl(substream, cmd, arg); 1686 1678 return snd_pcm_lib_ioctl(substream, cmd, arg); 1687 1679 } ··· 1942 1934 1943 1935 dev_dbg(be->dev, "ASoC: BE digital mute %s\n", be->dai_link->name); 1944 1936 1945 - if (drv->ops->digital_mute && dai->playback_active) 1946 - drv->ops->digital_mute(dai, mute); 1937 + if (drv->ops && drv->ops->digital_mute && dai->playback_active) 1938 + drv->ops->digital_mute(dai, mute); 1947 1939 } 1948 1940 1949 1941 return 0; ··· 2232 2224 int snd_soc_platform_trigger(struct snd_pcm_substream *substream, 2233 2225 int cmd, struct snd_soc_platform *platform) 2234 2226 { 2235 - if (platform->driver->ops->trigger) 2227 + if (platform->driver->ops && platform->driver->ops->trigger) 2236 2228 return platform->driver->ops->trigger(substream, cmd); 2237 2229 return 0; 2238 2230 }
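Note: almost every soc-pcm.c hunk applies the same guard: a DAI driver may register with a NULL ops table, so each callback is dereferenced only after checking ops itself. A compact sketch of the pattern using the startup hook; the helper name is invented (the tree open-codes the check as shown above):

#include <sound/soc.h>

/* Sketch: call an optional DAI callback only when both the ops table and
 * the specific hook are present; a missing hook is not an error.
 */
static int dai_startup_if_present(struct snd_pcm_substream *substream,
				  struct snd_soc_dai *dai)
{
	if (dai->driver->ops && dai->driver->ops->startup)
		return dai->driver->ops->startup(substream, dai);

	return 0;
}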
+13 -17
tools/perf/util/event.c
··· 187 187 return -1; 188 188 } 189 189 190 - event->header.type = PERF_RECORD_MMAP2; 190 + event->header.type = PERF_RECORD_MMAP; 191 191 /* 192 192 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 193 193 */ ··· 198 198 char prot[5]; 199 199 char execname[PATH_MAX]; 200 200 char anonstr[] = "//anon"; 201 - unsigned int ino; 202 201 size_t size; 203 202 ssize_t n; 204 203 ··· 208 209 strcpy(execname, ""); 209 210 210 211 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 211 - n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n", 212 - &event->mmap2.start, &event->mmap2.len, prot, 213 - &event->mmap2.pgoff, &event->mmap2.maj, 214 - &event->mmap2.min, 215 - &ino, execname); 216 - 217 - event->mmap2.ino = (u64)ino; 212 + n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n", 213 + &event->mmap.start, &event->mmap.len, prot, 214 + &event->mmap.pgoff, 215 + execname); 218 216 219 217 if (n != 8) 220 218 continue; ··· 223 227 strcpy(execname, anonstr); 224 228 225 229 size = strlen(execname) + 1; 226 - memcpy(event->mmap2.filename, execname, size); 230 + memcpy(event->mmap.filename, execname, size); 227 231 size = PERF_ALIGN(size, sizeof(u64)); 228 - event->mmap2.len -= event->mmap.start; 229 - event->mmap2.header.size = (sizeof(event->mmap2) - 230 - (sizeof(event->mmap2.filename) - size)); 231 - memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 232 - event->mmap2.header.size += machine->id_hdr_size; 233 - event->mmap2.pid = tgid; 234 - event->mmap2.tid = pid; 232 + event->mmap.len -= event->mmap.start; 233 + event->mmap.header.size = (sizeof(event->mmap) - 234 + (sizeof(event->mmap.filename) - size)); 235 + memset(event->mmap.filename + size, 0, machine->id_hdr_size); 236 + event->mmap.header.size += machine->id_hdr_size; 237 + event->mmap.pid = tgid; 238 + event->mmap.tid = pid; 235 239 236 240 if (process(tool, event, &synth_sample, machine) != 0) { 237 241 rc = -1;
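Note: the event.c revert goes back to plain PERF_RECORD_MMAP events and uses sscanf assignment suppression ("%*x", "%*u") to skip the dev and inode columns of /proc/<pid>/maps while still consuming them. A standalone illustration of that conversion; buffer sizes and the function name are assumptions:

#include <stdio.h>
#include <inttypes.h>

/* Sketch: parse one maps line, discarding dev/inode with the '*'
 * (assignment-suppression) modifier. Suppressed conversions are not
 * counted in sscanf()'s return value, so five fields are expected here.
 * prot must hold at least 5 bytes and execname at least 4096.
 */
static int parse_maps_line(const char *bf, uint64_t *start, uint64_t *len,
			   char *prot, uint64_t *pgoff, char *execname)
{
	uint64_t end;
	int n;

	/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
	n = sscanf(bf, "%" SCNx64 "-%" SCNx64 " %4s %" SCNx64 " %*x:%*x %*u %4095s",
		   start, &end, prot, pgoff, execname);
	if (n != 5)
		return -1;

	*len = end - *start;
	return 0;
}

Lines without a pathname column (anonymous mappings) are rejected by this simplified parser; the real synthesizer substitutes "//anon" for them, as the hunk above shows.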
-1
tools/perf/util/evsel.c
··· 678 678 attr->sample_type |= PERF_SAMPLE_WEIGHT; 679 679 680 680 attr->mmap = track; 681 - attr->mmap2 = track && !perf_missing_features.mmap2; 682 681 attr->comm = track; 683 682 684 683 /*
+1 -1
tools/perf/util/probe-finder.c
··· 1357 1357 goto post; 1358 1358 } 1359 1359 1360 + fname = dwarf_decl_file(&spdie); 1360 1361 if (addr == (unsigned long)baseaddr) { 1361 1362 /* Function entry - Relative line number is 0 */ 1362 1363 lineno = baseline; 1363 - fname = dwarf_decl_file(&spdie); 1364 1364 goto post; 1365 1365 } 1366 1366
+1 -1
tools/perf/util/scripting-engines/trace-event-perl.c
··· 282 282 283 283 event = find_cache_event(evsel); 284 284 if (!event) 285 - die("ug! no event found for type %" PRIu64, evsel->attr.config); 285 + die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config); 286 286 287 287 pid = raw_field_value(event, "common_pid", data); 288 288