Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cleanups/dma' into next/cleanup

Separate patches from Marek Szyprowski <m.szyprowski@samsung.com>:

Commit e9da6e9905e639b0 ("ARM: dma-mapping: remove custom consistent dma
region") replaced custom consistent memory handling, so setting
consistent dma memory size is no longer required. This patch series
cleans sub-architecture platform code to remove all calls to the
obsoleted init_consistent_dma_size() function and finally removes the
init_consistent_dma_size() stub itself.

* cleanups/dma:
ARM: at91: remove obsoleted init_consistent_dma_size()
ARM: u300: remove obsoleted init_consistent_dma_size()
ARM: dma-mapping: remove init_consistent_dma_size() stub
ARM: shmobile: remove obsoleted init_consistent_dma_size()
ARM: davinci: remove obsoleted init_consistent_dma_size()
ARM: samsung: remove obsoleted init_consistent_dma_size()

Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+3862 -2343
+7 -7
Documentation/arm64/memory.txt
··· 27 27 ----------------------------------------------------------------------- 28 28 0000000000000000 0000007fffffffff 512GB user 29 29 30 - ffffff8000000000 ffffffbbfffcffff ~240GB vmalloc 30 + ffffff8000000000 ffffffbbfffeffff ~240GB vmalloc 31 31 32 - ffffffbbfffd0000 ffffffbcfffdffff 64KB [guard page] 33 - 34 - ffffffbbfffe0000 ffffffbcfffeffff 64KB PCI I/O space 35 - 36 - ffffffbbffff0000 ffffffbcffffffff 64KB [guard page] 32 + ffffffbbffff0000 ffffffbbffffffff 64KB [guard page] 37 33 38 34 ffffffbc00000000 ffffffbdffffffff 8GB vmemmap 39 35 40 - ffffffbe00000000 ffffffbffbffffff ~8GB [guard, future vmmemap] 36 + ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap] 37 + 38 + ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space 39 + 40 + ffffffbbffff0000 ffffffbcffffffff ~2MB [guard] 41 41 42 42 ffffffbffc000000 ffffffbfffffffff 64MB modules 43 43
+19
Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
··· 1 + * EETI eGalax Multiple Touch Controller 2 + 3 + Required properties: 4 + - compatible: must be "eeti,egalax_ts" 5 + - reg: i2c slave address 6 + - interrupt-parent: the phandle for the interrupt controller 7 + - interrupts: touch controller interrupt 8 + - wakeup-gpios: the gpio pin to be used for waking up the controller 9 + as well as uased as irq pin 10 + 11 + Example: 12 + 13 + egalax_ts@04 { 14 + compatible = "eeti,egalax_ts"; 15 + reg = <0x04>; 16 + interrupt-parent = <&gpio1>; 17 + interrupts = <9 2>; 18 + wakeup-gpios = <&gpio1 9 0>; 19 + };
+1 -1
Documentation/hwmon/fam15h_power
··· 10 10 BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors 11 11 (not yet published) 12 12 13 - Author: Andreas Herrmann <andreas.herrmann3@amd.com> 13 + Author: Andreas Herrmann <herrmann.der.user@googlemail.com> 14 14 15 15 Description 16 16 -----------
+3 -2
MAINTAINERS
··· 503 503 F: include/linux/altera_jtaguart.h 504 504 505 505 AMD FAM15H PROCESSOR POWER MONITORING DRIVER 506 - M: Andreas Herrmann <andreas.herrmann3@amd.com> 506 + M: Andreas Herrmann <herrmann.der.user@googlemail.com> 507 507 L: lm-sensors@lm-sensors.org 508 508 S: Maintained 509 509 F: Documentation/hwmon/fam15h_power ··· 2506 2506 M: Seung-Woo Kim <sw0312.kim@samsung.com> 2507 2507 M: Kyungmin Park <kyungmin.park@samsung.com> 2508 2508 L: dri-devel@lists.freedesktop.org 2509 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git 2509 2510 S: Supported 2510 2511 F: drivers/gpu/drm/exynos 2511 2512 F: include/drm/exynos* ··· 5647 5646 F: drivers/pinctrl/spear/ 5648 5647 5649 5648 PKTCDVD DRIVER 5650 - M: Peter Osterlund <petero2@telia.com> 5649 + M: Jiri Kosina <jkosina@suse.cz> 5651 5650 S: Maintained 5652 5651 F: drivers/block/pktcdvd.c 5653 5652 F: include/linux/pktcdvd.h
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 7 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc5 5 5 NAME = Terrified Chipmunk 6 6 7 7 # *DOCUMENTATION*
-7
arch/arm/include/asm/dma-mapping.h
··· 211 211 extern void __init init_dma_coherent_pool_size(unsigned long size); 212 212 213 213 /* 214 - * This can be called during boot to increase the size of the consistent 215 - * DMA region above it's default value of 2MB. It must be called before the 216 - * memory allocator is initialised, i.e. before any core_initcall. 217 - */ 218 - static inline void init_consistent_dma_size(unsigned long size) { } 219 - 220 - /* 221 214 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" 222 215 * and utilize bounce buffers as needed to work around limited DMA windows. 223 216 *
+2 -2
arch/arm/include/asm/io.h
··· 64 64 static inline void __raw_writew(u16 val, volatile void __iomem *addr) 65 65 { 66 66 asm volatile("strh %1, %0" 67 - : "+Qo" (*(volatile u16 __force *)addr) 67 + : "+Q" (*(volatile u16 __force *)addr) 68 68 : "r" (val)); 69 69 } 70 70 ··· 72 72 { 73 73 u16 val; 74 74 asm volatile("ldrh %1, %0" 75 - : "+Qo" (*(volatile u16 __force *)addr), 75 + : "+Q" (*(volatile u16 __force *)addr), 76 76 "=r" (val)); 77 77 return val; 78 78 }
-2
arch/arm/include/asm/sched_clock.h
··· 10 10 11 11 extern void sched_clock_postinit(void); 12 12 extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); 13 - extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, 14 - unsigned long rate); 15 13 16 14 #endif
+6 -6
arch/arm/include/asm/vfpmacros.h
··· 27 27 #if __LINUX_ARM_ARCH__ <= 6 28 28 ldr \tmp, =elf_hwcap @ may not have MVFR regs 29 29 ldr \tmp, [\tmp, #0] 30 - tst \tmp, #HWCAP_VFPv3D16 31 - ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} 32 - addne \base, \base, #32*4 @ step over unused register space 30 + tst \tmp, #HWCAP_VFPD32 31 + ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} 32 + addeq \base, \base, #32*4 @ step over unused register space 33 33 #else 34 34 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 35 35 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field ··· 51 51 #if __LINUX_ARM_ARCH__ <= 6 52 52 ldr \tmp, =elf_hwcap @ may not have MVFR regs 53 53 ldr \tmp, [\tmp, #0] 54 - tst \tmp, #HWCAP_VFPv3D16 55 - stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} 56 - addne \base, \base, #32*4 @ step over unused register space 54 + tst \tmp, #HWCAP_VFPD32 55 + stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} 56 + addeq \base, \base, #32*4 @ step over unused register space 57 57 #else 58 58 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 59 59 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
+2 -1
arch/arm/include/uapi/asm/hwcap.h
··· 18 18 #define HWCAP_THUMBEE (1 << 11) 19 19 #define HWCAP_NEON (1 << 12) 20 20 #define HWCAP_VFPv3 (1 << 13) 21 - #define HWCAP_VFPv3D16 (1 << 14) 21 + #define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ 22 22 #define HWCAP_TLS (1 << 15) 23 23 #define HWCAP_VFPv4 (1 << 16) 24 24 #define HWCAP_IDIVA (1 << 17) 25 25 #define HWCAP_IDIVT (1 << 18) 26 + #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ 26 27 #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) 27 28 28 29
+4 -14
arch/arm/kernel/sched_clock.c
··· 107 107 update_sched_clock(); 108 108 } 109 109 110 - void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, 111 - unsigned long rate) 112 - { 113 - setup_sched_clock(read, bits, rate); 114 - cd.needs_suspend = true; 115 - } 116 - 117 110 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) 118 111 { 119 112 unsigned long r, w; ··· 182 189 static int sched_clock_suspend(void) 183 190 { 184 191 sched_clock_poll(sched_clock_timer.data); 185 - if (cd.needs_suspend) 186 - cd.suspended = true; 192 + cd.suspended = true; 187 193 return 0; 188 194 } 189 195 190 196 static void sched_clock_resume(void) 191 197 { 192 - if (cd.needs_suspend) { 193 - cd.epoch_cyc = read_sched_clock(); 194 - cd.epoch_cyc_copy = cd.epoch_cyc; 195 - cd.suspended = false; 196 - } 198 + cd.epoch_cyc = read_sched_clock(); 199 + cd.epoch_cyc_copy = cd.epoch_cyc; 200 + cd.suspended = false; 197 201 } 198 202 199 203 static struct syscore_ops sched_clock_ops = {
-1
arch/arm/mach-at91/at91sam9g45.c
··· 343 343 static void __init at91sam9g45_map_io(void) 344 344 { 345 345 at91_init_sram(0, AT91SAM9G45_SRAM_BASE, AT91SAM9G45_SRAM_SIZE); 346 - init_consistent_dma_size(SZ_4M); 347 346 } 348 347 349 348 static void __init at91sam9g45_ioremap_registers(void)
-2
arch/arm/mach-davinci/common.c
··· 87 87 iotable_init(davinci_soc_info.io_desc, 88 88 davinci_soc_info.io_desc_num); 89 89 90 - init_consistent_dma_size(14 << 20); 91 - 92 90 /* 93 91 * Normally devicemaps_init() would flush caches and tlb after 94 92 * mdesc->map_io(), but we must also do it here because of the CPU
-1
arch/arm/mach-s3c64xx/common.c
··· 155 155 /* initialise the io descriptors we need for initialisation */ 156 156 iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc)); 157 157 iotable_init(mach_desc, size); 158 - init_consistent_dma_size(SZ_8M); 159 158 160 159 /* detect cpu id */ 161 160 s3c64xx_init_cpu();
-2
arch/arm/mach-s5p64x0/common.c
··· 187 187 s5p6440_default_sdhci2(); 188 188 189 189 iotable_init(s5p6440_iodesc, ARRAY_SIZE(s5p6440_iodesc)); 190 - init_consistent_dma_size(SZ_8M); 191 190 } 192 191 193 192 void __init s5p6450_map_io(void) ··· 201 202 s5p6450_default_sdhci2(); 202 203 203 204 iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc)); 204 - init_consistent_dma_size(SZ_8M); 205 205 } 206 206 207 207 /*
-2
arch/arm/mach-s5pv210/common.c
··· 169 169 170 170 void __init s5pv210_map_io(void) 171 171 { 172 - init_consistent_dma_size(14 << 20); 173 - 174 172 /* initialise device information early */ 175 173 s5pv210_default_sdhci0(); 176 174 s5pv210_default_sdhci1();
-6
arch/arm/mach-shmobile/setup-r8a7740.c
··· 66 66 void __init r8a7740_map_io(void) 67 67 { 68 68 iotable_init(r8a7740_io_desc, ARRAY_SIZE(r8a7740_io_desc)); 69 - 70 - /* 71 - * DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't 72 - * enough to allocate the frame buffer memory. 73 - */ 74 - init_consistent_dma_size(12 << 20); 75 69 } 76 70 77 71 /* SCIFA0 */
-6
arch/arm/mach-shmobile/setup-sh7372.c
··· 58 58 void __init sh7372_map_io(void) 59 59 { 60 60 iotable_init(sh7372_io_desc, ARRAY_SIZE(sh7372_io_desc)); 61 - 62 - /* 63 - * DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't 64 - * enough to allocate the frame buffer memory. 65 - */ 66 - init_consistent_dma_size(12 << 20); 67 61 } 68 62 69 63 /* SCIFA0 */
-2
arch/arm/mach-u300/core.c
··· 82 82 static void __init u300_map_io(void) 83 83 { 84 84 iotable_init(u300_io_desc, ARRAY_SIZE(u300_io_desc)); 85 - /* We enable a real big DMA buffer if need be. */ 86 - init_consistent_dma_size(SZ_4M); 87 85 } 88 86 89 87 /*
+1 -1
arch/arm/mm/alignment.c
··· 745 745 static int 746 746 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) 747 747 { 748 - union offset_union offset; 748 + union offset_union uninitialized_var(offset); 749 749 unsigned long instr = 0, instrptr; 750 750 int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); 751 751 unsigned int type;
+6 -3
arch/arm/vfp/vfpmodule.c
··· 701 701 elf_hwcap |= HWCAP_VFPv3; 702 702 703 703 /* 704 - * Check for VFPv3 D16. CPUs in this configuration 705 - * only have 16 x 64bit registers. 704 + * Check for VFPv3 D16 and VFPv4 D16. CPUs in 705 + * this configuration only have 16 x 64bit 706 + * registers. 706 707 */ 707 708 if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) 708 - elf_hwcap |= HWCAP_VFPv3D16; 709 + elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ 710 + else 711 + elf_hwcap |= HWCAP_VFPD32; 709 712 } 710 713 #endif 711 714 /*
+11
arch/arm/xen/enlighten.c
··· 166 166 *pages = NULL; 167 167 } 168 168 EXPORT_SYMBOL_GPL(free_xenballooned_pages); 169 + 170 + /* In the hypervisor.S file. */ 171 + EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op); 172 + EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op); 173 + EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version); 174 + EXPORT_SYMBOL_GPL(HYPERVISOR_console_io); 175 + EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op); 176 + EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op); 177 + EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); 178 + EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); 179 + EXPORT_SYMBOL_GPL(privcmd_call);
+5 -9
arch/arm/xen/hypercall.S
··· 48 48 49 49 #include <linux/linkage.h> 50 50 #include <asm/assembler.h> 51 + #include <asm/opcodes-virt.h> 51 52 #include <xen/interface/xen.h> 52 53 53 54 54 - /* HVC 0xEA1 */ 55 - #ifdef CONFIG_THUMB2_KERNEL 56 - #define xen_hvc .word 0xf7e08ea1 57 - #else 58 - #define xen_hvc .word 0xe140ea71 59 - #endif 55 + #define XEN_IMM 0xEA1 60 56 61 57 #define HYPERCALL_SIMPLE(hypercall) \ 62 58 ENTRY(HYPERVISOR_##hypercall) \ 63 59 mov r12, #__HYPERVISOR_##hypercall; \ 64 - xen_hvc; \ 60 + __HVC(XEN_IMM); \ 65 61 mov pc, lr; \ 66 62 ENDPROC(HYPERVISOR_##hypercall) 67 63 ··· 72 76 stmdb sp!, {r4} \ 73 77 ldr r4, [sp, #4] \ 74 78 mov r12, #__HYPERVISOR_##hypercall; \ 75 - xen_hvc \ 79 + __HVC(XEN_IMM); \ 76 80 ldm sp!, {r4} \ 77 81 mov pc, lr \ 78 82 ENDPROC(HYPERVISOR_##hypercall) ··· 96 100 mov r2, r3 97 101 ldr r3, [sp, #8] 98 102 ldr r4, [sp, #4] 99 - xen_hvc 103 + __HVC(XEN_IMM) 100 104 ldm sp!, {r4} 101 105 mov pc, lr 102 106 ENDPROC(privcmd_call);
+1
arch/arm64/Kconfig
··· 1 1 config ARM64 2 2 def_bool y 3 3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 4 + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 4 5 select GENERIC_CLOCKEVENTS 5 6 select GENERIC_HARDIRQS_NO_DEPRECATED 6 7 select GENERIC_IOMAP
+1 -4
arch/arm64/include/asm/elf.h
··· 25 25 #include <asm/user.h> 26 26 27 27 typedef unsigned long elf_greg_t; 28 - typedef unsigned long elf_freg_t[3]; 29 28 30 29 #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) 31 30 typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 32 - 33 - typedef struct user_fp elf_fpregset_t; 31 + typedef struct user_fpsimd_state elf_fpregset_t; 34 32 35 33 #define EM_AARCH64 183 36 34 ··· 84 86 #define R_AARCH64_MOVW_PREL_G2 291 85 87 #define R_AARCH64_MOVW_PREL_G2_NC 292 86 88 #define R_AARCH64_MOVW_PREL_G3 293 87 - 88 89 89 90 /* 90 91 * These are used to set parameters in the core dumps.
+2 -3
arch/arm64/include/asm/fpsimd.h
··· 25 25 * - FPSR and FPCR 26 26 * - 32 128-bit data registers 27 27 * 28 - * Note that user_fp forms a prefix of this structure, which is relied 29 - * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must 30 - * form a prefix of struct fpsimd_state. 28 + * Note that user_fpsimd forms a prefix of this structure, which is 29 + * relied upon in the ptrace FP/SIMD accessors. 31 30 */ 32 31 struct fpsimd_state { 33 32 union {
+4 -4
arch/arm64/include/asm/io.h
··· 114 114 * I/O port access primitives. 115 115 */ 116 116 #define IO_SPACE_LIMIT 0xffff 117 - #define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL) 117 + #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) 118 118 119 119 static inline u8 inb(unsigned long addr) 120 120 { ··· 225 225 #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) 226 226 #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) 227 227 228 - #define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE) 229 - #define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE) 230 - #define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC) 228 + #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) 229 + #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) 230 + #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) 231 231 #define iounmap __iounmap 232 232 233 233 #define ARCH_HAS_IOREMAP_WC
+2
arch/arm64/include/asm/processor.h
··· 43 43 #else 44 44 #define STACK_TOP STACK_TOP_MAX 45 45 #endif /* CONFIG_COMPAT */ 46 + 47 + #define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK 46 48 #endif /* __KERNEL__ */ 47 49 48 50 struct debug_info {
-1
arch/arm64/include/asm/unistd.h
··· 14 14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 15 */ 16 16 #ifdef CONFIG_COMPAT 17 - #define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION 18 17 #define __ARCH_WANT_COMPAT_STAT64 19 18 #define __ARCH_WANT_SYS_GETHOSTNAME 20 19 #define __ARCH_WANT_SYS_PAUSE
+2 -8
arch/arm64/kernel/perf_event.c
··· 613 613 ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19, 614 614 ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A, 615 615 ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, 616 - 617 - /* 618 - * This isn't an architected event. 619 - * We detect this event number and use the cycle counter instead. 620 - */ 621 - ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF, 622 616 }; 623 617 624 618 /* PMUv3 HW events mapping. */ 625 619 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { 626 - [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES, 620 + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, 627 621 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, 628 622 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, 629 623 [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, ··· 1100 1106 unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; 1101 1107 1102 1108 /* Always place a cycle counter into the cycle counter. */ 1103 - if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { 1109 + if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { 1104 1110 if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) 1105 1111 return -EAGAIN; 1106 1112
-18
arch/arm64/kernel/process.c
··· 310 310 } 311 311 312 312 /* 313 - * Fill in the task's elfregs structure for a core dump. 314 - */ 315 - int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) 316 - { 317 - elf_core_copy_regs(elfregs, task_pt_regs(t)); 318 - return 1; 319 - } 320 - 321 - /* 322 - * fill in the fpe structure for a core dump... 323 - */ 324 - int dump_fpu (struct pt_regs *regs, struct user_fp *fp) 325 - { 326 - return 0; 327 - } 328 - EXPORT_SYMBOL(dump_fpu); 329 - 330 - /* 331 313 * Shuffle the argument into the correct register before calling the 332 314 * thread function. x1 is the thread argument, x2 is the pointer to 333 315 * the thread function, and x3 points to the exit function.
+1 -2
arch/arm64/kernel/smp.c
··· 211 211 * before we continue. 212 212 */ 213 213 set_cpu_online(cpu, true); 214 - while (!cpu_active(cpu)) 215 - cpu_relax(); 214 + complete(&cpu_running); 216 215 217 216 /* 218 217 * OK, it's off to the idle thread for us
+1 -1
arch/arm64/mm/init.c
··· 80 80 #ifdef CONFIG_ZONE_DMA32 81 81 /* 4GB maximum for 32-bit only capable devices */ 82 82 max_dma32 = min(max, MAX_DMA32_PFN); 83 - zone_size[ZONE_DMA32] = max_dma32 - min; 83 + zone_size[ZONE_DMA32] = max(min, max_dma32) - min; 84 84 #endif 85 85 zone_size[ZONE_NORMAL] = max - max_dma32; 86 86
+1
arch/frv/Kconfig
··· 13 13 select GENERIC_CPU_DEVICES 14 14 select ARCH_WANT_IPC_PARSE_VERSION 15 15 select GENERIC_KERNEL_THREAD 16 + select GENERIC_KERNEL_EXECVE 16 17 17 18 config ZONE_DMA 18 19 bool
+6 -4
arch/frv/boot/Makefile
··· 17 17 INITRD_PHYS = 0x02180000 18 18 INITRD_VIRT = 0x02180000 19 19 20 + OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment 21 + 20 22 # 21 23 # If you don't define ZRELADDR above, 22 24 # then it defaults to ZTEXTADDR ··· 34 32 targets: $(obj)/Image 35 33 36 34 $(obj)/Image: vmlinux FORCE 37 - $(OBJCOPY) -O binary -R .note -R .comment -S vmlinux $@ 35 + $(OBJCOPY) $(OBJCOPYFLAGS) -S vmlinux $@ 38 36 39 37 #$(obj)/Image: $(CONFIGURE) $(SYSTEM) 40 - # $(OBJCOPY) -O binary -R .note -R .comment -g -S $(SYSTEM) $@ 38 + # $(OBJCOPY) $(OBJCOPYFLAGS) -g -S $(SYSTEM) $@ 41 39 42 40 bzImage: zImage 43 41 44 42 zImage: $(CONFIGURE) compressed/$(LINUX) 45 - $(OBJCOPY) -O binary -R .note -R .comment -S compressed/$(LINUX) $@ 43 + $(OBJCOPY) $(OBJCOPYFLAGS) -S compressed/$(LINUX) $@ 46 44 47 45 bootpImage: bootp/bootp 48 - $(OBJCOPY) -O binary -R .note -R .comment -S bootp/bootp $@ 46 + $(OBJCOPY) $(OBJCOPYFLAGS) -S bootp/bootp $@ 49 47 50 48 compressed/$(LINUX): $(LINUX) dep 51 49 @$(MAKE) -C compressed $(LINUX)
-1
arch/frv/include/asm/unistd.h
··· 30 30 #define __ARCH_WANT_SYS_RT_SIGACTION 31 31 #define __ARCH_WANT_SYS_RT_SIGSUSPEND 32 32 #define __ARCH_WANT_SYS_EXECVE 33 - #define __ARCH_WANT_KERNEL_EXECVE 34 33 35 34 /* 36 35 * "Conditional" syscalls
+3 -25
arch/frv/kernel/entry.S
··· 869 869 call schedule_tail 870 870 calll.p @(gr21,gr0) 871 871 or gr20,gr20,gr8 872 - bra sys_exit 873 - 874 - .globl ret_from_kernel_execve 875 - ret_from_kernel_execve: 876 - ori gr28,0,sp 877 872 bra __syscall_exit 878 873 879 874 ################################################################################################### ··· 1075 1080 subicc gr5,#0,gr0,icc0 1076 1081 beq icc0,#0,__entry_return_direct 1077 1082 1078 - __entry_preempt_need_resched: 1079 - ldi @(gr15,#TI_FLAGS),gr4 1080 - andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0 1081 - beq icc0,#1,__entry_return_direct 1082 - 1083 - setlos #PREEMPT_ACTIVE,gr5 1084 - sti gr5,@(gr15,#TI_FLAGS) 1085 - 1086 - andi gr23,#~PSR_PIL,gr23 1087 - movgs gr23,psr 1088 - 1089 - call schedule 1090 - sti gr0,@(gr15,#TI_PRE_COUNT) 1091 - 1092 - movsg psr,gr23 1093 - ori gr23,#PSR_PIL_14,gr23 1094 - movgs gr23,psr 1095 - bra __entry_preempt_need_resched 1096 - #else 1097 - bra __entry_return_direct 1083 + subcc gr0,gr0,gr0,icc2 /* set Z and clear C */ 1084 + call preempt_schedule_irq 1098 1085 #endif 1086 + bra __entry_return_direct 1099 1087 1100 1088 1101 1089 ###############################################################################
+3 -2
arch/frv/kernel/process.c
··· 181 181 childregs = (struct pt_regs *) 182 182 (task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE); 183 183 184 + /* set up the userspace frame (the only place that the USP is stored) */ 185 + *childregs = *__kernel_frame0_ptr; 186 + 184 187 p->set_child_tid = p->clear_child_tid = NULL; 185 188 186 189 p->thread.frame = childregs; ··· 194 191 p->thread.frame0 = childregs; 195 192 196 193 if (unlikely(!regs)) { 197 - memset(childregs, 0, sizeof(struct pt_regs)); 198 194 childregs->gr9 = usp; /* function */ 199 195 childregs->gr8 = arg; 200 - childregs->psr = PSR_S; 201 196 p->thread.pc = (unsigned long) ret_from_kernel_thread; 202 197 save_user_regs(p->thread.user); 203 198 return 0;
+1
arch/frv/mb93090-mb00/pci-dma-nommu.c
··· 11 11 12 12 #include <linux/types.h> 13 13 #include <linux/slab.h> 14 + #include <linux/export.h> 14 15 #include <linux/dma-mapping.h> 15 16 #include <linux/list.h> 16 17 #include <linux/pci.h>
+2 -1
arch/h8300/include/asm/cache.h
··· 2 2 #define __ARCH_H8300_CACHE_H 3 3 4 4 /* bytes per L1 cache line */ 5 - #define L1_CACHE_BYTES 4 5 + #define L1_CACHE_SHIFT 2 6 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 6 7 7 8 /* m68k-elf-gcc 2.95.2 doesn't like these */ 8 9
+2
arch/s390/include/asm/cio.h
··· 9 9 10 10 #define LPM_ANYPATH 0xff 11 11 #define __MAX_CSSID 0 12 + #define __MAX_SUBCHANNEL 65535 13 + #define __MAX_SSID 3 12 14 13 15 #include <asm/scsw.h> 14 16
+22 -13
arch/s390/include/asm/pgtable.h
··· 506 506 507 507 static inline int pmd_present(pmd_t pmd) 508 508 { 509 - return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL; 509 + unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO; 510 + return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE || 511 + !(pmd_val(pmd) & _SEGMENT_ENTRY_INV); 510 512 } 511 513 512 514 static inline int pmd_none(pmd_t pmd) 513 515 { 514 - return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL; 516 + return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) && 517 + !(pmd_val(pmd) & _SEGMENT_ENTRY_RO); 515 518 } 516 519 517 520 static inline int pmd_large(pmd_t pmd) ··· 1226 1223 } 1227 1224 1228 1225 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1226 + 1227 + #define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) 1228 + #define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) 1229 + #define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) 1230 + 1229 1231 #define __HAVE_ARCH_PGTABLE_DEPOSIT 1230 1232 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); 1231 1233 ··· 1250 1242 1251 1243 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) 1252 1244 { 1253 - unsigned long pgprot_pmd = 0; 1254 - 1255 - if (pgprot_val(pgprot) & _PAGE_INVALID) { 1256 - if (pgprot_val(pgprot) & _PAGE_SWT) 1257 - pgprot_pmd |= _HPAGE_TYPE_NONE; 1258 - pgprot_pmd |= _SEGMENT_ENTRY_INV; 1259 - } 1260 - if (pgprot_val(pgprot) & _PAGE_RO) 1261 - pgprot_pmd |= _SEGMENT_ENTRY_RO; 1262 - return pgprot_pmd; 1245 + /* 1246 + * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx) 1247 + * Convert to segment table entry format. 
1248 + */ 1249 + if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) 1250 + return pgprot_val(SEGMENT_NONE); 1251 + if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) 1252 + return pgprot_val(SEGMENT_RO); 1253 + return pgprot_val(SEGMENT_RW); 1263 1254 } 1264 1255 1265 1256 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) ··· 1276 1269 1277 1270 static inline pmd_t pmd_mkwrite(pmd_t pmd) 1278 1271 { 1279 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; 1272 + /* Do not clobber _HPAGE_TYPE_NONE pages! */ 1273 + if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV)) 1274 + pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; 1280 1275 return pmd; 1281 1276 } 1282 1277
+7 -1
arch/s390/kernel/sclp.S
··· 44 44 #endif 45 45 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) 46 46 mvc 0(16,%r8),0(%r9) 47 + #ifdef CONFIG_64BIT 48 + epsw %r6,%r7 # set current addressing mode 49 + nill %r6,0x1 # in new psw (31 or 64 bit mode) 50 + nilh %r7,0x8000 51 + stm %r6,%r7,0(%r8) 52 + #endif 47 53 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 48 54 ltr %r2,%r2 49 55 jz .LsetctS1 ··· 93 87 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 94 88 #ifdef CONFIG_64BIT 95 89 .LextpswS1_64: 96 - .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit 90 + .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit 97 91 #endif 98 92 .LwaitpswS1: 99 93 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
+1 -1
arch/s390/lib/uaccess_pt.c
··· 39 39 pmd = pmd_offset(pud, addr); 40 40 if (pmd_none(*pmd)) 41 41 return -0x10UL; 42 - if (pmd_huge(*pmd)) { 42 + if (pmd_large(*pmd)) { 43 43 if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) 44 44 return -0x04UL; 45 45 return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+1 -1
arch/s390/mm/gup.c
··· 126 126 */ 127 127 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) 128 128 return 0; 129 - if (unlikely(pmd_huge(pmd))) { 129 + if (unlikely(pmd_large(pmd))) { 130 130 if (!gup_huge_pmd(pmdp, pmd, addr, next, 131 131 write, pages, nr)) 132 132 return 0;
+1
arch/sparc/Kconfig
··· 20 20 select HAVE_ARCH_TRACEHOOK 21 21 select SYSCTL_EXCEPTION_TRACE 22 22 select ARCH_WANT_OPTIONAL_GPIOLIB 23 + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 23 24 select RTC_CLASS 24 25 select RTC_DRV_M48T59 25 26 select HAVE_IRQ_WORK
+8 -8
arch/sparc/crypto/Makefile
··· 13 13 14 14 obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o 15 15 16 - sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o 17 - sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o 18 - sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o 19 - md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o 16 + sha1-sparc64-y := sha1_asm.o sha1_glue.o 17 + sha256-sparc64-y := sha256_asm.o sha256_glue.o 18 + sha512-sparc64-y := sha512_asm.o sha512_glue.o 19 + md5-sparc64-y := md5_asm.o md5_glue.o 20 20 21 - aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o 22 - des-sparc64-y := des_asm.o des_glue.o crop_devid.o 23 - camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o 21 + aes-sparc64-y := aes_asm.o aes_glue.o 22 + des-sparc64-y := des_asm.o des_glue.o 23 + camellia-sparc64-y := camellia_asm.o camellia_glue.o 24 24 25 - crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o 25 + crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o
+2
arch/sparc/crypto/aes_glue.c
··· 475 475 MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); 476 476 477 477 MODULE_ALIAS("aes"); 478 + 479 + #include "crop_devid.c"
+2
arch/sparc/crypto/camellia_glue.c
··· 320 320 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); 321 321 322 322 MODULE_ALIAS("aes"); 323 + 324 + #include "crop_devid.c"
+2
arch/sparc/crypto/crc32c_glue.c
··· 177 177 MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); 178 178 179 179 MODULE_ALIAS("crc32c"); 180 + 181 + #include "crop_devid.c"
+2
arch/sparc/crypto/des_glue.c
··· 527 527 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); 528 528 529 529 MODULE_ALIAS("des"); 530 + 531 + #include "crop_devid.c"
+2
arch/sparc/crypto/md5_glue.c
··· 186 186 MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); 187 187 188 188 MODULE_ALIAS("md5"); 189 + 190 + #include "crop_devid.c"
+2
arch/sparc/crypto/sha1_glue.c
··· 181 181 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); 182 182 183 183 MODULE_ALIAS("sha1"); 184 + 185 + #include "crop_devid.c"
+2
arch/sparc/crypto/sha256_glue.c
··· 239 239 240 240 MODULE_ALIAS("sha224"); 241 241 MODULE_ALIAS("sha256"); 242 + 243 + #include "crop_devid.c"
+2
arch/sparc/crypto/sha512_glue.c
··· 224 224 225 225 MODULE_ALIAS("sha384"); 226 226 MODULE_ALIAS("sha512"); 227 + 228 + #include "crop_devid.c"
+3 -1
arch/sparc/include/asm/atomic_64.h
··· 1 1 /* atomic.h: Thankfully the V9 is at least reasonable for this 2 2 * stuff. 3 3 * 4 - * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com) 4 + * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com) 5 5 */ 6 6 7 7 #ifndef __ARCH_SPARC64_ATOMIC__ ··· 105 105 } 106 106 107 107 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 108 + 109 + extern long atomic64_dec_if_positive(atomic64_t *v); 108 110 109 111 /* Atomic operations are already serializing */ 110 112 #define smp_mb__before_atomic_dec() barrier()
+59 -10
arch/sparc/include/asm/backoff.h
··· 1 1 #ifndef _SPARC64_BACKOFF_H 2 2 #define _SPARC64_BACKOFF_H 3 3 4 + /* The macros in this file implement an exponential backoff facility 5 + * for atomic operations. 6 + * 7 + * When multiple threads compete on an atomic operation, it is 8 + * possible for one thread to be continually denied a successful 9 + * completion of the compare-and-swap instruction. Heavily 10 + * threaded cpu implementations like Niagara can compound this 11 + * problem even further. 12 + * 13 + * When an atomic operation fails and needs to be retried, we spin a 14 + * certain number of times. At each subsequent failure of the same 15 + * operation we double the spin count, realizing an exponential 16 + * backoff. 17 + * 18 + * When we spin, we try to use an operation that will cause the 19 + * current cpu strand to block, and therefore make the core fully 20 + * available to any other other runnable strands. There are two 21 + * options, based upon cpu capabilities. 22 + * 23 + * On all cpus prior to SPARC-T4 we do three dummy reads of the 24 + * condition code register. Each read blocks the strand for something 25 + * between 40 and 50 cpu cycles. 26 + * 27 + * For SPARC-T4 and later we have a special "pause" instruction 28 + * available. This is implemented using writes to register %asr27. 29 + * The cpu will block the number of cycles written into the register, 30 + * unless a disrupting trap happens first. SPARC-T4 specifically 31 + * implements pause with a granularity of 8 cycles. Each strand has 32 + * an internal pause counter which decrements every 8 cycles. So the 33 + * chip shifts the %asr27 value down by 3 bits, and writes the result 34 + * into the pause counter. If a value smaller than 8 is written, the 35 + * chip blocks for 1 cycle. 36 + * 37 + * To achieve the same amount of backoff as the three %ccr reads give 38 + * on earlier chips, we shift the backoff value up by 7 bits. 
(Three 39 + * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the 40 + * whole amount we want to block into the pause register, rather than 41 + * loop writing 128 each time. 42 + */ 43 + 4 44 #define BACKOFF_LIMIT (4 * 1024) 5 45 6 46 #ifdef CONFIG_SMP ··· 51 11 #define BACKOFF_LABEL(spin_label, continue_label) \ 52 12 spin_label 53 13 54 - #define BACKOFF_SPIN(reg, tmp, label) \ 55 - mov reg, tmp; \ 56 - 88: brnz,pt tmp, 88b; \ 57 - sub tmp, 1, tmp; \ 58 - set BACKOFF_LIMIT, tmp; \ 59 - cmp reg, tmp; \ 60 - bg,pn %xcc, label; \ 61 - nop; \ 62 - ba,pt %xcc, label; \ 63 - sllx reg, 1, reg; 14 + #define BACKOFF_SPIN(reg, tmp, label) \ 15 + mov reg, tmp; \ 16 + 88: rd %ccr, %g0; \ 17 + rd %ccr, %g0; \ 18 + rd %ccr, %g0; \ 19 + .section .pause_3insn_patch,"ax";\ 20 + .word 88b; \ 21 + sllx tmp, 7, tmp; \ 22 + wr tmp, 0, %asr27; \ 23 + clr tmp; \ 24 + .previous; \ 25 + brnz,pt tmp, 88b; \ 26 + sub tmp, 1, tmp; \ 27 + set BACKOFF_LIMIT, tmp; \ 28 + cmp reg, tmp; \ 29 + bg,pn %xcc, label; \ 30 + nop; \ 31 + ba,pt %xcc, label; \ 32 + sllx reg, 1, reg; 64 33 65 34 #else 66 35
+3 -2
arch/sparc/include/asm/compat.h
··· 232 232 struct pt_regs *regs = current_thread_info()->kregs; 233 233 unsigned long usp = regs->u_regs[UREG_I6]; 234 234 235 - if (!(test_thread_flag(TIF_32BIT))) 235 + if (test_thread_64bit_stack(usp)) 236 236 usp += STACK_BIAS; 237 - else 237 + 238 + if (test_thread_flag(TIF_32BIT)) 238 239 usp &= 0xffffffffUL; 239 240 240 241 usp -= len;
+16 -1
arch/sparc/include/asm/processor_64.h
··· 196 196 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) 197 197 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) 198 198 199 - #define cpu_relax() barrier() 199 + /* Please see the commentary in asm/backoff.h for a description of 200 + * what these instructions are doing and how they have been choosen. 201 + * To make a long story short, we are trying to yield the current cpu 202 + * strand during busy loops. 203 + */ 204 + #define cpu_relax() asm volatile("\n99:\n\t" \ 205 + "rd %%ccr, %%g0\n\t" \ 206 + "rd %%ccr, %%g0\n\t" \ 207 + "rd %%ccr, %%g0\n\t" \ 208 + ".section .pause_3insn_patch,\"ax\"\n\t"\ 209 + ".word 99b\n\t" \ 210 + "wr %%g0, 128, %%asr27\n\t" \ 211 + "nop\n\t" \ 212 + "nop\n\t" \ 213 + ".previous" \ 214 + ::: "memory") 200 215 201 216 /* Prefetch support. This is tuned for UltraSPARC-III and later. 202 217 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+5
arch/sparc/include/asm/prom.h
··· 63 63 extern void irq_trans_init(struct device_node *dp); 64 64 extern char *build_path_component(struct device_node *dp); 65 65 66 + /* SPARC has a local implementation */ 67 + extern int of_address_to_resource(struct device_node *dev, int index, 68 + struct resource *r); 69 + #define of_address_to_resource of_address_to_resource 70 + 66 71 #endif /* __KERNEL__ */ 67 72 #endif /* _SPARC_PROM_H */
+5
arch/sparc/include/asm/thread_info_64.h
··· 259 259 260 260 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 261 261 262 + #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) 263 + #define test_thread_64bit_stack(__SP) \ 264 + ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ 265 + false : true) 266 + 262 267 #endif /* !__ASSEMBLY__ */ 263 268 264 269 #endif /* __KERNEL__ */
+16 -8
arch/sparc/include/asm/ttable.h
··· 372 372 373 373 /* Normal 32bit spill */ 374 374 #define SPILL_2_GENERIC(ASI) \ 375 - srl %sp, 0, %sp; \ 375 + and %sp, 1, %g3; \ 376 + brnz,pn %g3, (. - (128 + 4)); \ 377 + srl %sp, 0, %sp; \ 376 378 stwa %l0, [%sp + %g0] ASI; \ 377 379 mov 0x04, %g3; \ 378 380 stwa %l1, [%sp + %g3] ASI; \ ··· 400 398 stwa %i6, [%g1 + %g0] ASI; \ 401 399 stwa %i7, [%g1 + %g3] ASI; \ 402 400 saved; \ 403 - retry; nop; nop; \ 401 + retry; \ 404 402 b,a,pt %xcc, spill_fixup_dax; \ 405 403 b,a,pt %xcc, spill_fixup_mna; \ 406 404 b,a,pt %xcc, spill_fixup; 407 405 408 406 #define SPILL_2_GENERIC_ETRAP \ 409 407 etrap_user_spill_32bit: \ 410 - srl %sp, 0, %sp; \ 408 + and %sp, 1, %g3; \ 409 + brnz,pn %g3, etrap_user_spill_64bit; \ 410 + srl %sp, 0, %sp; \ 411 411 stwa %l0, [%sp + 0x00] %asi; \ 412 412 stwa %l1, [%sp + 0x04] %asi; \ 413 413 stwa %l2, [%sp + 0x08] %asi; \ ··· 431 427 ba,pt %xcc, etrap_save; \ 432 428 wrpr %g1, %cwp; \ 433 429 nop; nop; nop; nop; \ 434 - nop; nop; nop; nop; \ 430 + nop; nop; \ 435 431 ba,a,pt %xcc, etrap_spill_fixup_32bit; \ 436 432 ba,a,pt %xcc, etrap_spill_fixup_32bit; \ 437 433 ba,a,pt %xcc, etrap_spill_fixup_32bit; ··· 596 592 597 593 /* Normal 32bit fill */ 598 594 #define FILL_2_GENERIC(ASI) \ 599 - srl %sp, 0, %sp; \ 595 + and %sp, 1, %g3; \ 596 + brnz,pn %g3, (. 
- (128 + 4)); \ 597 + srl %sp, 0, %sp; \ 600 598 lduwa [%sp + %g0] ASI, %l0; \ 601 599 mov 0x04, %g2; \ 602 600 mov 0x08, %g3; \ ··· 622 616 lduwa [%g1 + %g3] ASI, %i6; \ 623 617 lduwa [%g1 + %g5] ASI, %i7; \ 624 618 restored; \ 625 - retry; nop; nop; nop; nop; \ 619 + retry; nop; nop; \ 626 620 b,a,pt %xcc, fill_fixup_dax; \ 627 621 b,a,pt %xcc, fill_fixup_mna; \ 628 622 b,a,pt %xcc, fill_fixup; 629 623 630 624 #define FILL_2_GENERIC_RTRAP \ 631 625 user_rtt_fill_32bit: \ 632 - srl %sp, 0, %sp; \ 626 + and %sp, 1, %g3; \ 627 + brnz,pn %g3, user_rtt_fill_64bit; \ 628 + srl %sp, 0, %sp; \ 633 629 lduwa [%sp + 0x00] %asi, %l0; \ 634 630 lduwa [%sp + 0x04] %asi, %l1; \ 635 631 lduwa [%sp + 0x08] %asi, %l2; \ ··· 651 643 ba,pt %xcc, user_rtt_pre_restore; \ 652 644 restored; \ 653 645 nop; nop; nop; nop; nop; \ 654 - nop; nop; nop; nop; nop; \ 646 + nop; nop; nop; \ 655 647 ba,a,pt %xcc, user_rtt_fill_fixup; \ 656 648 ba,a,pt %xcc, user_rtt_fill_fixup; \ 657 649 ba,a,pt %xcc, user_rtt_fill_fixup;
+6 -1
arch/sparc/include/uapi/asm/unistd.h
··· 405 405 #define __NR_setns 337 406 406 #define __NR_process_vm_readv 338 407 407 #define __NR_process_vm_writev 339 408 + #define __NR_kern_features 340 409 + #define __NR_kcmp 341 408 410 409 - #define NR_syscalls 340 411 + #define NR_syscalls 342 412 + 413 + /* Bitmask values returned from kern_features system call. */ 414 + #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 410 415 411 416 #ifdef __32bit_syscall_numbers__ 412 417 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
+7
arch/sparc/kernel/entry.h
··· 59 59 extern struct popc_6insn_patch_entry __popc_6insn_patch, 60 60 __popc_6insn_patch_end; 61 61 62 + struct pause_patch_entry { 63 + unsigned int addr; 64 + unsigned int insns[3]; 65 + }; 66 + extern struct pause_patch_entry __pause_3insn_patch, 67 + __pause_3insn_patch_end; 68 + 62 69 extern void __init per_cpu_patch(void); 63 70 extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, 64 71 struct sun4v_1insn_patch_entry *);
+4 -2
arch/sparc/kernel/leon_kernel.c
··· 56 56 static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) 57 57 { 58 58 unsigned int eirq; 59 + struct irq_bucket *p; 59 60 int cpu = sparc_leon3_cpuid(); 60 61 61 62 eirq = leon_eirq_get(cpu); 62 - if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */ 63 - generic_handle_irq(irq_map[eirq]->irq); 63 + p = irq_map[eirq]; 64 + if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */ 65 + generic_handle_irq(p->irq); 64 66 } 65 67 66 68 /* The extended IRQ controller has been found, this function registers it */
+16 -6
arch/sparc/kernel/perf_event.c
··· 1762 1762 1763 1763 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; 1764 1764 do { 1765 - struct sparc_stackf32 *usf, sf; 1766 1765 unsigned long pc; 1767 1766 1768 - usf = (struct sparc_stackf32 *) ufp; 1769 - if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1770 - break; 1767 + if (thread32_stack_is_64bit(ufp)) { 1768 + struct sparc_stackf *usf, sf; 1771 1769 1772 - pc = sf.callers_pc; 1773 - ufp = (unsigned long)sf.fp; 1770 + ufp += STACK_BIAS; 1771 + usf = (struct sparc_stackf *) ufp; 1772 + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1773 + break; 1774 + pc = sf.callers_pc & 0xffffffff; 1775 + ufp = ((unsigned long) sf.fp) & 0xffffffff; 1776 + } else { 1777 + struct sparc_stackf32 *usf, sf; 1778 + usf = (struct sparc_stackf32 *) ufp; 1779 + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1780 + break; 1781 + pc = sf.callers_pc; 1782 + ufp = (unsigned long)sf.fp; 1783 + } 1774 1784 perf_callchain_store(entry, pc); 1775 1785 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1776 1786 }
+23 -19
arch/sparc/kernel/process_64.c
··· 452 452 /* It's a bit more tricky when 64-bit tasks are involved... */ 453 453 static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) 454 454 { 455 + bool stack_64bit = test_thread_64bit_stack(psp); 455 456 unsigned long fp, distance, rval; 456 457 457 - if (!(test_thread_flag(TIF_32BIT))) { 458 + if (stack_64bit) { 458 459 csp += STACK_BIAS; 459 460 psp += STACK_BIAS; 460 461 __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); 461 462 fp += STACK_BIAS; 463 + if (test_thread_flag(TIF_32BIT)) 464 + fp &= 0xffffffff; 462 465 } else 463 466 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); 464 467 ··· 475 472 rval = (csp - distance); 476 473 if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) 477 474 rval = 0; 478 - else if (test_thread_flag(TIF_32BIT)) { 475 + else if (!stack_64bit) { 479 476 if (put_user(((u32)csp), 480 477 &(((struct reg_window32 __user *)rval)->ins[6]))) 481 478 rval = 0; ··· 510 507 511 508 flush_user_windows(); 512 509 if ((window = get_thread_wsaved()) != 0) { 513 - int winsize = sizeof(struct reg_window); 514 - int bias = 0; 515 - 516 - if (test_thread_flag(TIF_32BIT)) 517 - winsize = sizeof(struct reg_window32); 518 - else 519 - bias = STACK_BIAS; 520 - 521 510 window -= 1; 522 511 do { 523 - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); 524 512 struct reg_window *rwin = &t->reg_window[window]; 513 + int winsize = sizeof(struct reg_window); 514 + unsigned long sp; 515 + 516 + sp = t->rwbuf_stkptrs[window]; 517 + 518 + if (test_thread_64bit_stack(sp)) 519 + sp += STACK_BIAS; 520 + else 521 + winsize = sizeof(struct reg_window32); 525 522 526 523 if (!copy_to_user((char __user *)sp, rwin, winsize)) { 527 524 shift_window_buffer(window, get_thread_wsaved() - 1, t); ··· 547 544 { 548 545 struct thread_info *t = current_thread_info(); 549 546 unsigned long window; 550 - int winsize = sizeof(struct reg_window); 551 - int bias = 0; 552 - 553 - if 
(test_thread_flag(TIF_32BIT)) 554 - winsize = sizeof(struct reg_window32); 555 - else 556 - bias = STACK_BIAS; 557 547 558 548 flush_user_windows(); 559 549 window = get_thread_wsaved(); ··· 554 558 if (likely(window != 0)) { 555 559 window -= 1; 556 560 do { 557 - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); 558 561 struct reg_window *rwin = &t->reg_window[window]; 562 + int winsize = sizeof(struct reg_window); 563 + unsigned long sp; 564 + 565 + sp = t->rwbuf_stkptrs[window]; 566 + 567 + if (test_thread_64bit_stack(sp)) 568 + sp += STACK_BIAS; 569 + else 570 + winsize = sizeof(struct reg_window32); 559 571 560 572 if (unlikely(sp & 0x7UL)) 561 573 stack_unaligned(sp);
+2 -2
arch/sparc/kernel/ptrace_64.c
··· 151 151 { 152 152 unsigned long rw_addr = regs->u_regs[UREG_I6]; 153 153 154 - if (test_tsk_thread_flag(current, TIF_32BIT)) { 154 + if (!test_thread_64bit_stack(rw_addr)) { 155 155 struct reg_window32 win32; 156 156 int i; 157 157 ··· 176 176 { 177 177 unsigned long rw_addr = regs->u_regs[UREG_I6]; 178 178 179 - if (test_tsk_thread_flag(current, TIF_32BIT)) { 179 + if (!test_thread_64bit_stack(rw_addr)) { 180 180 struct reg_window32 win32; 181 181 int i; 182 182
+21
arch/sparc/kernel/setup_64.c
··· 316 316 } 317 317 } 318 318 319 + static void __init pause_patch(void) 320 + { 321 + struct pause_patch_entry *p; 322 + 323 + p = &__pause_3insn_patch; 324 + while (p < &__pause_3insn_patch_end) { 325 + unsigned long i, addr = p->addr; 326 + 327 + for (i = 0; i < 3; i++) { 328 + *(unsigned int *) (addr + (i * 4)) = p->insns[i]; 329 + wmb(); 330 + __asm__ __volatile__("flush %0" 331 + : : "r" (addr + (i * 4))); 332 + } 333 + 334 + p++; 335 + } 336 + } 337 + 319 338 #ifdef CONFIG_SMP 320 339 void __init boot_cpu_id_too_large(int cpu) 321 340 { ··· 547 528 548 529 if (sparc64_elf_hwcap & AV_SPARC_POPC) 549 530 popc_patch(); 531 + if (sparc64_elf_hwcap & AV_SPARC_PAUSE) 532 + pause_patch(); 550 533 } 551 534 552 535 void __init setup_arch(char **cmdline_p)
+5
arch/sparc/kernel/sys_sparc_64.c
··· 751 751 : "cc"); 752 752 return __res; 753 753 } 754 + 755 + asmlinkage long sys_kern_features(void) 756 + { 757 + return KERN_FEATURE_MIXED_MODE_STACK; 758 + }
+1
arch/sparc/kernel/systbls_32.S
··· 85 85 /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 86 86 /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 87 87 /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 88 + /*340*/ .long sys_ni_syscall, sys_kcmp
+2
arch/sparc/kernel/systbls_64.S
··· 86 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 87 87 /*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 88 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 89 + /*340*/ .word sys_kern_features, sys_kcmp 89 90 90 91 #endif /* CONFIG_COMPAT */ 91 92 ··· 164 163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 165 164 /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 166 165 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 166 + /*340*/ .word sys_kern_features, sys_kcmp
+23 -13
arch/sparc/kernel/unaligned_64.c
··· 113 113 114 114 static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) 115 115 { 116 - unsigned long value; 116 + unsigned long value, fp; 117 117 118 118 if (reg < 16) 119 119 return (!reg ? 0 : regs->u_regs[reg]); 120 + 121 + fp = regs->u_regs[UREG_FP]; 122 + 120 123 if (regs->tstate & TSTATE_PRIV) { 121 124 struct reg_window *win; 122 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 125 + win = (struct reg_window *)(fp + STACK_BIAS); 123 126 value = win->locals[reg - 16]; 124 - } else if (test_thread_flag(TIF_32BIT)) { 127 + } else if (!test_thread_64bit_stack(fp)) { 125 128 struct reg_window32 __user *win32; 126 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 129 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 127 130 get_user(value, &win32->locals[reg - 16]); 128 131 } else { 129 132 struct reg_window __user *win; 130 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 133 + win = (struct reg_window __user *)(fp + STACK_BIAS); 131 134 get_user(value, &win->locals[reg - 16]); 132 135 } 133 136 return value; ··· 138 135 139 136 static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) 140 137 { 138 + unsigned long fp; 139 + 141 140 if (reg < 16) 142 141 return &regs->u_regs[reg]; 142 + 143 + fp = regs->u_regs[UREG_FP]; 144 + 143 145 if (regs->tstate & TSTATE_PRIV) { 144 146 struct reg_window *win; 145 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 147 + win = (struct reg_window *)(fp + STACK_BIAS); 146 148 return &win->locals[reg - 16]; 147 - } else if (test_thread_flag(TIF_32BIT)) { 149 + } else if (!test_thread_64bit_stack(fp)) { 148 150 struct reg_window32 *win32; 149 - win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 151 + win32 = (struct reg_window32 *)((unsigned long)((u32)fp)); 150 152 return (unsigned long *)&win32->locals[reg - 16]; 151 153 } else { 152 154 struct 
reg_window *win; 153 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 155 + win = (struct reg_window *)(fp + STACK_BIAS); 154 156 return &win->locals[reg - 16]; 155 157 } 156 158 } ··· 400 392 if (rd) 401 393 regs->u_regs[rd] = ret; 402 394 } else { 403 - if (test_thread_flag(TIF_32BIT)) { 395 + unsigned long fp = regs->u_regs[UREG_FP]; 396 + 397 + if (!test_thread_64bit_stack(fp)) { 404 398 struct reg_window32 __user *win32; 405 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 399 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 406 400 put_user(ret, &win32->locals[rd - 16]); 407 401 } else { 408 402 struct reg_window __user *win; 409 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 403 + win = (struct reg_window __user *)(fp + STACK_BIAS); 410 404 put_user(ret, &win->locals[rd - 16]); 411 405 } 412 406 } ··· 564 554 reg[0] = 0; 565 555 if ((insn & 0x780000) == 0x180000) 566 556 reg[1] = 0; 567 - } else if (test_thread_flag(TIF_32BIT)) { 557 + } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { 568 558 put_user(0, (int __user *) reg); 569 559 if ((insn & 0x780000) == 0x180000) 570 560 put_user(0, ((int __user *) reg) + 1);
+14 -9
arch/sparc/kernel/visemul.c
··· 149 149 150 150 static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) 151 151 { 152 - unsigned long value; 152 + unsigned long value, fp; 153 153 154 154 if (reg < 16) 155 155 return (!reg ? 0 : regs->u_regs[reg]); 156 + 157 + fp = regs->u_regs[UREG_FP]; 158 + 156 159 if (regs->tstate & TSTATE_PRIV) { 157 160 struct reg_window *win; 158 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 161 + win = (struct reg_window *)(fp + STACK_BIAS); 159 162 value = win->locals[reg - 16]; 160 - } else if (test_thread_flag(TIF_32BIT)) { 163 + } else if (!test_thread_64bit_stack(fp)) { 161 164 struct reg_window32 __user *win32; 162 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 165 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 163 166 get_user(value, &win32->locals[reg - 16]); 164 167 } else { 165 168 struct reg_window __user *win; 166 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 169 + win = (struct reg_window __user *)(fp + STACK_BIAS); 167 170 get_user(value, &win->locals[reg - 16]); 168 171 } 169 172 return value; ··· 175 172 static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, 176 173 struct pt_regs *regs) 177 174 { 175 + unsigned long fp = regs->u_regs[UREG_FP]; 176 + 178 177 BUG_ON(reg < 16); 179 178 BUG_ON(regs->tstate & TSTATE_PRIV); 180 179 181 - if (test_thread_flag(TIF_32BIT)) { 180 + if (!test_thread_64bit_stack(fp)) { 182 181 struct reg_window32 __user *win32; 183 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 182 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 184 183 return (unsigned long __user *)&win32->locals[reg - 16]; 185 184 } else { 186 185 struct reg_window __user *win; 187 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 186 + win = (struct reg_window __user *)(fp + STACK_BIAS); 188 187 return &win->locals[reg - 16]; 189 188 
} 190 189 } ··· 209 204 } else { 210 205 unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); 211 206 212 - if (test_thread_flag(TIF_32BIT)) 207 + if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) 213 208 __put_user((u32)val, (u32 __user *)rd_user); 214 209 else 215 210 __put_user(val, rd_user);
+5
arch/sparc/kernel/vmlinux.lds.S
··· 132 132 *(.popc_6insn_patch) 133 133 __popc_6insn_patch_end = .; 134 134 } 135 + .pause_3insn_patch : { 136 + __pause_3insn_patch = .; 137 + *(.pause_3insn_patch) 138 + __pause_3insn_patch_end = .; 139 + } 135 140 PERCPU_SECTION(SMP_CACHE_BYTES) 136 141 137 142 . = ALIGN(PAGE_SIZE);
+2
arch/sparc/kernel/winfixup.S
··· 43 43 spill_fixup_dax: 44 44 TRAP_LOAD_THREAD_REG(%g6, %g1) 45 45 ldx [%g6 + TI_FLAGS], %g1 46 + andcc %sp, 0x1, %g0 47 + movne %icc, 0, %g1 46 48 andcc %g1, _TIF_32BIT, %g0 47 49 ldub [%g6 + TI_WSAVED], %g1 48 50 sll %g1, 3, %g3
+15 -1
arch/sparc/lib/atomic_64.S
··· 1 1 /* atomic.S: These things are too big to do inline. 2 2 * 3 - * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) 3 + * Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net) 4 4 */ 5 5 6 6 #include <linux/linkage.h> ··· 117 117 sub %g1, %o0, %o0 118 118 2: BACKOFF_SPIN(%o2, %o3, 1b) 119 119 ENDPROC(atomic64_sub_ret) 120 + 121 + ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ 122 + BACKOFF_SETUP(%o2) 123 + 1: ldx [%o0], %g1 124 + brlez,pn %g1, 3f 125 + sub %g1, 1, %g7 126 + casx [%o0], %g1, %g7 127 + cmp %g1, %g7 128 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 129 + nop 130 + 3: retl 131 + sub %g1, 1, %o0 132 + 2: BACKOFF_SPIN(%o2, %o3, 1b) 133 + ENDPROC(atomic64_dec_if_positive)
+1
arch/sparc/lib/ksyms.c
··· 116 116 EXPORT_SYMBOL(atomic64_add_ret); 117 117 EXPORT_SYMBOL(atomic64_sub); 118 118 EXPORT_SYMBOL(atomic64_sub_ret); 119 + EXPORT_SYMBOL(atomic64_dec_if_positive); 119 120 120 121 /* Atomic bit operations. */ 121 122 EXPORT_SYMBOL(test_and_set_bit);
+1 -1
arch/sparc/math-emu/math_64.c
··· 320 320 XR = 0; 321 321 else if (freg < 16) 322 322 XR = regs->u_regs[freg]; 323 - else if (test_thread_flag(TIF_32BIT)) { 323 + else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { 324 324 struct reg_window32 __user *win32; 325 325 flushw_user (); 326 326 win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+7 -14
arch/x86/include/asm/xen/hypercall.h
··· 359 359 return _hypercall4(int, update_va_mapping, va, 360 360 new_val.pte, new_val.pte >> 32, flags); 361 361 } 362 + extern int __must_check xen_event_channel_op_compat(int, void *); 362 363 363 364 static inline int 364 365 HYPERVISOR_event_channel_op(int cmd, void *arg) 365 366 { 366 367 int rc = _hypercall2(int, event_channel_op, cmd, arg); 367 - if (unlikely(rc == -ENOSYS)) { 368 - struct evtchn_op op; 369 - op.cmd = cmd; 370 - memcpy(&op.u, arg, sizeof(op.u)); 371 - rc = _hypercall1(int, event_channel_op_compat, &op); 372 - memcpy(arg, &op.u, sizeof(op.u)); 373 - } 368 + if (unlikely(rc == -ENOSYS)) 369 + rc = xen_event_channel_op_compat(cmd, arg); 374 370 return rc; 375 371 } 376 372 ··· 382 386 return _hypercall3(int, console_io, cmd, count, str); 383 387 } 384 388 389 + extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); 390 + 385 391 static inline int 386 392 HYPERVISOR_physdev_op(int cmd, void *arg) 387 393 { 388 394 int rc = _hypercall2(int, physdev_op, cmd, arg); 389 - if (unlikely(rc == -ENOSYS)) { 390 - struct physdev_op op; 391 - op.cmd = cmd; 392 - memcpy(&op.u, arg, sizeof(op.u)); 393 - rc = _hypercall1(int, physdev_op_compat, &op); 394 - memcpy(arg, &op.u, sizeof(op.u)); 395 - } 395 + if (unlikely(rc == -ENOSYS)) 396 + rc = HYPERVISOR_physdev_op_compat(cmd, arg); 396 397 return rc; 397 398 } 398 399
-1
arch/x86/include/asm/xen/hypervisor.h
··· 33 33 #ifndef _ASM_X86_XEN_HYPERVISOR_H 34 34 #define _ASM_X86_XEN_HYPERVISOR_H 35 35 36 - /* arch/i386/kernel/setup.c */ 37 36 extern struct shared_info *HYPERVISOR_shared_info; 38 37 extern struct start_info *xen_start_info; 39 38
+34 -26
arch/x86/kvm/x86.c
··· 3779 3779 { 3780 3780 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 3781 3781 3782 - memcpy(vcpu->run->mmio.data, frag->data, frag->len); 3782 + memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 3783 3783 return X86EMUL_CONTINUE; 3784 3784 } 3785 3785 ··· 3832 3832 bytes -= handled; 3833 3833 val += handled; 3834 3834 3835 - while (bytes) { 3836 - unsigned now = min(bytes, 8U); 3837 - 3838 - frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 3839 - frag->gpa = gpa; 3840 - frag->data = val; 3841 - frag->len = now; 3842 - 3843 - gpa += now; 3844 - val += now; 3845 - bytes -= now; 3846 - } 3835 + WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 3836 + frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 3837 + frag->gpa = gpa; 3838 + frag->data = val; 3839 + frag->len = bytes; 3847 3840 return X86EMUL_CONTINUE; 3848 3841 } 3849 3842 ··· 3883 3890 vcpu->mmio_needed = 1; 3884 3891 vcpu->mmio_cur_fragment = 0; 3885 3892 3886 - vcpu->run->mmio.len = vcpu->mmio_fragments[0].len; 3893 + vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 3887 3894 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 3888 3895 vcpu->run->exit_reason = KVM_EXIT_MMIO; 3889 3896 vcpu->run->mmio.phys_addr = gpa; ··· 5515 5522 * 5516 5523 * read: 5517 5524 * for each fragment 5518 - * write gpa, len 5519 - * exit 5520 - * copy data 5525 + * for each mmio piece in the fragment 5526 + * write gpa, len 5527 + * exit 5528 + * copy data 5521 5529 * execute insn 5522 5530 * 5523 5531 * write: 5524 5532 * for each fragment 5525 - * write gpa, len 5526 - * copy data 5527 - * exit 5533 + * for each mmio piece in the fragment 5534 + * write gpa, len 5535 + * copy data 5536 + * exit 5528 5537 */ 5529 5538 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 5530 5539 { 5531 5540 struct kvm_run *run = vcpu->run; 5532 5541 struct kvm_mmio_fragment *frag; 5542 + unsigned len; 5533 5543 5534 5544 BUG_ON(!vcpu->mmio_needed); 5535 5545 5536 
5546 /* Complete previous fragment */ 5537 - frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++]; 5547 + frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 5548 + len = min(8u, frag->len); 5538 5549 if (!vcpu->mmio_is_write) 5539 - memcpy(frag->data, run->mmio.data, frag->len); 5550 + memcpy(frag->data, run->mmio.data, len); 5551 + 5552 + if (frag->len <= 8) { 5553 + /* Switch to the next fragment. */ 5554 + frag++; 5555 + vcpu->mmio_cur_fragment++; 5556 + } else { 5557 + /* Go forward to the next mmio piece. */ 5558 + frag->data += len; 5559 + frag->gpa += len; 5560 + frag->len -= len; 5561 + } 5562 + 5540 5563 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { 5541 5564 vcpu->mmio_needed = 0; 5542 5565 if (vcpu->mmio_is_write) ··· 5560 5551 vcpu->mmio_read_completed = 1; 5561 5552 return complete_emulated_io(vcpu); 5562 5553 } 5563 - /* Initiate next fragment */ 5564 - ++frag; 5554 + 5565 5555 run->exit_reason = KVM_EXIT_MMIO; 5566 5556 run->mmio.phys_addr = frag->gpa; 5567 5557 if (vcpu->mmio_is_write) 5568 - memcpy(run->mmio.data, frag->data, frag->len); 5569 - run->mmio.len = frag->len; 5558 + memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 5559 + run->mmio.len = min(8u, frag->len); 5570 5560 run->mmio.is_write = vcpu->mmio_is_write; 5571 5561 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 5572 5562 return 0;
+20 -1
arch/x86/xen/mmu.c
··· 1288 1288 return this_cpu_read(xen_vcpu_info.arch.cr2); 1289 1289 } 1290 1290 1291 + void xen_flush_tlb_all(void) 1292 + { 1293 + struct mmuext_op *op; 1294 + struct multicall_space mcs; 1295 + 1296 + trace_xen_mmu_flush_tlb_all(0); 1297 + 1298 + preempt_disable(); 1299 + 1300 + mcs = xen_mc_entry(sizeof(*op)); 1301 + 1302 + op = mcs.args; 1303 + op->cmd = MMUEXT_TLB_FLUSH_ALL; 1304 + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 1305 + 1306 + xen_mc_issue(PARAVIRT_LAZY_MMU); 1307 + 1308 + preempt_enable(); 1309 + } 1291 1310 static void xen_flush_tlb(void) 1292 1311 { 1293 1312 struct mmuext_op *op; ··· 2537 2518 err = 0; 2538 2519 out: 2539 2520 2540 - flush_tlb_all(); 2521 + xen_flush_tlb_all(); 2541 2522 2542 2523 return err; 2543 2524 }
+2
arch/xtensa/Kconfig
··· 13 13 select GENERIC_CPU_DEVICES 14 14 select MODULES_USE_ELF_RELA 15 15 select GENERIC_PCI_IOMAP 16 + select GENERIC_KERNEL_THREAD 17 + select GENERIC_KERNEL_EXECVE 16 18 select ARCH_WANT_OPTIONAL_GPIOLIB 17 19 help 18 20 Xtensa processors are 32-bit RISC machines designed by Tensilica
+4
arch/xtensa/include/asm/io.h
··· 62 62 static inline void iounmap(volatile void __iomem *addr) 63 63 { 64 64 } 65 + 66 + #define virt_to_bus virt_to_phys 67 + #define bus_to_virt phys_to_virt 68 + 65 69 #endif /* CONFIG_MMU */ 66 70 67 71 /*
+1 -3
arch/xtensa/include/asm/processor.h
··· 152 152 153 153 /* Clearing a0 terminates the backtrace. */ 154 154 #define start_thread(regs, new_pc, new_sp) \ 155 + memset(regs, 0, sizeof(*regs)); \ 155 156 regs->pc = new_pc; \ 156 157 regs->ps = USER_PS_VALUE; \ 157 158 regs->areg[1] = new_sp; \ ··· 168 167 169 168 /* Free all resources held by a thread. */ 170 169 #define release_thread(thread) do { } while(0) 171 - 172 - /* Create a kernel thread without removing it from tasklists */ 173 - extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); 174 170 175 171 /* Copy and release all segment info associated with a VM */ 176 172 #define copy_segments(p, mm) do { } while(0)
+1 -1
arch/xtensa/include/asm/syscall.h
··· 10 10 11 11 struct pt_regs; 12 12 struct sigaction; 13 - asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); 13 + asmlinkage long sys_execve(char*, char**, char**, struct pt_regs*); 14 14 asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*); 15 15 asmlinkage long xtensa_ptrace(long, long, long, long); 16 16 asmlinkage long xtensa_sigreturn(struct pt_regs*);
+5 -10
arch/xtensa/include/asm/unistd.h
··· 1 - /* 2 - * include/asm-xtensa/unistd.h 3 - * 4 - * This file is subject to the terms and conditions of the GNU General Public 5 - * License. See the file "COPYING" in the main directory of this archive 6 - * for more details. 7 - * 8 - * Copyright (C) 2001 - 2005 Tensilica Inc. 9 - */ 1 + #ifndef _XTENSA_UNISTD_H 2 + #define _XTENSA_UNISTD_H 10 3 4 + #define __ARCH_WANT_SYS_EXECVE 11 5 #include <uapi/asm/unistd.h> 12 - 13 6 14 7 /* 15 8 * "Conditional" syscalls ··· 30 37 #define __IGNORE_mmap /* use mmap2 */ 31 38 #define __IGNORE_vfork /* use clone */ 32 39 #define __IGNORE_fadvise64 /* use fadvise64_64 */ 40 + 41 + #endif /* _XTENSA_UNISTD_H */
+4 -12
arch/xtensa/include/uapi/asm/unistd.h
··· 1 - /* 2 - * include/asm-xtensa/unistd.h 3 - * 4 - * This file is subject to the terms and conditions of the GNU General Public 5 - * License. See the file "COPYING" in the main directory of this archive 6 - * for more details. 7 - * 8 - * Copyright (C) 2001 - 2012 Tensilica Inc. 9 - */ 10 - 11 - #ifndef _UAPI_XTENSA_UNISTD_H 1 + #if !defined(_UAPI_XTENSA_UNISTD_H) || defined(__SYSCALL) 12 2 #define _UAPI_XTENSA_UNISTD_H 13 3 14 4 #ifndef __SYSCALL ··· 262 272 #define __NR_clone 116 263 273 __SYSCALL(116, xtensa_clone, 5) 264 274 #define __NR_execve 117 265 - __SYSCALL(117, xtensa_execve, 3) 275 + __SYSCALL(117, sys_execve, 3) 266 276 #define __NR_exit 118 267 277 __SYSCALL(118, sys_exit, 1) 268 278 #define __NR_exit_group 119 ··· 748 758 #define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */ 749 759 750 760 #define SYS_XTENSA_COUNT 5 /* count */ 761 + 762 + #undef __SYSCALL 751 763 752 764 #endif /* _UAPI_XTENSA_UNISTD_H */
+13 -44
arch/xtensa/kernel/entry.S
··· 1833 1833 1834 1834 1835 1835 /* 1836 - * Create a kernel thread 1837 - * 1838 - * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 1839 - * a2 a2 a3 a4 1840 - */ 1841 - 1842 - ENTRY(kernel_thread) 1843 - entry a1, 16 1844 - 1845 - mov a5, a2 # preserve fn over syscall 1846 - mov a7, a3 # preserve args over syscall 1847 - 1848 - movi a3, _CLONE_VM | _CLONE_UNTRACED 1849 - movi a2, __NR_clone 1850 - or a6, a4, a3 # arg0: flags 1851 - mov a3, a1 # arg1: sp 1852 - syscall 1853 - 1854 - beq a3, a1, 1f # branch if parent 1855 - mov a6, a7 # args 1856 - callx4 a5 # fn(args) 1857 - 1858 - movi a2, __NR_exit 1859 - syscall # return value of fn(args) still in a6 1860 - 1861 - 1: retw 1862 - 1863 - /* 1864 - * Do a system call from kernel instead of calling sys_execve, so we end up 1865 - * with proper pt_regs. 1866 - * 1867 - * int kernel_execve(const char *fname, char *const argv[], charg *const envp[]) 1868 - * a2 a2 a3 a4 1869 - */ 1870 - 1871 - ENTRY(kernel_execve) 1872 - entry a1, 16 1873 - mov a6, a2 # arg0 is in a6 1874 - movi a2, __NR_execve 1875 - syscall 1876 - 1877 - retw 1878 - 1879 - /* 1880 1836 * Task switch. 1881 1837 * 1882 1838 * struct task* _switch_to (struct task* prev, struct task* next) ··· 1914 1958 1915 1959 j common_exception_return 1916 1960 1961 + /* 1962 + * Kernel thread creation helper 1963 + * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg 1964 + * left from _switch_to: a6 = prev 1965 + */ 1966 + ENTRY(ret_from_kernel_thread) 1967 + 1968 + call4 schedule_tail 1969 + mov a6, a3 1970 + callx4 a2 1971 + j common_exception_return 1972 + 1973 + ENDPROC(ret_from_kernel_thread)
+71 -57
arch/xtensa/kernel/process.c
··· 45 45 #include <asm/regs.h> 46 46 47 47 extern void ret_from_fork(void); 48 + extern void ret_from_kernel_thread(void); 48 49 49 50 struct task_struct *current_set[NR_CPUS] = {&init_task, }; 50 51 ··· 159 158 /* 160 159 * Copy thread. 161 160 * 161 + * There are two modes in which this function is called: 162 + * 1) Userspace thread creation, 163 + * regs != NULL, usp_thread_fn is userspace stack pointer. 164 + * It is expected to copy parent regs (in case CLONE_VM is not set 165 + * in the clone_flags) and set up passed usp in the childregs. 166 + * 2) Kernel thread creation, 167 + * regs == NULL, usp_thread_fn is the function to run in the new thread 168 + * and thread_fn_arg is its parameter. 169 + * childregs are not used for the kernel threads. 170 + * 162 171 * The stack layout for the new thread looks like this: 163 172 * 164 - * +------------------------+ <- sp in childregs (= tos) 173 + * +------------------------+ 165 174 * | childregs | 166 175 * +------------------------+ <- thread.sp = sp in dummy-frame 167 176 * | dummy-frame | (saved in dummy-frame spill-area) 168 177 * +------------------------+ 169 178 * 170 - * We create a dummy frame to return to ret_from_fork: 171 - * a0 points to ret_from_fork (simulating a call4) 179 + * We create a dummy frame to return to either ret_from_fork or 180 + * ret_from_kernel_thread: 181 + * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4) 172 182 * sp points to itself (thread.sp) 173 - * a2, a3 are unused. 183 + * a2, a3 are unused for userspace threads, 184 + * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads. 174 185 * 175 186 * Note: This is a pristine frame, so we don't need any spill region on top of 176 187 * childregs. ··· 198 185 * involved. Much simpler to just not copy those live frames across. 
199 186 */ 200 187 201 - int copy_thread(unsigned long clone_flags, unsigned long usp, 202 - unsigned long unused, 203 - struct task_struct * p, struct pt_regs * regs) 188 + int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, 189 + unsigned long thread_fn_arg, 190 + struct task_struct *p, struct pt_regs *unused) 204 191 { 205 - struct pt_regs *childregs; 206 - unsigned long tos; 207 - int user_mode = user_mode(regs); 192 + struct pt_regs *childregs = task_pt_regs(p); 208 193 209 194 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) 210 195 struct thread_info *ti; 211 196 #endif 212 197 213 - /* Set up new TSS. */ 214 - tos = (unsigned long)task_stack_page(p) + THREAD_SIZE; 215 - if (user_mode) 216 - childregs = (struct pt_regs*)(tos - PT_USER_SIZE); 217 - else 218 - childregs = (struct pt_regs*)tos - 1; 219 - 220 - /* This does not copy all the regs. In a bout of brilliance or madness, 221 - ARs beyond a0-a15 exist past the end of the struct. */ 222 - *childregs = *regs; 223 - 224 198 /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */ 225 199 *((int*)childregs - 3) = (unsigned long)childregs; 226 200 *((int*)childregs - 4) = 0; 227 201 228 - childregs->areg[2] = 0; 229 - p->set_child_tid = p->clear_child_tid = NULL; 230 - p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1); 231 202 p->thread.sp = (unsigned long)childregs; 232 203 233 - if (user_mode(regs)) { 204 + if (!(p->flags & PF_KTHREAD)) { 205 + struct pt_regs *regs = current_pt_regs(); 206 + unsigned long usp = usp_thread_fn ? 207 + usp_thread_fn : regs->areg[1]; 234 208 209 + p->thread.ra = MAKE_RA_FOR_CALL( 210 + (unsigned long)ret_from_fork, 0x1); 211 + 212 + /* This does not copy all the regs. 213 + * In a bout of brilliance or madness, 214 + * ARs beyond a0-a15 exist past the end of the struct. 
215 + */ 216 + *childregs = *regs; 235 217 childregs->areg[1] = usp; 218 + childregs->areg[2] = 0; 219 + 220 + /* When sharing memory with the parent thread, the child 221 + usually starts on a pristine stack, so we have to reset 222 + windowbase, windowstart and wmask. 223 + (Note that such a new thread is required to always create 224 + an initial call4 frame) 225 + The exception is vfork, where the new thread continues to 226 + run on the parent's stack until it calls execve. This could 227 + be a call8 or call12, which requires a legal stack frame 228 + of the previous caller for the overflow handlers to work. 229 + (Note that it's always legal to overflow live registers). 230 + In this case, ensure to spill at least the stack pointer 231 + of that frame. */ 232 + 236 233 if (clone_flags & CLONE_VM) { 237 - childregs->wmask = 1; /* can't share live windows */ 234 + /* check that caller window is live and same stack */ 235 + int len = childregs->wmask & ~0xf; 236 + if (regs->areg[1] == usp && len != 0) { 237 + int callinc = (regs->areg[0] >> 30) & 3; 238 + int caller_ars = XCHAL_NUM_AREGS - callinc * 4; 239 + put_user(regs->areg[caller_ars+1], 240 + (unsigned __user*)(usp - 12)); 241 + } 242 + childregs->wmask = 1; 243 + childregs->windowstart = 1; 244 + childregs->windowbase = 0; 238 245 } else { 239 246 int len = childregs->wmask & ~0xf; 240 247 memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4], ··· 263 230 // FIXME: we need to set THREADPTR in thread_info... 264 231 if (clone_flags & CLONE_SETTLS) 265 232 childregs->areg[2] = childregs->areg[6]; 266 - 267 233 } else { 268 - /* In kernel space, we start a new thread with a new stack. 
*/ 269 - childregs->wmask = 1; 270 - childregs->areg[1] = tos; 234 + p->thread.ra = MAKE_RA_FOR_CALL( 235 + (unsigned long)ret_from_kernel_thread, 1); 236 + 237 + /* pass parameters to ret_from_kernel_thread: 238 + * a2 = thread_fn, a3 = thread_fn arg 239 + */ 240 + *((int *)childregs - 1) = thread_fn_arg; 241 + *((int *)childregs - 2) = usp_thread_fn; 242 + 243 + /* Childregs are only used when we're going to userspace 244 + * in which case start_thread will set them up. 245 + */ 271 246 } 272 247 273 248 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) ··· 371 330 void __user *child_tid, long a5, 372 331 struct pt_regs *regs) 373 332 { 374 - if (!newsp) 375 - newsp = regs->areg[1]; 376 333 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); 377 334 } 378 - 379 - /* 380 - * xtensa_execve() executes a new program. 381 - */ 382 - 383 - asmlinkage 384 - long xtensa_execve(const char __user *name, 385 - const char __user *const __user *argv, 386 - const char __user *const __user *envp, 387 - long a3, long a4, long a5, 388 - struct pt_regs *regs) 389 - { 390 - long error; 391 - struct filename *filename; 392 - 393 - filename = getname(name); 394 - error = PTR_ERR(filename); 395 - if (IS_ERR(filename)) 396 - goto out; 397 - error = do_execve(filename->name, argv, envp, regs); 398 - putname(filename); 399 - out: 400 - return error; 401 - } 402 -
+3 -4
arch/xtensa/kernel/syscall.c
··· 32 32 syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= { 33 33 [0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall, 34 34 35 - #undef __SYSCALL 36 35 #define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol, 37 - #undef __KERNEL_SYSCALLS__ 38 - #include <asm/unistd.h> 36 + #include <uapi/asm/unistd.h> 39 37 }; 40 38 41 39 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) ··· 47 49 return (long)ret; 48 50 } 49 51 50 - asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len) 52 + asmlinkage long xtensa_fadvise64_64(int fd, int advice, 53 + unsigned long long offset, unsigned long long len) 51 54 { 52 55 return sys_fadvise64_64(fd, offset, len, advice); 53 56 }
-1
arch/xtensa/kernel/xtensa_ksyms.c
··· 43 43 EXPORT_SYMBOL(clear_page); 44 44 EXPORT_SYMBOL(copy_page); 45 45 46 - EXPORT_SYMBOL(kernel_thread); 47 46 EXPORT_SYMBOL(empty_zero_page); 48 47 49 48 /*
+1 -1
block/Kconfig
··· 89 89 90 90 config BLK_DEV_THROTTLING 91 91 bool "Block layer bio throttling support" 92 - depends on BLK_CGROUP=y && EXPERIMENTAL 92 + depends on BLK_CGROUP=y 93 93 default n 94 94 ---help--- 95 95 Block layer bio throttling support. It can be used to limit
+10
block/blk-cgroup.c
··· 285 285 blkg_destroy(blkg); 286 286 spin_unlock(&blkcg->lock); 287 287 } 288 + 289 + /* 290 + * root blkg is destroyed. Just clear the pointer since 291 + * root_rl does not take reference on root blkg. 292 + */ 293 + q->root_blkg = NULL; 294 + q->root_rl.blkg = NULL; 288 295 } 289 296 290 297 static void blkg_rcu_free(struct rcu_head *rcu_head) ··· 333 326 */ 334 327 if (rl == &q->root_rl) { 335 328 ent = &q->blkg_list; 329 + /* There are no more block groups, hence no request lists */ 330 + if (list_empty(ent)) 331 + return NULL; 336 332 } else { 337 333 blkg = container_of(rl, struct blkcg_gq, rl); 338 334 ent = &blkg->q_node;
+2 -1
block/blk-core.c
··· 2868 2868 struct request *rqa = container_of(a, struct request, queuelist); 2869 2869 struct request *rqb = container_of(b, struct request, queuelist); 2870 2870 2871 - return !(rqa->q <= rqb->q); 2871 + return !(rqa->q < rqb->q || 2872 + (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 2872 2873 } 2873 2874 2874 2875 /*
+8 -3
crypto/cryptd.c
··· 137 137 struct crypto_async_request *req, *backlog; 138 138 139 139 cpu_queue = container_of(work, struct cryptd_cpu_queue, work); 140 - /* Only handle one request at a time to avoid hogging crypto 141 - * workqueue. preempt_disable/enable is used to prevent 142 - * being preempted by cryptd_enqueue_request() */ 140 + /* 141 + * Only handle one request at a time to avoid hogging crypto workqueue. 142 + * preempt_disable/enable is used to prevent being preempted by 143 + * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent 144 + * cryptd_enqueue_request() being accessed from software interrupts. 145 + */ 146 + local_bh_disable(); 143 147 preempt_disable(); 144 148 backlog = crypto_get_backlog(&cpu_queue->queue); 145 149 req = crypto_dequeue_request(&cpu_queue->queue); 146 150 preempt_enable(); 151 + local_bh_enable(); 147 152 148 153 if (!req) 149 154 return;
+7 -4
drivers/acpi/video.c
··· 1345 1345 acpi_video_bus_get_devices(struct acpi_video_bus *video, 1346 1346 struct acpi_device *device) 1347 1347 { 1348 - int status; 1348 + int status = 0; 1349 1349 struct acpi_device *dev; 1350 1350 1351 - status = acpi_video_device_enumerate(video); 1352 - if (status) 1353 - return status; 1351 + /* 1352 + * There are systems where video module known to work fine regardless 1353 + * of broken _DOD and ignoring returned value here doesn't cause 1354 + * any issues later. 1355 + */ 1356 + acpi_video_device_enumerate(video); 1354 1357 1355 1358 list_for_each_entry(dev, &device->children, node) { 1356 1359
+7
drivers/base/platform.c
··· 83 83 */ 84 84 int platform_get_irq(struct platform_device *dev, unsigned int num) 85 85 { 86 + #ifdef CONFIG_SPARC 87 + /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ 88 + if (!dev || num >= dev->archdata.num_irqs) 89 + return -ENXIO; 90 + return dev->archdata.irqs[num]; 91 + #else 86 92 struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); 87 93 88 94 return r ? r->start : -ENXIO; 95 + #endif 89 96 } 90 97 EXPORT_SYMBOL_GPL(platform_get_irq); 91 98
+8 -7
drivers/block/Kconfig
··· 131 131 config BLK_CPQ_CISS_DA 132 132 tristate "Compaq Smart Array 5xxx support" 133 133 depends on PCI 134 + select CHECK_SIGNATURE 134 135 help 135 136 This is the driver for Compaq Smart Array 5xxx controllers. 136 137 Everyone using these boards should say Y here. ··· 167 166 module will be called DAC960. 168 167 169 168 config BLK_DEV_UMEM 170 - tristate "Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)" 171 - depends on PCI && EXPERIMENTAL 169 + tristate "Micro Memory MM5415 Battery Backed RAM support" 170 + depends on PCI 172 171 ---help--- 173 172 Saying Y here will include support for the MM5415 family of 174 173 battery backed (Non-volatile) RAM cards. ··· 431 430 a disc is opened for writing. 432 431 433 432 config CDROM_PKTCDVD_WCACHE 434 - bool "Enable write caching (EXPERIMENTAL)" 435 - depends on CDROM_PKTCDVD && EXPERIMENTAL 433 + bool "Enable write caching" 434 + depends on CDROM_PKTCDVD 436 435 help 437 436 If enabled, write caching will be set for the CD-R/W device. For now 438 437 this option is dangerous unless the CD-RW media is known good, as we ··· 509 508 510 509 511 510 config VIRTIO_BLK 512 - tristate "Virtio block driver (EXPERIMENTAL)" 513 - depends on EXPERIMENTAL && VIRTIO 511 + tristate "Virtio block driver" 512 + depends on VIRTIO 514 513 ---help--- 515 514 This is the virtual block driver for virtio. It can be used with 516 515 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. ··· 529 528 530 529 config BLK_DEV_RBD 531 530 tristate "Rados block device (RBD)" 532 - depends on INET && EXPERIMENTAL && BLOCK 531 + depends on INET && BLOCK 533 532 select CEPH_LIB 534 533 select LIBCRC32C 535 534 select CRYPTO_AES
-1
drivers/block/cciss.c
··· 5205 5205 return; 5206 5206 } 5207 5207 /* write all data in the battery backed cache to disk */ 5208 - memset(flush_buf, 0, 4); 5209 5208 return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, 5210 5209 4, 0, CTLR_LUNID, TYPE_CMD); 5211 5210 kfree(flush_buf);
+48 -42
drivers/block/floppy.c
··· 4109 4109 4110 4110 static struct platform_device floppy_device[N_DRIVE]; 4111 4111 4112 + static bool floppy_available(int drive) 4113 + { 4114 + if (!(allowed_drive_mask & (1 << drive))) 4115 + return false; 4116 + if (fdc_state[FDC(drive)].version == FDC_NONE) 4117 + return false; 4118 + return true; 4119 + } 4120 + 4112 4121 static struct kobject *floppy_find(dev_t dev, int *part, void *data) 4113 4122 { 4114 4123 int drive = (*part & 3) | ((*part & 0x80) >> 5); 4115 - if (drive >= N_DRIVE || 4116 - !(allowed_drive_mask & (1 << drive)) || 4117 - fdc_state[FDC(drive)].version == FDC_NONE) 4124 + if (drive >= N_DRIVE || !floppy_available(drive)) 4118 4125 return NULL; 4119 4126 if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) 4120 4127 return NULL; ··· 4131 4124 4132 4125 static int __init do_floppy_init(void) 4133 4126 { 4134 - int i, unit, drive; 4135 - int err, dr; 4127 + int i, unit, drive, err; 4136 4128 4137 4129 set_debugt(); 4138 4130 interruptjiffies = resultjiffies = jiffies; ··· 4143 4137 4144 4138 raw_cmd = NULL; 4145 4139 4146 - for (dr = 0; dr < N_DRIVE; dr++) { 4147 - disks[dr] = alloc_disk(1); 4148 - if (!disks[dr]) { 4140 + floppy_wq = alloc_ordered_workqueue("floppy", 0); 4141 + if (!floppy_wq) 4142 + return -ENOMEM; 4143 + 4144 + for (drive = 0; drive < N_DRIVE; drive++) { 4145 + disks[drive] = alloc_disk(1); 4146 + if (!disks[drive]) { 4149 4147 err = -ENOMEM; 4150 4148 goto out_put_disk; 4151 4149 } 4152 4150 4153 - floppy_wq = alloc_ordered_workqueue("floppy", 0); 4154 - if (!floppy_wq) { 4151 + disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock); 4152 + if (!disks[drive]->queue) { 4155 4153 err = -ENOMEM; 4156 4154 goto out_put_disk; 4157 4155 } 4158 4156 4159 - disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock); 4160 - if (!disks[dr]->queue) { 4161 - err = -ENOMEM; 4162 - goto out_destroy_workq; 4163 - } 4157 + blk_queue_max_hw_sectors(disks[drive]->queue, 64); 4158 + disks[drive]->major = FLOPPY_MAJOR; 
4159 + disks[drive]->first_minor = TOMINOR(drive); 4160 + disks[drive]->fops = &floppy_fops; 4161 + sprintf(disks[drive]->disk_name, "fd%d", drive); 4164 4162 4165 - blk_queue_max_hw_sectors(disks[dr]->queue, 64); 4166 - disks[dr]->major = FLOPPY_MAJOR; 4167 - disks[dr]->first_minor = TOMINOR(dr); 4168 - disks[dr]->fops = &floppy_fops; 4169 - sprintf(disks[dr]->disk_name, "fd%d", dr); 4170 - 4171 - init_timer(&motor_off_timer[dr]); 4172 - motor_off_timer[dr].data = dr; 4173 - motor_off_timer[dr].function = motor_off_callback; 4163 + init_timer(&motor_off_timer[drive]); 4164 + motor_off_timer[drive].data = drive; 4165 + motor_off_timer[drive].function = motor_off_callback; 4174 4166 } 4175 4167 4176 4168 err = register_blkdev(FLOPPY_MAJOR, "fd"); ··· 4286 4282 } 4287 4283 4288 4284 for (drive = 0; drive < N_DRIVE; drive++) { 4289 - if (!(allowed_drive_mask & (1 << drive))) 4290 - continue; 4291 - if (fdc_state[FDC(drive)].version == FDC_NONE) 4285 + if (!floppy_available(drive)) 4292 4286 continue; 4293 4287 4294 4288 floppy_device[drive].name = floppy_device_name; ··· 4295 4293 4296 4294 err = platform_device_register(&floppy_device[drive]); 4297 4295 if (err) 4298 - goto out_release_dma; 4296 + goto out_remove_drives; 4299 4297 4300 4298 err = device_create_file(&floppy_device[drive].dev, 4301 4299 &dev_attr_cmos); ··· 4313 4311 4314 4312 out_unreg_platform_dev: 4315 4313 platform_device_unregister(&floppy_device[drive]); 4314 + out_remove_drives: 4315 + while (drive--) { 4316 + if (floppy_available(drive)) { 4317 + del_gendisk(disks[drive]); 4318 + device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); 4319 + platform_device_unregister(&floppy_device[drive]); 4320 + } 4321 + } 4316 4322 out_release_dma: 4317 4323 if (atomic_read(&usage_count)) 4318 4324 floppy_release_irq_and_dma(); 4319 4325 out_unreg_region: 4320 4326 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4321 4327 platform_driver_unregister(&floppy_driver); 4322 - out_destroy_workq: 4323 
- destroy_workqueue(floppy_wq); 4324 4328 out_unreg_blkdev: 4325 4329 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4326 4330 out_put_disk: 4327 - while (dr--) { 4328 - del_timer_sync(&motor_off_timer[dr]); 4329 - if (disks[dr]->queue) { 4330 - blk_cleanup_queue(disks[dr]->queue); 4331 - /* 4332 - * put_disk() is not paired with add_disk() and 4333 - * will put queue reference one extra time. fix it. 4334 - */ 4335 - disks[dr]->queue = NULL; 4331 + for (drive = 0; drive < N_DRIVE; drive++) { 4332 + if (!disks[drive]) 4333 + break; 4334 + if (disks[drive]->queue) { 4335 + del_timer_sync(&motor_off_timer[drive]); 4336 + blk_cleanup_queue(disks[drive]->queue); 4337 + disks[drive]->queue = NULL; 4336 4338 } 4337 - put_disk(disks[dr]); 4339 + put_disk(disks[drive]); 4338 4340 } 4341 + destroy_workqueue(floppy_wq); 4339 4342 return err; 4340 4343 } 4341 4344 ··· 4558 4551 for (drive = 0; drive < N_DRIVE; drive++) { 4559 4552 del_timer_sync(&motor_off_timer[drive]); 4560 4553 4561 - if ((allowed_drive_mask & (1 << drive)) && 4562 - fdc_state[FDC(drive)].version != FDC_NONE) { 4554 + if (floppy_available(drive)) { 4563 4555 del_gendisk(disks[drive]); 4564 4556 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); 4565 4557 platform_device_unregister(&floppy_device[drive]);
+15 -2
drivers/block/loop.c
··· 976 976 if (lo->lo_state != Lo_bound) 977 977 return -ENXIO; 978 978 979 - if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ 980 - return -EBUSY; 979 + /* 980 + * If we've explicitly asked to tear down the loop device, 981 + * and it has an elevated reference count, set it for auto-teardown when 982 + * the last reference goes away. This stops $!~#$@ udev from 983 + * preventing teardown because it decided that it needs to run blkid on 984 + * the loopback device whenever they appear. xfstests is notorious for 985 + * failing tests because blkid via udev races with a losetup 986 + * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d 987 + * command to fail with EBUSY. 988 + */ 989 + if (lo->lo_refcnt > 1) { 990 + lo->lo_flags |= LO_FLAGS_AUTOCLEAR; 991 + mutex_unlock(&lo->lo_ctl_mutex); 992 + return 0; 993 + } 981 994 982 995 if (filp == NULL) 983 996 return -EINVAL;
+15 -4
drivers/block/mtip32xx/mtip32xx.c
··· 2035 2035 } 2036 2036 return rv; 2037 2037 } 2038 - 2039 - static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout) 2038 + static void mtip_set_timeout(struct driver_data *dd, 2039 + struct host_to_dev_fis *fis, 2040 + unsigned int *timeout, u8 erasemode) 2040 2041 { 2041 2042 switch (fis->command) { 2042 2043 case ATA_CMD_DOWNLOAD_MICRO: ··· 2045 2044 break; 2046 2045 case ATA_CMD_SEC_ERASE_UNIT: 2047 2046 case 0xFC: 2048 - *timeout = 240000; /* 4 minutes */ 2047 + if (erasemode) 2048 + *timeout = ((*(dd->port->identify + 90) * 2) * 60000); 2049 + else 2050 + *timeout = ((*(dd->port->identify + 89) * 2) * 60000); 2049 2051 break; 2050 2052 case ATA_CMD_STANDBYNOW1: 2051 2053 *timeout = 120000; /* 2 minutes */ ··· 2091 2087 unsigned int transfer_size; 2092 2088 unsigned long task_file_data; 2093 2089 int intotal = outtotal + req_task->out_size; 2090 + int erasemode = 0; 2094 2091 2095 2092 taskout = req_task->out_size; 2096 2093 taskin = req_task->in_size; ··· 2217 2212 fis.lba_hi, 2218 2213 fis.device); 2219 2214 2220 - mtip_set_timeout(&fis, &timeout); 2215 + /* check for erase mode support during secure erase.*/ 2216 + if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) 2217 + && (outbuf[0] & MTIP_SEC_ERASE_MODE)) { 2218 + erasemode = 1; 2219 + } 2220 + 2221 + mtip_set_timeout(dd, &fis, &timeout, erasemode); 2221 2222 2222 2223 /* Determine the correct transfer size.*/ 2223 2224 if (force_single_sector)
+3
drivers/block/mtip32xx/mtip32xx.h
··· 33 33 /* offset of Device Control register in PCIe extended capabilites space */ 34 34 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 35 35 36 + /* check for erase mode support during secure erase */ 37 + #define MTIP_SEC_ERASE_MODE 0x3 38 + 36 39 /* # of times to retry timed out/failed IOs */ 37 40 #define MTIP_MAX_RETRIES 2 38 41
+2 -2
drivers/block/xen-blkback/common.h
··· 158 158 struct block_device *bdev; 159 159 /* Cached size parameter. */ 160 160 sector_t size; 161 - bool flush_support; 162 - bool discard_secure; 161 + unsigned int flush_support:1; 162 + unsigned int discard_secure:1; 163 163 }; 164 164 165 165 struct backend_info;
+4 -5
drivers/block/xen-blkback/xenbus.c
··· 105 105 { 106 106 struct xen_blkif *blkif; 107 107 108 - blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL); 108 + blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); 109 109 if (!blkif) 110 110 return ERR_PTR(-ENOMEM); 111 111 112 - memset(blkif, 0, sizeof(*blkif)); 113 112 blkif->domid = domid; 114 113 spin_lock_init(&blkif->blk_ring_lock); 115 114 atomic_set(&blkif->refcnt, 1); ··· 195 196 } 196 197 } 197 198 198 - void xen_blkif_free(struct xen_blkif *blkif) 199 + static void xen_blkif_free(struct xen_blkif *blkif) 199 200 { 200 201 if (!atomic_dec_and_test(&blkif->refcnt)) 201 202 BUG(); ··· 256 257 VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor); 257 258 VBD_SHOW(mode, "%s\n", be->mode); 258 259 259 - int xenvbd_sysfs_addif(struct xenbus_device *dev) 260 + static int xenvbd_sysfs_addif(struct xenbus_device *dev) 260 261 { 261 262 int error; 262 263 ··· 280 281 return error; 281 282 } 282 283 283 - void xenvbd_sysfs_delif(struct xenbus_device *dev) 284 + static void xenvbd_sysfs_delif(struct xenbus_device *dev) 284 285 { 285 286 sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group); 286 287 device_remove_file(&dev->dev, &dev_attr_mode);
+1 -1
drivers/cpufreq/powernow-k8.c
··· 5 5 * http://www.gnu.org/licenses/gpl.html 6 6 * 7 7 * Maintainer: 8 - * Andreas Herrmann <andreas.herrmann3@amd.com> 8 + * Andreas Herrmann <herrmann.der.user@googlemail.com> 9 9 * 10 10 * Based on the powernow-k7.c module written by Dave Jones. 11 11 * (C) 2003 Dave Jones on behalf of SuSE Labs
+1 -1
drivers/gpio/Kconfig
··· 47 47 48 48 config OF_GPIO 49 49 def_bool y 50 - depends on OF && !SPARC 50 + depends on OF 51 51 52 52 config DEBUG_GPIO 53 53 bool "Debug GPIO calls"
+1 -1
drivers/gpio/gpio-74x164.c
··· 153 153 } 154 154 155 155 chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers; 156 - chip->buffer = devm_kzalloc(&spi->dev, chip->gpio_chip.ngpio, GFP_KERNEL); 156 + chip->buffer = devm_kzalloc(&spi->dev, chip->registers, GFP_KERNEL); 157 157 if (!chip->buffer) { 158 158 ret = -ENOMEM; 159 159 goto exit_destroy;
+3 -1
drivers/gpio/gpio-mvebu.c
··· 244 244 if (ret) 245 245 return ret; 246 246 247 + mvebu_gpio_set(chip, pin, value); 248 + 247 249 spin_lock_irqsave(&mvchip->lock, flags); 248 250 u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)); 249 251 u &= ~(1 << pin); ··· 646 644 ct->handler = handle_edge_irq; 647 645 ct->chip.name = mvchip->chip.label; 648 646 649 - irq_setup_generic_chip(gc, IRQ_MSK(ngpios), IRQ_GC_INIT_MASK_CACHE, 647 + irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0, 650 648 IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); 651 649 652 650 /* Setup irq domain on top of the generic chip. */
+35
drivers/gpio/gpio-omap.c
··· 251 251 } 252 252 } 253 253 254 + /** 255 + * _clear_gpio_debounce - clear debounce settings for a gpio 256 + * @bank: the gpio bank we're acting upon 257 + * @gpio: the gpio number on this @gpio 258 + * 259 + * If a gpio is using debounce, then clear the debounce enable bit and if 260 + * this is the only gpio in this bank using debounce, then clear the debounce 261 + * time too. The debounce clock will also be disabled when calling this function 262 + * if this is the only gpio in the bank using debounce. 263 + */ 264 + static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio) 265 + { 266 + u32 gpio_bit = GPIO_BIT(bank, gpio); 267 + 268 + if (!bank->dbck_flag) 269 + return; 270 + 271 + if (!(bank->dbck_enable_mask & gpio_bit)) 272 + return; 273 + 274 + bank->dbck_enable_mask &= ~gpio_bit; 275 + bank->context.debounce_en &= ~gpio_bit; 276 + __raw_writel(bank->context.debounce_en, 277 + bank->base + bank->regs->debounce_en); 278 + 279 + if (!bank->dbck_enable_mask) { 280 + bank->context.debounce = 0; 281 + __raw_writel(bank->context.debounce, bank->base + 282 + bank->regs->debounce); 283 + clk_disable(bank->dbck); 284 + bank->dbck_enabled = false; 285 + } 286 + } 287 + 254 288 static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio, 255 289 unsigned trigger) 256 290 { ··· 573 539 _set_gpio_irqenable(bank, gpio, 0); 574 540 _clear_gpio_irqstatus(bank, gpio); 575 541 _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); 542 + _clear_gpio_debounce(bank, gpio); 576 543 } 577 544 578 545 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
+2 -2
drivers/gpio/gpio-timberdale.c
··· 116 116 unsigned long flags; 117 117 118 118 spin_lock_irqsave(&tgpio->lock, flags); 119 - tgpio->last_ier &= ~(1 << offset); 119 + tgpio->last_ier &= ~(1UL << offset); 120 120 iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); 121 121 spin_unlock_irqrestore(&tgpio->lock, flags); 122 122 } ··· 128 128 unsigned long flags; 129 129 130 130 spin_lock_irqsave(&tgpio->lock, flags); 131 - tgpio->last_ier |= 1 << offset; 131 + tgpio->last_ier |= 1UL << offset; 132 132 iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); 133 133 spin_unlock_irqrestore(&tgpio->lock, flags); 134 134 }
+7 -3
drivers/gpio/gpiolib.c
··· 623 623 */ 624 624 625 625 status = gpio_request(gpio, "sysfs"); 626 - if (status < 0) 626 + if (status < 0) { 627 + if (status == -EPROBE_DEFER) 628 + status = -ENODEV; 627 629 goto done; 628 - 630 + } 629 631 status = gpio_export(gpio, true); 630 632 if (status < 0) 631 633 gpio_free(gpio); ··· 1193 1191 1194 1192 spin_lock_irqsave(&gpio_lock, flags); 1195 1193 1196 - if (!gpio_is_valid(gpio)) 1194 + if (!gpio_is_valid(gpio)) { 1195 + status = -EINVAL; 1197 1196 goto done; 1197 + } 1198 1198 desc = &gpio_desc[gpio]; 1199 1199 chip = desc->chip; 1200 1200 if (chip == NULL)
+32 -16
drivers/gpu/drm/drm_fops.c
··· 121 121 int minor_id = iminor(inode); 122 122 struct drm_minor *minor; 123 123 int retcode = 0; 124 + int need_setup = 0; 125 + struct address_space *old_mapping; 124 126 125 127 minor = idr_find(&drm_minors_idr, minor_id); 126 128 if (!minor) ··· 134 132 if (drm_device_is_unplugged(dev)) 135 133 return -ENODEV; 136 134 137 - retcode = drm_open_helper(inode, filp, dev); 138 - if (!retcode) { 139 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]); 140 - if (!dev->open_count++) 141 - retcode = drm_setup(dev); 142 - } 143 - if (!retcode) { 144 - mutex_lock(&dev->struct_mutex); 145 - if (dev->dev_mapping == NULL) 146 - dev->dev_mapping = &inode->i_data; 147 - /* ihold ensures nobody can remove inode with our i_data */ 148 - ihold(container_of(dev->dev_mapping, struct inode, i_data)); 149 - inode->i_mapping = dev->dev_mapping; 150 - filp->f_mapping = dev->dev_mapping; 151 - mutex_unlock(&dev->struct_mutex); 152 - } 135 + if (!dev->open_count++) 136 + need_setup = 1; 137 + mutex_lock(&dev->struct_mutex); 138 + old_mapping = dev->dev_mapping; 139 + if (old_mapping == NULL) 140 + dev->dev_mapping = &inode->i_data; 141 + /* ihold ensures nobody can remove inode with our i_data */ 142 + ihold(container_of(dev->dev_mapping, struct inode, i_data)); 143 + inode->i_mapping = dev->dev_mapping; 144 + filp->f_mapping = dev->dev_mapping; 145 + mutex_unlock(&dev->struct_mutex); 153 146 147 + retcode = drm_open_helper(inode, filp, dev); 148 + if (retcode) 149 + goto err_undo; 150 + atomic_inc(&dev->counts[_DRM_STAT_OPENS]); 151 + if (need_setup) { 152 + retcode = drm_setup(dev); 153 + if (retcode) 154 + goto err_undo; 155 + } 156 + return 0; 157 + 158 + err_undo: 159 + mutex_lock(&dev->struct_mutex); 160 + filp->f_mapping = old_mapping; 161 + inode->i_mapping = old_mapping; 162 + iput(container_of(dev->dev_mapping, struct inode, i_data)); 163 + dev->dev_mapping = old_mapping; 164 + mutex_unlock(&dev->struct_mutex); 165 + dev->open_count--; 154 166 return retcode; 155 167 } 156 168 
EXPORT_SYMBOL(drm_open);
+1 -1
drivers/gpu/drm/exynos/Kconfig
··· 1 1 config DRM_EXYNOS 2 2 tristate "DRM Support for Samsung SoC EXYNOS Series" 3 - depends on DRM && PLAT_SAMSUNG 3 + depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 4 4 select DRM_KMS_HELPER 5 5 select FB_CFB_FILLRECT 6 6 select FB_CFB_COPYAREA
+1
drivers/gpu/drm/exynos/exynos_drm_connector.c
··· 374 374 exynos_connector->encoder_id = encoder->base.id; 375 375 exynos_connector->manager = manager; 376 376 exynos_connector->dpms = DRM_MODE_DPMS_OFF; 377 + connector->dpms = DRM_MODE_DPMS_OFF; 377 378 connector->encoder = encoder; 378 379 379 380 err = drm_mode_connector_attach_encoder(connector, encoder);
+17 -16
drivers/gpu/drm/exynos/exynos_drm_encoder.c
··· 43 43 * @manager: specific encoder has its own manager to control a hardware 44 44 * appropriately and we can access a hardware drawing on this manager. 45 45 * @dpms: store the encoder dpms value. 46 + * @updated: indicate whether overlay data updating is needed or not. 46 47 */ 47 48 struct exynos_drm_encoder { 48 49 struct drm_crtc *old_crtc; 49 50 struct drm_encoder drm_encoder; 50 51 struct exynos_drm_manager *manager; 51 - int dpms; 52 + int dpms; 53 + bool updated; 52 54 }; 53 55 54 56 static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode) ··· 87 85 switch (mode) { 88 86 case DRM_MODE_DPMS_ON: 89 87 if (manager_ops && manager_ops->apply) 90 - manager_ops->apply(manager->dev); 88 + if (!exynos_encoder->updated) 89 + manager_ops->apply(manager->dev); 90 + 91 91 exynos_drm_connector_power(encoder, mode); 92 92 exynos_encoder->dpms = mode; 93 93 break; ··· 98 94 case DRM_MODE_DPMS_OFF: 99 95 exynos_drm_connector_power(encoder, mode); 100 96 exynos_encoder->dpms = mode; 97 + exynos_encoder->updated = false; 101 98 break; 102 99 default: 103 100 DRM_ERROR("unspecified mode %d\n", mode); ··· 210 205 211 206 static void exynos_drm_encoder_commit(struct drm_encoder *encoder) 212 207 { 213 - struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 208 + struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 209 + struct exynos_drm_manager *manager = exynos_encoder->manager; 214 210 struct exynos_drm_manager_ops *manager_ops = manager->ops; 215 211 216 212 DRM_DEBUG_KMS("%s\n", __FILE__); 217 213 218 214 if (manager_ops && manager_ops->commit) 219 215 manager_ops->commit(manager->dev); 216 + 217 + /* 218 + * this will avoid one issue that overlay data is updated to 219 + * real hardware two times. 220 + * And this variable will be used to check if the data was 221 + * already updated or not by exynos_drm_encoder_dpms function. 
222 + */ 223 + exynos_encoder->updated = true; 220 224 } 221 225 222 226 static void exynos_drm_encoder_disable(struct drm_encoder *encoder) ··· 413 399 414 400 if (manager_ops && manager_ops->dpms) 415 401 manager_ops->dpms(manager->dev, mode); 416 - 417 - /* 418 - * set current mode to new one so that data aren't updated into 419 - * registers by drm_helper_connector_dpms two times. 420 - * 421 - * in case that drm_crtc_helper_set_mode() is called, 422 - * overlay_ops->commit() and manager_ops->commit() callbacks 423 - * can be called two times, first at drm_crtc_helper_set_mode() 424 - * and second at drm_helper_connector_dpms(). 425 - * so with this setting, when drm_helper_connector_dpms() is called 426 - * encoder->funcs->dpms() will be ignored. 427 - */ 428 - exynos_encoder->dpms = mode; 429 402 430 403 /* 431 404 * if this condition is ok then it means that the crtc is already
+1 -1
drivers/gpu/drm/exynos/exynos_mixer.c
··· 1142 1142 const struct of_device_id *match; 1143 1143 match = of_match_node(of_match_ptr(mixer_match_types), 1144 1144 pdev->dev.of_node); 1145 - drv = match->data; 1145 + drv = (struct mixer_drv_data *)match->data; 1146 1146 } else { 1147 1147 drv = (struct mixer_drv_data *) 1148 1148 platform_get_device_id(pdev)->driver_data;
+2 -1
drivers/gpu/drm/i915/i915_dma.c
··· 1505 1505 goto put_gmch; 1506 1506 } 1507 1507 1508 - i915_kick_out_firmware_fb(dev_priv); 1508 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 1509 + i915_kick_out_firmware_fb(dev_priv); 1509 1510 1510 1511 pci_set_master(dev->pdev); 1511 1512
+1 -1
drivers/gpu/drm/i915/intel_crt.c
··· 729 729 730 730 crt->base.type = INTEL_OUTPUT_ANALOG; 731 731 crt->base.cloneable = true; 732 - if (IS_HASWELL(dev)) 732 + if (IS_HASWELL(dev) || IS_I830(dev)) 733 733 crt->base.crtc_mask = (1 << 0); 734 734 else 735 735 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+11 -3
drivers/gpu/drm/i915/intel_overlay.c
··· 341 341 intel_ring_emit(ring, flip_addr); 342 342 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 343 343 /* turn overlay off */ 344 - intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 345 - intel_ring_emit(ring, flip_addr); 346 - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 344 + if (IS_I830(dev)) { 345 + /* Workaround: Don't disable the overlay fully, since otherwise 346 + * it dies on the next OVERLAY_ON cmd. */ 347 + intel_ring_emit(ring, MI_NOOP); 348 + intel_ring_emit(ring, MI_NOOP); 349 + intel_ring_emit(ring, MI_NOOP); 350 + } else { 351 + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 352 + intel_ring_emit(ring, flip_addr); 353 + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 354 + } 347 355 intel_ring_advance(ring); 348 356 349 357 return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+1 -1
drivers/gpu/drm/i915/intel_panel.c
··· 435 435 props.type = BACKLIGHT_RAW; 436 436 props.max_brightness = _intel_panel_get_max_backlight(dev); 437 437 if (props.max_brightness == 0) { 438 - DRM_ERROR("Failed to get maximum backlight value\n"); 438 + DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n"); 439 439 return -ENODEV; 440 440 } 441 441 dev_priv->backlight =
+42 -20
drivers/gpu/drm/i915/intel_sdvo.c
··· 894 894 } 895 895 #endif 896 896 897 + static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, 898 + unsigned if_index, uint8_t tx_rate, 899 + uint8_t *data, unsigned length) 900 + { 901 + uint8_t set_buf_index[2] = { if_index, 0 }; 902 + uint8_t hbuf_size, tmp[8]; 903 + int i; 904 + 905 + if (!intel_sdvo_set_value(intel_sdvo, 906 + SDVO_CMD_SET_HBUF_INDEX, 907 + set_buf_index, 2)) 908 + return false; 909 + 910 + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, 911 + &hbuf_size, 1)) 912 + return false; 913 + 914 + /* Buffer size is 0 based, hooray! */ 915 + hbuf_size++; 916 + 917 + DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", 918 + if_index, length, hbuf_size); 919 + 920 + for (i = 0; i < hbuf_size; i += 8) { 921 + memset(tmp, 0, 8); 922 + if (i < length) 923 + memcpy(tmp, data + i, min_t(unsigned, 8, length - i)); 924 + 925 + if (!intel_sdvo_set_value(intel_sdvo, 926 + SDVO_CMD_SET_HBUF_DATA, 927 + tmp, 8)) 928 + return false; 929 + } 930 + 931 + return intel_sdvo_set_value(intel_sdvo, 932 + SDVO_CMD_SET_HBUF_TXRATE, 933 + &tx_rate, 1); 934 + } 935 + 897 936 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) 898 937 { 899 938 struct dip_infoframe avi_if = { ··· 940 901 .ver = DIP_VERSION_AVI, 941 902 .len = DIP_LEN_AVI, 942 903 }; 943 - uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; 944 - uint8_t set_buf_index[2] = { 1, 0 }; 945 904 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; 946 - uint64_t *data = (uint64_t *)sdvo_data; 947 - unsigned i; 948 905 949 906 intel_dip_infoframe_csum(&avi_if); 950 907 ··· 950 915 sdvo_data[3] = avi_if.checksum; 951 916 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); 952 917 953 - if (!intel_sdvo_set_value(intel_sdvo, 954 - SDVO_CMD_SET_HBUF_INDEX, 955 - set_buf_index, 2)) 956 - return false; 957 - 958 - for (i = 0; i < sizeof(sdvo_data); i += 8) { 959 - if (!intel_sdvo_set_value(intel_sdvo, 960 - SDVO_CMD_SET_HBUF_DATA, 961 - data, 8)) 962 - return 
false; 963 - data++; 964 - } 965 - 966 - return intel_sdvo_set_value(intel_sdvo, 967 - SDVO_CMD_SET_HBUF_TXRATE, 968 - &tx_rate, 1); 918 + return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, 919 + SDVO_HBUF_TX_VSYNC, 920 + sdvo_data, sizeof(sdvo_data)); 969 921 } 970 922 971 923 static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+2
drivers/gpu/drm/i915/intel_sdvo_regs.h
··· 708 708 #define SDVO_CMD_SET_AUDIO_STAT 0x91 709 709 #define SDVO_CMD_GET_AUDIO_STAT 0x92 710 710 #define SDVO_CMD_SET_HBUF_INDEX 0x93 711 + #define SDVO_HBUF_INDEX_ELD 0 712 + #define SDVO_HBUF_INDEX_AVI_IF 1 711 713 #define SDVO_CMD_GET_HBUF_INDEX 0x94 712 714 #define SDVO_CMD_GET_HBUF_INFO 0x95 713 715 #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+6 -3
drivers/gpu/drm/nouveau/core/core/mm.c
··· 218 218 node = kzalloc(sizeof(*node), GFP_KERNEL); 219 219 if (!node) 220 220 return -ENOMEM; 221 - node->offset = roundup(offset, mm->block_size); 222 - node->length = rounddown(offset + length, mm->block_size) - node->offset; 221 + 222 + if (length) { 223 + node->offset = roundup(offset, mm->block_size); 224 + node->length = rounddown(offset + length, mm->block_size); 225 + node->length -= node->offset; 226 + } 223 227 224 228 list_add_tail(&node->nl_entry, &mm->nodes); 225 229 list_add_tail(&node->fl_entry, &mm->free); 226 230 mm->heap_nodes++; 227 - mm->heap_size += length; 228 231 return 0; 229 232 } 230 233
+12 -8
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
··· 22 22 * Authors: Ben Skeggs 23 23 */ 24 24 25 + #include <subdev/bar.h> 26 + 25 27 #include <engine/software.h> 26 28 #include <engine/disp.h> 27 29 ··· 39 37 static void 40 38 nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) 41 39 { 40 + struct nouveau_bar *bar = nouveau_bar(priv); 42 41 struct nouveau_disp *disp = &priv->base; 43 42 struct nouveau_software_chan *chan, *temp; 44 43 unsigned long flags; ··· 49 46 if (chan->vblank.crtc != crtc) 50 47 continue; 51 48 52 - nv_wr32(priv, 0x001704, chan->vblank.channel); 53 - nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); 54 - 55 49 if (nv_device(priv)->chipset == 0x50) { 50 + nv_wr32(priv, 0x001704, chan->vblank.channel); 51 + nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); 52 + bar->flush(bar); 56 53 nv_wr32(priv, 0x001570, chan->vblank.offset); 57 54 nv_wr32(priv, 0x001574, chan->vblank.value); 58 55 } else { 59 - if (nv_device(priv)->chipset >= 0xc0) { 60 - nv_wr32(priv, 0x06000c, 61 - upper_32_bits(chan->vblank.offset)); 62 - } 63 - nv_wr32(priv, 0x060010, chan->vblank.offset); 56 + nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); 57 + bar->flush(bar); 58 + nv_wr32(priv, 0x06000c, 59 + upper_32_bits(chan->vblank.offset)); 60 + nv_wr32(priv, 0x060010, 61 + lower_32_bits(chan->vblank.offset)); 64 62 nv_wr32(priv, 0x060014, chan->vblank.value); 65 63 } 66 64
+2 -2
drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
··· 156 156 static int 157 157 nv40_graph_context_fini(struct nouveau_object *object, bool suspend) 158 158 { 159 - struct nv04_graph_priv *priv = (void *)object->engine; 160 - struct nv04_graph_chan *chan = (void *)object; 159 + struct nv40_graph_priv *priv = (void *)object->engine; 160 + struct nv40_graph_chan *chan = (void *)object; 161 161 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4; 162 162 int ret = 0; 163 163
+1 -1
drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
··· 38 38 }; 39 39 40 40 struct nv40_mpeg_chan { 41 - struct nouveau_mpeg base; 41 + struct nouveau_mpeg_chan base; 42 42 }; 43 43 44 44 /*******************************************************************************
-1
drivers/gpu/drm/nouveau/core/include/core/mm.h
··· 19 19 20 20 u32 block_size; 21 21 int heap_nodes; 22 - u32 heap_size; 23 22 }; 24 23 25 24 int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+4 -6
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
··· 219 219 ((priv->base.ram.size & 0x000000ff) << 32); 220 220 221 221 tags = nv_rd32(priv, 0x100320); 222 - if (tags) { 223 - ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1); 224 - if (ret) 225 - return ret; 222 + ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1); 223 + if (ret) 224 + return ret; 226 225 227 - nv_debug(priv, "%d compression tags\n", tags); 228 - } 226 + nv_debug(priv, "%d compression tags\n", tags); 229 227 230 228 size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail; 231 229 switch (device->chipset) {
+1 -1
drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
··· 292 292 case DCB_I2C_NVIO_BIT: 293 293 port->drive = info.drive & 0x0f; 294 294 if (device->card_type < NV_D0) { 295 - if (info.drive >= ARRAY_SIZE(nv50_i2c_port)) 295 + if (port->drive >= ARRAY_SIZE(nv50_i2c_port)) 296 296 break; 297 297 port->drive = nv50_i2c_port[port->drive]; 298 298 port->sense = port->drive;
+1 -1
drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
··· 67 67 static void 68 68 nv41_vm_flush(struct nouveau_vm *vm) 69 69 { 70 - struct nv04_vm_priv *priv = (void *)vm->vmm; 70 + struct nv04_vmmgr_priv *priv = (void *)vm->vmm; 71 71 72 72 mutex_lock(&nv_subdev(priv)->mutex); 73 73 nv_wr32(priv, 0x100810, 0x00000022);
+1 -1
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 355 355 * valid - it's not (rh#613284) 356 356 */ 357 357 if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { 358 - if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) { 358 + if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) { 359 359 status = connector_status_connected; 360 360 goto out; 361 361 }
+21 -15
drivers/gpu/drm/nouveau/nouveau_display.c
··· 290 290 struct nouveau_drm *drm = nouveau_drm(dev); 291 291 struct nouveau_disp *pdisp = nouveau_disp(drm->device); 292 292 struct nouveau_display *disp; 293 + u32 pclass = dev->pdev->class >> 8; 293 294 int ret, gen; 294 295 295 296 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); ··· 361 360 drm_kms_helper_poll_init(dev); 362 361 drm_kms_helper_poll_disable(dev); 363 362 364 - if (nv_device(drm->device)->card_type < NV_50) 365 - ret = nv04_display_create(dev); 366 - else 367 - if (nv_device(drm->device)->card_type < NV_D0) 368 - ret = nv50_display_create(dev); 369 - else 370 - ret = nvd0_display_create(dev); 371 - if (ret) 372 - goto disp_create_err; 373 - 374 - if (dev->mode_config.num_crtc) { 375 - ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 363 + if (nouveau_modeset == 1 || 364 + (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) { 365 + if (nv_device(drm->device)->card_type < NV_50) 366 + ret = nv04_display_create(dev); 367 + else 368 + if (nv_device(drm->device)->card_type < NV_D0) 369 + ret = nv50_display_create(dev); 370 + else 371 + ret = nvd0_display_create(dev); 376 372 if (ret) 377 - goto vblank_err; 373 + goto disp_create_err; 374 + 375 + if (dev->mode_config.num_crtc) { 376 + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 377 + if (ret) 378 + goto vblank_err; 379 + } 380 + 381 + nouveau_backlight_init(dev); 378 382 } 379 383 380 - nouveau_backlight_init(dev); 381 384 return 0; 382 385 383 386 vblank_err: ··· 400 395 nouveau_backlight_exit(dev); 401 396 drm_vblank_cleanup(dev); 402 397 403 - disp->dtor(dev); 398 + if (disp->dtor) 399 + disp->dtor(dev); 404 400 405 401 drm_kms_helper_poll_fini(dev); 406 402 drm_mode_config_cleanup(dev);
+21 -15
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 63 63 static int nouveau_noaccel = 0; 64 64 module_param_named(noaccel, nouveau_noaccel, int, 0400); 65 65 66 - MODULE_PARM_DESC(modeset, "enable driver"); 67 - static int nouveau_modeset = -1; 66 + MODULE_PARM_DESC(modeset, "enable driver (default: auto, " 67 + "0 = disabled, 1 = enabled, 2 = headless)"); 68 + int nouveau_modeset = -1; 68 69 module_param_named(modeset, nouveau_modeset, int, 0400); 69 70 70 71 static struct drm_driver driver; ··· 364 363 365 364 nouveau_pm_fini(dev); 366 365 367 - nouveau_display_fini(dev); 366 + if (dev->mode_config.num_crtc) 367 + nouveau_display_fini(dev); 368 368 nouveau_display_destroy(dev); 369 369 370 370 nouveau_irq_fini(dev); ··· 405 403 pm_state.event == PM_EVENT_PRETHAW) 406 404 return 0; 407 405 408 - NV_INFO(drm, "suspending fbcon...\n"); 409 - nouveau_fbcon_set_suspend(dev, 1); 406 + if (dev->mode_config.num_crtc) { 407 + NV_INFO(drm, "suspending fbcon...\n"); 408 + nouveau_fbcon_set_suspend(dev, 1); 410 409 411 - NV_INFO(drm, "suspending display...\n"); 412 - ret = nouveau_display_suspend(dev); 413 - if (ret) 414 - return ret; 410 + NV_INFO(drm, "suspending display...\n"); 411 + ret = nouveau_display_suspend(dev); 412 + if (ret) 413 + return ret; 414 + } 415 415 416 416 NV_INFO(drm, "evicting buffers...\n"); 417 417 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); ··· 449 445 nouveau_client_init(&cli->base); 450 446 } 451 447 452 - NV_INFO(drm, "resuming display...\n"); 453 - nouveau_display_resume(dev); 448 + if (dev->mode_config.num_crtc) { 449 + NV_INFO(drm, "resuming display...\n"); 450 + nouveau_display_resume(dev); 451 + } 454 452 return ret; 455 453 } 456 454 ··· 492 486 nouveau_irq_postinstall(dev); 493 487 nouveau_pm_resume(dev); 494 488 495 - NV_INFO(drm, "resuming display...\n"); 496 - nouveau_display_resume(dev); 489 + if (dev->mode_config.num_crtc) { 490 + NV_INFO(drm, "resuming display...\n"); 491 + nouveau_display_resume(dev); 492 + } 497 493 return 0; 498 494 } 499 495 ··· 670 662 #ifdef 
CONFIG_VGA_CONSOLE 671 663 if (vgacon_text_force()) 672 664 nouveau_modeset = 0; 673 - else 674 665 #endif 675 - nouveau_modeset = 1; 676 666 } 677 667 678 668 if (!nouveau_modeset)
+2
drivers/gpu/drm/nouveau/nouveau_drm.h
··· 141 141 nv_info((cli), fmt, ##args); \ 142 142 } while (0) 143 143 144 + extern int nouveau_modeset; 145 + 144 146 #endif
+9 -7
drivers/gpu/drm/nouveau/nouveau_irq.c
··· 61 61 62 62 nv_subdev(pmc)->intr(nv_subdev(pmc)); 63 63 64 - if (device->card_type >= NV_D0) { 65 - if (nv_rd32(device, 0x000100) & 0x04000000) 66 - nvd0_display_intr(dev); 67 - } else 68 - if (device->card_type >= NV_50) { 69 - if (nv_rd32(device, 0x000100) & 0x04000000) 70 - nv50_display_intr(dev); 64 + if (dev->mode_config.num_crtc) { 65 + if (device->card_type >= NV_D0) { 66 + if (nv_rd32(device, 0x000100) & 0x04000000) 67 + nvd0_display_intr(dev); 68 + } else 69 + if (device->card_type >= NV_50) { 70 + if (nv_rd32(device, 0x000100) & 0x04000000) 71 + nv50_display_intr(dev); 72 + } 71 73 } 72 74 73 75 return IRQ_HANDLED;
+8 -8
drivers/gpu/drm/nouveau/nv04_dac.c
··· 220 220 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); 221 221 222 222 if (blue == 0x18) { 223 - NV_INFO(drm, "Load detected on head A\n"); 223 + NV_DEBUG(drm, "Load detected on head A\n"); 224 224 return connector_status_connected; 225 225 } 226 226 ··· 338 338 339 339 if (nv17_dac_sample_load(encoder) & 340 340 NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { 341 - NV_INFO(drm, "Load detected on output %c\n", 342 - '@' + ffs(dcb->or)); 341 + NV_DEBUG(drm, "Load detected on output %c\n", 342 + '@' + ffs(dcb->or)); 343 343 return connector_status_connected; 344 344 } else { 345 345 return connector_status_disconnected; ··· 413 413 414 414 helper->dpms(encoder, DRM_MODE_DPMS_ON); 415 415 416 - NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", 417 - drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 418 - nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 416 + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", 417 + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 418 + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 419 419 } 420 420 421 421 void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) ··· 461 461 return; 462 462 nv_encoder->last_dpms = mode; 463 463 464 - NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n", 465 - mode, nv_encoder->dcb->index); 464 + NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n", 465 + mode, nv_encoder->dcb->index); 466 466 467 467 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); 468 468 }
+7 -7
drivers/gpu/drm/nouveau/nv04_dfp.c
··· 476 476 477 477 helper->dpms(encoder, DRM_MODE_DPMS_ON); 478 478 479 - NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", 480 - drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 481 - nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 479 + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", 480 + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 481 + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 482 482 } 483 483 484 484 static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) ··· 520 520 return; 521 521 nv_encoder->last_dpms = mode; 522 522 523 - NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n", 524 - mode, nv_encoder->dcb->index); 523 + NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n", 524 + mode, nv_encoder->dcb->index); 525 525 526 526 if (was_powersaving && is_powersaving_dpms(mode)) 527 527 return; ··· 565 565 return; 566 566 nv_encoder->last_dpms = mode; 567 567 568 - NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n", 569 - mode, nv_encoder->dcb->index); 568 + NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n", 569 + mode, nv_encoder->dcb->index); 570 570 571 571 nv04_dfp_update_backlight(encoder, mode); 572 572 nv04_dfp_update_fp_control(encoder, mode);
+4 -5
drivers/gpu/drm/nouveau/nv04_tv.c
··· 75 75 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; 76 76 uint8_t crtc1A; 77 77 78 - NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n", 79 - mode, nv_encoder->dcb->index); 78 + NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n", 79 + mode, nv_encoder->dcb->index); 80 80 81 81 state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); 82 82 ··· 167 167 168 168 helper->dpms(encoder, DRM_MODE_DPMS_ON); 169 169 170 - NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", 171 - drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, 172 - '@' + ffs(nv_encoder->dcb->or)); 170 + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", 171 + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 173 172 } 174 173 175 174 static void nv04_tv_destroy(struct drm_encoder *encoder)
+31 -23
drivers/gpu/drm/radeon/atombios_crtc.c
··· 1696 1696 return ATOM_PPLL2; 1697 1697 DRM_ERROR("unable to allocate a PPLL\n"); 1698 1698 return ATOM_PPLL_INVALID; 1699 - } else { 1700 - if (ASIC_IS_AVIVO(rdev)) { 1701 - /* in DP mode, the DP ref clock can come from either PPLL 1702 - * depending on the asic: 1703 - * DCE3: PPLL1 or PPLL2 1704 - */ 1705 - if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { 1706 - /* use the same PPLL for all DP monitors */ 1707 - pll = radeon_get_shared_dp_ppll(crtc); 1708 - if (pll != ATOM_PPLL_INVALID) 1709 - return pll; 1710 - } else { 1711 - /* use the same PPLL for all monitors with the same clock */ 1712 - pll = radeon_get_shared_nondp_ppll(crtc); 1713 - if (pll != ATOM_PPLL_INVALID) 1714 - return pll; 1715 - } 1716 - /* all other cases */ 1717 - pll_in_use = radeon_get_pll_use_mask(crtc); 1699 + } else if (ASIC_IS_AVIVO(rdev)) { 1700 + /* in DP mode, the DP ref clock can come from either PPLL 1701 + * depending on the asic: 1702 + * DCE3: PPLL1 or PPLL2 1703 + */ 1704 + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { 1705 + /* use the same PPLL for all DP monitors */ 1706 + pll = radeon_get_shared_dp_ppll(crtc); 1707 + if (pll != ATOM_PPLL_INVALID) 1708 + return pll; 1709 + } else { 1710 + /* use the same PPLL for all monitors with the same clock */ 1711 + pll = radeon_get_shared_nondp_ppll(crtc); 1712 + if (pll != ATOM_PPLL_INVALID) 1713 + return pll; 1714 + } 1715 + /* all other cases */ 1716 + pll_in_use = radeon_get_pll_use_mask(crtc); 1717 + /* the order shouldn't matter here, but we probably 1718 + * need this until we have atomic modeset 1719 + */ 1720 + if (rdev->flags & RADEON_IS_IGP) { 1718 1721 if (!(pll_in_use & (1 << ATOM_PPLL1))) 1719 1722 return ATOM_PPLL1; 1720 1723 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1721 1724 return ATOM_PPLL2; 1722 - DRM_ERROR("unable to allocate a PPLL\n"); 1723 - return ATOM_PPLL_INVALID; 1724 1725 } else { 1725 - /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */ 
1726 - return radeon_crtc->crtc_id; 1726 + if (!(pll_in_use & (1 << ATOM_PPLL2))) 1727 + return ATOM_PPLL2; 1728 + if (!(pll_in_use & (1 << ATOM_PPLL1))) 1729 + return ATOM_PPLL1; 1727 1730 } 1731 + DRM_ERROR("unable to allocate a PPLL\n"); 1732 + return ATOM_PPLL_INVALID; 1733 + } else { 1734 + /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */ 1735 + return radeon_crtc->crtc_id; 1728 1736 } 1729 1737 } 1730 1738
+1 -1
drivers/gpu/drm/radeon/evergreen.c
··· 1372 1372 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); 1373 1373 1374 1374 for (i = 0; i < rdev->num_crtc; i++) { 1375 - if (save->crtc_enabled) { 1375 + if (save->crtc_enabled[i]) { 1376 1376 if (ASIC_IS_DCE6(rdev)) { 1377 1377 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 1378 1378 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+4 -1
drivers/gpu/drm/radeon/evergreen_cs.c
··· 264 264 /* macro tile width & height */ 265 265 palign = (8 * surf->bankw * track->npipes) * surf->mtilea; 266 266 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; 267 - mtileb = (palign / 8) * (halign / 8) * tileb;; 267 + mtileb = (palign / 8) * (halign / 8) * tileb; 268 268 mtile_pr = surf->nbx / palign; 269 269 mtile_ps = (mtile_pr * surf->nby) / halign; 270 270 surf->layer_size = mtile_ps * mtileb * slice_pt; ··· 2725 2725 /* check config regs */ 2726 2726 switch (reg) { 2727 2727 case GRBM_GFX_INDEX: 2728 + case CP_STRMOUT_CNTL: 2729 + case CP_COHER_CNTL: 2730 + case CP_COHER_SIZE: 2728 2731 case VGT_VTX_VECT_EJECT_REG: 2729 2732 case VGT_CACHE_INVALIDATION: 2730 2733 case VGT_GS_VERTEX_REUSE:
+4
drivers/gpu/drm/radeon/evergreend.h
··· 91 91 #define FB_READ_EN (1 << 0) 92 92 #define FB_WRITE_EN (1 << 1) 93 93 94 + #define CP_STRMOUT_CNTL 0x84FC 95 + 96 + #define CP_COHER_CNTL 0x85F0 97 + #define CP_COHER_SIZE 0x85F4 94 98 #define CP_COHER_BASE 0x85F8 95 99 #define CP_STALLED_STAT1 0x8674 96 100 #define CP_STALLED_STAT2 0x8678
+2 -2
drivers/gpu/drm/radeon/radeon_atpx_handler.c
··· 352 352 } 353 353 354 354 /** 355 - * radeon_atpx_switchto - switch to the requested GPU 355 + * radeon_atpx_power_state - power down/up the requested GPU 356 356 * 357 - * @id: GPU to switch to 357 + * @id: GPU to power down/up 358 358 * @state: requested power state (0 = off, 1 = on) 359 359 * 360 360 * Execute the necessary ATPX function to power down/up the discrete GPU
+21 -7
drivers/gpu/drm/radeon/radeon_connectors.c
··· 941 941 struct drm_mode_object *obj; 942 942 int i; 943 943 enum drm_connector_status ret = connector_status_disconnected; 944 - bool dret = false; 944 + bool dret = false, broken_edid = false; 945 945 946 946 if (!force && radeon_check_hpd_status_unchanged(connector)) 947 947 return connector->status; ··· 965 965 ret = connector_status_disconnected; 966 966 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); 967 967 radeon_connector->ddc_bus = NULL; 968 + } else { 969 + ret = connector_status_connected; 970 + broken_edid = true; /* defer use_digital to later */ 968 971 } 969 972 } else { 970 973 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); ··· 1050 1047 1051 1048 encoder_funcs = encoder->helper_private; 1052 1049 if (encoder_funcs->detect) { 1053 - if (ret != connector_status_connected) { 1054 - ret = encoder_funcs->detect(encoder, connector); 1055 - if (ret == connector_status_connected) { 1056 - radeon_connector->use_digital = false; 1050 + if (!broken_edid) { 1051 + if (ret != connector_status_connected) { 1052 + /* deal with analog monitors without DDC */ 1053 + ret = encoder_funcs->detect(encoder, connector); 1054 + if (ret == connector_status_connected) { 1055 + radeon_connector->use_digital = false; 1056 + } 1057 + if (ret != connector_status_disconnected) 1058 + radeon_connector->detected_by_load = true; 1057 1059 } 1058 - if (ret != connector_status_disconnected) 1059 - radeon_connector->detected_by_load = true; 1060 + } else { 1061 + enum drm_connector_status lret; 1062 + /* assume digital unless load detected otherwise */ 1063 + radeon_connector->use_digital = true; 1064 + lret = encoder_funcs->detect(encoder, connector); 1065 + DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret); 1066 + if (lret == connector_status_connected) 1067 + radeon_connector->use_digital = false; 1060 1068 } 1061 1069 break; 1062 1070 }
+13 -2
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
··· 295 295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 296 296 struct drm_device *dev = crtc->dev; 297 297 struct radeon_device *rdev = dev->dev_private; 298 + uint32_t crtc_ext_cntl = 0; 298 299 uint32_t mask; 299 300 300 301 if (radeon_crtc->crtc_id) ··· 308 307 RADEON_CRTC_VSYNC_DIS | 309 308 RADEON_CRTC_HSYNC_DIS); 310 309 310 + /* 311 + * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC. 312 + * Therefore it is set in the DAC DMPS function. 313 + * This is different for GPU's with a single CRTC but a primary and a 314 + * TV DAC: here it controls the single CRTC no matter where it is 315 + * routed. Therefore we set it here. 316 + */ 317 + if (rdev->flags & RADEON_SINGLE_CRTC) 318 + crtc_ext_cntl = RADEON_CRTC_CRT_ON; 319 + 311 320 switch (mode) { 312 321 case DRM_MODE_DPMS_ON: 313 322 radeon_crtc->enabled = true; ··· 328 317 else { 329 318 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | 330 319 RADEON_CRTC_DISP_REQ_EN_B)); 331 - WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); 320 + WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); 332 321 } 333 322 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); 334 323 radeon_crtc_load_lut(crtc); ··· 342 331 else { 343 332 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | 344 333 RADEON_CRTC_DISP_REQ_EN_B)); 345 - WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); 334 + WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl)); 346 335 } 347 336 radeon_crtc->enabled = false; 348 337 /* adjust pm to dpms changes AFTER disabling crtcs */
+147 -28
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
··· 537 537 break; 538 538 } 539 539 540 - WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); 540 + /* handled in radeon_crtc_dpms() */ 541 + if (!(rdev->flags & RADEON_SINGLE_CRTC)) 542 + WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); 541 543 WREG32(RADEON_DAC_CNTL, dac_cntl); 542 544 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); 543 545 ··· 664 662 665 663 if (ASIC_IS_R300(rdev)) 666 664 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); 665 + else if (ASIC_IS_RV100(rdev)) 666 + tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT); 667 667 else 668 668 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); 669 669 ··· 675 671 tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; 676 672 WREG32(RADEON_DAC_CNTL, tmp); 677 673 674 + tmp = dac_macro_cntl; 678 675 tmp &= ~(RADEON_DAC_PDWN_R | 679 676 RADEON_DAC_PDWN_G | 680 677 RADEON_DAC_PDWN_B); ··· 1097 1092 } else { 1098 1093 if (is_tv) 1099 1094 WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); 1100 - else 1095 + /* handled in radeon_crtc_dpms() */ 1096 + else if (!(rdev->flags & RADEON_SINGLE_CRTC)) 1101 1097 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 1102 1098 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 1103 1099 } ··· 1422 1416 return found; 1423 1417 } 1424 1418 1419 + static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder, 1420 + struct drm_connector *connector) 1421 + { 1422 + struct drm_device *dev = encoder->dev; 1423 + struct radeon_device *rdev = dev->dev_private; 1424 + uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl; 1425 + uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c; 1426 + uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f; 1427 + uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp; 1428 + uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid; 1429 + bool found = false; 1430 + int i; 1431 + 1432 + /* save the regs we need */ 1433 + gpio_monid = RREG32(RADEON_GPIO_MONID); 1434 + fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 1435 + 
disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); 1436 + crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); 1437 + disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A); 1438 + disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B); 1439 + disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C); 1440 + disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D); 1441 + disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E); 1442 + disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F); 1443 + crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP); 1444 + crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP); 1445 + crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID); 1446 + crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID); 1447 + 1448 + tmp = RREG32(RADEON_GPIO_MONID); 1449 + tmp &= ~RADEON_GPIO_A_0; 1450 + WREG32(RADEON_GPIO_MONID, tmp); 1451 + 1452 + WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON | 1453 + RADEON_FP2_PANEL_FORMAT | 1454 + R200_FP2_SOURCE_SEL_TRANS_UNIT | 1455 + RADEON_FP2_DVO_EN | 1456 + R200_FP2_DVO_RATE_SEL_SDR)); 1457 + 1458 + WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX | 1459 + RADEON_DISP_TRANS_MATRIX_GRAPHICS)); 1460 + 1461 + WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN | 1462 + RADEON_CRTC2_DISP_REQ_EN_B)); 1463 + 1464 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000); 1465 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0); 1466 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000); 1467 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0); 1468 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000); 1469 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0); 1470 + 1471 + WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008); 1472 + WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800); 1473 + WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001); 1474 + WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080); 1475 + 1476 + for (i = 0; i < 200; i++) { 1477 + tmp = RREG32(RADEON_GPIO_MONID); 1478 + if (tmp & 
RADEON_GPIO_Y_0) 1479 + found = true; 1480 + 1481 + if (found) 1482 + break; 1483 + 1484 + if (!drm_can_sleep()) 1485 + mdelay(1); 1486 + else 1487 + msleep(1); 1488 + } 1489 + 1490 + /* restore the regs we used */ 1491 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a); 1492 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b); 1493 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c); 1494 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d); 1495 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e); 1496 + WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f); 1497 + WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp); 1498 + WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp); 1499 + WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid); 1500 + WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid); 1501 + WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 1502 + WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); 1503 + WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); 1504 + WREG32(RADEON_GPIO_MONID, gpio_monid); 1505 + 1506 + return found; 1507 + } 1508 + 1425 1509 static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, 1426 1510 struct drm_connector *connector) 1427 1511 { 1428 1512 struct drm_device *dev = encoder->dev; 1429 1513 struct radeon_device *rdev = dev->dev_private; 1430 - uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; 1431 - uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp; 1514 + uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl; 1515 + uint32_t gpiopad_a = 0, pixclks_cntl, tmp; 1516 + uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0; 1432 1517 enum drm_connector_status found = connector_status_disconnected; 1433 1518 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1434 1519 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; ··· 1556 1459 return 
connector_status_disconnected; 1557 1460 } 1558 1461 1462 + /* R200 uses an external DAC for secondary DAC */ 1463 + if (rdev->family == CHIP_R200) { 1464 + if (radeon_legacy_ext_dac_detect(encoder, connector)) 1465 + found = connector_status_connected; 1466 + return found; 1467 + } 1468 + 1559 1469 /* save the regs we need */ 1560 1470 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); 1561 - gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0; 1562 - disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0; 1563 - disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG); 1564 - crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); 1471 + 1472 + if (rdev->flags & RADEON_SINGLE_CRTC) { 1473 + crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); 1474 + } else { 1475 + if (ASIC_IS_R300(rdev)) { 1476 + gpiopad_a = RREG32(RADEON_GPIOPAD_A); 1477 + disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); 1478 + } else { 1479 + disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 1480 + } 1481 + crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); 1482 + } 1565 1483 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 1566 1484 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); 1567 1485 dac_cntl2 = RREG32(RADEON_DAC_CNTL2); ··· 1585 1473 | RADEON_PIX2CLK_DAC_ALWAYS_ONb); 1586 1474 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 1587 1475 1588 - if (ASIC_IS_R300(rdev)) 1589 - WREG32_P(RADEON_GPIOPAD_A, 1, ~1); 1590 - 1591 - tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK; 1592 - tmp |= RADEON_CRTC2_CRT2_ON | 1593 - (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT); 1594 - 1595 - WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 1596 - 1597 - if (ASIC_IS_R300(rdev)) { 1598 - tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; 1599 - tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; 1600 - WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); 1476 + if (rdev->flags & RADEON_SINGLE_CRTC) { 1477 + tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON; 1478 + WREG32(RADEON_CRTC_EXT_CNTL, tmp); 1601 1479 } else { 1602 - tmp = disp_hw_debug & 
~RADEON_CRT2_DISP1_SEL; 1603 - WREG32(RADEON_DISP_HW_DEBUG, tmp); 1480 + tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK; 1481 + tmp |= RADEON_CRTC2_CRT2_ON | 1482 + (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT); 1483 + WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 1484 + 1485 + if (ASIC_IS_R300(rdev)) { 1486 + WREG32_P(RADEON_GPIOPAD_A, 1, ~1); 1487 + tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; 1488 + tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; 1489 + WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); 1490 + } else { 1491 + tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL; 1492 + WREG32(RADEON_DISP_HW_DEBUG, tmp); 1493 + } 1604 1494 } 1605 1495 1606 1496 tmp = RADEON_TV_DAC_NBLANK | ··· 1644 1530 WREG32(RADEON_DAC_CNTL2, dac_cntl2); 1645 1531 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); 1646 1532 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 1647 - WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 1648 1533 1649 - if (ASIC_IS_R300(rdev)) { 1650 - WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); 1651 - WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); 1534 + if (rdev->flags & RADEON_SINGLE_CRTC) { 1535 + WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); 1652 1536 } else { 1653 - WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1537 + WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 1538 + if (ASIC_IS_R300(rdev)) { 1539 + WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); 1540 + WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); 1541 + } else { 1542 + WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1543 + } 1654 1544 } 1545 + 1655 1546 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); 1656 1547 1657 1548 return found;
+1
drivers/gpu/drm/radeon/si.c
··· 2474 2474 /* check config regs */ 2475 2475 switch (reg) { 2476 2476 case GRBM_GFX_INDEX: 2477 + case CP_STRMOUT_CNTL: 2477 2478 case VGT_VTX_VECT_EJECT_REG: 2478 2479 case VGT_CACHE_INVALIDATION: 2479 2480 case VGT_ESGS_RING_SIZE:
+1
drivers/gpu/drm/radeon/sid.h
··· 424 424 # define RDERR_INT_ENABLE (1 << 0) 425 425 # define GUI_IDLE_INT_ENABLE (1 << 19) 426 426 427 + #define CP_STRMOUT_CNTL 0x84FC 427 428 #define SCRATCH_REG0 0x8500 428 429 #define SCRATCH_REG1 0x8504 429 430 #define SCRATCH_REG2 0x8508
+1 -1
drivers/gpu/drm/udl/udl_drv.h
··· 104 104 105 105 int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, 106 106 const char *front, char **urb_buf_ptr, 107 - u32 byte_offset, u32 byte_width, 107 + u32 byte_offset, u32 device_byte_offset, u32 byte_width, 108 108 int *ident_ptr, int *sent_ptr); 109 109 110 110 int udl_dumb_create(struct drm_file *file_priv,
+7 -5
drivers/gpu/drm/udl/udl_fb.c
··· 114 114 list_for_each_entry(cur, &fbdefio->pagelist, lru) { 115 115 116 116 if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8), 117 - &urb, (char *) info->fix.smem_start, 118 - &cmd, cur->index << PAGE_SHIFT, 119 - PAGE_SIZE, &bytes_identical, &bytes_sent)) 117 + &urb, (char *) info->fix.smem_start, 118 + &cmd, cur->index << PAGE_SHIFT, 119 + cur->index << PAGE_SHIFT, 120 + PAGE_SIZE, &bytes_identical, &bytes_sent)) 120 121 goto error; 121 122 bytes_rendered += PAGE_SIZE; 122 123 } ··· 188 187 for (i = y; i < y + height ; i++) { 189 188 const int line_offset = fb->base.pitches[0] * i; 190 189 const int byte_offset = line_offset + (x * bpp); 191 - 190 + const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp); 192 191 if (udl_render_hline(dev, bpp, &urb, 193 192 (char *) fb->obj->vmapping, 194 - &cmd, byte_offset, width * bpp, 193 + &cmd, byte_offset, dev_byte_offset, 194 + width * bpp, 195 195 &bytes_identical, &bytes_sent)) 196 196 goto error; 197 197 }
+3 -2
drivers/gpu/drm/udl/udl_transfer.c
··· 213 213 */ 214 214 int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, 215 215 const char *front, char **urb_buf_ptr, 216 - u32 byte_offset, u32 byte_width, 216 + u32 byte_offset, u32 device_byte_offset, 217 + u32 byte_width, 217 218 int *ident_ptr, int *sent_ptr) 218 219 { 219 220 const u8 *line_start, *line_end, *next_pixel; 220 - u32 base16 = 0 + (byte_offset / bpp) * 2; 221 + u32 base16 = 0 + (device_byte_offset / bpp) * 2; 221 222 struct urb *urb = *urb_ptr; 222 223 u8 *cmd = *urb_buf_ptr; 223 224 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
··· 306 306 307 307 BUG_ON(!atomic_read(&bo->reserved)); 308 308 BUG_ON(old_mem_type != TTM_PL_VRAM && 309 - old_mem_type != VMW_PL_FLAG_GMR); 309 + old_mem_type != VMW_PL_GMR); 310 310 311 311 pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; 312 312 if (pin)
+5
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 1098 1098 struct drm_device *dev = pci_get_drvdata(pdev); 1099 1099 struct vmw_private *dev_priv = vmw_priv(dev); 1100 1100 1101 + mutex_lock(&dev_priv->hw_mutex); 1102 + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1103 + (void) vmw_read(dev_priv, SVGA_REG_ID); 1104 + mutex_unlock(&dev_priv->hw_mutex); 1105 + 1101 1106 /** 1102 1107 * Reclaim 3d reference held by fbdev and potentially 1103 1108 * start fifo.
+6
drivers/hid/hid-apple.c
··· 522 522 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 523 523 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), 524 524 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 525 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI), 526 + .driver_data = APPLE_HAS_FN }, 527 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO), 528 + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 529 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), 530 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 525 531 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), 526 532 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 527 533 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+6
drivers/hid/hid-core.c
··· 1532 1532 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) }, 1533 1533 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) }, 1534 1534 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) }, 1535 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) }, 1536 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) }, 1537 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) }, 1535 1538 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1536 1539 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1537 1540 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, ··· 2142 2139 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) }, 2143 2140 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) }, 2144 2141 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) }, 2142 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) }, 2143 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) }, 2144 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) }, 2145 2145 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 2146 2146 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 2147 2147 { }
+3
drivers/hid/hid-ids.h
··· 118 118 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 119 119 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 120 120 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 121 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259 122 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a 123 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b 121 124 #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 122 125 #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a 123 126 #define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
+13 -5
drivers/hid/hid-microsoft.c
··· 28 28 #define MS_RDESC 0x08 29 29 #define MS_NOGET 0x10 30 30 #define MS_DUPLICATE_USAGES 0x20 31 + #define MS_RDESC_3K 0x40 31 32 32 - /* 33 - * Microsoft Wireless Desktop Receiver (Model 1028) has 34 - * 'Usage Min/Max' where it ought to have 'Physical Min/Max' 35 - */ 36 33 static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, 37 34 unsigned int *rsize) 38 35 { 39 36 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); 40 37 38 + /* 39 + * Microsoft Wireless Desktop Receiver (Model 1028) has 40 + * 'Usage Min/Max' where it ought to have 'Physical Min/Max' 41 + */ 41 42 if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 && 42 43 rdesc[559] == 0x29) { 43 44 hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); 44 45 rdesc[557] = 0x35; 45 46 rdesc[559] = 0x45; 47 + } 48 + /* the same as above (s/usage/physical/) */ 49 + if ((quirks & MS_RDESC_3K) && *rsize == 106 && 50 + !memcmp((char []){ 0x19, 0x00, 0x29, 0xff }, 51 + &rdesc[94], 4)) { 52 + rdesc[94] = 0x35; 53 + rdesc[96] = 0x45; 46 54 } 47 55 return rdesc; 48 56 } ··· 200 192 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), 201 193 .driver_data = MS_PRESENTER }, 202 194 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K), 203 - .driver_data = MS_ERGONOMY }, 195 + .driver_data = MS_ERGONOMY | MS_RDESC_3K }, 204 196 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0), 205 197 .driver_data = MS_NOGET }, 206 198 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
+2 -3
drivers/hid/hid-multitouch.c
··· 210 210 }, 211 211 { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, 212 212 .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | 213 - MT_QUIRK_SLOT_IS_CONTACTNUMBER, 214 - .maxcontacts = 10 213 + MT_QUIRK_SLOT_IS_CONTACTNUMBER 215 214 }, 216 215 217 216 { .name = MT_CLS_FLATFROG, ··· 420 421 * contact max are global to the report */ 421 422 td->last_field_index = field->index; 422 423 return -1; 423 - } 424 424 case HID_DG_TOUCH: 425 425 /* Legacy devices use TIPSWITCH and not TOUCH. 426 426 * Let's just ignore this field. */ 427 427 return -1; 428 + } 428 429 /* let hid-input decide for the others */ 429 430 return 0; 430 431
+43 -26
drivers/hid/hidraw.c
··· 42 42 static struct class *hidraw_class; 43 43 static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; 44 44 static DEFINE_MUTEX(minors_lock); 45 - static void drop_ref(struct hidraw *hid, int exists_bit); 46 45 47 46 static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) 48 47 { ··· 113 114 __u8 *buf; 114 115 int ret = 0; 115 116 116 - if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { 117 + if (!hidraw_table[minor]) { 117 118 ret = -ENODEV; 118 119 goto out; 119 120 } ··· 261 262 } 262 263 263 264 mutex_lock(&minors_lock); 264 - if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { 265 + if (!hidraw_table[minor]) { 265 266 err = -ENODEV; 266 267 goto out_unlock; 267 268 } ··· 298 299 static int hidraw_release(struct inode * inode, struct file * file) 299 300 { 300 301 unsigned int minor = iminor(inode); 302 + struct hidraw *dev; 301 303 struct hidraw_list *list = file->private_data; 304 + int ret; 305 + int i; 302 306 303 - drop_ref(hidraw_table[minor], 0); 307 + mutex_lock(&minors_lock); 308 + if (!hidraw_table[minor]) { 309 + ret = -ENODEV; 310 + goto unlock; 311 + } 312 + 304 313 list_del(&list->node); 314 + dev = hidraw_table[minor]; 315 + if (!--dev->open) { 316 + if (list->hidraw->exist) { 317 + hid_hw_power(dev->hid, PM_HINT_NORMAL); 318 + hid_hw_close(dev->hid); 319 + } else { 320 + kfree(list->hidraw); 321 + } 322 + } 323 + 324 + for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i) 325 + kfree(list->buffer[i].value); 305 326 kfree(list); 306 - return 0; 327 + ret = 0; 328 + unlock: 329 + mutex_unlock(&minors_lock); 330 + 331 + return ret; 307 332 } 308 333 309 334 static long hidraw_ioctl(struct file *file, unsigned int cmd, ··· 529 506 void hidraw_disconnect(struct hid_device *hid) 530 507 { 531 508 struct hidraw *hidraw = hid->hidraw; 532 - drop_ref(hidraw, 1); 509 + 510 + mutex_lock(&minors_lock); 511 + hidraw->exist = 0; 512 + 513 + device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 514 + 
515 + hidraw_table[hidraw->minor] = NULL; 516 + 517 + if (hidraw->open) { 518 + hid_hw_close(hid); 519 + wake_up_interruptible(&hidraw->wait); 520 + } else { 521 + kfree(hidraw); 522 + } 523 + mutex_unlock(&minors_lock); 533 524 } 534 525 EXPORT_SYMBOL_GPL(hidraw_disconnect); 535 526 ··· 591 554 class_destroy(hidraw_class); 592 555 unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); 593 556 594 - } 595 - 596 - static void drop_ref(struct hidraw *hidraw, int exists_bit) 597 - { 598 - mutex_lock(&minors_lock); 599 - if (exists_bit) { 600 - hid_hw_close(hidraw->hid); 601 - hidraw->exist = 0; 602 - if (hidraw->open) 603 - wake_up_interruptible(&hidraw->wait); 604 - } else { 605 - --hidraw->open; 606 - } 607 - 608 - if (!hidraw->open && !hidraw->exist) { 609 - device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 610 - hidraw_table[hidraw->minor] = NULL; 611 - kfree(hidraw); 612 - } 613 - mutex_unlock(&minors_lock); 614 557 }
+1 -1
drivers/hwmon/asb100.c
··· 32 32 * ASB100-A supports pwm1, while plain ASB100 does not. There is no known 33 33 * way for the driver to tell which one is there. 34 34 * 35 - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 35 + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 36 36 * asb100 7 3 1 4 0x31 0x0694 yes no 37 37 */ 38 38
+2 -2
drivers/hwmon/fam15h_power.c
··· 2 2 * fam15h_power.c - AMD Family 15h processor power monitoring 3 3 * 4 4 * Copyright (c) 2011 Advanced Micro Devices, Inc. 5 - * Author: Andreas Herrmann <andreas.herrmann3@amd.com> 5 + * Author: Andreas Herrmann <herrmann.der.user@googlemail.com> 6 6 * 7 7 * 8 8 * This driver is free software; you can redistribute it and/or ··· 28 28 #include <asm/processor.h> 29 29 30 30 MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor"); 31 - MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>"); 31 + MODULE_AUTHOR("Andreas Herrmann <herrmann.der.user@googlemail.com>"); 32 32 MODULE_LICENSE("GPL"); 33 33 34 34 /* D18F3 */
+2
drivers/hwmon/gpio-fan.c
··· 630 630 .driver = { 631 631 .name = "gpio-fan", 632 632 .pm = GPIO_FAN_PM, 633 + #ifdef CONFIG_OF_GPIO 633 634 .of_match_table = of_match_ptr(of_gpio_fan_match), 635 + #endif 634 636 }, 635 637 }; 636 638
+1
drivers/hwmon/w83627ehf.c
··· 2083 2083 mutex_init(&data->lock); 2084 2084 mutex_init(&data->update_lock); 2085 2085 data->name = w83627ehf_device_names[sio_data->kind]; 2086 + data->bank = 0xff; /* Force initial bank selection */ 2086 2087 platform_set_drvdata(pdev, data); 2087 2088 2088 2089 /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
+1 -1
drivers/hwmon/w83627hf.c
··· 25 25 /* 26 26 * Supports following chips: 27 27 * 28 - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 28 + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 29 29 * w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC) 30 30 * w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) 31 31 * w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC)
+1 -1
drivers/hwmon/w83781d.c
··· 24 24 /* 25 25 * Supports following chips: 26 26 * 27 - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 27 + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 28 28 * as99127f 7 3 0 3 0x31 0x12c3 yes no 29 29 * as99127f rev.2 (type_name = as99127f) 0x31 0x5ca3 yes no 30 30 * w83781d 7 3 0 3 0x10-1 0x5ca3 yes yes
+1 -1
drivers/hwmon/w83791d.c
··· 22 22 /* 23 23 * Supports following chips: 24 24 * 25 - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 25 + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 26 26 * w83791d 10 5 5 3 0x71 0x5ca3 yes no 27 27 * 28 28 * The w83791d chip appears to be part way between the 83781d and the
+1 -1
drivers/hwmon/w83792d.c
··· 31 31 /* 32 32 * Supports following chips: 33 33 * 34 - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 34 + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 35 35 * w83792d 9 7 7 3 0x7a 0x5ca3 yes no 36 36 */ 37 37
+1 -1
drivers/hwmon/w83l786ng.c
··· 20 20 /* 21 21 * Supports following chips: 22 22 * 23 - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 23 + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA 24 24 * w83l786ng 3 2 2 2 0x7b 0x5ca3 yes no 25 25 */ 26 26
+1
drivers/i2c/Makefile
··· 8 8 obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o 9 9 obj-$(CONFIG_I2C_MUX) += i2c-mux.o 10 10 obj-y += algos/ busses/ muxes/ 11 + obj-$(CONFIG_I2C_STUB) += i2c-stub.o 11 12 12 13 ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG 13 14 CFLAGS_i2c-core.o := -Wno-deprecated-declarations
-1
drivers/i2c/busses/Kconfig
··· 81 81 tristate "Intel 82801 (ICH/PCH)" 82 82 depends on PCI 83 83 select CHECK_SIGNATURE if X86 && DMI 84 - select GPIOLIB if I2C_MUX 85 84 help 86 85 If you say yes to this option, support will be included for the Intel 87 86 801 family of mainboard I2C interfaces. Specifically, the following
-1
drivers/i2c/busses/Makefile
··· 85 85 obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o 86 86 obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o 87 87 obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o 88 - obj-$(CONFIG_I2C_STUB) += i2c-stub.o 89 88 obj-$(CONFIG_SCx200_ACB) += scx200_acb.o 90 89 obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o 91 90
+7 -4
drivers/i2c/busses/i2c-i801.c
··· 82 82 #include <linux/wait.h> 83 83 #include <linux/err.h> 84 84 85 - #if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE 85 + #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \ 86 + defined CONFIG_DMI 86 87 #include <linux/gpio.h> 87 88 #include <linux/i2c-mux-gpio.h> 88 89 #include <linux/platform_device.h> ··· 193 192 int len; 194 193 u8 *data; 195 194 196 - #if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE 195 + #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \ 196 + defined CONFIG_DMI 197 197 const struct i801_mux_config *mux_drvdata; 198 198 struct platform_device *mux_pdev; 199 199 #endif ··· 923 921 static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {} 924 922 #endif /* CONFIG_X86 && CONFIG_DMI */ 925 923 926 - #if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE 924 + #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \ 925 + defined CONFIG_DMI 927 926 static struct i801_mux_config i801_mux_config_asus_z8_d12 = { 928 927 .gpio_chip = "gpio_ich", 929 928 .values = { 0x02, 0x03 }, ··· 1062 1059 1063 1060 id = dmi_first_match(mux_dmi_table); 1064 1061 if (id) { 1065 - /* Remove from branch classes from trunk */ 1062 + /* Remove branch classes from trunk */ 1066 1063 mux_config = id->driver_data; 1067 1064 for (i = 0; i < mux_config->n_values; i++) 1068 1065 class &= ~mux_config->classes[i];
+14 -172
drivers/i2c/busses/i2c-mxs.c
··· 1 1 /* 2 2 * Freescale MXS I2C bus driver 3 3 * 4 - * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. 4 + * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K. 5 5 * 6 6 * based on a (non-working) driver which was: 7 7 * ··· 34 34 #include <linux/fsl/mxs-dma.h> 35 35 36 36 #define DRIVER_NAME "mxs-i2c" 37 - 38 - static bool use_pioqueue; 39 - module_param(use_pioqueue, bool, 0); 40 - MODULE_PARM_DESC(use_pioqueue, "Use PIOQUEUE mode for transfer instead of DMA"); 41 37 42 38 #define MXS_I2C_CTRL0 (0x00) 43 39 #define MXS_I2C_CTRL0_SET (0x04) ··· 70 74 MXS_I2C_CTRL1_MASTER_LOSS_IRQ | \ 71 75 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \ 72 76 MXS_I2C_CTRL1_SLAVE_IRQ) 73 - 74 - #define MXS_I2C_QUEUECTRL (0x60) 75 - #define MXS_I2C_QUEUECTRL_SET (0x64) 76 - #define MXS_I2C_QUEUECTRL_CLR (0x68) 77 - 78 - #define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20 79 - #define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04 80 - 81 - #define MXS_I2C_QUEUESTAT (0x70) 82 - #define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000 83 - #define MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK 0x0000001F 84 - 85 - #define MXS_I2C_QUEUECMD (0x80) 86 - 87 - #define MXS_I2C_QUEUEDATA (0x90) 88 - 89 - #define MXS_I2C_DATA (0xa0) 90 77 91 78 92 79 #define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \ ··· 132 153 const struct mxs_i2c_speed_config *speed; 133 154 134 155 /* DMA support components */ 135 - bool dma_mode; 136 156 int dma_channel; 137 157 struct dma_chan *dmach; 138 158 struct mxs_dma_data dma_data; ··· 150 172 writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2); 151 173 152 174 writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); 153 - if (i2c->dma_mode) 154 - writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE, 155 - i2c->regs + MXS_I2C_QUEUECTRL_CLR); 156 - else 157 - writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE, 158 - i2c->regs + MXS_I2C_QUEUECTRL_SET); 159 - } 160 - 161 - static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len, 162 - int flags) 163 - { 164 - u32 data; 165 - 166 - 
writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD); 167 - 168 - data = (addr << 1) | I2C_SMBUS_READ; 169 - writel(data, i2c->regs + MXS_I2C_DATA); 170 - 171 - data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags; 172 - writel(data, i2c->regs + MXS_I2C_QUEUECMD); 173 - } 174 - 175 - static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c, 176 - u8 addr, u8 *buf, int len, int flags) 177 - { 178 - u32 data; 179 - int i, shifts_left; 180 - 181 - data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags; 182 - writel(data, i2c->regs + MXS_I2C_QUEUECMD); 183 - 184 - /* 185 - * We have to copy the slave address (u8) and buffer (arbitrary number 186 - * of u8) into the data register (u32). To achieve that, the u8 are put 187 - * into the MSBs of 'data' which is then shifted for the next u8. When 188 - * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32 189 - * looks like this: 190 - * 191 - * 3 2 1 0 192 - * 10987654|32109876|54321098|76543210 193 - * --------+--------+--------+-------- 194 - * buffer+2|buffer+1|buffer+0|slave_addr 195 - */ 196 - 197 - data = ((addr << 1) | I2C_SMBUS_WRITE) << 24; 198 - 199 - for (i = 0; i < len; i++) { 200 - data >>= 8; 201 - data |= buf[i] << 24; 202 - if ((i & 3) == 2) 203 - writel(data, i2c->regs + MXS_I2C_DATA); 204 - } 205 - 206 - /* Write out the remaining bytes if any */ 207 - shifts_left = 24 - (i & 3) * 8; 208 - if (shifts_left) 209 - writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA); 210 - } 211 - 212 - /* 213 - * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the 214 - * rd_threshold to 1). Couldn't get this to work, though. 
215 - */ 216 - static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c) 217 - { 218 - unsigned long timeout = jiffies + msecs_to_jiffies(1000); 219 - 220 - while (readl(i2c->regs + MXS_I2C_QUEUESTAT) 221 - & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) { 222 - if (time_after(jiffies, timeout)) 223 - return -ETIMEDOUT; 224 - cond_resched(); 225 - } 226 - 227 - return 0; 228 - } 229 - 230 - static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len) 231 - { 232 - u32 uninitialized_var(data); 233 - int i; 234 - 235 - for (i = 0; i < len; i++) { 236 - if ((i & 3) == 0) { 237 - if (mxs_i2c_wait_for_data(i2c)) 238 - return -ETIMEDOUT; 239 - data = readl(i2c->regs + MXS_I2C_QUEUEDATA); 240 - } 241 - buf[i] = data & 0xff; 242 - data >>= 8; 243 - } 244 - 245 - return 0; 246 175 } 247 176 248 177 static void mxs_i2c_dma_finish(struct mxs_i2c_dev *i2c) ··· 317 432 init_completion(&i2c->cmd_complete); 318 433 i2c->cmd_err = 0; 319 434 320 - if (i2c->dma_mode) { 321 - ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); 322 - if (ret) 323 - return ret; 324 - } else { 325 - if (msg->flags & I2C_M_RD) { 326 - mxs_i2c_pioq_setup_read(i2c, msg->addr, 327 - msg->len, flags); 328 - } else { 329 - mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf, 330 - msg->len, flags); 331 - } 332 - 333 - writel(MXS_I2C_QUEUECTRL_QUEUE_RUN, 334 - i2c->regs + MXS_I2C_QUEUECTRL_SET); 335 - } 435 + ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); 436 + if (ret) 437 + return ret; 336 438 337 439 ret = wait_for_completion_timeout(&i2c->cmd_complete, 338 440 msecs_to_jiffies(1000)); 339 441 if (ret == 0) 340 442 goto timeout; 341 443 342 - if (!i2c->dma_mode && !i2c->cmd_err && (msg->flags & I2C_M_RD)) { 343 - ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len); 344 - if (ret) 345 - goto timeout; 346 - } 347 - 348 444 if (i2c->cmd_err == -ENXIO) 349 445 mxs_i2c_reset(i2c); 350 - else 351 - writel(MXS_I2C_QUEUECTRL_QUEUE_RUN, 352 - i2c->regs + MXS_I2C_QUEUECTRL_CLR); 353 446 354 447 dev_dbg(i2c->dev, "Done 
with err=%d\n", i2c->cmd_err); 355 448 ··· 335 472 336 473 timeout: 337 474 dev_dbg(i2c->dev, "Timeout!\n"); 338 - if (i2c->dma_mode) 339 - mxs_i2c_dma_finish(i2c); 475 + mxs_i2c_dma_finish(i2c); 340 476 mxs_i2c_reset(i2c); 341 477 return -ETIMEDOUT; 342 478 } ··· 364 502 { 365 503 struct mxs_i2c_dev *i2c = dev_id; 366 504 u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK; 367 - bool is_last_cmd; 368 505 369 506 if (!stat) 370 507 return IRQ_NONE; ··· 375 514 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ)) 376 515 /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */ 377 516 i2c->cmd_err = -EIO; 378 - 379 - if (!i2c->dma_mode) { 380 - is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) & 381 - MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0; 382 - 383 - if (is_last_cmd || i2c->cmd_err) 384 - complete(&i2c->cmd_complete); 385 - } 386 517 387 518 writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR); 388 519 ··· 409 556 int ret; 410 557 411 558 /* 412 - * The MXS I2C DMA mode is prefered and enabled by default. 413 - * The PIO mode is still supported, but should be used only 414 - * for debuging purposes etc. 415 - */ 416 - i2c->dma_mode = !use_pioqueue; 417 - if (!i2c->dma_mode) 418 - dev_info(dev, "Using PIOQUEUE mode for I2C transfers!\n"); 419 - 420 - /* 421 559 * TODO: This is a temporary solution and should be changed 422 560 * to use generic DMA binding later when the helpers get in. 
423 561 */ 424 562 ret = of_property_read_u32(node, "fsl,i2c-dma-channel", 425 563 &i2c->dma_channel); 426 564 if (ret) { 427 - dev_warn(dev, "Failed to get DMA channel, using PIOQUEUE!\n"); 428 - i2c->dma_mode = 0; 565 + dev_err(dev, "Failed to get DMA channel!\n"); 566 + return -ENODEV; 429 567 } 430 568 431 569 ret = of_property_read_u32(node, "clock-frequency", &speed); ··· 478 634 } 479 635 480 636 /* Setup the DMA */ 481 - if (i2c->dma_mode) { 482 - dma_cap_zero(mask); 483 - dma_cap_set(DMA_SLAVE, mask); 484 - i2c->dma_data.chan_irq = dmairq; 485 - i2c->dmach = dma_request_channel(mask, mxs_i2c_dma_filter, i2c); 486 - if (!i2c->dmach) { 487 - dev_err(dev, "Failed to request dma\n"); 488 - return -ENODEV; 489 - } 637 + dma_cap_zero(mask); 638 + dma_cap_set(DMA_SLAVE, mask); 639 + i2c->dma_data.chan_irq = dmairq; 640 + i2c->dmach = dma_request_channel(mask, mxs_i2c_dma_filter, i2c); 641 + if (!i2c->dmach) { 642 + dev_err(dev, "Failed to request dma\n"); 643 + return -ENODEV; 490 644 } 491 645 492 646 platform_set_drvdata(pdev, i2c);
+7 -2
drivers/i2c/busses/i2c-nomadik.c
··· 644 644 645 645 pm_runtime_get_sync(&dev->adev->dev); 646 646 647 - clk_enable(dev->clk); 647 + status = clk_prepare_enable(dev->clk); 648 + if (status) { 649 + dev_err(&dev->adev->dev, "can't prepare_enable clock\n"); 650 + goto out_clk; 651 + } 648 652 649 653 status = init_hw(dev); 650 654 if (status) ··· 675 671 } 676 672 677 673 out: 678 - clk_disable(dev->clk); 674 + clk_disable_unprepare(dev->clk); 675 + out_clk: 679 676 pm_runtime_put_sync(&dev->adev->dev); 680 677 681 678 dev->busy = false;
+32 -34
drivers/i2c/busses/i2c-stub.c drivers/i2c/i2c-stub.c
··· 2 2 i2c-stub.c - I2C/SMBus chip emulator 3 3 4 4 Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com> 5 - Copyright (C) 2007 Jean Delvare <khali@linux-fr.org> 5 + Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org> 6 6 7 7 This program is free software; you can redistribute it and/or modify 8 8 it under the terms of the GNU General Public License as published by ··· 51 51 static struct stub_chip *stub_chips; 52 52 53 53 /* Return negative errno on error. */ 54 - static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags, 55 - char read_write, u8 command, int size, union i2c_smbus_data * data) 54 + static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, 55 + char read_write, u8 command, int size, union i2c_smbus_data *data) 56 56 { 57 57 s32 ret; 58 58 int i, len; ··· 78 78 case I2C_SMBUS_BYTE: 79 79 if (read_write == I2C_SMBUS_WRITE) { 80 80 chip->pointer = command; 81 - dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " 82 - "wrote 0x%02x.\n", 83 - addr, command); 81 + dev_dbg(&adap->dev, 82 + "smbus byte - addr 0x%02x, wrote 0x%02x.\n", 83 + addr, command); 84 84 } else { 85 85 data->byte = chip->words[chip->pointer++] & 0xff; 86 - dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " 87 - "read 0x%02x.\n", 88 - addr, data->byte); 86 + dev_dbg(&adap->dev, 87 + "smbus byte - addr 0x%02x, read 0x%02x.\n", 88 + addr, data->byte); 89 89 } 90 90 91 91 ret = 0; ··· 95 95 if (read_write == I2C_SMBUS_WRITE) { 96 96 chip->words[command] &= 0xff00; 97 97 chip->words[command] |= data->byte; 98 - dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " 99 - "wrote 0x%02x at 0x%02x.\n", 100 - addr, data->byte, command); 98 + dev_dbg(&adap->dev, 99 + "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n", 100 + addr, data->byte, command); 101 101 } else { 102 102 data->byte = chip->words[command] & 0xff; 103 - dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " 104 - "read 0x%02x at 0x%02x.\n", 105 - addr, data->byte, 
command); 103 + dev_dbg(&adap->dev, 104 + "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n", 105 + addr, data->byte, command); 106 106 } 107 107 chip->pointer = command + 1; 108 108 ··· 112 112 case I2C_SMBUS_WORD_DATA: 113 113 if (read_write == I2C_SMBUS_WRITE) { 114 114 chip->words[command] = data->word; 115 - dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, " 116 - "wrote 0x%04x at 0x%02x.\n", 117 - addr, data->word, command); 115 + dev_dbg(&adap->dev, 116 + "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n", 117 + addr, data->word, command); 118 118 } else { 119 119 data->word = chip->words[command]; 120 - dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, " 121 - "read 0x%04x at 0x%02x.\n", 122 - addr, data->word, command); 120 + dev_dbg(&adap->dev, 121 + "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n", 122 + addr, data->word, command); 123 123 } 124 124 125 125 ret = 0; ··· 132 132 chip->words[command + i] &= 0xff00; 133 133 chip->words[command + i] |= data->block[1 + i]; 134 134 } 135 - dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, " 136 - "wrote %d bytes at 0x%02x.\n", 137 - addr, len, command); 135 + dev_dbg(&adap->dev, 136 + "i2c block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n", 137 + addr, len, command); 138 138 } else { 139 139 for (i = 0; i < len; i++) { 140 140 data->block[1 + i] = 141 141 chip->words[command + i] & 0xff; 142 142 } 143 - dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, " 144 - "read %d bytes at 0x%02x.\n", 145 - addr, len, command); 143 + dev_dbg(&adap->dev, 144 + "i2c block data - addr 0x%02x, read %d bytes at 0x%02x.\n", 145 + addr, len, command); 146 146 } 147 147 148 148 ret = 0; ··· 179 179 int i, ret; 180 180 181 181 if (!chip_addr[0]) { 182 - printk(KERN_ERR "i2c-stub: Please specify a chip address\n"); 182 + pr_err("i2c-stub: Please specify a chip address\n"); 183 183 return -ENODEV; 184 184 } 185 185 186 186 for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) { 187 187 if (chip_addr[i] < 
0x03 || chip_addr[i] > 0x77) { 188 - printk(KERN_ERR "i2c-stub: Invalid chip address " 189 - "0x%02x\n", chip_addr[i]); 188 + pr_err("i2c-stub: Invalid chip address 0x%02x\n", 189 + chip_addr[i]); 190 190 return -EINVAL; 191 191 } 192 192 193 - printk(KERN_INFO "i2c-stub: Virtual chip at 0x%02x\n", 194 - chip_addr[i]); 193 + pr_info("i2c-stub: Virtual chip at 0x%02x\n", chip_addr[i]); 195 194 } 196 195 197 196 /* Allocate memory for all chips at once */ 198 197 stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL); 199 198 if (!stub_chips) { 200 - printk(KERN_ERR "i2c-stub: Out of memory\n"); 199 + pr_err("i2c-stub: Out of memory\n"); 201 200 return -ENOMEM; 202 201 } 203 202 ··· 218 219 219 220 module_init(i2c_stub_init); 220 221 module_exit(i2c_stub_exit); 221 -
+1 -1
drivers/i2c/busses/i2c-tegra.c
··· 742 742 } 743 743 744 744 ret = devm_request_irq(&pdev->dev, i2c_dev->irq, 745 - tegra_i2c_isr, 0, pdev->name, i2c_dev); 745 + tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); 746 746 if (ret) { 747 747 dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); 748 748 return ret;
+1
drivers/input/keyboard/Kconfig
··· 335 335 config KEYBOARD_LPC32XX 336 336 tristate "LPC32XX matrix key scanner support" 337 337 depends on ARCH_LPC32XX && OF 338 + select INPUT_MATRIXKMAP 338 339 help 339 340 Say Y here if you want to use NXP LPC32XX SoC key scanner interface, 340 341 connected to a key matrix.
+3
drivers/input/keyboard/pxa27x_keypad.c
··· 368 368 unsigned int mask = 0, direct_key_num = 0; 369 369 unsigned long kpc = 0; 370 370 371 + /* clear pending interrupt bit */ 372 + keypad_readl(KPC); 373 + 371 374 /* enable matrix keys with automatic scan */ 372 375 if (pdata->matrix_key_rows && pdata->matrix_key_cols) { 373 376 kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
+4 -1
drivers/input/misc/xen-kbdfront.c
··· 311 311 case XenbusStateReconfiguring: 312 312 case XenbusStateReconfigured: 313 313 case XenbusStateUnknown: 314 - case XenbusStateClosed: 315 314 break; 316 315 317 316 case XenbusStateInitWait: ··· 349 350 350 351 break; 351 352 353 + case XenbusStateClosed: 354 + if (dev->state == XenbusStateClosed) 355 + break; 356 + /* Missed the backend's CLOSING state -- fallthrough */ 352 357 case XenbusStateClosing: 353 358 xenbus_frontend_closed(dev); 354 359 break;
+21
drivers/input/mouse/bcm5974.c
··· 84 84 #define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262 85 85 #define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263 86 86 #define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264 87 + /* MacbookPro10,2 (unibody, October 2012) */ 88 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259 89 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a 90 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b 87 91 88 92 #define BCM5974_DEVICE(prod) { \ 89 93 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ ··· 141 137 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI), 142 138 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO), 143 139 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), 140 + /* MacbookPro10,2 */ 141 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI), 142 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO), 143 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), 144 144 /* Terminating entry */ 145 145 {} 146 146 }; ··· 378 370 USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI, 379 371 USB_DEVICE_ID_APPLE_WELLSPRING7_ISO, 380 372 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, 373 + HAS_INTEGRATED_BUTTON, 374 + 0x84, sizeof(struct bt_data), 375 + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 376 + { SN_PRESSURE, 0, 300 }, 377 + { SN_WIDTH, 0, 2048 }, 378 + { SN_COORD, -4750, 5280 }, 379 + { SN_COORD, -150, 6730 }, 380 + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } 381 + }, 382 + { 383 + USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI, 384 + USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO, 385 + USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, 381 386 HAS_INTEGRATED_BUTTON, 382 387 0x84, sizeof(struct bt_data), 383 388 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+1 -1
drivers/input/tablet/wacom_sys.c
··· 391 391 features->pktlen = WACOM_PKGLEN_TPC2FG; 392 392 } 393 393 394 - if (features->type == MTSCREEN || WACOM_24HDT) 394 + if (features->type == MTSCREEN || features->type == WACOM_24HDT) 395 395 features->pktlen = WACOM_PKGLEN_MTOUCH; 396 396 397 397 if (features->type == BAMBOO_PT) {
+3
drivers/input/tablet/wacom_wac.c
··· 1518 1518 1519 1519 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1520 1520 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); 1521 + 1522 + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1523 + 1521 1524 wacom_setup_cintiq(wacom_wac); 1522 1525 break; 1523 1526
+1 -1
drivers/input/touchscreen/Kconfig
··· 239 239 240 240 config TOUCHSCREEN_EGALAX 241 241 tristate "EETI eGalax multi-touch panel support" 242 - depends on I2C 242 + depends on I2C && OF 243 243 help 244 244 Say Y here to enable support for I2C connected EETI 245 245 eGalax multi-touch panels.
+21 -2
drivers/input/touchscreen/egalax_ts.c
··· 28 28 #include <linux/slab.h> 29 29 #include <linux/bitops.h> 30 30 #include <linux/input/mt.h> 31 + #include <linux/of_gpio.h> 31 32 32 33 /* 33 34 * Mouse Mode: some panel may configure the controller to mouse mode, ··· 123 122 /* wake up controller by an falling edge of interrupt gpio. */ 124 123 static int egalax_wake_up_device(struct i2c_client *client) 125 124 { 126 - int gpio = irq_to_gpio(client->irq); 125 + struct device_node *np = client->dev.of_node; 126 + int gpio; 127 127 int ret; 128 + 129 + if (!np) 130 + return -ENODEV; 131 + 132 + gpio = of_get_named_gpio(np, "wakeup-gpios", 0); 133 + if (!gpio_is_valid(gpio)) 134 + return -ENODEV; 128 135 129 136 ret = gpio_request(gpio, "egalax_irq"); 130 137 if (ret < 0) { ··· 190 181 ts->input_dev = input_dev; 191 182 192 183 /* controller may be in sleep, wake it up. */ 193 - egalax_wake_up_device(client); 184 + error = egalax_wake_up_device(client); 185 + if (error) { 186 + dev_err(&client->dev, "Failed to wake up the controller\n"); 187 + goto err_free_dev; 188 + } 194 189 195 190 ret = egalax_firmware_version(client); 196 191 if (ret < 0) { ··· 287 274 288 275 static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume); 289 276 277 + static struct of_device_id egalax_ts_dt_ids[] = { 278 + { .compatible = "eeti,egalax_ts" }, 279 + { /* sentinel */ } 280 + }; 281 + 290 282 static struct i2c_driver egalax_ts_driver = { 291 283 .driver = { 292 284 .name = "egalax_ts", 293 285 .owner = THIS_MODULE, 294 286 .pm = &egalax_ts_pm_ops, 287 + .of_match_table = of_match_ptr(egalax_ts_dt_ids), 295 288 }, 296 289 .id_table = egalax_ts_id, 297 290 .probe = egalax_ts_probe,
-1
drivers/input/touchscreen/tsc40.c
··· 107 107 __set_bit(BTN_TOUCH, input_dev->keybit); 108 108 input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0); 109 109 input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0); 110 - input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0); 111 110 112 111 serio_set_drvdata(serio, ptsc); 113 112
+1 -1
drivers/isdn/Kconfig
··· 4 4 5 5 menuconfig ISDN 6 6 bool "ISDN support" 7 - depends on NET 7 + depends on NET && NETDEVICES 8 8 depends on !S390 && !UML 9 9 ---help--- 10 10 ISDN ("Integrated Services Digital Network", called RNIS in France)
+1 -1
drivers/isdn/i4l/Kconfig
··· 6 6 7 7 config ISDN_PPP 8 8 bool "Support synchronous PPP" 9 - depends on INET && NETDEVICES 9 + depends on INET 10 10 select SLHC 11 11 help 12 12 Over digital connections such as ISDN, there is no need to
-4
drivers/isdn/i4l/isdn_common.c
··· 1312 1312 } else 1313 1313 return -EINVAL; 1314 1314 break; 1315 - #ifdef CONFIG_NETDEVICES 1316 1315 case IIOCNETGPN: 1317 1316 /* Get peer phone number of a connected 1318 1317 * isdn network interface */ ··· 1321 1322 return isdn_net_getpeer(&phone, argp); 1322 1323 } else 1323 1324 return -EINVAL; 1324 - #endif 1325 1325 default: 1326 1326 return -EINVAL; 1327 1327 } ··· 1350 1352 case IIOCNETLCR: 1351 1353 printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); 1352 1354 return -ENODEV; 1353 - #ifdef CONFIG_NETDEVICES 1354 1355 case IIOCNETAIF: 1355 1356 /* Add a network-interface */ 1356 1357 if (arg) { ··· 1488 1491 return -EFAULT; 1489 1492 return isdn_net_force_hangup(name); 1490 1493 break; 1491 - #endif /* CONFIG_NETDEVICES */ 1492 1494 case IIOCSETVER: 1493 1495 dev->net_verbose = arg; 1494 1496 printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose);
+4 -1
drivers/md/faulty.c
··· 315 315 } 316 316 conf->nfaults = 0; 317 317 318 - rdev_for_each(rdev, mddev) 318 + rdev_for_each(rdev, mddev) { 319 319 conf->rdev = rdev; 320 + disk_stack_limits(mddev->gendisk, rdev->bdev, 321 + rdev->data_offset << 9); 322 + } 320 323 321 324 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); 322 325 mddev->private = conf;
+1 -1
drivers/md/raid1.c
··· 2710 2710 || disk_idx < 0) 2711 2711 continue; 2712 2712 if (test_bit(Replacement, &rdev->flags)) 2713 - disk = conf->mirrors + conf->raid_disks + disk_idx; 2713 + disk = conf->mirrors + mddev->raid_disks + disk_idx; 2714 2714 else 2715 2715 disk = conf->mirrors + disk_idx; 2716 2716
+9 -6
drivers/md/raid10.c
··· 1783 1783 clear_bit(Unmerged, &rdev->flags); 1784 1784 } 1785 1785 md_integrity_add_rdev(rdev, mddev); 1786 - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 1786 + if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 1787 1787 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 1788 1788 1789 1789 print_conf(conf); ··· 3613 3613 discard_supported = true; 3614 3614 } 3615 3615 3616 - if (discard_supported) 3617 - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 3618 - else 3619 - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 3620 - 3616 + if (mddev->queue) { 3617 + if (discard_supported) 3618 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 3619 + mddev->queue); 3620 + else 3621 + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 3622 + mddev->queue); 3623 + } 3621 3624 /* need to check that every block has at least one working mirror */ 3622 3625 if (!enough(conf, -1)) { 3623 3626 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
+4 -4
drivers/mmc/host/dw_mmc-exynos.c
··· 208 208 MMC_CAP_CMD23, 209 209 }; 210 210 211 - static struct dw_mci_drv_data exynos5250_drv_data = { 211 + static const struct dw_mci_drv_data exynos5250_drv_data = { 212 212 .caps = exynos5250_dwmmc_caps, 213 213 .init = dw_mci_exynos_priv_init, 214 214 .setup_clock = dw_mci_exynos_setup_clock, ··· 220 220 221 221 static const struct of_device_id dw_mci_exynos_match[] = { 222 222 { .compatible = "samsung,exynos5250-dw-mshc", 223 - .data = (void *)&exynos5250_drv_data, }, 223 + .data = &exynos5250_drv_data, }, 224 224 {}, 225 225 }; 226 - MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); 226 + MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); 227 227 228 228 int dw_mci_exynos_probe(struct platform_device *pdev) 229 229 { 230 - struct dw_mci_drv_data *drv_data; 230 + const struct dw_mci_drv_data *drv_data; 231 231 const struct of_device_id *match; 232 232 233 233 match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
+3 -3
drivers/mmc/host/dw_mmc-pltfm.c
··· 24 24 #include "dw_mmc.h" 25 25 26 26 int dw_mci_pltfm_register(struct platform_device *pdev, 27 - struct dw_mci_drv_data *drv_data) 27 + const struct dw_mci_drv_data *drv_data) 28 28 { 29 29 struct dw_mci *host; 30 30 struct resource *regs; ··· 50 50 if (!host->regs) 51 51 return -ENOMEM; 52 52 53 - if (host->drv_data->init) { 54 - ret = host->drv_data->init(host); 53 + if (drv_data && drv_data->init) { 54 + ret = drv_data->init(host); 55 55 if (ret) 56 56 return ret; 57 57 }
+1 -1
drivers/mmc/host/dw_mmc-pltfm.h
··· 13 13 #define _DW_MMC_PLTFM_H_ 14 14 15 15 extern int dw_mci_pltfm_register(struct platform_device *pdev, 16 - struct dw_mci_drv_data *drv_data); 16 + const struct dw_mci_drv_data *drv_data); 17 17 extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev); 18 18 extern const struct dev_pm_ops dw_mci_pltfm_pmops; 19 19
+34 -28
drivers/mmc/host/dw_mmc.c
··· 232 232 { 233 233 struct mmc_data *data; 234 234 struct dw_mci_slot *slot = mmc_priv(mmc); 235 + struct dw_mci_drv_data *drv_data = slot->host->drv_data; 235 236 u32 cmdr; 236 237 cmd->error = -EINPROGRESS; 237 238 ··· 262 261 cmdr |= SDMMC_CMD_DAT_WR; 263 262 } 264 263 265 - if (slot->host->drv_data->prepare_command) 266 - slot->host->drv_data->prepare_command(slot->host, &cmdr); 264 + if (drv_data && drv_data->prepare_command) 265 + drv_data->prepare_command(slot->host, &cmdr); 267 266 268 267 return cmdr; 269 268 } ··· 435 434 return 0; 436 435 } 437 436 438 - static struct dw_mci_dma_ops dw_mci_idmac_ops = { 437 + static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 439 438 .init = dw_mci_idmac_init, 440 439 .start = dw_mci_idmac_start_dma, 441 440 .stop = dw_mci_idmac_stop_dma, ··· 773 772 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 774 773 { 775 774 struct dw_mci_slot *slot = mmc_priv(mmc); 775 + struct dw_mci_drv_data *drv_data = slot->host->drv_data; 776 776 u32 regs; 777 777 778 778 /* set default 1 bit mode */ ··· 809 807 slot->clock = ios->clock; 810 808 } 811 809 812 - if (slot->host->drv_data->set_ios) 813 - slot->host->drv_data->set_ios(slot->host, ios); 810 + if (drv_data && drv_data->set_ios) 811 + drv_data->set_ios(slot->host, ios); 814 812 815 813 switch (ios->power_mode) { 816 814 case MMC_POWER_UP: ··· 1817 1815 { 1818 1816 struct mmc_host *mmc; 1819 1817 struct dw_mci_slot *slot; 1818 + struct dw_mci_drv_data *drv_data = host->drv_data; 1820 1819 int ctrl_id, ret; 1821 1820 u8 bus_width; 1822 1821 ··· 1857 1854 } else { 1858 1855 ctrl_id = to_platform_device(host->dev)->id; 1859 1856 } 1860 - if (host->drv_data && host->drv_data->caps) 1861 - mmc->caps |= host->drv_data->caps[ctrl_id]; 1857 + if (drv_data && drv_data->caps) 1858 + mmc->caps |= drv_data->caps[ctrl_id]; 1862 1859 1863 1860 if (host->pdata->caps2) 1864 1861 mmc->caps2 = host->pdata->caps2; ··· 1870 1867 else 1871 1868 bus_width = 1; 1872 1869 1873 
- if (host->drv_data->setup_bus) { 1870 + if (drv_data && drv_data->setup_bus) { 1874 1871 struct device_node *slot_np; 1875 1872 slot_np = dw_mci_of_find_slot_node(host->dev, slot->id); 1876 - ret = host->drv_data->setup_bus(host, slot_np, bus_width); 1873 + ret = drv_data->setup_bus(host, slot_np, bus_width); 1877 1874 if (ret) 1878 1875 goto err_setup_bus; 1879 1876 } ··· 1971 1968 /* Determine which DMA interface to use */ 1972 1969 #ifdef CONFIG_MMC_DW_IDMAC 1973 1970 host->dma_ops = &dw_mci_idmac_ops; 1974 - dev_info(&host->dev, "Using internal DMA controller.\n"); 1971 + dev_info(host->dev, "Using internal DMA controller.\n"); 1975 1972 #endif 1976 1973 1977 1974 if (!host->dma_ops) ··· 2038 2035 struct dw_mci_board *pdata; 2039 2036 struct device *dev = host->dev; 2040 2037 struct device_node *np = dev->of_node; 2038 + struct dw_mci_drv_data *drv_data = host->drv_data; 2041 2039 int idx, ret; 2042 2040 2043 2041 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); ··· 2066 2062 2067 2063 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); 2068 2064 2069 - if (host->drv_data->parse_dt) { 2070 - ret = host->drv_data->parse_dt(host); 2065 + if (drv_data && drv_data->parse_dt) { 2066 + ret = drv_data->parse_dt(host); 2071 2067 if (ret) 2072 2068 return ERR_PTR(ret); 2073 2069 } ··· 2084 2080 2085 2081 int dw_mci_probe(struct dw_mci *host) 2086 2082 { 2083 + struct dw_mci_drv_data *drv_data = host->drv_data; 2087 2084 int width, i, ret = 0; 2088 2085 u32 fifo_size; 2089 2086 int init_slots = 0; ··· 2132 2127 else 2133 2128 host->bus_hz = clk_get_rate(host->ciu_clk); 2134 2129 2135 - if (host->drv_data->setup_clock) { 2136 - ret = host->drv_data->setup_clock(host); 2130 + if (drv_data && drv_data->setup_clock) { 2131 + ret = drv_data->setup_clock(host); 2137 2132 if (ret) { 2138 2133 dev_err(host->dev, 2139 2134 "implementation specific clock setup failed\n"); ··· 2233 2228 else 2234 2229 host->num_slots = ((mci_readl(host, HCON) >> 1) & 
0x1F) + 1; 2235 2230 2231 + /* 2232 + * Enable interrupts for command done, data over, data empty, card det, 2233 + * receive ready and error such as transmit, receive timeout, crc error 2234 + */ 2235 + mci_writel(host, RINTSTS, 0xFFFFFFFF); 2236 + mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2237 + SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2238 + DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2239 + mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2240 + 2241 + dev_info(host->dev, "DW MMC controller at irq %d, " 2242 + "%d bit host data width, " 2243 + "%u deep fifo\n", 2244 + host->irq, width, fifo_size); 2245 + 2236 2246 /* We need at least one slot to succeed */ 2237 2247 for (i = 0; i < host->num_slots; i++) { 2238 2248 ret = dw_mci_init_slot(host, i); ··· 2277 2257 else 2278 2258 host->data_offset = DATA_240A_OFFSET; 2279 2259 2280 - /* 2281 - * Enable interrupts for command done, data over, data empty, card det, 2282 - * receive ready and error such as transmit, receive timeout, crc error 2283 - */ 2284 - mci_writel(host, RINTSTS, 0xFFFFFFFF); 2285 - mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2286 - SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2287 - DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2288 - mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2289 - 2290 - dev_info(host->dev, "DW MMC controller at irq %d, " 2291 - "%d bit host data width, " 2292 - "%u deep fifo\n", 2293 - host->irq, width, fifo_size); 2294 2260 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2295 2261 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); 2296 2262
+1 -1
drivers/mmc/host/mxcmmc.c
··· 1161 1161 MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); 1162 1162 MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 1163 1163 MODULE_LICENSE("GPL"); 1164 - MODULE_ALIAS("platform:imx-mmc"); 1164 + MODULE_ALIAS("platform:mxc-mmc");
+12 -7
drivers/mmc/host/omap_hsmmc.c
··· 178 178 179 179 static int omap_hsmmc_card_detect(struct device *dev, int slot) 180 180 { 181 - struct omap_mmc_platform_data *mmc = dev->platform_data; 181 + struct omap_hsmmc_host *host = dev_get_drvdata(dev); 182 + struct omap_mmc_platform_data *mmc = host->pdata; 182 183 183 184 /* NOTE: assumes card detect signal is active-low */ 184 185 return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); ··· 187 186 188 187 static int omap_hsmmc_get_wp(struct device *dev, int slot) 189 188 { 190 - struct omap_mmc_platform_data *mmc = dev->platform_data; 189 + struct omap_hsmmc_host *host = dev_get_drvdata(dev); 190 + struct omap_mmc_platform_data *mmc = host->pdata; 191 191 192 192 /* NOTE: assumes write protect signal is active-high */ 193 193 return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); ··· 196 194 197 195 static int omap_hsmmc_get_cover_state(struct device *dev, int slot) 198 196 { 199 - struct omap_mmc_platform_data *mmc = dev->platform_data; 197 + struct omap_hsmmc_host *host = dev_get_drvdata(dev); 198 + struct omap_mmc_platform_data *mmc = host->pdata; 200 199 201 200 /* NOTE: assumes card detect signal is active-low */ 202 201 return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); ··· 207 204 208 205 static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) 209 206 { 210 - struct omap_mmc_platform_data *mmc = dev->platform_data; 207 + struct omap_hsmmc_host *host = dev_get_drvdata(dev); 208 + struct omap_mmc_platform_data *mmc = host->pdata; 211 209 212 210 disable_irq(mmc->slots[0].card_detect_irq); 213 211 return 0; ··· 216 212 217 213 static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) 218 214 { 219 - struct omap_mmc_platform_data *mmc = dev->platform_data; 215 + struct omap_hsmmc_host *host = dev_get_drvdata(dev); 216 + struct omap_mmc_platform_data *mmc = host->pdata; 220 217 221 218 enable_irq(mmc->slots[0].card_detect_irq); 222 219 return 0; ··· 2014 2009 clk_put(host->dbclk); 2015 2010 } 2016 2011 2017 - 
mmc_free_host(host->mmc); 2012 + omap_hsmmc_gpio_free(host->pdata); 2018 2013 iounmap(host->base); 2019 - omap_hsmmc_gpio_free(pdev->dev.platform_data); 2014 + mmc_free_host(host->mmc); 2020 2015 2021 2016 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2022 2017 if (res)
+20 -18
drivers/mmc/host/sdhci-dove.c
··· 19 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 20 */ 21 21 22 + #include <linux/err.h> 22 23 #include <linux/io.h> 23 24 #include <linux/clk.h> 24 25 #include <linux/err.h> ··· 85 84 struct sdhci_dove_priv *priv; 86 85 int ret; 87 86 88 - ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata); 89 - if (ret) 90 - goto sdhci_dove_register_fail; 91 - 92 87 priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv), 93 88 GFP_KERNEL); 94 89 if (!priv) { 95 90 dev_err(&pdev->dev, "unable to allocate private data"); 96 - ret = -ENOMEM; 97 - goto sdhci_dove_allocate_fail; 91 + return -ENOMEM; 98 92 } 93 + 94 + priv->clk = clk_get(&pdev->dev, NULL); 95 + if (!IS_ERR(priv->clk)) 96 + clk_prepare_enable(priv->clk); 97 + 98 + ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata); 99 + if (ret) 100 + goto sdhci_dove_register_fail; 99 101 100 102 host = platform_get_drvdata(pdev); 101 103 pltfm_host = sdhci_priv(host); 102 104 pltfm_host->priv = priv; 103 105 104 - priv->clk = clk_get(&pdev->dev, NULL); 105 - if (!IS_ERR(priv->clk)) 106 - clk_prepare_enable(priv->clk); 107 106 return 0; 108 107 109 - sdhci_dove_allocate_fail: 110 - sdhci_pltfm_unregister(pdev); 111 108 sdhci_dove_register_fail: 109 + if (!IS_ERR(priv->clk)) { 110 + clk_disable_unprepare(priv->clk); 111 + clk_put(priv->clk); 112 + } 112 113 return ret; 113 114 } 114 115 ··· 120 117 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 121 118 struct sdhci_dove_priv *priv = pltfm_host->priv; 122 119 123 - if (priv->clk) { 124 - if (!IS_ERR(priv->clk)) { 125 - clk_disable_unprepare(priv->clk); 126 - clk_put(priv->clk); 127 - } 128 - devm_kfree(&pdev->dev, priv->clk); 120 + sdhci_pltfm_unregister(pdev); 121 + 122 + if (!IS_ERR(priv->clk)) { 123 + clk_disable_unprepare(priv->clk); 124 + clk_put(priv->clk); 129 125 } 130 - return sdhci_pltfm_unregister(pdev); 126 + return 0; 131 127 } 132 128 133 129 static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = {
+11
drivers/mmc/host/sdhci-of-esdhc.c
··· 169 169 } 170 170 #endif 171 171 172 + static void esdhc_of_platform_init(struct sdhci_host *host) 173 + { 174 + u32 vvn; 175 + 176 + vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); 177 + vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; 178 + if (vvn == VENDOR_V_22) 179 + host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; 180 + } 181 + 172 182 static struct sdhci_ops sdhci_esdhc_ops = { 173 183 .read_l = esdhc_readl, 174 184 .read_w = esdhc_readw, ··· 190 180 .enable_dma = esdhc_of_enable_dma, 191 181 .get_max_clock = esdhc_of_get_max_clock, 192 182 .get_min_clock = esdhc_of_get_min_clock, 183 + .platform_init = esdhc_of_platform_init, 193 184 #ifdef CONFIG_PM 194 185 .platform_suspend = esdhc_of_suspend, 195 186 .platform_resume = esdhc_of_resume,
+1 -1
drivers/mmc/host/sdhci-pci.c
··· 1196 1196 return ERR_PTR(-ENODEV); 1197 1197 } 1198 1198 1199 - if (pci_resource_len(pdev, bar) != 0x100) { 1199 + if (pci_resource_len(pdev, bar) < 0x100) { 1200 1200 dev_err(&pdev->dev, "Invalid iomem size. You may " 1201 1201 "experience problems.\n"); 1202 1202 }
+7
drivers/mmc/host/sdhci-pltfm.c
··· 150 150 goto err_remap; 151 151 } 152 152 153 + /* 154 + * Some platforms need to probe the controller to be able to 155 + * determine which caps should be used. 156 + */ 157 + if (host->ops && host->ops->platform_init) 158 + host->ops->platform_init(host); 159 + 153 160 platform_set_drvdata(pdev, host); 154 161 155 162 return host;
+16 -14
drivers/mmc/host/sdhci-s3c.c
··· 211 211 if (ourhost->cur_clk != best_src) { 212 212 struct clk *clk = ourhost->clk_bus[best_src]; 213 213 214 - clk_enable(clk); 215 - clk_disable(ourhost->clk_bus[ourhost->cur_clk]); 214 + clk_prepare_enable(clk); 215 + clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); 216 216 217 217 /* turn clock off to card before changing clock source */ 218 218 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); ··· 607 607 } 608 608 609 609 /* enable the local io clock and keep it running for the moment. */ 610 - clk_enable(sc->clk_io); 610 + clk_prepare_enable(sc->clk_io); 611 611 612 612 for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) { 613 613 struct clk *clk; ··· 638 638 } 639 639 640 640 #ifndef CONFIG_PM_RUNTIME 641 - clk_enable(sc->clk_bus[sc->cur_clk]); 641 + clk_prepare_enable(sc->clk_bus[sc->cur_clk]); 642 642 #endif 643 643 644 644 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 747 747 sdhci_s3c_setup_card_detect_gpio(sc); 748 748 749 749 #ifdef CONFIG_PM_RUNTIME 750 - clk_disable(sc->clk_io); 750 + if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) 751 + clk_disable_unprepare(sc->clk_io); 751 752 #endif 752 753 return 0; 753 754 754 755 err_req_regs: 755 756 #ifndef CONFIG_PM_RUNTIME 756 - clk_disable(sc->clk_bus[sc->cur_clk]); 757 + clk_disable_unprepare(sc->clk_bus[sc->cur_clk]); 757 758 #endif 758 759 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { 759 760 if (sc->clk_bus[ptr]) { ··· 763 762 } 764 763 765 764 err_no_busclks: 766 - clk_disable(sc->clk_io); 765 + clk_disable_unprepare(sc->clk_io); 767 766 clk_put(sc->clk_io); 768 767 769 768 err_io_clk: ··· 795 794 gpio_free(sc->ext_cd_gpio); 796 795 797 796 #ifdef CONFIG_PM_RUNTIME 798 - clk_enable(sc->clk_io); 797 + if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) 798 + clk_prepare_enable(sc->clk_io); 799 799 #endif 800 800 sdhci_remove_host(host, 1); 801 801 ··· 804 802 pm_runtime_disable(&pdev->dev); 805 803 806 804 #ifndef CONFIG_PM_RUNTIME 807 - clk_disable(sc->clk_bus[sc->cur_clk]); 805 + 
clk_disable_unprepare(sc->clk_bus[sc->cur_clk]); 808 806 #endif 809 807 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { 810 808 if (sc->clk_bus[ptr]) { 811 809 clk_put(sc->clk_bus[ptr]); 812 810 } 813 811 } 814 - clk_disable(sc->clk_io); 812 + clk_disable_unprepare(sc->clk_io); 815 813 clk_put(sc->clk_io); 816 814 817 815 if (pdev->dev.of_node) { ··· 851 849 852 850 ret = sdhci_runtime_suspend_host(host); 853 851 854 - clk_disable(ourhost->clk_bus[ourhost->cur_clk]); 855 - clk_disable(busclk); 852 + clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); 853 + clk_disable_unprepare(busclk); 856 854 return ret; 857 855 } 858 856 ··· 863 861 struct clk *busclk = ourhost->clk_io; 864 862 int ret; 865 863 866 - clk_enable(busclk); 867 - clk_enable(ourhost->clk_bus[ourhost->cur_clk]); 864 + clk_prepare_enable(busclk); 865 + clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); 868 866 ret = sdhci_runtime_resume_host(host); 869 867 return ret; 870 868 }
+27 -15
drivers/mmc/host/sdhci.c
··· 1315 1315 */ 1316 1316 if ((host->flags & SDHCI_NEEDS_RETUNING) && 1317 1317 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { 1318 - /* eMMC uses cmd21 while sd and sdio use cmd19 */ 1319 - tuning_opcode = mmc->card->type == MMC_TYPE_MMC ? 1320 - MMC_SEND_TUNING_BLOCK_HS200 : 1321 - MMC_SEND_TUNING_BLOCK; 1322 - spin_unlock_irqrestore(&host->lock, flags); 1323 - sdhci_execute_tuning(mmc, tuning_opcode); 1324 - spin_lock_irqsave(&host->lock, flags); 1318 + if (mmc->card) { 1319 + /* eMMC uses cmd21 but sd and sdio use cmd19 */ 1320 + tuning_opcode = 1321 + mmc->card->type == MMC_TYPE_MMC ? 1322 + MMC_SEND_TUNING_BLOCK_HS200 : 1323 + MMC_SEND_TUNING_BLOCK; 1324 + spin_unlock_irqrestore(&host->lock, flags); 1325 + sdhci_execute_tuning(mmc, tuning_opcode); 1326 + spin_lock_irqsave(&host->lock, flags); 1325 1327 1326 - /* Restore original mmc_request structure */ 1327 - host->mrq = mrq; 1328 + /* Restore original mmc_request structure */ 1329 + host->mrq = mrq; 1330 + } 1328 1331 } 1329 1332 1330 1333 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) ··· 2840 2837 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 2841 2838 mmc->caps |= MMC_CAP_4_BIT_DATA; 2842 2839 2840 + if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 2841 + mmc->caps &= ~MMC_CAP_CMD23; 2842 + 2843 2843 if (caps[0] & SDHCI_CAN_DO_HISPD) 2844 2844 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2845 2845 ··· 2852 2846 2853 2847 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ 2854 2848 host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc"); 2855 - if (IS_ERR(host->vqmmc)) { 2856 - pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc)); 2857 - host->vqmmc = NULL; 2849 + if (IS_ERR_OR_NULL(host->vqmmc)) { 2850 + if (PTR_ERR(host->vqmmc) < 0) { 2851 + pr_info("%s: no vqmmc regulator found\n", 2852 + mmc_hostname(mmc)); 2853 + host->vqmmc = NULL; 2854 + } 2858 2855 } 2859 2856 else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000)) 2860 
2857 regulator_enable(host->vqmmc); ··· 2913 2904 ocr_avail = 0; 2914 2905 2915 2906 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 2916 - if (IS_ERR(host->vmmc)) { 2917 - pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 2918 - host->vmmc = NULL; 2907 + if (IS_ERR_OR_NULL(host->vmmc)) { 2908 + if (PTR_ERR(host->vmmc) < 0) { 2909 + pr_info("%s: no vmmc regulator found\n", 2910 + mmc_hostname(mmc)); 2911 + host->vmmc = NULL; 2912 + } 2919 2913 } else 2920 2914 regulator_enable(host->vmmc); 2921 2915
+1
drivers/mmc/host/sdhci.h
··· 278 278 void (*hw_reset)(struct sdhci_host *host); 279 279 void (*platform_suspend)(struct sdhci_host *host); 280 280 void (*platform_resume)(struct sdhci_host *host); 281 + void (*platform_init)(struct sdhci_host *host); 281 282 }; 282 283 283 284 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+1 -1
drivers/mmc/host/sh_mmcif.c
··· 1466 1466 1467 1467 platform_set_drvdata(pdev, NULL); 1468 1468 1469 + clk_disable(host->hclk); 1469 1470 mmc_free_host(host->mmc); 1470 1471 pm_runtime_put_sync(&pdev->dev); 1471 - clk_disable(host->hclk); 1472 1472 pm_runtime_disable(&pdev->dev); 1473 1473 1474 1474 return 0;
+2 -2
drivers/net/bonding/bond_sysfs.c
··· 1060 1060 goto out; 1061 1061 } 1062 1062 1063 - sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ 1063 + sscanf(buf, "%15s", ifname); /* IFNAMSIZ */ 1064 1064 1065 1065 /* check to see if we are clearing primary */ 1066 1066 if (!strlen(ifname) || buf[0] == '\n') { ··· 1237 1237 goto out; 1238 1238 } 1239 1239 1240 - sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ 1240 + sscanf(buf, "%15s", ifname); /* IFNAMSIZ */ 1241 1241 1242 1242 /* check to see if we are clearing active */ 1243 1243 if (!strlen(ifname) || buf[0] == '\n') {
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
··· 1702 1702 SHMEM_EEE_ADV_STATUS_SHIFT); 1703 1703 if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) { 1704 1704 DP(BNX2X_MSG_ETHTOOL, 1705 - "Direct manipulation of EEE advertisment is not supported\n"); 1705 + "Direct manipulation of EEE advertisement is not supported\n"); 1706 1706 return -EINVAL; 1707 1707 } 1708 1708
+115 -47
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
··· 137 137 #define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD 138 138 #define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD 139 139 140 - 140 + #define LINK_UPDATE_MASK \ 141 + (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \ 142 + LINK_STATUS_LINK_UP | \ 143 + LINK_STATUS_PHYSICAL_LINK_FLAG | \ 144 + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \ 145 + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \ 146 + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \ 147 + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \ 148 + LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \ 149 + LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) 141 150 142 151 #define SFP_EEPROM_CON_TYPE_ADDR 0x2 143 152 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 ··· 3304 3295 DEFAULT_PHY_DEV_ADDR); 3305 3296 } 3306 3297 3298 + static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy, 3299 + struct link_params *params, 3300 + u32 action) 3301 + { 3302 + struct bnx2x *bp = params->bp; 3303 + switch (action) { 3304 + case PHY_INIT: 3305 + /* Set correct devad */ 3306 + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); 3307 + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, 3308 + phy->def_md_devad); 3309 + break; 3310 + } 3311 + } 3312 + 3307 3313 static void bnx2x_xgxs_deassert(struct link_params *params) 3308 3314 { 3309 3315 struct bnx2x *bp = params->bp; ··· 3333 3309 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3334 3310 udelay(500); 3335 3311 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3336 - 3337 - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); 3338 - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 3339 - params->phy[INT_PHY].def_md_devad); 3312 + bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params, 3313 + PHY_INIT); 3340 3314 } 3341 3315 3342 3316 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, ··· 3567 3545 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3568 3546 struct link_params *params, 3569 3547 struct link_vars *vars) { 3570 - u16 
val16 = 0, lane, i; 3548 + u16 lane, i, cl72_ctrl, an_adv = 0; 3549 + u16 ucode_ver; 3571 3550 struct bnx2x *bp = params->bp; 3572 3551 static struct bnx2x_reg_set reg_set[] = { 3573 3552 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3574 - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, 3575 - {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0}, 3576 - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff}, 3577 - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555}, 3578 3553 {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, 3579 3554 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, 3580 3555 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, ··· 3584 3565 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3585 3566 reg_set[i].val); 3586 3567 3568 + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3569 + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); 3570 + cl72_ctrl &= 0xf8ff; 3571 + cl72_ctrl |= 0x3800; 3572 + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3573 + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); 3574 + 3587 3575 /* Check adding advertisement for 1G KX */ 3588 3576 if (((vars->line_speed == SPEED_AUTO_NEG) && 3589 3577 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 3590 3578 (vars->line_speed == SPEED_1000)) { 3591 3579 u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; 3592 - val16 |= (1<<5); 3580 + an_adv |= (1<<5); 3593 3581 3594 3582 /* Enable CL37 1G Parallel Detect */ 3595 3583 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); ··· 3606 3580 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || 3607 3581 (vars->line_speed == SPEED_10000)) { 3608 3582 /* Check adding advertisement for 10G KR */ 3609 - val16 |= (1<<7); 3583 + an_adv |= (1<<7); 3610 3584 /* Enable 10G Parallel Detect */ 3585 + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3586 + MDIO_AER_BLOCK_AER_REG, 0); 3587 + 3611 3588 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3612 3589 
MDIO_WC_REG_PAR_DET_10G_CTRL, 1); 3613 - 3590 + bnx2x_set_aer_mmd(params, phy); 3614 3591 DP(NETIF_MSG_LINK, "Advertize 10G\n"); 3615 3592 } 3616 3593 ··· 3633 3604 3634 3605 /* Advertised speeds */ 3635 3606 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3636 - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); 3607 + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); 3637 3608 3638 3609 /* Advertised and set FEC (Forward Error Correction) */ 3639 3610 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, ··· 3657 3628 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 3658 3629 */ 3659 3630 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3660 - MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); 3661 - if (val16 < 0xd108) { 3662 - DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); 3631 + MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver); 3632 + if (ucode_ver < 0xd108) { 3633 + DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n", 3634 + ucode_ver); 3663 3635 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3664 3636 } 3665 3637 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, ··· 3681 3651 struct link_vars *vars) 3682 3652 { 3683 3653 struct bnx2x *bp = params->bp; 3684 - u16 i; 3654 + u16 val16, i, lane; 3685 3655 static struct bnx2x_reg_set reg_set[] = { 3686 3656 /* Disable Autoneg */ 3687 3657 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3688 - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, 3689 3658 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 3690 3659 0x3f00}, 3691 3660 {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, 3692 3661 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, 3693 3662 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, 3694 3663 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, 3695 - /* Disable CL36 PCS Tx */ 3696 - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0}, 3697 - /* Double Wide Single Data Rate @ pll rate */ 3698 - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF}, 3699 3664 /* Leave cl72 
training enable, needed for KR */ 3700 3665 {MDIO_PMA_DEVAD, 3701 3666 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, ··· 3701 3676 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3702 3677 reg_set[i].val); 3703 3678 3704 - /* Leave CL72 enabled */ 3705 - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3706 - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 3707 - 0x3800); 3679 + lane = bnx2x_get_warpcore_lane(phy, params); 3680 + /* Global registers */ 3681 + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3682 + MDIO_AER_BLOCK_AER_REG, 0); 3683 + /* Disable CL36 PCS Tx */ 3684 + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3685 + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); 3686 + val16 &= ~(0x0011 << lane); 3687 + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3688 + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); 3708 3689 3690 + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3691 + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); 3692 + val16 |= (0x0303 << (lane << 1)); 3693 + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3694 + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); 3695 + /* Restore AER */ 3696 + bnx2x_set_aer_mmd(params, phy); 3709 3697 /* Set speed via PMA/PMD register */ 3710 3698 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3711 3699 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); ··· 4341 4303 struct link_params *params) 4342 4304 { 4343 4305 struct bnx2x *bp = params->bp; 4344 - u16 val16; 4306 + u16 val16, lane; 4345 4307 bnx2x_sfp_e3_set_transmitter(params, phy, 0); 4346 4308 bnx2x_set_mdio_clk(bp, params->chip_id, params->port); 4347 4309 bnx2x_set_aer_mmd(params, phy); ··· 4377 4339 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4378 4340 MDIO_WC_REG_XGXSBLK1_LANECTRL2, 4379 4341 val16 & 0xff00); 4342 + 4343 + lane = bnx2x_get_warpcore_lane(phy, params); 4344 + /* Disable CL36 PCS Tx */ 4345 + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4346 + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); 4347 + val16 |= (0x11 << lane); 4348 + if (phy->flags & FLAGS_WC_DUAL_MODE) 4349 + val16 |= (0x22 << 
lane); 4350 + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4351 + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); 4352 + 4353 + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4354 + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); 4355 + val16 &= ~(0x0303 << (lane << 1)); 4356 + val16 |= (0x0101 << (lane << 1)); 4357 + if (phy->flags & FLAGS_WC_DUAL_MODE) { 4358 + val16 &= ~(0x0c0c << (lane << 1)); 4359 + val16 |= (0x0404 << (lane << 1)); 4360 + } 4361 + 4362 + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4363 + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); 4364 + /* Restore AER */ 4365 + bnx2x_set_aer_mmd(params, phy); 4380 4366 4381 4367 } 4382 4368 ··· 6358 6296 vars->mac_type = MAC_TYPE_NONE; 6359 6297 6360 6298 /* Update shared memory */ 6361 - vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | 6362 - LINK_STATUS_LINK_UP | 6363 - LINK_STATUS_PHYSICAL_LINK_FLAG | 6364 - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | 6365 - LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | 6366 - LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | 6367 - LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | 6368 - LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | 6369 - LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE); 6299 + vars->link_status &= ~LINK_UPDATE_MASK; 6370 6300 vars->line_speed = 0; 6371 6301 bnx2x_update_mng(params, vars->link_status); 6372 6302 ··· 6506 6452 u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; 6507 6453 u8 active_external_phy = INT_PHY; 6508 6454 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 6455 + vars->link_status &= ~LINK_UPDATE_MASK; 6509 6456 for (phy_index = INT_PHY; phy_index < params->num_phys; 6510 6457 phy_index++) { 6511 6458 phy_vars[phy_index].flow_ctrl = 0; ··· 7634 7579 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7635 7580 struct link_params *params, 7636 7581 u16 addr, u8 byte_cnt, 7637 - u8 *o_buf) 7582 + u8 *o_buf, u8 is_init) 7638 7583 { 7639 7584 int rc = 0; 7640 7585 u8 i, j = 0, cnt = 0; ··· 7651 7596 /* 4 byte aligned address */ 7652 7597 addr32 = addr & (~0x3); 7653 
7598 do { 7654 - if (cnt == I2C_WA_PWR_ITER) { 7599 + if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { 7655 7600 bnx2x_warpcore_power_module(params, phy, 0); 7656 7601 /* Note that 100us are not enough here */ 7657 - usleep_range(1000,1000); 7602 + usleep_range(1000, 2000); 7658 7603 bnx2x_warpcore_power_module(params, phy, 1); 7659 7604 } 7660 7605 rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, ··· 7774 7719 break; 7775 7720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 7776 7721 rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, 7777 - byte_cnt, o_buf); 7722 + byte_cnt, o_buf, 0); 7778 7723 break; 7779 7724 } 7780 7725 return rc; ··· 7978 7923 7979 7924 { 7980 7925 u8 val; 7926 + int rc; 7981 7927 struct bnx2x *bp = params->bp; 7982 7928 u16 timeout; 7983 7929 /* Initialization time after hot-plug may take up to 300ms for ··· 7986 7930 */ 7987 7931 7988 7932 for (timeout = 0; timeout < 60; timeout++) { 7989 - if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) 7990 - == 0) { 7933 + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 7934 + rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, 7935 + params, 1, 7936 + 1, &val, 1); 7937 + else 7938 + rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, 7939 + &val); 7940 + if (rc == 0) { 7991 7941 DP(NETIF_MSG_LINK, 7992 7942 "SFP+ module initialization took %d ms\n", 7993 7943 timeout * 5); ··· 8001 7939 } 8002 7940 usleep_range(5000, 10000); 8003 7941 } 8004 - return -EINVAL; 7942 + rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val); 7943 + return rc; 8005 7944 } 8006 7945 8007 7946 static void bnx2x_8727_power_module(struct bnx2x *bp, ··· 9941 9878 else 9942 9879 rc = bnx2x_8483x_disable_eee(phy, params, vars); 9943 9880 if (rc) { 9944 - DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n"); 9881 + DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); 9945 9882 return rc; 9946 9883 } 9947 9884 } else { ··· 11056 10993 .format_fw_ver = (format_fw_ver_t)NULL, 11057 10994 .hw_reset 
= (hw_reset_t)NULL, 11058 10995 .set_link_led = (set_link_led_t)NULL, 11059 - .phy_specific_func = (phy_specific_func_t)NULL 10996 + .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func 11060 10997 }; 11061 10998 static struct bnx2x_phy phy_warpcore = { 11062 10999 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, ··· 11528 11465 phy->media_type = ETH_PHY_BASE_T; 11529 11466 break; 11530 11467 case PORT_HW_CFG_NET_SERDES_IF_XFI: 11468 + phy->supported &= (SUPPORTED_1000baseT_Full | 11469 + SUPPORTED_10000baseT_Full | 11470 + SUPPORTED_FIBRE | 11471 + SUPPORTED_Pause | 11472 + SUPPORTED_Asym_Pause); 11531 11473 phy->media_type = ETH_PHY_XFP_FIBER; 11532 11474 break; 11533 11475 case PORT_HW_CFG_NET_SERDES_IF_SFI: ··· 12987 12919 DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); 12988 12920 break; 12989 12921 default: 12990 - DP(NETIF_MSG_LINK, "Analyze UNKOWN\n"); 12922 + DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n"); 12991 12923 } 12992 12924 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, 12993 12925 old_status, status);
+11 -2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 6794 6794 6795 6795 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 6796 6796 6797 + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 6798 + 6797 6799 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 6798 - bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 6799 6800 6800 6801 if (IS_MF(bp)) 6801 6802 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); ··· 11903 11902 /* disable FCOE L2 queue for E1x */ 11904 11903 if (CHIP_IS_E1x(bp)) 11905 11904 bp->flags |= NO_FCOE_FLAG; 11906 - 11905 + /* disable FCOE for 57840 device, until FW supports it */ 11906 + switch (ent->driver_data) { 11907 + case BCM57840_O: 11908 + case BCM57840_4_10: 11909 + case BCM57840_2_20: 11910 + case BCM57840_MFO: 11911 + case BCM57840_MF: 11912 + bp->flags |= NO_FCOE_FLAG; 11913 + } 11907 11914 #endif 11908 11915 11909 11916
-10
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 3416 3416 finicsum, cfcsum); 3417 3417 3418 3418 /* 3419 - * If we're a pure NIC driver then disable all offloading facilities. 3420 - * This will allow the firmware to optimize aspects of the hardware 3421 - * configuration which will result in improved performance. 3422 - */ 3423 - caps_cmd.ofldcaps = 0; 3424 - caps_cmd.iscsicaps = 0; 3425 - caps_cmd.rdmacaps = 0; 3426 - caps_cmd.fcoecaps = 0; 3427 - 3428 - /* 3429 3419 * And now tell the firmware to use the configuration we just loaded. 3430 3420 */ 3431 3421 caps_cmd.op_to_write =
+5 -1
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 2519 2519 { 2520 2520 struct fw_bye_cmd c; 2521 2521 2522 + memset(&c, 0, sizeof(c)); 2522 2523 INIT_CMD(c, BYE, WRITE); 2523 2524 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2524 2525 } ··· 2536 2535 { 2537 2536 struct fw_initialize_cmd c; 2538 2537 2538 + memset(&c, 0, sizeof(c)); 2539 2539 INIT_CMD(c, INITIALIZE, WRITE); 2540 2540 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2541 2541 } ··· 2553 2551 { 2554 2552 struct fw_reset_cmd c; 2555 2553 2554 + memset(&c, 0, sizeof(c)); 2556 2555 INIT_CMD(c, RESET, WRITE); 2557 2556 c.val = htonl(reset); 2558 2557 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); ··· 2831 2828 HOSTPAGESIZEPF7(sge_hps)); 2832 2829 2833 2830 t4_set_reg_field(adap, SGE_CONTROL, 2834 - INGPADBOUNDARY(INGPADBOUNDARY_MASK) | 2831 + INGPADBOUNDARY_MASK | 2835 2832 EGRSTATUSPAGESIZE_MASK, 2836 2833 INGPADBOUNDARY(fl_align_log - 5) | 2837 2834 EGRSTATUSPAGESIZE(stat_len != 64)); ··· 3281 3278 { 3282 3279 struct fw_vi_enable_cmd c; 3283 3280 3281 + memset(&c, 0, sizeof(c)); 3284 3282 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | 3285 3283 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); 3286 3284 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+4 -1
drivers/net/ethernet/freescale/gianfar.c
··· 1353 1353 struct gfar_private *priv = dev_get_drvdata(dev); 1354 1354 struct net_device *ndev = priv->ndev; 1355 1355 1356 - if (!netif_running(ndev)) 1356 + if (!netif_running(ndev)) { 1357 + netif_device_attach(ndev); 1358 + 1357 1359 return 0; 1360 + } 1358 1361 1359 1362 gfar_init_bds(ndev); 1360 1363 init_registers(ndev);
+3
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 2673 2673 case ixgbe_mac_X540: 2674 2674 case ixgbe_mac_82599EB: 2675 2675 info->so_timestamping = 2676 + SOF_TIMESTAMPING_TX_SOFTWARE | 2677 + SOF_TIMESTAMPING_RX_SOFTWARE | 2678 + SOF_TIMESTAMPING_SOFTWARE | 2676 2679 SOF_TIMESTAMPING_TX_HARDWARE | 2677 2680 SOF_TIMESTAMPING_RX_HARDWARE | 2678 2681 SOF_TIMESTAMPING_RAW_HARDWARE;
+4 -4
drivers/net/ethernet/jme.c
··· 1948 1948 1949 1949 JME_NAPI_DISABLE(jme); 1950 1950 1951 - tasklet_disable(&jme->linkch_task); 1952 - tasklet_disable(&jme->txclean_task); 1953 - tasklet_disable(&jme->rxclean_task); 1954 - tasklet_disable(&jme->rxempty_task); 1951 + tasklet_kill(&jme->linkch_task); 1952 + tasklet_kill(&jme->txclean_task); 1953 + tasklet_kill(&jme->rxclean_task); 1954 + tasklet_kill(&jme->rxempty_task); 1955 1955 1956 1956 jme_disable_rx_engine(jme); 1957 1957 jme_disable_tx_engine(jme);
+1 -1
drivers/net/ethernet/marvell/skge.c
··· 4026 4026 dev0 = hw->dev[0]; 4027 4027 unregister_netdev(dev0); 4028 4028 4029 - tasklet_disable(&hw->phy_task); 4029 + tasklet_kill(&hw->phy_task); 4030 4030 4031 4031 spin_lock_irq(&hw->hw_lock); 4032 4032 hw->intr_mask = 0;
+2 -2
drivers/net/ethernet/micrel/ksz884x.c
··· 5407 5407 /* Delay for receive task to stop scheduling itself. */ 5408 5408 msleep(2000 / HZ); 5409 5409 5410 - tasklet_disable(&hw_priv->rx_tasklet); 5411 - tasklet_disable(&hw_priv->tx_tasklet); 5410 + tasklet_kill(&hw_priv->rx_tasklet); 5411 + tasklet_kill(&hw_priv->tx_tasklet); 5412 5412 free_irq(dev->irq, hw_priv->dev); 5413 5413 5414 5414 transmit_cleanup(hw_priv, 0);
+1
drivers/net/ethernet/nxp/lpc_eth.c
··· 1524 1524 pldat->dma_buff_base_p); 1525 1525 free_irq(ndev->irq, ndev); 1526 1526 iounmap(pldat->net_base); 1527 + mdiobus_unregister(pldat->mii_bus); 1527 1528 mdiobus_free(pldat->mii_bus); 1528 1529 clk_disable(pldat->clk); 1529 1530 clk_put(pldat->clk);
+5
drivers/net/ethernet/realtek/r8169.c
··· 3827 3827 void __iomem *ioaddr = tp->mmio_addr; 3828 3828 3829 3829 switch (tp->mac_version) { 3830 + case RTL_GIGA_MAC_VER_25: 3831 + case RTL_GIGA_MAC_VER_26: 3830 3832 case RTL_GIGA_MAC_VER_29: 3831 3833 case RTL_GIGA_MAC_VER_30: 3832 3834 case RTL_GIGA_MAC_VER_32: ··· 4520 4518 mc_filter[0] = swab32(mc_filter[1]); 4521 4519 mc_filter[1] = swab32(data); 4522 4520 } 4521 + 4522 + if (tp->mac_version == RTL_GIGA_MAC_VER_35) 4523 + mc_filter[1] = mc_filter[0] = 0xffffffff; 4523 4524 4524 4525 RTL_W32(MAR0 + 4, mc_filter[1]); 4525 4526 RTL_W32(MAR0 + 0, mc_filter[0]);
+1 -1
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 990 990 axienet_setoptions(ndev, lp->options & 991 991 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 992 992 993 - tasklet_disable(&lp->dma_err_tasklet); 993 + tasklet_kill(&lp->dma_err_tasklet); 994 994 995 995 free_irq(lp->tx_irq, ndev); 996 996 free_irq(lp->rx_irq, ndev);
+1
drivers/net/phy/mdio-bitbang.c
··· 234 234 struct mdiobb_ctrl *ctrl = bus->priv; 235 235 236 236 module_put(ctrl->ops->owner); 237 + mdiobus_unregister(bus); 237 238 mdiobus_free(bus); 238 239 } 239 240 EXPORT_SYMBOL(free_mdio_bitbang);
+2 -1
drivers/net/usb/cdc_eem.c
··· 31 31 #include <linux/usb/cdc.h> 32 32 #include <linux/usb/usbnet.h> 33 33 #include <linux/gfp.h> 34 + #include <linux/if_vlan.h> 34 35 35 36 36 37 /* ··· 93 92 94 93 /* no jumbogram (16K) support for now */ 95 94 96 - dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; 95 + dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN; 97 96 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 98 97 99 98 return 0;
+1
drivers/net/usb/smsc95xx.c
··· 1344 1344 } else { 1345 1345 u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); 1346 1346 skb_push(skb, 4); 1347 + cpu_to_le32s(&csum_preamble); 1347 1348 memcpy(skb->data, &csum_preamble, 4); 1348 1349 } 1349 1350 }
+5 -3
drivers/net/usb/usbnet.c
··· 359 359 void usbnet_defer_kevent (struct usbnet *dev, int work) 360 360 { 361 361 set_bit (work, &dev->flags); 362 - if (!schedule_work (&dev->kevent)) 363 - netdev_err(dev->net, "kevent %d may have been dropped\n", work); 364 - else 362 + if (!schedule_work (&dev->kevent)) { 363 + if (net_ratelimit()) 364 + netdev_err(dev->net, "kevent %d may have been dropped\n", work); 365 + } else { 365 366 netdev_dbg(dev->net, "kevent %d scheduled\n", work); 367 + } 366 368 } 367 369 EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 368 370
+45 -20
drivers/net/vmxnet3/vmxnet3_drv.c
··· 744 744 745 745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 746 746 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 747 + u32 buf_size; 747 748 748 - tbi = tq->buf_info + tq->tx_ring.next2fill; 749 - tbi->map_type = VMXNET3_MAP_PAGE; 750 - tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 751 - 0, skb_frag_size(frag), 752 - DMA_TO_DEVICE); 749 + buf_offset = 0; 750 + len = skb_frag_size(frag); 751 + while (len) { 752 + tbi = tq->buf_info + tq->tx_ring.next2fill; 753 + if (len < VMXNET3_MAX_TX_BUF_SIZE) { 754 + buf_size = len; 755 + dw2 |= len; 756 + } else { 757 + buf_size = VMXNET3_MAX_TX_BUF_SIZE; 758 + /* spec says that for TxDesc.len, 0 == 2^14 */ 759 + } 760 + tbi->map_type = VMXNET3_MAP_PAGE; 761 + tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 762 + buf_offset, buf_size, 763 + DMA_TO_DEVICE); 753 764 754 - tbi->len = skb_frag_size(frag); 765 + tbi->len = buf_size; 755 766 756 - gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 757 - BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 767 + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 768 + BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 758 769 759 - gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 760 - gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag)); 761 - gdesc->dword[3] = 0; 770 + gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 771 + gdesc->dword[2] = cpu_to_le32(dw2); 772 + gdesc->dword[3] = 0; 762 773 763 - dev_dbg(&adapter->netdev->dev, 764 - "txd[%u]: 0x%llu %u %u\n", 765 - tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 766 - le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 767 - vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 768 - dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 774 + dev_dbg(&adapter->netdev->dev, 775 + "txd[%u]: 0x%llu %u %u\n", 776 + tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 777 + le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 778 + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 779 + dw2 = tq->tx_ring.gen << 
VMXNET3_TXD_GEN_SHIFT; 780 + 781 + len -= buf_size; 782 + buf_offset += buf_size; 783 + } 769 784 } 770 785 771 786 ctx->eop_txd = gdesc; ··· 901 886 } 902 887 } 903 888 889 + static int txd_estimate(const struct sk_buff *skb) 890 + { 891 + int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; 892 + int i; 893 + 894 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 895 + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 896 + 897 + count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); 898 + } 899 + return count; 900 + } 904 901 905 902 /* 906 903 * Transmits a pkt thru a given tq ··· 941 914 union Vmxnet3_GenericDesc tempTxDesc; 942 915 #endif 943 916 944 - /* conservatively estimate # of descriptors to use */ 945 - count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 946 - skb_shinfo(skb)->nr_frags + 1; 917 + count = txd_estimate(skb); 947 918 948 919 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); 949 920
+1 -1
drivers/net/vxlan.c
··· 816 816 = container_of(p, struct vxlan_fdb, hlist); 817 817 unsigned long timeout; 818 818 819 - if (f->state == NUD_PERMANENT) 819 + if (f->state & NUD_PERMANENT) 820 820 continue; 821 821 822 822 timeout = f->used + vxlan->age_interval * HZ;
+8 -2
drivers/net/wireless/ath/ath9k/xmit.c
··· 312 312 } 313 313 314 314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 315 + bf->bf_next = NULL; 315 316 list_del(&bf->list); 316 317 317 318 spin_unlock_bh(&sc->tx.txbuflock); ··· 394 393 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; 395 394 u32 ba[WME_BA_BMP_SIZE >> 5]; 396 395 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 397 - bool rc_update = true; 396 + bool rc_update = true, isba; 398 397 struct ieee80211_tx_rate rates[4]; 399 398 struct ath_frame_info *fi; 400 399 int nframes; ··· 438 437 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 439 438 tid = ATH_AN_2_TID(an, tidno); 440 439 seq_first = tid->seq_start; 440 + isba = ts->ts_flags & ATH9K_TX_BA; 441 441 442 442 /* 443 443 * The hardware occasionally sends a tx status for the wrong TID. 444 444 * In this case, the BA status cannot be considered valid and all 445 445 * subframes need to be retransmitted 446 + * 447 + * Only BlockAcks have a TID and therefore normal Acks cannot be 448 + * checked 446 449 */ 447 - if (tidno != ts->tid) 450 + if (isba && tidno != ts->tid) 448 451 txok = false; 449 452 450 453 isaggr = bf_isaggr(bf); ··· 1779 1774 list_add_tail(&bf->list, &bf_head); 1780 1775 bf->bf_state.bf_type = 0; 1781 1776 1777 + bf->bf_next = NULL; 1782 1778 bf->bf_lastbf = bf; 1783 1779 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1784 1780 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
+1 -1
drivers/net/wireless/b43legacy/pio.c
··· 382 382 { 383 383 struct b43legacy_pio_txpacket *packet, *tmp_packet; 384 384 385 - tasklet_disable(&queue->txtask); 385 + tasklet_kill(&queue->txtask); 386 386 387 387 list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) 388 388 free_txpacket(packet, 0);
+1 -1
drivers/net/wireless/rt2x00/rt2800lib.c
··· 2449 2449 /* 2450 2450 * Check if temperature compensation is supported. 2451 2451 */ 2452 - if (tssi_bounds[4] == 0xff) 2452 + if (tssi_bounds[4] == 0xff || step == 0xff) 2453 2453 return 0; 2454 2454 2455 2455 /*
-3
drivers/pci/bus.c
··· 320 320 } else 321 321 next = dev->bus_list.next; 322 322 323 - /* Run device routines with the device locked */ 324 - device_lock(&dev->dev); 325 323 retval = cb(dev, userdata); 326 - device_unlock(&dev->dev); 327 324 if (retval) 328 325 break; 329 326 }
+2 -10
drivers/pci/pci-driver.c
··· 398 398 struct pci_dev *pci_dev = to_pci_dev(dev); 399 399 struct pci_driver *drv = pci_dev->driver; 400 400 401 + pm_runtime_resume(dev); 402 + 401 403 if (drv && drv->shutdown) 402 404 drv->shutdown(pci_dev); 403 405 pci_msi_shutdown(pci_dev); ··· 410 408 * continue to do DMA 411 409 */ 412 410 pci_disable_device(pci_dev); 413 - 414 - /* 415 - * Devices may be enabled to wake up by runtime PM, but they need not 416 - * be supposed to wake up the system from its "power off" state (e.g. 417 - * ACPI S5). Therefore disable wakeup for all devices that aren't 418 - * supposed to wake up the system at this point. The state argument 419 - * will be ignored by pci_enable_wake(). 420 - */ 421 - if (!device_may_wakeup(dev)) 422 - pci_enable_wake(pci_dev, PCI_UNKNOWN, false); 423 411 } 424 412 425 413 #ifdef CONFIG_PM
-34
drivers/pci/pci-sysfs.c
··· 458 458 } 459 459 struct device_attribute vga_attr = __ATTR_RO(boot_vga); 460 460 461 - static void 462 - pci_config_pm_runtime_get(struct pci_dev *pdev) 463 - { 464 - struct device *dev = &pdev->dev; 465 - struct device *parent = dev->parent; 466 - 467 - if (parent) 468 - pm_runtime_get_sync(parent); 469 - pm_runtime_get_noresume(dev); 470 - /* 471 - * pdev->current_state is set to PCI_D3cold during suspending, 472 - * so wait until suspending completes 473 - */ 474 - pm_runtime_barrier(dev); 475 - /* 476 - * Only need to resume devices in D3cold, because config 477 - * registers are still accessible for devices suspended but 478 - * not in D3cold. 479 - */ 480 - if (pdev->current_state == PCI_D3cold) 481 - pm_runtime_resume(dev); 482 - } 483 - 484 - static void 485 - pci_config_pm_runtime_put(struct pci_dev *pdev) 486 - { 487 - struct device *dev = &pdev->dev; 488 - struct device *parent = dev->parent; 489 - 490 - pm_runtime_put(dev); 491 - if (parent) 492 - pm_runtime_put_sync(parent); 493 - } 494 - 495 461 static ssize_t 496 462 pci_read_config(struct file *filp, struct kobject *kobj, 497 463 struct bin_attribute *bin_attr,
+32
drivers/pci/pci.c
··· 1858 1858 } 1859 1859 EXPORT_SYMBOL_GPL(pci_dev_run_wake); 1860 1860 1861 + void pci_config_pm_runtime_get(struct pci_dev *pdev) 1862 + { 1863 + struct device *dev = &pdev->dev; 1864 + struct device *parent = dev->parent; 1865 + 1866 + if (parent) 1867 + pm_runtime_get_sync(parent); 1868 + pm_runtime_get_noresume(dev); 1869 + /* 1870 + * pdev->current_state is set to PCI_D3cold during suspending, 1871 + * so wait until suspending completes 1872 + */ 1873 + pm_runtime_barrier(dev); 1874 + /* 1875 + * Only need to resume devices in D3cold, because config 1876 + * registers are still accessible for devices suspended but 1877 + * not in D3cold. 1878 + */ 1879 + if (pdev->current_state == PCI_D3cold) 1880 + pm_runtime_resume(dev); 1881 + } 1882 + 1883 + void pci_config_pm_runtime_put(struct pci_dev *pdev) 1884 + { 1885 + struct device *dev = &pdev->dev; 1886 + struct device *parent = dev->parent; 1887 + 1888 + pm_runtime_put(dev); 1889 + if (parent) 1890 + pm_runtime_put_sync(parent); 1891 + } 1892 + 1861 1893 /** 1862 1894 * pci_pm_init - Initialize PM functions of given PCI device 1863 1895 * @dev: PCI device to handle.
+2
drivers/pci/pci.h
··· 72 72 extern int pci_finish_runtime_suspend(struct pci_dev *dev); 73 73 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 74 74 extern void pci_wakeup_bus(struct pci_bus *bus); 75 + extern void pci_config_pm_runtime_get(struct pci_dev *dev); 76 + extern void pci_config_pm_runtime_put(struct pci_dev *dev); 75 77 extern void pci_pm_init(struct pci_dev *dev); 76 78 extern void platform_pci_wakeup_init(struct pci_dev *dev); 77 79 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+16 -4
drivers/pci/pcie/aer/aerdrv_core.c
··· 213 213 struct aer_broadcast_data *result_data; 214 214 result_data = (struct aer_broadcast_data *) data; 215 215 216 + device_lock(&dev->dev); 216 217 dev->error_state = result_data->state; 217 218 218 219 if (!dev->driver || ··· 232 231 dev->driver ? 233 232 "no AER-aware driver" : "no driver"); 234 233 } 235 - return 0; 234 + goto out; 236 235 } 237 236 238 237 err_handler = dev->driver->err_handler; 239 238 vote = err_handler->error_detected(dev, result_data->state); 240 239 result_data->result = merge_result(result_data->result, vote); 240 + out: 241 + device_unlock(&dev->dev); 241 242 return 0; 242 243 } 243 244 ··· 250 247 struct aer_broadcast_data *result_data; 251 248 result_data = (struct aer_broadcast_data *) data; 252 249 250 + device_lock(&dev->dev); 253 251 if (!dev->driver || 254 252 !dev->driver->err_handler || 255 253 !dev->driver->err_handler->mmio_enabled) 256 - return 0; 254 + goto out; 257 255 258 256 err_handler = dev->driver->err_handler; 259 257 vote = err_handler->mmio_enabled(dev); 260 258 result_data->result = merge_result(result_data->result, vote); 259 + out: 260 + device_unlock(&dev->dev); 261 261 return 0; 262 262 } 263 263 ··· 271 265 struct aer_broadcast_data *result_data; 272 266 result_data = (struct aer_broadcast_data *) data; 273 267 268 + device_lock(&dev->dev); 274 269 if (!dev->driver || 275 270 !dev->driver->err_handler || 276 271 !dev->driver->err_handler->slot_reset) 277 - return 0; 272 + goto out; 278 273 279 274 err_handler = dev->driver->err_handler; 280 275 vote = err_handler->slot_reset(dev); 281 276 result_data->result = merge_result(result_data->result, vote); 277 + out: 278 + device_unlock(&dev->dev); 282 279 return 0; 283 280 } 284 281 ··· 289 280 { 290 281 const struct pci_error_handlers *err_handler; 291 282 283 + device_lock(&dev->dev); 292 284 dev->error_state = pci_channel_io_normal; 293 285 294 286 if (!dev->driver || 295 287 !dev->driver->err_handler || 296 288 !dev->driver->err_handler->resume) 297 - 
return 0; 289 + goto out; 298 290 299 291 err_handler = dev->driver->err_handler; 300 292 err_handler->resume(dev); 293 + out: 294 + device_unlock(&dev->dev); 301 295 return 0; 302 296 } 303 297
+2 -1
drivers/pci/pcie/portdrv_core.c
··· 272 272 } 273 273 274 274 /* Hot-Plug Capable */ 275 - if (cap_mask & PCIE_PORT_SERVICE_HP) { 275 + if ((cap_mask & PCIE_PORT_SERVICE_HP) && 276 + dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT) { 276 277 pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32); 277 278 if (reg32 & PCI_EXP_SLTCAP_HPC) { 278 279 services |= PCIE_PORT_SERVICE_HP;
+8
drivers/pci/proc.c
··· 76 76 if (!access_ok(VERIFY_WRITE, buf, cnt)) 77 77 return -EINVAL; 78 78 79 + pci_config_pm_runtime_get(dev); 80 + 79 81 if ((pos & 1) && cnt) { 80 82 unsigned char val; 81 83 pci_user_read_config_byte(dev, pos, &val); ··· 123 121 cnt--; 124 122 } 125 123 124 + pci_config_pm_runtime_put(dev); 125 + 126 126 *ppos = pos; 127 127 return nbytes; 128 128 } ··· 149 145 150 146 if (!access_ok(VERIFY_READ, buf, cnt)) 151 147 return -EINVAL; 148 + 149 + pci_config_pm_runtime_get(dev); 152 150 153 151 if ((pos & 1) && cnt) { 154 152 unsigned char val; ··· 196 190 pos++; 197 191 cnt--; 198 192 } 193 + 194 + pci_config_pm_runtime_put(dev); 199 195 200 196 *ppos = pos; 201 197 i_size_write(ino, dp->size);
+2
drivers/pinctrl/Kconfig
··· 179 179 180 180 config PINCTRL_SAMSUNG 181 181 bool "Samsung pinctrl driver" 182 + depends on OF && GPIOLIB 182 183 select PINMUX 183 184 select PINCONF 184 185 185 186 config PINCTRL_EXYNOS4 186 187 bool "Pinctrl driver data for Exynos4 SoC" 188 + depends on OF && GPIOLIB 187 189 select PINCTRL_SAMSUNG 188 190 189 191 config PINCTRL_MVEBU
+1 -1
drivers/pinctrl/spear/pinctrl-spear.c
··· 244 244 else 245 245 temp = ~muxreg->val; 246 246 247 - val |= temp; 247 + val |= muxreg->mask & temp; 248 248 pmx_writel(pmx, val, muxreg->reg); 249 249 } 250 250 }
+320 -45
drivers/pinctrl/spear/pinctrl-spear1310.c
··· 25 25 }; 26 26 27 27 /* registers */ 28 - #define PERIP_CFG 0x32C 29 - #define MCIF_SEL_SHIFT 3 28 + #define PERIP_CFG 0x3B0 29 + #define MCIF_SEL_SHIFT 5 30 30 #define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT) 31 31 #define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT) 32 32 #define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT) ··· 164 164 #define PMX_SSP0_CS0_MASK (1 << 29) 165 165 #define PMX_SSP0_CS1_2_MASK (1 << 30) 166 166 167 + #define PAD_DIRECTION_SEL_0 0x65C 168 + #define PAD_DIRECTION_SEL_1 0x660 169 + #define PAD_DIRECTION_SEL_2 0x664 170 + 167 171 /* combined macros */ 168 172 #define PMX_GMII_MASK (PMX_GMIICLK_MASK | \ 169 173 PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \ ··· 241 237 .reg = PAD_FUNCTION_EN_0, 242 238 .mask = PMX_I2C0_MASK, 243 239 .val = PMX_I2C0_MASK, 240 + }, { 241 + .reg = PAD_DIRECTION_SEL_0, 242 + .mask = PMX_I2C0_MASK, 243 + .val = PMX_I2C0_MASK, 244 244 }, 245 245 }; 246 246 ··· 277 269 .reg = PAD_FUNCTION_EN_0, 278 270 .mask = PMX_SSP0_MASK, 279 271 .val = PMX_SSP0_MASK, 272 + }, { 273 + .reg = PAD_DIRECTION_SEL_0, 274 + .mask = PMX_SSP0_MASK, 275 + .val = PMX_SSP0_MASK, 280 276 }, 281 277 }; 282 278 ··· 306 294 .reg = PAD_FUNCTION_EN_2, 307 295 .mask = PMX_SSP0_CS0_MASK, 308 296 .val = PMX_SSP0_CS0_MASK, 297 + }, { 298 + .reg = PAD_DIRECTION_SEL_2, 299 + .mask = PMX_SSP0_CS0_MASK, 300 + .val = PMX_SSP0_CS0_MASK, 309 301 }, 310 302 }; 311 303 ··· 333 317 static struct spear_muxreg ssp0_cs1_2_muxreg[] = { 334 318 { 335 319 .reg = PAD_FUNCTION_EN_2, 320 + .mask = PMX_SSP0_CS1_2_MASK, 321 + .val = PMX_SSP0_CS1_2_MASK, 322 + }, { 323 + .reg = PAD_DIRECTION_SEL_2, 336 324 .mask = PMX_SSP0_CS1_2_MASK, 337 325 .val = PMX_SSP0_CS1_2_MASK, 338 326 }, ··· 372 352 .reg = PAD_FUNCTION_EN_0, 373 353 .mask = PMX_I2S0_MASK, 374 354 .val = PMX_I2S0_MASK, 355 + }, { 356 + .reg = PAD_DIRECTION_SEL_0, 357 + .mask = PMX_I2S0_MASK, 358 + .val = PMX_I2S0_MASK, 375 359 }, 376 360 }; 377 361 ··· 406 382 static struct spear_muxreg i2s1_muxreg[] = { 407 383 { 408 384 .reg = 
PAD_FUNCTION_EN_1, 385 + .mask = PMX_I2S1_MASK, 386 + .val = PMX_I2S1_MASK, 387 + }, { 388 + .reg = PAD_DIRECTION_SEL_1, 409 389 .mask = PMX_I2S1_MASK, 410 390 .val = PMX_I2S1_MASK, 411 391 }, ··· 446 418 .reg = PAD_FUNCTION_EN_0, 447 419 .mask = PMX_CLCD1_MASK, 448 420 .val = PMX_CLCD1_MASK, 421 + }, { 422 + .reg = PAD_DIRECTION_SEL_0, 423 + .mask = PMX_CLCD1_MASK, 424 + .val = PMX_CLCD1_MASK, 449 425 }, 450 426 }; 451 427 ··· 475 443 .reg = PAD_FUNCTION_EN_1, 476 444 .mask = PMX_CLCD2_MASK, 477 445 .val = PMX_CLCD2_MASK, 446 + }, { 447 + .reg = PAD_DIRECTION_SEL_1, 448 + .mask = PMX_CLCD2_MASK, 449 + .val = PMX_CLCD2_MASK, 478 450 }, 479 451 }; 480 452 ··· 497 461 .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux), 498 462 }; 499 463 500 - static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res" }; 464 + static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" }; 501 465 static struct spear_function clcd_function = { 502 466 .name = "clcd", 503 467 .groups = clcd_grps, ··· 513 477 .val = PMX_EGPIO_0_GRP_MASK, 514 478 }, { 515 479 .reg = PAD_FUNCTION_EN_1, 480 + .mask = PMX_EGPIO_1_GRP_MASK, 481 + .val = PMX_EGPIO_1_GRP_MASK, 482 + }, { 483 + .reg = PAD_DIRECTION_SEL_0, 484 + .mask = PMX_EGPIO_0_GRP_MASK, 485 + .val = PMX_EGPIO_0_GRP_MASK, 486 + }, { 487 + .reg = PAD_DIRECTION_SEL_1, 516 488 .mask = PMX_EGPIO_1_GRP_MASK, 517 489 .val = PMX_EGPIO_1_GRP_MASK, 518 490 }, ··· 555 511 .reg = PAD_FUNCTION_EN_0, 556 512 .mask = PMX_SMI_MASK, 557 513 .val = PMX_SMI_MASK, 514 + }, { 515 + .reg = PAD_DIRECTION_SEL_0, 516 + .mask = PMX_SMI_MASK, 517 + .val = PMX_SMI_MASK, 558 518 }, 559 519 }; 560 520 ··· 585 537 .val = PMX_SMI_MASK, 586 538 }, { 587 539 .reg = PAD_FUNCTION_EN_1, 540 + .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, 541 + .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, 542 + }, { 543 + .reg = PAD_DIRECTION_SEL_0, 544 + .mask = PMX_SMI_MASK, 545 + .val = PMX_SMI_MASK, 546 + }, { 547 + .reg = PAD_DIRECTION_SEL_1, 588 548 .mask = 
PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, 589 549 .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, 590 550 }, ··· 627 571 static struct spear_muxreg gmii_muxreg[] = { 628 572 { 629 573 .reg = PAD_FUNCTION_EN_0, 574 + .mask = PMX_GMII_MASK, 575 + .val = PMX_GMII_MASK, 576 + }, { 577 + .reg = PAD_DIRECTION_SEL_0, 630 578 .mask = PMX_GMII_MASK, 631 579 .val = PMX_GMII_MASK, 632 580 }, ··· 675 615 .reg = PAD_FUNCTION_EN_2, 676 616 .mask = PMX_RGMII_REG2_MASK, 677 617 .val = 0, 618 + }, { 619 + .reg = PAD_DIRECTION_SEL_0, 620 + .mask = PMX_RGMII_REG0_MASK, 621 + .val = PMX_RGMII_REG0_MASK, 622 + }, { 623 + .reg = PAD_DIRECTION_SEL_1, 624 + .mask = PMX_RGMII_REG1_MASK, 625 + .val = PMX_RGMII_REG1_MASK, 626 + }, { 627 + .reg = PAD_DIRECTION_SEL_2, 628 + .mask = PMX_RGMII_REG2_MASK, 629 + .val = PMX_RGMII_REG2_MASK, 678 630 }, 679 631 }; 680 632 ··· 721 649 .reg = PAD_FUNCTION_EN_1, 722 650 .mask = PMX_SMII_0_1_2_MASK, 723 651 .val = 0, 652 + }, { 653 + .reg = PAD_DIRECTION_SEL_1, 654 + .mask = PMX_SMII_0_1_2_MASK, 655 + .val = PMX_SMII_0_1_2_MASK, 724 656 }, 725 657 }; 726 658 ··· 757 681 .reg = PAD_FUNCTION_EN_1, 758 682 .mask = PMX_NFCE2_MASK, 759 683 .val = 0, 684 + }, { 685 + .reg = PAD_DIRECTION_SEL_1, 686 + .mask = PMX_NFCE2_MASK, 687 + .val = PMX_NFCE2_MASK, 760 688 }, 761 689 }; 762 690 ··· 801 721 .reg = PAD_FUNCTION_EN_1, 802 722 .mask = PMX_NAND8BIT_1_MASK, 803 723 .val = PMX_NAND8BIT_1_MASK, 724 + }, { 725 + .reg = PAD_DIRECTION_SEL_0, 726 + .mask = PMX_NAND8BIT_0_MASK, 727 + .val = PMX_NAND8BIT_0_MASK, 728 + }, { 729 + .reg = PAD_DIRECTION_SEL_1, 730 + .mask = PMX_NAND8BIT_1_MASK, 731 + .val = PMX_NAND8BIT_1_MASK, 804 732 }, 805 733 }; 806 734 ··· 835 747 .reg = PAD_FUNCTION_EN_1, 836 748 .mask = PMX_NAND16BIT_1_MASK, 837 749 .val = PMX_NAND16BIT_1_MASK, 750 + }, { 751 + .reg = PAD_DIRECTION_SEL_1, 752 + .mask = PMX_NAND16BIT_1_MASK, 753 + .val = PMX_NAND16BIT_1_MASK, 838 754 }, 839 755 }; 840 756 ··· 862 770 static struct spear_muxreg nand_4_chips_muxreg[] = { 863 771 
{ 864 772 .reg = PAD_FUNCTION_EN_1, 773 + .mask = PMX_NAND_4CHIPS_MASK, 774 + .val = PMX_NAND_4CHIPS_MASK, 775 + }, { 776 + .reg = PAD_DIRECTION_SEL_1, 865 777 .mask = PMX_NAND_4CHIPS_MASK, 866 778 .val = PMX_NAND_4CHIPS_MASK, 867 779 }, ··· 929 833 .reg = PAD_FUNCTION_EN_1, 930 834 .mask = PMX_KBD_ROWCOL68_MASK, 931 835 .val = PMX_KBD_ROWCOL68_MASK, 836 + }, { 837 + .reg = PAD_DIRECTION_SEL_1, 838 + .mask = PMX_KBD_ROWCOL68_MASK, 839 + .val = PMX_KBD_ROWCOL68_MASK, 932 840 }, 933 841 }; 934 842 ··· 966 866 .reg = PAD_FUNCTION_EN_0, 967 867 .mask = PMX_UART0_MASK, 968 868 .val = PMX_UART0_MASK, 869 + }, { 870 + .reg = PAD_DIRECTION_SEL_0, 871 + .mask = PMX_UART0_MASK, 872 + .val = PMX_UART0_MASK, 969 873 }, 970 874 }; 971 875 ··· 993 889 static struct spear_muxreg uart0_modem_muxreg[] = { 994 890 { 995 891 .reg = PAD_FUNCTION_EN_1, 892 + .mask = PMX_UART0_MODEM_MASK, 893 + .val = PMX_UART0_MODEM_MASK, 894 + }, { 895 + .reg = PAD_DIRECTION_SEL_1, 996 896 .mask = PMX_UART0_MODEM_MASK, 997 897 .val = PMX_UART0_MODEM_MASK, 998 898 }, ··· 1031 923 .reg = PAD_FUNCTION_EN_1, 1032 924 .mask = PMX_GPT0_TMR0_MASK, 1033 925 .val = PMX_GPT0_TMR0_MASK, 926 + }, { 927 + .reg = PAD_DIRECTION_SEL_1, 928 + .mask = PMX_GPT0_TMR0_MASK, 929 + .val = PMX_GPT0_TMR0_MASK, 1034 930 }, 1035 931 }; 1036 932 ··· 1058 946 static struct spear_muxreg gpt0_tmr1_muxreg[] = { 1059 947 { 1060 948 .reg = PAD_FUNCTION_EN_1, 949 + .mask = PMX_GPT0_TMR1_MASK, 950 + .val = PMX_GPT0_TMR1_MASK, 951 + }, { 952 + .reg = PAD_DIRECTION_SEL_1, 1061 953 .mask = PMX_GPT0_TMR1_MASK, 1062 954 .val = PMX_GPT0_TMR1_MASK, 1063 955 }, ··· 1096 980 .reg = PAD_FUNCTION_EN_1, 1097 981 .mask = PMX_GPT1_TMR0_MASK, 1098 982 .val = PMX_GPT1_TMR0_MASK, 983 + }, { 984 + .reg = PAD_DIRECTION_SEL_1, 985 + .mask = PMX_GPT1_TMR0_MASK, 986 + .val = PMX_GPT1_TMR0_MASK, 1099 987 }, 1100 988 }; 1101 989 ··· 1123 1003 static struct spear_muxreg gpt1_tmr1_muxreg[] = { 1124 1004 { 1125 1005 .reg = PAD_FUNCTION_EN_1, 1006 + .mask = 
PMX_GPT1_TMR1_MASK, 1007 + .val = PMX_GPT1_TMR1_MASK, 1008 + }, { 1009 + .reg = PAD_DIRECTION_SEL_1, 1126 1010 .mask = PMX_GPT1_TMR1_MASK, 1127 1011 .val = PMX_GPT1_TMR1_MASK, 1128 1012 }, ··· 1171 1047 .val = PMX_MCIFALL_1_MASK, \ 1172 1048 }, { \ 1173 1049 .reg = PAD_FUNCTION_EN_2, \ 1050 + .mask = PMX_MCIFALL_2_MASK, \ 1051 + .val = PMX_MCIFALL_2_MASK, \ 1052 + }, { \ 1053 + .reg = PAD_DIRECTION_SEL_0, \ 1054 + .mask = PMX_MCI_DATA8_15_MASK, \ 1055 + .val = PMX_MCI_DATA8_15_MASK, \ 1056 + }, { \ 1057 + .reg = PAD_DIRECTION_SEL_1, \ 1058 + .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \ 1059 + PMX_NFWPRT2_MASK, \ 1060 + .val = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \ 1061 + PMX_NFWPRT2_MASK, \ 1062 + }, { \ 1063 + .reg = PAD_DIRECTION_SEL_2, \ 1174 1064 .mask = PMX_MCIFALL_2_MASK, \ 1175 1065 .val = PMX_MCIFALL_2_MASK, \ 1176 1066 } ··· 1292 1154 .reg = PAD_FUNCTION_EN_2, 1293 1155 .mask = PMX_TOUCH_XY_MASK, 1294 1156 .val = PMX_TOUCH_XY_MASK, 1157 + }, { 1158 + .reg = PAD_DIRECTION_SEL_2, 1159 + .mask = PMX_TOUCH_XY_MASK, 1160 + .val = PMX_TOUCH_XY_MASK, 1295 1161 }, 1296 1162 }; 1297 1163 ··· 1329 1187 .reg = PAD_FUNCTION_EN_0, 1330 1188 .mask = PMX_I2C0_MASK, 1331 1189 .val = 0, 1190 + }, { 1191 + .reg = PAD_DIRECTION_SEL_0, 1192 + .mask = PMX_I2C0_MASK, 1193 + .val = PMX_I2C0_MASK, 1332 1194 }, 1333 1195 }; 1334 1196 ··· 1359 1213 .mask = PMX_MCIDATA1_MASK | 1360 1214 PMX_MCIDATA2_MASK, 1361 1215 .val = 0, 1216 + }, { 1217 + .reg = PAD_DIRECTION_SEL_1, 1218 + .mask = PMX_MCIDATA1_MASK | 1219 + PMX_MCIDATA2_MASK, 1220 + .val = PMX_MCIDATA1_MASK | 1221 + PMX_MCIDATA2_MASK, 1362 1222 }, 1363 1223 }; 1364 1224 ··· 1398 1246 .reg = PAD_FUNCTION_EN_0, 1399 1247 .mask = PMX_I2S0_MASK, 1400 1248 .val = 0, 1249 + }, { 1250 + .reg = PAD_DIRECTION_SEL_0, 1251 + .mask = PMX_I2S0_MASK, 1252 + .val = PMX_I2S0_MASK, 1401 1253 }, 1402 1254 }; 1403 1255 ··· 1434 1278 .reg = PAD_FUNCTION_EN_0, 1435 1279 .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK, 1436 1280 .val = 0, 1281 + }, { 
1282 + .reg = PAD_DIRECTION_SEL_0, 1283 + .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK, 1284 + .val = PMX_I2S0_MASK | PMX_CLCD1_MASK, 1437 1285 }, 1438 1286 }; 1439 1287 ··· 1470 1310 .reg = PAD_FUNCTION_EN_0, 1471 1311 .mask = PMX_CLCD1_MASK, 1472 1312 .val = 0, 1313 + }, { 1314 + .reg = PAD_DIRECTION_SEL_0, 1315 + .mask = PMX_CLCD1_MASK, 1316 + .val = PMX_CLCD1_MASK, 1473 1317 }, 1474 1318 }; 1475 1319 ··· 1508 1344 .reg = PAD_FUNCTION_EN_0, 1509 1345 .mask = PMX_CLCD1_MASK, 1510 1346 .val = 0, 1347 + }, { 1348 + .reg = PAD_DIRECTION_SEL_0, 1349 + .mask = PMX_CLCD1_MASK, 1350 + .val = PMX_CLCD1_MASK, 1511 1351 }, 1512 1352 }; 1513 1353 ··· 1544 1376 .reg = PAD_FUNCTION_EN_0, 1545 1377 .mask = PMX_CLCD1_MASK, 1546 1378 .val = 0, 1379 + }, { 1380 + .reg = PAD_DIRECTION_SEL_0, 1381 + .mask = PMX_CLCD1_MASK, 1382 + .val = PMX_CLCD1_MASK, 1547 1383 }, 1548 1384 }; 1549 1385 ··· 1581 1409 .reg = PAD_FUNCTION_EN_0, 1582 1410 .mask = PMX_CLCD1_MASK | PMX_SMI_MASK, 1583 1411 .val = 0, 1412 + }, { 1413 + .reg = PAD_DIRECTION_SEL_0, 1414 + .mask = PMX_CLCD1_MASK | PMX_SMI_MASK, 1415 + .val = PMX_CLCD1_MASK | PMX_SMI_MASK, 1584 1416 }, 1585 1417 }; 1586 1418 ··· 1611 1435 .reg = PAD_FUNCTION_EN_1, 1612 1436 .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, 1613 1437 .val = 0, 1438 + }, { 1439 + .reg = PAD_DIRECTION_SEL_1, 1440 + .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, 1441 + .val = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, 1614 1442 }, 1615 1443 }; 1616 1444 ··· 1649 1469 .reg = PAD_FUNCTION_EN_0, 1650 1470 .mask = PMX_SMI_MASK, 1651 1471 .val = 0, 1472 + }, { 1473 + .reg = PAD_DIRECTION_SEL_0, 1474 + .mask = PMX_SMI_MASK, 1475 + .val = PMX_SMI_MASK, 1652 1476 }, 1653 1477 }; 1654 1478 ··· 1683 1499 .reg = PAD_FUNCTION_EN_2, 1684 1500 .mask = PMX_MCIDATA5_MASK, 1685 1501 .val = 0, 1502 + }, { 1503 + .reg = PAD_DIRECTION_SEL_1, 1504 + .mask = PMX_MCIDATA4_MASK, 1505 + .val = PMX_MCIDATA4_MASK, 1506 + }, { 1507 + .reg = PAD_DIRECTION_SEL_2, 1508 + .mask = PMX_MCIDATA5_MASK, 1509 + .val = 
PMX_MCIDATA5_MASK, 1686 1510 }, 1687 1511 }; 1688 1512 ··· 1718 1526 .mask = PMX_MCIDATA6_MASK | 1719 1527 PMX_MCIDATA7_MASK, 1720 1528 .val = 0, 1529 + }, { 1530 + .reg = PAD_DIRECTION_SEL_2, 1531 + .mask = PMX_MCIDATA6_MASK | 1532 + PMX_MCIDATA7_MASK, 1533 + .val = PMX_MCIDATA6_MASK | 1534 + PMX_MCIDATA7_MASK, 1721 1535 }, 1722 1536 }; 1723 1537 ··· 1758 1560 .reg = PAD_FUNCTION_EN_1, 1759 1561 .mask = PMX_KBD_ROWCOL25_MASK, 1760 1562 .val = 0, 1563 + }, { 1564 + .reg = PAD_DIRECTION_SEL_1, 1565 + .mask = PMX_KBD_ROWCOL25_MASK, 1566 + .val = PMX_KBD_ROWCOL25_MASK, 1761 1567 }, 1762 1568 }; 1763 1569 ··· 1789 1587 .mask = PMX_MCIIORDRE_MASK | 1790 1588 PMX_MCIIOWRWE_MASK, 1791 1589 .val = 0, 1590 + }, { 1591 + .reg = PAD_DIRECTION_SEL_2, 1592 + .mask = PMX_MCIIORDRE_MASK | 1593 + PMX_MCIIOWRWE_MASK, 1594 + .val = PMX_MCIIORDRE_MASK | 1595 + PMX_MCIIOWRWE_MASK, 1792 1596 }, 1793 1597 }; 1794 1598 ··· 1821 1613 .mask = PMX_MCIRESETCF_MASK | 1822 1614 PMX_MCICS0CE_MASK, 1823 1615 .val = 0, 1616 + }, { 1617 + .reg = PAD_DIRECTION_SEL_2, 1618 + .mask = PMX_MCIRESETCF_MASK | 1619 + PMX_MCICS0CE_MASK, 1620 + .val = PMX_MCIRESETCF_MASK | 1621 + PMX_MCICS0CE_MASK, 1824 1622 }, 1825 1623 }; 1826 1624 ··· 1865 1651 .reg = PAD_FUNCTION_EN_1, 1866 1652 .mask = PMX_NFRSTPWDWN3_MASK, 1867 1653 .val = 0, 1654 + }, { 1655 + .reg = PAD_DIRECTION_SEL_0, 1656 + .mask = PMX_NFRSTPWDWN2_MASK, 1657 + .val = PMX_NFRSTPWDWN2_MASK, 1658 + }, { 1659 + .reg = PAD_DIRECTION_SEL_1, 1660 + .mask = PMX_NFRSTPWDWN3_MASK, 1661 + .val = PMX_NFRSTPWDWN3_MASK, 1868 1662 }, 1869 1663 }; 1870 1664 ··· 1899 1677 .reg = PAD_FUNCTION_EN_2, 1900 1678 .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, 1901 1679 .val = 0, 1680 + }, { 1681 + .reg = PAD_DIRECTION_SEL_2, 1682 + .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, 1683 + .val = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, 1902 1684 }, 1903 1685 }; 1904 1686 ··· 1937 1711 .reg = PAD_FUNCTION_EN_2, 1938 1712 .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, 1939 
1713 .val = 0, 1714 + }, { 1715 + .reg = PAD_DIRECTION_SEL_2, 1716 + .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, 1717 + .val = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, 1940 1718 }, 1941 1719 }; 1942 1720 ··· 1967 1737 .reg = PAD_FUNCTION_EN_1, 1968 1738 .mask = PMX_KBD_ROWCOL25_MASK, 1969 1739 .val = 0, 1740 + }, { 1741 + .reg = PAD_DIRECTION_SEL_1, 1742 + .mask = PMX_KBD_ROWCOL25_MASK, 1743 + .val = PMX_KBD_ROWCOL25_MASK, 1970 1744 }, 1971 1745 }; 1972 1746 ··· 1997 1763 .ngroups = ARRAY_SIZE(can1_grps), 1998 1764 }; 1999 1765 2000 - /* Pad multiplexing for pci device */ 2001 - static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 1766 + /* Pad multiplexing for (ras-ip) pci device */ 1767 + static const unsigned pci_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 2002 1768 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 2003 1769 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 2004 1770 55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 }; 2005 - #define PCI_SATA_MUXREG \ 2006 - { \ 2007 - .reg = PAD_FUNCTION_EN_0, \ 2008 - .mask = PMX_MCI_DATA8_15_MASK, \ 2009 - .val = 0, \ 2010 - }, { \ 2011 - .reg = PAD_FUNCTION_EN_1, \ 2012 - .mask = PMX_PCI_REG1_MASK, \ 2013 - .val = 0, \ 2014 - }, { \ 2015 - .reg = PAD_FUNCTION_EN_2, \ 2016 - .mask = PMX_PCI_REG2_MASK, \ 2017 - .val = 0, \ 2018 - } 2019 1771 2020 - /* pad multiplexing for pcie0 device */ 1772 + static struct spear_muxreg pci_muxreg[] = { 1773 + { 1774 + .reg = PAD_FUNCTION_EN_0, 1775 + .mask = PMX_MCI_DATA8_15_MASK, 1776 + .val = 0, 1777 + }, { 1778 + .reg = PAD_FUNCTION_EN_1, 1779 + .mask = PMX_PCI_REG1_MASK, 1780 + .val = 0, 1781 + }, { 1782 + .reg = PAD_FUNCTION_EN_2, 1783 + .mask = PMX_PCI_REG2_MASK, 1784 + .val = 0, 1785 + }, { 1786 + .reg = PAD_DIRECTION_SEL_0, 1787 + .mask = PMX_MCI_DATA8_15_MASK, 1788 + .val = PMX_MCI_DATA8_15_MASK, 1789 + }, { 1790 + .reg = PAD_DIRECTION_SEL_1, 1791 + .mask = PMX_PCI_REG1_MASK, 1792 
+ .val = PMX_PCI_REG1_MASK, 1793 + }, { 1794 + .reg = PAD_DIRECTION_SEL_2, 1795 + .mask = PMX_PCI_REG2_MASK, 1796 + .val = PMX_PCI_REG2_MASK, 1797 + }, 1798 + }; 1799 + 1800 + static struct spear_modemux pci_modemux[] = { 1801 + { 1802 + .muxregs = pci_muxreg, 1803 + .nmuxregs = ARRAY_SIZE(pci_muxreg), 1804 + }, 1805 + }; 1806 + 1807 + static struct spear_pingroup pci_pingroup = { 1808 + .name = "pci_grp", 1809 + .pins = pci_pins, 1810 + .npins = ARRAY_SIZE(pci_pins), 1811 + .modemuxs = pci_modemux, 1812 + .nmodemuxs = ARRAY_SIZE(pci_modemux), 1813 + }; 1814 + 1815 + static const char *const pci_grps[] = { "pci_grp" }; 1816 + static struct spear_function pci_function = { 1817 + .name = "pci", 1818 + .groups = pci_grps, 1819 + .ngroups = ARRAY_SIZE(pci_grps), 1820 + }; 1821 + 1822 + /* pad multiplexing for (fix-part) pcie0 device */ 2021 1823 static struct spear_muxreg pcie0_muxreg[] = { 2022 - PCI_SATA_MUXREG, 2023 1824 { 2024 1825 .reg = PCIE_SATA_CFG, 2025 1826 .mask = PCIE_CFG_VAL(0), ··· 2071 1802 2072 1803 static struct spear_pingroup pcie0_pingroup = { 2073 1804 .name = "pcie0_grp", 2074 - .pins = pci_sata_pins, 2075 - .npins = ARRAY_SIZE(pci_sata_pins), 2076 1805 .modemuxs = pcie0_modemux, 2077 1806 .nmodemuxs = ARRAY_SIZE(pcie0_modemux), 2078 1807 }; 2079 1808 2080 - /* pad multiplexing for pcie1 device */ 1809 + /* pad multiplexing for (fix-part) pcie1 device */ 2081 1810 static struct spear_muxreg pcie1_muxreg[] = { 2082 - PCI_SATA_MUXREG, 2083 1811 { 2084 1812 .reg = PCIE_SATA_CFG, 2085 1813 .mask = PCIE_CFG_VAL(1), ··· 2093 1827 2094 1828 static struct spear_pingroup pcie1_pingroup = { 2095 1829 .name = "pcie1_grp", 2096 - .pins = pci_sata_pins, 2097 - .npins = ARRAY_SIZE(pci_sata_pins), 2098 1830 .modemuxs = pcie1_modemux, 2099 1831 .nmodemuxs = ARRAY_SIZE(pcie1_modemux), 2100 1832 }; 2101 1833 2102 - /* pad multiplexing for pcie2 device */ 1834 + /* pad multiplexing for (fix-part) pcie2 device */ 2103 1835 static struct spear_muxreg pcie2_muxreg[] = { 
2104 - PCI_SATA_MUXREG, 2105 1836 { 2106 1837 .reg = PCIE_SATA_CFG, 2107 1838 .mask = PCIE_CFG_VAL(2), ··· 2115 1852 2116 1853 static struct spear_pingroup pcie2_pingroup = { 2117 1854 .name = "pcie2_grp", 2118 - .pins = pci_sata_pins, 2119 - .npins = ARRAY_SIZE(pci_sata_pins), 2120 1855 .modemuxs = pcie2_modemux, 2121 1856 .nmodemuxs = ARRAY_SIZE(pcie2_modemux), 2122 1857 }; 2123 1858 2124 - static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" }; 2125 - static struct spear_function pci_function = { 2126 - .name = "pci", 2127 - .groups = pci_grps, 2128 - .ngroups = ARRAY_SIZE(pci_grps), 1859 + static const char *const pcie_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" 1860 + }; 1861 + static struct spear_function pcie_function = { 1862 + .name = "pci_express", 1863 + .groups = pcie_grps, 1864 + .ngroups = ARRAY_SIZE(pcie_grps), 2129 1865 }; 2130 1866 2131 1867 /* pad multiplexing for sata0 device */ 2132 1868 static struct spear_muxreg sata0_muxreg[] = { 2133 - PCI_SATA_MUXREG, 2134 1869 { 2135 1870 .reg = PCIE_SATA_CFG, 2136 1871 .mask = SATA_CFG_VAL(0), ··· 2145 1884 2146 1885 static struct spear_pingroup sata0_pingroup = { 2147 1886 .name = "sata0_grp", 2148 - .pins = pci_sata_pins, 2149 - .npins = ARRAY_SIZE(pci_sata_pins), 2150 1887 .modemuxs = sata0_modemux, 2151 1888 .nmodemuxs = ARRAY_SIZE(sata0_modemux), 2152 1889 }; 2153 1890 2154 1891 /* pad multiplexing for sata1 device */ 2155 1892 static struct spear_muxreg sata1_muxreg[] = { 2156 - PCI_SATA_MUXREG, 2157 1893 { 2158 1894 .reg = PCIE_SATA_CFG, 2159 1895 .mask = SATA_CFG_VAL(1), ··· 2167 1909 2168 1910 static struct spear_pingroup sata1_pingroup = { 2169 1911 .name = "sata1_grp", 2170 - .pins = pci_sata_pins, 2171 - .npins = ARRAY_SIZE(pci_sata_pins), 2172 1912 .modemuxs = sata1_modemux, 2173 1913 .nmodemuxs = ARRAY_SIZE(sata1_modemux), 2174 1914 }; 2175 1915 2176 1916 /* pad multiplexing for sata2 device */ 2177 1917 static struct spear_muxreg sata2_muxreg[] = { 2178 - 
PCI_SATA_MUXREG, 2179 1918 { 2180 1919 .reg = PCIE_SATA_CFG, 2181 1920 .mask = SATA_CFG_VAL(2), ··· 2189 1934 2190 1935 static struct spear_pingroup sata2_pingroup = { 2191 1936 .name = "sata2_grp", 2192 - .pins = pci_sata_pins, 2193 - .npins = ARRAY_SIZE(pci_sata_pins), 2194 1937 .modemuxs = sata2_modemux, 2195 1938 .nmodemuxs = ARRAY_SIZE(sata2_modemux), 2196 1939 }; ··· 2210 1957 PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | 2211 1958 PMX_NFCE2_MASK, 2212 1959 .val = 0, 1960 + }, { 1961 + .reg = PAD_DIRECTION_SEL_1, 1962 + .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK | 1963 + PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | 1964 + PMX_NFCE2_MASK, 1965 + .val = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK | 1966 + PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | 1967 + PMX_NFCE2_MASK, 2213 1968 }, 2214 1969 }; 2215 1970 ··· 2244 1983 .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | 2245 1984 PMX_MCICECF_MASK | PMX_MCICEXD_MASK, 2246 1985 .val = 0, 1986 + }, { 1987 + .reg = PAD_DIRECTION_SEL_2, 1988 + .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | 1989 + PMX_MCICECF_MASK | PMX_MCICEXD_MASK, 1990 + .val = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | 1991 + PMX_MCICECF_MASK | PMX_MCICEXD_MASK, 2247 1992 }, 2248 1993 }; 2249 1994 ··· 2284 2017 .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK 2285 2018 | PMX_MCILEDS_MASK, 2286 2019 .val = 0, 2020 + }, { 2021 + .reg = PAD_DIRECTION_SEL_2, 2022 + .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK 2023 + | PMX_MCILEDS_MASK, 2024 + .val = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK 2025 + | PMX_MCILEDS_MASK, 2287 2026 }, 2288 2027 }; 2289 2028 ··· 2366 2093 &can0_dis_sd_pingroup, 2367 2094 &can1_dis_sd_pingroup, 2368 2095 &can1_dis_kbd_pingroup, 2096 + &pci_pingroup, 2369 2097 &pcie0_pingroup, 2370 2098 &pcie1_pingroup, 2371 2099 &pcie2_pingroup, ··· 2412 2138 &can0_function, 2413 2139 &can1_function, 2414 2140 &pci_function, 2141 + 
&pcie_function, 2415 2142 &sata_function, 2416 2143 &ssp1_function, 2417 2144 &gpt64_function,
+39 -2
drivers/pinctrl/spear/pinctrl-spear1340.c
··· 213 213 * Pad multiplexing for making all pads as gpio's. This is done to override the 214 214 * values passed from bootloader and start from scratch. 215 215 */ 216 - static const unsigned pads_as_gpio_pins[] = { 251 }; 216 + static const unsigned pads_as_gpio_pins[] = { 12, 88, 89, 251 }; 217 217 static struct spear_muxreg pads_as_gpio_muxreg[] = { 218 218 { 219 219 .reg = PAD_FUNCTION_EN_1, ··· 1692 1692 .nmodemuxs = ARRAY_SIZE(clcd_modemux), 1693 1693 }; 1694 1694 1695 - static const char *const clcd_grps[] = { "clcd_grp" }; 1695 + /* Disable cld runtime to save panel damage */ 1696 + static struct spear_muxreg clcd_sleep_muxreg[] = { 1697 + { 1698 + .reg = PAD_SHARED_IP_EN_1, 1699 + .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK, 1700 + .val = 0, 1701 + }, { 1702 + .reg = PAD_FUNCTION_EN_5, 1703 + .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK, 1704 + .val = 0x0, 1705 + }, { 1706 + .reg = PAD_FUNCTION_EN_6, 1707 + .mask = CLCD_AND_ARM_TRACE_REG5_MASK, 1708 + .val = 0x0, 1709 + }, { 1710 + .reg = PAD_FUNCTION_EN_7, 1711 + .mask = CLCD_AND_ARM_TRACE_REG6_MASK, 1712 + .val = 0x0, 1713 + }, 1714 + }; 1715 + 1716 + static struct spear_modemux clcd_sleep_modemux[] = { 1717 + { 1718 + .muxregs = clcd_sleep_muxreg, 1719 + .nmuxregs = ARRAY_SIZE(clcd_sleep_muxreg), 1720 + }, 1721 + }; 1722 + 1723 + static struct spear_pingroup clcd_sleep_pingroup = { 1724 + .name = "clcd_sleep_grp", 1725 + .pins = clcd_pins, 1726 + .npins = ARRAY_SIZE(clcd_pins), 1727 + .modemuxs = clcd_sleep_modemux, 1728 + .nmodemuxs = ARRAY_SIZE(clcd_sleep_modemux), 1729 + }; 1730 + 1731 + static const char *const clcd_grps[] = { "clcd_grp", "clcd_sleep_grp" }; 1696 1732 static struct spear_function clcd_function = { 1697 1733 .name = "clcd", 1698 1734 .groups = clcd_grps, ··· 1929 1893 &sdhci_pingroup, 1930 1894 &cf_pingroup, 1931 1895 &xd_pingroup, 1896 + &clcd_sleep_pingroup, 1932 1897 &clcd_pingroup, 1933 1898 &arm_trace_pingroup, 1934 1899 &miphy_dbg_pingroup,
+6 -2
drivers/pinctrl/spear/pinctrl-spear320.c
··· 2240 2240 .mask = PMX_SSP_CS_MASK, 2241 2241 .val = 0, 2242 2242 }, { 2243 + .reg = MODE_CONFIG_REG, 2244 + .mask = PMX_PWM_MASK, 2245 + .val = PMX_PWM_MASK, 2246 + }, { 2243 2247 .reg = IP_SEL_PAD_30_39_REG, 2244 2248 .mask = PMX_PL_34_MASK, 2245 2249 .val = PMX_PWM2_PL_34_VAL, ··· 2960 2956 }; 2961 2957 2962 2958 /* Pad multiplexing for cadence mii 1_2 as smii or rmii device */ 2963 - static const unsigned smii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 2959 + static const unsigned rmii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 2964 2960 21, 22, 23, 24, 25, 26, 27 }; 2965 - static const unsigned rmii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 }; 2961 + static const unsigned smii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 }; 2966 2962 static struct spear_muxreg mii0_1_muxreg[] = { 2967 2963 { 2968 2964 .reg = PMX_CONFIG_REG,
+1
drivers/pinctrl/spear/pinctrl-spear3xx.h
··· 15 15 #include "pinctrl-spear.h" 16 16 17 17 /* pad mux declarations */ 18 + #define PMX_PWM_MASK (1 << 16) 18 19 #define PMX_FIRDA_MASK (1 << 14) 19 20 #define PMX_I2C_MASK (1 << 13) 20 21 #define PMX_SSP_CS_MASK (1 << 12)
-3
drivers/s390/cio/css.h
··· 112 112 extern void css_reiterate_subchannels(void); 113 113 void css_update_ssd_info(struct subchannel *sch); 114 114 115 - #define __MAX_SUBCHANNEL 65535 116 - #define __MAX_SSID 3 117 - 118 115 struct channel_subsystem { 119 116 u8 cssid; 120 117 int valid;
+1 -7
drivers/s390/cio/device.c
··· 1424 1424 } 1425 1425 if (device_is_disconnected(cdev)) 1426 1426 return IO_SCH_REPROBE; 1427 - if (cdev->online) 1427 + if (cdev->online && !cdev->private->flags.resuming) 1428 1428 return IO_SCH_VERIFY; 1429 1429 if (cdev->private->state == DEV_STATE_NOT_OPER) 1430 1430 return IO_SCH_UNREG_ATTACH; ··· 1469 1469 rc = 0; 1470 1470 goto out_unlock; 1471 1471 case IO_SCH_VERIFY: 1472 - if (cdev->private->flags.resuming == 1) { 1473 - if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) { 1474 - ccw_device_set_notoper(cdev); 1475 - break; 1476 - } 1477 - } 1478 1472 /* Trigger path verification. */ 1479 1473 io_subchannel_verify(sch); 1480 1474 rc = 0;
+1 -2
drivers/s390/cio/idset.c
··· 125 125 126 126 void idset_add_set(struct idset *to, struct idset *from) 127 127 { 128 - int len = min(__BITOPS_WORDS(to->num_ssid * to->num_id), 129 - __BITOPS_WORDS(from->num_ssid * from->num_id)); 128 + int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id); 130 129 131 130 bitmap_or(to->bitmap, to->bitmap, from->bitmap, len); 132 131 }
+3
drivers/scsi/qla2xxx/qla_mid.c
··· 149 149 int 150 150 qla24xx_disable_vp(scsi_qla_host_t *vha) 151 151 { 152 + unsigned long flags; 152 153 int ret; 153 154 154 155 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); ··· 157 156 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 158 157 159 158 /* Remove port id from vp target map */ 159 + spin_lock_irqsave(&vha->hw->vport_slock, flags); 160 160 qlt_update_vp_map(vha, RESET_AL_PA); 161 + spin_unlock_irqrestore(&vha->hw->vport_slock, flags); 161 162 162 163 qla2x00_mark_vp_devices_dead(vha); 163 164 atomic_set(&vha->vp_state, VP_FAILED);
+11 -14
drivers/scsi/qla2xxx/qla_target.c
··· 557 557 int pmap_len; 558 558 fc_port_t *fcport; 559 559 int global_resets; 560 + unsigned long flags; 560 561 561 562 retry: 562 563 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); ··· 626 625 sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain, 627 626 fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); 628 627 629 - sess->s_id = fcport->d_id; 630 - sess->loop_id = fcport->loop_id; 631 - sess->conf_compl_supported = !!(fcport->flags & 632 - FCF_CONF_COMP_SUPPORTED); 628 + spin_lock_irqsave(&ha->hardware_lock, flags); 629 + ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 630 + (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 631 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 633 632 634 633 res = true; 635 634 ··· 741 740 qlt_undelete_sess(sess); 742 741 743 742 kref_get(&sess->se_sess->sess_kref); 744 - sess->s_id = fcport->d_id; 745 - sess->loop_id = fcport->loop_id; 746 - sess->conf_compl_supported = !!(fcport->flags & 747 - FCF_CONF_COMP_SUPPORTED); 743 + ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 744 + (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 745 + 748 746 if (sess->local && !local) 749 747 sess->local = 0; 750 748 spin_unlock_irqrestore(&ha->hardware_lock, flags); ··· 796 796 */ 797 797 kref_get(&sess->se_sess->sess_kref); 798 798 799 - sess->conf_compl_supported = !!(fcport->flags & 800 - FCF_CONF_COMP_SUPPORTED); 799 + sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED); 801 800 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); 802 801 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 803 802 ··· 868 869 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 869 870 "Reappeared sess %p\n", sess); 870 871 } 871 - sess->s_id = fcport->d_id; 872 - sess->loop_id = fcport->loop_id; 873 - sess->conf_compl_supported = !!(fcport->flags & 874 - FCF_CONF_COMP_SUPPORTED); 872 + ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 873 
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 875 874 } 876 875 877 876 if (sess && sess->local) {
+1
drivers/scsi/qla2xxx/qla_target.h
··· 648 648 649 649 int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, 650 650 void *, uint8_t *, uint16_t); 651 + void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool); 651 652 struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *, 652 653 const uint16_t); 653 654 struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+76 -1
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 237 237 struct tcm_qla2xxx_tpg, se_tpg); 238 238 struct tcm_qla2xxx_lport *lport = tpg->lport; 239 239 240 - return &lport->lport_name[0]; 240 + return lport->lport_naa_name; 241 241 } 242 242 243 243 static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) ··· 1457 1457 return 0; 1458 1458 } 1459 1459 1460 + static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, 1461 + uint16_t loop_id, bool conf_compl_supported) 1462 + { 1463 + struct qla_tgt *tgt = sess->tgt; 1464 + struct qla_hw_data *ha = tgt->ha; 1465 + struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr; 1466 + struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; 1467 + struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, 1468 + struct tcm_qla2xxx_nacl, se_node_acl); 1469 + u32 key; 1470 + 1471 + 1472 + if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) 1473 + pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", 1474 + sess, 1475 + sess->port_name[0], sess->port_name[1], 1476 + sess->port_name[2], sess->port_name[3], 1477 + sess->port_name[4], sess->port_name[5], 1478 + sess->port_name[6], sess->port_name[7], 1479 + sess->loop_id, loop_id, 1480 + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, 1481 + s_id.b.domain, s_id.b.area, s_id.b.al_pa); 1482 + 1483 + if (sess->loop_id != loop_id) { 1484 + /* 1485 + * Because we can shuffle loop IDs around and we 1486 + * update different sessions non-atomically, we might 1487 + * have overwritten this session's old loop ID 1488 + * already, and we might end up overwriting some other 1489 + * session that will be updated later. So we have to 1490 + * be extra careful and we can't warn about those things... 
1491 + */ 1492 + if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl) 1493 + lport->lport_loopid_map[sess->loop_id].se_nacl = NULL; 1494 + 1495 + lport->lport_loopid_map[loop_id].se_nacl = se_nacl; 1496 + 1497 + sess->loop_id = loop_id; 1498 + } 1499 + 1500 + if (sess->s_id.b24 != s_id.b24) { 1501 + key = (((u32) sess->s_id.b.domain << 16) | 1502 + ((u32) sess->s_id.b.area << 8) | 1503 + ((u32) sess->s_id.b.al_pa)); 1504 + 1505 + if (btree_lookup32(&lport->lport_fcport_map, key)) 1506 + WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl, 1507 + "Found wrong se_nacl when updating s_id %x:%x:%x\n", 1508 + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); 1509 + else 1510 + WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", 1511 + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); 1512 + 1513 + key = (((u32) s_id.b.domain << 16) | 1514 + ((u32) s_id.b.area << 8) | 1515 + ((u32) s_id.b.al_pa)); 1516 + 1517 + if (btree_lookup32(&lport->lport_fcport_map, key)) { 1518 + WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n", 1519 + s_id.b.domain, s_id.b.area, s_id.b.al_pa); 1520 + btree_update32(&lport->lport_fcport_map, key, se_nacl); 1521 + } else { 1522 + btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC); 1523 + } 1524 + 1525 + sess->s_id = s_id; 1526 + nacl->nport_id = key; 1527 + } 1528 + 1529 + sess->conf_compl_supported = conf_compl_supported; 1530 + } 1531 + 1460 1532 /* 1461 1533 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. 
1462 1534 */ ··· 1539 1467 .free_cmd = tcm_qla2xxx_free_cmd, 1540 1468 .free_mcmd = tcm_qla2xxx_free_mcmd, 1541 1469 .free_session = tcm_qla2xxx_free_session, 1470 + .update_sess = tcm_qla2xxx_update_sess, 1542 1471 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, 1543 1472 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, 1544 1473 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, ··· 1607 1534 lport->lport_wwpn = wwpn; 1608 1535 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, 1609 1536 wwpn); 1537 + sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn); 1610 1538 1611 1539 ret = tcm_qla2xxx_init_lport(lport); 1612 1540 if (ret != 0) ··· 1675 1601 lport->lport_npiv_wwnn = npiv_wwnn; 1676 1602 tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], 1677 1603 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); 1604 + sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); 1678 1605 1679 1606 /* FIXME: tcm_qla2xxx_npiv_make_lport */ 1680 1607 ret = -ENOSYS;
+2
drivers/scsi/qla2xxx/tcm_qla2xxx.h
··· 61 61 u64 lport_npiv_wwnn; 62 62 /* ASCII formatted WWPN for FC Target Lport */ 63 63 char lport_name[TCM_QLA2XXX_NAMELEN]; 64 + /* ASCII formatted naa WWPN for VPD page 83 etc */ 65 + char lport_naa_name[TCM_QLA2XXX_NAMELEN]; 64 66 /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */ 65 67 char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN]; 66 68 /* map for fc_port pointers in 24-bit FC Port ID space */
+1 -12
drivers/scsi/qlogicpti.c
··· 1294 1294 static const struct of_device_id qpti_match[]; 1295 1295 static int __devinit qpti_sbus_probe(struct platform_device *op) 1296 1296 { 1297 - const struct of_device_id *match; 1298 - struct scsi_host_template *tpnt; 1299 1297 struct device_node *dp = op->dev.of_node; 1300 1298 struct Scsi_Host *host; 1301 1299 struct qlogicpti *qpti; 1302 1300 static int nqptis; 1303 1301 const char *fcode; 1304 - 1305 - match = of_match_device(qpti_match, &op->dev); 1306 - if (!match) 1307 - return -EINVAL; 1308 - tpnt = match->data; 1309 1302 1310 1303 /* Sometimes Antares cards come up not completely 1311 1304 * setup, and we get a report of a zero IRQ. ··· 1306 1313 if (op->archdata.irqs[0] == 0) 1307 1314 return -ENODEV; 1308 1315 1309 - host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti)); 1316 + host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti)); 1310 1317 if (!host) 1311 1318 return -ENOMEM; 1312 1319 ··· 1438 1445 static const struct of_device_id qpti_match[] = { 1439 1446 { 1440 1447 .name = "ptisp", 1441 - .data = &qpti_template, 1442 1448 }, 1443 1449 { 1444 1450 .name = "PTI,ptisp", 1445 - .data = &qpti_template, 1446 1451 }, 1447 1452 { 1448 1453 .name = "QLGC,isp", 1449 - .data = &qpti_template, 1450 1454 }, 1451 1455 { 1452 1456 .name = "SUNW,isp", 1453 - .data = &qpti_template, 1454 1457 }, 1455 1458 {}, 1456 1459 };
+3 -1
drivers/target/iscsi/iscsi_target.c
··· 3719 3719 */ 3720 3720 iscsit_thread_check_cpumask(conn, current, 1); 3721 3721 3722 - schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 3722 + wait_event_interruptible(conn->queues_wq, 3723 + !iscsit_conn_all_queues_empty(conn) || 3724 + ts->status == ISCSI_THREAD_SET_RESET); 3723 3725 3724 3726 if ((ts->status == ISCSI_THREAD_SET_RESET) || 3725 3727 signal_pending(current))
+1
drivers/target/iscsi/iscsi_target_core.h
··· 486 486 }; 487 487 488 488 struct iscsi_conn { 489 + wait_queue_head_t queues_wq; 489 490 /* Authentication Successful for this connection */ 490 491 u8 auth_complete; 491 492 /* State connection is currently in */
+1
drivers/target/iscsi/iscsi_target_login.c
··· 41 41 42 42 static int iscsi_login_init_conn(struct iscsi_conn *conn) 43 43 { 44 + init_waitqueue_head(&conn->queues_wq); 44 45 INIT_LIST_HEAD(&conn->conn_list); 45 46 INIT_LIST_HEAD(&conn->conn_cmd_list); 46 47 INIT_LIST_HEAD(&conn->immed_queue_list);
+20 -2
drivers/target/iscsi/iscsi_target_util.c
··· 488 488 atomic_set(&conn->check_immediate_queue, 1); 489 489 spin_unlock_bh(&conn->immed_queue_lock); 490 490 491 - wake_up_process(conn->thread_set->tx_thread); 491 + wake_up(&conn->queues_wq); 492 492 } 493 493 494 494 struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) ··· 562 562 atomic_inc(&cmd->response_queue_count); 563 563 spin_unlock_bh(&conn->response_queue_lock); 564 564 565 - wake_up_process(conn->thread_set->tx_thread); 565 + wake_up(&conn->queues_wq); 566 566 } 567 567 568 568 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) ··· 614 614 cmd->init_task_tag, 615 615 atomic_read(&cmd->response_queue_count)); 616 616 } 617 + } 618 + 619 + bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn) 620 + { 621 + bool empty; 622 + 623 + spin_lock_bh(&conn->immed_queue_lock); 624 + empty = list_empty(&conn->immed_queue_list); 625 + spin_unlock_bh(&conn->immed_queue_lock); 626 + 627 + if (!empty) 628 + return empty; 629 + 630 + spin_lock_bh(&conn->response_queue_lock); 631 + empty = list_empty(&conn->response_queue_list); 632 + spin_unlock_bh(&conn->response_queue_lock); 633 + 634 + return empty; 617 635 } 618 636 619 637 void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+1
drivers/target/iscsi/iscsi_target_util.h
··· 25 25 extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 26 26 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); 27 27 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); 28 + extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 28 29 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); 29 30 extern void iscsit_release_cmd(struct iscsi_cmd *); 30 31 extern void iscsit_free_cmd(struct iscsi_cmd *);
+2 -1
drivers/target/target_core_configfs.c
··· 3206 3206 if (ret < 0) 3207 3207 goto out; 3208 3208 3209 - if (core_dev_setup_virtual_lun0() < 0) 3209 + ret = core_dev_setup_virtual_lun0(); 3210 + if (ret < 0) 3210 3211 goto out; 3211 3212 3212 3213 return 0;
+9 -9
drivers/target/target_core_device.c
··· 850 850 851 851 static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) 852 852 { 853 - u32 tmp, aligned_max_sectors; 853 + u32 aligned_max_sectors; 854 + u32 alignment; 854 855 /* 855 856 * Limit max_sectors to a PAGE_SIZE aligned value for modern 856 857 * transport_allocate_data_tasks() operation. 857 858 */ 858 - tmp = rounddown((max_sectors * block_size), PAGE_SIZE); 859 - aligned_max_sectors = (tmp / block_size); 860 - if (max_sectors != aligned_max_sectors) { 861 - printk(KERN_INFO "Rounding down aligned max_sectors from %u" 862 - " to %u\n", max_sectors, aligned_max_sectors); 863 - return aligned_max_sectors; 864 - } 859 + alignment = max(1ul, PAGE_SIZE / block_size); 860 + aligned_max_sectors = rounddown(max_sectors, alignment); 865 861 866 - return max_sectors; 862 + if (max_sectors != aligned_max_sectors) 863 + pr_info("Rounding down aligned max_sectors from %u to %u\n", 864 + max_sectors, aligned_max_sectors); 865 + 866 + return aligned_max_sectors; 867 867 } 868 868 869 869 void se_dev_set_default_attribs(
+18
drivers/target/target_core_sbc.c
··· 135 135 return 0; 136 136 } 137 137 138 + static int sbc_emulate_noop(struct se_cmd *cmd) 139 + { 140 + target_complete_cmd(cmd, GOOD); 141 + return 0; 142 + } 143 + 138 144 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 139 145 { 140 146 return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; ··· 536 530 case VERIFY: 537 531 size = 0; 538 532 cmd->execute_cmd = sbc_emulate_verify; 533 + break; 534 + case REZERO_UNIT: 535 + case SEEK_6: 536 + case SEEK_10: 537 + /* 538 + * There are still clients out there which use these old SCSI-2 539 + * commands. This mainly happens when running VMs with legacy 540 + * guest systems, connected via SCSI command pass-through to 541 + * iSCSI targets. Make them happy and return status GOOD. 542 + */ 543 + size = 0; 544 + cmd->execute_cmd = sbc_emulate_noop; 539 545 break; 540 546 default: 541 547 ret = spc_parse_cdb(cmd, &size);
+2
drivers/target/target_core_spc.c
··· 605 605 unsigned char buf[SE_INQUIRY_BUF]; 606 606 int p, ret; 607 607 608 + memset(buf, 0, SE_INQUIRY_BUF); 609 + 608 610 if (dev == tpg->tpg_virt_lun0.lun_se_dev) 609 611 buf[0] = 0x3f; /* Not connected */ 610 612 else
+3 -3
drivers/target/target_core_tmr.c
··· 140 140 printk("ABORT_TASK: Found referenced %s task_tag: %u\n", 141 141 se_cmd->se_tfo->get_fabric_name(), ref_tag); 142 142 143 - spin_lock_irq(&se_cmd->t_state_lock); 143 + spin_lock(&se_cmd->t_state_lock); 144 144 if (se_cmd->transport_state & CMD_T_COMPLETE) { 145 145 printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); 146 - spin_unlock_irq(&se_cmd->t_state_lock); 146 + spin_unlock(&se_cmd->t_state_lock); 147 147 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 148 148 goto out; 149 149 } 150 150 se_cmd->transport_state |= CMD_T_ABORTED; 151 - spin_unlock_irq(&se_cmd->t_state_lock); 151 + spin_unlock(&se_cmd->t_state_lock); 152 152 153 153 list_del_init(&se_cmd->se_cmd_list); 154 154 kref_get(&se_cmd->cmd_kref);
-1
drivers/target/target_core_transport.c
··· 1616 1616 1617 1617 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1618 1618 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1619 - transport_generic_free_cmd(se_cmd, 0); 1620 1619 } 1621 1620 1622 1621 /**
+1 -1
drivers/thermal/exynos_thermal.c
··· 815 815 }, 816 816 { }, 817 817 }; 818 - MODULE_DEVICE_TABLE(platform, exynos4_tmu_driver_ids); 818 + MODULE_DEVICE_TABLE(platform, exynos_tmu_driver_ids); 819 819 820 820 static inline struct exynos_tmu_platform_data *exynos_get_driver_data( 821 821 struct platform_device *pdev)
+1 -1
drivers/thermal/rcar_thermal.c
··· 210 210 goto error_free_priv; 211 211 } 212 212 213 - zone = thermal_zone_device_register("rcar_thermal", 0, priv, 213 + zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv, 214 214 &rcar_thermal_zone_ops, 0, 0); 215 215 if (IS_ERR(zone)) { 216 216 dev_err(&pdev->dev, "thermal zone device is NULL\n");
+2 -1
drivers/usb/gadget/u_ether.c
··· 20 20 #include <linux/ctype.h> 21 21 #include <linux/etherdevice.h> 22 22 #include <linux/ethtool.h> 23 + #include <linux/if_vlan.h> 23 24 24 25 #include "u_ether.h" 25 26 ··· 296 295 while (skb2) { 297 296 if (status < 0 298 297 || ETH_HLEN > skb2->len 299 - || skb2->len > ETH_FRAME_LEN) { 298 + || skb2->len > VLAN_ETH_FRAME_LEN) { 300 299 dev->net->stats.rx_errors++; 301 300 dev->net->stats.rx_length_errors++; 302 301 DBG(dev, "rx length %d\n", skb2->len);
+4 -1
drivers/video/xen-fbfront.c
··· 641 641 case XenbusStateReconfiguring: 642 642 case XenbusStateReconfigured: 643 643 case XenbusStateUnknown: 644 - case XenbusStateClosed: 645 644 break; 646 645 647 646 case XenbusStateInitWait: ··· 669 670 info->feature_resize = val; 670 671 break; 671 672 673 + case XenbusStateClosed: 674 + if (dev->state == XenbusStateClosed) 675 + break; 676 + /* Missed the backend's CLOSING state -- fallthrough */ 672 677 case XenbusStateClosing: 673 678 xenbus_frontend_closed(dev); 674 679 break;
+3 -1
drivers/virtio/virtio.c
··· 225 225 226 226 void unregister_virtio_device(struct virtio_device *dev) 227 227 { 228 + int index = dev->index; /* save for after device release */ 229 + 228 230 device_unregister(&dev->dev); 229 - ida_simple_remove(&virtio_index_ida, dev->index); 231 + ida_simple_remove(&virtio_index_ida, index); 230 232 } 231 233 EXPORT_SYMBOL_GPL(unregister_virtio_device); 232 234
+1
drivers/xen/Makefile
··· 2 2 obj-y += manage.o balloon.o 3 3 obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 4 4 endif 5 + obj-$(CONFIG_X86) += fallback.o 5 6 obj-y += grant-table.o features.o events.o 6 7 obj-y += xenbus/ 7 8
+1 -1
drivers/xen/events.c
··· 1395 1395 { 1396 1396 struct pt_regs *old_regs = set_irq_regs(regs); 1397 1397 1398 + irq_enter(); 1398 1399 #ifdef CONFIG_X86 1399 1400 exit_idle(); 1400 1401 #endif 1401 - irq_enter(); 1402 1402 1403 1403 __xen_evtchn_do_upcall(); 1404 1404
+80
drivers/xen/fallback.c
··· 1 + #include <linux/kernel.h> 2 + #include <linux/string.h> 3 + #include <linux/bug.h> 4 + #include <linux/export.h> 5 + #include <asm/hypervisor.h> 6 + #include <asm/xen/hypercall.h> 7 + 8 + int xen_event_channel_op_compat(int cmd, void *arg) 9 + { 10 + struct evtchn_op op; 11 + int rc; 12 + 13 + op.cmd = cmd; 14 + memcpy(&op.u, arg, sizeof(op.u)); 15 + rc = _hypercall1(int, event_channel_op_compat, &op); 16 + 17 + switch (cmd) { 18 + case EVTCHNOP_close: 19 + case EVTCHNOP_send: 20 + case EVTCHNOP_bind_vcpu: 21 + case EVTCHNOP_unmask: 22 + /* no output */ 23 + break; 24 + 25 + #define COPY_BACK(eop) \ 26 + case EVTCHNOP_##eop: \ 27 + memcpy(arg, &op.u.eop, sizeof(op.u.eop)); \ 28 + break 29 + 30 + COPY_BACK(bind_interdomain); 31 + COPY_BACK(bind_virq); 32 + COPY_BACK(bind_pirq); 33 + COPY_BACK(status); 34 + COPY_BACK(alloc_unbound); 35 + COPY_BACK(bind_ipi); 36 + #undef COPY_BACK 37 + 38 + default: 39 + WARN_ON(rc != -ENOSYS); 40 + break; 41 + } 42 + 43 + return rc; 44 + } 45 + EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); 46 + 47 + int HYPERVISOR_physdev_op_compat(int cmd, void *arg) 48 + { 49 + struct physdev_op op; 50 + int rc; 51 + 52 + op.cmd = cmd; 53 + memcpy(&op.u, arg, sizeof(op.u)); 54 + rc = _hypercall1(int, physdev_op_compat, &op); 55 + 56 + switch (cmd) { 57 + case PHYSDEVOP_IRQ_UNMASK_NOTIFY: 58 + case PHYSDEVOP_set_iopl: 59 + case PHYSDEVOP_set_iobitmap: 60 + case PHYSDEVOP_apic_write: 61 + /* no output */ 62 + break; 63 + 64 + #define COPY_BACK(pop, fld) \ 65 + case PHYSDEVOP_##pop: \ 66 + memcpy(arg, &op.u.fld, sizeof(op.u.fld)); \ 67 + break 68 + 69 + COPY_BACK(irq_status_query, irq_status_query); 70 + COPY_BACK(apic_read, apic_op); 71 + COPY_BACK(ASSIGN_VECTOR, irq_op); 72 + #undef COPY_BACK 73 + 74 + default: 75 + WARN_ON(rc != -ENOSYS); 76 + break; 77 + } 78 + 79 + return rc; 80 + }
+19 -17
drivers/xen/gntdev.c
··· 105 105 #endif 106 106 } 107 107 108 + static void gntdev_free_map(struct grant_map *map) 109 + { 110 + if (map == NULL) 111 + return; 112 + 113 + if (map->pages) 114 + free_xenballooned_pages(map->count, map->pages); 115 + kfree(map->pages); 116 + kfree(map->grants); 117 + kfree(map->map_ops); 118 + kfree(map->unmap_ops); 119 + kfree(map->kmap_ops); 120 + kfree(map); 121 + } 122 + 108 123 static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) 109 124 { 110 125 struct grant_map *add; ··· 157 142 return add; 158 143 159 144 err: 160 - kfree(add->pages); 161 - kfree(add->grants); 162 - kfree(add->map_ops); 163 - kfree(add->unmap_ops); 164 - kfree(add->kmap_ops); 165 - kfree(add); 145 + gntdev_free_map(add); 166 146 return NULL; 167 147 } 168 148 ··· 208 198 evtchn_put(map->notify.event); 209 199 } 210 200 211 - if (map->pages) { 212 - if (!use_ptemod) 213 - unmap_grant_pages(map, 0, map->count); 214 - 215 - free_xenballooned_pages(map->count, map->pages); 216 - } 217 - kfree(map->pages); 218 - kfree(map->grants); 219 - kfree(map->map_ops); 220 - kfree(map->unmap_ops); 221 - kfree(map); 201 + if (map->pages && !use_ptemod) 202 + unmap_grant_pages(map, 0, map->count); 203 + gntdev_free_map(map); 222 204 } 223 205 224 206 /* ------------------------------------------------------------------ */
+1 -1
drivers/xen/xenbus/xenbus_dev_frontend.c
··· 458 458 goto out; 459 459 460 460 /* Can't write a xenbus message larger we can buffer */ 461 - if ((len + u->len) > sizeof(u->u.buffer)) { 461 + if (len > sizeof(u->u.buffer) - u->len) { 462 462 /* On error, dump existing buffer */ 463 463 u->len = 0; 464 464 rc = -EINVAL;
+4 -2
fs/bio.c
··· 75 75 unsigned int sz = sizeof(struct bio) + extra_size; 76 76 struct kmem_cache *slab = NULL; 77 77 struct bio_slab *bslab, *new_bio_slabs; 78 + unsigned int new_bio_slab_max; 78 79 unsigned int i, entry = -1; 79 80 80 81 mutex_lock(&bio_slab_lock); ··· 98 97 goto out_unlock; 99 98 100 99 if (bio_slab_nr == bio_slab_max && entry == -1) { 101 - bio_slab_max <<= 1; 100 + new_bio_slab_max = bio_slab_max << 1; 102 101 new_bio_slabs = krealloc(bio_slabs, 103 - bio_slab_max * sizeof(struct bio_slab), 102 + new_bio_slab_max * sizeof(struct bio_slab), 104 103 GFP_KERNEL); 105 104 if (!new_bio_slabs) 106 105 goto out_unlock; 106 + bio_slab_max = new_bio_slab_max; 107 107 bio_slabs = new_bio_slabs; 108 108 } 109 109 if (entry == -1)
+2
fs/ceph/export.c
··· 90 90 *max_len = handle_length; 91 91 type = 255; 92 92 } 93 + if (dentry) 94 + dput(dentry); 93 95 return type; 94 96 } 95 97
+20 -29
fs/cifs/cifsacl.c
··· 225 225 } 226 226 227 227 static void 228 + cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src) 229 + { 230 + memcpy(dst, src, sizeof(*dst)); 231 + dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS); 232 + } 233 + 234 + static void 228 235 id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr, 229 236 struct cifs_sid_id **psidid, char *typestr) 230 237 { ··· 255 248 } 256 249 } 257 250 258 - memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid)); 251 + cifs_copy_sid(&(*psidid)->sid, sidptr); 259 252 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1); 260 253 (*psidid)->refcount = 0; 261 254 ··· 361 354 * any fields of the node after a reference is put . 362 355 */ 363 356 if (test_bit(SID_ID_MAPPED, &psidid->state)) { 364 - memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid)); 357 + cifs_copy_sid(ssid, &psidid->sid); 365 358 psidid->time = jiffies; /* update ts for accessing */ 366 359 goto id_sid_out; 367 360 } ··· 377 370 if (IS_ERR(sidkey)) { 378 371 rc = -EINVAL; 379 372 cFYI(1, "%s: Can't map and id to a SID", __func__); 373 + } else if (sidkey->datalen < sizeof(struct cifs_sid)) { 374 + rc = -EIO; 375 + cFYI(1, "%s: Downcall contained malformed key " 376 + "(datalen=%hu)", __func__, sidkey->datalen); 380 377 } else { 381 378 lsid = (struct cifs_sid *)sidkey->payload.data; 382 - memcpy(&psidid->sid, lsid, 383 - sidkey->datalen < sizeof(struct cifs_sid) ? 384 - sidkey->datalen : sizeof(struct cifs_sid)); 385 - memcpy(ssid, &psidid->sid, 386 - sidkey->datalen < sizeof(struct cifs_sid) ? 
387 - sidkey->datalen : sizeof(struct cifs_sid)); 379 + cifs_copy_sid(&psidid->sid, lsid); 380 + cifs_copy_sid(ssid, &psidid->sid); 388 381 set_bit(SID_ID_MAPPED, &psidid->state); 389 382 key_put(sidkey); 390 383 kfree(psidid->sidstr); ··· 403 396 return rc; 404 397 } 405 398 if (test_bit(SID_ID_MAPPED, &psidid->state)) 406 - memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid)); 399 + cifs_copy_sid(ssid, &psidid->sid); 407 400 else 408 401 rc = -EINVAL; 409 402 } ··· 682 675 static void copy_sec_desc(const struct cifs_ntsd *pntsd, 683 676 struct cifs_ntsd *pnntsd, __u32 sidsoffset) 684 677 { 685 - int i; 686 - 687 678 struct cifs_sid *owner_sid_ptr, *group_sid_ptr; 688 679 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr; 689 680 ··· 697 692 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + 698 693 le32_to_cpu(pntsd->osidoffset)); 699 694 nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset); 700 - 701 - nowner_sid_ptr->revision = owner_sid_ptr->revision; 702 - nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth; 703 - for (i = 0; i < 6; i++) 704 - nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i]; 705 - for (i = 0; i < 5; i++) 706 - nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i]; 695 + cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr); 707 696 708 697 /* copy group sid */ 709 698 group_sid_ptr = (struct cifs_sid *)((char *)pntsd + 710 699 le32_to_cpu(pntsd->gsidoffset)); 711 700 ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset + 712 701 sizeof(struct cifs_sid)); 713 - 714 - ngroup_sid_ptr->revision = group_sid_ptr->revision; 715 - ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth; 716 - for (i = 0; i < 6; i++) 717 - ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i]; 718 - for (i = 0; i < 5; i++) 719 - ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i]; 702 + cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr); 720 703 721 704 return; 722 705 } ··· 1113 1120 kfree(nowner_sid_ptr); 1114 1121 return rc; 
1115 1122 } 1116 - memcpy(owner_sid_ptr, nowner_sid_ptr, 1117 - sizeof(struct cifs_sid)); 1123 + cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr); 1118 1124 kfree(nowner_sid_ptr); 1119 1125 *aclflag = CIFS_ACL_OWNER; 1120 1126 } ··· 1131 1139 kfree(ngroup_sid_ptr); 1132 1140 return rc; 1133 1141 } 1134 - memcpy(group_sid_ptr, ngroup_sid_ptr, 1135 - sizeof(struct cifs_sid)); 1142 + cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr); 1136 1143 kfree(ngroup_sid_ptr); 1137 1144 *aclflag = CIFS_ACL_GROUP; 1138 1145 }
+10 -1
fs/cifs/dir.c
··· 398 398 * in network traffic in the other paths. 399 399 */ 400 400 if (!(oflags & O_CREAT)) { 401 - struct dentry *res = cifs_lookup(inode, direntry, 0); 401 + struct dentry *res; 402 + 403 + /* 404 + * Check for hashed negative dentry. We have already revalidated 405 + * the dentry and it is fine. No need to perform another lookup. 406 + */ 407 + if (!d_unhashed(direntry)) 408 + return -ENOENT; 409 + 410 + res = cifs_lookup(inode, direntry, 0); 402 411 if (IS_ERR(res)) 403 412 return PTR_ERR(res); 404 413
+3 -35
fs/eventpoll.c
··· 346 346 /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ 347 347 static inline int ep_op_has_event(int op) 348 348 { 349 - return op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD; 349 + return op != EPOLL_CTL_DEL; 350 350 } 351 351 352 352 /* Initialize the poll safe wake up structure */ ··· 674 674 atomic_long_dec(&ep->user->epoll_watches); 675 675 676 676 return 0; 677 - } 678 - 679 - /* 680 - * Disables a "struct epitem" in the eventpoll set. Returns -EBUSY if the item 681 - * had no event flags set, indicating that another thread may be currently 682 - * handling that item's events (in the case that EPOLLONESHOT was being 683 - * used). Otherwise a zero result indicates that the item has been disabled 684 - * from receiving events. A disabled item may be re-enabled via 685 - * EPOLL_CTL_MOD. Must be called with "mtx" held. 686 - */ 687 - static int ep_disable(struct eventpoll *ep, struct epitem *epi) 688 - { 689 - int result = 0; 690 - unsigned long flags; 691 - 692 - spin_lock_irqsave(&ep->lock, flags); 693 - if (epi->event.events & ~EP_PRIVATE_BITS) { 694 - if (ep_is_linked(&epi->rdllink)) 695 - list_del_init(&epi->rdllink); 696 - /* Ensure ep_poll_callback will not add epi back onto ready 697 - list: */ 698 - epi->event.events &= EP_PRIVATE_BITS; 699 - } 700 - else 701 - result = -EBUSY; 702 - spin_unlock_irqrestore(&ep->lock, flags); 703 - 704 - return result; 705 677 } 706 678 707 679 static void ep_free(struct eventpoll *ep) ··· 1019 1047 rb_link_node(&epi->rbn, parent, p); 1020 1048 rb_insert_color(&epi->rbn, &ep->rbr); 1021 1049 } 1050 + 1051 + 1022 1052 1023 1053 #define PATH_ARR_SIZE 5 1024 1054 /* ··· 1785 1811 epds.events |= POLLERR | POLLHUP; 1786 1812 error = ep_modify(ep, epi, &epds); 1787 1813 } else 1788 - error = -ENOENT; 1789 - break; 1790 - case EPOLL_CTL_DISABLE: 1791 - if (epi) 1792 - error = ep_disable(ep, epi); 1793 - else 1794 1814 error = -ENOENT; 1795 1815 break; 1796 1816 }
+9 -10
fs/ext4/ialloc.c
··· 725 725 "inode=%lu", ino + 1); 726 726 continue; 727 727 } 728 + BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); 729 + err = ext4_journal_get_write_access(handle, inode_bitmap_bh); 730 + if (err) 731 + goto fail; 728 732 ext4_lock_group(sb, group); 729 733 ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data); 730 734 ext4_unlock_group(sb, group); ··· 742 738 goto out; 743 739 744 740 got: 741 + BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); 742 + err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); 743 + if (err) 744 + goto fail; 745 + 745 746 /* We may have to initialize the block bitmap if it isn't already */ 746 747 if (ext4_has_group_desc_csum(sb) && 747 748 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { ··· 779 770 if (err) 780 771 goto fail; 781 772 } 782 - 783 - BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); 784 - err = ext4_journal_get_write_access(handle, inode_bitmap_bh); 785 - if (err) 786 - goto fail; 787 773 788 774 BUFFER_TRACE(group_desc_bh, "get_write_access"); 789 775 err = ext4_journal_get_write_access(handle, group_desc_bh); ··· 826 822 ext4_group_desc_csum_set(sb, group, gdp); 827 823 } 828 824 ext4_unlock_group(sb, group); 829 - 830 - BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); 831 - err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); 832 - if (err) 833 - goto fail; 834 825 835 826 BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); 836 827 err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
+2 -2
fs/file.c
··· 900 900 return __close_fd(files, fd); 901 901 902 902 if (fd >= rlimit(RLIMIT_NOFILE)) 903 - return -EMFILE; 903 + return -EBADF; 904 904 905 905 spin_lock(&files->file_lock); 906 906 err = expand_files(files, fd); ··· 926 926 return -EINVAL; 927 927 928 928 if (newfd >= rlimit(RLIMIT_NOFILE)) 929 - return -EMFILE; 929 + return -EBADF; 930 930 931 931 spin_lock(&files->file_lock); 932 932 err = expand_files(files, newfd);
+5 -9
fs/gfs2/file.c
··· 516 516 struct gfs2_holder i_gh; 517 517 int error; 518 518 519 - gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 520 - error = gfs2_glock_nq(&i_gh); 521 - if (error == 0) { 522 - file_accessed(file); 523 - gfs2_glock_dq(&i_gh); 524 - } 525 - gfs2_holder_uninit(&i_gh); 519 + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, 520 + &i_gh); 526 521 if (error) 527 522 return error; 523 + /* grab lock to update inode */ 524 + gfs2_glock_dq_uninit(&i_gh); 525 + file_accessed(file); 528 526 } 529 527 vma->vm_ops = &gfs2_vm_ops; 530 528 ··· 675 677 size_t writesize = iov_length(iov, nr_segs); 676 678 struct dentry *dentry = file->f_dentry; 677 679 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 678 - struct gfs2_sbd *sdp; 679 680 int ret; 680 681 681 - sdp = GFS2_SB(file->f_mapping->host); 682 682 ret = gfs2_rs_alloc(ip); 683 683 if (ret) 684 684 return ret;
+2 -14
fs/gfs2/lops.c
··· 393 393 struct gfs2_meta_header *mh; 394 394 struct gfs2_trans *tr; 395 395 396 - lock_buffer(bd->bd_bh); 397 - gfs2_log_lock(sdp); 398 396 tr = current->journal_info; 399 397 tr->tr_touched = 1; 400 398 if (!list_empty(&bd->bd_list)) 401 - goto out; 399 + return; 402 400 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); 403 401 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); 404 402 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data; ··· 412 414 sdp->sd_log_num_buf++; 413 415 list_add(&bd->bd_list, &sdp->sd_log_le_buf); 414 416 tr->tr_num_buf_new++; 415 - out: 416 - gfs2_log_unlock(sdp); 417 - unlock_buffer(bd->bd_bh); 418 417 } 419 418 420 419 static void gfs2_check_magic(struct buffer_head *bh) ··· 616 621 617 622 static void revoke_lo_before_commit(struct gfs2_sbd *sdp) 618 623 { 619 - struct gfs2_log_descriptor *ld; 620 624 struct gfs2_meta_header *mh; 621 625 unsigned int offset; 622 626 struct list_head *head = &sdp->sd_log_le_revoke; ··· 628 634 629 635 length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64)); 630 636 page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke); 631 - ld = page_address(page); 632 637 offset = sizeof(struct gfs2_log_descriptor); 633 638 634 639 list_for_each_entry(bd, head, bd_list) { ··· 770 777 struct address_space *mapping = bd->bd_bh->b_page->mapping; 771 778 struct gfs2_inode *ip = GFS2_I(mapping->host); 772 779 773 - lock_buffer(bd->bd_bh); 774 - gfs2_log_lock(sdp); 775 780 if (tr) 776 781 tr->tr_touched = 1; 777 782 if (!list_empty(&bd->bd_list)) 778 - goto out; 783 + return; 779 784 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); 780 785 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); 781 786 if (gfs2_is_jdata(ip)) { ··· 784 793 } else { 785 794 list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered); 786 795 } 787 - out: 788 - gfs2_log_unlock(sdp); 789 - unlock_buffer(bd->bd_bh); 790 796 } 791 797 792 798 /**
+5 -2
fs/gfs2/quota.c
··· 497 497 struct gfs2_quota_data **qd; 498 498 int error; 499 499 500 - if (ip->i_res == NULL) 501 - gfs2_rs_alloc(ip); 500 + if (ip->i_res == NULL) { 501 + error = gfs2_rs_alloc(ip); 502 + if (error) 503 + return error; 504 + } 502 505 503 506 qd = ip->i_res->rs_qa_qd; 504 507
+21 -12
fs/gfs2/rgrp.c
··· 553 553 */ 554 554 int gfs2_rs_alloc(struct gfs2_inode *ip) 555 555 { 556 - int error = 0; 557 556 struct gfs2_blkreserv *res; 558 557 559 558 if (ip->i_res) ··· 560 561 561 562 res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS); 562 563 if (!res) 563 - error = -ENOMEM; 564 + return -ENOMEM; 564 565 565 566 RB_CLEAR_NODE(&res->rs_node); 566 567 ··· 570 571 else 571 572 ip->i_res = res; 572 573 up_write(&ip->i_rw_mutex); 573 - return error; 574 + return 0; 574 575 } 575 576 576 577 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) ··· 1262 1263 int ret = 0; 1263 1264 u64 amt; 1264 1265 u64 trimmed = 0; 1266 + u64 start, end, minlen; 1265 1267 unsigned int x; 1268 + unsigned bs_shift = sdp->sd_sb.sb_bsize_shift; 1266 1269 1267 1270 if (!capable(CAP_SYS_ADMIN)) 1268 1271 return -EPERM; ··· 1272 1271 if (!blk_queue_discard(q)) 1273 1272 return -EOPNOTSUPP; 1274 1273 1275 - if (argp == NULL) { 1276 - r.start = 0; 1277 - r.len = ULLONG_MAX; 1278 - r.minlen = 0; 1279 - } else if (copy_from_user(&r, argp, sizeof(r))) 1274 + if (copy_from_user(&r, argp, sizeof(r))) 1280 1275 return -EFAULT; 1281 1276 1282 1277 ret = gfs2_rindex_update(sdp); 1283 1278 if (ret) 1284 1279 return ret; 1285 1280 1286 - rgd = gfs2_blk2rgrpd(sdp, r.start, 0); 1287 - rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0); 1281 + start = r.start >> bs_shift; 1282 + end = start + (r.len >> bs_shift); 1283 + minlen = max_t(u64, r.minlen, 1284 + q->limits.discard_granularity) >> bs_shift; 1285 + 1286 + rgd = gfs2_blk2rgrpd(sdp, start, 0); 1287 + rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0); 1288 + 1289 + if (end <= start || 1290 + minlen > sdp->sd_max_rg_data || 1291 + start > rgd_end->rd_data0 + rgd_end->rd_data) 1292 + return -EINVAL; 1288 1293 1289 1294 while (1) { 1290 1295 ··· 1302 1295 /* Trim each bitmap in the rgrp */ 1303 1296 for (x = 0; x < rgd->rd_length; x++) { 1304 1297 struct gfs2_bitmap *bi = rgd->rd_bits + x; 1305 - ret = gfs2_rgrp_send_discards(sdp, 
rgd->rd_data0, NULL, bi, r.minlen, &amt); 1298 + ret = gfs2_rgrp_send_discards(sdp, 1299 + rgd->rd_data0, NULL, bi, minlen, 1300 + &amt); 1306 1301 if (ret) { 1307 1302 gfs2_glock_dq_uninit(&gh); 1308 1303 goto out; ··· 1333 1324 1334 1325 out: 1335 1326 r.len = trimmed << 9; 1336 - if (argp && copy_to_user(argp, &r, sizeof(r))) 1327 + if (copy_to_user(argp, &r, sizeof(r))) 1337 1328 return -EFAULT; 1338 1329 1339 1330 return ret;
+2 -1
fs/gfs2/super.c
··· 810 810 return; 811 811 } 812 812 need_unlock = 1; 813 - } 813 + } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) 814 + return; 814 815 815 816 if (current->journal_info == NULL) { 816 817 ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
+8
fs/gfs2/trans.c
··· 155 155 struct gfs2_sbd *sdp = gl->gl_sbd; 156 156 struct gfs2_bufdata *bd; 157 157 158 + lock_buffer(bh); 159 + gfs2_log_lock(sdp); 158 160 bd = bh->b_private; 159 161 if (bd) 160 162 gfs2_assert(sdp, bd->bd_gl == gl); 161 163 else { 164 + gfs2_log_unlock(sdp); 165 + unlock_buffer(bh); 162 166 gfs2_attach_bufdata(gl, bh, meta); 163 167 bd = bh->b_private; 168 + lock_buffer(bh); 169 + gfs2_log_lock(sdp); 164 170 } 165 171 lops_add(sdp, bd); 172 + gfs2_log_unlock(sdp); 173 + unlock_buffer(bh); 166 174 } 167 175 168 176 void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
+3 -2
fs/nfs/dns_resolve.c
··· 217 217 { 218 218 char buf1[NFS_DNS_HOSTNAME_MAXLEN+1]; 219 219 struct nfs_dns_ent key, *item; 220 - unsigned long ttl; 220 + unsigned int ttl; 221 221 ssize_t len; 222 222 int ret = -EINVAL; 223 223 ··· 240 240 key.namelen = len; 241 241 memset(&key.h, 0, sizeof(key.h)); 242 242 243 - ttl = get_expiry(&buf); 243 + if (get_uint(&buf, &ttl) < 0) 244 + goto out; 244 245 if (ttl == 0) 245 246 goto out; 246 247 key.h.expiry_time = ttl + seconds_since_boot();
+4 -1
fs/nfs/inode.c
··· 685 685 if (ctx->cred != NULL) 686 686 put_rpccred(ctx->cred); 687 687 dput(ctx->dentry); 688 - nfs_sb_deactive(sb); 688 + if (is_sync) 689 + nfs_sb_deactive(sb); 690 + else 691 + nfs_sb_deactive_async(sb); 689 692 kfree(ctx->mdsthreshold); 690 693 kfree(ctx); 691 694 }
+4 -2
fs/nfs/internal.h
··· 351 351 extern void __exit unregister_nfs_fs(void); 352 352 extern void nfs_sb_active(struct super_block *sb); 353 353 extern void nfs_sb_deactive(struct super_block *sb); 354 + extern void nfs_sb_deactive_async(struct super_block *sb); 354 355 355 356 /* namespace.c */ 357 + #define NFS_PATH_CANONICAL 1 356 358 extern char *nfs_path(char **p, struct dentry *dentry, 357 - char *buffer, ssize_t buflen); 359 + char *buffer, ssize_t buflen, unsigned flags); 358 360 extern struct vfsmount *nfs_d_automount(struct path *path); 359 361 struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *, 360 362 struct nfs_fh *, struct nfs_fattr *); ··· 500 498 char *buffer, ssize_t buflen) 501 499 { 502 500 char *dummy; 503 - return nfs_path(&dummy, dentry, buffer, buflen); 501 + return nfs_path(&dummy, dentry, buffer, buflen, NFS_PATH_CANONICAL); 504 502 } 505 503 506 504 /*
+1 -1
fs/nfs/mount_clnt.c
··· 181 181 else 182 182 msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT]; 183 183 184 - status = rpc_call_sync(mnt_clnt, &msg, 0); 184 + status = rpc_call_sync(mnt_clnt, &msg, RPC_TASK_SOFT|RPC_TASK_TIMEOUT); 185 185 rpc_shutdown_client(mnt_clnt); 186 186 187 187 if (status < 0)
+14 -5
fs/nfs/namespace.c
··· 33 33 * @dentry - pointer to dentry 34 34 * @buffer - result buffer 35 35 * @buflen - length of buffer 36 + * @flags - options (see below) 36 37 * 37 38 * Helper function for constructing the server pathname 38 39 * by arbitrary hashed dentry. ··· 41 40 * This is mainly for use in figuring out the path on the 42 41 * server side when automounting on top of an existing partition 43 42 * and in generating /proc/mounts and friends. 43 + * 44 + * Supported flags: 45 + * NFS_PATH_CANONICAL: ensure there is exactly one slash after 46 + * the original device (export) name 47 + * (if unset, the original name is returned verbatim) 44 48 */ 45 - char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen) 49 + char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen, 50 + unsigned flags) 46 51 { 47 52 char *end; 48 53 int namelen; ··· 81 74 rcu_read_unlock(); 82 75 goto rename_retry; 83 76 } 84 - if (*end != '/') { 77 + if ((flags & NFS_PATH_CANONICAL) && *end != '/') { 85 78 if (--buflen < 0) { 86 79 spin_unlock(&dentry->d_lock); 87 80 rcu_read_unlock(); ··· 98 91 return end; 99 92 } 100 93 namelen = strlen(base); 101 - /* Strip off excess slashes in base string */ 102 - while (namelen > 0 && base[namelen - 1] == '/') 103 - namelen--; 94 + if (flags & NFS_PATH_CANONICAL) { 95 + /* Strip off excess slashes in base string */ 96 + while (namelen > 0 && base[namelen - 1] == '/') 97 + namelen--; 98 + } 104 99 buflen -= namelen; 105 100 if (buflen < 0) { 106 101 spin_unlock(&dentry->d_lock);
+2 -1
fs/nfs/nfs4namespace.c
··· 81 81 static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen) 82 82 { 83 83 char *limit; 84 - char *path = nfs_path(&limit, dentry, buffer, buflen); 84 + char *path = nfs_path(&limit, dentry, buffer, buflen, 85 + NFS_PATH_CANONICAL); 85 86 if (!IS_ERR(path)) { 86 87 char *path_component = nfs_path_component(path, limit); 87 88 if (path_component)
+28 -18
fs/nfs/nfs4proc.c
··· 339 339 dprintk("%s ERROR: %d Reset session\n", __func__, 340 340 errorcode); 341 341 nfs4_schedule_session_recovery(clp->cl_session, errorcode); 342 - exception->retry = 1; 343 - break; 342 + goto wait_on_recovery; 344 343 #endif /* defined(CONFIG_NFS_V4_1) */ 345 344 case -NFS4ERR_FILE_OPEN: 346 345 if (exception->timeout > HZ) { ··· 1571 1572 data->timestamp = jiffies; 1572 1573 if (nfs4_setup_sequence(data->o_arg.server, 1573 1574 &data->o_arg.seq_args, 1574 - &data->o_res.seq_res, task)) 1575 - return; 1576 - rpc_call_start(task); 1575 + &data->o_res.seq_res, 1576 + task) != 0) 1577 + nfs_release_seqid(data->o_arg.seqid); 1578 + else 1579 + rpc_call_start(task); 1577 1580 return; 1578 1581 unlock_no_action: 1579 1582 rcu_read_unlock(); ··· 1749 1748 1750 1749 /* even though OPEN succeeded, access is denied. Close the file */ 1751 1750 nfs4_close_state(state, fmode); 1752 - return -NFS4ERR_ACCESS; 1751 + return -EACCES; 1753 1752 } 1754 1753 1755 1754 /* ··· 2197 2196 nfs4_put_open_state(calldata->state); 2198 2197 nfs_free_seqid(calldata->arg.seqid); 2199 2198 nfs4_put_state_owner(sp); 2200 - nfs_sb_deactive(sb); 2199 + nfs_sb_deactive_async(sb); 2201 2200 kfree(calldata); 2202 2201 } 2203 2202 ··· 2297 2296 if (nfs4_setup_sequence(NFS_SERVER(inode), 2298 2297 &calldata->arg.seq_args, 2299 2298 &calldata->res.seq_res, 2300 - task)) 2301 - goto out; 2302 - rpc_call_start(task); 2299 + task) != 0) 2300 + nfs_release_seqid(calldata->arg.seqid); 2301 + else 2302 + rpc_call_start(task); 2303 2303 out: 2304 2304 dprintk("%s: done!\n", __func__); 2305 2305 } ··· 4531 4529 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 4532 4530 rpc_restart_call_prepare(task); 4533 4531 } 4532 + nfs_release_seqid(calldata->arg.seqid); 4534 4533 } 4535 4534 4536 4535 static void nfs4_locku_prepare(struct rpc_task *task, void *data) ··· 4548 4545 calldata->timestamp = jiffies; 4549 4546 if (nfs4_setup_sequence(calldata->server, 4550 4547 
&calldata->arg.seq_args, 4551 - &calldata->res.seq_res, task)) 4552 - return; 4553 - rpc_call_start(task); 4548 + &calldata->res.seq_res, 4549 + task) != 0) 4550 + nfs_release_seqid(calldata->arg.seqid); 4551 + else 4552 + rpc_call_start(task); 4554 4553 } 4555 4554 4556 4555 static const struct rpc_call_ops nfs4_locku_ops = { ··· 4697 4692 /* Do we need to do an open_to_lock_owner? */ 4698 4693 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { 4699 4694 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) 4700 - return; 4695 + goto out_release_lock_seqid; 4701 4696 data->arg.open_stateid = &state->stateid; 4702 4697 data->arg.new_lock_owner = 1; 4703 4698 data->res.open_seqid = data->arg.open_seqid; ··· 4706 4701 data->timestamp = jiffies; 4707 4702 if (nfs4_setup_sequence(data->server, 4708 4703 &data->arg.seq_args, 4709 - &data->res.seq_res, task)) 4704 + &data->res.seq_res, 4705 + task) == 0) { 4706 + rpc_call_start(task); 4710 4707 return; 4711 - rpc_call_start(task); 4712 - dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 4708 + } 4709 + nfs_release_seqid(data->arg.open_seqid); 4710 + out_release_lock_seqid: 4711 + nfs_release_seqid(data->arg.lock_seqid); 4712 + dprintk("%s: done!, ret = %d\n", __func__, task->tk_status); 4713 4713 } 4714 4714 4715 4715 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) ··· 5677 5667 tbl->slots = new; 5678 5668 tbl->max_slots = max_slots; 5679 5669 } 5680 - tbl->highest_used_slotid = -1; /* no slot is currently used */ 5670 + tbl->highest_used_slotid = NFS4_NO_SLOT; 5681 5671 for (i = 0; i < tbl->max_slots; i++) 5682 5672 tbl->slots[i].seq_nr = ivalue; 5683 5673 spin_unlock(&tbl->slot_tbl_lock);
+2 -2
fs/nfs/pnfs.c
··· 925 925 if (likely(nfsi->layout == NULL)) { /* Won the race? */ 926 926 nfsi->layout = new; 927 927 return new; 928 - } 929 - pnfs_free_layout_hdr(new); 928 + } else if (new != NULL) 929 + pnfs_free_layout_hdr(new); 930 930 out_existing: 931 931 pnfs_get_layout_hdr(nfsi->layout); 932 932 return nfsi->layout;
+50 -1
fs/nfs/super.c
··· 54 54 #include <linux/parser.h> 55 55 #include <linux/nsproxy.h> 56 56 #include <linux/rcupdate.h> 57 + #include <linux/kthread.h> 57 58 58 59 #include <asm/uaccess.h> 59 60 ··· 416 415 } 417 416 EXPORT_SYMBOL_GPL(nfs_sb_deactive); 418 417 418 + static int nfs_deactivate_super_async_work(void *ptr) 419 + { 420 + struct super_block *sb = ptr; 421 + 422 + deactivate_super(sb); 423 + module_put_and_exit(0); 424 + return 0; 425 + } 426 + 427 + /* 428 + * same effect as deactivate_super, but will do final unmount in kthread 429 + * context 430 + */ 431 + static void nfs_deactivate_super_async(struct super_block *sb) 432 + { 433 + struct task_struct *task; 434 + char buf[INET6_ADDRSTRLEN + 1]; 435 + struct nfs_server *server = NFS_SB(sb); 436 + struct nfs_client *clp = server->nfs_client; 437 + 438 + if (!atomic_add_unless(&sb->s_active, -1, 1)) { 439 + rcu_read_lock(); 440 + snprintf(buf, sizeof(buf), 441 + rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); 442 + rcu_read_unlock(); 443 + 444 + __module_get(THIS_MODULE); 445 + task = kthread_run(nfs_deactivate_super_async_work, sb, 446 + "%s-deactivate-super", buf); 447 + if (IS_ERR(task)) { 448 + pr_err("%s: kthread_run: %ld\n", 449 + __func__, PTR_ERR(task)); 450 + /* make synchronous call and hope for the best */ 451 + deactivate_super(sb); 452 + module_put(THIS_MODULE); 453 + } 454 + } 455 + } 456 + 457 + void nfs_sb_deactive_async(struct super_block *sb) 458 + { 459 + struct nfs_server *server = NFS_SB(sb); 460 + 461 + if (atomic_dec_and_test(&server->active)) 462 + nfs_deactivate_super_async(sb); 463 + } 464 + EXPORT_SYMBOL_GPL(nfs_sb_deactive_async); 465 + 419 466 /* 420 467 * Deliver file system statistics to userspace 421 468 */ ··· 820 771 int err = 0; 821 772 if (!page) 822 773 return -ENOMEM; 823 - devname = nfs_path(&dummy, root, page, PAGE_SIZE); 774 + devname = nfs_path(&dummy, root, page, PAGE_SIZE, 0); 824 775 if (IS_ERR(devname)) 825 776 err = PTR_ERR(devname); 826 777 else
+1 -1
fs/nfs/unlink.c
··· 95 95 96 96 nfs_dec_sillycount(data->dir); 97 97 nfs_free_unlinkdata(data); 98 - nfs_sb_deactive(sb); 98 + nfs_sb_deactive_async(sb); 99 99 } 100 100 101 101 static void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
+1
fs/notify/fanotify/fanotify.c
··· 21 21 if ((old->path.mnt == new->path.mnt) && 22 22 (old->path.dentry == new->path.dentry)) 23 23 return true; 24 + break; 24 25 case (FSNOTIFY_EVENT_NONE): 25 26 return true; 26 27 default:
+2 -41
fs/xfs/xfs_alloc.c
··· 1866 1866 /* 1867 1867 * Initialize the args structure. 1868 1868 */ 1869 + memset(&targs, 0, sizeof(targs)); 1869 1870 targs.tp = tp; 1870 1871 targs.mp = mp; 1871 1872 targs.agbp = agbp; ··· 2208 2207 * group or loop over the allocation groups to find the result. 2209 2208 */ 2210 2209 int /* error */ 2211 - __xfs_alloc_vextent( 2210 + xfs_alloc_vextent( 2212 2211 xfs_alloc_arg_t *args) /* allocation argument structure */ 2213 2212 { 2214 2213 xfs_agblock_t agsize; /* allocation group size */ ··· 2416 2415 error0: 2417 2416 xfs_perag_put(args->pag); 2418 2417 return error; 2419 - } 2420 - 2421 - static void 2422 - xfs_alloc_vextent_worker( 2423 - struct work_struct *work) 2424 - { 2425 - struct xfs_alloc_arg *args = container_of(work, 2426 - struct xfs_alloc_arg, work); 2427 - unsigned long pflags; 2428 - 2429 - /* we are in a transaction context here */ 2430 - current_set_flags_nested(&pflags, PF_FSTRANS); 2431 - 2432 - args->result = __xfs_alloc_vextent(args); 2433 - complete(args->done); 2434 - 2435 - current_restore_flags_nested(&pflags, PF_FSTRANS); 2436 - } 2437 - 2438 - /* 2439 - * Data allocation requests often come in with little stack to work on. Push 2440 - * them off to a worker thread so there is lots of stack to use. Metadata 2441 - * requests, OTOH, are generally from low stack usage paths, so avoid the 2442 - * context switch overhead here. 2443 - */ 2444 - int 2445 - xfs_alloc_vextent( 2446 - struct xfs_alloc_arg *args) 2447 - { 2448 - DECLARE_COMPLETION_ONSTACK(done); 2449 - 2450 - if (!args->userdata) 2451 - return __xfs_alloc_vextent(args); 2452 - 2453 - 2454 - args->done = &done; 2455 - INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker); 2456 - queue_work(xfs_alloc_wq, &args->work); 2457 - wait_for_completion(&done); 2458 - return args->result; 2459 2418 } 2460 2419 2461 2420 /*
-3
fs/xfs/xfs_alloc.h
··· 120 120 char isfl; /* set if is freelist blocks - !acctg */ 121 121 char userdata; /* set if this is user data */ 122 122 xfs_fsblock_t firstblock; /* io first block allocated */ 123 - struct completion *done; 124 - struct work_struct work; 125 - int result; 126 123 } xfs_alloc_arg_t; 127 124 128 125 /*
+2
fs/xfs/xfs_alloc_btree.c
··· 121 121 xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1, 122 122 XFS_EXTENT_BUSY_SKIP_DISCARD); 123 123 xfs_trans_agbtree_delta(cur->bc_tp, -1); 124 + 125 + xfs_trans_binval(cur->bc_tp, bp); 124 126 return 0; 125 127 } 126 128
+54 -9
fs/xfs/xfs_bmap.c
··· 2437 2437 * Normal allocation, done through xfs_alloc_vextent. 2438 2438 */ 2439 2439 tryagain = isaligned = 0; 2440 + memset(&args, 0, sizeof(args)); 2440 2441 args.tp = ap->tp; 2441 2442 args.mp = mp; 2442 2443 args.fsbno = ap->blkno; ··· 3083 3082 * Convert to a btree with two levels, one record in root. 3084 3083 */ 3085 3084 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); 3085 + memset(&args, 0, sizeof(args)); 3086 3086 args.tp = tp; 3087 3087 args.mp = mp; 3088 3088 args.firstblock = *firstblock; ··· 3239 3237 xfs_buf_t *bp; /* buffer for extent block */ 3240 3238 xfs_bmbt_rec_host_t *ep;/* extent record pointer */ 3241 3239 3240 + memset(&args, 0, sizeof(args)); 3242 3241 args.tp = tp; 3243 3242 args.mp = ip->i_mount; 3244 3243 args.firstblock = *firstblock; ··· 4619 4616 4620 4617 4621 4618 STATIC int 4622 - xfs_bmapi_allocate( 4623 - struct xfs_bmalloca *bma, 4624 - int flags) 4619 + __xfs_bmapi_allocate( 4620 + struct xfs_bmalloca *bma) 4625 4621 { 4626 4622 struct xfs_mount *mp = bma->ip->i_mount; 4627 - int whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 4623 + int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ? 4628 4624 XFS_ATTR_FORK : XFS_DATA_FORK; 4629 4625 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4630 4626 int tmp_logflags = 0; ··· 4656 4654 * Indicate if this is the first user data in the file, or just any 4657 4655 * user data. 4658 4656 */ 4659 - if (!(flags & XFS_BMAPI_METADATA)) { 4657 + if (!(bma->flags & XFS_BMAPI_METADATA)) { 4660 4658 bma->userdata = (bma->offset == 0) ? 4661 4659 XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA; 4662 4660 } 4663 4661 4664 - bma->minlen = (flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4662 + bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4665 4663 4666 4664 /* 4667 4665 * Only want to do the alignment at the eof if it is userdata and 4668 4666 * allocation length is larger than a stripe unit. 
4669 4667 */ 4670 4668 if (mp->m_dalign && bma->length >= mp->m_dalign && 4671 - !(flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4669 + !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4672 4670 error = xfs_bmap_isaeof(bma, whichfork); 4673 4671 if (error) 4674 4672 return error; 4675 4673 } 4674 + 4675 + if (bma->flags & XFS_BMAPI_STACK_SWITCH) 4676 + bma->stack_switch = 1; 4676 4677 4677 4678 error = xfs_bmap_alloc(bma); 4678 4679 if (error) ··· 4711 4706 * A wasdelay extent has been initialized, so shouldn't be flagged 4712 4707 * as unwritten. 4713 4708 */ 4714 - if (!bma->wasdel && (flags & XFS_BMAPI_PREALLOC) && 4709 + if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) && 4715 4710 xfs_sb_version_hasextflgbit(&mp->m_sb)) 4716 4711 bma->got.br_state = XFS_EXT_UNWRITTEN; 4717 4712 ··· 4737 4732 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4738 4733 bma->got.br_state == XFS_EXT_UNWRITTEN); 4739 4734 return 0; 4735 + } 4736 + 4737 + static void 4738 + xfs_bmapi_allocate_worker( 4739 + struct work_struct *work) 4740 + { 4741 + struct xfs_bmalloca *args = container_of(work, 4742 + struct xfs_bmalloca, work); 4743 + unsigned long pflags; 4744 + 4745 + /* we are in a transaction context here */ 4746 + current_set_flags_nested(&pflags, PF_FSTRANS); 4747 + 4748 + args->result = __xfs_bmapi_allocate(args); 4749 + complete(args->done); 4750 + 4751 + current_restore_flags_nested(&pflags, PF_FSTRANS); 4752 + } 4753 + 4754 + /* 4755 + * Some allocation requests often come in with little stack to work on. Push 4756 + * them off to a worker thread so there is lots of stack to use. Otherwise just 4757 + * call directly to avoid the context switch overhead here. 
4758 + */ 4759 + int 4760 + xfs_bmapi_allocate( 4761 + struct xfs_bmalloca *args) 4762 + { 4763 + DECLARE_COMPLETION_ONSTACK(done); 4764 + 4765 + if (!args->stack_switch) 4766 + return __xfs_bmapi_allocate(args); 4767 + 4768 + 4769 + args->done = &done; 4770 + INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); 4771 + queue_work(xfs_alloc_wq, &args->work); 4772 + wait_for_completion(&done); 4773 + return args->result; 4740 4774 } 4741 4775 4742 4776 STATIC int ··· 4963 4919 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4964 4920 bma.wasdel = wasdelay; 4965 4921 bma.offset = bno; 4922 + bma.flags = flags; 4966 4923 4967 4924 /* 4968 4925 * There's a 32/64 bit type mismatch between the ··· 4979 4934 4980 4935 ASSERT(len > 0); 4981 4936 ASSERT(bma.length > 0); 4982 - error = xfs_bmapi_allocate(&bma, flags); 4937 + error = xfs_bmapi_allocate(&bma); 4983 4938 if (error) 4984 4939 goto error0; 4985 4940 if (bma.blkno == NULLFSBLOCK)
+8 -1
fs/xfs/xfs_bmap.h
··· 77 77 * from written to unwritten, otherwise convert from unwritten to written. 78 78 */ 79 79 #define XFS_BMAPI_CONVERT 0x040 80 + #define XFS_BMAPI_STACK_SWITCH 0x080 80 81 81 82 #define XFS_BMAPI_FLAGS \ 82 83 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ ··· 86 85 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 87 86 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 88 87 { XFS_BMAPI_CONTIG, "CONTIG" }, \ 89 - { XFS_BMAPI_CONVERT, "CONVERT" } 88 + { XFS_BMAPI_CONVERT, "CONVERT" }, \ 89 + { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" } 90 90 91 91 92 92 static inline int xfs_bmapi_aflag(int w) ··· 135 133 char userdata;/* set if is user data */ 136 134 char aeof; /* allocated space at eof */ 137 135 char conv; /* overwriting unwritten extents */ 136 + char stack_switch; 137 + int flags; 138 + struct completion *done; 139 + struct work_struct work; 140 + int result; 138 141 } xfs_bmalloca_t; 139 142 140 143 /*
+18
fs/xfs/xfs_buf_item.c
··· 526 526 } 527 527 xfs_buf_relse(bp); 528 528 } else if (freed && remove) { 529 + /* 530 + * There are currently two references to the buffer - the active 531 + * LRU reference and the buf log item. What we are about to do 532 + * here - simulate a failed IO completion - requires 3 533 + * references. 534 + * 535 + * The LRU reference is removed by the xfs_buf_stale() call. The 536 + * buf item reference is removed by the xfs_buf_iodone() 537 + * callback that is run by xfs_buf_do_callbacks() during ioend 538 + * processing (via the bp->b_iodone callback), and then finally 539 + * the ioend processing will drop the IO reference if the buffer 540 + * is marked XBF_ASYNC. 541 + * 542 + * Hence we need to take an additional reference here so that IO 543 + * completion processing doesn't free the buffer prematurely. 544 + */ 529 545 xfs_buf_lock(bp); 546 + xfs_buf_hold(bp); 547 + bp->b_flags |= XBF_ASYNC; 530 548 xfs_buf_ioerror(bp, EIO); 531 549 XFS_BUF_UNDONE(bp); 532 550 xfs_buf_stale(bp);
+19 -2
fs/xfs/xfs_fsops.c
··· 399 399 400 400 /* update secondary superblocks. */ 401 401 for (agno = 1; agno < nagcount; agno++) { 402 - error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, 402 + error = 0; 403 + /* 404 + * new secondary superblocks need to be zeroed, not read from 405 + * disk as the contents of the new area we are growing into is 406 + * completely unknown. 407 + */ 408 + if (agno < oagcount) { 409 + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, 403 410 XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), 404 411 XFS_FSS_TO_BB(mp, 1), 0, &bp); 412 + } else { 413 + bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp, 414 + XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), 415 + XFS_FSS_TO_BB(mp, 1), 0); 416 + if (bp) 417 + xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); 418 + else 419 + error = ENOMEM; 420 + } 421 + 405 422 if (error) { 406 423 xfs_warn(mp, 407 424 "error %d reading secondary superblock for ag %d", ··· 440 423 break; /* no point in continuing */ 441 424 } 442 425 } 443 - return 0; 426 + return error; 444 427 445 428 error0: 446 429 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+1
fs/xfs/xfs_ialloc.c
··· 250 250 /* boundary */ 251 251 struct xfs_perag *pag; 252 252 253 + memset(&args, 0, sizeof(args)); 253 254 args.tp = tp; 254 255 args.mp = tp->t_mountp; 255 256
+2 -1
fs/xfs/xfs_inode.c
··· 1509 1509 * to mark all the active inodes on the buffer stale. 1510 1510 */ 1511 1511 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 1512 - mp->m_bsize * blks_per_cluster, 0); 1512 + mp->m_bsize * blks_per_cluster, 1513 + XBF_UNMAPPED); 1513 1514 1514 1515 if (!bp) 1515 1516 return ENOMEM;
+1 -1
fs/xfs/xfs_ioctl.c
··· 70 70 int hsize; 71 71 xfs_handle_t handle; 72 72 struct inode *inode; 73 - struct fd f; 73 + struct fd f = {0}; 74 74 struct path path; 75 75 int error; 76 76 struct xfs_inode *ip;
+3 -1
fs/xfs/xfs_iomap.c
··· 584 584 * pointer that the caller gave to us. 585 585 */ 586 586 error = xfs_bmapi_write(tp, ip, map_start_fsb, 587 - count_fsb, 0, &first_block, 1, 587 + count_fsb, 588 + XFS_BMAPI_STACK_SWITCH, 589 + &first_block, 1, 588 590 imap, &nimaps, &free_list); 589 591 if (error) 590 592 goto trans_cancel;
+16 -3
fs/xfs/xfs_log.c
··· 2387 2387 2388 2388 2389 2389 /* 2390 - * update the last_sync_lsn before we drop the 2390 + * Completion of a iclog IO does not imply that 2391 + * a transaction has completed, as transactions 2392 + * can be large enough to span many iclogs. We 2393 + * cannot change the tail of the log half way 2394 + * through a transaction as this may be the only 2395 + * transaction in the log and moving th etail to 2396 + * point to the middle of it will prevent 2397 + * recovery from finding the start of the 2398 + * transaction. Hence we should only update the 2399 + * last_sync_lsn if this iclog contains 2400 + * transaction completion callbacks on it. 2401 + * 2402 + * We have to do this before we drop the 2391 2403 * icloglock to ensure we are the only one that 2392 2404 * can update it. 2393 2405 */ 2394 2406 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), 2395 2407 be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); 2396 - atomic64_set(&log->l_last_sync_lsn, 2397 - be64_to_cpu(iclog->ic_header.h_lsn)); 2408 + if (iclog->ic_callback) 2409 + atomic64_set(&log->l_last_sync_lsn, 2410 + be64_to_cpu(iclog->ic_header.h_lsn)); 2398 2411 2399 2412 } else 2400 2413 ioerrors++;
+1 -1
fs/xfs/xfs_log_recover.c
··· 3541 3541 * - order is important. 3542 3542 */ 3543 3543 error = xlog_bread_offset(log, 0, 3544 - bblks - split_bblks, hbp, 3544 + bblks - split_bblks, dbp, 3545 3545 offset + BBTOB(split_bblks)); 3546 3546 if (error) 3547 3547 goto bread_err2;
+192
include/linux/hashtable.h
··· 1 + /* 2 + * Statically sized hash table implementation 3 + * (C) 2012 Sasha Levin <levinsasha928@gmail.com> 4 + */ 5 + 6 + #ifndef _LINUX_HASHTABLE_H 7 + #define _LINUX_HASHTABLE_H 8 + 9 + #include <linux/list.h> 10 + #include <linux/types.h> 11 + #include <linux/kernel.h> 12 + #include <linux/hash.h> 13 + #include <linux/rculist.h> 14 + 15 + #define DEFINE_HASHTABLE(name, bits) \ 16 + struct hlist_head name[1 << (bits)] = \ 17 + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } 18 + 19 + #define DECLARE_HASHTABLE(name, bits) \ 20 + struct hlist_head name[1 << (bits)] 21 + 22 + #define HASH_SIZE(name) (ARRAY_SIZE(name)) 23 + #define HASH_BITS(name) ilog2(HASH_SIZE(name)) 24 + 25 + /* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ 26 + #define hash_min(val, bits) \ 27 + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) 28 + 29 + static inline void __hash_init(struct hlist_head *ht, unsigned int sz) 30 + { 31 + unsigned int i; 32 + 33 + for (i = 0; i < sz; i++) 34 + INIT_HLIST_HEAD(&ht[i]); 35 + } 36 + 37 + /** 38 + * hash_init - initialize a hash table 39 + * @hashtable: hashtable to be initialized 40 + * 41 + * Calculates the size of the hashtable from the given parameter, otherwise 42 + * same as hash_init_size. 43 + * 44 + * This has to be a macro since HASH_BITS() will not work on pointers since 45 + * it calculates the size during preprocessing. 
46 + */ 47 + #define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) 48 + 49 + /** 50 + * hash_add - add an object to a hashtable 51 + * @hashtable: hashtable to add to 52 + * @node: the &struct hlist_node of the object to be added 53 + * @key: the key of the object to be added 54 + */ 55 + #define hash_add(hashtable, node, key) \ 56 + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) 57 + 58 + /** 59 + * hash_add_rcu - add an object to a rcu enabled hashtable 60 + * @hashtable: hashtable to add to 61 + * @node: the &struct hlist_node of the object to be added 62 + * @key: the key of the object to be added 63 + */ 64 + #define hash_add_rcu(hashtable, node, key) \ 65 + hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) 66 + 67 + /** 68 + * hash_hashed - check whether an object is in any hashtable 69 + * @node: the &struct hlist_node of the object to be checked 70 + */ 71 + static inline bool hash_hashed(struct hlist_node *node) 72 + { 73 + return !hlist_unhashed(node); 74 + } 75 + 76 + static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) 77 + { 78 + unsigned int i; 79 + 80 + for (i = 0; i < sz; i++) 81 + if (!hlist_empty(&ht[i])) 82 + return false; 83 + 84 + return true; 85 + } 86 + 87 + /** 88 + * hash_empty - check whether a hashtable is empty 89 + * @hashtable: hashtable to check 90 + * 91 + * This has to be a macro since HASH_BITS() will not work on pointers since 92 + * it calculates the size during preprocessing. 
93 + */ 94 + #define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) 95 + 96 + /** 97 + * hash_del - remove an object from a hashtable 98 + * @node: &struct hlist_node of the object to remove 99 + */ 100 + static inline void hash_del(struct hlist_node *node) 101 + { 102 + hlist_del_init(node); 103 + } 104 + 105 + /** 106 + * hash_del_rcu - remove an object from a rcu enabled hashtable 107 + * @node: &struct hlist_node of the object to remove 108 + */ 109 + static inline void hash_del_rcu(struct hlist_node *node) 110 + { 111 + hlist_del_init_rcu(node); 112 + } 113 + 114 + /** 115 + * hash_for_each - iterate over a hashtable 116 + * @name: hashtable to iterate 117 + * @bkt: integer to use as bucket loop cursor 118 + * @node: the &struct list_head to use as a loop cursor for each entry 119 + * @obj: the type * to use as a loop cursor for each entry 120 + * @member: the name of the hlist_node within the struct 121 + */ 122 + #define hash_for_each(name, bkt, node, obj, member) \ 123 + for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ 124 + hlist_for_each_entry(obj, node, &name[bkt], member) 125 + 126 + /** 127 + * hash_for_each_rcu - iterate over a rcu enabled hashtable 128 + * @name: hashtable to iterate 129 + * @bkt: integer to use as bucket loop cursor 130 + * @node: the &struct list_head to use as a loop cursor for each entry 131 + * @obj: the type * to use as a loop cursor for each entry 132 + * @member: the name of the hlist_node within the struct 133 + */ 134 + #define hash_for_each_rcu(name, bkt, node, obj, member) \ 135 + for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ 136 + hlist_for_each_entry_rcu(obj, node, &name[bkt], member) 137 + 138 + /** 139 + * hash_for_each_safe - iterate over a hashtable safe against removal of 140 + * hash entry 141 + * @name: hashtable to iterate 142 + * @bkt: integer to use as bucket loop cursor 143 + * @node: the &struct list_head to use as a loop 
cursor for each entry 144 + * @tmp: a &struct used for temporary storage 145 + * @obj: the type * to use as a loop cursor for each entry 146 + * @member: the name of the hlist_node within the struct 147 + */ 148 + #define hash_for_each_safe(name, bkt, node, tmp, obj, member) \ 149 + for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ 150 + hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member) 151 + 152 + /** 153 + * hash_for_each_possible - iterate over all possible objects hashing to the 154 + * same bucket 155 + * @name: hashtable to iterate 156 + * @obj: the type * to use as a loop cursor for each entry 157 + * @node: the &struct list_head to use as a loop cursor for each entry 158 + * @member: the name of the hlist_node within the struct 159 + * @key: the key of the objects to iterate over 160 + */ 161 + #define hash_for_each_possible(name, obj, node, member, key) \ 162 + hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member) 163 + 164 + /** 165 + * hash_for_each_possible_rcu - iterate over all possible objects hashing to the 166 + * same bucket in an rcu enabled hashtable 167 + * in a rcu enabled hashtable 168 + * @name: hashtable to iterate 169 + * @obj: the type * to use as a loop cursor for each entry 170 + * @node: the &struct list_head to use as a loop cursor for each entry 171 + * @member: the name of the hlist_node within the struct 172 + * @key: the key of the objects to iterate over 173 + */ 174 + #define hash_for_each_possible_rcu(name, obj, node, member, key) \ 175 + hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member) 176 + 177 + /** 178 + * hash_for_each_possible_safe - iterate over all possible objects hashing to the 179 + * same bucket safe against removals 180 + * @name: hashtable to iterate 181 + * @obj: the type * to use as a loop cursor for each entry 182 + * @node: the &struct list_head to use as a loop cursor for each entry 183 + * @tmp: a &struct 
used for temporary storage 184 + * @member: the name of the hlist_node within the struct 185 + * @key: the key of the objects to iterate over 186 + */ 187 + #define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \ 188 + hlist_for_each_entry_safe(obj, node, tmp, \ 189 + &name[hash_min(key, HASH_BITS(name))], member) 190 + 191 + 192 + #endif
+2 -13
include/linux/kvm_host.h
··· 42 42 */ 43 43 #define KVM_MEMSLOT_INVALID (1UL << 16) 44 44 45 - /* 46 - * If we support unaligned MMIO, at most one fragment will be split into two: 47 - */ 48 - #ifdef KVM_UNALIGNED_MMIO 49 - # define KVM_EXTRA_MMIO_FRAGMENTS 1 50 - #else 51 - # define KVM_EXTRA_MMIO_FRAGMENTS 0 52 - #endif 53 - 54 - #define KVM_USER_MMIO_SIZE 8 55 - 56 - #define KVM_MAX_MMIO_FRAGMENTS \ 57 - (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS) 45 + /* Two fragments for cross MMIO pages. */ 46 + #define KVM_MAX_MMIO_FRAGMENTS 2 58 47 59 48 /* 60 49 * For the normal pfn, the highest 12 bits should be zero,
+3 -3
include/linux/mmc/dw_mmc.h
··· 137 137 138 138 dma_addr_t sg_dma; 139 139 void *sg_cpu; 140 - struct dw_mci_dma_ops *dma_ops; 140 + const struct dw_mci_dma_ops *dma_ops; 141 141 #ifdef CONFIG_MMC_DW_IDMAC 142 142 unsigned int ring_size; 143 143 #else ··· 162 162 u16 data_offset; 163 163 struct device *dev; 164 164 struct dw_mci_board *pdata; 165 - struct dw_mci_drv_data *drv_data; 165 + const struct dw_mci_drv_data *drv_data; 166 166 void *priv; 167 167 struct clk *biu_clk; 168 168 struct clk *ciu_clk; ··· 186 186 187 187 struct regulator *vmmc; /* Power regulator */ 188 188 unsigned long irq_flags; /* IRQ flags */ 189 - unsigned int irq; 189 + int irq; 190 190 }; 191 191 192 192 /* DMA ops for Internal/External DMAC interface */
+1
include/linux/mmc/sdhci.h
··· 91 91 unsigned int quirks2; /* More deviations from spec. */ 92 92 93 93 #define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0) 94 + #define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1) 94 95 95 96 int irq; /* Device IRQ */ 96 97 void __iomem *ioaddr; /* Mapped address */
+2
include/linux/of_address.h
··· 28 28 #endif 29 29 30 30 #else /* CONFIG_OF_ADDRESS */ 31 + #ifndef of_address_to_resource 31 32 static inline int of_address_to_resource(struct device_node *dev, int index, 32 33 struct resource *r) 33 34 { 34 35 return -EINVAL; 35 36 } 37 + #endif 36 38 static inline struct device_node *of_find_matching_node_by_address( 37 39 struct device_node *from, 38 40 const struct of_device_id *matches,
+2 -1
include/linux/ptp_clock_kernel.h
··· 54 54 * clock operations 55 55 * 56 56 * @adjfreq: Adjusts the frequency of the hardware clock. 57 - * parameter delta: Desired period change in parts per billion. 57 + * parameter delta: Desired frequency offset from nominal frequency 58 + * in parts per billion 58 59 * 59 60 * @adjtime: Shifts the time of the hardware clock. 60 61 * parameter delta: Desired change in nanoseconds.
-2
include/linux/raid/Kbuild
··· 1 - header-y += md_p.h 2 - header-y += md_u.h
include/linux/raid/md_p.h include/uapi/linux/raid/md_p.h
+1 -140
include/linux/raid/md_u.h
··· 11 11 (for example /usr/src/linux/COPYING); if not, write to the Free 12 12 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 13 13 */ 14 - 15 14 #ifndef _MD_U_H 16 15 #define _MD_U_H 17 16 18 - /* 19 - * Different major versions are not compatible. 20 - * Different minor versions are only downward compatible. 21 - * Different patchlevel versions are downward and upward compatible. 22 - */ 23 - #define MD_MAJOR_VERSION 0 24 - #define MD_MINOR_VERSION 90 25 - /* 26 - * MD_PATCHLEVEL_VERSION indicates kernel functionality. 27 - * >=1 means different superblock formats are selectable using SET_ARRAY_INFO 28 - * and major_version/minor_version accordingly 29 - * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT 30 - * in the super status byte 31 - * >=3 means that bitmap superblock version 4 is supported, which uses 32 - * little-ending representation rather than host-endian 33 - */ 34 - #define MD_PATCHLEVEL_VERSION 3 17 + #include <uapi/linux/raid/md_u.h> 35 18 36 - /* ioctls */ 37 - 38 - /* status */ 39 - #define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t) 40 - #define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t) 41 - #define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t) 42 - #define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13) 43 - #define RAID_AUTORUN _IO (MD_MAJOR, 0x14) 44 - #define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t) 45 - 46 - /* configuration */ 47 - #define CLEAR_ARRAY _IO (MD_MAJOR, 0x20) 48 - #define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t) 49 - #define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22) 50 - #define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t) 51 - #define SET_DISK_INFO _IO (MD_MAJOR, 0x24) 52 - #define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25) 53 - #define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26) 54 - #define PROTECT_ARRAY _IO (MD_MAJOR, 0x27) 55 - #define HOT_ADD_DISK _IO (MD_MAJOR, 0x28) 56 - #define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29) 57 - #define 
HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a) 58 - #define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int) 59 - 60 - /* usage */ 61 - #define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t) 62 - /* 0x31 was START_ARRAY */ 63 - #define STOP_ARRAY _IO (MD_MAJOR, 0x32) 64 - #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) 65 - #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) 66 - 67 - /* 63 partitions with the alternate major number (mdp) */ 68 - #define MdpMinorShift 6 69 - #ifdef __KERNEL__ 70 19 extern int mdp_major; 71 - #endif 72 - 73 - typedef struct mdu_version_s { 74 - int major; 75 - int minor; 76 - int patchlevel; 77 - } mdu_version_t; 78 - 79 - typedef struct mdu_array_info_s { 80 - /* 81 - * Generic constant information 82 - */ 83 - int major_version; 84 - int minor_version; 85 - int patch_version; 86 - int ctime; 87 - int level; 88 - int size; 89 - int nr_disks; 90 - int raid_disks; 91 - int md_minor; 92 - int not_persistent; 93 - 94 - /* 95 - * Generic state information 96 - */ 97 - int utime; /* 0 Superblock update time */ 98 - int state; /* 1 State bits (clean, ...) */ 99 - int active_disks; /* 2 Number of currently active disks */ 100 - int working_disks; /* 3 Number of working disks */ 101 - int failed_disks; /* 4 Number of failed disks */ 102 - int spare_disks; /* 5 Number of spare disks */ 103 - 104 - /* 105 - * Personality information 106 - */ 107 - int layout; /* 0 the array's physical layout */ 108 - int chunk_size; /* 1 chunk size in bytes */ 109 - 110 - } mdu_array_info_t; 111 - 112 - /* non-obvious values for 'level' */ 113 - #define LEVEL_MULTIPATH (-4) 114 - #define LEVEL_LINEAR (-1) 115 - #define LEVEL_FAULTY (-5) 116 - 117 - /* we need a value for 'no level specified' and 0 118 - * means 'raid0', so we need something else. 
This is 119 - * for internal use only 120 - */ 121 - #define LEVEL_NONE (-1000000) 122 - 123 - typedef struct mdu_disk_info_s { 124 - /* 125 - * configuration/status of one particular disk 126 - */ 127 - int number; 128 - int major; 129 - int minor; 130 - int raid_disk; 131 - int state; 132 - 133 - } mdu_disk_info_t; 134 - 135 - typedef struct mdu_start_info_s { 136 - /* 137 - * configuration/status of one particular disk 138 - */ 139 - int major; 140 - int minor; 141 - int raid_disk; 142 - int state; 143 - 144 - } mdu_start_info_t; 145 - 146 - typedef struct mdu_bitmap_file_s 147 - { 148 - char pathname[4096]; 149 - } mdu_bitmap_file_t; 150 - 151 - typedef struct mdu_param_s 152 - { 153 - int personality; /* 1,2,3,4 */ 154 - int chunk_size; /* in bytes */ 155 - int max_fault; /* unused for now */ 156 - } mdu_param_t; 157 - 158 20 #endif 159 -
+9
include/net/cfg80211.h
··· 2652 2652 unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc); 2653 2653 2654 2654 /** 2655 + * ieee80211_get_mesh_hdrlen - get mesh extension header length 2656 + * @meshhdr: the mesh extension header, only the flags field 2657 + * (first byte) will be accessed 2658 + * Returns the length of the extension header, which is always at 2659 + * least 6 bytes and at most 18 if address 5 and 6 are present. 2660 + */ 2661 + unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); 2662 + 2663 + /** 2655 2664 * DOC: Data path helpers 2656 2665 * 2657 2666 * In addition to generic utilities, cfg80211 also offers
+3
include/sound/core.h
··· 132 132 int shutdown; /* this card is going down */ 133 133 int free_on_last_close; /* free in context of file_release */ 134 134 wait_queue_head_t shutdown_sleep; 135 + atomic_t refcount; /* refcount for disconnection */ 135 136 struct device *dev; /* device assigned to this card */ 136 137 struct device *card_dev; /* cardX object for sysfs */ 137 138 ··· 190 189 const struct file_operations *f_ops; /* file operations */ 191 190 void *private_data; /* private data for f_ops->open */ 192 191 struct device *dev; /* device for sysfs */ 192 + struct snd_card *card_ptr; /* assigned card instance */ 193 193 }; 194 194 195 195 /* return a device pointer linked to each sound device as a parent */ ··· 297 295 int snd_component_add(struct snd_card *card, const char *component); 298 296 int snd_card_file_add(struct snd_card *card, struct file *file); 299 297 int snd_card_file_remove(struct snd_card *card, struct file *file); 298 + void snd_card_unref(struct snd_card *card); 300 299 301 300 #define snd_card_set_dev(card, devptr) ((card)->dev = (devptr)) 302 301
+8
include/trace/events/xen.h
··· 377 377 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); 378 378 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); 379 379 380 + TRACE_EVENT(xen_mmu_flush_tlb_all, 381 + TP_PROTO(int x), 382 + TP_ARGS(x), 383 + TP_STRUCT__entry(__array(char, x, 0)), 384 + TP_fast_assign((void)x), 385 + TP_printk("%s", "") 386 + ); 387 + 380 388 TRACE_EVENT(xen_mmu_flush_tlb, 381 389 TP_PROTO(int x), 382 390 TP_ARGS(x),
-1
include/uapi/linux/eventpoll.h
··· 25 25 #define EPOLL_CTL_ADD 1 26 26 #define EPOLL_CTL_DEL 2 27 27 #define EPOLL_CTL_MOD 3 28 - #define EPOLL_CTL_DISABLE 4 29 28 30 29 /* 31 30 * Request the handling of system wakeup events so as to prevent system suspends
+2
include/uapi/linux/raid/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += md_p.h 3 + header-y += md_u.h
+155
include/uapi/linux/raid/md_u.h
··· 1 + /* 2 + md_u.h : user <=> kernel API between Linux raidtools and RAID drivers 3 + Copyright (C) 1998 Ingo Molnar 4 + 5 + This program is free software; you can redistribute it and/or modify 6 + it under the terms of the GNU General Public License as published by 7 + the Free Software Foundation; either version 2, or (at your option) 8 + any later version. 9 + 10 + You should have received a copy of the GNU General Public License 11 + (for example /usr/src/linux/COPYING); if not, write to the Free 12 + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 13 + */ 14 + 15 + #ifndef _UAPI_MD_U_H 16 + #define _UAPI_MD_U_H 17 + 18 + /* 19 + * Different major versions are not compatible. 20 + * Different minor versions are only downward compatible. 21 + * Different patchlevel versions are downward and upward compatible. 22 + */ 23 + #define MD_MAJOR_VERSION 0 24 + #define MD_MINOR_VERSION 90 25 + /* 26 + * MD_PATCHLEVEL_VERSION indicates kernel functionality. 27 + * >=1 means different superblock formats are selectable using SET_ARRAY_INFO 28 + * and major_version/minor_version accordingly 29 + * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT 30 + * in the super status byte 31 + * >=3 means that bitmap superblock version 4 is supported, which uses 32 + * little-ending representation rather than host-endian 33 + */ 34 + #define MD_PATCHLEVEL_VERSION 3 35 + 36 + /* ioctls */ 37 + 38 + /* status */ 39 + #define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t) 40 + #define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t) 41 + #define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t) 42 + #define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13) 43 + #define RAID_AUTORUN _IO (MD_MAJOR, 0x14) 44 + #define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t) 45 + 46 + /* configuration */ 47 + #define CLEAR_ARRAY _IO (MD_MAJOR, 0x20) 48 + #define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t) 49 + #define HOT_REMOVE_DISK 
_IO (MD_MAJOR, 0x22) 50 + #define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t) 51 + #define SET_DISK_INFO _IO (MD_MAJOR, 0x24) 52 + #define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25) 53 + #define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26) 54 + #define PROTECT_ARRAY _IO (MD_MAJOR, 0x27) 55 + #define HOT_ADD_DISK _IO (MD_MAJOR, 0x28) 56 + #define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29) 57 + #define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a) 58 + #define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int) 59 + 60 + /* usage */ 61 + #define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t) 62 + /* 0x31 was START_ARRAY */ 63 + #define STOP_ARRAY _IO (MD_MAJOR, 0x32) 64 + #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) 65 + #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) 66 + 67 + /* 63 partitions with the alternate major number (mdp) */ 68 + #define MdpMinorShift 6 69 + 70 + typedef struct mdu_version_s { 71 + int major; 72 + int minor; 73 + int patchlevel; 74 + } mdu_version_t; 75 + 76 + typedef struct mdu_array_info_s { 77 + /* 78 + * Generic constant information 79 + */ 80 + int major_version; 81 + int minor_version; 82 + int patch_version; 83 + int ctime; 84 + int level; 85 + int size; 86 + int nr_disks; 87 + int raid_disks; 88 + int md_minor; 89 + int not_persistent; 90 + 91 + /* 92 + * Generic state information 93 + */ 94 + int utime; /* 0 Superblock update time */ 95 + int state; /* 1 State bits (clean, ...) 
*/ 96 + int active_disks; /* 2 Number of currently active disks */ 97 + int working_disks; /* 3 Number of working disks */ 98 + int failed_disks; /* 4 Number of failed disks */ 99 + int spare_disks; /* 5 Number of spare disks */ 100 + 101 + /* 102 + * Personality information 103 + */ 104 + int layout; /* 0 the array's physical layout */ 105 + int chunk_size; /* 1 chunk size in bytes */ 106 + 107 + } mdu_array_info_t; 108 + 109 + /* non-obvious values for 'level' */ 110 + #define LEVEL_MULTIPATH (-4) 111 + #define LEVEL_LINEAR (-1) 112 + #define LEVEL_FAULTY (-5) 113 + 114 + /* we need a value for 'no level specified' and 0 115 + * means 'raid0', so we need something else. This is 116 + * for internal use only 117 + */ 118 + #define LEVEL_NONE (-1000000) 119 + 120 + typedef struct mdu_disk_info_s { 121 + /* 122 + * configuration/status of one particular disk 123 + */ 124 + int number; 125 + int major; 126 + int minor; 127 + int raid_disk; 128 + int state; 129 + 130 + } mdu_disk_info_t; 131 + 132 + typedef struct mdu_start_info_s { 133 + /* 134 + * configuration/status of one particular disk 135 + */ 136 + int major; 137 + int minor; 138 + int raid_disk; 139 + int state; 140 + 141 + } mdu_start_info_t; 142 + 143 + typedef struct mdu_bitmap_file_s 144 + { 145 + char pathname[4096]; 146 + } mdu_bitmap_file_t; 147 + 148 + typedef struct mdu_param_s 149 + { 150 + int personality; /* 1,2,3,4 */ 151 + int chunk_size; /* in bytes */ 152 + int max_fault; /* unused for now */ 153 + } mdu_param_t; 154 + 155 + #endif /* _UAPI_MD_U_H */
+32 -2
include/xen/hvm.h
··· 5 5 #include <xen/interface/hvm/params.h> 6 6 #include <asm/xen/hypercall.h> 7 7 8 + static const char *param_name(int op) 9 + { 10 + #define PARAM(x) [HVM_PARAM_##x] = #x 11 + static const char *const names[] = { 12 + PARAM(CALLBACK_IRQ), 13 + PARAM(STORE_PFN), 14 + PARAM(STORE_EVTCHN), 15 + PARAM(PAE_ENABLED), 16 + PARAM(IOREQ_PFN), 17 + PARAM(BUFIOREQ_PFN), 18 + PARAM(TIMER_MODE), 19 + PARAM(HPET_ENABLED), 20 + PARAM(IDENT_PT), 21 + PARAM(DM_DOMAIN), 22 + PARAM(ACPI_S_STATE), 23 + PARAM(VM86_TSS), 24 + PARAM(VPT_ALIGN), 25 + PARAM(CONSOLE_PFN), 26 + PARAM(CONSOLE_EVTCHN), 27 + }; 28 + #undef PARAM 29 + 30 + if (op >= ARRAY_SIZE(names)) 31 + return "unknown"; 32 + 33 + if (!names[op]) 34 + return "reserved"; 35 + 36 + return names[op]; 37 + } 8 38 static inline int hvm_get_parameter(int idx, uint64_t *value) 9 39 { 10 40 struct xen_hvm_param xhv; ··· 44 14 xhv.index = idx; 45 15 r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); 46 16 if (r < 0) { 47 - printk(KERN_ERR "Cannot get hvm parameter %d: %d!\n", 48 - idx, r); 17 + printk(KERN_ERR "Cannot get hvm parameter %s (%d): %d!\n", 18 + param_name(idx), idx, r); 49 19 return r; 50 20 } 51 21 *value = xhv.value;
+2
init/main.c
··· 442 442 { 443 443 } 444 444 445 + # if THREAD_SIZE >= PAGE_SIZE 445 446 void __init __weak thread_info_cache_init(void) 446 447 { 447 448 } 449 + #endif 448 450 449 451 /* 450 452 * Set up kernel memory allocators
+16 -11
kernel/module.c
··· 2293 2293 src = (void *)info->hdr + symsect->sh_offset; 2294 2294 nsrc = symsect->sh_size / sizeof(*src); 2295 2295 2296 + /* strtab always starts with a nul, so offset 0 is the empty string. */ 2297 + strtab_size = 1; 2298 + 2296 2299 /* Compute total space required for the core symbols' strtab. */ 2297 - for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src) 2298 - if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) { 2299 - strtab_size += strlen(&info->strtab[src->st_name]) + 1; 2300 + for (ndst = i = 0; i < nsrc; i++) { 2301 + if (i == 0 || 2302 + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { 2303 + strtab_size += strlen(&info->strtab[src[i].st_name])+1; 2300 2304 ndst++; 2301 2305 } 2306 + } 2302 2307 2303 2308 /* Append room for core symbols at end of core part. */ 2304 2309 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); ··· 2337 2332 mod->core_symtab = dst = mod->module_core + info->symoffs; 2338 2333 mod->core_strtab = s = mod->module_core + info->stroffs; 2339 2334 src = mod->symtab; 2340 - *dst = *src; 2341 2335 *s++ = 0; 2342 - for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { 2343 - if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) 2344 - continue; 2345 - 2346 - dst[ndst] = *src; 2347 - dst[ndst++].st_name = s - mod->core_strtab; 2348 - s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1; 2336 + for (ndst = i = 0; i < mod->num_symtab; i++) { 2337 + if (i == 0 || 2338 + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { 2339 + dst[ndst] = src[i]; 2340 + dst[ndst++].st_name = s - mod->core_strtab; 2341 + s += strlcpy(s, &mod->strtab[src[i].st_name], 2342 + KSYM_NAME_LEN) + 1; 2343 + } 2349 2344 } 2350 2345 mod->core_num_syms = ndst; 2351 2346 }
+2
mm/vmscan.c
··· 3017 3017 &balanced_classzone_idx); 3018 3018 } 3019 3019 } 3020 + 3021 + current->reclaim_state = NULL; 3020 3022 return 0; 3021 3023 } 3022 3024
+4 -2
net/ceph/messenger.c
··· 2300 2300 mutex_unlock(&con->mutex); 2301 2301 return; 2302 2302 } else { 2303 - con->ops->put(con); 2304 2303 dout("con_work %p FAILED to back off %lu\n", con, 2305 2304 con->delay); 2305 + set_bit(CON_FLAG_BACKOFF, &con->flags); 2306 2306 } 2307 + goto done; 2307 2308 } 2308 2309 2309 2310 if (con->state == CON_STATE_STANDBY) { ··· 2750 2749 msg = con->ops->alloc_msg(con, hdr, skip); 2751 2750 mutex_lock(&con->mutex); 2752 2751 if (con->state != CON_STATE_OPEN) { 2753 - ceph_msg_put(msg); 2752 + if (msg) 2753 + ceph_msg_put(msg); 2754 2754 return -EAGAIN; 2755 2755 } 2756 2756 con->in_msg = msg;
+1 -1
net/core/dev.c
··· 1666 1666 1667 1667 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 1668 1668 { 1669 - if (ptype->af_packet_priv == NULL) 1669 + if (!ptype->af_packet_priv || !skb->sk) 1670 1670 return false; 1671 1671 1672 1672 if (ptype->id_match)
+2 -1
net/core/rtnetlink.c
··· 2192 2192 goto skip; 2193 2193 2194 2194 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 2195 - portid, seq, 0, NTF_SELF); 2195 + portid, seq, 2196 + RTM_NEWNEIGH, NTF_SELF); 2196 2197 if (err < 0) 2197 2198 return err; 2198 2199 skip:
+4 -1
net/ipv4/inet_diag.c
··· 892 892 struct inet_diag_req_v2 *r, struct nlattr *bc) 893 893 { 894 894 const struct inet_diag_handler *handler; 895 + int err = 0; 895 896 896 897 handler = inet_diag_lock_handler(r->sdiag_protocol); 897 898 if (!IS_ERR(handler)) 898 899 handler->dump(skb, cb, r, bc); 900 + else 901 + err = PTR_ERR(handler); 899 902 inet_diag_unlock_handler(handler); 900 903 901 - return skb->len; 904 + return err ? : skb->len; 902 905 } 903 906 904 907 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+3 -1
net/ipv4/netfilter/iptable_nat.c
··· 184 184 185 185 if ((ct->tuplehash[dir].tuple.src.u3.ip != 186 186 ct->tuplehash[!dir].tuple.dst.u3.ip) || 187 - (ct->tuplehash[dir].tuple.src.u.all != 187 + (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && 188 + ct->tuplehash[dir].tuple.src.u.all != 188 189 ct->tuplehash[!dir].tuple.dst.u.all)) 189 190 if (nf_xfrm_me_harder(skb, AF_INET) < 0) 190 191 ret = NF_DROP; ··· 222 221 } 223 222 #ifdef CONFIG_XFRM 224 223 else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && 224 + ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && 225 225 ct->tuplehash[dir].tuple.dst.u.all != 226 226 ct->tuplehash[!dir].tuple.src.u.all) 227 227 if (nf_xfrm_me_harder(skb, AF_INET) < 0)
+5 -3
net/ipv4/tcp_illinois.c
··· 313 313 .tcpv_rttcnt = ca->cnt_rtt, 314 314 .tcpv_minrtt = ca->base_rtt, 315 315 }; 316 - u64 t = ca->sum_rtt; 317 316 318 - do_div(t, ca->cnt_rtt); 319 - info.tcpv_rtt = t; 317 + if (info.tcpv_rttcnt > 0) { 318 + u64 t = ca->sum_rtt; 320 319 320 + do_div(t, info.tcpv_rttcnt); 321 + info.tcpv_rtt = t; 322 + } 321 323 nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 322 324 } 323 325 }
+3
net/ipv4/tcp_input.c
··· 4529 4529 struct tcphdr *th; 4530 4530 bool fragstolen; 4531 4531 4532 + if (size == 0) 4533 + return 0; 4534 + 4532 4535 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); 4533 4536 if (!skb) 4534 4537 goto err;
+1 -1
net/ipv4/tcp_metrics.c
··· 864 864 } 865 865 a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6]; 866 866 if (a) { 867 - if (nla_len(a) != sizeof(sizeof(struct in6_addr))) 867 + if (nla_len(a) != sizeof(struct in6_addr)) 868 868 return -EINVAL; 869 869 addr->family = AF_INET6; 870 870 memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
+4 -4
net/ipv6/ip6_gre.c
··· 1633 1633 /* IFLA_GRE_OKEY */ 1634 1634 nla_total_size(4) + 1635 1635 /* IFLA_GRE_LOCAL */ 1636 - nla_total_size(4) + 1636 + nla_total_size(sizeof(struct in6_addr)) + 1637 1637 /* IFLA_GRE_REMOTE */ 1638 - nla_total_size(4) + 1638 + nla_total_size(sizeof(struct in6_addr)) + 1639 1639 /* IFLA_GRE_TTL */ 1640 1640 nla_total_size(1) + 1641 1641 /* IFLA_GRE_TOS */ ··· 1659 1659 nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || 1660 1660 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 1661 1661 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 1662 - nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) || 1663 - nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) || 1662 + nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) || 1663 + nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) || 1664 1664 nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) || 1665 1665 /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/ 1666 1666 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
+1 -2
net/ipv6/ndisc.c
··· 535 535 { 536 536 struct inet6_dev *idev; 537 537 struct inet6_ifaddr *ifa; 538 - struct in6_addr mcaddr; 538 + struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT; 539 539 540 540 idev = in6_dev_get(dev); 541 541 if (!idev) ··· 543 543 544 544 read_lock_bh(&idev->lock); 545 545 list_for_each_entry(ifa, &idev->addr_list, if_list) { 546 - addrconf_addr_solict_mult(&ifa->addr, &mcaddr); 547 546 ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr, 548 547 /*router=*/ !!idev->cnf.forwarding, 549 548 /*solicited=*/ false, /*override=*/ true,
+3 -1
net/ipv6/netfilter/ip6table_nat.c
··· 186 186 187 187 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, 188 188 &ct->tuplehash[!dir].tuple.dst.u3) || 189 - (ct->tuplehash[dir].tuple.src.u.all != 189 + (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && 190 + ct->tuplehash[dir].tuple.src.u.all != 190 191 ct->tuplehash[!dir].tuple.dst.u.all)) 191 192 if (nf_xfrm_me_harder(skb, AF_INET6) < 0) 192 193 ret = NF_DROP; ··· 223 222 } 224 223 #ifdef CONFIG_XFRM 225 224 else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 225 + ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && 226 226 ct->tuplehash[dir].tuple.dst.u.all != 227 227 ct->tuplehash[!dir].tuple.src.u.all) 228 228 if (nf_xfrm_me_harder(skb, AF_INET6))
+2 -2
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 85 85 { } 86 86 }; 87 87 88 - static int __net_init nf_ct_frag6_sysctl_register(struct net *net) 88 + static int nf_ct_frag6_sysctl_register(struct net *net) 89 89 { 90 90 struct ctl_table *table; 91 91 struct ctl_table_header *hdr; ··· 127 127 } 128 128 129 129 #else 130 - static int __net_init nf_ct_frag6_sysctl_register(struct net *net) 130 + static int nf_ct_frag6_sysctl_register(struct net *net) 131 131 { 132 132 return 0; 133 133 }
+1
net/l2tp/l2tp_eth.c
··· 291 291 292 292 out_del_dev: 293 293 free_netdev(dev); 294 + spriv->dev = NULL; 294 295 out_del_session: 295 296 l2tp_session_delete(session); 296 297 out:
+1 -1
net/mac80211/ibss.c
··· 1108 1108 sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; 1109 1109 sdata->u.ibss.ibss_join_req = jiffies; 1110 1110 1111 - memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN); 1111 + memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); 1112 1112 sdata->u.ibss.ssid_len = params->ssid_len; 1113 1113 1114 1114 mutex_unlock(&sdata->u.ibss.mtx);
+55 -17
net/mac80211/rx.c
··· 531 531 532 532 if (ieee80211_is_action(hdr->frame_control)) { 533 533 u8 category; 534 + 535 + /* make sure category field is present */ 536 + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 537 + return RX_DROP_MONITOR; 538 + 534 539 mgmt = (struct ieee80211_mgmt *)hdr; 535 540 category = mgmt->u.action.category; 536 541 if (category != WLAN_CATEGORY_MESH_ACTION && ··· 888 883 */ 889 884 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && 890 885 ieee80211_is_data_present(hdr->frame_control)) { 891 - u16 ethertype; 892 - u8 *payload; 886 + unsigned int hdrlen; 887 + __be16 ethertype; 893 888 894 - payload = rx->skb->data + 895 - ieee80211_hdrlen(hdr->frame_control); 896 - ethertype = (payload[6] << 8) | payload[7]; 897 - if (cpu_to_be16(ethertype) == 898 - rx->sdata->control_port_protocol) 889 + hdrlen = ieee80211_hdrlen(hdr->frame_control); 890 + 891 + if (rx->skb->len < hdrlen + 8) 892 + return RX_DROP_MONITOR; 893 + 894 + skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2); 895 + if (ethertype == rx->sdata->control_port_protocol) 899 896 return RX_CONTINUE; 900 897 } 901 898 ··· 1469 1462 1470 1463 hdr = (struct ieee80211_hdr *)rx->skb->data; 1471 1464 fc = hdr->frame_control; 1465 + 1466 + if (ieee80211_is_ctl(fc)) 1467 + return RX_CONTINUE; 1468 + 1472 1469 sc = le16_to_cpu(hdr->seq_ctrl); 1473 1470 frag = sc & IEEE80211_SCTL_FRAG; 1474 1471 1475 1472 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || 1476 - (rx->skb)->len < 24 || 1477 1473 is_multicast_ether_addr(hdr->addr1))) { 1478 1474 /* not fragmented */ 1479 1475 goto out; ··· 1899 1889 1900 1890 hdr = (struct ieee80211_hdr *) skb->data; 1901 1891 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1892 + 1893 + /* make sure fixed part of mesh header is there, also checks skb len */ 1894 + if (!pskb_may_pull(rx->skb, hdrlen + 6)) 1895 + return RX_DROP_MONITOR; 1896 + 1897 + mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1898 + 1899 + /* make sure full mesh header is there, 
also checks skb len */ 1900 + if (!pskb_may_pull(rx->skb, 1901 + hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 1902 + return RX_DROP_MONITOR; 1903 + 1904 + /* reload pointers */ 1905 + hdr = (struct ieee80211_hdr *) skb->data; 1902 1906 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1903 1907 1904 1908 /* frame is in RMC, don't forward */ ··· 1921 1897 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) 1922 1898 return RX_DROP_MONITOR; 1923 1899 1924 - if (!ieee80211_is_data(hdr->frame_control)) 1900 + if (!ieee80211_is_data(hdr->frame_control) || 1901 + !(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1925 1902 return RX_CONTINUE; 1926 1903 1927 1904 if (!mesh_hdr->ttl) ··· 1936 1911 if (is_multicast_ether_addr(hdr->addr1)) { 1937 1912 mpp_addr = hdr->addr3; 1938 1913 proxied_addr = mesh_hdr->eaddr1; 1939 - } else { 1914 + } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 1915 + /* has_a4 already checked in ieee80211_rx_mesh_check */ 1940 1916 mpp_addr = hdr->addr4; 1941 1917 proxied_addr = mesh_hdr->eaddr2; 1918 + } else { 1919 + return RX_DROP_MONITOR; 1942 1920 } 1943 1921 1944 1922 rcu_read_lock(); ··· 1969 1941 } 1970 1942 skb_set_queue_mapping(skb, q); 1971 1943 1972 - if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1973 - goto out; 1974 - 1975 1944 if (!--mesh_hdr->ttl) { 1976 1945 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 1977 - return RX_DROP_MONITOR; 1946 + goto out; 1978 1947 } 1979 1948 1980 1949 if (!ifmsh->mshcfg.dot11MeshForwarding) ··· 2378 2353 } 2379 2354 break; 2380 2355 case WLAN_CATEGORY_SELF_PROTECTED: 2356 + if (len < (IEEE80211_MIN_ACTION_SIZE + 2357 + sizeof(mgmt->u.action.u.self_prot.action_code))) 2358 + break; 2359 + 2381 2360 switch (mgmt->u.action.u.self_prot.action_code) { 2382 2361 case WLAN_SP_MESH_PEERING_OPEN: 2383 2362 case WLAN_SP_MESH_PEERING_CLOSE: ··· 2400 2371 } 2401 2372 break; 2402 2373 case WLAN_CATEGORY_MESH_ACTION: 2374 + if (len < (IEEE80211_MIN_ACTION_SIZE + 2375 + 
sizeof(mgmt->u.action.u.mesh_action.action_code))) 2376 + break; 2377 + 2403 2378 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2404 2379 break; 2405 2380 if (mesh_action_is_path_sel(mgmt) && ··· 2946 2913 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 2947 2914 local->dot11ReceivedFragmentCount++; 2948 2915 2949 - if (ieee80211_is_mgmt(fc)) 2950 - err = skb_linearize(skb); 2951 - else 2916 + if (ieee80211_is_mgmt(fc)) { 2917 + /* drop frame if too short for header */ 2918 + if (skb->len < ieee80211_hdrlen(fc)) 2919 + err = -ENOBUFS; 2920 + else 2921 + err = skb_linearize(skb); 2922 + } else { 2952 2923 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 2924 + } 2953 2925 2954 2926 if (err) { 2955 2927 dev_kfree_skb(skb);
+35 -7
net/mac80211/util.c
··· 643 643 break; 644 644 } 645 645 646 - if (id != WLAN_EID_VENDOR_SPECIFIC && 647 - id != WLAN_EID_QUIET && 648 - test_bit(id, seen_elems)) { 649 - elems->parse_error = true; 650 - left -= elen; 651 - pos += elen; 652 - continue; 646 + switch (id) { 647 + case WLAN_EID_SSID: 648 + case WLAN_EID_SUPP_RATES: 649 + case WLAN_EID_FH_PARAMS: 650 + case WLAN_EID_DS_PARAMS: 651 + case WLAN_EID_CF_PARAMS: 652 + case WLAN_EID_TIM: 653 + case WLAN_EID_IBSS_PARAMS: 654 + case WLAN_EID_CHALLENGE: 655 + case WLAN_EID_RSN: 656 + case WLAN_EID_ERP_INFO: 657 + case WLAN_EID_EXT_SUPP_RATES: 658 + case WLAN_EID_HT_CAPABILITY: 659 + case WLAN_EID_HT_OPERATION: 660 + case WLAN_EID_VHT_CAPABILITY: 661 + case WLAN_EID_VHT_OPERATION: 662 + case WLAN_EID_MESH_ID: 663 + case WLAN_EID_MESH_CONFIG: 664 + case WLAN_EID_PEER_MGMT: 665 + case WLAN_EID_PREQ: 666 + case WLAN_EID_PREP: 667 + case WLAN_EID_PERR: 668 + case WLAN_EID_RANN: 669 + case WLAN_EID_CHANNEL_SWITCH: 670 + case WLAN_EID_EXT_CHANSWITCH_ANN: 671 + case WLAN_EID_COUNTRY: 672 + case WLAN_EID_PWR_CONSTRAINT: 673 + case WLAN_EID_TIMEOUT_INTERVAL: 674 + if (test_bit(id, seen_elems)) { 675 + elems->parse_error = true; 676 + left -= elen; 677 + pos += elen; 678 + continue; 679 + } 680 + break; 653 681 } 654 682 655 683 if (calc_crc && id < 64 && (filter & (1ULL << id)))
+2 -1
net/netfilter/nf_conntrack_h323_main.c
··· 753 753 flowi4_to_flowi(&fl1), false)) { 754 754 if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, 755 755 flowi4_to_flowi(&fl2), false)) { 756 - if (rt1->rt_gateway == rt2->rt_gateway && 756 + if (rt_nexthop(rt1, fl1.daddr) == 757 + rt_nexthop(rt2, fl2.daddr) && 757 758 rt1->dst.dev == rt2->dst.dev) 758 759 ret = 1; 759 760 dst_release(&rt2->dst);
+79 -30
net/sched/sch_qfq.c
··· 84 84 * grp->index is the index of the group; and grp->slot_shift 85 85 * is the shift for the corresponding (scaled) sigma_i. 86 86 */ 87 - #define QFQ_MAX_INDEX 19 88 - #define QFQ_MAX_WSHIFT 16 87 + #define QFQ_MAX_INDEX 24 88 + #define QFQ_MAX_WSHIFT 12 89 89 90 90 #define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT) 91 - #define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT) 91 + #define QFQ_MAX_WSUM (16*QFQ_MAX_WEIGHT) 92 92 93 93 #define FRAC_BITS 30 /* fixed point arithmetic */ 94 94 #define ONE_FP (1UL << FRAC_BITS) 95 95 #define IWSUM (ONE_FP/QFQ_MAX_WSUM) 96 96 97 - #define QFQ_MTU_SHIFT 11 97 + #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */ 98 98 #define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX) 99 + #define QFQ_MIN_LMAX 256 /* min possible lmax for a class */ 99 100 100 101 /* 101 102 * Possible group states. These values are used as indexes for the bitmaps ··· 232 231 q->wsum += delta_w; 233 232 } 234 233 234 + static void qfq_update_reactivate_class(struct qfq_sched *q, 235 + struct qfq_class *cl, 236 + u32 inv_w, u32 lmax, int delta_w) 237 + { 238 + bool need_reactivation = false; 239 + int i = qfq_calc_index(inv_w, lmax); 240 + 241 + if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { 242 + /* 243 + * shift cl->F back, to not charge the 244 + * class for the not-yet-served head 245 + * packet 246 + */ 247 + cl->F = cl->S; 248 + /* remove class from its slot in the old group */ 249 + qfq_deactivate_class(q, cl); 250 + need_reactivation = true; 251 + } 252 + 253 + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); 254 + 255 + if (need_reactivation) /* activate in new group */ 256 + qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); 257 + } 258 + 259 + 235 260 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, 236 261 struct nlattr **tca, unsigned long *arg) 237 262 { ··· 265 238 struct qfq_class *cl = (struct qfq_class *)*arg; 266 239 struct nlattr *tb[TCA_QFQ_MAX + 1]; 267 240 u32 weight, lmax, inv_w; 268 - int 
i, err; 241 + int err; 269 242 int delta_w; 270 243 271 244 if (tca[TCA_OPTIONS] == NULL) { ··· 297 270 298 271 if (tb[TCA_QFQ_LMAX]) { 299 272 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]); 300 - if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) { 273 + if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) { 301 274 pr_notice("qfq: invalid max length %u\n", lmax); 302 275 return -EINVAL; 303 276 } 304 277 } else 305 - lmax = 1UL << QFQ_MTU_SHIFT; 278 + lmax = psched_mtu(qdisc_dev(sch)); 306 279 307 280 if (cl != NULL) { 308 - bool need_reactivation = false; 309 - 310 281 if (tca[TCA_RATE]) { 311 282 err = gen_replace_estimator(&cl->bstats, &cl->rate_est, 312 283 qdisc_root_sleeping_lock(sch), ··· 316 291 if (lmax == cl->lmax && inv_w == cl->inv_w) 317 292 return 0; /* nothing to update */ 318 293 319 - i = qfq_calc_index(inv_w, lmax); 320 294 sch_tree_lock(sch); 321 - if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { 322 - /* 323 - * shift cl->F back, to not charge the 324 - * class for the not-yet-served head 325 - * packet 326 - */ 327 - cl->F = cl->S; 328 - /* remove class from its slot in the old group */ 329 - qfq_deactivate_class(q, cl); 330 - need_reactivation = true; 331 - } 332 - 333 - qfq_update_class_params(q, cl, lmax, inv_w, delta_w); 334 - 335 - if (need_reactivation) /* activate in new group */ 336 - qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); 295 + qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w); 337 296 sch_tree_unlock(sch); 338 297 339 298 return 0; ··· 672 663 673 664 674 665 /* 675 - * XXX we should make sure that slot becomes less than 32. 676 - * This is guaranteed by the input values. 677 - * roundedS is always cl->S rounded on grp->slot_shift bits. 666 + * If the weight and lmax (max_pkt_size) of the classes do not change, 667 + * then QFQ guarantees that the slot index is never higher than 668 + * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM). 
669 + * 670 + * With the current values of the above constants, the index is 671 + * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18. 672 + * 673 + * When the weight of a class is increased or the lmax of the class is 674 + * decreased, a new class with smaller slot size may happen to be 675 + * activated. The activation of this class should be properly delayed 676 + * to when the service of the class has finished in the ideal system 677 + * tracked by QFQ. If the activation of the class is not delayed to 678 + * this reference time instant, then this class may be unjustly served 679 + * before other classes waiting for service. This may cause 680 + * (unfrequently) the above bound to the slot index to be violated for 681 + * some of these unlucky classes. 682 + * 683 + * Instead of delaying the activation of the new class, which is quite 684 + * complex, the following inaccurate but simple solution is used: if 685 + * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps 686 + * of the class are shifted backward so as to let the slot index 687 + * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if 688 + * the slot index is above it, then the data structure implementing 689 + * the bucket list either gets immediately corrupted or may get 690 + * corrupted on a possible next packet arrival that causes the start 691 + * time of the group to be shifted backward. 
678 692 */ 679 693 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, 680 694 u64 roundedS) 681 695 { 682 696 u64 slot = (roundedS - grp->S) >> grp->slot_shift; 683 - unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS; 697 + unsigned int i; /* slot index in the bucket list */ 698 + 699 + if (unlikely(slot > QFQ_MAX_SLOTS - 2)) { 700 + u64 deltaS = roundedS - grp->S - 701 + ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift); 702 + cl->S -= deltaS; 703 + cl->F -= deltaS; 704 + slot = QFQ_MAX_SLOTS - 2; 705 + } 706 + 707 + i = (grp->front + slot) % QFQ_MAX_SLOTS; 684 708 685 709 hlist_add_head(&cl->next, &grp->slots[i]); 686 710 __set_bit(slot, &grp->full_slots); ··· 933 891 return err; 934 892 } 935 893 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); 894 + 895 + if (unlikely(cl->lmax < qdisc_pkt_len(skb))) { 896 + pr_debug("qfq: increasing maxpkt from %u to %u for class %u", 897 + cl->lmax, qdisc_pkt_len(skb), cl->common.classid); 898 + qfq_update_reactivate_class(q, cl, cl->inv_w, 899 + qdisc_pkt_len(skb), 0); 900 + } 936 901 937 902 err = qdisc_enqueue(skb, cl->qdisc); 938 903 if (unlikely(err != NET_XMIT_SUCCESS)) {
+1 -1
net/sctp/socket.c
··· 974 974 void *addr_buf; 975 975 struct sctp_af *af; 976 976 977 - SCTP_DEBUG_PRINTK("sctp_setsocktopt_bindx: sk %p addrs %p" 977 + SCTP_DEBUG_PRINTK("sctp_setsockopt_bindx: sk %p addrs %p" 978 978 " addrs_size %d opt %d\n", sk, addrs, addrs_size, op); 979 979 980 980 if (unlikely(addrs_size <= 0))
+1 -1
net/sunrpc/backchannel_rqst.c
··· 172 172 xprt_free_allocation(req); 173 173 174 174 dprintk("RPC: setup backchannel transport failed\n"); 175 - return -1; 175 + return -ENOMEM; 176 176 } 177 177 EXPORT_SYMBOL_GPL(xprt_setup_backchannel); 178 178
-1
net/tipc/handler.c
··· 116 116 return; 117 117 118 118 handler_enabled = 0; 119 - tasklet_disable(&tipc_tasklet); 120 119 tasklet_kill(&tipc_tasklet); 121 120 122 121 spin_lock_bh(&qitem_lock);
+1 -2
net/wireless/core.c
··· 526 526 for (i = 0; i < sband->n_channels; i++) { 527 527 sband->channels[i].orig_flags = 528 528 sband->channels[i].flags; 529 - sband->channels[i].orig_mag = 530 - sband->channels[i].max_antenna_gain; 529 + sband->channels[i].orig_mag = INT_MAX; 531 530 sband->channels[i].orig_mpwr = 532 531 sband->channels[i].max_power; 533 532 sband->channels[i].band = band;
+3 -2
net/wireless/reg.c
··· 908 908 map_regdom_flags(reg_rule->flags) | bw_flags; 909 909 chan->max_antenna_gain = chan->orig_mag = 910 910 (int) MBI_TO_DBI(power_rule->max_antenna_gain); 911 - chan->max_power = chan->orig_mpwr = 911 + chan->max_reg_power = chan->max_power = chan->orig_mpwr = 912 912 (int) MBM_TO_DBM(power_rule->max_eirp); 913 913 return; 914 914 } ··· 1331 1331 1332 1332 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; 1333 1333 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1334 - chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 1334 + chan->max_reg_power = chan->max_power = 1335 + (int) MBM_TO_DBM(power_rule->max_eirp); 1335 1336 } 1336 1337 1337 1338 static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band,
+8 -6
net/wireless/util.c
··· 309 309 } 310 310 EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); 311 311 312 - static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) 312 + unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) 313 313 { 314 314 int ae = meshhdr->flags & MESH_FLAGS_AE; 315 - /* 7.1.3.5a.2 */ 315 + /* 802.11-2012, 8.2.4.7.3 */ 316 316 switch (ae) { 317 + default: 317 318 case 0: 318 319 return 6; 319 320 case MESH_FLAGS_AE_A4: 320 321 return 12; 321 322 case MESH_FLAGS_AE_A5_A6: 322 323 return 18; 323 - case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6): 324 - return 24; 325 - default: 326 - return 6; 327 324 } 328 325 } 326 + EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); 329 327 330 328 int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, 331 329 enum nl80211_iftype iftype) ··· 371 373 /* make sure meshdr->flags is on the linear part */ 372 374 if (!pskb_may_pull(skb, hdrlen + 1)) 373 375 return -1; 376 + if (meshdr->flags & MESH_FLAGS_AE_A4) 377 + return -1; 374 378 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { 375 379 skb_copy_bits(skb, hdrlen + 376 380 offsetof(struct ieee80211s_hdr, eaddr1), ··· 396 396 (struct ieee80211s_hdr *) (skb->data + hdrlen); 397 397 /* make sure meshdr->flags is on the linear part */ 398 398 if (!pskb_may_pull(skb, hdrlen + 1)) 399 + return -1; 400 + if (meshdr->flags & MESH_FLAGS_AE_A5_A6) 399 401 return -1; 400 402 if (meshdr->flags & MESH_FLAGS_AE_A4) 401 403 skb_copy_bits(skb, hdrlen +
+2 -1
scripts/Makefile.modinst
··· 16 16 __modinst: $(modules) 17 17 @: 18 18 19 + # Don't stop modules_install if we can't sign external modules. 19 20 quiet_cmd_modules_install = INSTALL $@ 20 - cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@) 21 + cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) 21 22 22 23 # Modules built outside the kernel source tree go into extra by default 23 24 INSTALL_MOD_DIR ?= extra
+4 -2
scripts/checkpatch.pl
··· 1890 1890 } 1891 1891 1892 1892 if ($realfile =~ m@^(drivers/net/|net/)@ && 1893 - $rawline !~ m@^\+[ \t]*(\/\*|\*\/)@ && 1894 - $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) { 1893 + $rawline !~ m@^\+[ \t]*\*/[ \t]*$@ && #trailing */ 1894 + $rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ && #inline /*...*/ 1895 + $rawline !~ m@^\+.*\*{2,}/[ \t]*$@ && #trailing **/ 1896 + $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) { #non blank */ 1895 1897 WARN("NETWORKING_BLOCK_COMMENT_STYLE", 1896 1898 "networking block comments put the trailing */ on a separate line\n" . $herecurr); 1897 1899 }
+7 -2
sound/core/compress_offload.c
··· 100 100 101 101 if (dirn != compr->direction) { 102 102 pr_err("this device doesn't support this direction\n"); 103 + snd_card_unref(compr->card); 103 104 return -EINVAL; 104 105 } 105 106 106 107 data = kzalloc(sizeof(*data), GFP_KERNEL); 107 - if (!data) 108 + if (!data) { 109 + snd_card_unref(compr->card); 108 110 return -ENOMEM; 111 + } 109 112 data->stream.ops = compr->ops; 110 113 data->stream.direction = dirn; 111 114 data->stream.private_data = compr->private_data; ··· 116 113 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); 117 114 if (!runtime) { 118 115 kfree(data); 116 + snd_card_unref(compr->card); 119 117 return -ENOMEM; 120 118 } 121 119 runtime->state = SNDRV_PCM_STATE_OPEN; ··· 130 126 kfree(runtime); 131 127 kfree(data); 132 128 } 133 - return ret; 129 + snd_card_unref(compr->card); 130 + return 0; 134 131 } 135 132 136 133 static int snd_compr_free(struct inode *inode, struct file *f)
+5
sound/core/control.c
··· 86 86 write_lock_irqsave(&card->ctl_files_rwlock, flags); 87 87 list_add_tail(&ctl->list, &card->ctl_files); 88 88 write_unlock_irqrestore(&card->ctl_files_rwlock, flags); 89 + snd_card_unref(card); 89 90 return 0; 90 91 91 92 __error: ··· 94 93 __error2: 95 94 snd_card_file_remove(card, file); 96 95 __error1: 96 + if (card) 97 + snd_card_unref(card); 97 98 return err; 98 99 } 99 100 ··· 1437 1434 spin_unlock_irq(&ctl->read_lock); 1438 1435 schedule(); 1439 1436 remove_wait_queue(&ctl->change_sleep, &wait); 1437 + if (ctl->card->shutdown) 1438 + return -ENODEV; 1440 1439 if (signal_pending(current)) 1441 1440 return -ERESTARTSYS; 1442 1441 spin_lock_irq(&ctl->read_lock);
+11 -1
sound/core/hwdep.c
··· 100 100 if (hw == NULL) 101 101 return -ENODEV; 102 102 103 - if (!try_module_get(hw->card->module)) 103 + if (!try_module_get(hw->card->module)) { 104 + snd_card_unref(hw->card); 104 105 return -EFAULT; 106 + } 105 107 106 108 init_waitqueue_entry(&wait, current); 107 109 add_wait_queue(&hw->open_wait, &wait); ··· 131 129 mutex_unlock(&hw->open_mutex); 132 130 schedule(); 133 131 mutex_lock(&hw->open_mutex); 132 + if (hw->card->shutdown) { 133 + err = -ENODEV; 134 + break; 135 + } 134 136 if (signal_pending(current)) { 135 137 err = -ERESTARTSYS; 136 138 break; ··· 154 148 mutex_unlock(&hw->open_mutex); 155 149 if (err < 0) 156 150 module_put(hw->card->module); 151 + snd_card_unref(hw->card); 157 152 return err; 158 153 } 159 154 ··· 466 459 mutex_unlock(&register_mutex); 467 460 return -EINVAL; 468 461 } 462 + mutex_lock(&hwdep->open_mutex); 463 + wake_up(&hwdep->open_wait); 469 464 #ifdef CONFIG_SND_OSSEMUL 470 465 if (hwdep->ossreg) 471 466 snd_unregister_oss_device(hwdep->oss_type, hwdep->card, hwdep->device); 472 467 #endif 473 468 snd_unregister_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device); 474 469 list_del_init(&hwdep->list); 470 + mutex_unlock(&hwdep->open_mutex); 475 471 mutex_unlock(&register_mutex); 476 472 return 0; 477 473 }
+30 -20
sound/core/init.c
··· 213 213 spin_lock_init(&card->files_lock); 214 214 INIT_LIST_HEAD(&card->files_list); 215 215 init_waitqueue_head(&card->shutdown_sleep); 216 + atomic_set(&card->refcount, 0); 216 217 #ifdef CONFIG_PM 217 218 mutex_init(&card->power_lock); 218 219 init_waitqueue_head(&card->power_sleep); ··· 447 446 return 0; 448 447 } 449 448 449 + /** 450 + * snd_card_unref - release the reference counter 451 + * @card: the card instance 452 + * 453 + * Decrements the reference counter. When it reaches to zero, wake up 454 + * the sleeper and call the destructor if needed. 455 + */ 456 + void snd_card_unref(struct snd_card *card) 457 + { 458 + if (atomic_dec_and_test(&card->refcount)) { 459 + wake_up(&card->shutdown_sleep); 460 + if (card->free_on_last_close) 461 + snd_card_do_free(card); 462 + } 463 + } 464 + EXPORT_SYMBOL(snd_card_unref); 465 + 450 466 int snd_card_free_when_closed(struct snd_card *card) 451 467 { 452 - int free_now = 0; 453 - int ret = snd_card_disconnect(card); 454 - if (ret) 468 + int ret; 469 + 470 + atomic_inc(&card->refcount); 471 + ret = snd_card_disconnect(card); 472 + if (ret) { 473 + atomic_dec(&card->refcount); 455 474 return ret; 475 + } 456 476 457 - spin_lock(&card->files_lock); 458 - if (list_empty(&card->files_list)) 459 - free_now = 1; 460 - else 461 - card->free_on_last_close = 1; 462 - spin_unlock(&card->files_lock); 463 - 464 - if (free_now) 477 + card->free_on_last_close = 1; 478 + if (atomic_dec_and_test(&card->refcount)) 465 479 snd_card_do_free(card); 466 480 return 0; 467 481 } ··· 490 474 return ret; 491 475 492 476 /* wait, until all devices are ready for the free operation */ 493 - wait_event(card->shutdown_sleep, list_empty(&card->files_list)); 477 + wait_event(card->shutdown_sleep, !atomic_read(&card->refcount)); 494 478 snd_card_do_free(card); 495 479 return 0; 496 480 } ··· 902 886 return -ENODEV; 903 887 } 904 888 list_add(&mfile->list, &card->files_list); 889 + atomic_inc(&card->refcount); 905 890 
spin_unlock(&card->files_lock); 906 891 return 0; 907 892 } ··· 925 908 int snd_card_file_remove(struct snd_card *card, struct file *file) 926 909 { 927 910 struct snd_monitor_file *mfile, *found = NULL; 928 - int last_close = 0; 929 911 930 912 spin_lock(&card->files_lock); 931 913 list_for_each_entry(mfile, &card->files_list, list) { ··· 939 923 break; 940 924 } 941 925 } 942 - if (list_empty(&card->files_list)) 943 - last_close = 1; 944 926 spin_unlock(&card->files_lock); 945 - if (last_close) { 946 - wake_up(&card->shutdown_sleep); 947 - if (card->free_on_last_close) 948 - snd_card_do_free(card); 949 - } 950 927 if (!found) { 951 928 snd_printk(KERN_ERR "ALSA card file remove problem (%p)\n", file); 952 929 return -ENOENT; 953 930 } 954 931 kfree(found); 932 + snd_card_unref(card); 955 933 return 0; 956 934 } 957 935
+9 -2
sound/core/oss/mixer_oss.c
··· 52 52 SNDRV_OSS_DEVICE_TYPE_MIXER); 53 53 if (card == NULL) 54 54 return -ENODEV; 55 - if (card->mixer_oss == NULL) 55 + if (card->mixer_oss == NULL) { 56 + snd_card_unref(card); 56 57 return -ENODEV; 58 + } 57 59 err = snd_card_file_add(card, file); 58 - if (err < 0) 60 + if (err < 0) { 61 + snd_card_unref(card); 59 62 return err; 63 + } 60 64 fmixer = kzalloc(sizeof(*fmixer), GFP_KERNEL); 61 65 if (fmixer == NULL) { 62 66 snd_card_file_remove(card, file); 67 + snd_card_unref(card); 63 68 return -ENOMEM; 64 69 } 65 70 fmixer->card = card; ··· 73 68 if (!try_module_get(card->module)) { 74 69 kfree(fmixer); 75 70 snd_card_file_remove(card, file); 71 + snd_card_unref(card); 76 72 return -EFAULT; 77 73 } 74 + snd_card_unref(card); 78 75 return 0; 79 76 } 80 77
+7
sound/core/oss/pcm_oss.c
··· 2441 2441 mutex_unlock(&pcm->open_mutex); 2442 2442 schedule(); 2443 2443 mutex_lock(&pcm->open_mutex); 2444 + if (pcm->card->shutdown) { 2445 + err = -ENODEV; 2446 + break; 2447 + } 2444 2448 if (signal_pending(current)) { 2445 2449 err = -ERESTARTSYS; 2446 2450 break; ··· 2454 2450 mutex_unlock(&pcm->open_mutex); 2455 2451 if (err < 0) 2456 2452 goto __error; 2453 + snd_card_unref(pcm->card); 2457 2454 return err; 2458 2455 2459 2456 __error: ··· 2462 2457 __error2: 2463 2458 snd_card_file_remove(pcm->card, file); 2464 2459 __error1: 2460 + if (pcm) 2461 + snd_card_unref(pcm->card); 2465 2462 return err; 2466 2463 } 2467 2464
+11 -2
sound/core/pcm.c
··· 1086 1086 if (list_empty(&pcm->list)) 1087 1087 goto unlock; 1088 1088 1089 + mutex_lock(&pcm->open_mutex); 1090 + wake_up(&pcm->open_wait); 1089 1091 list_del_init(&pcm->list); 1090 1092 for (cidx = 0; cidx < 2; cidx++) 1091 - for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) 1092 - if (substream->runtime) 1093 + for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) { 1094 + snd_pcm_stream_lock_irq(substream); 1095 + if (substream->runtime) { 1093 1096 substream->runtime->status->state = SNDRV_PCM_STATE_DISCONNECTED; 1097 + wake_up(&substream->runtime->sleep); 1098 + wake_up(&substream->runtime->tsleep); 1099 + } 1100 + snd_pcm_stream_unlock_irq(substream); 1101 + } 1094 1102 list_for_each_entry(notify, &snd_pcm_notify_list, list) { 1095 1103 notify->n_disconnect(pcm); 1096 1104 } ··· 1118 1110 pcm->streams[cidx].chmap_kctl = NULL; 1119 1111 } 1120 1112 } 1113 + mutex_unlock(&pcm->open_mutex); 1121 1114 unlock: 1122 1115 mutex_unlock(&register_mutex); 1123 1116 return 0;
+29 -6
sound/core/pcm_native.c
··· 369 369 return usecs; 370 370 } 371 371 372 + static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state) 373 + { 374 + snd_pcm_stream_lock_irq(substream); 375 + if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED) 376 + substream->runtime->status->state = state; 377 + snd_pcm_stream_unlock_irq(substream); 378 + } 379 + 372 380 static int snd_pcm_hw_params(struct snd_pcm_substream *substream, 373 381 struct snd_pcm_hw_params *params) 374 382 { ··· 460 452 runtime->boundary *= 2; 461 453 462 454 snd_pcm_timer_resolution_change(substream); 463 - runtime->status->state = SNDRV_PCM_STATE_SETUP; 455 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP); 464 456 465 457 if (pm_qos_request_active(&substream->latency_pm_qos_req)) 466 458 pm_qos_remove_request(&substream->latency_pm_qos_req); ··· 472 464 /* hardware might be unusable from this time, 473 465 so we force application to retry to set 474 466 the correct hardware parameter settings */ 475 - runtime->status->state = SNDRV_PCM_STATE_OPEN; 467 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); 476 468 if (substream->ops->hw_free != NULL) 477 469 substream->ops->hw_free(substream); 478 470 return err; ··· 520 512 return -EBADFD; 521 513 if (substream->ops->hw_free) 522 514 result = substream->ops->hw_free(substream); 523 - runtime->status->state = SNDRV_PCM_STATE_OPEN; 515 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); 524 516 pm_qos_remove_request(&substream->latency_pm_qos_req); 525 517 return result; 526 518 } ··· 1328 1320 { 1329 1321 struct snd_pcm_runtime *runtime = substream->runtime; 1330 1322 runtime->control->appl_ptr = runtime->status->hw_ptr; 1331 - runtime->status->state = SNDRV_PCM_STATE_PREPARED; 1323 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED); 1332 1324 } 1333 1325 1334 1326 static struct action_ops snd_pcm_action_prepare = { ··· 1518 1510 down_read(&snd_pcm_link_rwsem); 1519 1511 snd_pcm_stream_lock_irq(substream); 1520 1512 
remove_wait_queue(&to_check->sleep, &wait); 1513 + if (card->shutdown) { 1514 + result = -ENODEV; 1515 + break; 1516 + } 1521 1517 if (tout == 0) { 1522 1518 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) 1523 1519 result = -ESTRPIPE; ··· 1646 1634 write_unlock_irq(&snd_pcm_link_rwlock); 1647 1635 up_write(&snd_pcm_link_rwsem); 1648 1636 _nolock: 1637 + snd_card_unref(substream1->pcm->card); 1649 1638 fput_light(file, fput_needed); 1650 1639 if (res < 0) 1651 1640 kfree(group); ··· 2121 2108 return err; 2122 2109 pcm = snd_lookup_minor_data(iminor(inode), 2123 2110 SNDRV_DEVICE_TYPE_PCM_PLAYBACK); 2124 - return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK); 2111 + err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK); 2112 + if (pcm) 2113 + snd_card_unref(pcm->card); 2114 + return err; 2125 2115 } 2126 2116 2127 2117 static int snd_pcm_capture_open(struct inode *inode, struct file *file) ··· 2135 2119 return err; 2136 2120 pcm = snd_lookup_minor_data(iminor(inode), 2137 2121 SNDRV_DEVICE_TYPE_PCM_CAPTURE); 2138 - return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE); 2122 + err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE); 2123 + if (pcm) 2124 + snd_card_unref(pcm->card); 2125 + return err; 2139 2126 } 2140 2127 2141 2128 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) ··· 2175 2156 mutex_unlock(&pcm->open_mutex); 2176 2157 schedule(); 2177 2158 mutex_lock(&pcm->open_mutex); 2159 + if (pcm->card->shutdown) { 2160 + err = -ENODEV; 2161 + break; 2162 + } 2178 2163 if (signal_pending(current)) { 2179 2164 err = -ERESTARTSYS; 2180 2165 break;
+25 -1
sound/core/rawmidi.c
··· 379 379 if (rmidi == NULL) 380 380 return -ENODEV; 381 381 382 - if (!try_module_get(rmidi->card->module)) 382 + if (!try_module_get(rmidi->card->module)) { 383 + snd_card_unref(rmidi->card); 383 384 return -ENXIO; 385 + } 384 386 385 387 mutex_lock(&rmidi->open_mutex); 386 388 card = rmidi->card; ··· 424 422 mutex_unlock(&rmidi->open_mutex); 425 423 schedule(); 426 424 mutex_lock(&rmidi->open_mutex); 425 + if (rmidi->card->shutdown) { 426 + err = -ENODEV; 427 + break; 428 + } 427 429 if (signal_pending(current)) { 428 430 err = -ERESTARTSYS; 429 431 break; ··· 446 440 #endif 447 441 file->private_data = rawmidi_file; 448 442 mutex_unlock(&rmidi->open_mutex); 443 + snd_card_unref(rmidi->card); 449 444 return 0; 450 445 451 446 __error: ··· 454 447 __error_card: 455 448 mutex_unlock(&rmidi->open_mutex); 456 449 module_put(rmidi->card->module); 450 + snd_card_unref(rmidi->card); 457 451 return err; 458 452 } 459 453 ··· 999 991 spin_unlock_irq(&runtime->lock); 1000 992 schedule(); 1001 993 remove_wait_queue(&runtime->sleep, &wait); 994 + if (rfile->rmidi->card->shutdown) 995 + return -ENODEV; 1002 996 if (signal_pending(current)) 1003 997 return result > 0 ? result : -ERESTARTSYS; 1004 998 if (!runtime->avail) ··· 1244 1234 spin_unlock_irq(&runtime->lock); 1245 1235 timeout = schedule_timeout(30 * HZ); 1246 1236 remove_wait_queue(&runtime->sleep, &wait); 1237 + if (rfile->rmidi->card->shutdown) 1238 + return -ENODEV; 1247 1239 if (signal_pending(current)) 1248 1240 return result > 0 ? 
result : -ERESTARTSYS; 1249 1241 if (!runtime->avail && !timeout) ··· 1621 1609 static int snd_rawmidi_dev_disconnect(struct snd_device *device) 1622 1610 { 1623 1611 struct snd_rawmidi *rmidi = device->device_data; 1612 + int dir; 1624 1613 1625 1614 mutex_lock(&register_mutex); 1615 + mutex_lock(&rmidi->open_mutex); 1616 + wake_up(&rmidi->open_wait); 1626 1617 list_del_init(&rmidi->list); 1618 + for (dir = 0; dir < 2; dir++) { 1619 + struct snd_rawmidi_substream *s; 1620 + list_for_each_entry(s, &rmidi->streams[dir].substreams, list) { 1621 + if (s->runtime) 1622 + wake_up(&s->runtime->sleep); 1623 + } 1624 + } 1625 + 1627 1626 #ifdef CONFIG_SND_OSSEMUL 1628 1627 if (rmidi->ossreg) { 1629 1628 if ((int)rmidi->device == midi_map[rmidi->card->number]) { ··· 1649 1626 } 1650 1627 #endif /* CONFIG_SND_OSSEMUL */ 1651 1628 snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device); 1629 + mutex_unlock(&rmidi->open_mutex); 1652 1630 mutex_unlock(&register_mutex); 1653 1631 return 0; 1654 1632 }
+9 -2
sound/core/sound.c
··· 98 98 * 99 99 * Checks that a minor device with the specified type is registered, and returns 100 100 * its user data pointer. 101 + * 102 + * This function increments the reference counter of the card instance 103 + * if an associated instance with the given minor number and type is found. 104 + * The caller must call snd_card_unref() appropriately later. 101 105 */ 102 106 void *snd_lookup_minor_data(unsigned int minor, int type) 103 107 { ··· 112 108 return NULL; 113 109 mutex_lock(&sound_mutex); 114 110 mreg = snd_minors[minor]; 115 - if (mreg && mreg->type == type) 111 + if (mreg && mreg->type == type) { 116 112 private_data = mreg->private_data; 117 - else 113 + if (private_data && mreg->card_ptr) 114 + atomic_inc(&mreg->card_ptr->refcount); 115 + } else 118 116 private_data = NULL; 119 117 mutex_unlock(&sound_mutex); 120 118 return private_data; ··· 281 275 preg->device = dev; 282 276 preg->f_ops = f_ops; 283 277 preg->private_data = private_data; 278 + preg->card_ptr = card; 284 279 mutex_lock(&sound_mutex); 285 280 #ifdef CONFIG_SND_DYNAMIC_MINORS 286 281 minor = snd_find_free_minor(type);
+8 -2
sound/core/sound_oss.c
··· 40 40 static struct snd_minor *snd_oss_minors[SNDRV_OSS_MINORS]; 41 41 static DEFINE_MUTEX(sound_oss_mutex); 42 42 43 + /* NOTE: This function increments the refcount of the associated card like 44 + * snd_lookup_minor_data(); the caller must call snd_card_unref() appropriately 45 + */ 43 46 void *snd_lookup_oss_minor_data(unsigned int minor, int type) 44 47 { 45 48 struct snd_minor *mreg; ··· 52 49 return NULL; 53 50 mutex_lock(&sound_oss_mutex); 54 51 mreg = snd_oss_minors[minor]; 55 - if (mreg && mreg->type == type) 52 + if (mreg && mreg->type == type) { 56 53 private_data = mreg->private_data; 57 - else 54 + if (private_data && mreg->card_ptr) 55 + atomic_inc(&mreg->card_ptr->refcount); 56 + } else 58 57 private_data = NULL; 59 58 mutex_unlock(&sound_oss_mutex); 60 59 return private_data; ··· 128 123 preg->device = dev; 129 124 preg->f_ops = f_ops; 130 125 preg->private_data = private_data; 126 + preg->card_ptr = card; 131 127 mutex_lock(&sound_oss_mutex); 132 128 snd_oss_minors[minor] = preg; 133 129 minor_unit = SNDRV_MINOR_OSS_DEVICE(minor);
+1 -1
sound/i2c/other/ak4113.c
··· 426 426 }, 427 427 { 428 428 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 429 - .name = "IEC958 Preample Capture Default", 429 + .name = "IEC958 Preamble Capture Default", 430 430 .access = SNDRV_CTL_ELEM_ACCESS_READ | 431 431 SNDRV_CTL_ELEM_ACCESS_VOLATILE, 432 432 .info = snd_ak4113_spdif_pinfo,
+1 -1
sound/i2c/other/ak4114.c
··· 401 401 }, 402 402 { 403 403 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 404 - .name = "IEC958 Preample Capture Default", 404 + .name = "IEC958 Preamble Capture Default", 405 405 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, 406 406 .info = snd_ak4114_spdif_pinfo, 407 407 .get = snd_ak4114_spdif_pget,
+1 -1
sound/i2c/other/ak4117.c
··· 380 380 }, 381 381 { 382 382 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 383 - .name = "IEC958 Preample Capture Default", 383 + .name = "IEC958 Preamble Capture Default", 384 384 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, 385 385 .info = snd_ak4117_spdif_pinfo, 386 386 .get = snd_ak4117_spdif_pget,
+2
sound/pci/es1968.c
··· 2655 2655 { TYPE_MAESTRO2E, 0x1179 }, 2656 2656 { TYPE_MAESTRO2E, 0x14c0 }, /* HP omnibook 4150 */ 2657 2657 { TYPE_MAESTRO2E, 0x1558 }, 2658 + { TYPE_MAESTRO2E, 0x125d }, /* a PCI card, e.g. Terratec DMX */ 2659 + { TYPE_MAESTRO2, 0x125d }, /* a PCI card, e.g. SF64-PCE2 */ 2658 2660 }; 2659 2661 2660 2662 static struct ess_device_list mpu_blacklist[] __devinitdata = {
+2
sound/pci/hda/hda_intel.c
··· 3563 3563 /* Teradici */ 3564 3564 { PCI_DEVICE(0x6549, 0x1200), 3565 3565 .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, 3566 + { PCI_DEVICE(0x6549, 0x2200), 3567 + .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, 3566 3568 /* Creative X-Fi (CA0110-IBG) */ 3567 3569 /* CTHDA chips */ 3568 3570 { PCI_DEVICE(0x1102, 0x0010),
+1
sound/pci/hda/patch_analog.c
··· 545 545 if (spec->multiout.dig_out_nid) { 546 546 info++; 547 547 codec->num_pcms++; 548 + codec->spdif_status_reset = 1; 548 549 info->name = "AD198x Digital"; 549 550 info->pcm_type = HDA_PCM_TYPE_SPDIF; 550 551 info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback;
+12 -9
sound/pci/hda/patch_cirrus.c
··· 101 101 #define CS420X_VENDOR_NID 0x11 102 102 #define CS_DIG_OUT1_PIN_NID 0x10 103 103 #define CS_DIG_OUT2_PIN_NID 0x15 104 - #define CS_DMIC1_PIN_NID 0x12 105 - #define CS_DMIC2_PIN_NID 0x0e 104 + #define CS_DMIC1_PIN_NID 0x0e 105 + #define CS_DMIC2_PIN_NID 0x12 106 106 107 107 /* coef indices */ 108 108 #define IDX_SPDIF_STAT 0x0000 ··· 1079 1079 cs_automic(codec, NULL); 1080 1080 1081 1081 coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */ 1082 + cs_vendor_coef_set(codec, IDX_ADC_CFG, coef); 1083 + 1084 + coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG); 1082 1085 if (is_active_pin(codec, CS_DMIC2_PIN_NID)) 1083 - coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */ 1086 + coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */ 1084 1087 if (is_active_pin(codec, CS_DMIC1_PIN_NID)) 1085 - coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off 1088 + coef |= 1 << 3; /* DMIC1 2 chan on, GPIO0 off 1086 1089 * No effect if SPDIF_OUT2 is 1087 1090 * selected in IDX_SPDIF_CTL. 1088 1091 */ 1089 - cs_vendor_coef_set(codec, IDX_ADC_CFG, coef); 1092 + 1093 + cs_vendor_coef_set(codec, IDX_BEEP_CFG, coef); 1090 1094 } else { 1091 1095 if (spec->mic_detect) 1092 1096 cs_automic(codec, NULL); ··· 1111 1107 | 0x0400 /* Disable Coefficient Auto increment */ 1112 1108 )}, 1113 1109 /* Beep */ 1114 - {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG}, 1110 + {0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG}, 1115 1111 {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */ 1116 1112 1117 1113 {} /* terminator */ ··· 1732 1728 1733 1729 } 1734 1730 1735 - static struct snd_kcontrol_new cs421x_capture_source = { 1736 - 1731 + static const struct snd_kcontrol_new cs421x_capture_source = { 1737 1732 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1738 1733 .name = "Capture Source", 1739 1734 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, ··· 1949 1946 } 1950 1947 #endif 1951 1948 1952 - static struct hda_codec_ops cs421x_patch_ops = { 1949 + static const struct hda_codec_ops cs421x_patch_ops = { 1953 
1950 .build_controls = cs421x_build_controls, 1954 1951 .build_pcms = cs_build_pcms, 1955 1952 .init = cs421x_init,
+13 -13
sound/pci/hda/patch_realtek.c
··· 5840 5840 return alc_parse_auto_config(codec, alc269_ignore, ssids); 5841 5841 } 5842 5842 5843 - static void alc269_toggle_power_output(struct hda_codec *codec, int power_up) 5843 + static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up) 5844 5844 { 5845 5845 int val = alc_read_coef_idx(codec, 0x04); 5846 5846 if (power_up) ··· 5857 5857 if (spec->codec_variant != ALC269_TYPE_ALC269VB) 5858 5858 return; 5859 5859 5860 - if ((alc_get_coef0(codec) & 0x00ff) == 0x017) 5861 - alc269_toggle_power_output(codec, 0); 5862 - if ((alc_get_coef0(codec) & 0x00ff) == 0x018) { 5863 - alc269_toggle_power_output(codec, 0); 5860 + if (spec->codec_variant == ALC269_TYPE_ALC269VB) 5861 + alc269vb_toggle_power_output(codec, 0); 5862 + if (spec->codec_variant == ALC269_TYPE_ALC269VB && 5863 + (alc_get_coef0(codec) & 0x00ff) == 0x018) { 5864 5864 msleep(150); 5865 5865 } 5866 5866 } ··· 5870 5870 { 5871 5871 struct alc_spec *spec = codec->spec; 5872 5872 5873 - if (spec->codec_variant == ALC269_TYPE_ALC269VB || 5873 + if (spec->codec_variant == ALC269_TYPE_ALC269VB) 5874 + alc269vb_toggle_power_output(codec, 0); 5875 + if (spec->codec_variant == ALC269_TYPE_ALC269VB && 5874 5876 (alc_get_coef0(codec) & 0x00ff) == 0x018) { 5875 - alc269_toggle_power_output(codec, 0); 5876 5877 msleep(150); 5877 5878 } 5878 5879 5879 5880 codec->patch_ops.init(codec); 5880 5881 5881 - if (spec->codec_variant == ALC269_TYPE_ALC269VB || 5882 + if (spec->codec_variant == ALC269_TYPE_ALC269VB) 5883 + alc269vb_toggle_power_output(codec, 1); 5884 + if (spec->codec_variant == ALC269_TYPE_ALC269VB && 5882 5885 (alc_get_coef0(codec) & 0x00ff) == 0x017) { 5883 - alc269_toggle_power_output(codec, 1); 5884 5886 msleep(200); 5885 5887 } 5886 - 5887 - if (spec->codec_variant == ALC269_TYPE_ALC269VB || 5888 - (alc_get_coef0(codec) & 0x00ff) == 0x018) 5889 - alc269_toggle_power_output(codec, 1); 5890 5888 5891 5889 snd_hda_codec_resume_amp(codec); 5892 5890 snd_hda_codec_resume_cache(codec); 
··· 7077 7079 .patch = patch_alc662 }, 7078 7080 { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 }, 7079 7081 { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, 7082 + { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, 7080 7083 { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, 7081 7084 { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, 7082 7085 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, ··· 7095 7096 { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 }, 7096 7097 { .id = 0x10ec0892, .name = "ALC892", .patch = patch_alc662 }, 7097 7098 { .id = 0x10ec0899, .name = "ALC898", .patch = patch_alc882 }, 7099 + { .id = 0x10ec0900, .name = "ALC1150", .patch = patch_alc882 }, 7098 7100 {} /* terminator */ 7099 7101 }; 7100 7102
+2
sound/pci/hda/patch_sigmatel.c
··· 1763 1763 "HP", STAC_HP_ZEPHYR), 1764 1764 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3660, 1765 1765 "HP Mini", STAC_92HD83XXX_HP_LED), 1766 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x144E, 1767 + "HP Pavilion dv5", STAC_92HD83XXX_HP_INV_LED), 1766 1768 {} /* terminator */ 1767 1769 }; 1768 1770
+29 -7
sound/pci/hda/patch_via.c
··· 1809 1809 { 1810 1810 struct via_spec *spec = codec->spec; 1811 1811 const struct auto_pin_cfg *cfg = &spec->autocfg; 1812 - int i, dac_num; 1812 + int i; 1813 1813 hda_nid_t nid; 1814 1814 1815 + spec->multiout.num_dacs = 0; 1815 1816 spec->multiout.dac_nids = spec->private_dac_nids; 1816 - dac_num = 0; 1817 1817 for (i = 0; i < cfg->line_outs; i++) { 1818 1818 hda_nid_t dac = 0; 1819 1819 nid = cfg->line_out_pins[i]; ··· 1824 1824 if (!i && parse_output_path(codec, nid, dac, 1, 1825 1825 &spec->out_mix_path)) 1826 1826 dac = spec->out_mix_path.path[0]; 1827 - if (dac) { 1828 - spec->private_dac_nids[i] = dac; 1829 - dac_num++; 1830 - } 1827 + if (dac) 1828 + spec->private_dac_nids[spec->multiout.num_dacs++] = dac; 1831 1829 } 1832 1830 if (!spec->out_path[0].depth && spec->out_mix_path.depth) { 1833 1831 spec->out_path[0] = spec->out_mix_path; 1834 1832 spec->out_mix_path.depth = 0; 1835 1833 } 1836 - spec->multiout.num_dacs = dac_num; 1837 1834 return 0; 1838 1835 } 1839 1836 ··· 3625 3628 */ 3626 3629 enum { 3627 3630 VIA_FIXUP_INTMIC_BOOST, 3631 + VIA_FIXUP_ASUS_G75, 3628 3632 }; 3629 3633 3630 3634 static void via_fixup_intmic_boost(struct hda_codec *codec, ··· 3640 3642 .type = HDA_FIXUP_FUNC, 3641 3643 .v.func = via_fixup_intmic_boost, 3642 3644 }, 3645 + [VIA_FIXUP_ASUS_G75] = { 3646 + .type = HDA_FIXUP_PINS, 3647 + .v.pins = (const struct hda_pintbl[]) { 3648 + /* set 0x24 and 0x33 as speakers */ 3649 + { 0x24, 0x991301f0 }, 3650 + { 0x33, 0x991301f1 }, /* subwoofer */ 3651 + { } 3652 + } 3653 + }, 3643 3654 }; 3644 3655 3645 3656 static const struct snd_pci_quirk vt2002p_fixups[] = { 3657 + SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75), 3646 3658 SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST), 3647 3659 {} 3648 3660 }; 3661 + 3662 + /* NIDs 0x24 and 0x33 on VT1802 have connections to non-existing NID 0x3e 3663 + * Replace this with mixer NID 0x1c 3664 + */ 3665 + static void fix_vt1802_connections(struct hda_codec 
*codec) 3666 + { 3667 + static hda_nid_t conn_24[] = { 0x14, 0x1c }; 3668 + static hda_nid_t conn_33[] = { 0x1c }; 3669 + 3670 + snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24); 3671 + snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33); 3672 + } 3649 3673 3650 3674 /* patch for vt2002P */ 3651 3675 static int patch_vt2002P(struct hda_codec *codec) ··· 3683 3663 spec->aa_mix_nid = 0x21; 3684 3664 override_mic_boost(codec, 0x2b, 0, 3, 40); 3685 3665 override_mic_boost(codec, 0x29, 0, 3, 40); 3666 + if (spec->codec_type == VT1802) 3667 + fix_vt1802_connections(codec); 3686 3668 add_secret_dac_path(codec); 3687 3669 3688 3670 snd_hda_pick_fixup(codec, NULL, vt2002p_fixups, via_fixups);
+6 -1
sound/pci/ice1712/ice1724.c
··· 2859 2859 ice->set_spdif_clock(ice, 0); 2860 2860 } else { 2861 2861 /* internal on-card clock */ 2862 - snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1); 2862 + int rate; 2863 + if (ice->cur_rate) 2864 + rate = ice->cur_rate; 2865 + else 2866 + rate = ice->pro_rate_default; 2867 + snd_vt1724_set_pro_rate(ice, rate, 1); 2863 2868 } 2864 2869 2865 2870 update_spdif_bits(ice, ice->pm_saved_spdif_ctrl);
+3 -2
sound/pci/rme9652/hdspm.c
··· 3979 3979 case 8: /* SYNC IN */ 3980 3980 val = hdspm_sync_in_sync_check(hdspm); break; 3981 3981 default: 3982 - val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1); 3982 + val = hdspm_s1_sync_check(hdspm, 3983 + kcontrol->private_value-1); 3983 3984 } 3984 3985 break; 3985 3986 ··· 4900 4899 insel = "Coaxial"; 4901 4900 break; 4902 4901 default: 4903 - insel = "Unkown"; 4902 + insel = "Unknown"; 4904 4903 } 4905 4904 4906 4905 snd_iprintf(buffer,
+1 -1
sound/soc/codecs/cs42l52.c
··· 763 763 if ((freq >= CS42L52_MIN_CLK) && (freq <= CS42L52_MAX_CLK)) { 764 764 cs42l52->sysclk = freq; 765 765 } else { 766 - dev_err(codec->dev, "Invalid freq paramter\n"); 766 + dev_err(codec->dev, "Invalid freq parameter\n"); 767 767 return -EINVAL; 768 768 } 769 769 return 0;
+1 -1
sound/soc/codecs/wm8994.c
··· 3722 3722 } while (count--); 3723 3723 3724 3724 if (count == 0) 3725 - dev_warn(codec->dev, "No impedence range reported for jack\n"); 3725 + dev_warn(codec->dev, "No impedance range reported for jack\n"); 3726 3726 3727 3727 #ifndef CONFIG_SND_SOC_WM8994_MODULE 3728 3728 trace_snd_soc_jack_irq(dev_name(codec->dev));
+2 -2
sound/soc/omap/omap-dmic.c
··· 464 464 465 465 mutex_init(&dmic->mutex); 466 466 467 - dmic->fclk = clk_get(dmic->dev, "dmic_fck"); 467 + dmic->fclk = clk_get(dmic->dev, "fck"); 468 468 if (IS_ERR(dmic->fclk)) { 469 - dev_err(dmic->dev, "cant get dmic_fck\n"); 469 + dev_err(dmic->dev, "cant get fck\n"); 470 470 return -ENODEV; 471 471 } 472 472
+2 -3
sound/soc/omap/zoom2.c
··· 21 21 22 22 #include <linux/clk.h> 23 23 #include <linux/platform_device.h> 24 + #include <linux/gpio.h> 24 25 #include <sound/core.h> 25 26 #include <sound/pcm.h> 26 27 #include <sound/soc.h> 27 28 28 29 #include <asm/mach-types.h> 29 - #include <mach/hardware.h> 30 - #include <mach/gpio.h> 31 - #include <mach/board-zoom.h> 32 30 #include <linux/platform_data/asoc-ti-mcbsp.h> 31 + #include <linux/platform_data/gpio-omap.h> 33 32 34 33 /* Register descriptions for twl4030 codec part */ 35 34 #include <linux/mfd/twl4030-audio.h>
+8 -4
sound/usb/card.c
··· 339 339 } 340 340 341 341 mutex_init(&chip->mutex); 342 - mutex_init(&chip->shutdown_mutex); 342 + init_rwsem(&chip->shutdown_rwsem); 343 343 chip->index = idx; 344 344 chip->dev = dev; 345 345 chip->card = card; ··· 560 560 561 561 card = chip->card; 562 562 mutex_lock(&register_mutex); 563 - mutex_lock(&chip->shutdown_mutex); 563 + down_write(&chip->shutdown_rwsem); 564 564 chip->shutdown = 1; 565 565 chip->num_interfaces--; 566 566 if (chip->num_interfaces <= 0) { ··· 582 582 snd_usb_mixer_disconnect(p); 583 583 } 584 584 usb_chip[chip->index] = NULL; 585 - mutex_unlock(&chip->shutdown_mutex); 585 + up_write(&chip->shutdown_rwsem); 586 586 mutex_unlock(&register_mutex); 587 587 snd_card_free_when_closed(card); 588 588 } else { 589 - mutex_unlock(&chip->shutdown_mutex); 589 + up_write(&chip->shutdown_rwsem); 590 590 mutex_unlock(&register_mutex); 591 591 } 592 592 } ··· 618 618 { 619 619 int err = -ENODEV; 620 620 621 + down_read(&chip->shutdown_rwsem); 621 622 if (!chip->shutdown && !chip->probing) 622 623 err = usb_autopm_get_interface(chip->pm_intf); 624 + up_read(&chip->shutdown_rwsem); 623 625 624 626 return err; 625 627 } 626 628 627 629 void snd_usb_autosuspend(struct snd_usb_audio *chip) 628 630 { 631 + down_read(&chip->shutdown_rwsem); 629 632 if (!chip->shutdown && !chip->probing) 630 633 usb_autopm_put_interface(chip->pm_intf); 634 + up_read(&chip->shutdown_rwsem); 631 635 } 632 636 633 637 static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+1
sound/usb/card.h
··· 126 126 struct snd_usb_endpoint *sync_endpoint; 127 127 unsigned long flags; 128 128 bool need_setup_ep; /* (re)configure EP at prepare? */ 129 + unsigned int speed; /* USB_SPEED_XXX */ 129 130 130 131 u64 formats; /* format bitmasks (all or'ed) */ 131 132 unsigned int num_formats; /* number of supported audio formats (list) */
+13
sound/usb/endpoint.c
··· 35 35 36 36 #define EP_FLAG_ACTIVATED 0 37 37 #define EP_FLAG_RUNNING 1 38 + #define EP_FLAG_STOPPING 2 38 39 39 40 /* 40 41 * snd_usb_endpoint is a model that abstracts everything related to an ··· 503 502 if (alive) 504 503 snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n", 505 504 alive, ep->ep_num); 505 + clear_bit(EP_FLAG_STOPPING, &ep->flags); 506 506 507 507 return 0; 508 + } 509 + 510 + /* sync the pending stop operation; 511 + * this function itself doesn't trigger the stop operation 512 + */ 513 + void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep) 514 + { 515 + if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags)) 516 + wait_clear_urbs(ep); 508 517 } 509 518 510 519 /* ··· 929 918 930 919 if (wait) 931 920 wait_clear_urbs(ep); 921 + else 922 + set_bit(EP_FLAG_STOPPING, &ep->flags); 932 923 } 933 924 } 934 925
+1
sound/usb/endpoint.h
··· 19 19 int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep); 20 20 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, 21 21 int force, int can_sleep, int wait); 22 + void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep); 22 23 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); 23 24 int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); 24 25 void snd_usb_endpoint_free(struct list_head *head);
+43 -22
sound/usb/mixer.c
··· 287 287 unsigned char buf[2]; 288 288 int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1; 289 289 int timeout = 10; 290 - int err; 290 + int idx = 0, err; 291 291 292 292 err = snd_usb_autoresume(cval->mixer->chip); 293 293 if (err < 0) 294 294 return -EIO; 295 + down_read(&chip->shutdown_rwsem); 295 296 while (timeout-- > 0) { 297 + if (chip->shutdown) 298 + break; 299 + idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); 296 300 if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request, 297 301 USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, 298 - validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), 299 - buf, val_len) >= val_len) { 302 + validx, idx, buf, val_len) >= val_len) { 300 303 *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len)); 301 - snd_usb_autosuspend(cval->mixer->chip); 302 - return 0; 304 + err = 0; 305 + goto out; 303 306 } 304 307 } 305 - snd_usb_autosuspend(cval->mixer->chip); 306 308 snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", 307 - request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type); 308 - return -EINVAL; 309 + request, validx, idx, cval->val_type); 310 + err = -EINVAL; 311 + 312 + out: 313 + up_read(&chip->shutdown_rwsem); 314 + snd_usb_autosuspend(cval->mixer->chip); 315 + return err; 309 316 } 310 317 311 318 static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) ··· 320 313 struct snd_usb_audio *chip = cval->mixer->chip; 321 314 unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */ 322 315 unsigned char *val; 323 - int ret, size; 316 + int idx = 0, ret, size; 324 317 __u8 bRequest; 325 318 326 319 if (request == UAC_GET_CUR) { ··· 337 330 if (ret) 338 331 goto error; 339 332 340 - ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest, 333 + down_read(&chip->shutdown_rwsem); 334 + if (chip->shutdown) 335 + ret = -ENODEV; 336 + else { 337 + 
idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); 338 + ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest, 341 339 USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, 342 - validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), 343 - buf, size); 340 + validx, idx, buf, size); 341 + } 342 + up_read(&chip->shutdown_rwsem); 344 343 snd_usb_autosuspend(chip); 345 344 346 345 if (ret < 0) { 347 346 error: 348 347 snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", 349 - request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type); 348 + request, validx, idx, cval->val_type); 350 349 return ret; 351 350 } 352 351 ··· 430 417 { 431 418 struct snd_usb_audio *chip = cval->mixer->chip; 432 419 unsigned char buf[2]; 433 - int val_len, err, timeout = 10; 420 + int idx = 0, val_len, err, timeout = 10; 434 421 435 422 if (cval->mixer->protocol == UAC_VERSION_1) { 436 423 val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1; ··· 453 440 err = snd_usb_autoresume(chip); 454 441 if (err < 0) 455 442 return -EIO; 456 - while (timeout-- > 0) 443 + down_read(&chip->shutdown_rwsem); 444 + while (timeout-- > 0) { 445 + if (chip->shutdown) 446 + break; 447 + idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); 457 448 if (snd_usb_ctl_msg(chip->dev, 458 449 usb_sndctrlpipe(chip->dev, 0), request, 459 450 USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, 460 - validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), 461 - buf, val_len) >= 0) { 462 - snd_usb_autosuspend(chip); 463 - return 0; 451 + validx, idx, buf, val_len) >= 0) { 452 + err = 0; 453 + goto out; 464 454 } 465 - snd_usb_autosuspend(chip); 455 + } 466 456 snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n", 467 - request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type, buf[0], buf[1]); 468 - return -EINVAL; 457 + request, validx, idx, cval->val_type, buf[0], buf[1]); 458 + err = 
-EINVAL; 459 + 460 + out: 461 + up_read(&chip->shutdown_rwsem); 462 + snd_usb_autosuspend(chip); 463 + return err; 469 464 } 470 465 471 466 static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value)
+51 -7
sound/usb/mixer_quirks.c
··· 283 283 if (value > 1) 284 284 return -EINVAL; 285 285 changed = value != mixer->audigy2nx_leds[index]; 286 + down_read(&mixer->chip->shutdown_rwsem); 287 + if (mixer->chip->shutdown) { 288 + err = -ENODEV; 289 + goto out; 290 + } 286 291 if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) 287 292 err = snd_usb_ctl_msg(mixer->chip->dev, 288 293 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, ··· 304 299 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, 305 300 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 306 301 value, index + 2, NULL, 0); 302 + out: 303 + up_read(&mixer->chip->shutdown_rwsem); 307 304 if (err < 0) 308 305 return err; 309 306 mixer->audigy2nx_leds[index] = value; ··· 399 392 400 393 for (i = 0; jacks[i].name; ++i) { 401 394 snd_iprintf(buffer, "%s: ", jacks[i].name); 402 - err = snd_usb_ctl_msg(mixer->chip->dev, 395 + down_read(&mixer->chip->shutdown_rwsem); 396 + if (mixer->chip->shutdown) 397 + err = 0; 398 + else 399 + err = snd_usb_ctl_msg(mixer->chip->dev, 403 400 usb_rcvctrlpipe(mixer->chip->dev, 0), 404 401 UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS | 405 402 USB_RECIP_INTERFACE, 0, 406 403 jacks[i].unitid << 8, buf, 3); 404 + up_read(&mixer->chip->shutdown_rwsem); 407 405 if (err == 3 && (buf[0] == 3 || buf[0] == 6)) 408 406 snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]); 409 407 else ··· 438 426 else 439 427 new_status = old_status & ~0x02; 440 428 changed = new_status != old_status; 441 - err = snd_usb_ctl_msg(mixer->chip->dev, 429 + down_read(&mixer->chip->shutdown_rwsem); 430 + if (mixer->chip->shutdown) 431 + err = -ENODEV; 432 + else 433 + err = snd_usb_ctl_msg(mixer->chip->dev, 442 434 usb_sndctrlpipe(mixer->chip->dev, 0), 0x08, 443 435 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 444 436 50, 0, &new_status, 1); 437 + up_read(&mixer->chip->shutdown_rwsem); 445 438 if (err < 0) 446 439 return err; 447 440 mixer->xonar_u1_status = new_status; ··· 485 468 u8 bRequest = (kcontrol->private_value >> 16) & 0xff; 486 469 u16 wIndex = 
kcontrol->private_value & 0xffff; 487 470 u8 tmp; 471 + int ret; 488 472 489 - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, 473 + down_read(&mixer->chip->shutdown_rwsem); 474 + if (mixer->chip->shutdown) 475 + ret = -ENODEV; 476 + else 477 + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, 490 478 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 491 479 0, cpu_to_le16(wIndex), 492 480 &tmp, sizeof(tmp), 1000); 481 + up_read(&mixer->chip->shutdown_rwsem); 493 482 494 483 if (ret < 0) { 495 484 snd_printk(KERN_ERR ··· 516 493 u8 bRequest = (kcontrol->private_value >> 16) & 0xff; 517 494 u16 wIndex = kcontrol->private_value & 0xffff; 518 495 u16 wValue = ucontrol->value.integer.value[0]; 496 + int ret; 519 497 520 - int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, 498 + down_read(&mixer->chip->shutdown_rwsem); 499 + if (mixer->chip->shutdown) 500 + ret = -ENODEV; 501 + else 502 + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, 521 503 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 522 504 cpu_to_le16(wValue), cpu_to_le16(wIndex), 523 505 NULL, 0, 1000); 506 + up_read(&mixer->chip->shutdown_rwsem); 524 507 525 508 if (ret < 0) { 526 509 snd_printk(KERN_ERR ··· 685 656 return -EINVAL; 686 657 687 658 688 - err = snd_usb_ctl_msg(chip->dev, 659 + down_read(&mixer->chip->shutdown_rwsem); 660 + if (mixer->chip->shutdown) 661 + err = -ENODEV; 662 + else 663 + err = snd_usb_ctl_msg(chip->dev, 689 664 usb_rcvctrlpipe(chip->dev, 0), UAC_GET_CUR, 690 665 USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, 691 666 validx << 8, snd_usb_ctrl_intf(chip) | (id << 8), 692 667 value, val_len); 668 + up_read(&mixer->chip->shutdown_rwsem); 693 669 if (err < 0) 694 670 return err; 695 671 ··· 737 703 738 704 if (!pval->is_cached) { 739 705 /* Read current value */ 740 - err = snd_usb_ctl_msg(chip->dev, 706 + down_read(&mixer->chip->shutdown_rwsem); 707 + if (mixer->chip->shutdown) 708 + err = -ENODEV; 709 + else 710 + err 
= snd_usb_ctl_msg(chip->dev, 741 711 usb_rcvctrlpipe(chip->dev, 0), UAC_GET_CUR, 742 712 USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, 743 713 validx << 8, snd_usb_ctrl_intf(chip) | (id << 8), 744 714 value, val_len); 715 + up_read(&mixer->chip->shutdown_rwsem); 745 716 if (err < 0) 746 717 return err; 747 718 ··· 758 719 if (cur_val != new_val) { 759 720 value[0] = new_val; 760 721 value[1] = 0; 761 - err = snd_usb_ctl_msg(chip->dev, 722 + down_read(&mixer->chip->shutdown_rwsem); 723 + if (mixer->chip->shutdown) 724 + err = -ENODEV; 725 + else 726 + err = snd_usb_ctl_msg(chip->dev, 762 727 usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR, 763 728 USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, 764 729 validx << 8, snd_usb_ctrl_intf(chip) | (id << 8), 765 730 value, val_len); 731 + up_read(&mixer->chip->shutdown_rwsem); 766 732 if (err < 0) 767 733 return err; 768 734
+37 -19
sound/usb/pcm.c
··· 71 71 unsigned int hwptr_done; 72 72 73 73 subs = (struct snd_usb_substream *)substream->runtime->private_data; 74 + if (subs->stream->chip->shutdown) 75 + return SNDRV_PCM_POS_XRUN; 74 76 spin_lock(&subs->lock); 75 77 hwptr_done = subs->hwptr_done; 76 78 substream->runtime->delay = snd_usb_pcm_delay(subs, ··· 446 444 { 447 445 int ret; 448 446 449 - mutex_lock(&subs->stream->chip->shutdown_mutex); 450 447 /* format changed */ 451 448 stop_endpoints(subs, 0, 0, 0); 452 449 ret = snd_usb_endpoint_set_params(subs->data_endpoint, ··· 456 455 subs->cur_audiofmt, 457 456 subs->sync_endpoint); 458 457 if (ret < 0) 459 - goto unlock; 458 + return ret; 460 459 461 460 if (subs->sync_endpoint) 462 461 ret = snd_usb_endpoint_set_params(subs->data_endpoint, ··· 466 465 subs->cur_rate, 467 466 subs->cur_audiofmt, 468 467 NULL); 469 - 470 - unlock: 471 - mutex_unlock(&subs->stream->chip->shutdown_mutex); 472 468 return ret; 473 469 } 474 470 ··· 503 505 return -EINVAL; 504 506 } 505 507 506 - if ((ret = set_format(subs, fmt)) < 0) 508 + down_read(&subs->stream->chip->shutdown_rwsem); 509 + if (subs->stream->chip->shutdown) 510 + ret = -ENODEV; 511 + else 512 + ret = set_format(subs, fmt); 513 + up_read(&subs->stream->chip->shutdown_rwsem); 514 + if (ret < 0) 507 515 return ret; 508 516 509 517 subs->interface = fmt->iface; ··· 531 527 subs->cur_audiofmt = NULL; 532 528 subs->cur_rate = 0; 533 529 subs->period_bytes = 0; 534 - mutex_lock(&subs->stream->chip->shutdown_mutex); 535 - stop_endpoints(subs, 0, 1, 1); 536 - deactivate_endpoints(subs); 537 - mutex_unlock(&subs->stream->chip->shutdown_mutex); 530 + down_read(&subs->stream->chip->shutdown_rwsem); 531 + if (!subs->stream->chip->shutdown) { 532 + stop_endpoints(subs, 0, 1, 1); 533 + deactivate_endpoints(subs); 534 + } 535 + up_read(&subs->stream->chip->shutdown_rwsem); 538 536 return snd_pcm_lib_free_vmalloc_buffer(substream); 539 537 } 540 538 ··· 558 552 return -ENXIO; 559 553 } 560 554 561 - if 
(snd_BUG_ON(!subs->data_endpoint)) 562 - return -EIO; 555 + down_read(&subs->stream->chip->shutdown_rwsem); 556 + if (subs->stream->chip->shutdown) { 557 + ret = -ENODEV; 558 + goto unlock; 559 + } 560 + if (snd_BUG_ON(!subs->data_endpoint)) { 561 + ret = -EIO; 562 + goto unlock; 563 + } 564 + 565 + snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint); 566 + snd_usb_endpoint_sync_pending_stop(subs->data_endpoint); 563 567 564 568 ret = set_format(subs, subs->cur_audiofmt); 565 569 if (ret < 0) 566 - return ret; 570 + goto unlock; 567 571 568 572 iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface); 569 573 alts = &iface->altsetting[subs->cur_audiofmt->altset_idx]; ··· 583 567 subs->cur_audiofmt, 584 568 subs->cur_rate); 585 569 if (ret < 0) 586 - return ret; 570 + goto unlock; 587 571 588 572 if (subs->need_setup_ep) { 589 573 ret = configure_endpoint(subs); 590 574 if (ret < 0) 591 - return ret; 575 + goto unlock; 592 576 subs->need_setup_ep = false; 593 577 } 594 578 ··· 608 592 /* for playback, submit the URBs now; otherwise, the first hwptr_done 609 593 * updates for all URBs would happen at the same time when starting */ 610 594 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) 611 - return start_endpoints(subs, 1); 595 + ret = start_endpoints(subs, 1); 612 596 613 - return 0; 597 + unlock: 598 + up_read(&subs->stream->chip->shutdown_rwsem); 599 + return ret; 614 600 } 615 601 616 602 static struct snd_pcm_hardware snd_usb_hardware = ··· 665 647 return 0; 666 648 } 667 649 /* check whether the period time is >= the data packet interval */ 668 - if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL) { 650 + if (subs->speed != USB_SPEED_FULL) { 669 651 ptime = 125 * (1 << fp->datainterval); 670 652 if (ptime > pt->max || (ptime == pt->max && pt->openmax)) { 671 653 hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max); ··· 943 925 return err; 944 926 945 927 param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME; 946 - if 
(snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) 928 + if (subs->speed == USB_SPEED_FULL) 947 929 /* full speed devices have fixed data packet interval */ 948 930 ptmin = 1000; 949 931 if (ptmin == 1000)
+2 -2
sound/usb/proc.c
··· 108 108 } 109 109 snd_iprintf(buffer, "\n"); 110 110 } 111 - if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL) 111 + if (subs->speed != USB_SPEED_FULL) 112 112 snd_iprintf(buffer, " Data packet interval: %d us\n", 113 113 125 * (1 << fp->datainterval)); 114 114 // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize); ··· 124 124 return; 125 125 snd_iprintf(buffer, " Packet Size = %d\n", ep->curpacksize); 126 126 snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", 127 - snd_usb_get_speed(subs->dev) == USB_SPEED_FULL 127 + subs->speed == USB_SPEED_FULL 128 128 ? get_full_speed_hz(ep->freqm) 129 129 : get_high_speed_hz(ep->freqm), 130 130 ep->freqm >> 16, ep->freqm & 0xffff);
+1
sound/usb/stream.c
··· 90 90 subs->direction = stream; 91 91 subs->dev = as->chip->dev; 92 92 subs->txfr_quirk = as->chip->txfr_quirk; 93 + subs->speed = snd_usb_get_speed(subs->dev); 93 94 94 95 snd_usb_set_pcm_ops(as->pcm, stream); 95 96
+1 -1
sound/usb/usbaudio.h
··· 37 37 struct usb_interface *pm_intf; 38 38 u32 usb_id; 39 39 struct mutex mutex; 40 - struct mutex shutdown_mutex; 40 + struct rw_semaphore shutdown_rwsem; 41 41 unsigned int shutdown:1; 42 42 unsigned int probing:1; 43 43 unsigned int autosuspended:1;
+1 -1
tools/testing/selftests/Makefile
··· 1 - TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug epoll 1 + TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug 2 2 3 3 all: 4 4 for TARGET in $(TARGETS); do \
-11
tools/testing/selftests/epoll/Makefile
··· 1 - # Makefile for epoll selftests 2 - 3 - all: test_epoll 4 - %: %.c 5 - gcc -pthread -g -o $@ $^ 6 - 7 - run_tests: all 8 - ./test_epoll 9 - 10 - clean: 11 - $(RM) test_epoll
-344
tools/testing/selftests/epoll/test_epoll.c
··· 1 - /* 2 - * tools/testing/selftests/epoll/test_epoll.c 3 - * 4 - * Copyright 2012 Adobe Systems Incorporated 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * Paton J. Lewis <palewis@adobe.com> 12 - * 13 - */ 14 - 15 - #include <errno.h> 16 - #include <fcntl.h> 17 - #include <pthread.h> 18 - #include <stdio.h> 19 - #include <stdlib.h> 20 - #include <unistd.h> 21 - #include <sys/epoll.h> 22 - #include <sys/socket.h> 23 - 24 - /* 25 - * A pointer to an epoll_item_private structure will be stored in the epoll 26 - * item's event structure so that we can get access to the epoll_item_private 27 - * data after calling epoll_wait: 28 - */ 29 - struct epoll_item_private { 30 - int index; /* Position of this struct within the epoll_items array. */ 31 - int fd; 32 - uint32_t events; 33 - pthread_mutex_t mutex; /* Guards the following variables... */ 34 - int stop; 35 - int status; /* Stores any error encountered while handling item. */ 36 - /* The following variable allows us to test whether we have encountered 37 - a problem while attempting to cancel and delete the associated 38 - event. When the test program exits, 'deleted' should be exactly 39 - one. If it is greater than one, then the failed test reflects a real 40 - world situation where we would have tried to access the epoll item's 41 - private data after deleting it: */ 42 - int deleted; 43 - }; 44 - 45 - struct epoll_item_private *epoll_items; 46 - 47 - /* 48 - * Delete the specified item from the epoll set. 
In a real-world secneario this 49 - * is where we would free the associated data structure, but in this testing 50 - * environment we retain the structure so that we can test for double-deletion: 51 - */ 52 - void delete_item(int index) 53 - { 54 - __sync_fetch_and_add(&epoll_items[index].deleted, 1); 55 - } 56 - 57 - /* 58 - * A pointer to a read_thread_data structure will be passed as the argument to 59 - * each read thread: 60 - */ 61 - struct read_thread_data { 62 - int stop; 63 - int status; /* Indicates any error encountered by the read thread. */ 64 - int epoll_set; 65 - }; 66 - 67 - /* 68 - * The function executed by the read threads: 69 - */ 70 - void *read_thread_function(void *function_data) 71 - { 72 - struct read_thread_data *thread_data = 73 - (struct read_thread_data *)function_data; 74 - struct epoll_event event_data; 75 - struct epoll_item_private *item_data; 76 - char socket_data; 77 - 78 - /* Handle events until we encounter an error or this thread's 'stop' 79 - condition is set: */ 80 - while (1) { 81 - int result = epoll_wait(thread_data->epoll_set, 82 - &event_data, 83 - 1, /* Number of desired events */ 84 - 1000); /* Timeout in ms */ 85 - if (result < 0) { 86 - /* Breakpoints signal all threads. 
Ignore that while 87 - debugging: */ 88 - if (errno == EINTR) 89 - continue; 90 - thread_data->status = errno; 91 - return 0; 92 - } else if (thread_data->stop) 93 - return 0; 94 - else if (result == 0) /* Timeout */ 95 - continue; 96 - 97 - /* We need the mutex here because checking for the stop 98 - condition and re-enabling the epoll item need to be done 99 - together as one atomic operation when EPOLL_CTL_DISABLE is 100 - available: */ 101 - item_data = (struct epoll_item_private *)event_data.data.ptr; 102 - pthread_mutex_lock(&item_data->mutex); 103 - 104 - /* Remove the item from the epoll set if we want to stop 105 - handling that event: */ 106 - if (item_data->stop) 107 - delete_item(item_data->index); 108 - else { 109 - /* Clear the data that was written to the other end of 110 - our non-blocking socket: */ 111 - do { 112 - if (read(item_data->fd, &socket_data, 1) < 1) { 113 - if ((errno == EAGAIN) || 114 - (errno == EWOULDBLOCK)) 115 - break; 116 - else 117 - goto error_unlock; 118 - } 119 - } while (item_data->events & EPOLLET); 120 - 121 - /* The item was one-shot, so re-enable it: */ 122 - event_data.events = item_data->events; 123 - if (epoll_ctl(thread_data->epoll_set, 124 - EPOLL_CTL_MOD, 125 - item_data->fd, 126 - &event_data) < 0) 127 - goto error_unlock; 128 - } 129 - 130 - pthread_mutex_unlock(&item_data->mutex); 131 - } 132 - 133 - error_unlock: 134 - thread_data->status = item_data->status = errno; 135 - pthread_mutex_unlock(&item_data->mutex); 136 - return 0; 137 - } 138 - 139 - /* 140 - * A pointer to a write_thread_data structure will be passed as the argument to 141 - * the write thread: 142 - */ 143 - struct write_thread_data { 144 - int stop; 145 - int status; /* Indicates any error encountered by the write thread. */ 146 - int n_fds; 147 - int *fds; 148 - }; 149 - 150 - /* 151 - * The function executed by the write thread. It writes a single byte to each 152 - * socket in turn until the stop condition for this thread is set. 
If writing to 153 - * a socket would block (i.e. errno was EAGAIN), we leave that socket alone for 154 - * the moment and just move on to the next socket in the list. We don't care 155 - * about the order in which we deliver events to the epoll set. In fact we don't 156 - * care about the data we're writing to the pipes at all; we just want to 157 - * trigger epoll events: 158 - */ 159 - void *write_thread_function(void *function_data) 160 - { 161 - const char data = 'X'; 162 - int index; 163 - struct write_thread_data *thread_data = 164 - (struct write_thread_data *)function_data; 165 - while (!thread_data->stop) 166 - for (index = 0; 167 - !thread_data->stop && (index < thread_data->n_fds); 168 - ++index) 169 - if ((write(thread_data->fds[index], &data, 1) < 1) && 170 - (errno != EAGAIN) && 171 - (errno != EWOULDBLOCK)) { 172 - thread_data->status = errno; 173 - return; 174 - } 175 - } 176 - 177 - /* 178 - * Arguments are currently ignored: 179 - */ 180 - int main(int argc, char **argv) 181 - { 182 - const int n_read_threads = 100; 183 - const int n_epoll_items = 500; 184 - int index; 185 - int epoll_set = epoll_create1(0); 186 - struct write_thread_data write_thread_data = { 187 - 0, 0, n_epoll_items, malloc(n_epoll_items * sizeof(int)) 188 - }; 189 - struct read_thread_data *read_thread_data = 190 - malloc(n_read_threads * sizeof(struct read_thread_data)); 191 - pthread_t *read_threads = malloc(n_read_threads * sizeof(pthread_t)); 192 - pthread_t write_thread; 193 - 194 - printf("-----------------\n"); 195 - printf("Runing test_epoll\n"); 196 - printf("-----------------\n"); 197 - 198 - epoll_items = malloc(n_epoll_items * sizeof(struct epoll_item_private)); 199 - 200 - if (epoll_set < 0 || epoll_items == 0 || write_thread_data.fds == 0 || 201 - read_thread_data == 0 || read_threads == 0) 202 - goto error; 203 - 204 - if (sysconf(_SC_NPROCESSORS_ONLN) < 2) { 205 - printf("Error: please run this test on a multi-core system.\n"); 206 - goto error; 207 - } 208 - 
209 - /* Create the socket pairs and epoll items: */ 210 - for (index = 0; index < n_epoll_items; ++index) { 211 - int socket_pair[2]; 212 - struct epoll_event event_data; 213 - if (socketpair(AF_UNIX, 214 - SOCK_STREAM | SOCK_NONBLOCK, 215 - 0, 216 - socket_pair) < 0) 217 - goto error; 218 - write_thread_data.fds[index] = socket_pair[0]; 219 - epoll_items[index].index = index; 220 - epoll_items[index].fd = socket_pair[1]; 221 - if (pthread_mutex_init(&epoll_items[index].mutex, NULL) != 0) 222 - goto error; 223 - /* We always use EPOLLONESHOT because this test is currently 224 - structured to demonstrate the need for EPOLL_CTL_DISABLE, 225 - which only produces useful information in the EPOLLONESHOT 226 - case (without EPOLLONESHOT, calling epoll_ctl with 227 - EPOLL_CTL_DISABLE will never return EBUSY). If support for 228 - testing events without EPOLLONESHOT is desired, it should 229 - probably be implemented in a separate unit test. */ 230 - epoll_items[index].events = EPOLLIN | EPOLLONESHOT; 231 - if (index < n_epoll_items / 2) 232 - epoll_items[index].events |= EPOLLET; 233 - epoll_items[index].stop = 0; 234 - epoll_items[index].status = 0; 235 - epoll_items[index].deleted = 0; 236 - event_data.events = epoll_items[index].events; 237 - event_data.data.ptr = &epoll_items[index]; 238 - if (epoll_ctl(epoll_set, 239 - EPOLL_CTL_ADD, 240 - epoll_items[index].fd, 241 - &event_data) < 0) 242 - goto error; 243 - } 244 - 245 - /* Create and start the read threads: */ 246 - for (index = 0; index < n_read_threads; ++index) { 247 - read_thread_data[index].stop = 0; 248 - read_thread_data[index].status = 0; 249 - read_thread_data[index].epoll_set = epoll_set; 250 - if (pthread_create(&read_threads[index], 251 - NULL, 252 - read_thread_function, 253 - &read_thread_data[index]) != 0) 254 - goto error; 255 - } 256 - 257 - if (pthread_create(&write_thread, 258 - NULL, 259 - write_thread_function, 260 - &write_thread_data) != 0) 261 - goto error; 262 - 263 - /* Cancel all event 
pollers: */ 264 - #ifdef EPOLL_CTL_DISABLE 265 - for (index = 0; index < n_epoll_items; ++index) { 266 - pthread_mutex_lock(&epoll_items[index].mutex); 267 - ++epoll_items[index].stop; 268 - if (epoll_ctl(epoll_set, 269 - EPOLL_CTL_DISABLE, 270 - epoll_items[index].fd, 271 - NULL) == 0) 272 - delete_item(index); 273 - else if (errno != EBUSY) { 274 - pthread_mutex_unlock(&epoll_items[index].mutex); 275 - goto error; 276 - } 277 - /* EBUSY means events were being handled; allow the other thread 278 - to delete the item. */ 279 - pthread_mutex_unlock(&epoll_items[index].mutex); 280 - } 281 - #else 282 - for (index = 0; index < n_epoll_items; ++index) { 283 - pthread_mutex_lock(&epoll_items[index].mutex); 284 - ++epoll_items[index].stop; 285 - pthread_mutex_unlock(&epoll_items[index].mutex); 286 - /* Wait in case a thread running read_thread_function is 287 - currently executing code between epoll_wait and 288 - pthread_mutex_lock with this item. Note that a longer delay 289 - would make double-deletion less likely (at the expense of 290 - performance), but there is no guarantee that any delay would 291 - ever be sufficient. Note also that we delete all event 292 - pollers at once for testing purposes, but in a real-world 293 - environment we are likely to want to be able to cancel event 294 - pollers at arbitrary times. Therefore we can't improve this 295 - situation by just splitting this loop into two loops 296 - (i.e. signal 'stop' for all items, sleep, and then delete all 297 - items). 
We also can't fix the problem via EPOLL_CTL_DEL 298 - because that command can't prevent the case where some other 299 - thread is executing read_thread_function within the region 300 - mentioned above: */ 301 - usleep(1); 302 - pthread_mutex_lock(&epoll_items[index].mutex); 303 - if (!epoll_items[index].deleted) 304 - delete_item(index); 305 - pthread_mutex_unlock(&epoll_items[index].mutex); 306 - } 307 - #endif 308 - 309 - /* Shut down the read threads: */ 310 - for (index = 0; index < n_read_threads; ++index) 311 - __sync_fetch_and_add(&read_thread_data[index].stop, 1); 312 - for (index = 0; index < n_read_threads; ++index) { 313 - if (pthread_join(read_threads[index], NULL) != 0) 314 - goto error; 315 - if (read_thread_data[index].status) 316 - goto error; 317 - } 318 - 319 - /* Shut down the write thread: */ 320 - __sync_fetch_and_add(&write_thread_data.stop, 1); 321 - if ((pthread_join(write_thread, NULL) != 0) || write_thread_data.status) 322 - goto error; 323 - 324 - /* Check for final error conditions: */ 325 - for (index = 0; index < n_epoll_items; ++index) { 326 - if (epoll_items[index].status != 0) 327 - goto error; 328 - if (pthread_mutex_destroy(&epoll_items[index].mutex) < 0) 329 - goto error; 330 - } 331 - for (index = 0; index < n_epoll_items; ++index) 332 - if (epoll_items[index].deleted != 1) { 333 - printf("Error: item data deleted %1d times.\n", 334 - epoll_items[index].deleted); 335 - goto error; 336 - } 337 - 338 - printf("[PASS]\n"); 339 - return 0; 340 - 341 - error: 342 - printf("[FAIL]\n"); 343 - return errno; 344 - }