Merge commit 'v2.6.36-rc7' into spi/next

+994 -564
+4 -4
CREDITS
··· 3554 3554 D: portions of the Linux Security Module (LSM) framework and security modules 3555 3555 3556 3556 N: Petr Vandrovec 3557 - E: vandrove@vc.cvut.cz 3557 + E: petr@vandrovec.name 3558 3558 D: Small contributions to ncpfs 3559 3559 D: Matrox framebuffer driver 3560 - S: Chudenicka 8 3561 - S: 10200 Prague 10, Hostivar 3562 - S: Czech Republic 3560 + S: 21513 Conradia Ct 3561 + S: Cupertino, CA 95014 3562 + S: USA 3563 3563 3564 3564 N: Thibaut Varene 3565 3565 E: T-Bone@parisc-linux.org
+10 -4
MAINTAINERS
··· 962 962 S: Maintained 963 963 F: arch/arm/mach-s3c6410/ 964 964 965 + ARM/S5P ARM ARCHITECTURES 966 + M: Kukjin Kim <kgene.kim@samsung.com> 967 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 968 + L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 969 + S: Maintained 970 + F: arch/arm/mach-s5p*/ 971 + 965 972 ARM/SHMOBILE ARM ARCHITECTURE 966 973 M: Paul Mundt <lethal@linux-sh.org> 967 974 M: Magnus Damm <magnus.damm@gmail.com> ··· 3788 3781 S: Supported 3789 3782 3790 3783 MATROX FRAMEBUFFER DRIVER 3791 - M: Petr Vandrovec <vandrove@vc.cvut.cz> 3792 3784 L: linux-fbdev@vger.kernel.org 3793 - S: Maintained 3785 + S: Orphan 3794 3786 F: drivers/video/matrox/matroxfb_* 3795 3787 F: include/linux/matroxfb.h 3796 3788 ··· 3976 3970 F: drivers/net/natsemi.c 3977 3971 3978 3972 NCP FILESYSTEM 3979 - M: Petr Vandrovec <vandrove@vc.cvut.cz> 3980 - S: Maintained 3973 + M: Petr Vandrovec <petr@vandrovec.name> 3974 + S: Odd Fixes 3981 3975 F: fs/ncpfs/ 3982 3976 3983 3977 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 36 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Sheep on Meth 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/alpha/kernel/signal.c
··· 48 48 sigset_t mask; 49 49 unsigned long res; 50 50 51 - siginitset(&mask, newmask & ~_BLOCKABLE); 51 + siginitset(&mask, newmask & _BLOCKABLE); 52 52 res = sigprocmask(how, &mask, &oldmask); 53 53 if (!res) { 54 54 force_successful_syscall_return();
+5 -2
arch/arm/oprofile/common.c
··· 102 102 if (IS_ERR(pevent)) { 103 103 ret = PTR_ERR(pevent); 104 104 } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { 105 + perf_event_release_kernel(pevent); 105 106 pr_warning("oprofile: failed to enable event %d " 106 107 "on CPU %d\n", event, cpu); 107 108 ret = -EBUSY; ··· 366 365 ret = init_driverfs(); 367 366 if (ret) { 368 367 kfree(counter_config); 368 + counter_config = NULL; 369 369 return ret; 370 370 } 371 371 ··· 404 402 struct perf_event *event; 405 403 406 404 if (*perf_events) { 407 - exit_driverfs(); 408 405 for_each_possible_cpu(cpu) { 409 406 for (id = 0; id < perf_num_counters; ++id) { 410 407 event = perf_events[cpu][id]; ··· 414 413 } 415 414 } 416 415 417 - if (counter_config) 416 + if (counter_config) { 418 417 kfree(counter_config); 418 + exit_driverfs(); 419 + } 419 420 } 420 421 #else 421 422 int __init oprofile_arch_init(struct oprofile_operations *ops)
+1 -1
arch/arm/plat-omap/Kconfig
··· 33 33 config OMAP_DEBUG_LEDS 34 34 bool 35 35 depends on OMAP_DEBUG_DEVICES 36 - default y if LEDS 36 + default y if LEDS_CLASS 37 37 38 38 config OMAP_RESET_CLOCKS 39 39 bool "Reset unused clocks during boot"
+1 -1
arch/arm/plat-omap/mcbsp.c
··· 156 156 /* Writing zero to RSYNC_ERR clears the IRQ */ 157 157 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); 158 158 } else { 159 - complete(&mcbsp_rx->tx_irq_completion); 159 + complete(&mcbsp_rx->rx_irq_completion); 160 160 } 161 161 162 162 return IRQ_HANDLED;
+1 -2
arch/avr32/kernel/module.c
··· 314 314 vfree(module->arch.syminfo); 315 315 module->arch.syminfo = NULL; 316 316 317 - return module_bug_finalize(hdr, sechdrs, module); 317 + return 0; 318 318 } 319 319 320 320 void module_arch_cleanup(struct module *module) 321 321 { 322 - module_bug_cleanup(module); 323 322 }
+1 -2
arch/h8300/kernel/module.c
··· 112 112 const Elf_Shdr *sechdrs, 113 113 struct module *me) 114 114 { 115 - return module_bug_finalize(hdr, sechdrs, me); 115 + return 0; 116 116 } 117 117 118 118 void module_arch_cleanup(struct module *mod) 119 119 { 120 - module_bug_cleanup(mod); 121 120 }
+3 -3
arch/m68k/mac/macboing.c
··· 162 162 void mac_mksound( unsigned int freq, unsigned int length ) 163 163 { 164 164 __u32 cfreq = ( freq << 5 ) / 468; 165 - __u32 flags; 165 + unsigned long flags; 166 166 int i; 167 167 168 168 if ( mac_special_bell == NULL ) ··· 224 224 */ 225 225 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) 226 226 { 227 - __u32 flags; 227 + unsigned long flags; 228 228 229 229 /* if the bell is already ringing, ring longer */ 230 230 if ( mac_bell_duration > 0 ) ··· 271 271 static void mac_quadra_ring_bell( unsigned long ignored ) 272 272 { 273 273 int i, count = mac_asc_samplespersec / HZ; 274 - __u32 flags; 274 + unsigned long flags; 275 275 276 276 /* 277 277 * we neither want a sound buffer overflow nor underflow, so we need to match
+19 -2
arch/mips/Kconfig
··· 13 13 select HAVE_KPROBES 14 14 select HAVE_KRETPROBES 15 15 select RTC_LIB if !MACH_LOONGSON 16 + select GENERIC_ATOMIC64 if !64BIT 16 17 17 18 mainmenu "Linux/MIPS Kernel Configuration" 18 19 ··· 1647 1646 select SYS_SUPPORTS_SMP 1648 1647 select SMP_UP 1649 1648 help 1650 - This is a kernel model which is also known a VSMP or lately 1651 - has been marketesed into SMVP. 1649 + This is a kernel model which is known a VSMP but lately has been 1650 + marketesed into SMVP. 1651 + Virtual SMP uses the processor's VPEs to implement virtual 1652 + processors. In currently available configuration of the 34K processor 1653 + this allows for a dual processor. Both processors will share the same 1654 + primary caches; each will obtain the half of the TLB for it's own 1655 + exclusive use. For a layman this model can be described as similar to 1656 + what Intel calls Hyperthreading. 1657 + 1658 + For further information see http://www.linux-mips.org/wiki/34K#VSMP 1652 1659 1653 1660 config MIPS_MT_SMTC 1654 1661 bool "SMTC: Use all TCs on all VPEs for SMP" ··· 1673 1664 help 1674 1665 This is a kernel model which is known a SMTC or lately has been 1675 1666 marketesed into SMVP. 1667 + is presenting the available TC's of the core as processors to Linux. 1668 + On currently available 34K processors this means a Linux system will 1669 + see up to 5 processors. The implementation of the SMTC kernel differs 1670 + significantly from VSMP and cannot efficiently coexist in the same 1671 + kernel binary so the choice between VSMP and SMTC is a compile time 1672 + decision. 1673 + 1674 + For further information see http://www.linux-mips.org/wiki/34K#SMTC 1676 1675 1677 1676 endchoice 1678 1677
+2 -3
arch/mips/alchemy/common/prom.c
··· 43 43 char **prom_argv; 44 44 char **prom_envp; 45 45 46 - void prom_init_cmdline(void) 46 + void __init prom_init_cmdline(void) 47 47 { 48 48 int i; 49 49 ··· 104 104 } 105 105 } 106 106 107 - int prom_get_ethernet_addr(char *ethernet_addr) 107 + int __init prom_get_ethernet_addr(char *ethernet_addr) 108 108 { 109 109 char *ethaddr_str; 110 110 ··· 123 123 124 124 return 0; 125 125 } 126 - EXPORT_SYMBOL(prom_get_ethernet_addr); 127 126 128 127 void __init prom_free_prom_memory(void) 129 128 {
+1 -1
arch/mips/boot/compressed/Makefile
··· 59 59 hostprogs-y := calc_vmlinuz_load_addr 60 60 61 61 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ 62 - $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS)) 62 + $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) 63 63 64 64 vmlinuzobjs-y += $(obj)/piggy.o 65 65
+4
arch/mips/cavium-octeon/Kconfig
··· 83 83 def_bool y 84 84 select SPARSEMEM_STATIC 85 85 depends on CPU_CAVIUM_OCTEON 86 + 87 + config CAVIUM_OCTEON_HELPER 88 + def_bool y 89 + depends on OCTEON_ETHERNET || PCI
+1 -1
arch/mips/cavium-octeon/cpu.c
··· 41 41 return NOTIFY_OK; /* Let default notifier send signals */ 42 42 } 43 43 44 - static int cnmips_cu2_setup(void) 44 + static int __init cnmips_cu2_setup(void) 45 45 { 46 46 return cu2_notifier(cnmips_cu2_call, 0); 47 47 }
+1 -1
arch/mips/cavium-octeon/executive/Makefile
··· 11 11 12 12 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o 13 13 14 - obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o 14 + obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
+4
arch/mips/include/asm/atomic.h
··· 782 782 */ 783 783 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) 784 784 785 + #else /* !CONFIG_64BIT */ 786 + 787 + #include <asm-generic/atomic64.h> 788 + 785 789 #endif /* CONFIG_64BIT */ 786 790 787 791 /*
+1 -1
arch/mips/include/asm/cop2.h
··· 24 24 25 25 #define cu2_notifier(fn, pri) \ 26 26 ({ \ 27 - static struct notifier_block fn##_nb __cpuinitdata = { \ 27 + static struct notifier_block fn##_nb = { \ 28 28 .notifier_call = fn, \ 29 29 .priority = pri \ 30 30 }; \
+1
arch/mips/include/asm/gic.h
··· 321 321 */ 322 322 struct gic_intr_map { 323 323 unsigned int cpunum; /* Directed to this CPU */ 324 + #define GIC_UNUSED 0xdead /* Dummy data */ 324 325 unsigned int pin; /* Directed to this Pin */ 325 326 unsigned int polarity; /* Polarity : +/- */ 326 327 unsigned int trigtype; /* Trigger : Edge/Levl */
+1 -1
arch/mips/include/asm/mach-tx49xx/kmalloc.h
··· 1 1 #ifndef __ASM_MACH_TX49XX_KMALLOC_H 2 2 #define __ASM_MACH_TX49XX_KMALLOC_H 3 3 4 - #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES 4 + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 5 5 6 6 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */
-3
arch/mips/include/asm/mips-boards/maltaint.h
··· 88 88 89 89 #define GIC_EXT_INTR(x) x 90 90 91 - /* Dummy data */ 92 - #define X 0xdead 93 - 94 91 /* External Interrupts used for IPI */ 95 92 #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 96 93 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
+14
arch/mips/include/asm/page.h
··· 150 150 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 151 151 #endif 152 152 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 153 + 154 + /* 155 + * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad 156 + * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The 157 + * discussion can be found in lkml posting 158 + * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is 159 + * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html 160 + * 161 + * It is unclear if the misscompilations mentioned in 162 + * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one 163 + * until GCC 3.x has been retired before we can apply 164 + * https://patchwork.linux-mips.org/patch/1541/ 165 + */ 166 + 153 167 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 154 168 155 169 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+2 -1
arch/mips/include/asm/thread_info.h
··· 146 146 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) 147 147 148 148 /* work to do on interrupt/exception return */ 149 - #define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP) 149 + #define _TIF_WORK_MASK (0x0000ffef & \ 150 + ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) 150 151 /* work to do on any return to u-space */ 151 152 #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) 152 153
+15 -6
arch/mips/include/asm/unistd.h
··· 356 356 #define __NR_perf_event_open (__NR_Linux + 333) 357 357 #define __NR_accept4 (__NR_Linux + 334) 358 358 #define __NR_recvmmsg (__NR_Linux + 335) 359 + #define __NR_fanotify_init (__NR_Linux + 336) 360 + #define __NR_fanotify_mark (__NR_Linux + 337) 361 + #define __NR_prlimit64 (__NR_Linux + 338) 359 362 360 363 /* 361 364 * Offset of the last Linux o32 flavoured syscall 362 365 */ 363 - #define __NR_Linux_syscalls 335 366 + #define __NR_Linux_syscalls 338 364 367 365 368 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 366 369 367 370 #define __NR_O32_Linux 4000 368 - #define __NR_O32_Linux_syscalls 335 371 + #define __NR_O32_Linux_syscalls 338 369 372 370 373 #if _MIPS_SIM == _MIPS_SIM_ABI64 371 374 ··· 671 668 #define __NR_perf_event_open (__NR_Linux + 292) 672 669 #define __NR_accept4 (__NR_Linux + 293) 673 670 #define __NR_recvmmsg (__NR_Linux + 294) 671 + #define __NR_fanotify_init (__NR_Linux + 295) 672 + #define __NR_fanotify_mark (__NR_Linux + 296) 673 + #define __NR_prlimit64 (__NR_Linux + 297) 674 674 675 675 /* 676 676 * Offset of the last Linux 64-bit flavoured syscall 677 677 */ 678 - #define __NR_Linux_syscalls 294 678 + #define __NR_Linux_syscalls 297 679 679 680 680 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 681 681 682 682 #define __NR_64_Linux 5000 683 - #define __NR_64_Linux_syscalls 294 683 + #define __NR_64_Linux_syscalls 297 684 684 685 685 #if _MIPS_SIM == _MIPS_SIM_NABI32 686 686 ··· 991 985 #define __NR_accept4 (__NR_Linux + 297) 992 986 #define __NR_recvmmsg (__NR_Linux + 298) 993 987 #define __NR_getdents64 (__NR_Linux + 299) 988 + #define __NR_fanotify_init (__NR_Linux + 300) 989 + #define __NR_fanotify_mark (__NR_Linux + 301) 990 + #define __NR_prlimit64 (__NR_Linux + 302) 994 991 995 992 /* 996 993 * Offset of the last N32 flavoured syscall 997 994 */ 998 - #define __NR_Linux_syscalls 299 995 + #define __NR_Linux_syscalls 302 999 996 1000 997 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1001 998 1002 999 #define __NR_N32_Linux 6000 1003 - #define __NR_N32_Linux_syscalls 299 1000 + #define __NR_N32_Linux_syscalls 302 1004 1001 1005 1002 #ifdef __KERNEL__ 1006 1003
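The unistd.h hunk above assigns o32, 64-bit and N32 numbers to the new fanotify_init, fanotify_mark and prlimit64 system calls. Until libc grows wrappers, such calls are reached through syscall(2) with exactly these numbers. Below is a minimal userspace sketch; the fallback define is only a stand-in taken from the o32 values in the hunk (__NR_O32_Linux 4000 + 336), so on another ABI the system header's own value is used instead, and if neither matches the call simply fails with ENOSYS.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_fanotify_init
    /* o32 number from the hunk above; other ABIs use different values */
    #define __NR_fanotify_init (4000 + 336)
    #endif

    int main(void)
    {
        /* flags = 0 (FAN_CLASS_NOTIF), event_f_flags = 0 (O_RDONLY) */
        long fd = syscall(__NR_fanotify_init, 0, 0);

        if (fd < 0)
            printf("fanotify_init failed: %s\n", strerror(errno));
        else
            printf("fanotify fd = %ld\n", fd);
        return 0;
    }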
+2 -3
arch/mips/kernel/irq-gic.c
··· 7 7 #include <asm/io.h> 8 8 #include <asm/gic.h> 9 9 #include <asm/gcmpregs.h> 10 - #include <asm/mips-boards/maltaint.h> 11 10 #include <asm/irq.h> 12 11 #include <linux/hardirq.h> 13 12 #include <asm-generic/bitops/find.h> ··· 130 131 int i; 131 132 132 133 irq -= _irqbase; 133 - pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); 134 + pr_debug("%s(%d) called\n", __func__, irq); 134 135 cpumask_and(&tmp, cpumask, cpu_online_mask); 135 136 if (cpus_empty(tmp)) 136 137 return -1; ··· 221 222 /* Setup specifics */ 222 223 for (i = 0; i < mapsize; i++) { 223 224 cpu = intrmap[i].cpunum; 224 - if (cpu == X) 225 + if (cpu == GIC_UNUSED) 225 226 continue; 226 227 if (cpu == 0 && i != 0 && intrmap[i].flags == 0) 227 228 continue;
+1 -1
arch/mips/kernel/kgdb.c
··· 283 283 struct pt_regs *regs = args->regs; 284 284 int trap = (regs->cp0_cause & 0x7c) >> 2; 285 285 286 - /* Userpace events, ignore. */ 286 + /* Userspace events, ignore. */ 287 287 if (user_mode(regs)) 288 288 return NOTIFY_DONE; 289 289
+1 -1
arch/mips/kernel/kspd.c
··· 251 251 memset(&tz, 0, sizeof(tz)); 252 252 if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, 253 253 (int)&tz, 0, 0)) == 0) 254 - ret.retval = tv.tv_sec; 254 + ret.retval = tv.tv_sec; 255 255 break; 256 256 257 257 case MTSP_SYSCALL_EXIT:
+7
arch/mips/kernel/linux32.c
··· 341 341 { 342 342 return sys_lookup_dcookie(merge_64(a0, a1), buf, len); 343 343 } 344 + 345 + SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, 346 + u64, a3, u64, a4, int, dfd, const char __user *, pathname) 347 + { 348 + return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), 349 + dfd, pathname); 350 + }
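The compat wrapper added above exists because an o32 caller passes the 64-bit fanotify mask in two 32-bit argument slots, which the kernel stitches back together with merge_64() before calling sys_fanotify_mark(). A standalone sketch of that recombination follows; the real merge_64() also handles the o32 register-pair and endianness conventions, which are ignored here.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: rebuild a 64-bit value passed as two 32-bit halves,
     * as a compat wrapper like sys_32_fanotify_mark() must do. */
    static uint64_t merge_halves(uint32_t hi, uint32_t lo)
    {
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        uint64_t mask = 0x123456789abcdef0ULL;
        uint32_t hi = mask >> 32, lo = mask & 0xffffffffu;

        printf("merged: %#llx\n", (unsigned long long)merge_halves(hi, lo));
        return 0;
    }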
+4 -1
arch/mips/kernel/scall32-o32.S
··· 583 583 sys sys_rt_tgsigqueueinfo 4 584 584 sys sys_perf_event_open 5 585 585 sys sys_accept4 4 586 - sys sys_recvmmsg 5 586 + sys sys_recvmmsg 5 /* 4335 */ 587 + sys sys_fanotify_init 2 588 + sys sys_fanotify_mark 6 589 + sys sys_prlimit64 4 587 590 .endm 588 591 589 592 /* We pre-compute the number of _instruction_ bytes needed to
+5 -2
arch/mips/kernel/scall64-64.S
··· 416 416 PTR sys_pipe2 417 417 PTR sys_inotify_init1 418 418 PTR sys_preadv 419 - PTR sys_pwritev /* 5390 */ 419 + PTR sys_pwritev /* 5290 */ 420 420 PTR sys_rt_tgsigqueueinfo 421 421 PTR sys_perf_event_open 422 422 PTR sys_accept4 423 - PTR sys_recvmmsg 423 + PTR sys_recvmmsg 424 + PTR sys_fanotify_init /* 5295 */ 425 + PTR sys_fanotify_mark 426 + PTR sys_prlimit64 424 427 .size sys_call_table,.-sys_call_table
+4 -1
arch/mips/kernel/scall64-n32.S
··· 419 419 PTR sys_perf_event_open 420 420 PTR sys_accept4 421 421 PTR compat_sys_recvmmsg 422 - PTR sys_getdents 422 + PTR sys_getdents64 423 + PTR sys_fanotify_init /* 6300 */ 424 + PTR sys_fanotify_mark 425 + PTR sys_prlimit64 423 426 .size sysn32_call_table,.-sysn32_call_table
+4 -1
arch/mips/kernel/scall64-o32.S
··· 538 538 PTR compat_sys_rt_tgsigqueueinfo 539 539 PTR sys_perf_event_open 540 540 PTR sys_accept4 541 - PTR compat_sys_recvmmsg 541 + PTR compat_sys_recvmmsg /* 4335 */ 542 + PTR sys_fanotify_init 543 + PTR sys_32_fanotify_mark 544 + PTR sys_prlimit64 542 545 .size sys_call_table,.-sys_call_table
+20 -8
arch/mips/mm/dma-default.c
··· 44 44 45 45 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) 46 46 { 47 + gfp_t dma_flag; 48 + 47 49 /* ignore region specifiers */ 48 50 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 49 51 50 - #ifdef CONFIG_ZONE_DMA 52 + #ifdef CONFIG_ISA 51 53 if (dev == NULL) 52 - gfp |= __GFP_DMA; 53 - else if (dev->coherent_dma_mask < DMA_BIT_MASK(24)) 54 - gfp |= __GFP_DMA; 54 + dma_flag = __GFP_DMA; 55 55 else 56 56 #endif 57 - #ifdef CONFIG_ZONE_DMA32 57 + #if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA) 58 58 if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) 59 - gfp |= __GFP_DMA32; 59 + dma_flag = __GFP_DMA; 60 + else if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) 61 + dma_flag = __GFP_DMA32; 60 62 else 61 63 #endif 62 - ; 64 + #if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA) 65 + if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) 66 + dma_flag = __GFP_DMA32; 67 + else 68 + #endif 69 + #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) 70 + if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) 71 + dma_flag = __GFP_DMA; 72 + else 73 + #endif 74 + dma_flag = 0; 63 75 64 76 /* Don't invoke OOM killer */ 65 77 gfp |= __GFP_NORETRY; 66 78 67 - return gfp; 79 + return gfp | dma_flag; 68 80 } 69 81 70 82 void *dma_alloc_noncoherent(struct device *dev, size_t size,
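The rewrite above derives exactly one zone modifier from the device's coherent DMA mask, with a separate branch for each combination of configured zones, and only merges the chosen flag into gfp at the end. Here is a small sketch of the core decision for the case where both ZONE_DMA and ZONE_DMA32 exist, using a local copy of the kernel's DMA_BIT_MASK() macro and zone names in place of the gfp flags.

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* Pick one zone from the device's coherent DMA mask, mirroring the
     * CONFIG_ZONE_DMA && CONFIG_ZONE_DMA32 branch of massage_gfp_flags(). */
    static const char *zone_for_mask(uint64_t coherent_dma_mask)
    {
        if (coherent_dma_mask < DMA_BIT_MASK(32))
            return "__GFP_DMA";
        if (coherent_dma_mask < DMA_BIT_MASK(64))
            return "__GFP_DMA32";
        return "no zone modifier";
    }

    int main(void)
    {
        printf("24-bit mask -> %s\n", zone_for_mask(DMA_BIT_MASK(24)));
        printf("32-bit mask -> %s\n", zone_for_mask(DMA_BIT_MASK(32)));
        printf("64-bit mask -> %s\n", zone_for_mask(DMA_BIT_MASK(64)));
        return 0;
    }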
+1 -1
arch/mips/mm/sc-rm7k.c
··· 30 30 #define tc_lsize 32 31 31 32 32 extern unsigned long icache_way_size, dcache_way_size; 33 - unsigned long tcache_size; 33 + static unsigned long tcache_size; 34 34 35 35 #include <asm/r4kcache.h> 36 36
+3
arch/mips/mti-malta/malta-int.c
··· 385 385 */ 386 386 387 387 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK 388 + #define X GIC_UNUSED 389 + 388 390 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { 389 391 { X, X, X, X, 0 }, 390 392 { X, X, X, X, 0 }, ··· 406 404 { X, X, X, X, 0 }, 407 405 /* The remainder of this table is initialised by fill_ipi_map */ 408 406 }; 407 + #undef X 409 408 410 409 /* 411 410 * GCMP needs to be detected before any SMP initialisation
+1 -1
arch/mips/pci/pci-rc32434.c
··· 118 118 if (!((pcicvalue == PCIM_H_EA) || 119 119 (pcicvalue == PCIM_H_IA_FIX) || 120 120 (pcicvalue == PCIM_H_IA_RR))) { 121 - pr_err(KERN_ERR "PCI init error!!!\n"); 121 + pr_err("PCI init error!!!\n"); 122 122 /* Not in Host Mode, return ERROR */ 123 123 return -1; 124 124 }
+5 -15
arch/mips/pnx8550/common/reset.c
··· 22 22 */ 23 23 #include <linux/kernel.h> 24 24 25 + #include <asm/processor.h> 25 26 #include <asm/reboot.h> 26 27 #include <glb.h> 27 28 28 29 void pnx8550_machine_restart(char *command) 29 30 { 30 - char head[] = "************* Machine restart *************"; 31 - char foot[] = "*******************************************"; 32 - 33 - printk("\n\n"); 34 - printk("%s\n", head); 35 - if (command != NULL) 36 - printk("* %s\n", command); 37 - printk("%s\n", foot); 38 - 39 31 PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; 40 32 } 41 33 42 34 void pnx8550_machine_halt(void) 43 35 { 44 - printk("*** Machine halt. (Not implemented) ***\n"); 45 - } 46 - 47 - void pnx8550_machine_power_off(void) 48 - { 49 - printk("*** Machine power off. (Not implemented) ***\n"); 36 + while (1) { 37 + if (cpu_wait) 38 + cpu_wait(); 39 + } 50 40 }
+1 -2
arch/mips/pnx8550/common/setup.c
··· 44 44 extern void __init board_setup(void); 45 45 extern void pnx8550_machine_restart(char *); 46 46 extern void pnx8550_machine_halt(void); 47 - extern void pnx8550_machine_power_off(void); 48 47 extern struct resource ioport_resource; 49 48 extern struct resource iomem_resource; 50 49 extern char *prom_getcmdline(void); ··· 99 100 100 101 _machine_restart = pnx8550_machine_restart; 101 102 _machine_halt = pnx8550_machine_halt; 102 - pm_power_off = pnx8550_machine_power_off; 103 + pm_power_off = pnx8550_machine_halt; 103 104 104 105 /* Clear the Global 2 Register, PCI Inta Output Enable Registers 105 106 Bit 1:Enable DAC Powerdown
+1 -2
arch/mn10300/kernel/module.c
··· 206 206 const Elf_Shdr *sechdrs, 207 207 struct module *me) 208 208 { 209 - return module_bug_finalize(hdr, sechdrs, me); 209 + return 0; 210 210 } 211 211 212 212 /* ··· 214 214 */ 215 215 void module_arch_cleanup(struct module *mod) 216 216 { 217 - module_bug_cleanup(mod); 218 217 }
+19 -1
arch/mn10300/mm/cache.c
··· 54 54 void flush_icache_range(unsigned long start, unsigned long end) 55 55 { 56 56 #ifdef CONFIG_MN10300_CACHE_WBACK 57 - unsigned long addr, size, off; 57 + unsigned long addr, size, base, off; 58 58 struct page *page; 59 59 pgd_t *pgd; 60 60 pud_t *pud; 61 61 pmd_t *pmd; 62 62 pte_t *ppte, pte; 63 + 64 + if (end > 0x80000000UL) { 65 + /* addresses above 0xa0000000 do not go through the cache */ 66 + if (end > 0xa0000000UL) { 67 + end = 0xa0000000UL; 68 + if (start >= end) 69 + return; 70 + } 71 + 72 + /* kernel addresses between 0x80000000 and 0x9fffffff do not 73 + * require page tables, so we just map such addresses directly */ 74 + base = (start >= 0x80000000UL) ? start : 0x80000000UL; 75 + mn10300_dcache_flush_range(base, end); 76 + if (base == start) 77 + goto invalidate; 78 + end = base; 79 + } 63 80 64 81 for (; start < end; start += size) { 65 82 /* work out how much of the page to flush */ ··· 121 104 } 122 105 #endif 123 106 107 + invalidate: 124 108 mn10300_icache_inv(); 125 109 } 126 110 EXPORT_SYMBOL(flush_icache_range);
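The added block splits the flush range by address region before the per-page loop: anything at or above 0xa0000000 is skipped, the 0x80000000..0x9fffffff window is flushed directly without page-table walks, and only the remainder falls through to the existing loop. A tiny standalone sketch of that splitting is below, with printf() standing in for the dcache flush calls and the same boundary constants as the patch.

    #include <stdio.h>

    int main(void)
    {
        unsigned long start = 0x7fffe000UL, end = 0xa0004000UL;
        unsigned long s = start, e = end;

        if (e > 0xa0000000UL) {         /* uncached above here: nothing to flush */
            e = 0xa0000000UL;
            if (s >= e)
                return 0;
        }
        if (e > 0x80000000UL) {         /* directly mapped window, no page tables */
            unsigned long base = (s >= 0x80000000UL) ? s : 0x80000000UL;
            printf("flush direct:   %#lx..%#lx\n", base, e);
            e = base;
        }
        if (s < e)                      /* the rest goes through the per-page loop */
            printf("flush per-page: %#lx..%#lx\n", s, e);
        return 0;
    }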
+1 -2
arch/parisc/kernel/module.c
··· 941 941 nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; 942 942 DEBUGP("NEW num_symtab %lu\n", nsyms); 943 943 symhdr->sh_size = nsyms * sizeof(Elf_Sym); 944 - return module_bug_finalize(hdr, sechdrs, me); 944 + return 0; 945 945 } 946 946 947 947 void module_arch_cleanup(struct module *mod) 948 948 { 949 949 deregister_unwind_table(mod); 950 - module_bug_cleanup(mod); 951 950 }
-6
arch/powerpc/kernel/module.c
··· 63 63 const Elf_Shdr *sechdrs, struct module *me) 64 64 { 65 65 const Elf_Shdr *sect; 66 - int err; 67 - 68 - err = module_bug_finalize(hdr, sechdrs, me); 69 - if (err) 70 - return err; 71 66 72 67 /* Apply feature fixups */ 73 68 sect = find_section(hdr, sechdrs, "__ftr_fixup"); ··· 96 101 97 102 void module_arch_cleanup(struct module *mod) 98 103 { 99 - module_bug_cleanup(mod); 100 104 }
+1 -1
arch/powerpc/platforms/512x/clock.c
··· 57 57 int id_match = 0; 58 58 59 59 if (dev == NULL || id == NULL) 60 - return NULL; 60 + return clk; 61 61 62 62 mutex_lock(&clocks_mutex); 63 63 list_for_each_entry(p, &clocks, node) {
+6 -3
arch/powerpc/platforms/52xx/efika.c
··· 99 99 if (bus_range == NULL || len < 2 * sizeof(int)) { 100 100 printk(KERN_WARNING EFIKA_PLATFORM_NAME 101 101 ": Can't get bus-range for %s\n", pcictrl->full_name); 102 - return; 102 + goto out_put; 103 103 } 104 104 105 105 if (bus_range[1] == bus_range[0]) ··· 111 111 printk(" controlled by %s\n", pcictrl->full_name); 112 112 printk("\n"); 113 113 114 - hose = pcibios_alloc_controller(of_node_get(pcictrl)); 114 + hose = pcibios_alloc_controller(pcictrl); 115 115 if (!hose) { 116 116 printk(KERN_WARNING EFIKA_PLATFORM_NAME 117 117 ": Can't allocate PCI controller structure for %s\n", 118 118 pcictrl->full_name); 119 - return; 119 + goto out_put; 120 120 } 121 121 122 122 hose->first_busno = bus_range[0]; ··· 124 124 hose->ops = &rtas_pci_ops; 125 125 126 126 pci_process_bridge_OF_ranges(hose, pcictrl, 0); 127 + return; 128 + out_put: 129 + of_node_put(pcictrl); 127 130 } 128 131 129 132 #else
+6 -2
arch/powerpc/platforms/52xx/mpc52xx_common.c
··· 325 325 clrbits32(&simple_gpio->simple_dvo, sync | out); 326 326 clrbits8(&wkup_gpio->wkup_dvo, reset); 327 327 328 - /* wait at lease 1 us */ 329 - udelay(2); 328 + /* wait for 1 us */ 329 + udelay(1); 330 330 331 331 /* Deassert reset */ 332 332 setbits8(&wkup_gpio->wkup_dvo, reset); 333 + 334 + /* wait at least 200ns */ 335 + /* 7 ~= (200ns * timebase) / ns2sec */ 336 + __delay(7); 333 337 334 338 /* Restore pin-muxing */ 335 339 out_be32(&simple_gpio->port_config, mux);
+1 -2
arch/s390/kernel/module.c
··· 407 407 { 408 408 vfree(me->arch.syminfo); 409 409 me->arch.syminfo = NULL; 410 - return module_bug_finalize(hdr, sechdrs, me); 410 + return 0; 411 411 } 412 412 413 413 void module_arch_cleanup(struct module *mod) 414 414 { 415 - module_bug_cleanup(mod); 416 415 }
-2
arch/sh/kernel/module.c
··· 149 149 int ret = 0; 150 150 151 151 ret |= module_dwarf_finalize(hdr, sechdrs, me); 152 - ret |= module_bug_finalize(hdr, sechdrs, me); 153 152 154 153 return ret; 155 154 } 156 155 157 156 void module_arch_cleanup(struct module *mod) 158 157 { 159 - module_bug_cleanup(mod); 160 158 module_dwarf_cleanup(mod); 161 159 }
+3 -14
arch/um/drivers/net_kern.c
··· 255 255 netif_wake_queue(dev); 256 256 } 257 257 258 - static int uml_net_set_mac(struct net_device *dev, void *addr) 259 - { 260 - struct uml_net_private *lp = netdev_priv(dev); 261 - struct sockaddr *hwaddr = addr; 262 - 263 - spin_lock_irq(&lp->lock); 264 - eth_mac_addr(dev, hwaddr->sa_data); 265 - spin_unlock_irq(&lp->lock); 266 - 267 - return 0; 268 - } 269 - 270 258 static int uml_net_change_mtu(struct net_device *dev, int new_mtu) 271 259 { 272 260 dev->mtu = new_mtu; ··· 361 373 .ndo_start_xmit = uml_net_start_xmit, 362 374 .ndo_set_multicast_list = uml_net_set_multicast_list, 363 375 .ndo_tx_timeout = uml_net_tx_timeout, 364 - .ndo_set_mac_address = uml_net_set_mac, 376 + .ndo_set_mac_address = eth_mac_addr, 365 377 .ndo_change_mtu = uml_net_change_mtu, 366 378 .ndo_validate_addr = eth_validate_addr, 367 379 }; ··· 460 472 ((*transport->user->init)(&lp->user, dev) != 0)) 461 473 goto out_unregister; 462 474 463 - eth_mac_addr(dev, device->mac); 475 + /* don't use eth_mac_addr, it will not work here */ 476 + memcpy(dev->dev_addr, device->mac, ETH_ALEN); 464 477 dev->mtu = transport->user->mtu; 465 478 dev->netdev_ops = &uml_netdev_ops; 466 479 dev->ethtool_ops = &uml_net_ethtool_ops;
+1 -1
arch/x86/kernel/acpi/cstate.c
··· 61 61 unsigned int ecx; 62 62 } states[ACPI_PROCESSOR_MAX_POWER]; 63 63 }; 64 - static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ 64 + static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ 65 65 66 66 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; 67 67
+8 -3
arch/x86/kernel/apic/io_apic.c
··· 306 306 307 307 old_cfg = old_desc->chip_data; 308 308 309 - memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); 309 + cfg->vector = old_cfg->vector; 310 + cfg->move_in_progress = old_cfg->move_in_progress; 311 + cpumask_copy(cfg->domain, old_cfg->domain); 312 + cpumask_copy(cfg->old_domain, old_cfg->old_domain); 310 313 311 314 init_copy_irq_2_pin(old_cfg, cfg, node); 312 315 } 313 316 314 - static void free_irq_cfg(struct irq_cfg *old_cfg) 317 + static void free_irq_cfg(struct irq_cfg *cfg) 315 318 { 316 - kfree(old_cfg); 319 + free_cpumask_var(cfg->domain); 320 + free_cpumask_var(cfg->old_domain); 321 + kfree(cfg); 317 322 } 318 323 319 324 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
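The memcpy() above had to become a field-wise copy because irq_cfg's domain and old_domain are cpumask_var_t, which with CONFIG_CPUMASK_OFFSTACK=y are pointers to separately allocated masks: copying the struct wholesale would make the new cfg alias the old one's masks and leak its own, and free_irq_cfg() now frees those allocations as well. A minimal userspace sketch of the difference, with a plain pointer standing in for cpumask_var_t:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cfg {
        int vector;
        unsigned long *domain;          /* stands in for cpumask_var_t */
    };

    /* Field-wise copy: duplicate the mask contents, keep separate storage. */
    static void copy_cfg(struct cfg *dst, const struct cfg *src, size_t words)
    {
        dst->vector = src->vector;
        memcpy(dst->domain, src->domain, words * sizeof(*dst->domain));
    }

    int main(void)
    {
        size_t words = 2;
        struct cfg a = { 42, calloc(words, sizeof(unsigned long)) };
        struct cfg b = {  0, calloc(words, sizeof(unsigned long)) };

        if (!a.domain || !b.domain)
            return 1;

        a.domain[0] = 0xf;
        copy_cfg(&b, &a, words);        /* memcpy(&b, &a, sizeof(b)) would alias */
        printf("copied bits %#lx, storage still distinct: %s\n",
               b.domain[0], a.domain != b.domain ? "yes" : "no");

        free(a.domain);
        free(b.domain);
        return 0;
    }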
+1 -1
arch/x86/kernel/cpu/common.c
··· 545 545 } 546 546 } 547 547 548 - static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) 548 + void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) 549 549 { 550 550 u32 tfms, xlvl; 551 551 u32 ebx;
+1
arch/x86/kernel/cpu/cpu.h
··· 33 33 *const __x86_cpu_dev_end[]; 34 34 35 35 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); 36 + extern void get_cpu_cap(struct cpuinfo_x86 *c); 36 37 37 38 #endif
+12 -6
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
··· 368 368 return -ENODEV; 369 369 370 370 out_obj = output.pointer; 371 - if (out_obj->type != ACPI_TYPE_BUFFER) 372 - return -ENODEV; 371 + if (out_obj->type != ACPI_TYPE_BUFFER) { 372 + ret = -ENODEV; 373 + goto out_free; 374 + } 373 375 374 376 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); 375 - if (errors) 376 - return -ENODEV; 377 + if (errors) { 378 + ret = -ENODEV; 379 + goto out_free; 380 + } 377 381 378 382 supported = *((u32 *)(out_obj->buffer.pointer + 4)); 379 - if (!(supported & 0x1)) 380 - return -ENODEV; 383 + if (!(supported & 0x1)) { 384 + ret = -ENODEV; 385 + goto out_free; 386 + } 381 387 382 388 out_free: 383 389 kfree(output.pointer);
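The fix above reroutes the three early error returns through the function's existing out_free label, so the buffer returned by the ACPI evaluation is always freed. The same single-exit cleanup shape in a self-contained sketch, with a heap string standing in for the ACPI output buffer:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_buffer(const char *text)
    {
        int ret = 0;
        char *buf = malloc(strlen(text) + 1);   /* stands in for output.pointer */

        if (!buf)
            return -1;
        strcpy(buf, text);

        if (buf[0] != 'v') {            /* pretend validation step 1 */
            ret = -1;
            goto out_free;
        }
        if (strlen(buf) < 2) {          /* pretend validation step 2 */
            ret = -1;
            goto out_free;
        }

    out_free:                           /* every exit path frees the buffer */
        free(buf);
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", parse_buffer("valid"), parse_buffer("x"));
        return 0;
    }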
+1
arch/x86/kernel/cpu/intel.c
··· 39 39 misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; 40 40 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 41 41 c->cpuid_level = cpuid_eax(0); 42 + get_cpu_cap(c); 42 43 } 43 44 } 44 45
+5 -1
arch/x86/kernel/cpu/perf_event_p4.c
··· 660 660 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 661 661 int overflow; 662 662 663 - if (!test_bit(idx, cpuc->active_mask)) 663 + if (!test_bit(idx, cpuc->active_mask)) { 664 + /* catch in-flight IRQs */ 665 + if (__test_and_clear_bit(idx, cpuc->running)) 666 + handled++; 664 667 continue; 668 + } 665 669 666 670 event = cpuc->events[idx]; 667 671 hwc = &event->hw;
+1 -1
arch/x86/kernel/hpet.c
··· 506 506 { 507 507 unsigned int irq; 508 508 509 - irq = create_irq(); 509 + irq = create_irq_nr(0, -1); 510 510 if (!irq) 511 511 return -EINVAL; 512 512
+1 -2
arch/x86/kernel/module.c
··· 239 239 apply_paravirt(pseg, pseg + para->sh_size); 240 240 } 241 241 242 - return module_bug_finalize(hdr, sechdrs, me); 242 + return 0; 243 243 } 244 244 245 245 void module_arch_cleanup(struct module *mod) 246 246 { 247 247 alternatives_smp_module_del(mod); 248 - module_bug_cleanup(mod); 249 248 }
+1
arch/x86/oprofile/nmi_int.c
··· 674 674 case 0x0f: 675 675 case 0x16: 676 676 case 0x17: 677 + case 0x1d: 677 678 *cpu_type = "i386/core_2"; 678 679 break; 679 680 case 0x1a:
+3 -2
arch/x86/xen/time.c
··· 489 489 __init void xen_hvm_init_time_ops(void) 490 490 { 491 491 /* vector callback is needed otherwise we cannot receive interrupts 492 - * on cpu > 0 */ 493 - if (!xen_have_vector_callback && num_present_cpus() > 1) 492 + * on cpu > 0 and at this point we don't know how many cpus are 493 + * available */ 494 + if (!xen_have_vector_callback) 494 495 return; 495 496 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { 496 497 printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
+1 -1
drivers/acpi/Kconfig
··· 105 105 106 106 Be aware that using this interface can confuse your Embedded 107 107 Controller in a way that a normal reboot is not enough. You then 108 - have to power of your system, and remove the laptop battery for 108 + have to power off your system, and remove the laptop battery for 109 109 some seconds. 110 110 An Embedded Controller typically is available on laptops and reads 111 111 sensor values like battery state and temperature.
+18 -16
drivers/acpi/acpi_pad.c
··· 382 382 device_remove_file(&device->dev, &dev_attr_rrtime); 383 383 } 384 384 385 - /* Query firmware how many CPUs should be idle */ 386 - static int acpi_pad_pur(acpi_handle handle, int *num_cpus) 385 + /* 386 + * Query firmware how many CPUs should be idle 387 + * return -1 on failure 388 + */ 389 + static int acpi_pad_pur(acpi_handle handle) 387 390 { 388 391 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 389 392 union acpi_object *package; 390 - int rev, num, ret = -EINVAL; 393 + int num = -1; 391 394 392 395 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) 393 - return -EINVAL; 396 + return num; 394 397 395 398 if (!buffer.length || !buffer.pointer) 396 - return -EINVAL; 399 + return num; 397 400 398 401 package = buffer.pointer; 399 - if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) 400 - goto out; 401 - rev = package->package.elements[0].integer.value; 402 - num = package->package.elements[1].integer.value; 403 - if (rev != 1 || num < 0) 404 - goto out; 405 - *num_cpus = num; 406 - ret = 0; 407 - out: 402 + 403 + if (package->type == ACPI_TYPE_PACKAGE && 404 + package->package.count == 2 && 405 + package->package.elements[0].integer.value == 1) /* rev 1 */ 406 + 407 + num = package->package.elements[1].integer.value; 408 + 408 409 kfree(buffer.pointer); 409 - return ret; 410 + return num; 410 411 } 411 412 412 413 /* Notify firmware how many CPUs are idle */ ··· 434 433 uint32_t idle_cpus; 435 434 436 435 mutex_lock(&isolated_cpus_lock); 437 - if (acpi_pad_pur(handle, &num_cpus)) { 436 + num_cpus = acpi_pad_pur(handle); 437 + if (num_cpus < 0) { 438 438 mutex_unlock(&isolated_cpus_lock); 439 439 return; 440 440 }
+1
drivers/acpi/acpica/aclocal.h
··· 854 854 ACPI_BITMASK_POWER_BUTTON_STATUS | \ 855 855 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ 856 856 ACPI_BITMASK_RT_CLOCK_STATUS | \ 857 + ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \ 857 858 ACPI_BITMASK_WAKE_STATUS) 858 859 859 860 #define ACPI_BITMASK_TIMER_ENABLE 0x0001
+1 -1
drivers/acpi/acpica/exutils.c
··· 109 109 * 110 110 * DESCRIPTION: Reacquire the interpreter execution region from within the 111 111 * interpreter code. Failure to enter the interpreter region is a 112 - * fatal system error. Used in conjuction with 112 + * fatal system error. Used in conjunction with 113 113 * relinquish_interpreter 114 114 * 115 115 ******************************************************************************/
+1 -1
drivers/acpi/acpica/rsutils.c
··· 149 149 150 150 /* 151 151 * 16-, 32-, and 64-bit cases must use the move macros that perform 152 - * endian conversion and/or accomodate hardware that cannot perform 152 + * endian conversion and/or accommodate hardware that cannot perform 153 153 * misaligned memory transfers 154 154 */ 155 155 case ACPI_RSC_MOVE16:
+1 -1
drivers/acpi/apei/Kconfig
··· 34 34 depends on ACPI_APEI 35 35 help 36 36 ERST is a way provided by APEI to save and retrieve hardware 37 - error infomation to and from a persistent store. Enable this 37 + error information to and from a persistent store. Enable this 38 38 if you want to debugging and testing the ERST kernel support 39 39 and firmware implementation.
+16 -5
drivers/acpi/apei/apei-base.c
··· 445 445 int apei_resources_request(struct apei_resources *resources, 446 446 const char *desc) 447 447 { 448 - struct apei_res *res, *res_bak; 448 + struct apei_res *res, *res_bak = NULL; 449 449 struct resource *r; 450 + int rc; 450 451 451 - apei_resources_sub(resources, &apei_resources_all); 452 + rc = apei_resources_sub(resources, &apei_resources_all); 453 + if (rc) 454 + return rc; 452 455 456 + rc = -EINVAL; 453 457 list_for_each_entry(res, &resources->iomem, list) { 454 458 r = request_mem_region(res->start, res->end - res->start, 455 459 desc); ··· 479 475 } 480 476 } 481 477 482 - apei_resources_merge(&apei_resources_all, resources); 478 + rc = apei_resources_merge(&apei_resources_all, resources); 479 + if (rc) { 480 + pr_err(APEI_PFX "Fail to merge resources!\n"); 481 + goto err_unmap_ioport; 482 + } 483 483 484 484 return 0; 485 485 err_unmap_ioport: ··· 499 491 break; 500 492 release_mem_region(res->start, res->end - res->start); 501 493 } 502 - return -EINVAL; 494 + return rc; 503 495 } 504 496 EXPORT_SYMBOL_GPL(apei_resources_request); 505 497 506 498 void apei_resources_release(struct apei_resources *resources) 507 499 { 500 + int rc; 508 501 struct apei_res *res; 509 502 510 503 list_for_each_entry(res, &resources->iomem, list) ··· 513 504 list_for_each_entry(res, &resources->ioport, list) 514 505 release_region(res->start, res->end - res->start); 515 506 516 - apei_resources_sub(&apei_resources_all, resources); 507 + rc = apei_resources_sub(&apei_resources_all, resources); 508 + if (rc) 509 + pr_err(APEI_PFX "Fail to sub resources!\n"); 517 510 } 518 511 EXPORT_SYMBOL_GPL(apei_resources_release); 519 512
+3 -1
drivers/acpi/apei/einj.c
··· 426 426 427 427 static int einj_check_table(struct acpi_table_einj *einj_tab) 428 428 { 429 - if (einj_tab->header_length != sizeof(struct acpi_table_einj)) 429 + if ((einj_tab->header_length != 430 + (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) 431 + && (einj_tab->header_length != sizeof(struct acpi_table_einj))) 430 432 return -EINVAL; 431 433 if (einj_tab->header.length < sizeof(struct acpi_table_einj)) 432 434 return -EINVAL;
+11 -7
drivers/acpi/apei/erst-dbg.c
··· 2 2 * APEI Error Record Serialization Table debug support 3 3 * 4 4 * ERST is a way provided by APEI to save and retrieve hardware error 5 - * infomation to and from a persistent store. This file provide the 5 + * information to and from a persistent store. This file provide the 6 6 * debugging/testing support for ERST kernel support and firmware 7 7 * implementation. 8 8 * ··· 111 111 goto out; 112 112 } 113 113 if (len > erst_dbg_buf_len) { 114 - kfree(erst_dbg_buf); 114 + void *p; 115 115 rc = -ENOMEM; 116 - erst_dbg_buf = kmalloc(len, GFP_KERNEL); 117 - if (!erst_dbg_buf) 116 + p = kmalloc(len, GFP_KERNEL); 117 + if (!p) 118 118 goto out; 119 + kfree(erst_dbg_buf); 120 + erst_dbg_buf = p; 119 121 erst_dbg_buf_len = len; 120 122 goto retry; 121 123 } ··· 152 150 if (mutex_lock_interruptible(&erst_dbg_mutex)) 153 151 return -EINTR; 154 152 if (usize > erst_dbg_buf_len) { 155 - kfree(erst_dbg_buf); 153 + void *p; 156 154 rc = -ENOMEM; 157 - erst_dbg_buf = kmalloc(usize, GFP_KERNEL); 158 - if (!erst_dbg_buf) 155 + p = kmalloc(usize, GFP_KERNEL); 156 + if (!p) 159 157 goto out; 158 + kfree(erst_dbg_buf); 159 + erst_dbg_buf = p; 160 160 erst_dbg_buf_len = usize; 161 161 } 162 162 rc = copy_from_user(erst_dbg_buf, ubuf, usize);
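Both hunks above change the buffer-growing path to allocate the larger replacement first and only then free and swap out the old buffer, so a failed kmalloc() no longer leaves erst_dbg_buf pointing at freed memory. The same pattern as a standalone helper (the names here are made up for the sketch):

    #include <stdio.h>
    #include <stdlib.h>

    /* Grow *bufp to at least `want` bytes; on failure the old buffer and
     * length stay valid, exactly as in the fixed erst-dbg code. */
    static int grow_buffer(void **bufp, size_t *lenp, size_t want)
    {
        void *p;

        if (want <= *lenp)
            return 0;

        p = malloc(want);
        if (!p)
            return -1;                  /* old state untouched */

        free(*bufp);
        *bufp = p;
        *lenp = want;
        return 0;
    }

    int main(void)
    {
        void *buf = NULL;
        size_t len = 0;

        if (grow_buffer(&buf, &len, 4096) == 0)
            printf("buffer grown to %zu bytes\n", len);
        free(buf);
        return 0;
    }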
+24 -5
drivers/acpi/apei/erst.c
··· 2 2 * APEI Error Record Serialization Table support 3 3 * 4 4 * ERST is a way provided by APEI to save and retrieve hardware error 5 - * infomation to and from a persistent store. 5 + * information to and from a persistent store. 6 6 * 7 7 * For more information about ERST, please refer to ACPI Specification 8 8 * version 4.0, section 17.4. ··· 266 266 { 267 267 int rc; 268 268 u64 offset; 269 + void *src, *dst; 270 + 271 + /* ioremap does not work in interrupt context */ 272 + if (in_interrupt()) { 273 + pr_warning(ERST_PFX 274 + "MOVE_DATA can not be used in interrupt context"); 275 + return -EBUSY; 276 + } 269 277 270 278 rc = __apei_exec_read_register(entry, &offset); 271 279 if (rc) 272 280 return rc; 273 - memmove((void *)ctx->dst_base + offset, 274 - (void *)ctx->src_base + offset, 275 - ctx->var2); 281 + 282 + src = ioremap(ctx->src_base + offset, ctx->var2); 283 + if (!src) 284 + return -ENOMEM; 285 + dst = ioremap(ctx->dst_base + offset, ctx->var2); 286 + if (!dst) 287 + return -ENOMEM; 288 + 289 + memmove(dst, src, ctx->var2); 290 + 291 + iounmap(src); 292 + iounmap(dst); 276 293 277 294 return 0; 278 295 } ··· 767 750 768 751 static int erst_check_table(struct acpi_table_erst *erst_tab) 769 752 { 770 - if (erst_tab->header_length != sizeof(struct acpi_table_erst)) 753 + if ((erst_tab->header_length != 754 + (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header))) 755 + && (erst_tab->header_length != sizeof(struct acpi_table_einj))) 771 756 return -EINVAL; 772 757 if (erst_tab->header.length < sizeof(struct acpi_table_erst)) 773 758 return -EINVAL;
+1 -1
drivers/acpi/apei/ghes.c
··· 302 302 struct ghes *ghes = NULL; 303 303 int rc = -EINVAL; 304 304 305 - generic = ghes_dev->dev.platform_data; 305 + generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; 306 306 if (!generic->enabled) 307 307 return -ENODEV; 308 308
+7 -4
drivers/acpi/apei/hest.c
··· 137 137 138 138 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) 139 139 { 140 - struct acpi_hest_generic *generic; 141 140 struct platform_device *ghes_dev; 142 141 struct ghes_arr *ghes_arr = data; 143 142 int rc; 144 143 145 144 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) 146 145 return 0; 147 - generic = (struct acpi_hest_generic *)hest_hdr; 148 - if (!generic->enabled) 146 + 147 + if (!((struct acpi_hest_generic *)hest_hdr)->enabled) 149 148 return 0; 150 149 ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); 151 150 if (!ghes_dev) 152 151 return -ENOMEM; 153 - ghes_dev->dev.platform_data = generic; 152 + 153 + rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *)); 154 + if (rc) 155 + goto err; 156 + 154 157 rc = platform_device_add(ghes_dev); 155 158 if (rc) 156 159 goto err;
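Together with the ghes.c hunk above, this change stops pointing platform_data directly at the HEST table entry and instead hands the GHES platform device a copy of the pointer via platform_device_add_data(), which duplicates whatever buffer it is given; that is why ghes_probe() now reads it back through an extra level of indirection. A userspace sketch of that double-pointer round trip, with a malloc()/memcpy() helper standing in for platform_device_add_data():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct hest_entry { int source_id; };      /* stand-in for acpi_hest_generic */

    /* What platform_device_add_data() does: keep a private copy of the data. */
    static void *copy_platform_data(const void *data, size_t size)
    {
        void *p = malloc(size);
        if (p)
            memcpy(p, data, size);
        return p;
    }

    int main(void)
    {
        struct hest_entry entry = { .source_id = 7 };
        struct hest_entry *ep = &entry;

        /* producer side: store a copy of the pointer, not of the entry */
        void *pdata = copy_platform_data(&ep, sizeof(ep));
        if (!pdata)
            return 1;

        /* consumer side, as in the fixed ghes_probe() */
        struct hest_entry *generic = *(struct hest_entry **)pdata;
        printf("source_id = %d\n", generic->source_id);

        free(pdata);
        return 0;
    }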
+1 -1
drivers/acpi/atomicio.c
··· 142 142 list_add_tail_rcu(&map->list, &acpi_iomaps); 143 143 spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 144 144 145 - return vaddr + (paddr - pg_off); 145 + return map->vaddr + (paddr - map->paddr); 146 146 err_unmap: 147 147 iounmap(vaddr); 148 148 return NULL;
-1
drivers/acpi/battery.c
··· 273 273 POWER_SUPPLY_PROP_CYCLE_COUNT, 274 274 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 275 275 POWER_SUPPLY_PROP_VOLTAGE_NOW, 276 - POWER_SUPPLY_PROP_CURRENT_NOW, 277 276 POWER_SUPPLY_PROP_POWER_NOW, 278 277 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 279 278 POWER_SUPPLY_PROP_ENERGY_FULL,
+18
drivers/acpi/blacklist.c
··· 183 183 { 184 184 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 185 185 acpi_osi_setup("!Windows 2006"); 186 + acpi_osi_setup("!Windows 2006 SP1"); 187 + acpi_osi_setup("!Windows 2006 SP2"); 186 188 return 0; 187 189 } 188 190 static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) ··· 228 226 }, 229 227 }, 230 228 { 229 + .callback = dmi_disable_osi_vista, 230 + .ident = "Toshiba Satellite L355", 231 + .matches = { 232 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 233 + DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"), 234 + }, 235 + }, 236 + { 231 237 .callback = dmi_disable_osi_win7, 232 238 .ident = "ASUS K50IJ", 233 239 .matches = { 234 240 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 235 241 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), 242 + }, 243 + }, 244 + { 245 + .callback = dmi_disable_osi_vista, 246 + .ident = "Toshiba P305D", 247 + .matches = { 248 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 249 + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"), 236 250 }, 237 251 }, 238 252
+5 -13
drivers/acpi/bus.c
··· 55 55 static int set_power_nocheck(const struct dmi_system_id *id) 56 56 { 57 57 printk(KERN_NOTICE PREFIX "%s detected - " 58 - "disable power check in power transistion\n", id->ident); 58 + "disable power check in power transition\n", id->ident); 59 59 acpi_power_nocheck = 1; 60 60 return 0; 61 61 } ··· 80 80 81 81 static struct dmi_system_id dsdt_dmi_table[] __initdata = { 82 82 /* 83 - * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. 83 + * Invoke DSDT corruption work-around on all Toshiba Satellite. 84 84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679 85 85 */ 86 86 { 87 87 .callback = set_copy_dsdt, 88 - .ident = "TOSHIBA Satellite A505", 88 + .ident = "TOSHIBA Satellite", 89 89 .matches = { 90 90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 91 - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), 92 - }, 93 - }, 94 - { 95 - .callback = set_copy_dsdt, 96 - .ident = "TOSHIBA Satellite L505D", 97 - .matches = { 98 - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 99 - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"), 91 + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), 100 92 }, 101 93 }, 102 94 {} ··· 1019 1027 1020 1028 /* 1021 1029 * If the laptop falls into the DMI check table, the power state check 1022 - * will be disabled in the course of device power transistion. 1030 + * will be disabled in the course of device power transition. 1023 1031 */ 1024 1032 dmi_check_system(power_nocheck_dmi_table); 1025 1033
+2
drivers/acpi/fan.c
··· 369 369 370 370 acpi_bus_unregister_driver(&acpi_fan_driver); 371 371 372 + #ifdef CONFIG_ACPI_PROCFS 372 373 remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); 374 + #endif 373 375 374 376 return; 375 377 }
-6
drivers/acpi/processor_core.c
··· 29 29 30 30 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { 31 31 { 32 - set_no_mwait, "IFL91 board", { 33 - DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), 34 - DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), 35 - DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), 36 - DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, 37 - { 38 32 set_no_mwait, "Extensa 5220", { 39 33 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 40 34 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+1 -1
drivers/acpi/processor_driver.c
··· 850 850 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 851 851 acpi_idle_driver.name); 852 852 } else { 853 - printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", 853 + printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", 854 854 cpuidle_get_driver()->name); 855 855 } 856 856
+2 -2
drivers/acpi/processor_perflib.c
··· 447 447 if (!try_module_get(calling_module)) 448 448 return -EINVAL; 449 449 450 - /* is_done is set to negative if an error occured, 451 - * and to postitive if _no_ error occured, but SMM 450 + /* is_done is set to negative if an error occurred, 451 + * and to postitive if _no_ error occurred, but SMM 452 452 * was already notified. This avoids double notification 453 453 * which might lead to unexpected results... 454 454 */
+22
drivers/acpi/sleep.c
··· 363 363 return 0; 364 364 } 365 365 366 + static int __init init_nvs_nosave(const struct dmi_system_id *d) 367 + { 368 + acpi_nvs_nosave(); 369 + return 0; 370 + } 371 + 366 372 static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 367 373 { 368 374 .callback = init_old_suspend_ordering, ··· 401 395 DMI_MATCH(DMI_BOARD_VENDOR, 402 396 "Matsushita Electric Industrial Co.,Ltd."), 403 397 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 398 + }, 399 + }, 400 + { 401 + .callback = init_nvs_nosave, 402 + .ident = "Sony Vaio VGN-SR11M", 403 + .matches = { 404 + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 405 + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), 406 + }, 407 + }, 408 + { 409 + .callback = init_nvs_nosave, 410 + .ident = "Everex StepNote Series", 411 + .matches = { 412 + DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), 413 + DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), 404 414 }, 405 415 }, 406 416 {},
+14 -6
drivers/acpi/sysfs.c
··· 100 100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 101 101 }; 102 102 103 - static int param_get_debug_layer(char *buffer, struct kernel_param *kp) 103 + static int param_get_debug_layer(char *buffer, const struct kernel_param *kp) 104 104 { 105 105 int result = 0; 106 106 int i; ··· 128 128 return result; 129 129 } 130 130 131 - static int param_get_debug_level(char *buffer, struct kernel_param *kp) 131 + static int param_get_debug_level(char *buffer, const struct kernel_param *kp) 132 132 { 133 133 int result = 0; 134 134 int i; ··· 149 149 return result; 150 150 } 151 151 152 - module_param_call(debug_layer, param_set_uint, param_get_debug_layer, 153 - &acpi_dbg_layer, 0644); 154 - module_param_call(debug_level, param_set_uint, param_get_debug_level, 155 - &acpi_dbg_level, 0644); 152 + static struct kernel_param_ops param_ops_debug_layer = { 153 + .set = param_set_uint, 154 + .get = param_get_debug_layer, 155 + }; 156 + 157 + static struct kernel_param_ops param_ops_debug_level = { 158 + .set = param_set_uint, 159 + .get = param_get_debug_level, 160 + }; 161 + 162 + module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644); 163 + module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644); 156 164 157 165 static char trace_method_name[6]; 158 166 module_param_string(trace_method_name, trace_method_name, 6, 0644);
+2 -2
drivers/acpi/video_detect.c
··· 59 59 "support\n")); 60 60 *cap |= ACPI_VIDEO_BACKLIGHT; 61 61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) 62 - printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " 63 - "control misses _BQC function\n"); 62 + printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " 63 + "cannot determine initial brightness\n"); 64 64 /* We have backlight support, no need to scan further */ 65 65 return AE_CTRL_TERMINATE; 66 66 }
+1 -1
drivers/cpuidle/governors/menu.c
··· 80 80 * Limiting Performance Impact 81 81 * --------------------------- 82 82 * C states, especially those with large exit latencies, can have a real 83 - * noticable impact on workloads, which is not acceptable for most sysadmins, 83 + * noticeable impact on workloads, which is not acceptable for most sysadmins, 84 84 * and in addition, less performance has a power price of its own. 85 85 * 86 86 * As a general rule of thumb, menu assumes that the following heuristic
+2 -1
drivers/dma/shdma.c
··· 580 580 581 581 sh_chan = to_sh_chan(chan); 582 582 param = chan->private; 583 - slave_addr = param->config->addr; 584 583 585 584 /* Someone calling slave DMA on a public channel? */ 586 585 if (!param || !sg_len) { ··· 587 588 __func__, param, sg_len, param ? param->slave_id : -1); 588 589 return NULL; 589 590 } 591 + 592 + slave_addr = param->config->addr; 590 593 591 594 /* 592 595 * if (param != NULL), this is a successfully requested slave channel,
+1
drivers/edac/i7core_edac.c
··· 1140 1140 ATTR_COUNTER(0), 1141 1141 ATTR_COUNTER(1), 1142 1142 ATTR_COUNTER(2), 1143 + { .attr = { .name = NULL } } 1143 1144 }; 1144 1145 1145 1146 static struct mcidev_sysfs_group i7core_udimm_counters = {
+10 -29
drivers/gpu/drm/drm_gem.c
··· 148 148 return -ENOMEM; 149 149 150 150 kref_init(&obj->refcount); 151 - kref_init(&obj->handlecount); 151 + atomic_set(&obj->handle_count, 0); 152 152 obj->size = size; 153 153 154 154 atomic_inc(&dev->object_count); ··· 462 462 } 463 463 EXPORT_SYMBOL(drm_gem_object_free); 464 464 465 - /** 466 - * Called after the last reference to the object has been lost. 467 - * Must be called without holding struct_mutex 468 - * 469 - * Frees the object 470 - */ 471 - void 472 - drm_gem_object_free_unlocked(struct kref *kref) 473 - { 474 - struct drm_gem_object *obj = (struct drm_gem_object *) kref; 475 - struct drm_device *dev = obj->dev; 476 - 477 - if (dev->driver->gem_free_object_unlocked != NULL) 478 - dev->driver->gem_free_object_unlocked(obj); 479 - else if (dev->driver->gem_free_object != NULL) { 480 - mutex_lock(&dev->struct_mutex); 481 - dev->driver->gem_free_object(obj); 482 - mutex_unlock(&dev->struct_mutex); 483 - } 484 - } 485 - EXPORT_SYMBOL(drm_gem_object_free_unlocked); 486 - 487 465 static void drm_gem_object_ref_bug(struct kref *list_kref) 488 466 { 489 467 BUG(); ··· 474 496 * called before drm_gem_object_free or we'll be touching 475 497 * freed memory 476 498 */ 477 - void 478 - drm_gem_object_handle_free(struct kref *kref) 499 + void drm_gem_object_handle_free(struct drm_gem_object *obj) 479 500 { 480 - struct drm_gem_object *obj = container_of(kref, 481 - struct drm_gem_object, 482 - handlecount); 483 501 struct drm_device *dev = obj->dev; 484 502 485 503 /* Remove any name for this object */ ··· 502 528 struct drm_gem_object *obj = vma->vm_private_data; 503 529 504 530 drm_gem_object_reference(obj); 531 + 532 + mutex_lock(&obj->dev->struct_mutex); 533 + drm_vm_open_locked(vma); 534 + mutex_unlock(&obj->dev->struct_mutex); 505 535 } 506 536 EXPORT_SYMBOL(drm_gem_vm_open); 507 537 ··· 513 535 { 514 536 struct drm_gem_object *obj = vma->vm_private_data; 515 537 516 - drm_gem_object_unreference_unlocked(obj); 538 + mutex_lock(&obj->dev->struct_mutex); 539 + drm_vm_close_locked(vma); 540 + drm_gem_object_unreference(obj); 541 + mutex_unlock(&obj->dev->struct_mutex); 517 542 } 518 543 EXPORT_SYMBOL(drm_gem_vm_close); 519 544
+1 -1
drivers/gpu/drm/drm_info.c
··· 255 255 256 256 seq_printf(m, "%6d %8zd %7d %8d\n", 257 257 obj->name, obj->size, 258 - atomic_read(&obj->handlecount.refcount), 258 + atomic_read(&obj->handle_count), 259 259 atomic_read(&obj->refcount.refcount)); 260 260 return 0; 261 261 }
+20 -12
drivers/gpu/drm/drm_vm.c
··· 433 433 mutex_unlock(&dev->struct_mutex); 434 434 } 435 435 436 + void drm_vm_close_locked(struct vm_area_struct *vma) 437 + { 438 + struct drm_file *priv = vma->vm_file->private_data; 439 + struct drm_device *dev = priv->minor->dev; 440 + struct drm_vma_entry *pt, *temp; 441 + 442 + DRM_DEBUG("0x%08lx,0x%08lx\n", 443 + vma->vm_start, vma->vm_end - vma->vm_start); 444 + atomic_dec(&dev->vma_count); 445 + 446 + list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 447 + if (pt->vma == vma) { 448 + list_del(&pt->head); 449 + kfree(pt); 450 + break; 451 + } 452 + } 453 + } 454 + 436 455 /** 437 456 * \c close method for all virtual memory types. 438 457 * ··· 464 445 { 465 446 struct drm_file *priv = vma->vm_file->private_data; 466 447 struct drm_device *dev = priv->minor->dev; 467 - struct drm_vma_entry *pt, *temp; 468 - 469 - DRM_DEBUG("0x%08lx,0x%08lx\n", 470 - vma->vm_start, vma->vm_end - vma->vm_start); 471 - atomic_dec(&dev->vma_count); 472 448 473 449 mutex_lock(&dev->struct_mutex); 474 - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 475 - if (pt->vma == vma) { 476 - list_del(&pt->head); 477 - kfree(pt); 478 - break; 479 - } 480 - } 450 + drm_vm_close_locked(vma); 481 451 mutex_unlock(&dev->struct_mutex); 482 452 } 483 453
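The new drm_vm_close_locked() follows the usual _locked-variant convention: the bookkeeping moves into a helper that assumes dev->struct_mutex is already held, and drm_vm_close() becomes a thin lock/call/unlock wrapper, which lets drm_gem_vm_close() in the drm_gem.c hunk earlier do the same work while it already holds the mutex. A minimal pthread sketch of that shape:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int vma_count;

    /* caller must hold struct_mutex */
    static void vm_close_locked(void)
    {
        vma_count--;
    }

    /* public entry point: take the lock, then do the locked work */
    static void vm_close(void)
    {
        pthread_mutex_lock(&struct_mutex);
        vm_close_locked();
        pthread_mutex_unlock(&struct_mutex);
    }

    int main(void)
    {
        vma_count = 1;
        vm_close();
        printf("vma_count = %d\n", vma_count);
        return 0;
    }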
+1 -1
drivers/gpu/drm/i810/i810_dma.c
··· 116 116 static const struct file_operations i810_buffer_fops = { 117 117 .open = drm_open, 118 118 .release = drm_release, 119 - .unlocked_ioctl = drm_ioctl, 119 + .unlocked_ioctl = i810_ioctl, 120 120 .mmap = i810_mmap_buffers, 121 121 .fasync = drm_fasync, 122 122 };
+1 -1
drivers/gpu/drm/i830/i830_dma.c
··· 118 118 static const struct file_operations i830_buffer_fops = { 119 119 .open = drm_open, 120 120 .release = drm_release, 121 - .unlocked_ioctl = drm_ioctl, 121 + .unlocked_ioctl = i830_ioctl, 122 122 .mmap = i830_mmap_buffers, 123 123 .fasync = drm_fasync, 124 124 };
+3 -3
drivers/gpu/drm/i915/i915_dma.c
··· 1787 1787 } 1788 1788 } 1789 1789 1790 - div_u64(diff, diff1); 1790 + diff = div_u64(diff, diff1); 1791 1791 ret = ((m * diff) + c); 1792 - div_u64(ret, 10); 1792 + ret = div_u64(ret, 10); 1793 1793 1794 1794 dev_priv->last_count1 = total_count; 1795 1795 dev_priv->last_time1 = now; ··· 1858 1858 1859 1859 /* More magic constants... */ 1860 1860 diff = diff * 1181; 1861 - div_u64(diff, diffms * 10); 1861 + diff = div_u64(diff, diffms * 10); 1862 1862 dev_priv->gfx_power = diff; 1863 1863 } 1864 1864
+28 -24
drivers/gpu/drm/i915/i915_gem.c
··· 136 136 return -ENOMEM; 137 137 138 138 ret = drm_gem_handle_create(file_priv, obj, &handle); 139 + /* drop reference from allocate - handle holds it now */ 140 + drm_gem_object_unreference_unlocked(obj); 139 141 if (ret) { 140 - drm_gem_object_unreference_unlocked(obj); 141 142 return ret; 142 143 } 143 - 144 - /* Sink the floating reference from kref_init(handlecount) */ 145 - drm_gem_object_handle_unreference_unlocked(obj); 146 144 147 145 args->handle = handle; 148 146 return 0; ··· 469 471 return -ENOENT; 470 472 obj_priv = to_intel_bo(obj); 471 473 472 - /* Bounds check source. 473 - * 474 - * XXX: This could use review for overflow issues... 475 - */ 476 - if (args->offset > obj->size || args->size > obj->size || 477 - args->offset + args->size > obj->size) { 478 - drm_gem_object_unreference_unlocked(obj); 479 - return -EINVAL; 474 + /* Bounds check source. */ 475 + if (args->offset > obj->size || args->size > obj->size - args->offset) { 476 + ret = -EINVAL; 477 + goto err; 478 + } 479 + 480 + if (!access_ok(VERIFY_WRITE, 481 + (char __user *)(uintptr_t)args->data_ptr, 482 + args->size)) { 483 + ret = -EFAULT; 484 + goto err; 480 485 } 481 486 482 487 if (i915_gem_object_needs_bit17_swizzle(obj)) { ··· 491 490 file_priv); 492 491 } 493 492 493 + err: 494 494 drm_gem_object_unreference_unlocked(obj); 495 - 496 495 return ret; 497 496 } 498 497 ··· 581 580 582 581 user_data = (char __user *) (uintptr_t) args->data_ptr; 583 582 remain = args->size; 584 - if (!access_ok(VERIFY_READ, user_data, remain)) 585 - return -EFAULT; 586 583 587 584 588 585 mutex_lock(&dev->struct_mutex); ··· 933 934 return -ENOENT; 934 935 obj_priv = to_intel_bo(obj); 935 936 936 - /* Bounds check destination. 937 - * 938 - * XXX: This could use review for overflow issues... 939 - */ 940 - if (args->offset > obj->size || args->size > obj->size || 941 - args->offset + args->size > obj->size) { 942 - drm_gem_object_unreference_unlocked(obj); 943 - return -EINVAL; 937 + /* Bounds check destination. */ 938 + if (args->offset > obj->size || args->size > obj->size - args->offset) { 939 + ret = -EINVAL; 940 + goto err; 941 + } 942 + 943 + if (!access_ok(VERIFY_READ, 944 + (char __user *)(uintptr_t)args->data_ptr, 945 + args->size)) { 946 + ret = -EFAULT; 947 + goto err; 944 948 } 945 949 946 950 /* We can only do the GTT pwrite on untiled buffers, as otherwise ··· 977 975 DRM_INFO("pwrite failed %d\n", ret); 978 976 #endif 979 977 978 + err: 980 979 drm_gem_object_unreference_unlocked(obj); 981 - 982 980 return ret; 983 981 } 984 982 ··· 3260 3258 (int) reloc->offset, 3261 3259 reloc->read_domains, 3262 3260 reloc->write_domain); 3261 + drm_gem_object_unreference(target_obj); 3262 + i915_gem_object_unpin(obj); 3263 3263 return -EINVAL; 3264 3264 } 3265 3265 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
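The bounds checks above are rewritten into an overflow-safe form: with unsigned arguments, "offset + size > obj->size" can wrap around and accept a bogus request, while testing "size > obj->size - offset" after validating the offset cannot. A short standalone demonstration of the difference:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool in_bounds_unsafe(uint64_t off, uint64_t len, uint64_t size)
    {
        return off + len <= size;       /* off + len may wrap to a small value */
    }

    static bool in_bounds_safe(uint64_t off, uint64_t len, uint64_t size)
    {
        return off <= size && len <= size - off;
    }

    int main(void)
    {
        uint64_t size = 4096, off = 16, len = UINT64_MAX - 8;

        printf("unsafe check accepts the bogus request: %d\n",
               in_bounds_unsafe(off, len, size));   /* 1: the addition wrapped */
        printf("safe check rejects it:                 %d\n",
               !in_bounds_safe(off, len, size));    /* 1 */
        return 0;
    }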
+22 -27
drivers/gpu/drm/i915/i915_gem_evict.c
··· 93 93 { 94 94 drm_i915_private_t *dev_priv = dev->dev_private; 95 95 struct list_head eviction_list, unwind_list; 96 - struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; 96 + struct drm_i915_gem_object *obj_priv; 97 97 struct list_head *render_iter, *bsd_iter; 98 98 int ret = 0; 99 99 ··· 175 175 return -ENOSPC; 176 176 177 177 found: 178 + /* drm_mm doesn't allow any other other operations while 179 + * scanning, therefore store to be evicted objects on a 180 + * temporary list. */ 178 181 INIT_LIST_HEAD(&eviction_list); 179 - list_for_each_entry_safe(obj_priv, tmp_obj_priv, 180 - &unwind_list, evict_list) { 182 + while (!list_empty(&unwind_list)) { 183 + obj_priv = list_first_entry(&unwind_list, 184 + struct drm_i915_gem_object, 185 + evict_list); 181 186 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 182 - /* drm_mm doesn't allow any other other operations while 183 - * scanning, therefore store to be evicted objects on a 184 - * temporary list. */ 185 187 list_move(&obj_priv->evict_list, &eviction_list); 186 - } else 187 - drm_gem_object_unreference(&obj_priv->base); 188 - } 189 - 190 - /* Unbinding will emit any required flushes */ 191 - list_for_each_entry_safe(obj_priv, tmp_obj_priv, 192 - &eviction_list, evict_list) { 193 - #if WATCH_LRU 194 - DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base); 195 - #endif 196 - ret = i915_gem_object_unbind(&obj_priv->base); 197 - if (ret) 198 - return ret; 199 - 188 + continue; 189 + } 190 + list_del(&obj_priv->evict_list); 200 191 drm_gem_object_unreference(&obj_priv->base); 201 192 } 202 193 203 - /* The just created free hole should be on the top of the free stack 204 - * maintained by drm_mm, so this BUG_ON actually executes in O(1). 205 - * Furthermore all accessed data has just recently been used, so it 206 - * should be really fast, too. */ 207 - BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, 208 - alignment, 0)); 194 + /* Unbinding will emit any required flushes */ 195 + while (!list_empty(&eviction_list)) { 196 + obj_priv = list_first_entry(&eviction_list, 197 + struct drm_i915_gem_object, 198 + evict_list); 199 + if (ret == 0) 200 + ret = i915_gem_object_unbind(&obj_priv->base); 201 + list_del(&obj_priv->evict_list); 202 + drm_gem_object_unreference(&obj_priv->base); 203 + } 209 204 210 - return 0; 205 + return ret; 211 206 } 212 207 213 208 int
+36 -22
drivers/gpu/drm/i915/intel_display.c
··· 1013 1013 DRM_DEBUG_KMS("vblank wait timed out\n"); 1014 1014 } 1015 1015 1016 - /** 1017 - * intel_wait_for_vblank_off - wait for vblank after disabling a pipe 1016 + /* 1017 + * intel_wait_for_pipe_off - wait for pipe to turn off 1018 1018 * @dev: drm device 1019 1019 * @pipe: pipe to wait for 1020 1020 * ··· 1022 1022 * spinning on the vblank interrupt status bit, since we won't actually 1023 1023 * see an interrupt when the pipe is disabled. 1024 1024 * 1025 - * So this function waits for the display line value to settle (it 1026 - * usually ends up stopping at the start of the next frame). 1025 + * On Gen4 and above: 1026 + * wait for the pipe register state bit to turn off 1027 + * 1028 + * Otherwise: 1029 + * wait for the display line value to settle (it usually 1030 + * ends up stopping at the start of the next frame). 1031 + * 1027 1032 */ 1028 - void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) 1033 + static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 1029 1034 { 1030 1035 struct drm_i915_private *dev_priv = dev->dev_private; 1031 - int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1032 - unsigned long timeout = jiffies + msecs_to_jiffies(100); 1033 - u32 last_line; 1034 1036 1035 - /* Wait for the display line to settle */ 1036 - do { 1037 - last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1038 - mdelay(5); 1039 - } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1040 - time_after(timeout, jiffies)); 1037 + if (INTEL_INFO(dev)->gen >= 4) { 1038 + int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); 1041 1039 1042 - if (time_after(jiffies, timeout)) 1043 - DRM_DEBUG_KMS("vblank wait timed out\n"); 1040 + /* Wait for the Pipe State to go off */ 1041 + if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 1042 + 100, 0)) 1043 + DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1044 + } else { 1045 + u32 last_line; 1046 + int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1047 + unsigned long timeout = jiffies + msecs_to_jiffies(100); 1048 + 1049 + /* Wait for the display line to settle */ 1050 + do { 1051 + last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1052 + mdelay(5); 1053 + } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1054 + time_after(timeout, jiffies)); 1055 + if (time_after(jiffies, timeout)) 1056 + DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1057 + } 1044 1058 } 1045 1059 1046 1060 /* Parameters have changed, update FBC info */ ··· 2342 2328 I915_READ(dspbase_reg); 2343 2329 } 2344 2330 2345 - /* Wait for vblank for the disable to take effect */ 2346 - intel_wait_for_vblank_off(dev, pipe); 2347 - 2348 2331 /* Don't disable pipe A or pipe A PLLs if needed */ 2349 2332 if (pipeconf_reg == PIPEACONF && 2350 - (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 2333 + (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { 2334 + /* Wait for vblank for the disable to take effect */ 2335 + intel_wait_for_vblank(dev, pipe); 2351 2336 goto skip_pipe_off; 2337 + } 2352 2338 2353 2339 /* Next, disable display pipes */ 2354 2340 temp = I915_READ(pipeconf_reg); ··· 2357 2343 I915_READ(pipeconf_reg); 2358 2344 } 2359 2345 2360 - /* Wait for vblank for the disable to take effect. */ 2361 - intel_wait_for_vblank_off(dev, pipe); 2346 + /* Wait for the pipe to turn off */ 2347 + intel_wait_for_pipe_off(dev, pipe); 2362 2348 2363 2349 temp = I915_READ(dpll_reg); 2364 2350 if ((temp & DPLL_VCO_ENABLE) != 0) {
+9 -10
drivers/gpu/drm/i915/intel_dp.c
··· 1138 1138 intel_dp_set_link_train(struct intel_dp *intel_dp, 1139 1139 uint32_t dp_reg_value, 1140 1140 uint8_t dp_train_pat, 1141 - uint8_t train_set[4], 1142 - bool first) 1141 + uint8_t train_set[4]) 1143 1142 { 1144 1143 struct drm_device *dev = intel_dp->base.enc.dev; 1145 1144 struct drm_i915_private *dev_priv = dev->dev_private; 1146 - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); 1147 1145 int ret; 1148 1146 1149 1147 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1150 1148 POSTING_READ(intel_dp->output_reg); 1151 - if (first) 1152 - intel_wait_for_vblank(dev, intel_crtc->pipe); 1153 1149 1154 1150 intel_dp_aux_native_write_1(intel_dp, 1155 1151 DP_TRAINING_PATTERN_SET, ··· 1170 1174 uint8_t voltage; 1171 1175 bool clock_recovery = false; 1172 1176 bool channel_eq = false; 1173 - bool first = true; 1174 1177 int tries; 1175 1178 u32 reg; 1176 1179 uint32_t DP = intel_dp->DP; 1180 + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); 1181 + 1182 + /* Enable output, wait for it to become active */ 1183 + I915_WRITE(intel_dp->output_reg, intel_dp->DP); 1184 + POSTING_READ(intel_dp->output_reg); 1185 + intel_wait_for_vblank(dev, intel_crtc->pipe); 1177 1186 1178 1187 /* Write the link configuration data */ 1179 1188 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, ··· 1211 1210 reg = DP | DP_LINK_TRAIN_PAT_1; 1212 1211 1213 1212 if (!intel_dp_set_link_train(intel_dp, reg, 1214 - DP_TRAINING_PATTERN_1, train_set, first)) 1213 + DP_TRAINING_PATTERN_1, train_set)) 1215 1214 break; 1216 - first = false; 1217 1215 /* Set training pattern 1 */ 1218 1216 1219 1217 udelay(100); ··· 1266 1266 1267 1267 /* channel eq pattern */ 1268 1268 if (!intel_dp_set_link_train(intel_dp, reg, 1269 - DP_TRAINING_PATTERN_2, train_set, 1270 - false)) 1269 + DP_TRAINING_PATTERN_2, train_set)) 1271 1270 break; 1272 1271 1273 1272 udelay(400);
-1
drivers/gpu/drm/i915/intel_drv.h
··· 229 229 struct drm_crtc *crtc); 230 230 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 231 231 struct drm_file *file_priv); 232 - extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); 233 232 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 234 233 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 235 234 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+3 -1
drivers/gpu/drm/i915/intel_fb.c
··· 237 237 drm_fb_helper_fini(&ifbdev->helper); 238 238 239 239 drm_framebuffer_cleanup(&ifb->base); 240 - if (ifb->obj) 240 + if (ifb->obj) { 241 + drm_gem_object_handle_unreference(ifb->obj); 241 242 drm_gem_object_unreference(ifb->obj); 243 + } 242 244 243 245 return 0; 244 246 }
+1
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 352 352 353 353 if (nouveau_fb->nvbo) { 354 354 nouveau_bo_unmap(nouveau_fb->nvbo); 355 + drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem); 355 356 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 356 357 nouveau_fb->nvbo = NULL; 357 358 }
+2 -4
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 167 167 goto out; 168 168 169 169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 170 + /* drop reference from allocate - handle holds it now */ 171 + drm_gem_object_unreference_unlocked(nvbo->gem); 170 172 out: 171 - drm_gem_object_handle_unreference_unlocked(nvbo->gem); 172 - 173 - if (ret) 174 - drm_gem_object_unreference_unlocked(nvbo->gem); 175 173 return ret; 176 174 } 177 175
+1
drivers/gpu/drm/nouveau/nouveau_notifier.c
··· 79 79 mutex_lock(&dev->struct_mutex); 80 80 nouveau_bo_unpin(chan->notifier_bo); 81 81 mutex_unlock(&dev->struct_mutex); 82 + drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem); 82 83 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); 83 84 drm_mm_takedown(&chan->notifier_heap); 84 85 }
+2 -1
drivers/gpu/drm/radeon/r600.c
··· 3528 3528 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3529 3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3530 3530 */ 3531 - if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 3531 + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 3532 + rdev->vram_scratch.ptr) { 3532 3533 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3533 3534 u32 tmp; 3534 3535
+9
drivers/gpu/drm/radeon/radeon_atombios.c
··· 317 317 *connector_type = DRM_MODE_CONNECTOR_DVID; 318 318 } 319 319 320 + /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ 321 + if ((dev->pdev->device == 0x796e) && 322 + (dev->pdev->subsystem_vendor == 0x1462) && 323 + (dev->pdev->subsystem_device == 0x7302)) { 324 + if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || 325 + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) 326 + return false; 327 + } 328 + 320 329 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 321 330 if ((dev->pdev->device == 0x7941) && 322 331 (dev->pdev->subsystem_vendor == 0x147b) &&
+4 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 349 349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); 350 350 if (devices & ATOM_DEVICE_DFP5_SUPPORT) 351 351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); 352 + if (devices & ATOM_DEVICE_DFP6_SUPPORT) 353 + DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); 352 354 if (devices & ATOM_DEVICE_TV1_SUPPORT) 353 355 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); 354 356 if (devices & ATOM_DEVICE_CV_SUPPORT) ··· 843 841 { 844 842 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 845 843 846 - if (radeon_fb->obj) 844 + if (radeon_fb->obj) { 847 845 drm_gem_object_unreference_unlocked(radeon_fb->obj); 846 + } 848 847 drm_framebuffer_cleanup(fb); 849 848 kfree(radeon_fb); 850 849 }
+4 -10
drivers/gpu/drm/radeon/radeon_fb.c
··· 94 94 ret = radeon_bo_reserve(rbo, false); 95 95 if (likely(ret == 0)) { 96 96 radeon_bo_kunmap(rbo); 97 + radeon_bo_unpin(rbo); 97 98 radeon_bo_unreserve(rbo); 98 99 } 100 + drm_gem_object_handle_unreference(gobj); 99 101 drm_gem_object_unreference_unlocked(gobj); 100 102 } 101 103 ··· 327 325 { 328 326 struct fb_info *info; 329 327 struct radeon_framebuffer *rfb = &rfbdev->rfb; 330 - struct radeon_bo *rbo; 331 - int r; 332 328 333 329 if (rfbdev->helper.fbdev) { 334 330 info = rfbdev->helper.fbdev; ··· 338 338 } 339 339 340 340 if (rfb->obj) { 341 - rbo = rfb->obj->driver_private; 342 - r = radeon_bo_reserve(rbo, false); 343 - if (likely(r == 0)) { 344 - radeon_bo_kunmap(rbo); 345 - radeon_bo_unpin(rbo); 346 - radeon_bo_unreserve(rbo); 347 - } 348 - drm_gem_object_unreference_unlocked(rfb->obj); 341 + radeonfb_destroy_pinned_object(rfb->obj); 342 + rfb->obj = NULL; 349 343 } 350 344 drm_fb_helper_fini(&rfbdev->helper); 351 345 drm_framebuffer_cleanup(&rfb->base);
+2 -2
drivers/gpu/drm/radeon/radeon_gem.c
··· 201 201 return r; 202 202 } 203 203 r = drm_gem_handle_create(filp, gobj, &handle); 204 + /* drop reference from allocate - handle holds it now */ 205 + drm_gem_object_unreference_unlocked(gobj); 204 206 if (r) { 205 - drm_gem_object_unreference_unlocked(gobj); 206 207 return r; 207 208 } 208 - drm_gem_object_handle_unreference_unlocked(gobj); 209 209 args->handle = handle; 210 210 return 0; 211 211 }
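The i915, nouveau and radeon hunks above converge on one reference-ownership rule for GEM object creation: the allocation leaves the object holding the creator's reference, drm_gem_handle_create() takes its own reference for the handle, and the ioctl then drops the creator's reference unconditionally, whether or not handle creation succeeded. A simplified, hedged sketch of that rule with generic names (not the DRM API):

    #include <stdlib.h>

    struct object { int refcount; };

    static struct object *object_alloc(void)
    {
        struct object *o = calloc(1, sizeof(*o));
        if (o)
            o->refcount = 1;                /* creator's reference */
        return o;
    }

    static void object_ref(struct object *o)   { o->refcount++; }
    static void object_unref(struct object *o) { if (--o->refcount == 0) free(o); }

    /* takes its own reference on success, like drm_gem_handle_create() */
    static int handle_create(struct object *o, int *handle)
    {
        object_ref(o);
        *handle = 1;
        return 0;
    }

    static int create_ioctl(int *handle)
    {
        struct object *o = object_alloc();
        int ret;

        if (!o)
            return -1;
        ret = handle_create(o, handle);
        /* always drop the creator's reference; on success the handle now
         * holds the only remaining reference, on failure this frees o */
        object_unref(o);
        return ret;
    }

    int main(void)
    {
        int handle;
        return create_ioctl(&handle);
    }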
+110 -35
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 148 148 {0, 0, 0} 149 149 }; 150 150 151 - static char *vmw_devname = "vmwgfx"; 151 + static int enable_fbdev; 152 152 153 153 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 154 154 static void vmw_master_init(struct vmw_master *); 155 155 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 156 156 void *ptr); 157 + 158 + MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 159 + module_param_named(enable_fbdev, enable_fbdev, int, 0600); 157 160 158 161 static void vmw_print_capabilities(uint32_t capabilities) 159 162 { ··· 195 192 { 196 193 int ret; 197 194 198 - vmw_kms_save_vga(dev_priv); 199 - 200 195 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 201 196 if (unlikely(ret != 0)) { 202 197 DRM_ERROR("Unable to initialize FIFO.\n"); ··· 207 206 static void vmw_release_device(struct vmw_private *dev_priv) 208 207 { 209 208 vmw_fifo_release(dev_priv, &dev_priv->fifo); 210 - vmw_kms_restore_vga(dev_priv); 211 209 } 212 210 211 + int vmw_3d_resource_inc(struct vmw_private *dev_priv) 212 + { 213 + int ret = 0; 214 + 215 + mutex_lock(&dev_priv->release_mutex); 216 + if (unlikely(dev_priv->num_3d_resources++ == 0)) { 217 + ret = vmw_request_device(dev_priv); 218 + if (unlikely(ret != 0)) 219 + --dev_priv->num_3d_resources; 220 + } 221 + mutex_unlock(&dev_priv->release_mutex); 222 + return ret; 223 + } 224 + 225 + 226 + void vmw_3d_resource_dec(struct vmw_private *dev_priv) 227 + { 228 + int32_t n3d; 229 + 230 + mutex_lock(&dev_priv->release_mutex); 231 + if (unlikely(--dev_priv->num_3d_resources == 0)) 232 + vmw_release_device(dev_priv); 233 + n3d = (int32_t) dev_priv->num_3d_resources; 234 + mutex_unlock(&dev_priv->release_mutex); 235 + 236 + BUG_ON(n3d < 0); 237 + } 213 238 214 239 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 215 240 { ··· 255 228 dev_priv->last_read_sequence = (uint32_t) -100; 256 229 mutex_init(&dev_priv->hw_mutex); 257 230 mutex_init(&dev_priv->cmdbuf_mutex); 231 + mutex_init(&dev_priv->release_mutex); 258 232 rwlock_init(&dev_priv->resource_lock); 259 233 idr_init(&dev_priv->context_idr); 260 234 idr_init(&dev_priv->surface_idr); ··· 271 243 dev_priv->io_start = pci_resource_start(dev->pdev, 0); 272 244 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 273 245 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 246 + 247 + dev_priv->enable_fb = enable_fbdev; 274 248 275 249 mutex_lock(&dev_priv->hw_mutex); 276 250 ··· 373 343 374 344 dev->dev_private = dev_priv; 375 345 376 - if (!dev->devname) 377 - dev->devname = vmw_devname; 378 - 379 - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 380 - ret = drm_irq_install(dev); 381 - if (unlikely(ret != 0)) { 382 - DRM_ERROR("Failed installing irq: %d\n", ret); 383 - goto out_no_irq; 384 - } 385 - } 386 - 387 346 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 388 347 dev_priv->stealth = (ret != 0); 389 348 if (dev_priv->stealth) { ··· 388 369 goto out_no_device; 389 370 } 390 371 } 391 - ret = vmw_request_device(dev_priv); 372 + ret = vmw_kms_init(dev_priv); 392 373 if (unlikely(ret != 0)) 393 - goto out_no_device; 394 - vmw_kms_init(dev_priv); 374 + goto out_no_kms; 395 375 vmw_overlay_init(dev_priv); 396 - vmw_fb_init(dev_priv); 376 + if (dev_priv->enable_fb) { 377 + ret = vmw_3d_resource_inc(dev_priv); 378 + if (unlikely(ret != 0)) 379 + goto out_no_fifo; 380 + vmw_kms_save_vga(dev_priv); 381 + vmw_fb_init(dev_priv); 382 + DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? 
383 + "Detected device 3D availability.\n" : 384 + "Detected no device 3D availability.\n"); 385 + } else { 386 + DRM_INFO("Delayed 3D detection since we're not " 387 + "running the device in SVGA mode yet.\n"); 388 + } 389 + 390 + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 391 + ret = drm_irq_install(dev); 392 + if (unlikely(ret != 0)) { 393 + DRM_ERROR("Failed installing irq: %d\n", ret); 394 + goto out_no_irq; 395 + } 396 + } 397 397 398 398 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 399 399 register_pm_notifier(&dev_priv->pm_nb); 400 400 401 - DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); 402 - 403 401 return 0; 404 402 405 - out_no_device: 406 - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 407 - drm_irq_uninstall(dev_priv->dev); 408 - if (dev->devname == vmw_devname) 409 - dev->devname = NULL; 410 403 out_no_irq: 404 + if (dev_priv->enable_fb) { 405 + vmw_fb_close(dev_priv); 406 + vmw_kms_restore_vga(dev_priv); 407 + vmw_3d_resource_dec(dev_priv); 408 + } 409 + out_no_fifo: 410 + vmw_overlay_close(dev_priv); 411 + vmw_kms_close(dev_priv); 412 + out_no_kms: 413 + if (dev_priv->stealth) 414 + pci_release_region(dev->pdev, 2); 415 + else 416 + pci_release_regions(dev->pdev); 417 + out_no_device: 411 418 ttm_object_device_release(&dev_priv->tdev); 412 419 out_err4: 413 420 iounmap(dev_priv->mmio_virt); ··· 460 415 461 416 unregister_pm_notifier(&dev_priv->pm_nb); 462 417 463 - vmw_fb_close(dev_priv); 418 + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 419 + drm_irq_uninstall(dev_priv->dev); 420 + if (dev_priv->enable_fb) { 421 + vmw_fb_close(dev_priv); 422 + vmw_kms_restore_vga(dev_priv); 423 + vmw_3d_resource_dec(dev_priv); 424 + } 464 425 vmw_kms_close(dev_priv); 465 426 vmw_overlay_close(dev_priv); 466 - vmw_release_device(dev_priv); 467 427 if (dev_priv->stealth) 468 428 pci_release_region(dev->pdev, 2); 469 429 else 470 430 pci_release_regions(dev->pdev); 471 431 472 - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 473 - drm_irq_uninstall(dev_priv->dev); 474 - if (dev->devname == vmw_devname) 475 - dev->devname = NULL; 476 432 ttm_object_device_release(&dev_priv->tdev); 477 433 iounmap(dev_priv->mmio_virt); 478 434 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, ··· 546 500 struct drm_ioctl_desc *ioctl = 547 501 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 548 502 549 - if (unlikely(ioctl->cmd != cmd)) { 503 + if (unlikely(ioctl->cmd_drv != cmd)) { 550 504 DRM_ERROR("Invalid command format, ioctl %d\n", 551 505 nr - DRM_COMMAND_BASE); 552 506 return -EINVAL; ··· 635 589 struct vmw_master *vmaster = vmw_master(file_priv->master); 636 590 int ret = 0; 637 591 592 + if (!dev_priv->enable_fb) { 593 + ret = vmw_3d_resource_inc(dev_priv); 594 + if (unlikely(ret != 0)) 595 + return ret; 596 + vmw_kms_save_vga(dev_priv); 597 + mutex_lock(&dev_priv->hw_mutex); 598 + vmw_write(dev_priv, SVGA_REG_TRACES, 0); 599 + mutex_unlock(&dev_priv->hw_mutex); 600 + } 601 + 638 602 if (active) { 639 603 BUG_ON(active != &dev_priv->fbdev_master); 640 604 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); ··· 673 617 return 0; 674 618 675 619 out_no_active_lock: 676 - vmw_release_device(dev_priv); 620 + if (!dev_priv->enable_fb) { 621 + mutex_lock(&dev_priv->hw_mutex); 622 + vmw_write(dev_priv, SVGA_REG_TRACES, 1); 623 + mutex_unlock(&dev_priv->hw_mutex); 624 + vmw_kms_restore_vga(dev_priv); 625 + vmw_3d_resource_dec(dev_priv); 626 + } 677 627 return ret; 678 628 } 679 629 ··· 707 645 708 646 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 709 647 648 + if 
(!dev_priv->enable_fb) { 649 + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 650 + if (unlikely(ret != 0)) 651 + DRM_ERROR("Unable to clean VRAM on master drop.\n"); 652 + mutex_lock(&dev_priv->hw_mutex); 653 + vmw_write(dev_priv, SVGA_REG_TRACES, 1); 654 + mutex_unlock(&dev_priv->hw_mutex); 655 + vmw_kms_restore_vga(dev_priv); 656 + vmw_3d_resource_dec(dev_priv); 657 + } 658 + 710 659 dev_priv->active_master = &dev_priv->fbdev_master; 711 660 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 712 661 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 713 662 714 - vmw_fb_on(dev_priv); 663 + if (dev_priv->enable_fb) 664 + vmw_fb_on(dev_priv); 715 665 } 716 666 717 667 ··· 796 722 .irq_postinstall = vmw_irq_postinstall, 797 723 .irq_uninstall = vmw_irq_uninstall, 798 724 .irq_handler = vmw_irq_handler, 725 + .get_vblank_counter = vmw_get_vblank_counter, 799 726 .reclaim_buffers_locked = NULL, 800 727 .get_map_ofs = drm_core_get_map_ofs, 801 728 .get_reg_ofs = drm_core_get_reg_ofs,
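The new vmw_3d_resource_inc()/vmw_3d_resource_dec() helpers above implement a mutex-protected use count: the expensive device bring-up happens only on the 0 -> 1 transition and the release only on the 1 -> 0 transition, so the SVGA device stays out of FIFO/3D mode while nothing needs it. A generic, hedged sketch of the same pattern in portable C with POSIX threads; all names here are illustrative, not the vmwgfx API.

    #include <pthread.h>

    static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int num_users;

    static int  request_device(void) { /* expensive bring-up */ return 0; }
    static void release_device(void) { /* matching tear-down  */ }

    int resource_inc(void)
    {
        int ret = 0;

        pthread_mutex_lock(&res_lock);
        if (num_users++ == 0) {
            ret = request_device();
            if (ret != 0)
                --num_users;            /* undo the count on failure */
        }
        pthread_mutex_unlock(&res_lock);
        return ret;
    }

    void resource_dec(void)
    {
        pthread_mutex_lock(&res_lock);
        if (--num_users == 0)
            release_device();
        pthread_mutex_unlock(&res_lock);
    }

    int main(void)
    {
        if (resource_inc() == 0)        /* first user brings the device up */
            resource_dec();             /* last user releases it again */
        return 0;
    }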
+8
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 277 277 278 278 bool stealth; 279 279 bool is_opened; 280 + bool enable_fb; 280 281 281 282 /** 282 283 * Master management. ··· 286 285 struct vmw_master *active_master; 287 286 struct vmw_master fbdev_master; 288 287 struct notifier_block pm_nb; 288 + 289 + struct mutex release_mutex; 290 + uint32_t num_3d_resources; 289 291 }; 290 292 291 293 static inline struct vmw_private *vmw_priv(struct drm_device *dev) ··· 322 318 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 323 319 return val; 324 320 } 321 + 322 + int vmw_3d_resource_inc(struct vmw_private *dev_priv); 323 + void vmw_3d_resource_dec(struct vmw_private *dev_priv); 325 324 326 325 /** 327 326 * GMR utilities - vmwgfx_gmr.c ··· 518 511 unsigned bbp, unsigned depth); 519 512 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 520 513 struct drm_file *file_priv); 514 + u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); 521 515 522 516 /** 523 517 * Overlay control - vmwgfx_overlay.c
+5
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 615 615 if (unlikely(ret != 0)) 616 616 goto err_unlock; 617 617 618 + if (bo->mem.mem_type == TTM_PL_VRAM && 619 + bo->mem.mm_node->start < bo->num_pages) 620 + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, 621 + false, false); 622 + 618 623 ret = ttm_bo_validate(bo, &ne_placement, false, false, false); 619 624 620 625 /* Could probably bug on */
+3
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
··· 106 106 mutex_lock(&dev_priv->hw_mutex); 107 107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 108 108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 109 + dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 109 110 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 110 111 111 112 min = 4; ··· 176 175 dev_priv->config_done_state); 177 176 vmw_write(dev_priv, SVGA_REG_ENABLE, 178 177 dev_priv->enable_state); 178 + vmw_write(dev_priv, SVGA_REG_TRACES, 179 + dev_priv->traces_state); 179 180 180 181 mutex_unlock(&dev_priv->hw_mutex); 181 182 vmw_fence_queue_takedown(&fifo->fence_queue);
+17
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 898 898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); 899 899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); 900 900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 901 + if (i == 0 && vmw_priv->num_displays == 1 && 902 + save->width == 0 && save->height == 0) { 903 + 904 + /* 905 + * It should be fairly safe to assume that these 906 + * values are uninitialized. 907 + */ 908 + 909 + save->width = vmw_priv->vga_width - save->pos_x; 910 + save->height = vmw_priv->vga_height - save->pos_y; 911 + } 901 912 } 913 + 902 914 return 0; 903 915 } 904 916 ··· 995 983 out_unlock: 996 984 ttm_read_unlock(&vmaster->lock); 997 985 return ret; 986 + } 987 + 988 + u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) 989 + { 990 + return 0; 998 991 }
+17 -10
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
··· 27 27 28 28 #include "vmwgfx_kms.h" 29 29 30 + #define VMWGFX_LDU_NUM_DU 8 31 + 30 32 #define vmw_crtc_to_ldu(x) \ 31 33 container_of(x, struct vmw_legacy_display_unit, base.crtc) 32 34 #define vmw_encoder_to_ldu(x) \ ··· 538 536 539 537 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 540 538 { 539 + struct drm_device *dev = dev_priv->dev; 540 + int i; 541 + int ret; 542 + 541 543 if (dev_priv->ldu_priv) { 542 544 DRM_INFO("ldu system already on\n"); 543 545 return -EINVAL; ··· 559 553 560 554 drm_mode_create_dirty_info_property(dev_priv->dev); 561 555 562 - vmw_ldu_init(dev_priv, 0); 563 - /* for old hardware without multimon only enable one display */ 564 556 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { 565 - vmw_ldu_init(dev_priv, 1); 566 - vmw_ldu_init(dev_priv, 2); 567 - vmw_ldu_init(dev_priv, 3); 568 - vmw_ldu_init(dev_priv, 4); 569 - vmw_ldu_init(dev_priv, 5); 570 - vmw_ldu_init(dev_priv, 6); 571 - vmw_ldu_init(dev_priv, 7); 557 + for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) 558 + vmw_ldu_init(dev_priv, i); 559 + ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); 560 + } else { 561 + /* for old hardware without multimon only enable one display */ 562 + vmw_ldu_init(dev_priv, 0); 563 + ret = drm_vblank_init(dev, 1); 572 564 } 573 565 574 - return 0; 566 + return ret; 575 567 } 576 568 577 569 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 578 570 { 571 + struct drm_device *dev = dev_priv->dev; 572 + 573 + drm_vblank_cleanup(dev); 579 574 if (!dev_priv->ldu_priv) 580 575 return -ENOSYS; 581 576
+4
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 211 211 cmd->body.cid = cpu_to_le32(res->id); 212 212 213 213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 214 + vmw_3d_resource_dec(dev_priv); 214 215 } 215 216 216 217 static int vmw_context_init(struct vmw_private *dev_priv, ··· 248 247 cmd->body.cid = cpu_to_le32(res->id); 249 248 250 249 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 250 + (void) vmw_3d_resource_inc(dev_priv); 251 251 vmw_resource_activate(res, vmw_hw_context_destroy); 252 252 return 0; 253 253 } ··· 408 406 cmd->body.sid = cpu_to_le32(res->id); 409 407 410 408 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 409 + vmw_3d_resource_dec(dev_priv); 411 410 } 412 411 413 412 void vmw_surface_res_free(struct vmw_resource *res) ··· 476 473 } 477 474 478 475 vmw_fifo_commit(dev_priv, submit_size); 476 + (void) vmw_3d_resource_inc(dev_priv); 479 477 vmw_resource_activate(res, vmw_hw_surface_destroy); 480 478 return 0; 481 479 }
+19 -13
drivers/hwmon/f71882fg.c
··· 111 111 /* Super-I/O Function prototypes */ 112 112 static inline int superio_inb(int base, int reg); 113 113 static inline int superio_inw(int base, int reg); 114 - static inline void superio_enter(int base); 114 + static inline int superio_enter(int base); 115 115 static inline void superio_select(int base, int ld); 116 116 static inline void superio_exit(int base); 117 117 ··· 861 861 return val; 862 862 } 863 863 864 - static inline void superio_enter(int base) 864 + static inline int superio_enter(int base) 865 865 { 866 + /* Don't step on other drivers' I/O space by accident */ 867 + if (!request_muxed_region(base, 2, DRVNAME)) { 868 + printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", 869 + base); 870 + return -EBUSY; 871 + } 872 + 866 873 /* according to the datasheet the key must be send twice! */ 867 874 outb(SIO_UNLOCK_KEY, base); 868 875 outb(SIO_UNLOCK_KEY, base); 876 + 877 + return 0; 869 878 } 870 879 871 880 static inline void superio_select(int base, int ld) ··· 886 877 static inline void superio_exit(int base) 887 878 { 888 879 outb(SIO_LOCK_KEY, base); 880 + release_region(base, 2); 889 881 } 890 882 891 883 static inline int fan_from_reg(u16 reg) ··· 2185 2175 static int __init f71882fg_find(int sioaddr, unsigned short *address, 2186 2176 struct f71882fg_sio_data *sio_data) 2187 2177 { 2188 - int err = -ENODEV; 2189 2178 u16 devid; 2190 - 2191 - /* Don't step on other drivers' I/O space by accident */ 2192 - if (!request_region(sioaddr, 2, DRVNAME)) { 2193 - printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", 2194 - (int)sioaddr); 2195 - return -EBUSY; 2196 - } 2197 - 2198 - superio_enter(sioaddr); 2179 + int err = superio_enter(sioaddr); 2180 + if (err) 2181 + return err; 2199 2182 2200 2183 devid = superio_inw(sioaddr, SIO_REG_MANID); 2201 2184 if (devid != SIO_FINTEK_ID) { 2202 2185 pr_debug(DRVNAME ": Not a Fintek device\n"); 2186 + err = -ENODEV; 2203 2187 goto exit; 2204 2188 } 2205 2189 ··· 2217 2213 default: 2218 2214 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", 2219 2215 (unsigned int)devid); 2216 + err = -ENODEV; 2220 2217 goto exit; 2221 2218 } 2222 2219 ··· 2228 2223 2229 2224 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { 2230 2225 printk(KERN_WARNING DRVNAME ": Device not activated\n"); 2226 + err = -ENODEV; 2231 2227 goto exit; 2232 2228 } 2233 2229 2234 2230 *address = superio_inw(sioaddr, SIO_REG_ADDR); 2235 2231 if (*address == 0) { 2236 2232 printk(KERN_WARNING DRVNAME ": Base address not set\n"); 2233 + err = -ENODEV; 2237 2234 goto exit; 2238 2235 } 2239 2236 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ ··· 2246 2239 (int)superio_inb(sioaddr, SIO_REG_DEVREV)); 2247 2240 exit: 2248 2241 superio_exit(sioaddr); 2249 - release_region(sioaddr, 2); 2250 2242 return err; 2251 2243 } 2252 2244
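superio_enter() now claims the two Super-I/O ports with request_muxed_region() before sending the unlock key, and superio_exit() releases them after locking the chip again, so the region is held only for the duration of a Super-I/O session. That means every error path after a successful superio_enter() must leave through superio_exit(), which is why the hunks above set an explicit err value before each goto exit. A hedged usage sketch of that contract; the probe logic is simplified and is not the driver's real code.

    /* sketch only: every path after a successful superio_enter() must
     * reach superio_exit(), otherwise the muxed I/O region is leaked */
    static int probe_superio(int sioaddr)
    {
        int err = superio_enter(sioaddr);   /* request region + unlock */
        if (err)
            return err;                     /* e.g. -EBUSY */

        if (superio_inw(sioaddr, SIO_REG_MANID) != SIO_FINTEK_ID) {
            err = -ENODEV;
            goto exit;
        }
        /* ... further detection ... */
    exit:
        superio_exit(sioaddr);              /* lock chip + release region */
        return err;
    }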
+3 -3
drivers/i2c/busses/i2c-davinci.c
··· 357 357 358 358 dev->terminate = 0; 359 359 360 - /* write the data into mode register */ 361 - davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); 362 - 363 360 /* 364 361 * First byte should be set here, not after interrupt, 365 362 * because transmit-data-ready interrupt can come before ··· 367 370 davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); 368 371 dev->buf_len--; 369 372 } 373 + 374 + /* write the data into mode register; start transmitting */ 375 + davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); 370 376 371 377 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 372 378 dev->adapter.timeout);
+1 -1
drivers/i2c/busses/i2c-octeon.c
··· 218 218 return result; 219 219 } else if (result == 0) { 220 220 dev_dbg(i2c->dev, "%s: timeout\n", __func__); 221 - result = -ETIMEDOUT; 221 + return -ETIMEDOUT; 222 222 } 223 223 224 224 return 0;
+2 -2
drivers/i2c/busses/i2c-s3c2410.c
··· 662 662 unsigned long sda_delay; 663 663 664 664 if (pdata->sda_delay) { 665 - sda_delay = (freq / 1000) * pdata->sda_delay; 666 - sda_delay /= 1000000; 665 + sda_delay = clkin * pdata->sda_delay; 666 + sda_delay = DIV_ROUND_UP(sda_delay, 1000000); 667 667 sda_delay = DIV_ROUND_UP(sda_delay, 5); 668 668 if (sda_delay > 3) 669 669 sda_delay = 3;
+15 -5
drivers/idle/intel_idle.c
··· 83 83 /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ 84 84 static unsigned int lapic_timer_reliable_states; 85 85 86 - static struct cpuidle_device *intel_idle_cpuidle_devices; 86 + static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; 87 87 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 88 88 89 89 static struct cpuidle_state *cpuidle_state_table; ··· 108 108 .name = "NHM-C3", 109 109 .desc = "MWAIT 0x10", 110 110 .driver_data = (void *) 0x10, 111 - .flags = CPUIDLE_FLAG_TIME_VALID, 111 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 112 112 .exit_latency = 20, 113 113 .power_usage = 500, 114 114 .target_residency = 80, ··· 117 117 .name = "NHM-C6", 118 118 .desc = "MWAIT 0x20", 119 119 .driver_data = (void *) 0x20, 120 - .flags = CPUIDLE_FLAG_TIME_VALID, 120 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 121 121 .exit_latency = 200, 122 122 .power_usage = 350, 123 123 .target_residency = 800, ··· 149 149 .name = "ATM-C4", 150 150 .desc = "MWAIT 0x30", 151 151 .driver_data = (void *) 0x30, 152 - .flags = CPUIDLE_FLAG_TIME_VALID, 152 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 153 153 .exit_latency = 100, 154 154 .power_usage = 250, 155 155 .target_residency = 400, ··· 159 159 .name = "ATM-C6", 160 160 .desc = "MWAIT 0x40", 161 161 .driver_data = (void *) 0x40, 162 - .flags = CPUIDLE_FLAG_TIME_VALID, 162 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 163 163 .exit_latency = 200, 164 164 .power_usage = 150, 165 165 .target_residency = 800, ··· 184 184 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; 185 185 186 186 local_irq_disable(); 187 + 188 + /* 189 + * If the state flag indicates that the TLB will be flushed or if this 190 + * is the deepest c-state supported, do a voluntary leave mm to avoid 191 + * costly and mostly unnecessary wakeups for flushing the user TLB's 192 + * associated with the active mm. 193 + */ 194 + if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED || 195 + (&dev->states[dev->state_count - 1] == state)) 196 + leave_mm(cpu); 187 197 188 198 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 189 199 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+7 -6
drivers/mfd/max8925-core.c
··· 429 429 irq_tsc = cache_tsc; 430 430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { 431 431 irq_data = &max8925_irqs[i]; 432 + /* 1 -- disable, 0 -- enable */ 432 433 switch (irq_data->mask_reg) { 433 434 case MAX8925_CHG_IRQ1_MASK: 434 - irq_chg[0] &= irq_data->enable; 435 + irq_chg[0] &= ~irq_data->enable; 435 436 break; 436 437 case MAX8925_CHG_IRQ2_MASK: 437 - irq_chg[1] &= irq_data->enable; 438 + irq_chg[1] &= ~irq_data->enable; 438 439 break; 439 440 case MAX8925_ON_OFF_IRQ1_MASK: 440 - irq_on[0] &= irq_data->enable; 441 + irq_on[0] &= ~irq_data->enable; 441 442 break; 442 443 case MAX8925_ON_OFF_IRQ2_MASK: 443 - irq_on[1] &= irq_data->enable; 444 + irq_on[1] &= ~irq_data->enable; 444 445 break; 445 446 case MAX8925_RTC_IRQ_MASK: 446 - irq_rtc &= irq_data->enable; 447 + irq_rtc &= ~irq_data->enable; 447 448 break; 448 449 case MAX8925_TSC_IRQ_MASK: 449 - irq_tsc &= irq_data->enable; 450 + irq_tsc &= ~irq_data->enable; 450 451 break; 451 452 default: 452 453 dev_err(chip->dev, "wrong IRQ\n");
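The max8925 fix above depends on the mask-register polarity called out in the new comment: writing 1 masks (disables) an interrupt source and 0 enables it, so enabling a source means clearing its bit with reg &= ~bit rather than AND-ing the bit in. A tiny stand-alone illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t mask   = 0xff;      /* reset state: everything masked */
        uint8_t wanted = 0x05;      /* sources we want enabled */

        mask &= (uint8_t)~wanted;   /* correct: clear bits to enable */
        assert(mask == 0xfa);

        /* the old code effectively did mask &= wanted, which masks the
         * wanted sources and enables all the unrelated ones instead */
        return 0;
    }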
+7 -2
drivers/mfd/wm831x-irq.c
··· 394 394 395 395 irq = irq - wm831x->irq_base; 396 396 397 - if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) 398 - return -EINVAL; 397 + if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { 398 + /* Ignore internal-only IRQs */ 399 + if (irq >= 0 && irq < WM831X_NUM_IRQS) 400 + return 0; 401 + else 402 + return -EINVAL; 403 + } 399 404 400 405 switch (type) { 401 406 case IRQ_TYPE_EDGE_BOTH:
+1 -1
drivers/mtd/nand/omap2.c
··· 413 413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); 414 414 } while (prefetch_status); 415 415 /* disable and stop the PFPW engine */ 416 - gpmc_prefetch_reset(); 416 + gpmc_prefetch_reset(info->gpmc_cs); 417 417 418 418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 419 419 return 0;
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 1411 1411 clear_bit(STATUS_SCAN_HW, &priv->status); 1412 1412 clear_bit(STATUS_SCANNING, &priv->status); 1413 1413 /* inform mac80211 scan aborted */ 1414 - queue_work(priv->workqueue, &priv->scan_completed); 1414 + queue_work(priv->workqueue, &priv->abort_scan); 1415 1415 } 1416 1416 1417 1417 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+1 -1
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 3018 3018 clear_bit(STATUS_SCANNING, &priv->status); 3019 3019 3020 3020 /* inform mac80211 scan aborted */ 3021 - queue_work(priv->workqueue, &priv->scan_completed); 3021 + queue_work(priv->workqueue, &priv->abort_scan); 3022 3022 } 3023 3023 3024 3024 static void iwl3945_bg_restart(struct work_struct *data)
+20
drivers/pci/quirks.c
··· 163 163 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 164 164 165 165 /* 166 + * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear 167 + * for some HT machines to use C4 w/o hanging. 168 + */ 169 + static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev) 170 + { 171 + u32 pmbase; 172 + u16 pm1a; 173 + 174 + pci_read_config_dword(dev, 0x40, &pmbase); 175 + pmbase = pmbase & 0xff80; 176 + pm1a = inw(pmbase); 177 + 178 + if (pm1a & 0x10) { 179 + dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); 180 + outw(0x10, pmbase); 181 + } 182 + } 183 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); 184 + 185 + /* 166 186 * Chipsets where PCI->PCI transfers vanish or hang 167 187 */ 168 188 static void __devinit quirk_nopcipci(struct pci_dev *dev)
+4 -2
drivers/regulator/core.c
··· 700 700 constraints->min_uA != constraints->max_uA) { 701 701 ret = _regulator_get_current_limit(rdev); 702 702 if (ret > 0) 703 - count += sprintf(buf + count, "at %d uA ", ret / 1000); 703 + count += sprintf(buf + count, "at %d mA ", ret / 1000); 704 704 } 705 705 706 706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) ··· 2302 2302 dev_set_name(&rdev->dev, "regulator.%d", 2303 2303 atomic_inc_return(&regulator_no) - 1); 2304 2304 ret = device_register(&rdev->dev); 2305 - if (ret != 0) 2305 + if (ret != 0) { 2306 + put_device(&rdev->dev); 2306 2307 goto clean; 2308 + } 2307 2309 2308 2310 dev_set_drvdata(&rdev->dev, rdev); 2309 2311
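The second regulator hunk above follows the driver-core ownership rule: once device_register() has been called, the embedded kobject is initialized and the device is owned by the core, so a failed registration must be unwound with put_device() (letting the release callback free the memory) rather than by freeing the structure directly. A hedged kernel-context sketch of that error path, with the function name made up for illustration:

    #include <linux/device.h>

    /* sketch only, not the regulator core's actual code */
    static int register_my_device(struct device *dev)
    {
        int ret = device_register(dev);

        if (ret) {
            /* the kobject is live even though registration failed:
             * drop the reference instead of kfree()ing dev */
            put_device(dev);
            return ret;
        }
        return 0;
    }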
+1 -1
drivers/regulator/max8649.c
··· 330 330 /* set external clock frequency */ 331 331 info->extclk_freq = pdata->extclk_freq; 332 332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, 333 - info->extclk_freq); 333 + info->extclk_freq << 6); 334 334 } 335 335 336 336 if (pdata->ramp_timing) {
+1
drivers/serial/mfd.c
··· 27 27 #include <linux/init.h> 28 28 #include <linux/console.h> 29 29 #include <linux/sysrq.h> 30 + #include <linux/slab.h> 30 31 #include <linux/serial_reg.h> 31 32 #include <linux/circ_buf.h> 32 33 #include <linux/delay.h>
+1
drivers/serial/mrst_max3110.c
··· 29 29 30 30 #include <linux/module.h> 31 31 #include <linux/ioport.h> 32 + #include <linux/irq.h> 32 33 #include <linux/init.h> 33 34 #include <linux/console.h> 34 35 #include <linux/sysrq.h>
+5
drivers/spi/spi.c
··· 23 23 #include <linux/init.h> 24 24 #include <linux/cache.h> 25 25 #include <linux/mutex.h> 26 + #include <linux/of_device.h> 26 27 #include <linux/slab.h> 27 28 #include <linux/mod_devicetable.h> 28 29 #include <linux/spi/spi.h> ··· 86 85 { 87 86 const struct spi_device *spi = to_spi_device(dev); 88 87 const struct spi_driver *sdrv = to_spi_driver(drv); 88 + 89 + /* Attempt an OF style match */ 90 + if (of_driver_match_device(dev, drv)) 91 + return 1; 89 92 90 93 if (sdrv->id_table) 91 94 return !!spi_match_id(sdrv->id_table, spi);
+1 -1
drivers/spi/spi_gpio.c
··· 350 350 spi_gpio->bitbang.master = spi_master_get(master); 351 351 spi_gpio->bitbang.chipselect = spi_gpio_chipselect; 352 352 353 - if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) { 353 + if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { 354 354 spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; 355 355 spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; 356 356 spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
+8 -2
drivers/spi/spi_mpc8xxx.c
··· 408 408 409 409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count; 410 410 411 - out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); 411 + if (mspi->rx_dma == mspi->dma_dummy_rx) 412 + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); 413 + else 414 + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); 412 415 out_be16(&rx_bd->cbd_datlen, 0); 413 416 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); 414 417 415 - out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); 418 + if (mspi->tx_dma == mspi->dma_dummy_tx) 419 + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); 420 + else 421 + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); 416 422 out_be16(&tx_bd->cbd_datlen, xfer_len); 417 423 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | 418 424 BD_SC_LAST);
+6 -3
drivers/xen/xenbus/xenbus_probe.c
··· 755 755 { 756 756 int ret = 0; 757 757 758 - blocking_notifier_chain_register(&xenstore_chain, nb); 758 + if (xenstored_ready > 0) 759 + ret = nb->notifier_call(nb, 0, NULL); 760 + else 761 + blocking_notifier_chain_register(&xenstore_chain, nb); 759 762 760 763 return ret; 761 764 } ··· 772 769 773 770 void xenbus_probe(struct work_struct *unused) 774 771 { 775 - BUG_ON((xenstored_ready <= 0)); 772 + xenstored_ready = 1; 776 773 777 774 /* Enumerate devices in xenstore and watch for changes. */ 778 775 xenbus_probe_devices(&xenbus_frontend); ··· 838 835 xen_store_evtchn = xen_start_info->store_evtchn; 839 836 xen_store_mfn = xen_start_info->store_mfn; 840 837 xen_store_interface = mfn_to_virt(xen_store_mfn); 838 + xenstored_ready = 1; 841 839 } 842 - xenstored_ready = 1; 843 840 } 844 841 845 842 /* Initialize the interface to xenstore. */
+33 -16
fs/cifs/cifssmb.c
··· 232 232 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 233 233 void **request_buf) 234 234 { 235 - int rc = 0; 235 + int rc; 236 236 237 237 rc = cifs_reconnect_tcon(tcon, smb_command); 238 238 if (rc) ··· 250 250 if (tcon != NULL) 251 251 cifs_stats_inc(&tcon->num_smbs_sent); 252 252 253 - return rc; 253 + return 0; 254 254 } 255 255 256 256 int ··· 281 281 282 282 /* If the return code is zero, this function must fill in request_buf pointer */ 283 283 static int 284 - smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 285 - void **request_buf /* returned */ , 286 - void **response_buf /* returned */ ) 284 + __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 285 + void **request_buf, void **response_buf) 287 286 { 288 - int rc = 0; 289 - 290 - rc = cifs_reconnect_tcon(tcon, smb_command); 291 - if (rc) 292 - return rc; 293 - 294 287 *request_buf = cifs_buf_get(); 295 288 if (*request_buf == NULL) { 296 289 /* BB should we add a retry in here if not a writepage? */ ··· 302 309 if (tcon != NULL) 303 310 cifs_stats_inc(&tcon->num_smbs_sent); 304 311 305 - return rc; 312 + return 0; 313 + } 314 + 315 + /* If the return code is zero, this function must fill in request_buf pointer */ 316 + static int 317 + smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 318 + void **request_buf, void **response_buf) 319 + { 320 + int rc; 321 + 322 + rc = cifs_reconnect_tcon(tcon, smb_command); 323 + if (rc) 324 + return rc; 325 + 326 + return __smb_init(smb_command, wct, tcon, request_buf, response_buf); 327 + } 328 + 329 + static int 330 + smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon, 331 + void **request_buf, void **response_buf) 332 + { 333 + if (tcon->ses->need_reconnect || tcon->need_reconnect) 334 + return -EHOSTDOWN; 335 + 336 + return __smb_init(smb_command, wct, tcon, request_buf, response_buf); 306 337 } 307 338 308 339 static int validate_t2(struct smb_t2_rsp *pSMB) ··· 4551 4534 4552 4535 cFYI(1, "In QFSUnixInfo"); 4553 4536 QFSUnixRetry: 4554 - rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 4555 - (void **) &pSMBr); 4537 + rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, 4538 + (void **) &pSMB, (void **) &pSMBr); 4556 4539 if (rc) 4557 4540 return rc; 4558 4541 ··· 4621 4604 cFYI(1, "In SETFSUnixInfo"); 4622 4605 SETFSUnixRetry: 4623 4606 /* BB switch to small buf init to save memory */ 4624 - rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 4625 - (void **) &pSMBr); 4607 + rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, 4608 + (void **) &pSMB, (void **) &pSMBr); 4626 4609 if (rc) 4627 4610 return rc; 4628 4611
+2
fs/cifs/inode.c
··· 801 801 inode->i_flags |= S_NOATIME | S_NOCMTIME; 802 802 if (inode->i_state & I_NEW) { 803 803 inode->i_ino = hash; 804 + if (S_ISREG(inode->i_mode)) 805 + inode->i_data.backing_dev_info = sb->s_bdi; 804 806 #ifdef CONFIG_CIFS_FSCACHE 805 807 /* initialize per-inode cache cookie pointer */ 806 808 CIFS_I(inode)->fscache = NULL;
+4 -15
fs/fs-writeback.c
··· 72 72 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) 73 73 { 74 74 struct super_block *sb = inode->i_sb; 75 - struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; 76 75 77 - /* 78 - * For inodes on standard filesystems, we use superblock's bdi. For 79 - * inodes on virtual filesystems, we want to use inode mapping's bdi 80 - * because they can possibly point to something useful (think about 81 - * block_dev filesystem). 82 - */ 83 - if (sb->s_bdi && sb->s_bdi != &noop_backing_dev_info) { 84 - /* Some device inodes could play dirty tricks. Catch them... */ 85 - WARN(bdi != sb->s_bdi && bdi_cap_writeback_dirty(bdi), 86 - "Dirtiable inode bdi %s != sb bdi %s\n", 87 - bdi->name, sb->s_bdi->name); 88 - return sb->s_bdi; 89 - } 90 - return bdi; 76 + if (strcmp(sb->s_type->name, "bdev") == 0) 77 + return inode->i_mapping->backing_dev_info; 78 + 79 + return sb->s_bdi; 91 80 } 92 81 93 82 static void bdi_queue_work(struct backing_dev_info *bdi,
+1 -1
fs/fuse/dev.c
··· 1354 1354 loff_t file_size; 1355 1355 unsigned int num; 1356 1356 unsigned int offset; 1357 - size_t total_len; 1357 + size_t total_len = 0; 1358 1358 1359 1359 req = fuse_get_req(fc); 1360 1360 if (IS_ERR(req))
+1 -1
fs/ocfs2/symlink.c
··· 128 128 } 129 129 130 130 /* Fast symlinks can't be large */ 131 - len = strlen(target); 131 + len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb)); 132 132 link = kzalloc(len + 1, GFP_NOFS); 133 133 if (!link) { 134 134 status = -ENOMEM;
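The ocfs2 change above bounds the length computation: a fast (inline) symlink target lives in a fixed-size area inside the inode and is not guaranteed to be NUL-terminated, so strnlen() with that area size as the limit cannot read past the buffer the way strlen() could. A small portable illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* fixed-size buffer that is not NUL-terminated */
        char target[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };

        /* strlen(target) would keep scanning past the end looking for
         * a NUL; strnlen() stops at the supplied bound at the latest */
        size_t len = strnlen(target, sizeof(target));

        printf("%zu\n", len);       /* prints 8 */
        return 0;
    }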
+2 -2
fs/proc/base.c
··· 2675 2675 INF("auxv", S_IRUSR, proc_pid_auxv), 2676 2676 ONE("status", S_IRUGO, proc_pid_status), 2677 2677 ONE("personality", S_IRUSR, proc_pid_personality), 2678 - INF("limits", S_IRUSR, proc_pid_limits), 2678 + INF("limits", S_IRUGO, proc_pid_limits), 2679 2679 #ifdef CONFIG_SCHED_DEBUG 2680 2680 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2681 2681 #endif ··· 3011 3011 INF("auxv", S_IRUSR, proc_pid_auxv), 3012 3012 ONE("status", S_IRUGO, proc_pid_status), 3013 3013 ONE("personality", S_IRUSR, proc_pid_personality), 3014 - INF("limits", S_IRUSR, proc_pid_limits), 3014 + INF("limits", S_IRUGO, proc_pid_limits), 3015 3015 #ifdef CONFIG_SCHED_DEBUG 3016 3016 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 3017 3017 #endif
+4 -3
fs/reiserfs/ioctl.c
··· 170 170 int reiserfs_unpack(struct inode *inode, struct file *filp) 171 171 { 172 172 int retval = 0; 173 + int depth; 173 174 int index; 174 175 struct page *page; 175 176 struct address_space *mapping; ··· 189 188 /* we need to make sure nobody is changing the file size beneath 190 189 ** us 191 190 */ 192 - mutex_lock(&inode->i_mutex); 193 - reiserfs_write_lock(inode->i_sb); 191 + reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); 192 + depth = reiserfs_write_lock_once(inode->i_sb); 194 193 195 194 write_from = inode->i_size & (blocksize - 1); 196 195 /* if we are on a block boundary, we are already unpacked. */ ··· 225 224 226 225 out: 227 226 mutex_unlock(&inode->i_mutex); 228 - reiserfs_write_unlock(inode->i_sb); 227 + reiserfs_write_unlock_once(inode->i_sb, depth); 229 228 return retval; 230 229 }
+9 -3
fs/xfs/xfs_log_cil.c
··· 405 405 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); 406 406 new_ctx->ticket = xlog_cil_ticket_alloc(log); 407 407 408 - /* lock out transaction commit, but don't block on background push */ 408 + /* 409 + * Lock out transaction commit, but don't block for background pushes 410 + * unless we are well over the CIL space limit. See the definition of 411 + * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic 412 + * used here. 413 + */ 409 414 if (!down_write_trylock(&cil->xc_ctx_lock)) { 410 - if (!push_seq) 415 + if (!push_seq && 416 + cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log)) 411 417 goto out_free_ticket; 412 418 down_write(&cil->xc_ctx_lock); 413 419 } ··· 428 422 goto out_skip; 429 423 430 424 /* check for a previously pushed seqeunce */ 431 - if (push_seq < cil->xc_ctx->sequence) 425 + if (push_seq && push_seq < cil->xc_ctx->sequence) 432 426 goto out_skip; 433 427 434 428 /*
+21 -16
fs/xfs/xfs_log_priv.h
··· 426 426 }; 427 427 428 428 /* 429 - * The amount of log space we should the CIL to aggregate is difficult to size. 430 - * Whatever we chose we have to make we can get a reservation for the log space 431 - * effectively, that it is large enough to capture sufficient relogging to 432 - * reduce log buffer IO significantly, but it is not too large for the log or 433 - * induces too much latency when writing out through the iclogs. We track both 434 - * space consumed and the number of vectors in the checkpoint context, so we 435 - * need to decide which to use for limiting. 429 + * The amount of log space we allow the CIL to aggregate is difficult to size. 430 + * Whatever we choose, we have to make sure we can get a reservation for the 431 + * log space effectively, that it is large enough to capture sufficient 432 + * relogging to reduce log buffer IO significantly, but it is not too large for 433 + * the log or induces too much latency when writing out through the iclogs. We 434 + * track both space consumed and the number of vectors in the checkpoint 435 + * context, so we need to decide which to use for limiting. 436 436 * 437 437 * Every log buffer we write out during a push needs a header reserved, which 438 438 * is at least one sector and more for v2 logs. Hence we need a reservation of ··· 459 459 * checkpoint transaction ticket is specific to the checkpoint context, rather 460 460 * than the CIL itself. 461 461 * 462 - * With dynamic reservations, we can basically make up arbitrary limits for the 463 - * checkpoint size so long as they don't violate any other size rules. Hence 464 - * the initial maximum size for the checkpoint transaction will be set to a 465 - * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit 466 - * right now based on the latency of writing out a large amount of data through 467 - * the circular iclog buffers. 462 + * With dynamic reservations, we can effectively make up arbitrary limits for 463 + * the checkpoint size so long as they don't violate any other size rules. 464 + * Recovery imposes a rule that no transaction exceed half the log, so we are 465 + * limited by that. Furthermore, the log transaction reservation subsystem 466 + * tries to keep 25% of the log free, so we need to keep below that limit or we 467 + * risk running out of free log space to start any new transactions. 468 + * 469 + * In order to keep background CIL push efficient, we will set a lower 470 + * threshold at which background pushing is attempted without blocking current 471 + * transaction commits. A separate, higher bound defines when CIL pushes are 472 + * enforced to ensure we stay within our maximum checkpoint size bounds while 473 + * still leaving plenty of space for aggregation on large logs. 468 474 */ 469 - 470 - #define XLOG_CIL_SPACE_LIMIT(log) \ 471 - (min((log->l_logsize >> 2), (8 * 1024 * 1024))) 475 + #define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3) 476 + #define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4)) 472 477 473 478 /* 474 479 * The reservation head lsn is not made up of a cycle number and block number.
+1 -1
include/acpi/acpixf.h
··· 55 55 extern u8 acpi_gbl_permanent_mmap; 56 56 57 57 /* 58 - * Globals that are publically available, allowing for 58 + * Globals that are publicly available, allowing for 59 59 * run time configuration 60 60 */ 61 61 extern u32 acpi_dbg_level;
+20 -9
include/drm/drmP.h
··· 612 612 struct kref refcount; 613 613 614 614 /** Handle count of this object. Each handle also holds a reference */ 615 - struct kref handlecount; 615 + atomic_t handle_count; /* number of handles on this object */ 616 616 617 617 /** Related drm device */ 618 618 struct drm_device *dev; ··· 808 808 */ 809 809 int (*gem_init_object) (struct drm_gem_object *obj); 810 810 void (*gem_free_object) (struct drm_gem_object *obj); 811 - void (*gem_free_object_unlocked) (struct drm_gem_object *obj); 812 811 813 812 /* vga arb irq handler */ 814 813 void (*vgaarb_irq)(struct drm_device *dev, bool state); ··· 1174 1175 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); 1175 1176 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 1176 1177 extern void drm_vm_open_locked(struct vm_area_struct *vma); 1178 + extern void drm_vm_close_locked(struct vm_area_struct *vma); 1177 1179 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); 1178 1180 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); 1179 1181 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); ··· 1455 1455 void drm_gem_destroy(struct drm_device *dev); 1456 1456 void drm_gem_object_release(struct drm_gem_object *obj); 1457 1457 void drm_gem_object_free(struct kref *kref); 1458 - void drm_gem_object_free_unlocked(struct kref *kref); 1459 1458 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, 1460 1459 size_t size); 1461 1460 int drm_gem_object_init(struct drm_device *dev, 1462 1461 struct drm_gem_object *obj, size_t size); 1463 - void drm_gem_object_handle_free(struct kref *kref); 1462 + void drm_gem_object_handle_free(struct drm_gem_object *obj); 1464 1463 void drm_gem_vm_open(struct vm_area_struct *vma); 1465 1464 void drm_gem_vm_close(struct vm_area_struct *vma); 1466 1465 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); ··· 1482 1483 static inline void 1483 1484 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 1484 1485 { 1485 - if (obj != NULL) 1486 - kref_put(&obj->refcount, drm_gem_object_free_unlocked); 1486 + if (obj != NULL) { 1487 + struct drm_device *dev = obj->dev; 1488 + mutex_lock(&dev->struct_mutex); 1489 + kref_put(&obj->refcount, drm_gem_object_free); 1490 + mutex_unlock(&dev->struct_mutex); 1491 + } 1487 1492 } 1488 1493 1489 1494 int drm_gem_handle_create(struct drm_file *file_priv, ··· 1498 1495 drm_gem_object_handle_reference(struct drm_gem_object *obj) 1499 1496 { 1500 1497 drm_gem_object_reference(obj); 1501 - kref_get(&obj->handlecount); 1498 + atomic_inc(&obj->handle_count); 1502 1499 } 1503 1500 1504 1501 static inline void ··· 1507 1504 if (obj == NULL) 1508 1505 return; 1509 1506 1507 + if (atomic_read(&obj->handle_count) == 0) 1508 + return; 1510 1509 /* 1511 1510 * Must bump handle count first as this may be the last 1512 1511 * ref, in which case the object would disappear before we 1513 1512 * checked for a name 1514 1513 */ 1515 - kref_put(&obj->handlecount, drm_gem_object_handle_free); 1514 + if (atomic_dec_and_test(&obj->handle_count)) 1515 + drm_gem_object_handle_free(obj); 1516 1516 drm_gem_object_unreference(obj); 1517 1517 } 1518 1518 ··· 1525 1519 if (obj == NULL) 1526 1520 return; 1527 1521 1522 + if (atomic_read(&obj->handle_count) == 0) 1523 + return; 1524 + 1528 1525 /* 1529 1526 * Must bump handle count first as this may be the last 1530 1527 * ref, in which case the object would disappear before we 1531 1528 * checked for a name 1532 1529 */ 
1533 - kref_put(&obj->handlecount, drm_gem_object_handle_free); 1530 + 1531 + if (atomic_dec_and_test(&obj->handle_count)) 1532 + drm_gem_object_handle_free(obj); 1534 1533 drm_gem_object_unreference_unlocked(obj); 1535 1534 } 1536 1535
+1 -1
include/drm/drm_pciids.h
··· 85 85 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 86 86 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 87 87 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 88 - {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 89 88 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 90 89 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 91 90 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ ··· 102 103 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 103 104 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 104 105 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 106 + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 105 107 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ 106 108 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ 107 109 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+1
include/linux/cpuidle.h
··· 53 53 #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ 54 54 #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ 55 55 #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ 56 + #define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */ 56 57 57 58 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) 58 59
+1 -1
include/linux/dmaengine.h
··· 548 548 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; 549 549 } 550 550 551 - static unsigned short dma_dev_to_maxpq(struct dma_device *dma) 551 + static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) 552 552 { 553 553 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; 554 554 }
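dma_dev_to_maxpq() is defined in a header, so it needs the inline keyword: a plain static function in an include file would be duplicated, or warned about as defined-but-unused, in every file that includes it. A generic illustration of the same idiom (header name and flag value are hypothetical):

    /* my_header.h */
    static inline unsigned short strip_continue_flag(unsigned short max_pq)
    {
            return max_pq & ~0x8000;        /* mask off a capability bit, cf. DMA_HAS_PQ_CONTINUE */
    }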
+2 -3
include/linux/module.h
··· 686 686 687 687 688 688 #ifdef CONFIG_GENERIC_BUG 689 - int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, 689 + void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, 690 690 struct module *); 691 691 void module_bug_cleanup(struct module *); 692 692 693 693 #else /* !CONFIG_GENERIC_BUG */ 694 694 695 - static inline int module_bug_finalize(const Elf_Ehdr *hdr, 695 + static inline void module_bug_finalize(const Elf_Ehdr *hdr, 696 696 const Elf_Shdr *sechdrs, 697 697 struct module *mod) 698 698 { 699 - return 0; 700 699 } 701 700 static inline void module_bug_cleanup(struct module *mod) {} 702 701 #endif /* CONFIG_GENERIC_BUG */
+1 -1
include/linux/rcupdate.h
··· 454 454 * Makes rcu_dereference_check() do the dirty work. 455 455 */ 456 456 #define rcu_dereference_bh(p) \ 457 - rcu_dereference_check(p, rcu_read_lock_bh_held()) 457 + rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled()) 458 458 459 459 /** 460 460 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
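rcu_dereference_bh() is normally used inside an rcu_read_lock_bh() section; the relaxed check above additionally accepts callers running with hard interrupts disabled, since that also keeps softirqs (and hence an RCU-bh grace period) from making progress on this CPU. A minimal usage sketch (gp, struct foo and do_something() are hypothetical):

    struct foo *p;

    rcu_read_lock_bh();
    p = rcu_dereference_bh(gp);     /* gp is an RCU-protected pointer */
    if (p)
            do_something(p);
    rcu_read_unlock_bh();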
+1
include/linux/wait.h
··· 614 614 (wait)->private = current; \ 615 615 (wait)->func = autoremove_wake_function; \ 616 616 INIT_LIST_HEAD(&(wait)->task_list); \ 617 + (wait)->flags = 0; \ 617 618 } while (0) 618 619 619 620 /**
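With init_wait() now clearing ->flags, an on-stack wait entry no longer carries uninitialized flag bits into the wake-up path. For reference, the usual pattern around it, with wq and condition standing in for a real wait queue and predicate:

    wait_queue_t wait;

    init_wait(&wait);
    for (;;) {
            prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
            if (condition)
                    break;
            schedule();
    }
    finish_wait(&wq, &wait);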
+2
ipc/sem.c
··· 743 743 { 744 744 struct semid_ds out; 745 745 746 + memset(&out, 0, sizeof(out)); 747 + 746 748 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); 747 749 748 750 out.sem_otime = in->sem_otime;
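The added memset() closes a potential leak of kernel stack data: struct semid_ds contains padding (and fields not written on every path), and the whole structure is later copied back to user space. The same rule applies to any on-stack struct headed for copy_to_user(); a generic sketch with made-up names:

    struct foo_out out;

    memset(&out, 0, sizeof(out));           /* zero padding and any field left unset */
    out.id    = in->id;
    out.mtime = in->mtime;
    if (copy_to_user(ubuf, &out, sizeof(out)))
            return -EFAULT;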
-2
kernel/kfifo.c
··· 365 365 n = setup_sgl_buf(sgl, fifo->data + off, nents, l); 366 366 n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); 367 367 368 - if (n) 369 - sg_mark_end(sgl + n - 1); 370 368 return n; 371 369 } 372 370
+4
kernel/module.c
··· 1537 1537 { 1538 1538 struct module *mod = _mod; 1539 1539 list_del(&mod->list); 1540 + module_bug_cleanup(mod); 1540 1541 return 0; 1541 1542 } 1542 1543 ··· 2626 2625 if (err < 0) 2627 2626 goto ddebug; 2628 2627 2628 + module_bug_finalize(info.hdr, info.sechdrs, mod); 2629 2629 list_add_rcu(&mod->list, &modules); 2630 2630 mutex_unlock(&module_mutex); 2631 2631 ··· 2652 2650 mutex_lock(&module_mutex); 2653 2651 /* Unlink carefully: kallsyms could be walking list. */ 2654 2652 list_del_rcu(&mod->list); 2653 + module_bug_cleanup(mod); 2654 + 2655 2655 ddebug: 2656 2656 if (!mod->taints) 2657 2657 dynamic_debug_remove(info.debug);
+14 -3
kernel/smp.c
··· 365 365 EXPORT_SYMBOL_GPL(smp_call_function_any); 366 366 367 367 /** 368 - * __smp_call_function_single(): Run a function on another CPU 368 + * __smp_call_function_single(): Run a function on a specific CPU 369 369 * @cpu: The CPU to run on. 370 370 * @data: Pre-allocated and setup data structure 371 + * @wait: If true, wait until function has completed on specified CPU. 371 372 * 372 373 * Like smp_call_function_single(), but allow caller to pass in a 373 374 * pre-allocated data structure. Useful for embedding @data inside ··· 377 376 void __smp_call_function_single(int cpu, struct call_single_data *data, 378 377 int wait) 379 378 { 380 - csd_lock(data); 379 + unsigned int this_cpu; 380 + unsigned long flags; 381 381 382 + this_cpu = get_cpu(); 382 383 /* 383 384 * Can deadlock when called with interrupts disabled. 384 385 * We allow cpu's that are not yet online though, as no one else can ··· 390 387 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() 391 388 && !oops_in_progress); 392 389 393 - generic_exec_single(cpu, data, wait); 390 + if (cpu == this_cpu) { 391 + local_irq_save(flags); 392 + data->func(data->info); 393 + local_irq_restore(flags); 394 + } else { 395 + csd_lock(data); 396 + generic_exec_single(cpu, data, wait); 397 + } 398 + put_cpu(); 394 399 } 395 400 396 401 /**
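When @cpu turns out to be the calling CPU, the callback is now simply run locally with interrupts disabled instead of going through the IPI path. A minimal caller sketch, with my_func, my_csd and target_cpu invented for illustration but the func/info fields taken from the code above:

    static void my_func(void *info)
    {
            /* runs on target_cpu in interrupt (or local irq-off) context */
    }

    static struct call_single_data my_csd = {
            .func = my_func,
            .info = NULL,
    };

    __smp_call_function_single(target_cpu, &my_csd, 0);    /* wait=1 blocks until my_func has run */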
+2 -4
lib/bug.c
··· 72 72 return NULL; 73 73 } 74 74 75 - int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, 76 - struct module *mod) 75 + void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, 76 + struct module *mod) 77 77 { 78 78 char *secstrings; 79 79 unsigned int i; ··· 97 97 * could potentially lead to deadlock and thus be counter-productive. 98 98 */ 99 99 list_add(&mod->bug_list, &module_bug_list); 100 - 101 - return 0; 102 100 } 103 101 104 102 void module_bug_cleanup(struct module *mod)
+1 -1
lib/list_sort.c
··· 70 70 * element comparison is needed, so the client's cmp() 71 71 * routine can invoke cond_resched() periodically. 72 72 */ 73 - (*cmp)(priv, tail, tail); 73 + (*cmp)(priv, tail->next, tail->next); 74 74 75 75 tail->next->prev = tail; 76 76 tail = tail->next;
+4 -2
mm/ksm.c
··· 712 712 if (!ptep) 713 713 goto out; 714 714 715 - if (pte_write(*ptep)) { 715 + if (pte_write(*ptep) || pte_dirty(*ptep)) { 716 716 pte_t entry; 717 717 718 718 swapped = PageSwapCache(page); ··· 735 735 set_pte_at(mm, addr, ptep, entry); 736 736 goto out_unlock; 737 737 } 738 - entry = pte_wrprotect(entry); 738 + if (pte_dirty(entry)) 739 + set_page_dirty(page); 740 + entry = pte_mkclean(pte_wrprotect(entry)); 739 741 set_pte_at_notify(mm, addr, ptep, entry); 740 742 } 741 743 *orig_pte = *ptep;
+7 -1
mm/rmap.c
··· 381 381 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 382 382 { 383 383 if (PageAnon(page)) { 384 - if (vma->anon_vma->root != page_anon_vma(page)->root) 384 + struct anon_vma *page__anon_vma = page_anon_vma(page); 385 + /* 386 + * Note: swapoff's unuse_vma() is more efficient with this 387 + * check, and needs it to match anon_vma when KSM is active. 388 + */ 389 + if (!vma->anon_vma || !page__anon_vma || 390 + vma->anon_vma->root != page__anon_vma->root) 385 391 return -EFAULT; 386 392 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 387 393 if (!vma->vm_file ||
+10 -4
net/8021q/vlan_core.c
··· 24 24 25 25 if (vlan_dev) 26 26 skb->dev = vlan_dev; 27 - else if (vlan_id) 28 - goto drop; 27 + else if (vlan_id) { 28 + if (!(skb->dev->flags & IFF_PROMISC)) 29 + goto drop; 30 + skb->pkt_type = PACKET_OTHERHOST; 31 + } 29 32 30 33 return (polling ? netif_receive_skb(skb) : netif_rx(skb)); 31 34 ··· 105 102 106 103 if (vlan_dev) 107 104 skb->dev = vlan_dev; 108 - else if (vlan_id) 109 - goto drop; 105 + else if (vlan_id) { 106 + if (!(skb->dev->flags & IFF_PROMISC)) 107 + goto drop; 108 + skb->pkt_type = PACKET_OTHERHOST; 109 + } 110 110 111 111 for (p = napi->gro_list; p; p = p->next) { 112 112 NAPI_GRO_CB(p)->same_flow =
+1
net/ipv4/Kconfig
··· 217 217 218 218 config NET_IPGRE 219 219 tristate "IP: GRE tunnels over IP" 220 + depends on IPV6 || IPV6=n 220 221 help 221 222 Tunneling means encapsulating data of one protocol type within 222 223 another protocol and sending it over a channel that understands the
+14 -10
net/ipv4/tcp_timer.c
··· 135 135 136 136 /* This function calculates a "timeout" which is equivalent to the timeout of a 137 137 * TCP connection after "boundary" unsuccessful, exponentially backed-off 138 - * retransmissions with an initial RTO of TCP_RTO_MIN. 138 + * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if 139 + * syn_set flag is set. 139 140 */ 140 141 static bool retransmits_timed_out(struct sock *sk, 141 - unsigned int boundary) 142 + unsigned int boundary, 143 + bool syn_set) 142 144 { 143 145 unsigned int timeout, linear_backoff_thresh; 144 146 unsigned int start_ts; 147 + unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN; 145 148 146 149 if (!inet_csk(sk)->icsk_retransmits) 147 150 return false; ··· 154 151 else 155 152 start_ts = tcp_sk(sk)->retrans_stamp; 156 153 157 - linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); 154 + linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); 158 155 159 156 if (boundary <= linear_backoff_thresh) 160 - timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; 157 + timeout = ((2 << boundary) - 1) * rto_base; 161 158 else 162 - timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + 159 + timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + 163 160 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; 164 161 165 162 return (tcp_time_stamp - start_ts) >= timeout; ··· 170 167 { 171 168 struct inet_connection_sock *icsk = inet_csk(sk); 172 169 int retry_until; 173 - bool do_reset; 170 + bool do_reset, syn_set = 0; 174 171 175 172 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 176 173 if (icsk->icsk_retransmits) 177 174 dst_negative_advice(sk); 178 175 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 176 + syn_set = 1; 179 177 } else { 180 - if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { 178 + if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) { 181 179 /* Black hole detection */ 182 180 tcp_mtu_probing(icsk, sk); 183 181 ··· 191 187 192 188 retry_until = tcp_orphan_retries(sk, alive); 193 189 do_reset = alive || 194 - !retransmits_timed_out(sk, retry_until); 190 + !retransmits_timed_out(sk, retry_until, 0); 195 191 196 192 if (tcp_out_of_resources(sk, do_reset)) 197 193 return 1; 198 194 } 199 195 } 200 196 201 - if (retransmits_timed_out(sk, retry_until)) { 197 + if (retransmits_timed_out(sk, retry_until, syn_set)) { 202 198 /* Has it gone just too far? */ 203 199 tcp_write_err(sk); 204 200 return 1; ··· 440 436 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 441 437 } 442 438 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 443 - if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 439 + if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0)) 444 440 __sk_dst_reset(sk); 445 441 446 442 out:;
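A worked example of why the base matters, using the 2.6.36 defaults (treat the exact constants as illustrative): for an unanswered SYN, syn_set is true, so

    rto_base              = TCP_TIMEOUT_INIT = 3 * HZ
    boundary              = sysctl_tcp_syn_retries = 5
    linear_backoff_thresh = ilog2(120 / 3) = 5
    timeout               = ((2 << 5) - 1) * 3 s = 189 s

which matches the real backoff schedule (retransmits at 3, 9, 21, 45 and 93 seconds, give-up around 189 s). With the old TCP_RTO_MIN base of 200 ms the budget came out near 12.6 s, so SYN retries were cut off long before sysctl_tcp_syn_retries was exhausted.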
-4
net/mac80211/rx.c
··· 2199 2199 struct net_device *prev_dev = NULL; 2200 2200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2201 2201 2202 - if (status->flag & RX_FLAG_INTERNAL_CMTR) 2203 - goto out_free_skb; 2204 - 2205 2202 if (skb_headroom(skb) < sizeof(*rthdr) && 2206 2203 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2207 2204 goto out_free_skb; ··· 2257 2260 } else 2258 2261 goto out_free_skb; 2259 2262 2260 - status->flag |= RX_FLAG_INTERNAL_CMTR; 2261 2263 return; 2262 2264 2263 2265 out_free_skb:
+2 -1
net/phonet/pep.c
··· 225 225 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) 226 226 { 227 227 struct pep_sock *pn = pep_sk(sk); 228 - struct pnpipehdr *hdr = pnp_hdr(skb); 228 + struct pnpipehdr *hdr; 229 229 int wake = 0; 230 230 231 231 if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) 232 232 return -EINVAL; 233 233 234 + hdr = pnp_hdr(skb); 234 235 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 235 236 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", 236 237 (unsigned)hdr->data[0]);
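The reordering is the classic pskb_may_pull() rule: the call may reallocate the skb's linear data, so a header pointer computed before it can end up pointing into freed memory. Validate the length first, then take the pointer; a generic sketch with a made-up header struct:

    if (!pskb_may_pull(skb, sizeof(struct myhdr) + 4))
            return -EINVAL;
    hdr = (struct myhdr *)skb->data;        /* safe: at least sizeof(*hdr) + 4 bytes are linear now */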
+9 -8
samples/kfifo/dma-example.c
··· 24 24 { 25 25 int i; 26 26 unsigned int ret; 27 + unsigned int nents; 27 28 struct scatterlist sg[10]; 28 29 29 30 printk(KERN_INFO "DMA fifo test start\n"); ··· 62 61 * byte at the beginning, after the kfifo_skip(). 63 62 */ 64 63 sg_init_table(sg, ARRAY_SIZE(sg)); 65 - ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); 66 - printk(KERN_INFO "DMA sgl entries: %d\n", ret); 67 - if (!ret) { 64 + nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); 65 + printk(KERN_INFO "DMA sgl entries: %d\n", nents); 66 + if (!nents) { 68 67 /* fifo is full and no sgl was created */ 69 68 printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); 70 69 return -EIO; ··· 72 71 73 72 /* receive data */ 74 73 printk(KERN_INFO "scatterlist for receive:\n"); 75 - for (i = 0; i < ARRAY_SIZE(sg); i++) { 74 + for (i = 0; i < nents; i++) { 76 75 printk(KERN_INFO 77 76 "sg[%d] -> " 78 77 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", ··· 92 91 kfifo_dma_in_finish(&fifo, ret); 93 92 94 93 /* Prepare to transmit data, example: 8 bytes */ 95 - ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); 96 - printk(KERN_INFO "DMA sgl entries: %d\n", ret); 97 - if (!ret) { 94 + nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); 95 + printk(KERN_INFO "DMA sgl entries: %d\n", nents); 96 + if (!nents) { 98 97 /* no data was available and no sgl was created */ 99 98 printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); 100 99 return -EIO; 101 100 } 102 101 103 102 printk(KERN_INFO "scatterlist for transmit:\n"); 104 - for (i = 0; i < ARRAY_SIZE(sg); i++) { 103 + for (i = 0; i < nents; i++) { 105 104 printk(KERN_INFO 106 105 "sg[%d] -> " 107 106 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+5
sound/core/control.c
··· 31 31 32 32 /* max number of user-defined controls */ 33 33 #define MAX_USER_CONTROLS 32 34 + #define MAX_CONTROL_COUNT 1028 34 35 35 36 struct snd_kctl_ioctl { 36 37 struct list_head list; /* list of all ioctls */ ··· 196 195 197 196 if (snd_BUG_ON(!control || !control->count)) 198 197 return NULL; 198 + 199 + if (control->count > MAX_CONTROL_COUNT) 200 + return NULL; 201 + 199 202 kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); 200 203 if (kctl == NULL) { 201 204 snd_printk(KERN_ERR "Cannot allocate control instance\n");
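The added bound keeps a user-supplied count from blowing up (or overflowing) the size computation fed to kzalloc() just below it. The same guard pattern in generic form, with the limit and structures made up:

    #define MY_MAX_ITEMS    1024

    if (req->count == 0 || req->count > MY_MAX_ITEMS)
            return NULL;                            /* reject absurd or hostile counts */
    obj = kzalloc(sizeof(*obj) + req->count * sizeof(struct item), GFP_KERNEL);
    if (obj == NULL)
            return NULL;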
+1 -1
sound/i2c/other/ak4xxx-adda.c
··· 900 900 return 0; 901 901 } 902 902 #else /* !CONFIG_PROC_FS */ 903 - static int proc_init(struct snd_akm4xxx *ak) {} 903 + static int proc_init(struct snd_akm4xxx *ak) { return 0; } 904 904 #endif 905 905 906 906 int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
+1 -1
tools/perf/Makefile
··· 1017 1017 # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So 1018 1018 # we depend the various files onto their directories. 1019 1019 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h 1020 - $(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS))) 1020 + $(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS))) 1021 1021 # In the second step, we make a rule to actually create these directories 1022 1022 $(sort $(dir $(DIRECTORY_DEPS))): 1023 1023 $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
+2 -2
tools/perf/util/trace-event-scripting.c
··· 97 97 register_python_scripting(&python_scripting_unsupported_ops); 98 98 } 99 99 #else 100 - struct scripting_ops python_scripting_ops; 100 + extern struct scripting_ops python_scripting_ops; 101 101 102 102 void setup_python_scripting(void) 103 103 { ··· 158 158 register_perl_scripting(&perl_scripting_unsupported_ops); 159 159 } 160 160 #else 161 - struct scripting_ops perl_scripting_ops; 161 + extern struct scripting_ops perl_scripting_ops; 162 162 163 163 void setup_perl_scripting(void) 164 164 {
+1 -1
tools/perf/util/ui/browsers/hists.c
··· 773 773 774 774 switch (key) { 775 775 case 'a': 776 - if (browser->selection->map == NULL && 776 + if (browser->selection->map == NULL || 777 777 browser->selection->map->dso->annotate_warned) 778 778 continue; 779 779 goto do_annotate;