Merge commit 'v2.6.36-rc7' into spi/next

+994 -564
+4 -4
CREDITS
··· 3554 D: portions of the Linux Security Module (LSM) framework and security modules 3555 3556 N: Petr Vandrovec 3557 - E: vandrove@vc.cvut.cz 3558 D: Small contributions to ncpfs 3559 D: Matrox framebuffer driver 3560 - S: Chudenicka 8 3561 - S: 10200 Prague 10, Hostivar 3562 - S: Czech Republic 3563 3564 N: Thibaut Varene 3565 E: T-Bone@parisc-linux.org
··· 3554 D: portions of the Linux Security Module (LSM) framework and security modules 3555 3556 N: Petr Vandrovec 3557 + E: petr@vandrovec.name 3558 D: Small contributions to ncpfs 3559 D: Matrox framebuffer driver 3560 + S: 21513 Conradia Ct 3561 + S: Cupertino, CA 95014 3562 + S: USA 3563 3564 N: Thibaut Varene 3565 E: T-Bone@parisc-linux.org
+10 -4
MAINTAINERS
··· 962 S: Maintained 963 F: arch/arm/mach-s3c6410/ 964 965 ARM/SHMOBILE ARM ARCHITECTURE 966 M: Paul Mundt <lethal@linux-sh.org> 967 M: Magnus Damm <magnus.damm@gmail.com> ··· 3788 S: Supported 3789 3790 MATROX FRAMEBUFFER DRIVER 3791 - M: Petr Vandrovec <vandrove@vc.cvut.cz> 3792 L: linux-fbdev@vger.kernel.org 3793 - S: Maintained 3794 F: drivers/video/matrox/matroxfb_* 3795 F: include/linux/matroxfb.h 3796 ··· 3976 F: drivers/net/natsemi.c 3977 3978 NCP FILESYSTEM 3979 - M: Petr Vandrovec <vandrove@vc.cvut.cz> 3980 - S: Maintained 3981 F: fs/ncpfs/ 3982 3983 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
··· 962 S: Maintained 963 F: arch/arm/mach-s3c6410/ 964 965 + ARM/S5P ARM ARCHITECTURES 966 + M: Kukjin Kim <kgene.kim@samsung.com> 967 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 968 + L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 969 + S: Maintained 970 + F: arch/arm/mach-s5p*/ 971 + 972 ARM/SHMOBILE ARM ARCHITECTURE 973 M: Paul Mundt <lethal@linux-sh.org> 974 M: Magnus Damm <magnus.damm@gmail.com> ··· 3781 S: Supported 3782 3783 MATROX FRAMEBUFFER DRIVER 3784 L: linux-fbdev@vger.kernel.org 3785 + S: Orphan 3786 F: drivers/video/matrox/matroxfb_* 3787 F: include/linux/matroxfb.h 3788 ··· 3970 F: drivers/net/natsemi.c 3971 3972 NCP FILESYSTEM 3973 + M: Petr Vandrovec <petr@vandrovec.name> 3974 + S: Odd Fixes 3975 F: fs/ncpfs/ 3976 3977 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
+1 -1
Makefile
··· 1 VERSION = 2 2 PATCHLEVEL = 6 3 SUBLEVEL = 36 4 - EXTRAVERSION = -rc6 5 NAME = Sheep on Meth 6 7 # *DOCUMENTATION*
··· 1 VERSION = 2 2 PATCHLEVEL = 6 3 SUBLEVEL = 36 4 + EXTRAVERSION = -rc7 5 NAME = Sheep on Meth 6 7 # *DOCUMENTATION*
+1 -1
arch/alpha/kernel/signal.c
··· 48 sigset_t mask; 49 unsigned long res; 50 51 - siginitset(&mask, newmask & ~_BLOCKABLE); 52 res = sigprocmask(how, &mask, &oldmask); 53 if (!res) { 54 force_successful_syscall_return();
··· 48 sigset_t mask; 49 unsigned long res; 50 51 + siginitset(&mask, newmask & _BLOCKABLE); 52 res = sigprocmask(how, &mask, &oldmask); 53 if (!res) { 54 force_successful_syscall_return();
+5 -2
arch/arm/oprofile/common.c
··· 102 if (IS_ERR(pevent)) { 103 ret = PTR_ERR(pevent); 104 } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { 105 pr_warning("oprofile: failed to enable event %d " 106 "on CPU %d\n", event, cpu); 107 ret = -EBUSY; ··· 366 ret = init_driverfs(); 367 if (ret) { 368 kfree(counter_config); 369 return ret; 370 } 371 ··· 404 struct perf_event *event; 405 406 if (*perf_events) { 407 - exit_driverfs(); 408 for_each_possible_cpu(cpu) { 409 for (id = 0; id < perf_num_counters; ++id) { 410 event = perf_events[cpu][id]; ··· 414 } 415 } 416 417 - if (counter_config) 418 kfree(counter_config); 419 } 420 #else 421 int __init oprofile_arch_init(struct oprofile_operations *ops)
··· 102 if (IS_ERR(pevent)) { 103 ret = PTR_ERR(pevent); 104 } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { 105 + perf_event_release_kernel(pevent); 106 pr_warning("oprofile: failed to enable event %d " 107 "on CPU %d\n", event, cpu); 108 ret = -EBUSY; ··· 365 ret = init_driverfs(); 366 if (ret) { 367 kfree(counter_config); 368 + counter_config = NULL; 369 return ret; 370 } 371 ··· 402 struct perf_event *event; 403 404 if (*perf_events) { 405 for_each_possible_cpu(cpu) { 406 for (id = 0; id < perf_num_counters; ++id) { 407 event = perf_events[cpu][id]; ··· 413 } 414 } 415 416 + if (counter_config) { 417 kfree(counter_config); 418 + exit_driverfs(); 419 + } 420 } 421 #else 422 int __init oprofile_arch_init(struct oprofile_operations *ops)
+1 -1
arch/arm/plat-omap/Kconfig
··· 33 config OMAP_DEBUG_LEDS 34 bool 35 depends on OMAP_DEBUG_DEVICES 36 - default y if LEDS 37 38 config OMAP_RESET_CLOCKS 39 bool "Reset unused clocks during boot"
··· 33 config OMAP_DEBUG_LEDS 34 bool 35 depends on OMAP_DEBUG_DEVICES 36 + default y if LEDS_CLASS 37 38 config OMAP_RESET_CLOCKS 39 bool "Reset unused clocks during boot"
+1 -1
arch/arm/plat-omap/mcbsp.c
··· 156 /* Writing zero to RSYNC_ERR clears the IRQ */ 157 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); 158 } else { 159 - complete(&mcbsp_rx->tx_irq_completion); 160 } 161 162 return IRQ_HANDLED;
··· 156 /* Writing zero to RSYNC_ERR clears the IRQ */ 157 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); 158 } else { 159 + complete(&mcbsp_rx->rx_irq_completion); 160 } 161 162 return IRQ_HANDLED;
+1 -2
arch/avr32/kernel/module.c
··· 314 vfree(module->arch.syminfo); 315 module->arch.syminfo = NULL; 316 317 - return module_bug_finalize(hdr, sechdrs, module); 318 } 319 320 void module_arch_cleanup(struct module *module) 321 { 322 - module_bug_cleanup(module); 323 }
··· 314 vfree(module->arch.syminfo); 315 module->arch.syminfo = NULL; 316 317 + return 0; 318 } 319 320 void module_arch_cleanup(struct module *module) 321 { 322 }
+1 -2
arch/h8300/kernel/module.c
··· 112 const Elf_Shdr *sechdrs, 113 struct module *me) 114 { 115 - return module_bug_finalize(hdr, sechdrs, me); 116 } 117 118 void module_arch_cleanup(struct module *mod) 119 { 120 - module_bug_cleanup(mod); 121 }
··· 112 const Elf_Shdr *sechdrs, 113 struct module *me) 114 { 115 + return 0; 116 } 117 118 void module_arch_cleanup(struct module *mod) 119 { 120 }
+3 -3
arch/m68k/mac/macboing.c
··· 162 void mac_mksound( unsigned int freq, unsigned int length ) 163 { 164 __u32 cfreq = ( freq << 5 ) / 468; 165 - __u32 flags; 166 int i; 167 168 if ( mac_special_bell == NULL ) ··· 224 */ 225 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) 226 { 227 - __u32 flags; 228 229 /* if the bell is already ringing, ring longer */ 230 if ( mac_bell_duration > 0 ) ··· 271 static void mac_quadra_ring_bell( unsigned long ignored ) 272 { 273 int i, count = mac_asc_samplespersec / HZ; 274 - __u32 flags; 275 276 /* 277 * we neither want a sound buffer overflow nor underflow, so we need to match
··· 162 void mac_mksound( unsigned int freq, unsigned int length ) 163 { 164 __u32 cfreq = ( freq << 5 ) / 468; 165 + unsigned long flags; 166 int i; 167 168 if ( mac_special_bell == NULL ) ··· 224 */ 225 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) 226 { 227 + unsigned long flags; 228 229 /* if the bell is already ringing, ring longer */ 230 if ( mac_bell_duration > 0 ) ··· 271 static void mac_quadra_ring_bell( unsigned long ignored ) 272 { 273 int i, count = mac_asc_samplespersec / HZ; 274 + unsigned long flags; 275 276 /* 277 * we neither want a sound buffer overflow nor underflow, so we need to match
+19 -2
arch/mips/Kconfig
··· 13 select HAVE_KPROBES 14 select HAVE_KRETPROBES 15 select RTC_LIB if !MACH_LOONGSON 16 17 mainmenu "Linux/MIPS Kernel Configuration" 18 ··· 1647 select SYS_SUPPORTS_SMP 1648 select SMP_UP 1649 help 1650 - This is a kernel model which is also known a VSMP or lately 1651 - has been marketesed into SMVP. 1652 1653 config MIPS_MT_SMTC 1654 bool "SMTC: Use all TCs on all VPEs for SMP" ··· 1673 help 1674 This is a kernel model which is known a SMTC or lately has been 1675 marketesed into SMVP. 1676 1677 endchoice 1678
··· 13 select HAVE_KPROBES 14 select HAVE_KRETPROBES 15 select RTC_LIB if !MACH_LOONGSON 16 + select GENERIC_ATOMIC64 if !64BIT 17 18 mainmenu "Linux/MIPS Kernel Configuration" 19 ··· 1646 select SYS_SUPPORTS_SMP 1647 select SMP_UP 1648 help 1649 + This is a kernel model which is known a VSMP but lately has been 1650 + marketesed into SMVP. 1651 + Virtual SMP uses the processor's VPEs to implement virtual 1652 + processors. In currently available configuration of the 34K processor 1653 + this allows for a dual processor. Both processors will share the same 1654 + primary caches; each will obtain the half of the TLB for it's own 1655 + exclusive use. For a layman this model can be described as similar to 1656 + what Intel calls Hyperthreading. 1657 + 1658 + For further information see http://www.linux-mips.org/wiki/34K#VSMP 1659 1660 config MIPS_MT_SMTC 1661 bool "SMTC: Use all TCs on all VPEs for SMP" ··· 1664 help 1665 This is a kernel model which is known a SMTC or lately has been 1666 marketesed into SMVP. 1667 + is presenting the available TC's of the core as processors to Linux. 1668 + On currently available 34K processors this means a Linux system will 1669 + see up to 5 processors. The implementation of the SMTC kernel differs 1670 + significantly from VSMP and cannot efficiently coexist in the same 1671 + kernel binary so the choice between VSMP and SMTC is a compile time 1672 + decision. 1673 + 1674 + For further information see http://www.linux-mips.org/wiki/34K#SMTC 1675 1676 endchoice 1677
+2 -3
arch/mips/alchemy/common/prom.c
··· 43 char **prom_argv; 44 char **prom_envp; 45 46 - void prom_init_cmdline(void) 47 { 48 int i; 49 ··· 104 } 105 } 106 107 - int prom_get_ethernet_addr(char *ethernet_addr) 108 { 109 char *ethaddr_str; 110 ··· 123 124 return 0; 125 } 126 - EXPORT_SYMBOL(prom_get_ethernet_addr); 127 128 void __init prom_free_prom_memory(void) 129 {
··· 43 char **prom_argv; 44 char **prom_envp; 45 46 + void __init prom_init_cmdline(void) 47 { 48 int i; 49 ··· 104 } 105 } 106 107 + int __init prom_get_ethernet_addr(char *ethernet_addr) 108 { 109 char *ethaddr_str; 110 ··· 123 124 return 0; 125 } 126 127 void __init prom_free_prom_memory(void) 128 {
+1 -1
arch/mips/boot/compressed/Makefile
··· 59 hostprogs-y := calc_vmlinuz_load_addr 60 61 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ 62 - $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS)) 63 64 vmlinuzobjs-y += $(obj)/piggy.o 65
··· 59 hostprogs-y := calc_vmlinuz_load_addr 60 61 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ 62 + $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) 63 64 vmlinuzobjs-y += $(obj)/piggy.o 65
+4
arch/mips/cavium-octeon/Kconfig
··· 83 def_bool y 84 select SPARSEMEM_STATIC 85 depends on CPU_CAVIUM_OCTEON
··· 83 def_bool y 84 select SPARSEMEM_STATIC 85 depends on CPU_CAVIUM_OCTEON 86 + 87 + config CAVIUM_OCTEON_HELPER 88 + def_bool y 89 + depends on OCTEON_ETHERNET || PCI
+1 -1
arch/mips/cavium-octeon/cpu.c
··· 41 return NOTIFY_OK; /* Let default notifier send signals */ 42 } 43 44 - static int cnmips_cu2_setup(void) 45 { 46 return cu2_notifier(cnmips_cu2_call, 0); 47 }
··· 41 return NOTIFY_OK; /* Let default notifier send signals */ 42 } 43 44 + static int __init cnmips_cu2_setup(void) 45 { 46 return cu2_notifier(cnmips_cu2_call, 0); 47 }
+1 -1
arch/mips/cavium-octeon/executive/Makefile
··· 11 12 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o 13 14 - obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
··· 11 12 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o 13 14 + obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
+4
arch/mips/include/asm/atomic.h
··· 782 */ 783 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) 784 785 #endif /* CONFIG_64BIT */ 786 787 /*
··· 782 */ 783 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) 784 785 + #else /* !CONFIG_64BIT */ 786 + 787 + #include <asm-generic/atomic64.h> 788 + 789 #endif /* CONFIG_64BIT */ 790 791 /*
+1 -1
arch/mips/include/asm/cop2.h
··· 24 25 #define cu2_notifier(fn, pri) \ 26 ({ \ 27 - static struct notifier_block fn##_nb __cpuinitdata = { \ 28 .notifier_call = fn, \ 29 .priority = pri \ 30 }; \
··· 24 25 #define cu2_notifier(fn, pri) \ 26 ({ \ 27 + static struct notifier_block fn##_nb = { \ 28 .notifier_call = fn, \ 29 .priority = pri \ 30 }; \
+1
arch/mips/include/asm/gic.h
··· 321 */ 322 struct gic_intr_map { 323 unsigned int cpunum; /* Directed to this CPU */ 324 unsigned int pin; /* Directed to this Pin */ 325 unsigned int polarity; /* Polarity : +/- */ 326 unsigned int trigtype; /* Trigger : Edge/Levl */
··· 321 */ 322 struct gic_intr_map { 323 unsigned int cpunum; /* Directed to this CPU */ 324 + #define GIC_UNUSED 0xdead /* Dummy data */ 325 unsigned int pin; /* Directed to this Pin */ 326 unsigned int polarity; /* Polarity : +/- */ 327 unsigned int trigtype; /* Trigger : Edge/Levl */
+1 -1
arch/mips/include/asm/mach-tx49xx/kmalloc.h
··· 1 #ifndef __ASM_MACH_TX49XX_KMALLOC_H 2 #define __ASM_MACH_TX49XX_KMALLOC_H 3 4 - #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES 5 6 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */
··· 1 #ifndef __ASM_MACH_TX49XX_KMALLOC_H 2 #define __ASM_MACH_TX49XX_KMALLOC_H 3 4 + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 5 6 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */
-3
arch/mips/include/asm/mips-boards/maltaint.h
··· 88 89 #define GIC_EXT_INTR(x) x 90 91 - /* Dummy data */ 92 - #define X 0xdead 93 - 94 /* External Interrupts used for IPI */ 95 #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 96 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
··· 88 89 #define GIC_EXT_INTR(x) x 90 91 /* External Interrupts used for IPI */ 92 #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 93 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
+14
arch/mips/include/asm/page.h
··· 150 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 151 #endif 152 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 153 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 154 155 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
··· 150 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 151 #endif 152 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 153 + 154 + /* 155 + * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad 156 + * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The 157 + * discussion can be found in lkml posting 158 + * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is 159 + * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html 160 + * 161 + * It is unclear if the misscompilations mentioned in 162 + * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one 163 + * until GCC 3.x has been retired before we can apply 164 + * https://patchwork.linux-mips.org/patch/1541/ 165 + */ 166 + 167 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 168 169 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+2 -1
arch/mips/include/asm/thread_info.h
··· 146 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) 147 148 /* work to do on interrupt/exception return */ 149 - #define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP) 150 /* work to do on any return to u-space */ 151 #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) 152
··· 146 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) 147 148 /* work to do on interrupt/exception return */ 149 + #define _TIF_WORK_MASK (0x0000ffef & \ 150 + ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) 151 /* work to do on any return to u-space */ 152 #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) 153
+15 -6
arch/mips/include/asm/unistd.h
··· 356 #define __NR_perf_event_open (__NR_Linux + 333) 357 #define __NR_accept4 (__NR_Linux + 334) 358 #define __NR_recvmmsg (__NR_Linux + 335) 359 360 /* 361 * Offset of the last Linux o32 flavoured syscall 362 */ 363 - #define __NR_Linux_syscalls 335 364 365 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 366 367 #define __NR_O32_Linux 4000 368 - #define __NR_O32_Linux_syscalls 335 369 370 #if _MIPS_SIM == _MIPS_SIM_ABI64 371 ··· 671 #define __NR_perf_event_open (__NR_Linux + 292) 672 #define __NR_accept4 (__NR_Linux + 293) 673 #define __NR_recvmmsg (__NR_Linux + 294) 674 675 /* 676 * Offset of the last Linux 64-bit flavoured syscall 677 */ 678 - #define __NR_Linux_syscalls 294 679 680 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 681 682 #define __NR_64_Linux 5000 683 - #define __NR_64_Linux_syscalls 294 684 685 #if _MIPS_SIM == _MIPS_SIM_NABI32 686 ··· 991 #define __NR_accept4 (__NR_Linux + 297) 992 #define __NR_recvmmsg (__NR_Linux + 298) 993 #define __NR_getdents64 (__NR_Linux + 299) 994 995 /* 996 * Offset of the last N32 flavoured syscall 997 */ 998 - #define __NR_Linux_syscalls 299 999 1000 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1001 1002 #define __NR_N32_Linux 6000 1003 - #define __NR_N32_Linux_syscalls 299 1004 1005 #ifdef __KERNEL__ 1006
··· 356 #define __NR_perf_event_open (__NR_Linux + 333) 357 #define __NR_accept4 (__NR_Linux + 334) 358 #define __NR_recvmmsg (__NR_Linux + 335) 359 + #define __NR_fanotify_init (__NR_Linux + 336) 360 + #define __NR_fanotify_mark (__NR_Linux + 337) 361 + #define __NR_prlimit64 (__NR_Linux + 338) 362 363 /* 364 * Offset of the last Linux o32 flavoured syscall 365 */ 366 + #define __NR_Linux_syscalls 338 367 368 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 369 370 #define __NR_O32_Linux 4000 371 + #define __NR_O32_Linux_syscalls 338 372 373 #if _MIPS_SIM == _MIPS_SIM_ABI64 374 ··· 668 #define __NR_perf_event_open (__NR_Linux + 292) 669 #define __NR_accept4 (__NR_Linux + 293) 670 #define __NR_recvmmsg (__NR_Linux + 294) 671 + #define __NR_fanotify_init (__NR_Linux + 295) 672 + #define __NR_fanotify_mark (__NR_Linux + 296) 673 + #define __NR_prlimit64 (__NR_Linux + 297) 674 675 /* 676 * Offset of the last Linux 64-bit flavoured syscall 677 */ 678 + #define __NR_Linux_syscalls 297 679 680 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 681 682 #define __NR_64_Linux 5000 683 + #define __NR_64_Linux_syscalls 297 684 685 #if _MIPS_SIM == _MIPS_SIM_NABI32 686 ··· 985 #define __NR_accept4 (__NR_Linux + 297) 986 #define __NR_recvmmsg (__NR_Linux + 298) 987 #define __NR_getdents64 (__NR_Linux + 299) 988 + #define __NR_fanotify_init (__NR_Linux + 300) 989 + #define __NR_fanotify_mark (__NR_Linux + 301) 990 + #define __NR_prlimit64 (__NR_Linux + 302) 991 992 /* 993 * Offset of the last N32 flavoured syscall 994 */ 995 + #define __NR_Linux_syscalls 302 996 997 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 998 999 #define __NR_N32_Linux 6000 1000 + #define __NR_N32_Linux_syscalls 302 1001 1002 #ifdef __KERNEL__ 1003
+2 -3
arch/mips/kernel/irq-gic.c
··· 7 #include <asm/io.h> 8 #include <asm/gic.h> 9 #include <asm/gcmpregs.h> 10 - #include <asm/mips-boards/maltaint.h> 11 #include <asm/irq.h> 12 #include <linux/hardirq.h> 13 #include <asm-generic/bitops/find.h> ··· 130 int i; 131 132 irq -= _irqbase; 133 - pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); 134 cpumask_and(&tmp, cpumask, cpu_online_mask); 135 if (cpus_empty(tmp)) 136 return -1; ··· 221 /* Setup specifics */ 222 for (i = 0; i < mapsize; i++) { 223 cpu = intrmap[i].cpunum; 224 - if (cpu == X) 225 continue; 226 if (cpu == 0 && i != 0 && intrmap[i].flags == 0) 227 continue;
··· 7 #include <asm/io.h> 8 #include <asm/gic.h> 9 #include <asm/gcmpregs.h> 10 #include <asm/irq.h> 11 #include <linux/hardirq.h> 12 #include <asm-generic/bitops/find.h> ··· 131 int i; 132 133 irq -= _irqbase; 134 + pr_debug("%s(%d) called\n", __func__, irq); 135 cpumask_and(&tmp, cpumask, cpu_online_mask); 136 if (cpus_empty(tmp)) 137 return -1; ··· 222 /* Setup specifics */ 223 for (i = 0; i < mapsize; i++) { 224 cpu = intrmap[i].cpunum; 225 + if (cpu == GIC_UNUSED) 226 continue; 227 if (cpu == 0 && i != 0 && intrmap[i].flags == 0) 228 continue;
+1 -1
arch/mips/kernel/kgdb.c
··· 283 struct pt_regs *regs = args->regs; 284 int trap = (regs->cp0_cause & 0x7c) >> 2; 285 286 - /* Userpace events, ignore. */ 287 if (user_mode(regs)) 288 return NOTIFY_DONE; 289
··· 283 struct pt_regs *regs = args->regs; 284 int trap = (regs->cp0_cause & 0x7c) >> 2; 285 286 + /* Userspace events, ignore. */ 287 if (user_mode(regs)) 288 return NOTIFY_DONE; 289
+1 -1
arch/mips/kernel/kspd.c
··· 251 memset(&tz, 0, sizeof(tz)); 252 if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, 253 (int)&tz, 0, 0)) == 0) 254 - ret.retval = tv.tv_sec; 255 break; 256 257 case MTSP_SYSCALL_EXIT:
··· 251 memset(&tz, 0, sizeof(tz)); 252 if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, 253 (int)&tz, 0, 0)) == 0) 254 + ret.retval = tv.tv_sec; 255 break; 256 257 case MTSP_SYSCALL_EXIT:
+7
arch/mips/kernel/linux32.c
··· 341 { 342 return sys_lookup_dcookie(merge_64(a0, a1), buf, len); 343 }
··· 341 { 342 return sys_lookup_dcookie(merge_64(a0, a1), buf, len); 343 } 344 + 345 + SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, 346 + u64, a3, u64, a4, int, dfd, const char __user *, pathname) 347 + { 348 + return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), 349 + dfd, pathname); 350 + }
+4 -1
arch/mips/kernel/scall32-o32.S
··· 583 sys sys_rt_tgsigqueueinfo 4 584 sys sys_perf_event_open 5 585 sys sys_accept4 4 586 - sys sys_recvmmsg 5 587 .endm 588 589 /* We pre-compute the number of _instruction_ bytes needed to
··· 583 sys sys_rt_tgsigqueueinfo 4 584 sys sys_perf_event_open 5 585 sys sys_accept4 4 586 + sys sys_recvmmsg 5 /* 4335 */ 587 + sys sys_fanotify_init 2 588 + sys sys_fanotify_mark 6 589 + sys sys_prlimit64 4 590 .endm 591 592 /* We pre-compute the number of _instruction_ bytes needed to
+5 -2
arch/mips/kernel/scall64-64.S
··· 416 PTR sys_pipe2 417 PTR sys_inotify_init1 418 PTR sys_preadv 419 - PTR sys_pwritev /* 5390 */ 420 PTR sys_rt_tgsigqueueinfo 421 PTR sys_perf_event_open 422 PTR sys_accept4 423 - PTR sys_recvmmsg 424 .size sys_call_table,.-sys_call_table
··· 416 PTR sys_pipe2 417 PTR sys_inotify_init1 418 PTR sys_preadv 419 + PTR sys_pwritev /* 5290 */ 420 PTR sys_rt_tgsigqueueinfo 421 PTR sys_perf_event_open 422 PTR sys_accept4 423 + PTR sys_recvmmsg 424 + PTR sys_fanotify_init /* 5295 */ 425 + PTR sys_fanotify_mark 426 + PTR sys_prlimit64 427 .size sys_call_table,.-sys_call_table
+4 -1
arch/mips/kernel/scall64-n32.S
··· 419 PTR sys_perf_event_open 420 PTR sys_accept4 421 PTR compat_sys_recvmmsg 422 - PTR sys_getdents 423 .size sysn32_call_table,.-sysn32_call_table
··· 419 PTR sys_perf_event_open 420 PTR sys_accept4 421 PTR compat_sys_recvmmsg 422 + PTR sys_getdents64 423 + PTR sys_fanotify_init /* 6300 */ 424 + PTR sys_fanotify_mark 425 + PTR sys_prlimit64 426 .size sysn32_call_table,.-sysn32_call_table
+4 -1
arch/mips/kernel/scall64-o32.S
··· 538 PTR compat_sys_rt_tgsigqueueinfo 539 PTR sys_perf_event_open 540 PTR sys_accept4 541 - PTR compat_sys_recvmmsg 542 .size sys_call_table,.-sys_call_table
··· 538 PTR compat_sys_rt_tgsigqueueinfo 539 PTR sys_perf_event_open 540 PTR sys_accept4 541 + PTR compat_sys_recvmmsg /* 4335 */ 542 + PTR sys_fanotify_init 543 + PTR sys_32_fanotify_mark 544 + PTR sys_prlimit64 545 .size sys_call_table,.-sys_call_table
+20 -8
arch/mips/mm/dma-default.c
··· 44 45 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) 46 { 47 /* ignore region specifiers */ 48 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 49 50 - #ifdef CONFIG_ZONE_DMA 51 if (dev == NULL) 52 - gfp |= __GFP_DMA; 53 - else if (dev->coherent_dma_mask < DMA_BIT_MASK(24)) 54 - gfp |= __GFP_DMA; 55 else 56 #endif 57 - #ifdef CONFIG_ZONE_DMA32 58 if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) 59 - gfp |= __GFP_DMA32; 60 else 61 #endif 62 - ; 63 64 /* Don't invoke OOM killer */ 65 gfp |= __GFP_NORETRY; 66 67 - return gfp; 68 } 69 70 void *dma_alloc_noncoherent(struct device *dev, size_t size,
··· 44 45 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) 46 { 47 + gfp_t dma_flag; 48 + 49 /* ignore region specifiers */ 50 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 51 52 + #ifdef CONFIG_ISA 53 if (dev == NULL) 54 + dma_flag = __GFP_DMA; 55 else 56 #endif 57 + #if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA) 58 if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) 59 + dma_flag = __GFP_DMA; 60 + else if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) 61 + dma_flag = __GFP_DMA32; 62 else 63 #endif 64 + #if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA) 65 + if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) 66 + dma_flag = __GFP_DMA32; 67 + else 68 + #endif 69 + #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) 70 + if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) 71 + dma_flag = __GFP_DMA; 72 + else 73 + #endif 74 + dma_flag = 0; 75 76 /* Don't invoke OOM killer */ 77 gfp |= __GFP_NORETRY; 78 79 + return gfp | dma_flag; 80 } 81 82 void *dma_alloc_noncoherent(struct device *dev, size_t size,
+1 -1
arch/mips/mm/sc-rm7k.c
··· 30 #define tc_lsize 32 31 32 extern unsigned long icache_way_size, dcache_way_size; 33 - unsigned long tcache_size; 34 35 #include <asm/r4kcache.h> 36
··· 30 #define tc_lsize 32 31 32 extern unsigned long icache_way_size, dcache_way_size; 33 + static unsigned long tcache_size; 34 35 #include <asm/r4kcache.h> 36
+3
arch/mips/mti-malta/malta-int.c
··· 385 */ 386 387 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK 388 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { 389 { X, X, X, X, 0 }, 390 { X, X, X, X, 0 }, ··· 406 { X, X, X, X, 0 }, 407 /* The remainder of this table is initialised by fill_ipi_map */ 408 }; 409 410 /* 411 * GCMP needs to be detected before any SMP initialisation
··· 385 */ 386 387 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK 388 + #define X GIC_UNUSED 389 + 390 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { 391 { X, X, X, X, 0 }, 392 { X, X, X, X, 0 }, ··· 404 { X, X, X, X, 0 }, 405 /* The remainder of this table is initialised by fill_ipi_map */ 406 }; 407 + #undef X 408 409 /* 410 * GCMP needs to be detected before any SMP initialisation
+1 -1
arch/mips/pci/pci-rc32434.c
··· 118 if (!((pcicvalue == PCIM_H_EA) || 119 (pcicvalue == PCIM_H_IA_FIX) || 120 (pcicvalue == PCIM_H_IA_RR))) { 121 - pr_err(KERN_ERR "PCI init error!!!\n"); 122 /* Not in Host Mode, return ERROR */ 123 return -1; 124 }
··· 118 if (!((pcicvalue == PCIM_H_EA) || 119 (pcicvalue == PCIM_H_IA_FIX) || 120 (pcicvalue == PCIM_H_IA_RR))) { 121 + pr_err("PCI init error!!!\n"); 122 /* Not in Host Mode, return ERROR */ 123 return -1; 124 }
+5 -15
arch/mips/pnx8550/common/reset.c
··· 22 */ 23 #include <linux/kernel.h> 24 25 #include <asm/reboot.h> 26 #include <glb.h> 27 28 void pnx8550_machine_restart(char *command) 29 { 30 - char head[] = "************* Machine restart *************"; 31 - char foot[] = "*******************************************"; 32 - 33 - printk("\n\n"); 34 - printk("%s\n", head); 35 - if (command != NULL) 36 - printk("* %s\n", command); 37 - printk("%s\n", foot); 38 - 39 PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; 40 } 41 42 void pnx8550_machine_halt(void) 43 { 44 - printk("*** Machine halt. (Not implemented) ***\n"); 45 - } 46 - 47 - void pnx8550_machine_power_off(void) 48 - { 49 - printk("*** Machine power off. (Not implemented) ***\n"); 50 }
··· 22 */ 23 #include <linux/kernel.h> 24 25 + #include <asm/processor.h> 26 #include <asm/reboot.h> 27 #include <glb.h> 28 29 void pnx8550_machine_restart(char *command) 30 { 31 PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; 32 } 33 34 void pnx8550_machine_halt(void) 35 { 36 + while (1) { 37 + if (cpu_wait) 38 + cpu_wait(); 39 + } 40 }
+1 -2
arch/mips/pnx8550/common/setup.c
··· 44 extern void __init board_setup(void); 45 extern void pnx8550_machine_restart(char *); 46 extern void pnx8550_machine_halt(void); 47 - extern void pnx8550_machine_power_off(void); 48 extern struct resource ioport_resource; 49 extern struct resource iomem_resource; 50 extern char *prom_getcmdline(void); ··· 99 100 _machine_restart = pnx8550_machine_restart; 101 _machine_halt = pnx8550_machine_halt; 102 - pm_power_off = pnx8550_machine_power_off; 103 104 /* Clear the Global 2 Register, PCI Inta Output Enable Registers 105 Bit 1:Enable DAC Powerdown
··· 44 extern void __init board_setup(void); 45 extern void pnx8550_machine_restart(char *); 46 extern void pnx8550_machine_halt(void); 47 extern struct resource ioport_resource; 48 extern struct resource iomem_resource; 49 extern char *prom_getcmdline(void); ··· 100 101 _machine_restart = pnx8550_machine_restart; 102 _machine_halt = pnx8550_machine_halt; 103 + pm_power_off = pnx8550_machine_halt; 104 105 /* Clear the Global 2 Register, PCI Inta Output Enable Registers 106 Bit 1:Enable DAC Powerdown
+1 -2
arch/mn10300/kernel/module.c
··· 206 const Elf_Shdr *sechdrs, 207 struct module *me) 208 { 209 - return module_bug_finalize(hdr, sechdrs, me); 210 } 211 212 /* ··· 214 */ 215 void module_arch_cleanup(struct module *mod) 216 { 217 - module_bug_cleanup(mod); 218 }
··· 206 const Elf_Shdr *sechdrs, 207 struct module *me) 208 { 209 + return 0; 210 } 211 212 /* ··· 214 */ 215 void module_arch_cleanup(struct module *mod) 216 { 217 }
+19 -1
arch/mn10300/mm/cache.c
··· 54 void flush_icache_range(unsigned long start, unsigned long end) 55 { 56 #ifdef CONFIG_MN10300_CACHE_WBACK 57 - unsigned long addr, size, off; 58 struct page *page; 59 pgd_t *pgd; 60 pud_t *pud; 61 pmd_t *pmd; 62 pte_t *ppte, pte; 63 64 for (; start < end; start += size) { 65 /* work out how much of the page to flush */ ··· 121 } 122 #endif 123 124 mn10300_icache_inv(); 125 } 126 EXPORT_SYMBOL(flush_icache_range);
··· 54 void flush_icache_range(unsigned long start, unsigned long end) 55 { 56 #ifdef CONFIG_MN10300_CACHE_WBACK 57 + unsigned long addr, size, base, off; 58 struct page *page; 59 pgd_t *pgd; 60 pud_t *pud; 61 pmd_t *pmd; 62 pte_t *ppte, pte; 63 + 64 + if (end > 0x80000000UL) { 65 + /* addresses above 0xa0000000 do not go through the cache */ 66 + if (end > 0xa0000000UL) { 67 + end = 0xa0000000UL; 68 + if (start >= end) 69 + return; 70 + } 71 + 72 + /* kernel addresses between 0x80000000 and 0x9fffffff do not 73 + * require page tables, so we just map such addresses directly */ 74 + base = (start >= 0x80000000UL) ? start : 0x80000000UL; 75 + mn10300_dcache_flush_range(base, end); 76 + if (base == start) 77 + goto invalidate; 78 + end = base; 79 + } 80 81 for (; start < end; start += size) { 82 /* work out how much of the page to flush */ ··· 104 } 105 #endif 106 107 + invalidate: 108 mn10300_icache_inv(); 109 } 110 EXPORT_SYMBOL(flush_icache_range);
+1 -2
arch/parisc/kernel/module.c
··· 941 nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; 942 DEBUGP("NEW num_symtab %lu\n", nsyms); 943 symhdr->sh_size = nsyms * sizeof(Elf_Sym); 944 - return module_bug_finalize(hdr, sechdrs, me); 945 } 946 947 void module_arch_cleanup(struct module *mod) 948 { 949 deregister_unwind_table(mod); 950 - module_bug_cleanup(mod); 951 }
··· 941 nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; 942 DEBUGP("NEW num_symtab %lu\n", nsyms); 943 symhdr->sh_size = nsyms * sizeof(Elf_Sym); 944 + return 0; 945 } 946 947 void module_arch_cleanup(struct module *mod) 948 { 949 deregister_unwind_table(mod); 950 }
-6
arch/powerpc/kernel/module.c
··· 63 const Elf_Shdr *sechdrs, struct module *me) 64 { 65 const Elf_Shdr *sect; 66 - int err; 67 - 68 - err = module_bug_finalize(hdr, sechdrs, me); 69 - if (err) 70 - return err; 71 72 /* Apply feature fixups */ 73 sect = find_section(hdr, sechdrs, "__ftr_fixup"); ··· 96 97 void module_arch_cleanup(struct module *mod) 98 { 99 - module_bug_cleanup(mod); 100 }
··· 63 const Elf_Shdr *sechdrs, struct module *me) 64 { 65 const Elf_Shdr *sect; 66 67 /* Apply feature fixups */ 68 sect = find_section(hdr, sechdrs, "__ftr_fixup"); ··· 101 102 void module_arch_cleanup(struct module *mod) 103 { 104 }
+1 -1
arch/powerpc/platforms/512x/clock.c
··· 57 int id_match = 0; 58 59 if (dev == NULL || id == NULL) 60 - return NULL; 61 62 mutex_lock(&clocks_mutex); 63 list_for_each_entry(p, &clocks, node) {
··· 57 int id_match = 0; 58 59 if (dev == NULL || id == NULL) 60 + return clk; 61 62 mutex_lock(&clocks_mutex); 63 list_for_each_entry(p, &clocks, node) {
+6 -3
arch/powerpc/platforms/52xx/efika.c
··· 99 if (bus_range == NULL || len < 2 * sizeof(int)) { 100 printk(KERN_WARNING EFIKA_PLATFORM_NAME 101 ": Can't get bus-range for %s\n", pcictrl->full_name); 102 - return; 103 } 104 105 if (bus_range[1] == bus_range[0]) ··· 111 printk(" controlled by %s\n", pcictrl->full_name); 112 printk("\n"); 113 114 - hose = pcibios_alloc_controller(of_node_get(pcictrl)); 115 if (!hose) { 116 printk(KERN_WARNING EFIKA_PLATFORM_NAME 117 ": Can't allocate PCI controller structure for %s\n", 118 pcictrl->full_name); 119 - return; 120 } 121 122 hose->first_busno = bus_range[0]; ··· 124 hose->ops = &rtas_pci_ops; 125 126 pci_process_bridge_OF_ranges(hose, pcictrl, 0); 127 } 128 129 #else
··· 99 if (bus_range == NULL || len < 2 * sizeof(int)) { 100 printk(KERN_WARNING EFIKA_PLATFORM_NAME 101 ": Can't get bus-range for %s\n", pcictrl->full_name); 102 + goto out_put; 103 } 104 105 if (bus_range[1] == bus_range[0]) ··· 111 printk(" controlled by %s\n", pcictrl->full_name); 112 printk("\n"); 113 114 + hose = pcibios_alloc_controller(pcictrl); 115 if (!hose) { 116 printk(KERN_WARNING EFIKA_PLATFORM_NAME 117 ": Can't allocate PCI controller structure for %s\n", 118 pcictrl->full_name); 119 + goto out_put; 120 } 121 122 hose->first_busno = bus_range[0]; ··· 124 hose->ops = &rtas_pci_ops; 125 126 pci_process_bridge_OF_ranges(hose, pcictrl, 0); 127 + return; 128 + out_put: 129 + of_node_put(pcictrl); 130 } 131 132 #else
+6 -2
arch/powerpc/platforms/52xx/mpc52xx_common.c
··· 325 clrbits32(&simple_gpio->simple_dvo, sync | out); 326 clrbits8(&wkup_gpio->wkup_dvo, reset); 327 328 - /* wait at lease 1 us */ 329 - udelay(2); 330 331 /* Deassert reset */ 332 setbits8(&wkup_gpio->wkup_dvo, reset); 333 334 /* Restore pin-muxing */ 335 out_be32(&simple_gpio->port_config, mux);
··· 325 clrbits32(&simple_gpio->simple_dvo, sync | out); 326 clrbits8(&wkup_gpio->wkup_dvo, reset); 327 328 + /* wait for 1 us */ 329 + udelay(1); 330 331 /* Deassert reset */ 332 setbits8(&wkup_gpio->wkup_dvo, reset); 333 + 334 + /* wait at least 200ns */ 335 + /* 7 ~= (200ns * timebase) / ns2sec */ 336 + __delay(7); 337 338 /* Restore pin-muxing */ 339 out_be32(&simple_gpio->port_config, mux);
+1 -2
arch/s390/kernel/module.c
··· 407 { 408 vfree(me->arch.syminfo); 409 me->arch.syminfo = NULL; 410 - return module_bug_finalize(hdr, sechdrs, me); 411 } 412 413 void module_arch_cleanup(struct module *mod) 414 { 415 - module_bug_cleanup(mod); 416 }
··· 407 { 408 vfree(me->arch.syminfo); 409 me->arch.syminfo = NULL; 410 + return 0; 411 } 412 413 void module_arch_cleanup(struct module *mod) 414 { 415 }
-2
arch/sh/kernel/module.c
··· 149 int ret = 0; 150 151 ret |= module_dwarf_finalize(hdr, sechdrs, me); 152 - ret |= module_bug_finalize(hdr, sechdrs, me); 153 154 return ret; 155 } 156 157 void module_arch_cleanup(struct module *mod) 158 { 159 - module_bug_cleanup(mod); 160 module_dwarf_cleanup(mod); 161 }
··· 149 int ret = 0; 150 151 ret |= module_dwarf_finalize(hdr, sechdrs, me); 152 153 return ret; 154 } 155 156 void module_arch_cleanup(struct module *mod) 157 { 158 module_dwarf_cleanup(mod); 159 }
+3 -14
arch/um/drivers/net_kern.c
··· 255 netif_wake_queue(dev); 256 } 257 258 - static int uml_net_set_mac(struct net_device *dev, void *addr) 259 - { 260 - struct uml_net_private *lp = netdev_priv(dev); 261 - struct sockaddr *hwaddr = addr; 262 - 263 - spin_lock_irq(&lp->lock); 264 - eth_mac_addr(dev, hwaddr->sa_data); 265 - spin_unlock_irq(&lp->lock); 266 - 267 - return 0; 268 - } 269 - 270 static int uml_net_change_mtu(struct net_device *dev, int new_mtu) 271 { 272 dev->mtu = new_mtu; ··· 361 .ndo_start_xmit = uml_net_start_xmit, 362 .ndo_set_multicast_list = uml_net_set_multicast_list, 363 .ndo_tx_timeout = uml_net_tx_timeout, 364 - .ndo_set_mac_address = uml_net_set_mac, 365 .ndo_change_mtu = uml_net_change_mtu, 366 .ndo_validate_addr = eth_validate_addr, 367 }; ··· 460 ((*transport->user->init)(&lp->user, dev) != 0)) 461 goto out_unregister; 462 463 - eth_mac_addr(dev, device->mac); 464 dev->mtu = transport->user->mtu; 465 dev->netdev_ops = &uml_netdev_ops; 466 dev->ethtool_ops = &uml_net_ethtool_ops;
··· 255 netif_wake_queue(dev); 256 } 257 258 static int uml_net_change_mtu(struct net_device *dev, int new_mtu) 259 { 260 dev->mtu = new_mtu; ··· 373 .ndo_start_xmit = uml_net_start_xmit, 374 .ndo_set_multicast_list = uml_net_set_multicast_list, 375 .ndo_tx_timeout = uml_net_tx_timeout, 376 + .ndo_set_mac_address = eth_mac_addr, 377 .ndo_change_mtu = uml_net_change_mtu, 378 .ndo_validate_addr = eth_validate_addr, 379 }; ··· 472 ((*transport->user->init)(&lp->user, dev) != 0)) 473 goto out_unregister; 474 475 + /* don't use eth_mac_addr, it will not work here */ 476 + memcpy(dev->dev_addr, device->mac, ETH_ALEN); 477 dev->mtu = transport->user->mtu; 478 dev->netdev_ops = &uml_netdev_ops; 479 dev->ethtool_ops = &uml_net_ethtool_ops;
+1 -1
arch/x86/kernel/acpi/cstate.c
··· 61 unsigned int ecx; 62 } states[ACPI_PROCESSOR_MAX_POWER]; 63 }; 64 - static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ 65 66 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; 67
··· 61 unsigned int ecx; 62 } states[ACPI_PROCESSOR_MAX_POWER]; 63 }; 64 + static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ 65 66 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; 67
+8 -3
arch/x86/kernel/apic/io_apic.c
··· 306 307 old_cfg = old_desc->chip_data; 308 309 - memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); 310 311 init_copy_irq_2_pin(old_cfg, cfg, node); 312 } 313 314 - static void free_irq_cfg(struct irq_cfg *old_cfg) 315 { 316 - kfree(old_cfg); 317 } 318 319 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
··· 306 307 old_cfg = old_desc->chip_data; 308 309 + cfg->vector = old_cfg->vector; 310 + cfg->move_in_progress = old_cfg->move_in_progress; 311 + cpumask_copy(cfg->domain, old_cfg->domain); 312 + cpumask_copy(cfg->old_domain, old_cfg->old_domain); 313 314 init_copy_irq_2_pin(old_cfg, cfg, node); 315 } 316 317 + static void free_irq_cfg(struct irq_cfg *cfg) 318 { 319 + free_cpumask_var(cfg->domain); 320 + free_cpumask_var(cfg->old_domain); 321 + kfree(cfg); 322 } 323 324 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
+1 -1
arch/x86/kernel/cpu/common.c
··· 545 } 546 } 547 548 - static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) 549 { 550 u32 tfms, xlvl; 551 u32 ebx;
··· 545 } 546 } 547 548 + void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) 549 { 550 u32 tfms, xlvl; 551 u32 ebx;
+1
arch/x86/kernel/cpu/cpu.h
··· 33 *const __x86_cpu_dev_end[]; 34 35 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); 36 37 #endif
··· 33 *const __x86_cpu_dev_end[]; 34 35 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); 36 + extern void get_cpu_cap(struct cpuinfo_x86 *c); 37 38 #endif
+12 -6
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
··· 368 return -ENODEV; 369 370 out_obj = output.pointer; 371 - if (out_obj->type != ACPI_TYPE_BUFFER) 372 - return -ENODEV; 373 374 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); 375 - if (errors) 376 - return -ENODEV; 377 378 supported = *((u32 *)(out_obj->buffer.pointer + 4)); 379 - if (!(supported & 0x1)) 380 - return -ENODEV; 381 382 out_free: 383 kfree(output.pointer);
··· 368 return -ENODEV; 369 370 out_obj = output.pointer; 371 + if (out_obj->type != ACPI_TYPE_BUFFER) { 372 + ret = -ENODEV; 373 + goto out_free; 374 + } 375 376 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); 377 + if (errors) { 378 + ret = -ENODEV; 379 + goto out_free; 380 + } 381 382 supported = *((u32 *)(out_obj->buffer.pointer + 4)); 383 + if (!(supported & 0x1)) { 384 + ret = -ENODEV; 385 + goto out_free; 386 + } 387 388 out_free: 389 kfree(output.pointer);
+1
arch/x86/kernel/cpu/intel.c
··· 39 misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; 40 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 41 c->cpuid_level = cpuid_eax(0); 42 } 43 } 44
··· 39 misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; 40 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 41 c->cpuid_level = cpuid_eax(0); 42 + get_cpu_cap(c); 43 } 44 } 45
+5 -1
arch/x86/kernel/cpu/perf_event_p4.c
··· 660 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 661 int overflow; 662 663 - if (!test_bit(idx, cpuc->active_mask)) 664 continue; 665 666 event = cpuc->events[idx]; 667 hwc = &event->hw;
··· 660 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 661 int overflow; 662 663 + if (!test_bit(idx, cpuc->active_mask)) { 664 + /* catch in-flight IRQs */ 665 + if (__test_and_clear_bit(idx, cpuc->running)) 666 + handled++; 667 continue; 668 + } 669 670 event = cpuc->events[idx]; 671 hwc = &event->hw;
+1 -1
arch/x86/kernel/hpet.c
··· 506 { 507 unsigned int irq; 508 509 - irq = create_irq(); 510 if (!irq) 511 return -EINVAL; 512
··· 506 { 507 unsigned int irq; 508 509 + irq = create_irq_nr(0, -1); 510 if (!irq) 511 return -EINVAL; 512
+1 -2
arch/x86/kernel/module.c
··· 239 apply_paravirt(pseg, pseg + para->sh_size); 240 } 241 242 - return module_bug_finalize(hdr, sechdrs, me); 243 } 244 245 void module_arch_cleanup(struct module *mod) 246 { 247 alternatives_smp_module_del(mod); 248 - module_bug_cleanup(mod); 249 }
··· 239 apply_paravirt(pseg, pseg + para->sh_size); 240 } 241 242 + return 0; 243 } 244 245 void module_arch_cleanup(struct module *mod) 246 { 247 alternatives_smp_module_del(mod); 248 }
+1
arch/x86/oprofile/nmi_int.c
··· 674 case 0x0f: 675 case 0x16: 676 case 0x17: 677 *cpu_type = "i386/core_2"; 678 break; 679 case 0x1a:
··· 674 case 0x0f: 675 case 0x16: 676 case 0x17: 677 + case 0x1d: 678 *cpu_type = "i386/core_2"; 679 break; 680 case 0x1a:
+3 -2
arch/x86/xen/time.c
··· 489 __init void xen_hvm_init_time_ops(void) 490 { 491 /* vector callback is needed otherwise we cannot receive interrupts 492 - * on cpu > 0 */ 493 - if (!xen_have_vector_callback && num_present_cpus() > 1) 494 return; 495 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { 496 printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
··· 489 __init void xen_hvm_init_time_ops(void) 490 { 491 /* vector callback is needed otherwise we cannot receive interrupts 492 + * on cpu > 0 and at this point we don't know how many cpus are 493 + * available */ 494 + if (!xen_have_vector_callback) 495 return; 496 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { 497 printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
+1 -1
drivers/acpi/Kconfig
··· 105 106 Be aware that using this interface can confuse your Embedded 107 Controller in a way that a normal reboot is not enough. You then 108 - have to power of your system, and remove the laptop battery for 109 some seconds. 110 An Embedded Controller typically is available on laptops and reads 111 sensor values like battery state and temperature.
··· 105 106 Be aware that using this interface can confuse your Embedded 107 Controller in a way that a normal reboot is not enough. You then 108 + have to power off your system, and remove the laptop battery for 109 some seconds. 110 An Embedded Controller typically is available on laptops and reads 111 sensor values like battery state and temperature.
+18 -16
drivers/acpi/acpi_pad.c
··· 382 device_remove_file(&device->dev, &dev_attr_rrtime); 383 } 384 385 - /* Query firmware how many CPUs should be idle */ 386 - static int acpi_pad_pur(acpi_handle handle, int *num_cpus) 387 { 388 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 389 union acpi_object *package; 390 - int rev, num, ret = -EINVAL; 391 392 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) 393 - return -EINVAL; 394 395 if (!buffer.length || !buffer.pointer) 396 - return -EINVAL; 397 398 package = buffer.pointer; 399 - if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) 400 - goto out; 401 - rev = package->package.elements[0].integer.value; 402 - num = package->package.elements[1].integer.value; 403 - if (rev != 1 || num < 0) 404 - goto out; 405 - *num_cpus = num; 406 - ret = 0; 407 - out: 408 kfree(buffer.pointer); 409 - return ret; 410 } 411 412 /* Notify firmware how many CPUs are idle */ ··· 434 uint32_t idle_cpus; 435 436 mutex_lock(&isolated_cpus_lock); 437 - if (acpi_pad_pur(handle, &num_cpus)) { 438 mutex_unlock(&isolated_cpus_lock); 439 return; 440 }
··· 382 device_remove_file(&device->dev, &dev_attr_rrtime); 383 } 384 385 + /* 386 + * Query firmware how many CPUs should be idle 387 + * return -1 on failure 388 + */ 389 + static int acpi_pad_pur(acpi_handle handle) 390 { 391 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 392 union acpi_object *package; 393 + int num = -1; 394 395 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) 396 + return num; 397 398 if (!buffer.length || !buffer.pointer) 399 + return num; 400 401 package = buffer.pointer; 402 + 403 + if (package->type == ACPI_TYPE_PACKAGE && 404 + package->package.count == 2 && 405 + package->package.elements[0].integer.value == 1) /* rev 1 */ 406 + 407 + num = package->package.elements[1].integer.value; 408 + 409 kfree(buffer.pointer); 410 + return num; 411 } 412 413 /* Notify firmware how many CPUs are idle */ ··· 433 uint32_t idle_cpus; 434 435 mutex_lock(&isolated_cpus_lock); 436 + num_cpus = acpi_pad_pur(handle); 437 + if (num_cpus < 0) { 438 mutex_unlock(&isolated_cpus_lock); 439 return; 440 }
+1
drivers/acpi/acpica/aclocal.h
··· 854 ACPI_BITMASK_POWER_BUTTON_STATUS | \ 855 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ 856 ACPI_BITMASK_RT_CLOCK_STATUS | \ 857 ACPI_BITMASK_WAKE_STATUS) 858 859 #define ACPI_BITMASK_TIMER_ENABLE 0x0001
··· 854 ACPI_BITMASK_POWER_BUTTON_STATUS | \ 855 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ 856 ACPI_BITMASK_RT_CLOCK_STATUS | \ 857 + ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \ 858 ACPI_BITMASK_WAKE_STATUS) 859 860 #define ACPI_BITMASK_TIMER_ENABLE 0x0001
+1 -1
drivers/acpi/acpica/exutils.c
··· 109 * 110 * DESCRIPTION: Reacquire the interpreter execution region from within the 111 * interpreter code. Failure to enter the interpreter region is a 112 - * fatal system error. Used in conjuction with 113 * relinquish_interpreter 114 * 115 ******************************************************************************/
··· 109 * 110 * DESCRIPTION: Reacquire the interpreter execution region from within the 111 * interpreter code. Failure to enter the interpreter region is a 112 + * fatal system error. Used in conjunction with 113 * relinquish_interpreter 114 * 115 ******************************************************************************/
+1 -1
drivers/acpi/acpica/rsutils.c
··· 149 150 /* 151 * 16-, 32-, and 64-bit cases must use the move macros that perform 152 - * endian conversion and/or accomodate hardware that cannot perform 153 * misaligned memory transfers 154 */ 155 case ACPI_RSC_MOVE16:
··· 149 150 /* 151 * 16-, 32-, and 64-bit cases must use the move macros that perform 152 + * endian conversion and/or accommodate hardware that cannot perform 153 * misaligned memory transfers 154 */ 155 case ACPI_RSC_MOVE16:
+1 -1
drivers/acpi/apei/Kconfig
··· 34 depends on ACPI_APEI 35 help 36 ERST is a way provided by APEI to save and retrieve hardware 37 - error infomation to and from a persistent store. Enable this 38 if you want to debugging and testing the ERST kernel support 39 and firmware implementation.
··· 34 depends on ACPI_APEI 35 help 36 ERST is a way provided by APEI to save and retrieve hardware 37 + error information to and from a persistent store. Enable this 38 if you want to debugging and testing the ERST kernel support 39 and firmware implementation.
+16 -5
drivers/acpi/apei/apei-base.c
··· 445 int apei_resources_request(struct apei_resources *resources, 446 const char *desc) 447 { 448 - struct apei_res *res, *res_bak; 449 struct resource *r; 450 451 - apei_resources_sub(resources, &apei_resources_all); 452 453 list_for_each_entry(res, &resources->iomem, list) { 454 r = request_mem_region(res->start, res->end - res->start, 455 desc); ··· 479 } 480 } 481 482 - apei_resources_merge(&apei_resources_all, resources); 483 484 return 0; 485 err_unmap_ioport: ··· 499 break; 500 release_mem_region(res->start, res->end - res->start); 501 } 502 - return -EINVAL; 503 } 504 EXPORT_SYMBOL_GPL(apei_resources_request); 505 506 void apei_resources_release(struct apei_resources *resources) 507 { 508 struct apei_res *res; 509 510 list_for_each_entry(res, &resources->iomem, list) ··· 513 list_for_each_entry(res, &resources->ioport, list) 514 release_region(res->start, res->end - res->start); 515 516 - apei_resources_sub(&apei_resources_all, resources); 517 } 518 EXPORT_SYMBOL_GPL(apei_resources_release); 519
··· 445 int apei_resources_request(struct apei_resources *resources, 446 const char *desc) 447 { 448 + struct apei_res *res, *res_bak = NULL; 449 struct resource *r; 450 + int rc; 451 452 + rc = apei_resources_sub(resources, &apei_resources_all); 453 + if (rc) 454 + return rc; 455 456 + rc = -EINVAL; 457 list_for_each_entry(res, &resources->iomem, list) { 458 r = request_mem_region(res->start, res->end - res->start, 459 desc); ··· 475 } 476 } 477 478 + rc = apei_resources_merge(&apei_resources_all, resources); 479 + if (rc) { 480 + pr_err(APEI_PFX "Fail to merge resources!\n"); 481 + goto err_unmap_ioport; 482 + } 483 484 return 0; 485 err_unmap_ioport: ··· 491 break; 492 release_mem_region(res->start, res->end - res->start); 493 } 494 + return rc; 495 } 496 EXPORT_SYMBOL_GPL(apei_resources_request); 497 498 void apei_resources_release(struct apei_resources *resources) 499 { 500 + int rc; 501 struct apei_res *res; 502 503 list_for_each_entry(res, &resources->iomem, list) ··· 504 list_for_each_entry(res, &resources->ioport, list) 505 release_region(res->start, res->end - res->start); 506 507 + rc = apei_resources_sub(&apei_resources_all, resources); 508 + if (rc) 509 + pr_err(APEI_PFX "Fail to sub resources!\n"); 510 } 511 EXPORT_SYMBOL_GPL(apei_resources_release); 512
+3 -1
drivers/acpi/apei/einj.c
··· 426 427 static int einj_check_table(struct acpi_table_einj *einj_tab) 428 { 429 - if (einj_tab->header_length != sizeof(struct acpi_table_einj)) 430 return -EINVAL; 431 if (einj_tab->header.length < sizeof(struct acpi_table_einj)) 432 return -EINVAL;
··· 426 427 static int einj_check_table(struct acpi_table_einj *einj_tab) 428 { 429 + if ((einj_tab->header_length != 430 + (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) 431 + && (einj_tab->header_length != sizeof(struct acpi_table_einj))) 432 return -EINVAL; 433 if (einj_tab->header.length < sizeof(struct acpi_table_einj)) 434 return -EINVAL;
+11 -7
drivers/acpi/apei/erst-dbg.c
··· 2 * APEI Error Record Serialization Table debug support 3 * 4 * ERST is a way provided by APEI to save and retrieve hardware error 5 - * infomation to and from a persistent store. This file provide the 6 * debugging/testing support for ERST kernel support and firmware 7 * implementation. 8 * ··· 111 goto out; 112 } 113 if (len > erst_dbg_buf_len) { 114 - kfree(erst_dbg_buf); 115 rc = -ENOMEM; 116 - erst_dbg_buf = kmalloc(len, GFP_KERNEL); 117 - if (!erst_dbg_buf) 118 goto out; 119 erst_dbg_buf_len = len; 120 goto retry; 121 } ··· 152 if (mutex_lock_interruptible(&erst_dbg_mutex)) 153 return -EINTR; 154 if (usize > erst_dbg_buf_len) { 155 - kfree(erst_dbg_buf); 156 rc = -ENOMEM; 157 - erst_dbg_buf = kmalloc(usize, GFP_KERNEL); 158 - if (!erst_dbg_buf) 159 goto out; 160 erst_dbg_buf_len = usize; 161 } 162 rc = copy_from_user(erst_dbg_buf, ubuf, usize);
··· 2 * APEI Error Record Serialization Table debug support 3 * 4 * ERST is a way provided by APEI to save and retrieve hardware error 5 + * information to and from a persistent store. This file provide the 6 * debugging/testing support for ERST kernel support and firmware 7 * implementation. 8 * ··· 111 goto out; 112 } 113 if (len > erst_dbg_buf_len) { 114 + void *p; 115 rc = -ENOMEM; 116 + p = kmalloc(len, GFP_KERNEL); 117 + if (!p) 118 goto out; 119 + kfree(erst_dbg_buf); 120 + erst_dbg_buf = p; 121 erst_dbg_buf_len = len; 122 goto retry; 123 } ··· 150 if (mutex_lock_interruptible(&erst_dbg_mutex)) 151 return -EINTR; 152 if (usize > erst_dbg_buf_len) { 153 + void *p; 154 rc = -ENOMEM; 155 + p = kmalloc(usize, GFP_KERNEL); 156 + if (!p) 157 goto out; 158 + kfree(erst_dbg_buf); 159 + erst_dbg_buf = p; 160 erst_dbg_buf_len = usize; 161 } 162 rc = copy_from_user(erst_dbg_buf, ubuf, usize);
+24 -5
drivers/acpi/apei/erst.c
··· 2 * APEI Error Record Serialization Table support 3 * 4 * ERST is a way provided by APEI to save and retrieve hardware error 5 - * infomation to and from a persistent store. 6 * 7 * For more information about ERST, please refer to ACPI Specification 8 * version 4.0, section 17.4. ··· 266 { 267 int rc; 268 u64 offset; 269 270 rc = __apei_exec_read_register(entry, &offset); 271 if (rc) 272 return rc; 273 - memmove((void *)ctx->dst_base + offset, 274 - (void *)ctx->src_base + offset, 275 - ctx->var2); 276 277 return 0; 278 } ··· 767 768 static int erst_check_table(struct acpi_table_erst *erst_tab) 769 { 770 - if (erst_tab->header_length != sizeof(struct acpi_table_erst)) 771 return -EINVAL; 772 if (erst_tab->header.length < sizeof(struct acpi_table_erst)) 773 return -EINVAL;
··· 2 * APEI Error Record Serialization Table support 3 * 4 * ERST is a way provided by APEI to save and retrieve hardware error 5 + * information to and from a persistent store. 6 * 7 * For more information about ERST, please refer to ACPI Specification 8 * version 4.0, section 17.4. ··· 266 { 267 int rc; 268 u64 offset; 269 + void *src, *dst; 270 + 271 + /* ioremap does not work in interrupt context */ 272 + if (in_interrupt()) { 273 + pr_warning(ERST_PFX 274 + "MOVE_DATA can not be used in interrupt context"); 275 + return -EBUSY; 276 + } 277 278 rc = __apei_exec_read_register(entry, &offset); 279 if (rc) 280 return rc; 281 + 282 + src = ioremap(ctx->src_base + offset, ctx->var2); 283 + if (!src) 284 + return -ENOMEM; 285 + dst = ioremap(ctx->dst_base + offset, ctx->var2); 286 + if (!dst) 287 + return -ENOMEM; 288 + 289 + memmove(dst, src, ctx->var2); 290 + 291 + iounmap(src); 292 + iounmap(dst); 293 294 return 0; 295 } ··· 750 751 static int erst_check_table(struct acpi_table_erst *erst_tab) 752 { 753 + if ((erst_tab->header_length != 754 + (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header))) 755 + && (erst_tab->header_length != sizeof(struct acpi_table_einj))) 756 return -EINVAL; 757 if (erst_tab->header.length < sizeof(struct acpi_table_erst)) 758 return -EINVAL;
+1 -1
drivers/acpi/apei/ghes.c
··· 302 struct ghes *ghes = NULL; 303 int rc = -EINVAL; 304 305 - generic = ghes_dev->dev.platform_data; 306 if (!generic->enabled) 307 return -ENODEV; 308
··· 302 struct ghes *ghes = NULL; 303 int rc = -EINVAL; 304 305 + generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; 306 if (!generic->enabled) 307 return -ENODEV; 308
+7 -4
drivers/acpi/apei/hest.c
··· 137 138 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) 139 { 140 - struct acpi_hest_generic *generic; 141 struct platform_device *ghes_dev; 142 struct ghes_arr *ghes_arr = data; 143 int rc; 144 145 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) 146 return 0; 147 - generic = (struct acpi_hest_generic *)hest_hdr; 148 - if (!generic->enabled) 149 return 0; 150 ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); 151 if (!ghes_dev) 152 return -ENOMEM; 153 - ghes_dev->dev.platform_data = generic; 154 rc = platform_device_add(ghes_dev); 155 if (rc) 156 goto err;
··· 137 138 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) 139 { 140 struct platform_device *ghes_dev; 141 struct ghes_arr *ghes_arr = data; 142 int rc; 143 144 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) 145 return 0; 146 + 147 + if (!((struct acpi_hest_generic *)hest_hdr)->enabled) 148 return 0; 149 ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); 150 if (!ghes_dev) 151 return -ENOMEM; 152 + 153 + rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *)); 154 + if (rc) 155 + goto err; 156 + 157 rc = platform_device_add(ghes_dev); 158 if (rc) 159 goto err;
+1 -1
drivers/acpi/atomicio.c
··· 142 list_add_tail_rcu(&map->list, &acpi_iomaps); 143 spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 144 145 - return vaddr + (paddr - pg_off); 146 err_unmap: 147 iounmap(vaddr); 148 return NULL;
··· 142 list_add_tail_rcu(&map->list, &acpi_iomaps); 143 spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 144 145 + return map->vaddr + (paddr - map->paddr); 146 err_unmap: 147 iounmap(vaddr); 148 return NULL;
-1
drivers/acpi/battery.c
··· 273 POWER_SUPPLY_PROP_CYCLE_COUNT, 274 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 275 POWER_SUPPLY_PROP_VOLTAGE_NOW, 276 - POWER_SUPPLY_PROP_CURRENT_NOW, 277 POWER_SUPPLY_PROP_POWER_NOW, 278 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 279 POWER_SUPPLY_PROP_ENERGY_FULL,
··· 273 POWER_SUPPLY_PROP_CYCLE_COUNT, 274 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 275 POWER_SUPPLY_PROP_VOLTAGE_NOW, 276 POWER_SUPPLY_PROP_POWER_NOW, 277 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 278 POWER_SUPPLY_PROP_ENERGY_FULL,
+18
drivers/acpi/blacklist.c
··· 183 { 184 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 185 acpi_osi_setup("!Windows 2006"); 186 return 0; 187 } 188 static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) ··· 228 }, 229 }, 230 { 231 .callback = dmi_disable_osi_win7, 232 .ident = "ASUS K50IJ", 233 .matches = { 234 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 235 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), 236 }, 237 }, 238
··· 183 { 184 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 185 acpi_osi_setup("!Windows 2006"); 186 + acpi_osi_setup("!Windows 2006 SP1"); 187 + acpi_osi_setup("!Windows 2006 SP2"); 188 return 0; 189 } 190 static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) ··· 226 }, 227 }, 228 { 229 + .callback = dmi_disable_osi_vista, 230 + .ident = "Toshiba Satellite L355", 231 + .matches = { 232 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 233 + DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"), 234 + }, 235 + }, 236 + { 237 .callback = dmi_disable_osi_win7, 238 .ident = "ASUS K50IJ", 239 .matches = { 240 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 241 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), 242 + }, 243 + }, 244 + { 245 + .callback = dmi_disable_osi_vista, 246 + .ident = "Toshiba P305D", 247 + .matches = { 248 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 249 + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"), 250 }, 251 }, 252
+5 -13
drivers/acpi/bus.c
··· 55 static int set_power_nocheck(const struct dmi_system_id *id) 56 { 57 printk(KERN_NOTICE PREFIX "%s detected - " 58 - "disable power check in power transistion\n", id->ident); 59 acpi_power_nocheck = 1; 60 return 0; 61 } ··· 80 81 static struct dmi_system_id dsdt_dmi_table[] __initdata = { 82 /* 83 - * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. 84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679 85 */ 86 { 87 .callback = set_copy_dsdt, 88 - .ident = "TOSHIBA Satellite A505", 89 .matches = { 90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 91 - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), 92 - }, 93 - }, 94 - { 95 - .callback = set_copy_dsdt, 96 - .ident = "TOSHIBA Satellite L505D", 97 - .matches = { 98 - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 99 - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"), 100 }, 101 }, 102 {} ··· 1019 1020 /* 1021 * If the laptop falls into the DMI check table, the power state check 1022 - * will be disabled in the course of device power transistion. 1023 */ 1024 dmi_check_system(power_nocheck_dmi_table); 1025
··· 55 static int set_power_nocheck(const struct dmi_system_id *id) 56 { 57 printk(KERN_NOTICE PREFIX "%s detected - " 58 + "disable power check in power transition\n", id->ident); 59 acpi_power_nocheck = 1; 60 return 0; 61 } ··· 80 81 static struct dmi_system_id dsdt_dmi_table[] __initdata = { 82 /* 83 + * Invoke DSDT corruption work-around on all Toshiba Satellite. 84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679 85 */ 86 { 87 .callback = set_copy_dsdt, 88 + .ident = "TOSHIBA Satellite", 89 .matches = { 90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 91 + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), 92 }, 93 }, 94 {} ··· 1027 1028 /* 1029 * If the laptop falls into the DMI check table, the power state check 1030 + * will be disabled in the course of device power transition. 1031 */ 1032 dmi_check_system(power_nocheck_dmi_table); 1033
+2
drivers/acpi/fan.c
··· 369 370 acpi_bus_unregister_driver(&acpi_fan_driver); 371 372 remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); 373 374 return; 375 }
··· 369 370 acpi_bus_unregister_driver(&acpi_fan_driver); 371 372 + #ifdef CONFIG_ACPI_PROCFS 373 remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); 374 + #endif 375 376 return; 377 }
-6
drivers/acpi/processor_core.c
··· 29 30 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { 31 { 32 - set_no_mwait, "IFL91 board", { 33 - DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), 34 - DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), 35 - DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), 36 - DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, 37 - { 38 set_no_mwait, "Extensa 5220", { 39 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 40 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
··· 29 30 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { 31 { 32 set_no_mwait, "Extensa 5220", { 33 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 34 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+1 -1
drivers/acpi/processor_driver.c
··· 850 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 851 acpi_idle_driver.name); 852 } else { 853 - printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", 854 cpuidle_get_driver()->name); 855 } 856
··· 850 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 851 acpi_idle_driver.name); 852 } else { 853 + printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", 854 cpuidle_get_driver()->name); 855 } 856
+2 -2
drivers/acpi/processor_perflib.c
··· 447 if (!try_module_get(calling_module)) 448 return -EINVAL; 449 450 - /* is_done is set to negative if an error occured, 451 - * and to postitive if _no_ error occured, but SMM 452 * was already notified. This avoids double notification 453 * which might lead to unexpected results... 454 */
··· 447 if (!try_module_get(calling_module)) 448 return -EINVAL; 449 450 + /* is_done is set to negative if an error occurred, 451 + * and to postitive if _no_ error occurred, but SMM 452 * was already notified. This avoids double notification 453 * which might lead to unexpected results... 454 */
+22
drivers/acpi/sleep.c
··· 363 return 0; 364 } 365 366 static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 367 { 368 .callback = init_old_suspend_ordering, ··· 401 DMI_MATCH(DMI_BOARD_VENDOR, 402 "Matsushita Electric Industrial Co.,Ltd."), 403 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 404 }, 405 }, 406 {},
··· 363 return 0; 364 } 365 366 + static int __init init_nvs_nosave(const struct dmi_system_id *d) 367 + { 368 + acpi_nvs_nosave(); 369 + return 0; 370 + } 371 + 372 static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 373 { 374 .callback = init_old_suspend_ordering, ··· 395 DMI_MATCH(DMI_BOARD_VENDOR, 396 "Matsushita Electric Industrial Co.,Ltd."), 397 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 398 + }, 399 + }, 400 + { 401 + .callback = init_nvs_nosave, 402 + .ident = "Sony Vaio VGN-SR11M", 403 + .matches = { 404 + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 405 + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), 406 + }, 407 + }, 408 + { 409 + .callback = init_nvs_nosave, 410 + .ident = "Everex StepNote Series", 411 + .matches = { 412 + DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), 413 + DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), 414 }, 415 }, 416 {},
+14 -6
drivers/acpi/sysfs.c
··· 100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 101 }; 102 103 - static int param_get_debug_layer(char *buffer, struct kernel_param *kp) 104 { 105 int result = 0; 106 int i; ··· 128 return result; 129 } 130 131 - static int param_get_debug_level(char *buffer, struct kernel_param *kp) 132 { 133 int result = 0; 134 int i; ··· 149 return result; 150 } 151 152 - module_param_call(debug_layer, param_set_uint, param_get_debug_layer, 153 - &acpi_dbg_layer, 0644); 154 - module_param_call(debug_level, param_set_uint, param_get_debug_level, 155 - &acpi_dbg_level, 0644); 156 157 static char trace_method_name[6]; 158 module_param_string(trace_method_name, trace_method_name, 6, 0644);
··· 100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 101 }; 102 103 + static int param_get_debug_layer(char *buffer, const struct kernel_param *kp) 104 { 105 int result = 0; 106 int i; ··· 128 return result; 129 } 130 131 + static int param_get_debug_level(char *buffer, const struct kernel_param *kp) 132 { 133 int result = 0; 134 int i; ··· 149 return result; 150 } 151 152 + static struct kernel_param_ops param_ops_debug_layer = { 153 + .set = param_set_uint, 154 + .get = param_get_debug_layer, 155 + }; 156 + 157 + static struct kernel_param_ops param_ops_debug_level = { 158 + .set = param_set_uint, 159 + .get = param_get_debug_level, 160 + }; 161 + 162 + module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644); 163 + module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644); 164 165 static char trace_method_name[6]; 166 module_param_string(trace_method_name, trace_method_name, 6, 0644);
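Note on the sysfs.c hunk above: module_param_call() is replaced with the kernel_param_ops/module_param_cb() interface, where the getter takes a const struct kernel_param. A minimal sketch of that pattern for a hypothetical parameter named "example"; apart from param_set_uint() and the macro names, everything here is illustrative:

static unsigned int example_value;

static int param_get_example(char *buffer, const struct kernel_param *kp)
{
	/* kp->arg points at the variable registered below */
	return sprintf(buffer, "0x%08x", *(unsigned int *)kp->arg);
}

static struct kernel_param_ops param_ops_example = {
	.set = param_set_uint,		/* reuse the stock uint setter */
	.get = param_get_example,
};

module_param_cb(example, &param_ops_example, &example_value, 0644);
MODULE_PARM_DESC(example, "illustrative parameter using kernel_param_ops");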
+2 -2
drivers/acpi/video_detect.c
··· 59 "support\n")); 60 *cap |= ACPI_VIDEO_BACKLIGHT; 61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) 62 - printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " 63 - "control misses _BQC function\n"); 64 /* We have backlight support, no need to scan further */ 65 return AE_CTRL_TERMINATE; 66 }
··· 59 "support\n")); 60 *cap |= ACPI_VIDEO_BACKLIGHT; 61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) 62 + printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " 63 + "cannot determine initial brightness\n"); 64 /* We have backlight support, no need to scan further */ 65 return AE_CTRL_TERMINATE; 66 }
+1 -1
drivers/cpuidle/governors/menu.c
··· 80 * Limiting Performance Impact 81 * --------------------------- 82 * C states, especially those with large exit latencies, can have a real 83 - * noticable impact on workloads, which is not acceptable for most sysadmins, 84 * and in addition, less performance has a power price of its own. 85 * 86 * As a general rule of thumb, menu assumes that the following heuristic
··· 80 * Limiting Performance Impact 81 * --------------------------- 82 * C states, especially those with large exit latencies, can have a real 83 + * noticeable impact on workloads, which is not acceptable for most sysadmins, 84 * and in addition, less performance has a power price of its own. 85 * 86 * As a general rule of thumb, menu assumes that the following heuristic
+2 -1
drivers/dma/shdma.c
··· 580 581 sh_chan = to_sh_chan(chan); 582 param = chan->private; 583 - slave_addr = param->config->addr; 584 585 /* Someone calling slave DMA on a public channel? */ 586 if (!param || !sg_len) { ··· 587 __func__, param, sg_len, param ? param->slave_id : -1); 588 return NULL; 589 } 590 591 /* 592 * if (param != NULL), this is a successfully requested slave channel,
··· 580 581 sh_chan = to_sh_chan(chan); 582 param = chan->private; 583 584 /* Someone calling slave DMA on a public channel? */ 585 if (!param || !sg_len) { ··· 588 __func__, param, sg_len, param ? param->slave_id : -1); 589 return NULL; 590 } 591 + 592 + slave_addr = param->config->addr; 593 594 /* 595 * if (param != NULL), this is a successfully requested slave channel,
+1
drivers/edac/i7core_edac.c
··· 1140 ATTR_COUNTER(0), 1141 ATTR_COUNTER(1), 1142 ATTR_COUNTER(2), 1143 }; 1144 1145 static struct mcidev_sysfs_group i7core_udimm_counters = {
··· 1140 ATTR_COUNTER(0), 1141 ATTR_COUNTER(1), 1142 ATTR_COUNTER(2), 1143 + { .attr = { .name = NULL } } 1144 }; 1145 1146 static struct mcidev_sysfs_group i7core_udimm_counters = {
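Note on the i7core_edac.c hunk above: the added element is a sentinel; the attribute array appears to be walked until an entry with a NULL name is reached, so a list without an explicit terminator runs off its end. Hypothetical illustration of the same idiom (the array name is invented, the terminator is the point):

static struct mcidev_sysfs_attribute example_counter_attrs[] = {
	ATTR_COUNTER(0),
	ATTR_COUNTER(1),
	{ .attr = { .name = NULL } }	/* sentinel: marks the end of the list */
};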
+10 -29
drivers/gpu/drm/drm_gem.c
··· 148 return -ENOMEM; 149 150 kref_init(&obj->refcount); 151 - kref_init(&obj->handlecount); 152 obj->size = size; 153 154 atomic_inc(&dev->object_count); ··· 462 } 463 EXPORT_SYMBOL(drm_gem_object_free); 464 465 - /** 466 - * Called after the last reference to the object has been lost. 467 - * Must be called without holding struct_mutex 468 - * 469 - * Frees the object 470 - */ 471 - void 472 - drm_gem_object_free_unlocked(struct kref *kref) 473 - { 474 - struct drm_gem_object *obj = (struct drm_gem_object *) kref; 475 - struct drm_device *dev = obj->dev; 476 - 477 - if (dev->driver->gem_free_object_unlocked != NULL) 478 - dev->driver->gem_free_object_unlocked(obj); 479 - else if (dev->driver->gem_free_object != NULL) { 480 - mutex_lock(&dev->struct_mutex); 481 - dev->driver->gem_free_object(obj); 482 - mutex_unlock(&dev->struct_mutex); 483 - } 484 - } 485 - EXPORT_SYMBOL(drm_gem_object_free_unlocked); 486 - 487 static void drm_gem_object_ref_bug(struct kref *list_kref) 488 { 489 BUG(); ··· 474 * called before drm_gem_object_free or we'll be touching 475 * freed memory 476 */ 477 - void 478 - drm_gem_object_handle_free(struct kref *kref) 479 { 480 - struct drm_gem_object *obj = container_of(kref, 481 - struct drm_gem_object, 482 - handlecount); 483 struct drm_device *dev = obj->dev; 484 485 /* Remove any name for this object */ ··· 502 struct drm_gem_object *obj = vma->vm_private_data; 503 504 drm_gem_object_reference(obj); 505 } 506 EXPORT_SYMBOL(drm_gem_vm_open); 507 ··· 513 { 514 struct drm_gem_object *obj = vma->vm_private_data; 515 516 - drm_gem_object_unreference_unlocked(obj); 517 } 518 EXPORT_SYMBOL(drm_gem_vm_close); 519
··· 148 return -ENOMEM; 149 150 kref_init(&obj->refcount); 151 + atomic_set(&obj->handle_count, 0); 152 obj->size = size; 153 154 atomic_inc(&dev->object_count); ··· 462 } 463 EXPORT_SYMBOL(drm_gem_object_free); 464 465 static void drm_gem_object_ref_bug(struct kref *list_kref) 466 { 467 BUG(); ··· 496 * called before drm_gem_object_free or we'll be touching 497 * freed memory 498 */ 499 + void drm_gem_object_handle_free(struct drm_gem_object *obj) 500 { 501 struct drm_device *dev = obj->dev; 502 503 /* Remove any name for this object */ ··· 528 struct drm_gem_object *obj = vma->vm_private_data; 529 530 drm_gem_object_reference(obj); 531 + 532 + mutex_lock(&obj->dev->struct_mutex); 533 + drm_vm_open_locked(vma); 534 + mutex_unlock(&obj->dev->struct_mutex); 535 } 536 EXPORT_SYMBOL(drm_gem_vm_open); 537 ··· 535 { 536 struct drm_gem_object *obj = vma->vm_private_data; 537 538 + mutex_lock(&obj->dev->struct_mutex); 539 + drm_vm_close_locked(vma); 540 + drm_gem_object_unreference(obj); 541 + mutex_unlock(&obj->dev->struct_mutex); 542 } 543 EXPORT_SYMBOL(drm_gem_vm_close); 544
+1 -1
drivers/gpu/drm/drm_info.c
··· 255 256 seq_printf(m, "%6d %8zd %7d %8d\n", 257 obj->name, obj->size, 258 - atomic_read(&obj->handlecount.refcount), 259 atomic_read(&obj->refcount.refcount)); 260 return 0; 261 }
··· 255 256 seq_printf(m, "%6d %8zd %7d %8d\n", 257 obj->name, obj->size, 258 + atomic_read(&obj->handle_count), 259 atomic_read(&obj->refcount.refcount)); 260 return 0; 261 }
+20 -12
drivers/gpu/drm/drm_vm.c
··· 433 mutex_unlock(&dev->struct_mutex); 434 } 435 436 /** 437 * \c close method for all virtual memory types. 438 * ··· 464 { 465 struct drm_file *priv = vma->vm_file->private_data; 466 struct drm_device *dev = priv->minor->dev; 467 - struct drm_vma_entry *pt, *temp; 468 - 469 - DRM_DEBUG("0x%08lx,0x%08lx\n", 470 - vma->vm_start, vma->vm_end - vma->vm_start); 471 - atomic_dec(&dev->vma_count); 472 473 mutex_lock(&dev->struct_mutex); 474 - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 475 - if (pt->vma == vma) { 476 - list_del(&pt->head); 477 - kfree(pt); 478 - break; 479 - } 480 - } 481 mutex_unlock(&dev->struct_mutex); 482 } 483
··· 433 mutex_unlock(&dev->struct_mutex); 434 } 435 436 + void drm_vm_close_locked(struct vm_area_struct *vma) 437 + { 438 + struct drm_file *priv = vma->vm_file->private_data; 439 + struct drm_device *dev = priv->minor->dev; 440 + struct drm_vma_entry *pt, *temp; 441 + 442 + DRM_DEBUG("0x%08lx,0x%08lx\n", 443 + vma->vm_start, vma->vm_end - vma->vm_start); 444 + atomic_dec(&dev->vma_count); 445 + 446 + list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 447 + if (pt->vma == vma) { 448 + list_del(&pt->head); 449 + kfree(pt); 450 + break; 451 + } 452 + } 453 + } 454 + 455 /** 456 * \c close method for all virtual memory types. 457 * ··· 445 { 446 struct drm_file *priv = vma->vm_file->private_data; 447 struct drm_device *dev = priv->minor->dev; 448 449 mutex_lock(&dev->struct_mutex); 450 + drm_vm_close_locked(vma); 451 mutex_unlock(&dev->struct_mutex); 452 } 453
+1 -1
drivers/gpu/drm/i810/i810_dma.c
··· 116 static const struct file_operations i810_buffer_fops = { 117 .open = drm_open, 118 .release = drm_release, 119 - .unlocked_ioctl = drm_ioctl, 120 .mmap = i810_mmap_buffers, 121 .fasync = drm_fasync, 122 };
··· 116 static const struct file_operations i810_buffer_fops = { 117 .open = drm_open, 118 .release = drm_release, 119 + .unlocked_ioctl = i810_ioctl, 120 .mmap = i810_mmap_buffers, 121 .fasync = drm_fasync, 122 };
+1 -1
drivers/gpu/drm/i830/i830_dma.c
··· 118 static const struct file_operations i830_buffer_fops = { 119 .open = drm_open, 120 .release = drm_release, 121 - .unlocked_ioctl = drm_ioctl, 122 .mmap = i830_mmap_buffers, 123 .fasync = drm_fasync, 124 };
··· 118 static const struct file_operations i830_buffer_fops = { 119 .open = drm_open, 120 .release = drm_release, 121 + .unlocked_ioctl = i830_ioctl, 122 .mmap = i830_mmap_buffers, 123 .fasync = drm_fasync, 124 };
+3 -3
drivers/gpu/drm/i915/i915_dma.c
··· 1787 } 1788 } 1789 1790 - div_u64(diff, diff1); 1791 ret = ((m * diff) + c); 1792 - div_u64(ret, 10); 1793 1794 dev_priv->last_count1 = total_count; 1795 dev_priv->last_time1 = now; ··· 1858 1859 /* More magic constants... */ 1860 diff = diff * 1181; 1861 - div_u64(diff, diffms * 10); 1862 dev_priv->gfx_power = diff; 1863 } 1864
··· 1787 } 1788 } 1789 1790 + diff = div_u64(diff, diff1); 1791 ret = ((m * diff) + c); 1792 + ret = div_u64(ret, 10); 1793 1794 dev_priv->last_count1 = total_count; 1795 dev_priv->last_time1 = now; ··· 1858 1859 /* More magic constants... */ 1860 diff = diff * 1181; 1861 + diff = div_u64(diff, diffms * 10); 1862 dev_priv->gfx_power = diff; 1863 } 1864
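Note on the i915_dma.c hunk above: div_u64() returns the quotient and leaves its first argument untouched, so calling it without using the return value, as the removed lines did, performs no division at all; the fix assigns the result back. A two-line illustration:

u64 value = 1000;

value = div_u64(value, 7);	/* value is now 142 */
/* div_u64(value, 7); on its own would have left value at 1000 */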
+28 -24
drivers/gpu/drm/i915/i915_gem.c
··· 136 return -ENOMEM; 137 138 ret = drm_gem_handle_create(file_priv, obj, &handle); 139 if (ret) { 140 - drm_gem_object_unreference_unlocked(obj); 141 return ret; 142 } 143 - 144 - /* Sink the floating reference from kref_init(handlecount) */ 145 - drm_gem_object_handle_unreference_unlocked(obj); 146 147 args->handle = handle; 148 return 0; ··· 469 return -ENOENT; 470 obj_priv = to_intel_bo(obj); 471 472 - /* Bounds check source. 473 - * 474 - * XXX: This could use review for overflow issues... 475 - */ 476 - if (args->offset > obj->size || args->size > obj->size || 477 - args->offset + args->size > obj->size) { 478 - drm_gem_object_unreference_unlocked(obj); 479 - return -EINVAL; 480 } 481 482 if (i915_gem_object_needs_bit17_swizzle(obj)) { ··· 491 file_priv); 492 } 493 494 drm_gem_object_unreference_unlocked(obj); 495 - 496 return ret; 497 } 498 ··· 581 582 user_data = (char __user *) (uintptr_t) args->data_ptr; 583 remain = args->size; 584 - if (!access_ok(VERIFY_READ, user_data, remain)) 585 - return -EFAULT; 586 587 588 mutex_lock(&dev->struct_mutex); ··· 933 return -ENOENT; 934 obj_priv = to_intel_bo(obj); 935 936 - /* Bounds check destination. 937 - * 938 - * XXX: This could use review for overflow issues... 939 - */ 940 - if (args->offset > obj->size || args->size > obj->size || 941 - args->offset + args->size > obj->size) { 942 - drm_gem_object_unreference_unlocked(obj); 943 - return -EINVAL; 944 } 945 946 /* We can only do the GTT pwrite on untiled buffers, as otherwise ··· 977 DRM_INFO("pwrite failed %d\n", ret); 978 #endif 979 980 drm_gem_object_unreference_unlocked(obj); 981 - 982 return ret; 983 } 984 ··· 3260 (int) reloc->offset, 3261 reloc->read_domains, 3262 reloc->write_domain); 3263 return -EINVAL; 3264 } 3265 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
··· 136 return -ENOMEM; 137 138 ret = drm_gem_handle_create(file_priv, obj, &handle); 139 + /* drop reference from allocate - handle holds it now */ 140 + drm_gem_object_unreference_unlocked(obj); 141 if (ret) { 142 return ret; 143 } 144 145 args->handle = handle; 146 return 0; ··· 471 return -ENOENT; 472 obj_priv = to_intel_bo(obj); 473 474 + /* Bounds check source. */ 475 + if (args->offset > obj->size || args->size > obj->size - args->offset) { 476 + ret = -EINVAL; 477 + goto err; 478 + } 479 + 480 + if (!access_ok(VERIFY_WRITE, 481 + (char __user *)(uintptr_t)args->data_ptr, 482 + args->size)) { 483 + ret = -EFAULT; 484 + goto err; 485 } 486 487 if (i915_gem_object_needs_bit17_swizzle(obj)) { ··· 490 file_priv); 491 } 492 493 + err: 494 drm_gem_object_unreference_unlocked(obj); 495 return ret; 496 } 497 ··· 580 581 user_data = (char __user *) (uintptr_t) args->data_ptr; 582 remain = args->size; 583 584 585 mutex_lock(&dev->struct_mutex); ··· 934 return -ENOENT; 935 obj_priv = to_intel_bo(obj); 936 937 + /* Bounds check destination. */ 938 + if (args->offset > obj->size || args->size > obj->size - args->offset) { 939 + ret = -EINVAL; 940 + goto err; 941 + } 942 + 943 + if (!access_ok(VERIFY_READ, 944 + (char __user *)(uintptr_t)args->data_ptr, 945 + args->size)) { 946 + ret = -EFAULT; 947 + goto err; 948 } 949 950 /* We can only do the GTT pwrite on untiled buffers, as otherwise ··· 975 DRM_INFO("pwrite failed %d\n", ret); 976 #endif 977 978 + err: 979 drm_gem_object_unreference_unlocked(obj); 980 return ret; 981 } 982 ··· 3258 (int) reloc->offset, 3259 reloc->read_domains, 3260 reloc->write_domain); 3261 + drm_gem_object_unreference(target_obj); 3262 + i915_gem_object_unpin(obj); 3263 return -EINVAL; 3264 } 3265 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
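Note on the pread/pwrite hunks above: the bounds check is rewritten so offset and size are never added together, which avoids wrapping when userspace passes values near the top of the range; once offset is known to be within the object, comparing size against obj->size - offset cannot overflow. A sketch of the check as a standalone helper, with an invented name, purely for illustration:

static bool args_fit_in_object(u64 offset, u64 size, u64 object_size)
{
	if (offset > object_size)
		return false;		/* start lies past the end */
	/* object_size - offset cannot underflow here, and no addition
	 * is performed, so nothing can wrap */
	return size <= object_size - offset;
}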
+22 -27
drivers/gpu/drm/i915/i915_gem_evict.c
··· 93 { 94 drm_i915_private_t *dev_priv = dev->dev_private; 95 struct list_head eviction_list, unwind_list; 96 - struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; 97 struct list_head *render_iter, *bsd_iter; 98 int ret = 0; 99 ··· 175 return -ENOSPC; 176 177 found: 178 INIT_LIST_HEAD(&eviction_list); 179 - list_for_each_entry_safe(obj_priv, tmp_obj_priv, 180 - &unwind_list, evict_list) { 181 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 182 - /* drm_mm doesn't allow any other other operations while 183 - * scanning, therefore store to be evicted objects on a 184 - * temporary list. */ 185 list_move(&obj_priv->evict_list, &eviction_list); 186 - } else 187 - drm_gem_object_unreference(&obj_priv->base); 188 - } 189 - 190 - /* Unbinding will emit any required flushes */ 191 - list_for_each_entry_safe(obj_priv, tmp_obj_priv, 192 - &eviction_list, evict_list) { 193 - #if WATCH_LRU 194 - DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base); 195 - #endif 196 - ret = i915_gem_object_unbind(&obj_priv->base); 197 - if (ret) 198 - return ret; 199 - 200 drm_gem_object_unreference(&obj_priv->base); 201 } 202 203 - /* The just created free hole should be on the top of the free stack 204 - * maintained by drm_mm, so this BUG_ON actually executes in O(1). 205 - * Furthermore all accessed data has just recently been used, so it 206 - * should be really fast, too. */ 207 - BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, 208 - alignment, 0)); 209 210 - return 0; 211 } 212 213 int
··· 93 { 94 drm_i915_private_t *dev_priv = dev->dev_private; 95 struct list_head eviction_list, unwind_list; 96 + struct drm_i915_gem_object *obj_priv; 97 struct list_head *render_iter, *bsd_iter; 98 int ret = 0; 99 ··· 175 return -ENOSPC; 176 177 found: 178 + /* drm_mm doesn't allow any other other operations while 179 + * scanning, therefore store to be evicted objects on a 180 + * temporary list. */ 181 INIT_LIST_HEAD(&eviction_list); 182 + while (!list_empty(&unwind_list)) { 183 + obj_priv = list_first_entry(&unwind_list, 184 + struct drm_i915_gem_object, 185 + evict_list); 186 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 187 list_move(&obj_priv->evict_list, &eviction_list); 188 + continue; 189 + } 190 + list_del(&obj_priv->evict_list); 191 drm_gem_object_unreference(&obj_priv->base); 192 } 193 194 + /* Unbinding will emit any required flushes */ 195 + while (!list_empty(&eviction_list)) { 196 + obj_priv = list_first_entry(&eviction_list, 197 + struct drm_i915_gem_object, 198 + evict_list); 199 + if (ret == 0) 200 + ret = i915_gem_object_unbind(&obj_priv->base); 201 + list_del(&obj_priv->evict_list); 202 + drm_gem_object_unreference(&obj_priv->base); 203 + } 204 205 + return ret; 206 } 207 208 int
+36 -22
drivers/gpu/drm/i915/intel_display.c
··· 1013 DRM_DEBUG_KMS("vblank wait timed out\n"); 1014 } 1015 1016 - /** 1017 - * intel_wait_for_vblank_off - wait for vblank after disabling a pipe 1018 * @dev: drm device 1019 * @pipe: pipe to wait for 1020 * ··· 1022 * spinning on the vblank interrupt status bit, since we won't actually 1023 * see an interrupt when the pipe is disabled. 1024 * 1025 - * So this function waits for the display line value to settle (it 1026 - * usually ends up stopping at the start of the next frame). 1027 */ 1028 - void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) 1029 { 1030 struct drm_i915_private *dev_priv = dev->dev_private; 1031 - int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1032 - unsigned long timeout = jiffies + msecs_to_jiffies(100); 1033 - u32 last_line; 1034 1035 - /* Wait for the display line to settle */ 1036 - do { 1037 - last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1038 - mdelay(5); 1039 - } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1040 - time_after(timeout, jiffies)); 1041 1042 - if (time_after(jiffies, timeout)) 1043 - DRM_DEBUG_KMS("vblank wait timed out\n"); 1044 } 1045 1046 /* Parameters have changed, update FBC info */ ··· 2342 I915_READ(dspbase_reg); 2343 } 2344 2345 - /* Wait for vblank for the disable to take effect */ 2346 - intel_wait_for_vblank_off(dev, pipe); 2347 - 2348 /* Don't disable pipe A or pipe A PLLs if needed */ 2349 if (pipeconf_reg == PIPEACONF && 2350 - (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 2351 goto skip_pipe_off; 2352 2353 /* Next, disable display pipes */ 2354 temp = I915_READ(pipeconf_reg); ··· 2357 I915_READ(pipeconf_reg); 2358 } 2359 2360 - /* Wait for vblank for the disable to take effect. */ 2361 - intel_wait_for_vblank_off(dev, pipe); 2362 2363 temp = I915_READ(dpll_reg); 2364 if ((temp & DPLL_VCO_ENABLE) != 0) {
··· 1013 DRM_DEBUG_KMS("vblank wait timed out\n"); 1014 } 1015 1016 + /* 1017 + * intel_wait_for_pipe_off - wait for pipe to turn off 1018 * @dev: drm device 1019 * @pipe: pipe to wait for 1020 * ··· 1022 * spinning on the vblank interrupt status bit, since we won't actually 1023 * see an interrupt when the pipe is disabled. 1024 * 1025 + * On Gen4 and above: 1026 + * wait for the pipe register state bit to turn off 1027 + * 1028 + * Otherwise: 1029 + * wait for the display line value to settle (it usually 1030 + * ends up stopping at the start of the next frame). 1031 + * 1032 */ 1033 + static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 1034 { 1035 struct drm_i915_private *dev_priv = dev->dev_private; 1036 1037 + if (INTEL_INFO(dev)->gen >= 4) { 1038 + int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); 1039 1040 + /* Wait for the Pipe State to go off */ 1041 + if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 1042 + 100, 0)) 1043 + DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1044 + } else { 1045 + u32 last_line; 1046 + int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1047 + unsigned long timeout = jiffies + msecs_to_jiffies(100); 1048 + 1049 + /* Wait for the display line to settle */ 1050 + do { 1051 + last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1052 + mdelay(5); 1053 + } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1054 + time_after(timeout, jiffies)); 1055 + if (time_after(jiffies, timeout)) 1056 + DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1057 + } 1058 } 1059 1060 /* Parameters have changed, update FBC info */ ··· 2328 I915_READ(dspbase_reg); 2329 } 2330 2331 /* Don't disable pipe A or pipe A PLLs if needed */ 2332 if (pipeconf_reg == PIPEACONF && 2333 + (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { 2334 + /* Wait for vblank for the disable to take effect */ 2335 + intel_wait_for_vblank(dev, pipe); 2336 goto skip_pipe_off; 2337 + } 2338 2339 /* Next, disable display pipes */ 2340 temp = I915_READ(pipeconf_reg); ··· 2343 I915_READ(pipeconf_reg); 2344 } 2345 2346 + /* Wait for the pipe to turn off */ 2347 + intel_wait_for_pipe_off(dev, pipe); 2348 2349 temp = I915_READ(dpll_reg); 2350 if ((temp & DPLL_VCO_ENABLE) != 0) {
+9 -10
drivers/gpu/drm/i915/intel_dp.c
··· 1138 intel_dp_set_link_train(struct intel_dp *intel_dp, 1139 uint32_t dp_reg_value, 1140 uint8_t dp_train_pat, 1141 - uint8_t train_set[4], 1142 - bool first) 1143 { 1144 struct drm_device *dev = intel_dp->base.enc.dev; 1145 struct drm_i915_private *dev_priv = dev->dev_private; 1146 - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); 1147 int ret; 1148 1149 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1150 POSTING_READ(intel_dp->output_reg); 1151 - if (first) 1152 - intel_wait_for_vblank(dev, intel_crtc->pipe); 1153 1154 intel_dp_aux_native_write_1(intel_dp, 1155 DP_TRAINING_PATTERN_SET, ··· 1170 uint8_t voltage; 1171 bool clock_recovery = false; 1172 bool channel_eq = false; 1173 - bool first = true; 1174 int tries; 1175 u32 reg; 1176 uint32_t DP = intel_dp->DP; 1177 1178 /* Write the link configuration data */ 1179 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, ··· 1211 reg = DP | DP_LINK_TRAIN_PAT_1; 1212 1213 if (!intel_dp_set_link_train(intel_dp, reg, 1214 - DP_TRAINING_PATTERN_1, train_set, first)) 1215 break; 1216 - first = false; 1217 /* Set training pattern 1 */ 1218 1219 udelay(100); ··· 1266 1267 /* channel eq pattern */ 1268 if (!intel_dp_set_link_train(intel_dp, reg, 1269 - DP_TRAINING_PATTERN_2, train_set, 1270 - false)) 1271 break; 1272 1273 udelay(400);
··· 1138 intel_dp_set_link_train(struct intel_dp *intel_dp, 1139 uint32_t dp_reg_value, 1140 uint8_t dp_train_pat, 1141 + uint8_t train_set[4]) 1142 { 1143 struct drm_device *dev = intel_dp->base.enc.dev; 1144 struct drm_i915_private *dev_priv = dev->dev_private; 1145 int ret; 1146 1147 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1148 POSTING_READ(intel_dp->output_reg); 1149 1150 intel_dp_aux_native_write_1(intel_dp, 1151 DP_TRAINING_PATTERN_SET, ··· 1174 uint8_t voltage; 1175 bool clock_recovery = false; 1176 bool channel_eq = false; 1177 int tries; 1178 u32 reg; 1179 uint32_t DP = intel_dp->DP; 1180 + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); 1181 + 1182 + /* Enable output, wait for it to become active */ 1183 + I915_WRITE(intel_dp->output_reg, intel_dp->DP); 1184 + POSTING_READ(intel_dp->output_reg); 1185 + intel_wait_for_vblank(dev, intel_crtc->pipe); 1186 1187 /* Write the link configuration data */ 1188 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, ··· 1210 reg = DP | DP_LINK_TRAIN_PAT_1; 1211 1212 if (!intel_dp_set_link_train(intel_dp, reg, 1213 + DP_TRAINING_PATTERN_1, train_set)) 1214 break; 1215 /* Set training pattern 1 */ 1216 1217 udelay(100); ··· 1266 1267 /* channel eq pattern */ 1268 if (!intel_dp_set_link_train(intel_dp, reg, 1269 + DP_TRAINING_PATTERN_2, train_set)) 1270 break; 1271 1272 udelay(400);
-1
drivers/gpu/drm/i915/intel_drv.h
··· 229 struct drm_crtc *crtc); 230 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 231 struct drm_file *file_priv); 232 - extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); 233 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 234 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 235 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
··· 229 struct drm_crtc *crtc); 230 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 231 struct drm_file *file_priv); 232 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 233 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 234 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+3 -1
drivers/gpu/drm/i915/intel_fb.c
··· 237 drm_fb_helper_fini(&ifbdev->helper); 238 239 drm_framebuffer_cleanup(&ifb->base); 240 - if (ifb->obj) 241 drm_gem_object_unreference(ifb->obj); 242 243 return 0; 244 }
··· 237 drm_fb_helper_fini(&ifbdev->helper); 238 239 drm_framebuffer_cleanup(&ifb->base); 240 + if (ifb->obj) { 241 + drm_gem_object_handle_unreference(ifb->obj); 242 drm_gem_object_unreference(ifb->obj); 243 + } 244 245 return 0; 246 }
+1
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 352 353 if (nouveau_fb->nvbo) { 354 nouveau_bo_unmap(nouveau_fb->nvbo); 355 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 356 nouveau_fb->nvbo = NULL; 357 }
··· 352 353 if (nouveau_fb->nvbo) { 354 nouveau_bo_unmap(nouveau_fb->nvbo); 355 + drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem); 356 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 357 nouveau_fb->nvbo = NULL; 358 }
+2 -4
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 167 goto out; 168 169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 170 out: 171 - drm_gem_object_handle_unreference_unlocked(nvbo->gem); 172 - 173 - if (ret) 174 - drm_gem_object_unreference_unlocked(nvbo->gem); 175 return ret; 176 } 177
··· 167 goto out; 168 169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 170 + /* drop reference from allocate - handle holds it now */ 171 + drm_gem_object_unreference_unlocked(nvbo->gem); 172 out: 173 return ret; 174 } 175
+1
drivers/gpu/drm/nouveau/nouveau_notifier.c
··· 79 mutex_lock(&dev->struct_mutex); 80 nouveau_bo_unpin(chan->notifier_bo); 81 mutex_unlock(&dev->struct_mutex); 82 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); 83 drm_mm_takedown(&chan->notifier_heap); 84 }
··· 79 mutex_lock(&dev->struct_mutex); 80 nouveau_bo_unpin(chan->notifier_bo); 81 mutex_unlock(&dev->struct_mutex); 82 + drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem); 83 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); 84 drm_mm_takedown(&chan->notifier_heap); 85 }
+2 -1
drivers/gpu/drm/radeon/r600.c
··· 3528 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3530 */ 3531 - if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 3532 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3533 u32 tmp; 3534
··· 3528 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3530 */ 3531 + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 3532 + rdev->vram_scratch.ptr) { 3533 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3534 u32 tmp; 3535
+9
drivers/gpu/drm/radeon/radeon_atombios.c
··· 317 *connector_type = DRM_MODE_CONNECTOR_DVID; 318 } 319 320 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 321 if ((dev->pdev->device == 0x7941) && 322 (dev->pdev->subsystem_vendor == 0x147b) &&
··· 317 *connector_type = DRM_MODE_CONNECTOR_DVID; 318 } 319 320 + /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ 321 + if ((dev->pdev->device == 0x796e) && 322 + (dev->pdev->subsystem_vendor == 0x1462) && 323 + (dev->pdev->subsystem_device == 0x7302)) { 324 + if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || 325 + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) 326 + return false; 327 + } 328 + 329 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 330 if ((dev->pdev->device == 0x7941) && 331 (dev->pdev->subsystem_vendor == 0x147b) &&
+4 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); 350 if (devices & ATOM_DEVICE_DFP5_SUPPORT) 351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); 352 if (devices & ATOM_DEVICE_TV1_SUPPORT) 353 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); 354 if (devices & ATOM_DEVICE_CV_SUPPORT) ··· 843 { 844 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 845 846 - if (radeon_fb->obj) 847 drm_gem_object_unreference_unlocked(radeon_fb->obj); 848 drm_framebuffer_cleanup(fb); 849 kfree(radeon_fb); 850 }
··· 349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); 350 if (devices & ATOM_DEVICE_DFP5_SUPPORT) 351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); 352 + if (devices & ATOM_DEVICE_DFP6_SUPPORT) 353 + DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); 354 if (devices & ATOM_DEVICE_TV1_SUPPORT) 355 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); 356 if (devices & ATOM_DEVICE_CV_SUPPORT) ··· 841 { 842 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 843 844 + if (radeon_fb->obj) { 845 drm_gem_object_unreference_unlocked(radeon_fb->obj); 846 + } 847 drm_framebuffer_cleanup(fb); 848 kfree(radeon_fb); 849 }
+4 -10
drivers/gpu/drm/radeon/radeon_fb.c
··· 94 ret = radeon_bo_reserve(rbo, false); 95 if (likely(ret == 0)) { 96 radeon_bo_kunmap(rbo); 97 radeon_bo_unreserve(rbo); 98 } 99 drm_gem_object_unreference_unlocked(gobj); 100 } 101 ··· 327 { 328 struct fb_info *info; 329 struct radeon_framebuffer *rfb = &rfbdev->rfb; 330 - struct radeon_bo *rbo; 331 - int r; 332 333 if (rfbdev->helper.fbdev) { 334 info = rfbdev->helper.fbdev; ··· 338 } 339 340 if (rfb->obj) { 341 - rbo = rfb->obj->driver_private; 342 - r = radeon_bo_reserve(rbo, false); 343 - if (likely(r == 0)) { 344 - radeon_bo_kunmap(rbo); 345 - radeon_bo_unpin(rbo); 346 - radeon_bo_unreserve(rbo); 347 - } 348 - drm_gem_object_unreference_unlocked(rfb->obj); 349 } 350 drm_fb_helper_fini(&rfbdev->helper); 351 drm_framebuffer_cleanup(&rfb->base);
··· 94 ret = radeon_bo_reserve(rbo, false); 95 if (likely(ret == 0)) { 96 radeon_bo_kunmap(rbo); 97 + radeon_bo_unpin(rbo); 98 radeon_bo_unreserve(rbo); 99 } 100 + drm_gem_object_handle_unreference(gobj); 101 drm_gem_object_unreference_unlocked(gobj); 102 } 103 ··· 325 { 326 struct fb_info *info; 327 struct radeon_framebuffer *rfb = &rfbdev->rfb; 328 329 if (rfbdev->helper.fbdev) { 330 info = rfbdev->helper.fbdev; ··· 338 } 339 340 if (rfb->obj) { 341 + radeonfb_destroy_pinned_object(rfb->obj); 342 + rfb->obj = NULL; 343 } 344 drm_fb_helper_fini(&rfbdev->helper); 345 drm_framebuffer_cleanup(&rfb->base);
+2 -2
drivers/gpu/drm/radeon/radeon_gem.c
··· 201 return r; 202 } 203 r = drm_gem_handle_create(filp, gobj, &handle); 204 if (r) { 205 - drm_gem_object_unreference_unlocked(gobj); 206 return r; 207 } 208 - drm_gem_object_handle_unreference_unlocked(gobj); 209 args->handle = handle; 210 return 0; 211 }
··· 201 return r; 202 } 203 r = drm_gem_handle_create(filp, gobj, &handle); 204 + /* drop reference from allocate - handle holds it now */ 205 + drm_gem_object_unreference_unlocked(gobj); 206 if (r) { 207 return r; 208 } 209 args->handle = handle; 210 return 0; 211 }
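Note on the i915, nouveau and radeon hunks above: they converge on one GEM lifetime rule. drm_gem_object_alloc() hands back an object holding a single reference, drm_gem_handle_create() takes its own reference for the handle, so the allocation reference is dropped unconditionally right after handle creation and a failed handle create then frees the object instead of leaking it. Condensed from the hunks above, not new code:

obj = drm_gem_object_alloc(dev, size);		/* comes back with one reference */
if (obj == NULL)
	return -ENOMEM;

ret = drm_gem_handle_create(file_priv, obj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(obj);
if (ret)
	return ret;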
+110 -35
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 148 {0, 0, 0} 149 }; 150 151 - static char *vmw_devname = "vmwgfx"; 152 153 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 154 static void vmw_master_init(struct vmw_master *); 155 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 156 void *ptr); 157 158 static void vmw_print_capabilities(uint32_t capabilities) 159 { ··· 195 { 196 int ret; 197 198 - vmw_kms_save_vga(dev_priv); 199 - 200 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 201 if (unlikely(ret != 0)) { 202 DRM_ERROR("Unable to initialize FIFO.\n"); ··· 207 static void vmw_release_device(struct vmw_private *dev_priv) 208 { 209 vmw_fifo_release(dev_priv, &dev_priv->fifo); 210 - vmw_kms_restore_vga(dev_priv); 211 } 212 213 214 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 215 { ··· 255 dev_priv->last_read_sequence = (uint32_t) -100; 256 mutex_init(&dev_priv->hw_mutex); 257 mutex_init(&dev_priv->cmdbuf_mutex); 258 rwlock_init(&dev_priv->resource_lock); 259 idr_init(&dev_priv->context_idr); 260 idr_init(&dev_priv->surface_idr); ··· 271 dev_priv->io_start = pci_resource_start(dev->pdev, 0); 272 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 273 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 274 275 mutex_lock(&dev_priv->hw_mutex); 276 ··· 373 374 dev->dev_private = dev_priv; 375 376 - if (!dev->devname) 377 - dev->devname = vmw_devname; 378 - 379 - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 380 - ret = drm_irq_install(dev); 381 - if (unlikely(ret != 0)) { 382 - DRM_ERROR("Failed installing irq: %d\n", ret); 383 - goto out_no_irq; 384 - } 385 - } 386 - 387 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 388 dev_priv->stealth = (ret != 0); 389 if (dev_priv->stealth) { ··· 388 goto out_no_device; 389 } 390 } 391 - ret = vmw_request_device(dev_priv); 392 if (unlikely(ret != 0)) 393 - goto out_no_device; 394 - vmw_kms_init(dev_priv); 395 vmw_overlay_init(dev_priv); 396 - vmw_fb_init(dev_priv); 397 398 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 399 register_pm_notifier(&dev_priv->pm_nb); 400 401 - DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? 
"Have 3D\n" : "No 3D\n"); 402 - 403 return 0; 404 405 - out_no_device: 406 - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 407 - drm_irq_uninstall(dev_priv->dev); 408 - if (dev->devname == vmw_devname) 409 - dev->devname = NULL; 410 out_no_irq: 411 ttm_object_device_release(&dev_priv->tdev); 412 out_err4: 413 iounmap(dev_priv->mmio_virt); ··· 460 461 unregister_pm_notifier(&dev_priv->pm_nb); 462 463 - vmw_fb_close(dev_priv); 464 vmw_kms_close(dev_priv); 465 vmw_overlay_close(dev_priv); 466 - vmw_release_device(dev_priv); 467 if (dev_priv->stealth) 468 pci_release_region(dev->pdev, 2); 469 else 470 pci_release_regions(dev->pdev); 471 472 - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 473 - drm_irq_uninstall(dev_priv->dev); 474 - if (dev->devname == vmw_devname) 475 - dev->devname = NULL; 476 ttm_object_device_release(&dev_priv->tdev); 477 iounmap(dev_priv->mmio_virt); 478 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, ··· 546 struct drm_ioctl_desc *ioctl = 547 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 548 549 - if (unlikely(ioctl->cmd != cmd)) { 550 DRM_ERROR("Invalid command format, ioctl %d\n", 551 nr - DRM_COMMAND_BASE); 552 return -EINVAL; ··· 635 struct vmw_master *vmaster = vmw_master(file_priv->master); 636 int ret = 0; 637 638 if (active) { 639 BUG_ON(active != &dev_priv->fbdev_master); 640 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); ··· 673 return 0; 674 675 out_no_active_lock: 676 - vmw_release_device(dev_priv); 677 return ret; 678 } 679 ··· 707 708 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 709 710 dev_priv->active_master = &dev_priv->fbdev_master; 711 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 712 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 713 714 - vmw_fb_on(dev_priv); 715 } 716 717 ··· 796 .irq_postinstall = vmw_irq_postinstall, 797 .irq_uninstall = vmw_irq_uninstall, 798 .irq_handler = vmw_irq_handler, 799 .reclaim_buffers_locked = NULL, 800 .get_map_ofs = drm_core_get_map_ofs, 801 .get_reg_ofs = drm_core_get_reg_ofs,
··· 148 {0, 0, 0} 149 }; 150 151 + static int enable_fbdev; 152 153 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 154 static void vmw_master_init(struct vmw_master *); 155 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 156 void *ptr); 157 + 158 + MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 159 + module_param_named(enable_fbdev, enable_fbdev, int, 0600); 160 161 static void vmw_print_capabilities(uint32_t capabilities) 162 { ··· 192 { 193 int ret; 194 195 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 196 if (unlikely(ret != 0)) { 197 DRM_ERROR("Unable to initialize FIFO.\n"); ··· 206 static void vmw_release_device(struct vmw_private *dev_priv) 207 { 208 vmw_fifo_release(dev_priv, &dev_priv->fifo); 209 } 210 211 + int vmw_3d_resource_inc(struct vmw_private *dev_priv) 212 + { 213 + int ret = 0; 214 + 215 + mutex_lock(&dev_priv->release_mutex); 216 + if (unlikely(dev_priv->num_3d_resources++ == 0)) { 217 + ret = vmw_request_device(dev_priv); 218 + if (unlikely(ret != 0)) 219 + --dev_priv->num_3d_resources; 220 + } 221 + mutex_unlock(&dev_priv->release_mutex); 222 + return ret; 223 + } 224 + 225 + 226 + void vmw_3d_resource_dec(struct vmw_private *dev_priv) 227 + { 228 + int32_t n3d; 229 + 230 + mutex_lock(&dev_priv->release_mutex); 231 + if (unlikely(--dev_priv->num_3d_resources == 0)) 232 + vmw_release_device(dev_priv); 233 + n3d = (int32_t) dev_priv->num_3d_resources; 234 + mutex_unlock(&dev_priv->release_mutex); 235 + 236 + BUG_ON(n3d < 0); 237 + } 238 239 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 240 { ··· 228 dev_priv->last_read_sequence = (uint32_t) -100; 229 mutex_init(&dev_priv->hw_mutex); 230 mutex_init(&dev_priv->cmdbuf_mutex); 231 + mutex_init(&dev_priv->release_mutex); 232 rwlock_init(&dev_priv->resource_lock); 233 idr_init(&dev_priv->context_idr); 234 idr_init(&dev_priv->surface_idr); ··· 243 dev_priv->io_start = pci_resource_start(dev->pdev, 0); 244 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 245 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 246 + 247 + dev_priv->enable_fb = enable_fbdev; 248 249 mutex_lock(&dev_priv->hw_mutex); 250 ··· 343 344 dev->dev_private = dev_priv; 345 346 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 347 dev_priv->stealth = (ret != 0); 348 if (dev_priv->stealth) { ··· 369 goto out_no_device; 370 } 371 } 372 + ret = vmw_kms_init(dev_priv); 373 if (unlikely(ret != 0)) 374 + goto out_no_kms; 375 vmw_overlay_init(dev_priv); 376 + if (dev_priv->enable_fb) { 377 + ret = vmw_3d_resource_inc(dev_priv); 378 + if (unlikely(ret != 0)) 379 + goto out_no_fifo; 380 + vmw_kms_save_vga(dev_priv); 381 + vmw_fb_init(dev_priv); 382 + DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? 
383 + "Detected device 3D availability.\n" : 384 + "Detected no device 3D availability.\n"); 385 + } else { 386 + DRM_INFO("Delayed 3D detection since we're not " 387 + "running the device in SVGA mode yet.\n"); 388 + } 389 + 390 + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 391 + ret = drm_irq_install(dev); 392 + if (unlikely(ret != 0)) { 393 + DRM_ERROR("Failed installing irq: %d\n", ret); 394 + goto out_no_irq; 395 + } 396 + } 397 398 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 399 register_pm_notifier(&dev_priv->pm_nb); 400 401 return 0; 402 403 out_no_irq: 404 + if (dev_priv->enable_fb) { 405 + vmw_fb_close(dev_priv); 406 + vmw_kms_restore_vga(dev_priv); 407 + vmw_3d_resource_dec(dev_priv); 408 + } 409 + out_no_fifo: 410 + vmw_overlay_close(dev_priv); 411 + vmw_kms_close(dev_priv); 412 + out_no_kms: 413 + if (dev_priv->stealth) 414 + pci_release_region(dev->pdev, 2); 415 + else 416 + pci_release_regions(dev->pdev); 417 + out_no_device: 418 ttm_object_device_release(&dev_priv->tdev); 419 out_err4: 420 iounmap(dev_priv->mmio_virt); ··· 415 416 unregister_pm_notifier(&dev_priv->pm_nb); 417 418 + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 419 + drm_irq_uninstall(dev_priv->dev); 420 + if (dev_priv->enable_fb) { 421 + vmw_fb_close(dev_priv); 422 + vmw_kms_restore_vga(dev_priv); 423 + vmw_3d_resource_dec(dev_priv); 424 + } 425 vmw_kms_close(dev_priv); 426 vmw_overlay_close(dev_priv); 427 if (dev_priv->stealth) 428 pci_release_region(dev->pdev, 2); 429 else 430 pci_release_regions(dev->pdev); 431 432 ttm_object_device_release(&dev_priv->tdev); 433 iounmap(dev_priv->mmio_virt); 434 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, ··· 500 struct drm_ioctl_desc *ioctl = 501 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 502 503 + if (unlikely(ioctl->cmd_drv != cmd)) { 504 DRM_ERROR("Invalid command format, ioctl %d\n", 505 nr - DRM_COMMAND_BASE); 506 return -EINVAL; ··· 589 struct vmw_master *vmaster = vmw_master(file_priv->master); 590 int ret = 0; 591 592 + if (!dev_priv->enable_fb) { 593 + ret = vmw_3d_resource_inc(dev_priv); 594 + if (unlikely(ret != 0)) 595 + return ret; 596 + vmw_kms_save_vga(dev_priv); 597 + mutex_lock(&dev_priv->hw_mutex); 598 + vmw_write(dev_priv, SVGA_REG_TRACES, 0); 599 + mutex_unlock(&dev_priv->hw_mutex); 600 + } 601 + 602 if (active) { 603 BUG_ON(active != &dev_priv->fbdev_master); 604 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); ··· 617 return 0; 618 619 out_no_active_lock: 620 + if (!dev_priv->enable_fb) { 621 + mutex_lock(&dev_priv->hw_mutex); 622 + vmw_write(dev_priv, SVGA_REG_TRACES, 1); 623 + mutex_unlock(&dev_priv->hw_mutex); 624 + vmw_kms_restore_vga(dev_priv); 625 + vmw_3d_resource_dec(dev_priv); 626 + } 627 return ret; 628 } 629 ··· 645 646 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 647 648 + if (!dev_priv->enable_fb) { 649 + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 650 + if (unlikely(ret != 0)) 651 + DRM_ERROR("Unable to clean VRAM on master drop.\n"); 652 + mutex_lock(&dev_priv->hw_mutex); 653 + vmw_write(dev_priv, SVGA_REG_TRACES, 1); 654 + mutex_unlock(&dev_priv->hw_mutex); 655 + vmw_kms_restore_vga(dev_priv); 656 + vmw_3d_resource_dec(dev_priv); 657 + } 658 + 659 dev_priv->active_master = &dev_priv->fbdev_master; 660 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 661 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 662 663 + if (dev_priv->enable_fb) 664 + vmw_fb_on(dev_priv); 665 } 666 667 ··· 722 .irq_postinstall = vmw_irq_postinstall, 723 .irq_uninstall = vmw_irq_uninstall, 724 .irq_handler = 
vmw_irq_handler, 725 + .get_vblank_counter = vmw_get_vblank_counter, 726 .reclaim_buffers_locked = NULL, 727 .get_map_ofs = drm_core_get_map_ofs, 728 .get_reg_ofs = drm_core_get_reg_ofs,
+8
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 277 278 bool stealth; 279 bool is_opened; 280 281 /** 282 * Master management. ··· 286 struct vmw_master *active_master; 287 struct vmw_master fbdev_master; 288 struct notifier_block pm_nb; 289 }; 290 291 static inline struct vmw_private *vmw_priv(struct drm_device *dev) ··· 322 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 323 return val; 324 } 325 326 /** 327 * GMR utilities - vmwgfx_gmr.c ··· 518 unsigned bbp, unsigned depth); 519 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 520 struct drm_file *file_priv); 521 522 /** 523 * Overlay control - vmwgfx_overlay.c
··· 277 278 bool stealth; 279 bool is_opened; 280 + bool enable_fb; 281 282 /** 283 * Master management. ··· 285 struct vmw_master *active_master; 286 struct vmw_master fbdev_master; 287 struct notifier_block pm_nb; 288 + 289 + struct mutex release_mutex; 290 + uint32_t num_3d_resources; 291 }; 292 293 static inline struct vmw_private *vmw_priv(struct drm_device *dev) ··· 318 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 319 return val; 320 } 321 + 322 + int vmw_3d_resource_inc(struct vmw_private *dev_priv); 323 + void vmw_3d_resource_dec(struct vmw_private *dev_priv); 324 325 /** 326 * GMR utilities - vmwgfx_gmr.c ··· 511 unsigned bbp, unsigned depth); 512 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 513 struct drm_file *file_priv); 514 + u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); 515 516 /** 517 * Overlay control - vmwgfx_overlay.c
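Note on the vmwgfx hunks above: vmw_3d_resource_inc() and vmw_3d_resource_dec() implement a counted enable. The first caller runs vmw_request_device() and brings the FIFO up, the last caller runs vmw_release_device(), and num_3d_resources is only touched under release_mutex. A usage sketch condensed from the driver-load and context/surface paths shown above:

ret = vmw_3d_resource_inc(dev_priv);	/* first user powers the FIFO up */
if (unlikely(ret != 0))
	return ret;

/* ... use the 3D engine: contexts, surfaces, fbdev ... */

vmw_3d_resource_dec(dev_priv);		/* last user tears the FIFO down */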
+5
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 615 if (unlikely(ret != 0)) 616 goto err_unlock; 617 618 ret = ttm_bo_validate(bo, &ne_placement, false, false, false); 619 620 /* Could probably bug on */
··· 615 if (unlikely(ret != 0)) 616 goto err_unlock; 617 618 + if (bo->mem.mem_type == TTM_PL_VRAM && 619 + bo->mem.mm_node->start < bo->num_pages) 620 + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, 621 + false, false); 622 + 623 ret = ttm_bo_validate(bo, &ne_placement, false, false, false); 624 625 /* Could probably bug on */
+3
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
··· 106 mutex_lock(&dev_priv->hw_mutex); 107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 109 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 110 111 min = 4; ··· 176 dev_priv->config_done_state); 177 vmw_write(dev_priv, SVGA_REG_ENABLE, 178 dev_priv->enable_state); 179 180 mutex_unlock(&dev_priv->hw_mutex); 181 vmw_fence_queue_takedown(&fifo->fence_queue);
··· 106 mutex_lock(&dev_priv->hw_mutex); 107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 109 + dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 110 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 111 112 min = 4; ··· 175 dev_priv->config_done_state); 176 vmw_write(dev_priv, SVGA_REG_ENABLE, 177 dev_priv->enable_state); 178 + vmw_write(dev_priv, SVGA_REG_TRACES, 179 + dev_priv->traces_state); 180 181 mutex_unlock(&dev_priv->hw_mutex); 182 vmw_fence_queue_takedown(&fifo->fence_queue);
+17
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); 899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); 900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 901 } 902 return 0; 903 } 904 ··· 995 out_unlock: 996 ttm_read_unlock(&vmaster->lock); 997 return ret; 998 }
··· 898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); 899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); 900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 901 + if (i == 0 && vmw_priv->num_displays == 1 && 902 + save->width == 0 && save->height == 0) { 903 + 904 + /* 905 + * It should be fairly safe to assume that these 906 + * values are uninitialized. 907 + */ 908 + 909 + save->width = vmw_priv->vga_width - save->pos_x; 910 + save->height = vmw_priv->vga_height - save->pos_y; 911 + } 912 } 913 + 914 return 0; 915 } 916 ··· 983 out_unlock: 984 ttm_read_unlock(&vmaster->lock); 985 return ret; 986 + } 987 + 988 + u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) 989 + { 990 + return 0; 991 }
+17 -10
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
··· 27 28 #include "vmwgfx_kms.h" 29 30 #define vmw_crtc_to_ldu(x) \ 31 container_of(x, struct vmw_legacy_display_unit, base.crtc) 32 #define vmw_encoder_to_ldu(x) \ ··· 538 539 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 540 { 541 if (dev_priv->ldu_priv) { 542 DRM_INFO("ldu system already on\n"); 543 return -EINVAL; ··· 559 560 drm_mode_create_dirty_info_property(dev_priv->dev); 561 562 - vmw_ldu_init(dev_priv, 0); 563 - /* for old hardware without multimon only enable one display */ 564 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { 565 - vmw_ldu_init(dev_priv, 1); 566 - vmw_ldu_init(dev_priv, 2); 567 - vmw_ldu_init(dev_priv, 3); 568 - vmw_ldu_init(dev_priv, 4); 569 - vmw_ldu_init(dev_priv, 5); 570 - vmw_ldu_init(dev_priv, 6); 571 - vmw_ldu_init(dev_priv, 7); 572 } 573 574 - return 0; 575 } 576 577 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 578 { 579 if (!dev_priv->ldu_priv) 580 return -ENOSYS; 581
··· 27 28 #include "vmwgfx_kms.h" 29 30 + #define VMWGFX_LDU_NUM_DU 8 31 + 32 #define vmw_crtc_to_ldu(x) \ 33 container_of(x, struct vmw_legacy_display_unit, base.crtc) 34 #define vmw_encoder_to_ldu(x) \ ··· 536 537 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 538 { 539 + struct drm_device *dev = dev_priv->dev; 540 + int i; 541 + int ret; 542 + 543 if (dev_priv->ldu_priv) { 544 DRM_INFO("ldu system already on\n"); 545 return -EINVAL; ··· 553 554 drm_mode_create_dirty_info_property(dev_priv->dev); 555 556 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { 557 + for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) 558 + vmw_ldu_init(dev_priv, i); 559 + ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); 560 + } else { 561 + /* for old hardware without multimon only enable one display */ 562 + vmw_ldu_init(dev_priv, 0); 563 + ret = drm_vblank_init(dev, 1); 564 } 565 566 + return ret; 567 } 568 569 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 570 { 571 + struct drm_device *dev = dev_priv->dev; 572 + 573 + drm_vblank_cleanup(dev); 574 if (!dev_priv->ldu_priv) 575 return -ENOSYS; 576
+4
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 211 cmd->body.cid = cpu_to_le32(res->id); 212 213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 214 } 215 216 static int vmw_context_init(struct vmw_private *dev_priv, ··· 248 cmd->body.cid = cpu_to_le32(res->id); 249 250 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 251 vmw_resource_activate(res, vmw_hw_context_destroy); 252 return 0; 253 } ··· 408 cmd->body.sid = cpu_to_le32(res->id); 409 410 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 411 } 412 413 void vmw_surface_res_free(struct vmw_resource *res) ··· 476 } 477 478 vmw_fifo_commit(dev_priv, submit_size); 479 vmw_resource_activate(res, vmw_hw_surface_destroy); 480 return 0; 481 }
··· 211 cmd->body.cid = cpu_to_le32(res->id); 212 213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 214 + vmw_3d_resource_dec(dev_priv); 215 } 216 217 static int vmw_context_init(struct vmw_private *dev_priv, ··· 247 cmd->body.cid = cpu_to_le32(res->id); 248 249 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 250 + (void) vmw_3d_resource_inc(dev_priv); 251 vmw_resource_activate(res, vmw_hw_context_destroy); 252 return 0; 253 } ··· 406 cmd->body.sid = cpu_to_le32(res->id); 407 408 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 409 + vmw_3d_resource_dec(dev_priv); 410 } 411 412 void vmw_surface_res_free(struct vmw_resource *res) ··· 473 } 474 475 vmw_fifo_commit(dev_priv, submit_size); 476 + (void) vmw_3d_resource_inc(dev_priv); 477 vmw_resource_activate(res, vmw_hw_surface_destroy); 478 return 0; 479 }
+19 -13
drivers/hwmon/f71882fg.c
··· 111 /* Super-I/O Function prototypes */ 112 static inline int superio_inb(int base, int reg); 113 static inline int superio_inw(int base, int reg); 114 - static inline void superio_enter(int base); 115 static inline void superio_select(int base, int ld); 116 static inline void superio_exit(int base); 117 ··· 861 return val; 862 } 863 864 - static inline void superio_enter(int base) 865 { 866 /* according to the datasheet the key must be send twice! */ 867 outb(SIO_UNLOCK_KEY, base); 868 outb(SIO_UNLOCK_KEY, base); 869 } 870 871 static inline void superio_select(int base, int ld) ··· 886 static inline void superio_exit(int base) 887 { 888 outb(SIO_LOCK_KEY, base); 889 } 890 891 static inline int fan_from_reg(u16 reg) ··· 2185 static int __init f71882fg_find(int sioaddr, unsigned short *address, 2186 struct f71882fg_sio_data *sio_data) 2187 { 2188 - int err = -ENODEV; 2189 u16 devid; 2190 - 2191 - /* Don't step on other drivers' I/O space by accident */ 2192 - if (!request_region(sioaddr, 2, DRVNAME)) { 2193 - printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", 2194 - (int)sioaddr); 2195 - return -EBUSY; 2196 - } 2197 - 2198 - superio_enter(sioaddr); 2199 2200 devid = superio_inw(sioaddr, SIO_REG_MANID); 2201 if (devid != SIO_FINTEK_ID) { 2202 pr_debug(DRVNAME ": Not a Fintek device\n"); 2203 goto exit; 2204 } 2205 ··· 2217 default: 2218 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", 2219 (unsigned int)devid); 2220 goto exit; 2221 } 2222 ··· 2228 2229 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { 2230 printk(KERN_WARNING DRVNAME ": Device not activated\n"); 2231 goto exit; 2232 } 2233 2234 *address = superio_inw(sioaddr, SIO_REG_ADDR); 2235 if (*address == 0) { 2236 printk(KERN_WARNING DRVNAME ": Base address not set\n"); 2237 goto exit; 2238 } 2239 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ ··· 2246 (int)superio_inb(sioaddr, SIO_REG_DEVREV)); 2247 exit: 2248 superio_exit(sioaddr); 2249 - release_region(sioaddr, 2); 2250 return err; 2251 } 2252
··· 111 /* Super-I/O Function prototypes */ 112 static inline int superio_inb(int base, int reg); 113 static inline int superio_inw(int base, int reg); 114 + static inline int superio_enter(int base); 115 static inline void superio_select(int base, int ld); 116 static inline void superio_exit(int base); 117 ··· 861 return val; 862 } 863 864 + static inline int superio_enter(int base) 865 { 866 + /* Don't step on other drivers' I/O space by accident */ 867 + if (!request_muxed_region(base, 2, DRVNAME)) { 868 + printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", 869 + base); 870 + return -EBUSY; 871 + } 872 + 873 /* according to the datasheet the key must be send twice! */ 874 outb(SIO_UNLOCK_KEY, base); 875 outb(SIO_UNLOCK_KEY, base); 876 + 877 + return 0; 878 } 879 880 static inline void superio_select(int base, int ld) ··· 877 static inline void superio_exit(int base) 878 { 879 outb(SIO_LOCK_KEY, base); 880 + release_region(base, 2); 881 } 882 883 static inline int fan_from_reg(u16 reg) ··· 2175 static int __init f71882fg_find(int sioaddr, unsigned short *address, 2176 struct f71882fg_sio_data *sio_data) 2177 { 2178 u16 devid; 2179 + int err = superio_enter(sioaddr); 2180 + if (err) 2181 + return err; 2182 2183 devid = superio_inw(sioaddr, SIO_REG_MANID); 2184 if (devid != SIO_FINTEK_ID) { 2185 pr_debug(DRVNAME ": Not a Fintek device\n"); 2186 + err = -ENODEV; 2187 goto exit; 2188 } 2189 ··· 2213 default: 2214 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", 2215 (unsigned int)devid); 2216 + err = -ENODEV; 2217 goto exit; 2218 } 2219 ··· 2223 2224 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { 2225 printk(KERN_WARNING DRVNAME ": Device not activated\n"); 2226 + err = -ENODEV; 2227 goto exit; 2228 } 2229 2230 *address = superio_inw(sioaddr, SIO_REG_ADDR); 2231 if (*address == 0) { 2232 printk(KERN_WARNING DRVNAME ": Base address not set\n"); 2233 + err = -ENODEV; 2234 goto exit; 2235 } 2236 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ ··· 2239 (int)superio_inb(sioaddr, SIO_REG_DEVREV)); 2240 exit: 2241 superio_exit(sioaddr); 2242 return err; 2243 } 2244
+3 -3
drivers/i2c/busses/i2c-davinci.c
··· 357 358 dev->terminate = 0; 359 360 - /* write the data into mode register */ 361 - davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); 362 - 363 /* 364 * First byte should be set here, not after interrupt, 365 * because transmit-data-ready interrupt can come before ··· 367 davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); 368 dev->buf_len--; 369 } 370 371 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 372 dev->adapter.timeout);
··· 357 358 dev->terminate = 0; 359 360 /* 361 * First byte should be set here, not after interrupt, 362 * because transmit-data-ready interrupt can come before ··· 370 davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); 371 dev->buf_len--; 372 } 373 + 374 + /* write the data into mode register; start transmitting */ 375 + davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); 376 377 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 378 dev->adapter.timeout);
+1 -1
drivers/i2c/busses/i2c-octeon.c
··· 218 return result; 219 } else if (result == 0) { 220 dev_dbg(i2c->dev, "%s: timeout\n", __func__); 221 - result = -ETIMEDOUT; 222 } 223 224 return 0;
··· 218 return result; 219 } else if (result == 0) { 220 dev_dbg(i2c->dev, "%s: timeout\n", __func__); 221 + return -ETIMEDOUT; 222 } 223 224 return 0;
+2 -2
drivers/i2c/busses/i2c-s3c2410.c
··· 662 unsigned long sda_delay; 663 664 if (pdata->sda_delay) { 665 - sda_delay = (freq / 1000) * pdata->sda_delay; 666 - sda_delay /= 1000000; 667 sda_delay = DIV_ROUND_UP(sda_delay, 5); 668 if (sda_delay > 3) 669 sda_delay = 3;
··· 662 unsigned long sda_delay; 663 664 if (pdata->sda_delay) { 665 + sda_delay = clkin * pdata->sda_delay; 666 + sda_delay = DIV_ROUND_UP(sda_delay, 1000000); 667 sda_delay = DIV_ROUND_UP(sda_delay, 5); 668 if (sda_delay > 3) 669 sda_delay = 3;
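The i2c-s3c2410 hunk above recomputes the SDA hold delay from the bus input clock rather than from the target bus frequency. A minimal stand-alone sketch of the same arithmetic, assuming (as the driver does) that clkin is in kHz and the platform-supplied delay is in nanoseconds; the function name and the sample numbers are illustrative, not part of the driver:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* clkin_khz: bus input clock in kHz, delay_ns: requested SDA delay in ns.
     * Returns the 2-bit register value, which counts in steps of 5 clocks. */
    static unsigned long sda_delay_reg(unsigned long clkin_khz, unsigned long delay_ns)
    {
            unsigned long sda_delay = clkin_khz * delay_ns;   /* kHz * ns */

            sda_delay = DIV_ROUND_UP(sda_delay, 1000000);     /* -> clock cycles */
            sda_delay = DIV_ROUND_UP(sda_delay, 5);           /* -> 5-cycle steps */
            return sda_delay > 3 ? 3 : sda_delay;
    }

    int main(void)
    {
            /* e.g. a 66 MHz input clock and a 100 ns hold time: 7 cycles -> 2 steps */
            printf("%lu\n", sda_delay_reg(66000, 100));
            return 0;
    }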
+15 -5
drivers/idle/intel_idle.c
··· 83 /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ 84 static unsigned int lapic_timer_reliable_states; 85 86 - static struct cpuidle_device *intel_idle_cpuidle_devices; 87 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 88 89 static struct cpuidle_state *cpuidle_state_table; ··· 108 .name = "NHM-C3", 109 .desc = "MWAIT 0x10", 110 .driver_data = (void *) 0x10, 111 - .flags = CPUIDLE_FLAG_TIME_VALID, 112 .exit_latency = 20, 113 .power_usage = 500, 114 .target_residency = 80, ··· 117 .name = "NHM-C6", 118 .desc = "MWAIT 0x20", 119 .driver_data = (void *) 0x20, 120 - .flags = CPUIDLE_FLAG_TIME_VALID, 121 .exit_latency = 200, 122 .power_usage = 350, 123 .target_residency = 800, ··· 149 .name = "ATM-C4", 150 .desc = "MWAIT 0x30", 151 .driver_data = (void *) 0x30, 152 - .flags = CPUIDLE_FLAG_TIME_VALID, 153 .exit_latency = 100, 154 .power_usage = 250, 155 .target_residency = 400, ··· 159 .name = "ATM-C6", 160 .desc = "MWAIT 0x40", 161 .driver_data = (void *) 0x40, 162 - .flags = CPUIDLE_FLAG_TIME_VALID, 163 .exit_latency = 200, 164 .power_usage = 150, 165 .target_residency = 800, ··· 184 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; 185 186 local_irq_disable(); 187 188 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 189 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
··· 83 /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ 84 static unsigned int lapic_timer_reliable_states; 85 86 + static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; 87 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 88 89 static struct cpuidle_state *cpuidle_state_table; ··· 108 .name = "NHM-C3", 109 .desc = "MWAIT 0x10", 110 .driver_data = (void *) 0x10, 111 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 112 .exit_latency = 20, 113 .power_usage = 500, 114 .target_residency = 80, ··· 117 .name = "NHM-C6", 118 .desc = "MWAIT 0x20", 119 .driver_data = (void *) 0x20, 120 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 121 .exit_latency = 200, 122 .power_usage = 350, 123 .target_residency = 800, ··· 149 .name = "ATM-C4", 150 .desc = "MWAIT 0x30", 151 .driver_data = (void *) 0x30, 152 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 153 .exit_latency = 100, 154 .power_usage = 250, 155 .target_residency = 400, ··· 159 .name = "ATM-C6", 160 .desc = "MWAIT 0x40", 161 .driver_data = (void *) 0x40, 162 + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 163 .exit_latency = 200, 164 .power_usage = 150, 165 .target_residency = 800, ··· 184 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; 185 186 local_irq_disable(); 187 + 188 + /* 189 + * If the state flag indicates that the TLB will be flushed or if this 190 + * is the deepest c-state supported, do a voluntary leave mm to avoid 191 + * costly and mostly unnecessary wakeups for flushing the user TLB's 192 + * associated with the active mm. 193 + */ 194 + if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED || 195 + (&dev->states[dev->state_count - 1] == state)) 196 + leave_mm(cpu); 197 198 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 199 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+7 -6
drivers/mfd/max8925-core.c
··· 429 irq_tsc = cache_tsc; 430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { 431 irq_data = &max8925_irqs[i]; 432 switch (irq_data->mask_reg) { 433 case MAX8925_CHG_IRQ1_MASK: 434 - irq_chg[0] &= irq_data->enable; 435 break; 436 case MAX8925_CHG_IRQ2_MASK: 437 - irq_chg[1] &= irq_data->enable; 438 break; 439 case MAX8925_ON_OFF_IRQ1_MASK: 440 - irq_on[0] &= irq_data->enable; 441 break; 442 case MAX8925_ON_OFF_IRQ2_MASK: 443 - irq_on[1] &= irq_data->enable; 444 break; 445 case MAX8925_RTC_IRQ_MASK: 446 - irq_rtc &= irq_data->enable; 447 break; 448 case MAX8925_TSC_IRQ_MASK: 449 - irq_tsc &= irq_data->enable; 450 break; 451 default: 452 dev_err(chip->dev, "wrong IRQ\n");
··· 429 irq_tsc = cache_tsc; 430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { 431 irq_data = &max8925_irqs[i]; 432 + /* 1 -- disable, 0 -- enable */ 433 switch (irq_data->mask_reg) { 434 case MAX8925_CHG_IRQ1_MASK: 435 + irq_chg[0] &= ~irq_data->enable; 436 break; 437 case MAX8925_CHG_IRQ2_MASK: 438 + irq_chg[1] &= ~irq_data->enable; 439 break; 440 case MAX8925_ON_OFF_IRQ1_MASK: 441 + irq_on[0] &= ~irq_data->enable; 442 break; 443 case MAX8925_ON_OFF_IRQ2_MASK: 444 + irq_on[1] &= ~irq_data->enable; 445 break; 446 case MAX8925_RTC_IRQ_MASK: 447 + irq_rtc &= ~irq_data->enable; 448 break; 449 case MAX8925_TSC_IRQ_MASK: 450 + irq_tsc &= ~irq_data->enable; 451 break; 452 default: 453 dev_err(chip->dev, "wrong IRQ\n");
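The max8925 hunk corrects the sense of the interrupt mask update: in these mask registers a set bit disables a source, so enabling one means clearing its bit with the complement of the enable mask. A tiny stand-alone illustration of that bit operation; the register and mask values are made up for the example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t mask_reg = 0xff;   /* all sources masked: 1 = disable, 0 = enable */
            uint8_t enable   = 0x05;   /* sources we want enabled */

            mask_reg &= ~enable;       /* clear the bits -> those sources are unmasked */
            printf("0x%02x\n", mask_reg);   /* prints 0xfa */

            /* the old "mask_reg &= enable" kept the wanted sources masked and
             * cleared every other bit, i.e. the opposite of the intended effect */
            return 0;
    }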
+7 -2
drivers/mfd/wm831x-irq.c
··· 394 395 irq = irq - wm831x->irq_base; 396 397 - if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) 398 - return -EINVAL; 399 400 switch (type) { 401 case IRQ_TYPE_EDGE_BOTH:
··· 394 395 irq = irq - wm831x->irq_base; 396 397 + if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { 398 + /* Ignore internal-only IRQs */ 399 + if (irq >= 0 && irq < WM831X_NUM_IRQS) 400 + return 0; 401 + else 402 + return -EINVAL; 403 + } 404 405 switch (type) { 406 case IRQ_TYPE_EDGE_BOTH:
+1 -1
drivers/mtd/nand/omap2.c
··· 413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); 414 } while (prefetch_status); 415 /* disable and stop the PFPW engine */ 416 - gpmc_prefetch_reset(); 417 418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 419 return 0;
··· 413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); 414 } while (prefetch_status); 415 /* disable and stop the PFPW engine */ 416 + gpmc_prefetch_reset(info->gpmc_cs); 417 418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 419 return 0;
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 1411 clear_bit(STATUS_SCAN_HW, &priv->status); 1412 clear_bit(STATUS_SCANNING, &priv->status); 1413 /* inform mac80211 scan aborted */ 1414 - queue_work(priv->workqueue, &priv->scan_completed); 1415 } 1416 1417 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
··· 1411 clear_bit(STATUS_SCAN_HW, &priv->status); 1412 clear_bit(STATUS_SCANNING, &priv->status); 1413 /* inform mac80211 scan aborted */ 1414 + queue_work(priv->workqueue, &priv->abort_scan); 1415 } 1416 1417 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+1 -1
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 3018 clear_bit(STATUS_SCANNING, &priv->status); 3019 3020 /* inform mac80211 scan aborted */ 3021 - queue_work(priv->workqueue, &priv->scan_completed); 3022 } 3023 3024 static void iwl3945_bg_restart(struct work_struct *data)
··· 3018 clear_bit(STATUS_SCANNING, &priv->status); 3019 3020 /* inform mac80211 scan aborted */ 3021 + queue_work(priv->workqueue, &priv->abort_scan); 3022 } 3023 3024 static void iwl3945_bg_restart(struct work_struct *data)
+20
drivers/pci/quirks.c
··· 163 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 164 165 /* 166 * Chipsets where PCI->PCI transfers vanish or hang 167 */ 168 static void __devinit quirk_nopcipci(struct pci_dev *dev)
··· 163 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 164 165 /* 166 + * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear 167 + * for some HT machines to use C4 w/o hanging. 168 + */ 169 + static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev) 170 + { 171 + u32 pmbase; 172 + u16 pm1a; 173 + 174 + pci_read_config_dword(dev, 0x40, &pmbase); 175 + pmbase = pmbase & 0xff80; 176 + pm1a = inw(pmbase); 177 + 178 + if (pm1a & 0x10) { 179 + dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); 180 + outw(0x10, pmbase); 181 + } 182 + } 183 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); 184 + 185 + /* 186 * Chipsets where PCI->PCI transfers vanish or hang 187 */ 188 static void __devinit quirk_nopcipci(struct pci_dev *dev)
+4 -2
drivers/regulator/core.c
··· 700 constraints->min_uA != constraints->max_uA) { 701 ret = _regulator_get_current_limit(rdev); 702 if (ret > 0) 703 - count += sprintf(buf + count, "at %d uA ", ret / 1000); 704 } 705 706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) ··· 2302 dev_set_name(&rdev->dev, "regulator.%d", 2303 atomic_inc_return(&regulator_no) - 1); 2304 ret = device_register(&rdev->dev); 2305 - if (ret != 0) 2306 goto clean; 2307 2308 dev_set_drvdata(&rdev->dev, rdev); 2309
··· 700 constraints->min_uA != constraints->max_uA) { 701 ret = _regulator_get_current_limit(rdev); 702 if (ret > 0) 703 + count += sprintf(buf + count, "at %d mA ", ret / 1000); 704 } 705 706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) ··· 2302 dev_set_name(&rdev->dev, "regulator.%d", 2303 atomic_inc_return(&regulator_no) - 1); 2304 ret = device_register(&rdev->dev); 2305 + if (ret != 0) { 2306 + put_device(&rdev->dev); 2307 goto clean; 2308 + } 2309 2310 dev_set_drvdata(&rdev->dev, rdev); 2311
+1 -1
drivers/regulator/max8649.c
··· 330 /* set external clock frequency */ 331 info->extclk_freq = pdata->extclk_freq; 332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, 333 - info->extclk_freq); 334 } 335 336 if (pdata->ramp_timing) {
··· 330 /* set external clock frequency */ 331 info->extclk_freq = pdata->extclk_freq; 332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, 333 + info->extclk_freq << 6); 334 } 335 336 if (pdata->ramp_timing) {
+1
drivers/serial/mfd.c
··· 27 #include <linux/init.h> 28 #include <linux/console.h> 29 #include <linux/sysrq.h> 30 #include <linux/serial_reg.h> 31 #include <linux/circ_buf.h> 32 #include <linux/delay.h>
··· 27 #include <linux/init.h> 28 #include <linux/console.h> 29 #include <linux/sysrq.h> 30 + #include <linux/slab.h> 31 #include <linux/serial_reg.h> 32 #include <linux/circ_buf.h> 33 #include <linux/delay.h>
+1
drivers/serial/mrst_max3110.c
··· 29 30 #include <linux/module.h> 31 #include <linux/ioport.h> 32 #include <linux/init.h> 33 #include <linux/console.h> 34 #include <linux/sysrq.h>
··· 29 30 #include <linux/module.h> 31 #include <linux/ioport.h> 32 + #include <linux/irq.h> 33 #include <linux/init.h> 34 #include <linux/console.h> 35 #include <linux/sysrq.h>
+5
drivers/spi/spi.c
··· 23 #include <linux/init.h> 24 #include <linux/cache.h> 25 #include <linux/mutex.h> 26 #include <linux/slab.h> 27 #include <linux/mod_devicetable.h> 28 #include <linux/spi/spi.h> ··· 86 { 87 const struct spi_device *spi = to_spi_device(dev); 88 const struct spi_driver *sdrv = to_spi_driver(drv); 89 90 if (sdrv->id_table) 91 return !!spi_match_id(sdrv->id_table, spi);
··· 23 #include <linux/init.h> 24 #include <linux/cache.h> 25 #include <linux/mutex.h> 26 + #include <linux/of_device.h> 27 #include <linux/slab.h> 28 #include <linux/mod_devicetable.h> 29 #include <linux/spi/spi.h> ··· 85 { 86 const struct spi_device *spi = to_spi_device(dev); 87 const struct spi_driver *sdrv = to_spi_driver(drv); 88 + 89 + /* Attempt an OF style match */ 90 + if (of_driver_match_device(dev, drv)) 91 + return 1; 92 93 if (sdrv->id_table) 94 return !!spi_match_id(sdrv->id_table, spi);
+1 -1
drivers/spi/spi_gpio.c
··· 350 spi_gpio->bitbang.master = spi_master_get(master); 351 spi_gpio->bitbang.chipselect = spi_gpio_chipselect; 352 353 - if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) { 354 spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; 355 spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; 356 spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
··· 350 spi_gpio->bitbang.master = spi_master_get(master); 351 spi_gpio->bitbang.chipselect = spi_gpio_chipselect; 352 353 + if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { 354 spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; 355 spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; 356 spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
+8 -2
drivers/spi/spi_mpc8xxx.c
··· 408 409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count; 410 411 - out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); 412 out_be16(&rx_bd->cbd_datlen, 0); 413 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); 414 415 - out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); 416 out_be16(&tx_bd->cbd_datlen, xfer_len); 417 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | 418 BD_SC_LAST);
··· 408 409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count; 410 411 + if (mspi->rx_dma == mspi->dma_dummy_rx) 412 + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); 413 + else 414 + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); 415 out_be16(&rx_bd->cbd_datlen, 0); 416 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); 417 418 + if (mspi->tx_dma == mspi->dma_dummy_tx) 419 + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); 420 + else 421 + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); 422 out_be16(&tx_bd->cbd_datlen, xfer_len); 423 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | 424 BD_SC_LAST);
+6 -3
drivers/xen/xenbus/xenbus_probe.c
··· 755 { 756 int ret = 0; 757 758 - blocking_notifier_chain_register(&xenstore_chain, nb); 759 760 return ret; 761 } ··· 772 773 void xenbus_probe(struct work_struct *unused) 774 { 775 - BUG_ON((xenstored_ready <= 0)); 776 777 /* Enumerate devices in xenstore and watch for changes. */ 778 xenbus_probe_devices(&xenbus_frontend); ··· 838 xen_store_evtchn = xen_start_info->store_evtchn; 839 xen_store_mfn = xen_start_info->store_mfn; 840 xen_store_interface = mfn_to_virt(xen_store_mfn); 841 } 842 - xenstored_ready = 1; 843 } 844 845 /* Initialize the interface to xenstore. */
··· 755 { 756 int ret = 0; 757 758 + if (xenstored_ready > 0) 759 + ret = nb->notifier_call(nb, 0, NULL); 760 + else 761 + blocking_notifier_chain_register(&xenstore_chain, nb); 762 763 return ret; 764 } ··· 769 770 void xenbus_probe(struct work_struct *unused) 771 { 772 + xenstored_ready = 1; 773 774 /* Enumerate devices in xenstore and watch for changes. */ 775 xenbus_probe_devices(&xenbus_frontend); ··· 835 xen_store_evtchn = xen_start_info->store_evtchn; 836 xen_store_mfn = xen_start_info->store_mfn; 837 xen_store_interface = mfn_to_virt(xen_store_mfn); 838 + xenstored_ready = 1; 839 } 840 } 841 842 /* Initialize the interface to xenstore. */
+33 -16
fs/cifs/cifssmb.c
··· 232 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 233 void **request_buf) 234 { 235 - int rc = 0; 236 237 rc = cifs_reconnect_tcon(tcon, smb_command); 238 if (rc) ··· 250 if (tcon != NULL) 251 cifs_stats_inc(&tcon->num_smbs_sent); 252 253 - return rc; 254 } 255 256 int ··· 281 282 /* If the return code is zero, this function must fill in request_buf pointer */ 283 static int 284 - smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 285 - void **request_buf /* returned */ , 286 - void **response_buf /* returned */ ) 287 { 288 - int rc = 0; 289 - 290 - rc = cifs_reconnect_tcon(tcon, smb_command); 291 - if (rc) 292 - return rc; 293 - 294 *request_buf = cifs_buf_get(); 295 if (*request_buf == NULL) { 296 /* BB should we add a retry in here if not a writepage? */ ··· 302 if (tcon != NULL) 303 cifs_stats_inc(&tcon->num_smbs_sent); 304 305 - return rc; 306 } 307 308 static int validate_t2(struct smb_t2_rsp *pSMB) ··· 4551 4552 cFYI(1, "In QFSUnixInfo"); 4553 QFSUnixRetry: 4554 - rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 4555 - (void **) &pSMBr); 4556 if (rc) 4557 return rc; 4558 ··· 4621 cFYI(1, "In SETFSUnixInfo"); 4622 SETFSUnixRetry: 4623 /* BB switch to small buf init to save memory */ 4624 - rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 4625 - (void **) &pSMBr); 4626 if (rc) 4627 return rc; 4628
··· 232 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 233 void **request_buf) 234 { 235 + int rc; 236 237 rc = cifs_reconnect_tcon(tcon, smb_command); 238 if (rc) ··· 250 if (tcon != NULL) 251 cifs_stats_inc(&tcon->num_smbs_sent); 252 253 + return 0; 254 } 255 256 int ··· 281 282 /* If the return code is zero, this function must fill in request_buf pointer */ 283 static int 284 + __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 285 + void **request_buf, void **response_buf) 286 { 287 *request_buf = cifs_buf_get(); 288 if (*request_buf == NULL) { 289 /* BB should we add a retry in here if not a writepage? */ ··· 309 if (tcon != NULL) 310 cifs_stats_inc(&tcon->num_smbs_sent); 311 312 + return 0; 313 + } 314 + 315 + /* If the return code is zero, this function must fill in request_buf pointer */ 316 + static int 317 + smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 318 + void **request_buf, void **response_buf) 319 + { 320 + int rc; 321 + 322 + rc = cifs_reconnect_tcon(tcon, smb_command); 323 + if (rc) 324 + return rc; 325 + 326 + return __smb_init(smb_command, wct, tcon, request_buf, response_buf); 327 + } 328 + 329 + static int 330 + smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon, 331 + void **request_buf, void **response_buf) 332 + { 333 + if (tcon->ses->need_reconnect || tcon->need_reconnect) 334 + return -EHOSTDOWN; 335 + 336 + return __smb_init(smb_command, wct, tcon, request_buf, response_buf); 337 } 338 339 static int validate_t2(struct smb_t2_rsp *pSMB) ··· 4534 4535 cFYI(1, "In QFSUnixInfo"); 4536 QFSUnixRetry: 4537 + rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, 4538 + (void **) &pSMB, (void **) &pSMBr); 4539 if (rc) 4540 return rc; 4541 ··· 4604 cFYI(1, "In SETFSUnixInfo"); 4605 SETFSUnixRetry: 4606 /* BB switch to small buf init to save memory */ 4607 + rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, 4608 + (void **) &pSMB, (void **) &pSMBr); 4609 if (rc) 4610 return rc; 4611
+2
fs/cifs/inode.c
··· 801 inode->i_flags |= S_NOATIME | S_NOCMTIME; 802 if (inode->i_state & I_NEW) { 803 inode->i_ino = hash; 804 #ifdef CONFIG_CIFS_FSCACHE 805 /* initialize per-inode cache cookie pointer */ 806 CIFS_I(inode)->fscache = NULL;
··· 801 inode->i_flags |= S_NOATIME | S_NOCMTIME; 802 if (inode->i_state & I_NEW) { 803 inode->i_ino = hash; 804 + if (S_ISREG(inode->i_mode)) 805 + inode->i_data.backing_dev_info = sb->s_bdi; 806 #ifdef CONFIG_CIFS_FSCACHE 807 /* initialize per-inode cache cookie pointer */ 808 CIFS_I(inode)->fscache = NULL;
+4 -15
fs/fs-writeback.c
··· 72 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) 73 { 74 struct super_block *sb = inode->i_sb; 75 - struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; 76 77 - /* 78 - * For inodes on standard filesystems, we use superblock's bdi. For 79 - * inodes on virtual filesystems, we want to use inode mapping's bdi 80 - * because they can possibly point to something useful (think about 81 - * block_dev filesystem). 82 - */ 83 - if (sb->s_bdi && sb->s_bdi != &noop_backing_dev_info) { 84 - /* Some device inodes could play dirty tricks. Catch them... */ 85 - WARN(bdi != sb->s_bdi && bdi_cap_writeback_dirty(bdi), 86 - "Dirtiable inode bdi %s != sb bdi %s\n", 87 - bdi->name, sb->s_bdi->name); 88 - return sb->s_bdi; 89 - } 90 - return bdi; 91 } 92 93 static void bdi_queue_work(struct backing_dev_info *bdi,
··· 72 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) 73 { 74 struct super_block *sb = inode->i_sb; 75 76 + if (strcmp(sb->s_type->name, "bdev") == 0) 77 + return inode->i_mapping->backing_dev_info; 78 + 79 + return sb->s_bdi; 80 } 81 82 static void bdi_queue_work(struct backing_dev_info *bdi,
+1 -1
fs/fuse/dev.c
··· 1354 loff_t file_size; 1355 unsigned int num; 1356 unsigned int offset; 1357 - size_t total_len; 1358 1359 req = fuse_get_req(fc); 1360 if (IS_ERR(req))
··· 1354 loff_t file_size; 1355 unsigned int num; 1356 unsigned int offset; 1357 + size_t total_len = 0; 1358 1359 req = fuse_get_req(fc); 1360 if (IS_ERR(req))
+1 -1
fs/ocfs2/symlink.c
··· 128 } 129 130 /* Fast symlinks can't be large */ 131 - len = strlen(target); 132 link = kzalloc(len + 1, GFP_NOFS); 133 if (!link) { 134 status = -ENOMEM;
··· 128 } 129 130 /* Fast symlinks can't be large */ 131 + len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb)); 132 link = kzalloc(len + 1, GFP_NOFS); 133 if (!link) { 134 status = -ENOMEM;
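The ocfs2 hunk bounds the symlink length with strnlen() so a fast-symlink target that is not NUL-terminated on disk cannot be read past the inline area. A small user-space sketch of the difference, using an illustrative 8-byte area in place of ocfs2_fast_symlink_chars():

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* 8-byte on-disk area with no terminating NUL */
            char target[8] = { 'l', 'o', 'n', 'g', 'n', 'a', 'm', 'e' };

            /* strlen(target) would keep reading past the area looking for a NUL;
             * strnlen() stops at the size of the area instead. */
            size_t len = strnlen(target, sizeof(target));

            printf("%zu\n", len);      /* prints 8 */
            return 0;
    }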
+2 -2
fs/proc/base.c
··· 2675 INF("auxv", S_IRUSR, proc_pid_auxv), 2676 ONE("status", S_IRUGO, proc_pid_status), 2677 ONE("personality", S_IRUSR, proc_pid_personality), 2678 - INF("limits", S_IRUSR, proc_pid_limits), 2679 #ifdef CONFIG_SCHED_DEBUG 2680 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2681 #endif ··· 3011 INF("auxv", S_IRUSR, proc_pid_auxv), 3012 ONE("status", S_IRUGO, proc_pid_status), 3013 ONE("personality", S_IRUSR, proc_pid_personality), 3014 - INF("limits", S_IRUSR, proc_pid_limits), 3015 #ifdef CONFIG_SCHED_DEBUG 3016 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 3017 #endif
··· 2675 INF("auxv", S_IRUSR, proc_pid_auxv), 2676 ONE("status", S_IRUGO, proc_pid_status), 2677 ONE("personality", S_IRUSR, proc_pid_personality), 2678 + INF("limits", S_IRUGO, proc_pid_limits), 2679 #ifdef CONFIG_SCHED_DEBUG 2680 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2681 #endif ··· 3011 INF("auxv", S_IRUSR, proc_pid_auxv), 3012 ONE("status", S_IRUGO, proc_pid_status), 3013 ONE("personality", S_IRUSR, proc_pid_personality), 3014 + INF("limits", S_IRUGO, proc_pid_limits), 3015 #ifdef CONFIG_SCHED_DEBUG 3016 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 3017 #endif
+4 -3
fs/reiserfs/ioctl.c
··· 170 int reiserfs_unpack(struct inode *inode, struct file *filp) 171 { 172 int retval = 0; 173 int index; 174 struct page *page; 175 struct address_space *mapping; ··· 189 /* we need to make sure nobody is changing the file size beneath 190 ** us 191 */ 192 - mutex_lock(&inode->i_mutex); 193 - reiserfs_write_lock(inode->i_sb); 194 195 write_from = inode->i_size & (blocksize - 1); 196 /* if we are on a block boundary, we are already unpacked. */ ··· 225 226 out: 227 mutex_unlock(&inode->i_mutex); 228 - reiserfs_write_unlock(inode->i_sb); 229 return retval; 230 }
··· 170 int reiserfs_unpack(struct inode *inode, struct file *filp) 171 { 172 int retval = 0; 173 + int depth; 174 int index; 175 struct page *page; 176 struct address_space *mapping; ··· 188 /* we need to make sure nobody is changing the file size beneath 189 ** us 190 */ 191 + reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); 192 + depth = reiserfs_write_lock_once(inode->i_sb); 193 194 write_from = inode->i_size & (blocksize - 1); 195 /* if we are on a block boundary, we are already unpacked. */ ··· 224 225 out: 226 mutex_unlock(&inode->i_mutex); 227 + reiserfs_write_unlock_once(inode->i_sb, depth); 228 return retval; 229 }
+9 -3
fs/xfs/xfs_log_cil.c
··· 405 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); 406 new_ctx->ticket = xlog_cil_ticket_alloc(log); 407 408 - /* lock out transaction commit, but don't block on background push */ 409 if (!down_write_trylock(&cil->xc_ctx_lock)) { 410 - if (!push_seq) 411 goto out_free_ticket; 412 down_write(&cil->xc_ctx_lock); 413 } ··· 428 goto out_skip; 429 430 /* check for a previously pushed seqeunce */ 431 - if (push_seq < cil->xc_ctx->sequence) 432 goto out_skip; 433 434 /*
··· 405 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); 406 new_ctx->ticket = xlog_cil_ticket_alloc(log); 407 408 + /* 409 + * Lock out transaction commit, but don't block for background pushes 410 + * unless we are well over the CIL space limit. See the definition of 411 + * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic 412 + * used here. 413 + */ 414 if (!down_write_trylock(&cil->xc_ctx_lock)) { 415 + if (!push_seq && 416 + cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log)) 417 goto out_free_ticket; 418 down_write(&cil->xc_ctx_lock); 419 } ··· 422 goto out_skip; 423 424 /* check for a previously pushed seqeunce */ 425 + if (push_seq && push_seq < cil->xc_ctx->sequence) 426 goto out_skip; 427 428 /*
+21 -16
fs/xfs/xfs_log_priv.h
··· 426 }; 427 428 /* 429 - * The amount of log space we should the CIL to aggregate is difficult to size. 430 - * Whatever we chose we have to make we can get a reservation for the log space 431 - * effectively, that it is large enough to capture sufficient relogging to 432 - * reduce log buffer IO significantly, but it is not too large for the log or 433 - * induces too much latency when writing out through the iclogs. We track both 434 - * space consumed and the number of vectors in the checkpoint context, so we 435 - * need to decide which to use for limiting. 436 * 437 * Every log buffer we write out during a push needs a header reserved, which 438 * is at least one sector and more for v2 logs. Hence we need a reservation of ··· 459 * checkpoint transaction ticket is specific to the checkpoint context, rather 460 * than the CIL itself. 461 * 462 - * With dynamic reservations, we can basically make up arbitrary limits for the 463 - * checkpoint size so long as they don't violate any other size rules. Hence 464 - * the initial maximum size for the checkpoint transaction will be set to a 465 - * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit 466 - * right now based on the latency of writing out a large amount of data through 467 - * the circular iclog buffers. 468 */ 469 - 470 - #define XLOG_CIL_SPACE_LIMIT(log) \ 471 - (min((log->l_logsize >> 2), (8 * 1024 * 1024))) 472 473 /* 474 * The reservation head lsn is not made up of a cycle number and block number.
··· 426 }; 427 428 /* 429 + * The amount of log space we allow the CIL to aggregate is difficult to size. 430 + * Whatever we choose, we have to make sure we can get a reservation for the 431 + * log space effectively, that it is large enough to capture sufficient 432 + * relogging to reduce log buffer IO significantly, but it is not too large for 433 + * the log or induces too much latency when writing out through the iclogs. We 434 + * track both space consumed and the number of vectors in the checkpoint 435 + * context, so we need to decide which to use for limiting. 436 * 437 * Every log buffer we write out during a push needs a header reserved, which 438 * is at least one sector and more for v2 logs. Hence we need a reservation of ··· 459 * checkpoint transaction ticket is specific to the checkpoint context, rather 460 * than the CIL itself. 461 * 462 + * With dynamic reservations, we can effectively make up arbitrary limits for 463 + * the checkpoint size so long as they don't violate any other size rules. 464 + * Recovery imposes a rule that no transaction exceed half the log, so we are 465 + * limited by that. Furthermore, the log transaction reservation subsystem 466 + * tries to keep 25% of the log free, so we need to keep below that limit or we 467 + * risk running out of free log space to start any new transactions. 468 + * 469 + * In order to keep background CIL push efficient, we will set a lower 470 + * threshold at which background pushing is attempted without blocking current 471 + * transaction commits. A separate, higher bound defines when CIL pushes are 472 + * enforced to ensure we stay within our maximum checkpoint size bounds. 473 + * threshold, yet give us plenty of space for aggregation on large logs. 474 */ 475 + #define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3) 476 + #define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4)) 477 478 /* 479 * The reservation head lsn is not made up of a cycle number and block number.
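The new XFS macros split the old single CIL limit into a soft threshold (one eighth of the log) where a background push is attempted and a hard limit (three sixteenths) beyond which commits block, as the comment above describes. A quick calculation of the two bounds; unlike the kernel macros, which take the log structure, this sketch takes the log size directly, and the 64 MB figure is only an example:

    #include <stdio.h>

    #define XLOG_CIL_SPACE_LIMIT(logsize)       ((logsize) >> 3)        /* 12.5%  */
    #define XLOG_CIL_HARD_SPACE_LIMIT(logsize)  (3 * ((logsize) >> 4))  /* 18.75% */

    int main(void)
    {
            unsigned long logsize = 64UL << 20;  /* a 64 MB log, for illustration */

            printf("background push above %lu bytes, commits block above %lu bytes\n",
                   XLOG_CIL_SPACE_LIMIT(logsize),
                   XLOG_CIL_HARD_SPACE_LIMIT(logsize));
            return 0;
    }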
+1 -1
include/acpi/acpixf.h
··· 55 extern u8 acpi_gbl_permanent_mmap; 56 57 /* 58 - * Globals that are publically available, allowing for 59 * run time configuration 60 */ 61 extern u32 acpi_dbg_level;
··· 55 extern u8 acpi_gbl_permanent_mmap; 56 57 /* 58 + * Globals that are publicly available, allowing for 59 * run time configuration 60 */ 61 extern u32 acpi_dbg_level;
+20 -9
include/drm/drmP.h
··· 612 struct kref refcount; 613 614 /** Handle count of this object. Each handle also holds a reference */ 615 - struct kref handlecount; 616 617 /** Related drm device */ 618 struct drm_device *dev; ··· 808 */ 809 int (*gem_init_object) (struct drm_gem_object *obj); 810 void (*gem_free_object) (struct drm_gem_object *obj); 811 - void (*gem_free_object_unlocked) (struct drm_gem_object *obj); 812 813 /* vga arb irq handler */ 814 void (*vgaarb_irq)(struct drm_device *dev, bool state); ··· 1174 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); 1175 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 1176 extern void drm_vm_open_locked(struct vm_area_struct *vma); 1177 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); 1178 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); 1179 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); ··· 1455 void drm_gem_destroy(struct drm_device *dev); 1456 void drm_gem_object_release(struct drm_gem_object *obj); 1457 void drm_gem_object_free(struct kref *kref); 1458 - void drm_gem_object_free_unlocked(struct kref *kref); 1459 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, 1460 size_t size); 1461 int drm_gem_object_init(struct drm_device *dev, 1462 struct drm_gem_object *obj, size_t size); 1463 - void drm_gem_object_handle_free(struct kref *kref); 1464 void drm_gem_vm_open(struct vm_area_struct *vma); 1465 void drm_gem_vm_close(struct vm_area_struct *vma); 1466 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); ··· 1482 static inline void 1483 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 1484 { 1485 - if (obj != NULL) 1486 - kref_put(&obj->refcount, drm_gem_object_free_unlocked); 1487 } 1488 1489 int drm_gem_handle_create(struct drm_file *file_priv, ··· 1498 drm_gem_object_handle_reference(struct drm_gem_object *obj) 1499 { 1500 drm_gem_object_reference(obj); 1501 - kref_get(&obj->handlecount); 1502 } 1503 1504 static inline void ··· 1507 if (obj == NULL) 1508 return; 1509 1510 /* 1511 * Must bump handle count first as this may be the last 1512 * ref, in which case the object would disappear before we 1513 * checked for a name 1514 */ 1515 - kref_put(&obj->handlecount, drm_gem_object_handle_free); 1516 drm_gem_object_unreference(obj); 1517 } 1518 ··· 1525 if (obj == NULL) 1526 return; 1527 1528 /* 1529 * Must bump handle count first as this may be the last 1530 * ref, in which case the object would disappear before we 1531 * checked for a name 1532 */ 1533 - kref_put(&obj->handlecount, drm_gem_object_handle_free); 1534 drm_gem_object_unreference_unlocked(obj); 1535 } 1536
··· 612 struct kref refcount; 613 614 /** Handle count of this object. Each handle also holds a reference */ 615 + atomic_t handle_count; /* number of handles on this object */ 616 617 /** Related drm device */ 618 struct drm_device *dev; ··· 808 */ 809 int (*gem_init_object) (struct drm_gem_object *obj); 810 void (*gem_free_object) (struct drm_gem_object *obj); 811 812 /* vga arb irq handler */ 813 void (*vgaarb_irq)(struct drm_device *dev, bool state); ··· 1175 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); 1176 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 1177 extern void drm_vm_open_locked(struct vm_area_struct *vma); 1178 + extern void drm_vm_close_locked(struct vm_area_struct *vma); 1179 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); 1180 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); 1181 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); ··· 1455 void drm_gem_destroy(struct drm_device *dev); 1456 void drm_gem_object_release(struct drm_gem_object *obj); 1457 void drm_gem_object_free(struct kref *kref); 1458 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, 1459 size_t size); 1460 int drm_gem_object_init(struct drm_device *dev, 1461 struct drm_gem_object *obj, size_t size); 1462 + void drm_gem_object_handle_free(struct drm_gem_object *obj); 1463 void drm_gem_vm_open(struct vm_area_struct *vma); 1464 void drm_gem_vm_close(struct vm_area_struct *vma); 1465 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); ··· 1483 static inline void 1484 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 1485 { 1486 + if (obj != NULL) { 1487 + struct drm_device *dev = obj->dev; 1488 + mutex_lock(&dev->struct_mutex); 1489 + kref_put(&obj->refcount, drm_gem_object_free); 1490 + mutex_unlock(&dev->struct_mutex); 1491 + } 1492 } 1493 1494 int drm_gem_handle_create(struct drm_file *file_priv, ··· 1495 drm_gem_object_handle_reference(struct drm_gem_object *obj) 1496 { 1497 drm_gem_object_reference(obj); 1498 + atomic_inc(&obj->handle_count); 1499 } 1500 1501 static inline void ··· 1504 if (obj == NULL) 1505 return; 1506 1507 + if (atomic_read(&obj->handle_count) == 0) 1508 + return; 1509 /* 1510 * Must bump handle count first as this may be the last 1511 * ref, in which case the object would disappear before we 1512 * checked for a name 1513 */ 1514 + if (atomic_dec_and_test(&obj->handle_count)) 1515 + drm_gem_object_handle_free(obj); 1516 drm_gem_object_unreference(obj); 1517 } 1518 ··· 1519 if (obj == NULL) 1520 return; 1521 1522 + if (atomic_read(&obj->handle_count) == 0) 1523 + return; 1524 + 1525 /* 1526 * Must bump handle count first as this may be the last 1527 * ref, in which case the object would disappear before we 1528 * checked for a name 1529 */ 1530 + 1531 + if (atomic_dec_and_test(&obj->handle_count)) 1532 + drm_gem_object_handle_free(obj); 1533 drm_gem_object_unreference_unlocked(obj); 1534 } 1535
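The drmP.h hunk replaces the kref-based handle count with a plain atomic counter and frees the object's name only when the last handle is dropped. A generic user-space sketch of that "release on last reference" idiom using C11 atomics; the object layout and release routine are illustrative, not the DRM API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct object {
            atomic_int handle_count;
            int name;
    };

    static void object_handle_free(struct object *obj)
    {
            printf("last handle dropped, releasing name %d\n", obj->name);
            obj->name = 0;
    }

    static void handle_reference(struct object *obj)
    {
            atomic_fetch_add(&obj->handle_count, 1);
    }

    static void handle_unreference(struct object *obj)
    {
            if (atomic_load(&obj->handle_count) == 0)
                    return;
            /* fetch_sub returning 1 means we just dropped the last handle */
            if (atomic_fetch_sub(&obj->handle_count, 1) == 1)
                    object_handle_free(obj);
    }

    int main(void)
    {
            struct object obj = { .name = 42 };

            handle_reference(&obj);
            handle_reference(&obj);
            handle_unreference(&obj);
            handle_unreference(&obj);  /* prints the release message */
            return 0;
    }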
+1 -1
include/drm/drm_pciids.h
··· 85 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 86 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 87 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 88 - {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 89 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 90 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 91 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ ··· 102 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 103 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 104 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 105 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ 106 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ 107 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
··· 85 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 86 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 87 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 88 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 89 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 90 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ ··· 103 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 104 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 105 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 106 + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 107 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ 108 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ 109 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+1
include/linux/cpuidle.h
··· 53 #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ 54 #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ 55 #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ 56 57 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) 58
··· 53 #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ 54 #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ 55 #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ 56 + #define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */ 57 58 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) 59
+1 -1
include/linux/dmaengine.h
··· 548 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; 549 } 550 551 - static unsigned short dma_dev_to_maxpq(struct dma_device *dma) 552 { 553 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; 554 }
··· 548 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; 549 } 550 551 + static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) 552 { 553 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; 554 }
+2 -3
include/linux/module.h
··· 686 687 688 #ifdef CONFIG_GENERIC_BUG 689 - int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, 690 struct module *); 691 void module_bug_cleanup(struct module *); 692 693 #else /* !CONFIG_GENERIC_BUG */ 694 695 - static inline int module_bug_finalize(const Elf_Ehdr *hdr, 696 const Elf_Shdr *sechdrs, 697 struct module *mod) 698 { 699 - return 0; 700 } 701 static inline void module_bug_cleanup(struct module *mod) {} 702 #endif /* CONFIG_GENERIC_BUG */
··· 686 687 688 #ifdef CONFIG_GENERIC_BUG 689 + void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, 690 struct module *); 691 void module_bug_cleanup(struct module *); 692 693 #else /* !CONFIG_GENERIC_BUG */ 694 695 + static inline void module_bug_finalize(const Elf_Ehdr *hdr, 696 const Elf_Shdr *sechdrs, 697 struct module *mod) 698 { 699 } 700 static inline void module_bug_cleanup(struct module *mod) {} 701 #endif /* CONFIG_GENERIC_BUG */
+1 -1
include/linux/rcupdate.h
··· 454 * Makes rcu_dereference_check() do the dirty work. 455 */ 456 #define rcu_dereference_bh(p) \ 457 - rcu_dereference_check(p, rcu_read_lock_bh_held()) 458 459 /** 460 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
··· 454 * Makes rcu_dereference_check() do the dirty work. 455 */ 456 #define rcu_dereference_bh(p) \ 457 + rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled()) 458 459 /** 460 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
+1
include/linux/wait.h
··· 614 (wait)->private = current; \ 615 (wait)->func = autoremove_wake_function; \ 616 INIT_LIST_HEAD(&(wait)->task_list); \ 617 } while (0) 618 619 /**
··· 614 (wait)->private = current; \ 615 (wait)->func = autoremove_wake_function; \ 616 INIT_LIST_HEAD(&(wait)->task_list); \ 617 + (wait)->flags = 0; \ 618 } while (0) 619 620 /**
+2
ipc/sem.c
··· 743 { 744 struct semid_ds out; 745 746 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); 747 748 out.sem_otime = in->sem_otime;
··· 743 { 744 struct semid_ds out; 745 746 + memset(&out, 0, sizeof(out)); 747 + 748 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); 749 750 out.sem_otime = in->sem_otime;
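The ipc/sem.c hunk zeroes the legacy semid_ds structure before filling in individual fields, so padding and any members the conversion does not touch are not copied back to user space as uninitialized stack bytes. The same pattern in a stand-alone sketch; the structure and field names here are invented for illustration:

    #include <stdio.h>
    #include <string.h>

    struct old_abi {
            unsigned short perm;
            /* implicit padding */
            long otime;
            long ctime;
            unsigned short nsems;
            /* trailing padding */
    };

    static void convert(const long *in, struct old_abi *out)
    {
            memset(out, 0, sizeof(*out));   /* nothing leaks if a field is skipped */
            out->perm  = (unsigned short)in[0];
            out->otime = in[1];
            out->ctime = in[2];
            out->nsems = (unsigned short)in[3];
    }

    int main(void)
    {
            long in[4] = { 0600, 1, 2, 3 };
            struct old_abi out;

            convert(in, &out);
            printf("%u %ld %ld %u\n", out.perm, out.otime, out.ctime, out.nsems);
            return 0;
    }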
-2
kernel/kfifo.c
··· 365 n = setup_sgl_buf(sgl, fifo->data + off, nents, l); 366 n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); 367 368 - if (n) 369 - sg_mark_end(sgl + n - 1); 370 return n; 371 } 372
··· 365 n = setup_sgl_buf(sgl, fifo->data + off, nents, l); 366 n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); 367 368 return n; 369 } 370
+4
kernel/module.c
··· 1537 { 1538 struct module *mod = _mod; 1539 list_del(&mod->list); 1540 return 0; 1541 } 1542 ··· 2626 if (err < 0) 2627 goto ddebug; 2628 2629 list_add_rcu(&mod->list, &modules); 2630 mutex_unlock(&module_mutex); 2631 ··· 2652 mutex_lock(&module_mutex); 2653 /* Unlink carefully: kallsyms could be walking list. */ 2654 list_del_rcu(&mod->list); 2655 ddebug: 2656 if (!mod->taints) 2657 dynamic_debug_remove(info.debug);
··· 1537 { 1538 struct module *mod = _mod; 1539 list_del(&mod->list); 1540 + module_bug_cleanup(mod); 1541 return 0; 1542 } 1543 ··· 2625 if (err < 0) 2626 goto ddebug; 2627 2628 + module_bug_finalize(info.hdr, info.sechdrs, mod); 2629 list_add_rcu(&mod->list, &modules); 2630 mutex_unlock(&module_mutex); 2631 ··· 2650 mutex_lock(&module_mutex); 2651 /* Unlink carefully: kallsyms could be walking list. */ 2652 list_del_rcu(&mod->list); 2653 + module_bug_cleanup(mod); 2654 + 2655 ddebug: 2656 if (!mod->taints) 2657 dynamic_debug_remove(info.debug);
+14 -3
kernel/smp.c
··· 365 EXPORT_SYMBOL_GPL(smp_call_function_any); 366 367 /** 368 - * __smp_call_function_single(): Run a function on another CPU 369 * @cpu: The CPU to run on. 370 * @data: Pre-allocated and setup data structure 371 * 372 * Like smp_call_function_single(), but allow caller to pass in a 373 * pre-allocated data structure. Useful for embedding @data inside ··· 377 void __smp_call_function_single(int cpu, struct call_single_data *data, 378 int wait) 379 { 380 - csd_lock(data); 381 382 /* 383 * Can deadlock when called with interrupts disabled. 384 * We allow cpu's that are not yet online though, as no one else can ··· 390 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() 391 && !oops_in_progress); 392 393 - generic_exec_single(cpu, data, wait); 394 } 395 396 /**
··· 365 EXPORT_SYMBOL_GPL(smp_call_function_any); 366 367 /** 368 + * __smp_call_function_single(): Run a function on a specific CPU 369 * @cpu: The CPU to run on. 370 * @data: Pre-allocated and setup data structure 371 + * @wait: If true, wait until function has completed on specified CPU. 372 * 373 * Like smp_call_function_single(), but allow caller to pass in a 374 * pre-allocated data structure. Useful for embedding @data inside ··· 376 void __smp_call_function_single(int cpu, struct call_single_data *data, 377 int wait) 378 { 379 + unsigned int this_cpu; 380 + unsigned long flags; 381 382 + this_cpu = get_cpu(); 383 /* 384 * Can deadlock when called with interrupts disabled. 385 * We allow cpu's that are not yet online though, as no one else can ··· 387 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() 388 && !oops_in_progress); 389 390 + if (cpu == this_cpu) { 391 + local_irq_save(flags); 392 + data->func(data->info); 393 + local_irq_restore(flags); 394 + } else { 395 + csd_lock(data); 396 + generic_exec_single(cpu, data, wait); 397 + } 398 + put_cpu(); 399 } 400 401 /**
+2 -4
lib/bug.c
··· 72 return NULL; 73 } 74 75 - int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, 76 - struct module *mod) 77 { 78 char *secstrings; 79 unsigned int i; ··· 97 * could potentially lead to deadlock and thus be counter-productive. 98 */ 99 list_add(&mod->bug_list, &module_bug_list); 100 - 101 - return 0; 102 } 103 104 void module_bug_cleanup(struct module *mod)
··· 72 return NULL; 73 } 74 75 + void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, 76 + struct module *mod) 77 { 78 char *secstrings; 79 unsigned int i; ··· 97 * could potentially lead to deadlock and thus be counter-productive. 98 */ 99 list_add(&mod->bug_list, &module_bug_list); 100 } 101 102 void module_bug_cleanup(struct module *mod)
+1 -1
lib/list_sort.c
··· 70 * element comparison is needed, so the client's cmp() 71 * routine can invoke cond_resched() periodically. 72 */ 73 - (*cmp)(priv, tail, tail); 74 75 tail->next->prev = tail; 76 tail = tail->next;
··· 70 * element comparison is needed, so the client's cmp() 71 * routine can invoke cond_resched() periodically. 72 */ 73 + (*cmp)(priv, tail->next, tail->next); 74 75 tail->next->prev = tail; 76 tail = tail->next;
+4 -2
mm/ksm.c
··· 712 if (!ptep) 713 goto out; 714 715 - if (pte_write(*ptep)) { 716 pte_t entry; 717 718 swapped = PageSwapCache(page); ··· 735 set_pte_at(mm, addr, ptep, entry); 736 goto out_unlock; 737 } 738 - entry = pte_wrprotect(entry); 739 set_pte_at_notify(mm, addr, ptep, entry); 740 } 741 *orig_pte = *ptep;
··· 712 if (!ptep) 713 goto out; 714 715 + if (pte_write(*ptep) || pte_dirty(*ptep)) { 716 pte_t entry; 717 718 swapped = PageSwapCache(page); ··· 735 set_pte_at(mm, addr, ptep, entry); 736 goto out_unlock; 737 } 738 + if (pte_dirty(entry)) 739 + set_page_dirty(page); 740 + entry = pte_mkclean(pte_wrprotect(entry)); 741 set_pte_at_notify(mm, addr, ptep, entry); 742 } 743 *orig_pte = *ptep;
+7 -1
mm/rmap.c
··· 381 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 382 { 383 if (PageAnon(page)) { 384 - if (vma->anon_vma->root != page_anon_vma(page)->root) 385 return -EFAULT; 386 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 387 if (!vma->vm_file ||
··· 381 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 382 { 383 if (PageAnon(page)) { 384 + struct anon_vma *page__anon_vma = page_anon_vma(page); 385 + /* 386 + * Note: swapoff's unuse_vma() is more efficient with this 387 + * check, and needs it to match anon_vma when KSM is active. 388 + */ 389 + if (!vma->anon_vma || !page__anon_vma || 390 + vma->anon_vma->root != page__anon_vma->root) 391 return -EFAULT; 392 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 393 if (!vma->vm_file ||
+10 -4
net/8021q/vlan_core.c
··· 24 25 if (vlan_dev) 26 skb->dev = vlan_dev; 27 - else if (vlan_id) 28 - goto drop; 29 30 return (polling ? netif_receive_skb(skb) : netif_rx(skb)); 31 ··· 105 106 if (vlan_dev) 107 skb->dev = vlan_dev; 108 - else if (vlan_id) 109 - goto drop; 110 111 for (p = napi->gro_list; p; p = p->next) { 112 NAPI_GRO_CB(p)->same_flow =
··· 24 25 if (vlan_dev) 26 skb->dev = vlan_dev; 27 + else if (vlan_id) { 28 + if (!(skb->dev->flags & IFF_PROMISC)) 29 + goto drop; 30 + skb->pkt_type = PACKET_OTHERHOST; 31 + } 32 33 return (polling ? netif_receive_skb(skb) : netif_rx(skb)); 34 ··· 102 103 if (vlan_dev) 104 skb->dev = vlan_dev; 105 + else if (vlan_id) { 106 + if (!(skb->dev->flags & IFF_PROMISC)) 107 + goto drop; 108 + skb->pkt_type = PACKET_OTHERHOST; 109 + } 110 111 for (p = napi->gro_list; p; p = p->next) { 112 NAPI_GRO_CB(p)->same_flow =
+1
net/ipv4/Kconfig
··· 217 218 config NET_IPGRE 219 tristate "IP: GRE tunnels over IP" 220 help 221 Tunneling means encapsulating data of one protocol type within 222 another protocol and sending it over a channel that understands the
··· 217 218 config NET_IPGRE 219 tristate "IP: GRE tunnels over IP" 220 + depends on IPV6 || IPV6=n 221 help 222 Tunneling means encapsulating data of one protocol type within 223 another protocol and sending it over a channel that understands the
+14 -10
net/ipv4/tcp_timer.c
··· 135 136 /* This function calculates a "timeout" which is equivalent to the timeout of a 137 * TCP connection after "boundary" unsuccessful, exponentially backed-off 138 - * retransmissions with an initial RTO of TCP_RTO_MIN. 139 */ 140 static bool retransmits_timed_out(struct sock *sk, 141 - unsigned int boundary) 142 { 143 unsigned int timeout, linear_backoff_thresh; 144 unsigned int start_ts; 145 146 if (!inet_csk(sk)->icsk_retransmits) 147 return false; ··· 154 else 155 start_ts = tcp_sk(sk)->retrans_stamp; 156 157 - linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); 158 159 if (boundary <= linear_backoff_thresh) 160 - timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; 161 else 162 - timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + 163 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; 164 165 return (tcp_time_stamp - start_ts) >= timeout; ··· 170 { 171 struct inet_connection_sock *icsk = inet_csk(sk); 172 int retry_until; 173 - bool do_reset; 174 175 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 176 if (icsk->icsk_retransmits) 177 dst_negative_advice(sk); 178 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 179 } else { 180 - if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { 181 /* Black hole detection */ 182 tcp_mtu_probing(icsk, sk); 183 ··· 191 192 retry_until = tcp_orphan_retries(sk, alive); 193 do_reset = alive || 194 - !retransmits_timed_out(sk, retry_until); 195 196 if (tcp_out_of_resources(sk, do_reset)) 197 return 1; 198 } 199 } 200 201 - if (retransmits_timed_out(sk, retry_until)) { 202 /* Has it gone just too far? */ 203 tcp_write_err(sk); 204 return 1; ··· 440 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 441 } 442 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 443 - if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 444 __sk_dst_reset(sk); 445 446 out:;
··· 135 136 /* This function calculates a "timeout" which is equivalent to the timeout of a 137 * TCP connection after "boundary" unsuccessful, exponentially backed-off 138 + * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if 139 + * syn_set flag is set. 140 */ 141 static bool retransmits_timed_out(struct sock *sk, 142 + unsigned int boundary, 143 + bool syn_set) 144 { 145 unsigned int timeout, linear_backoff_thresh; 146 unsigned int start_ts; 147 + unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN; 148 149 if (!inet_csk(sk)->icsk_retransmits) 150 return false; ··· 151 else 152 start_ts = tcp_sk(sk)->retrans_stamp; 153 154 + linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); 155 156 if (boundary <= linear_backoff_thresh) 157 + timeout = ((2 << boundary) - 1) * rto_base; 158 else 159 + timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + 160 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; 161 162 return (tcp_time_stamp - start_ts) >= timeout; ··· 167 { 168 struct inet_connection_sock *icsk = inet_csk(sk); 169 int retry_until; 170 + bool do_reset, syn_set = 0; 171 172 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 173 if (icsk->icsk_retransmits) 174 dst_negative_advice(sk); 175 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 176 + syn_set = 1; 177 } else { 178 + if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) { 179 /* Black hole detection */ 180 tcp_mtu_probing(icsk, sk); 181 ··· 187 188 retry_until = tcp_orphan_retries(sk, alive); 189 do_reset = alive || 190 + !retransmits_timed_out(sk, retry_until, 0); 191 192 if (tcp_out_of_resources(sk, do_reset)) 193 return 1; 194 } 195 } 196 197 + if (retransmits_timed_out(sk, retry_until, syn_set)) { 198 /* Has it gone just too far? */ 199 tcp_write_err(sk); 200 return 1; ··· 436 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 437 } 438 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 439 + if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0)) 440 __sk_dst_reset(sk); 441 442 out:;
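The tcp_timer hunk makes the retransmission deadline start from TCP_TIMEOUT_INIT while the connection is still in the SYN phase instead of always from TCP_RTO_MIN. A user-space sketch of the same backed-off timeout sum; the constants are the 2.6.36-era defaults and ilog2() is reimplemented here, so treat the exact numbers as assumptions:

    #include <stdio.h>

    #define HZ               1000
    #define TCP_RTO_MIN      (HZ / 5)     /* 200 ms */
    #define TCP_RTO_MAX      (120 * HZ)   /* 120 s  */
    #define TCP_TIMEOUT_INIT (3 * HZ)     /* 3 s initial RTO for SYNs in this era */

    static unsigned int ilog2(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    /* total time covered by "boundary" backed-off retransmissions, in jiffies */
    static unsigned int retrans_timeout(unsigned int boundary, int syn_set)
    {
            unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
            unsigned int linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);

            if (boundary <= linear_backoff_thresh)
                    return ((2 << boundary) - 1) * rto_base;
            return ((2 << linear_backoff_thresh) - 1) * rto_base +
                   (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
    }

    int main(void)
    {
            /* the default tcp_retries2=15 with a 200 ms base gives ~924.6 seconds */
            printf("%u jiffies\n", retrans_timeout(15, 0));
            return 0;
    }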
-4
net/mac80211/rx.c
··· 2199 struct net_device *prev_dev = NULL; 2200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2201 2202 - if (status->flag & RX_FLAG_INTERNAL_CMTR) 2203 - goto out_free_skb; 2204 - 2205 if (skb_headroom(skb) < sizeof(*rthdr) && 2206 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2207 goto out_free_skb; ··· 2257 } else 2258 goto out_free_skb; 2259 2260 - status->flag |= RX_FLAG_INTERNAL_CMTR; 2261 return; 2262 2263 out_free_skb:
··· 2199 struct net_device *prev_dev = NULL; 2200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2201 2202 if (skb_headroom(skb) < sizeof(*rthdr) && 2203 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2204 goto out_free_skb; ··· 2260 } else 2261 goto out_free_skb; 2262 2263 return; 2264 2265 out_free_skb:
+2 -1
net/phonet/pep.c
··· 225 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) 226 { 227 struct pep_sock *pn = pep_sk(sk); 228 - struct pnpipehdr *hdr = pnp_hdr(skb); 229 int wake = 0; 230 231 if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) 232 return -EINVAL; 233 234 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 235 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", 236 (unsigned)hdr->data[0]);
··· 225 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) 226 { 227 struct pep_sock *pn = pep_sk(sk); 228 + struct pnpipehdr *hdr; 229 int wake = 0; 230 231 if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) 232 return -EINVAL; 233 234 + hdr = pnp_hdr(skb); 235 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 236 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", 237 (unsigned)hdr->data[0]);
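The phonet fix defers pnp_hdr(skb) until after pskb_may_pull(); presumably the concern is the usual one for this pattern, namely that the pull can reallocate the skb data and leave a header pointer taken earlier pointing at freed memory. A rough user-space analogue of that hazard, with realloc() standing in for pskb_may_pull() (hypothetical, illustration only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(8);
	char *hdr = buf;             /* pointer taken before the call that may move the data */

	strcpy(buf, "hdr");
	buf = realloc(buf, 1 << 20); /* like pskb_may_pull(), this may relocate the buffer */
	if (!buf)
		return 1;

	hdr = buf;                   /* re-derive the pointer afterwards, as the fix does */
	printf("%s\n", hdr);
	free(buf);
	return 0;
}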
+9 -8
samples/kfifo/dma-example.c
··· 24 { 25 int i; 26 unsigned int ret; 27 struct scatterlist sg[10]; 28 29 printk(KERN_INFO "DMA fifo test start\n"); ··· 62 * byte at the beginning, after the kfifo_skip(). 63 */ 64 sg_init_table(sg, ARRAY_SIZE(sg)); 65 - ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); 66 - printk(KERN_INFO "DMA sgl entries: %d\n", ret); 67 - if (!ret) { 68 /* fifo is full and no sgl was created */ 69 printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); 70 return -EIO; ··· 72 73 /* receive data */ 74 printk(KERN_INFO "scatterlist for receive:\n"); 75 - for (i = 0; i < ARRAY_SIZE(sg); i++) { 76 printk(KERN_INFO 77 "sg[%d] -> " 78 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", ··· 92 kfifo_dma_in_finish(&fifo, ret); 93 94 /* Prepare to transmit data, example: 8 bytes */ 95 - ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); 96 - printk(KERN_INFO "DMA sgl entries: %d\n", ret); 97 - if (!ret) { 98 /* no data was available and no sgl was created */ 99 printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); 100 return -EIO; 101 } 102 103 printk(KERN_INFO "scatterlist for transmit:\n"); 104 - for (i = 0; i < ARRAY_SIZE(sg); i++) { 105 printk(KERN_INFO 106 "sg[%d] -> " 107 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
··· 24 { 25 int i; 26 unsigned int ret; 27 + unsigned int nents; 28 struct scatterlist sg[10]; 29 30 printk(KERN_INFO "DMA fifo test start\n"); ··· 61 * byte at the beginning, after the kfifo_skip(). 62 */ 63 sg_init_table(sg, ARRAY_SIZE(sg)); 64 + nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); 65 + printk(KERN_INFO "DMA sgl entries: %d\n", nents); 66 + if (!nents) { 67 /* fifo is full and no sgl was created */ 68 printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); 69 return -EIO; ··· 71 72 /* receive data */ 73 printk(KERN_INFO "scatterlist for receive:\n"); 74 + for (i = 0; i < nents; i++) { 75 printk(KERN_INFO 76 "sg[%d] -> " 77 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", ··· 91 kfifo_dma_in_finish(&fifo, ret); 92 93 /* Prepare to transmit data, example: 8 bytes */ 94 + nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); 95 + printk(KERN_INFO "DMA sgl entries: %d\n", nents); 96 + if (!nents) { 97 /* no data was available and no sgl was created */ 98 printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); 99 return -EIO; 100 } 101 102 printk(KERN_INFO "scatterlist for transmit:\n"); 103 + for (i = 0; i < nents; i++) { 104 printk(KERN_INFO 105 "sg[%d] -> " 106 "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
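The sample fix keeps the return value of kfifo_dma_in_prepare()/kfifo_dma_out_prepare() in nents and iterates only that many scatterlist entries, since entries beyond nents are never initialized. A small user-space sketch of the same pattern, with made-up struct and helper names:

/* Only the first 'nents' entries filled by a prepare-style helper are valid;
 * looping to the full array size would print garbage. Hypothetical names. */
#include <stdio.h>

struct entry { unsigned int offset, length; };

static unsigned int prepare(struct entry *e, unsigned int max, unsigned int bytes)
{
	unsigned int i, chunk = 16;

	for (i = 0; i < max && bytes; i++) {
		e[i].offset = i * chunk;
		e[i].length = bytes < chunk ? bytes : chunk;
		bytes -= e[i].length;
	}
	return i;	/* number of valid entries, like kfifo_dma_*_prepare() */
}

int main(void)
{
	struct entry sg[10];
	unsigned int i, nents = prepare(sg, 10, 40);	/* fills 3 entries */

	for (i = 0; i < nents; i++)	/* not i < 10: the rest is uninitialized */
		printf("sg[%u]: off %u len %u\n", i, sg[i].offset, sg[i].length);
	return 0;
}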
+5
sound/core/control.c
··· 31 32 /* max number of user-defined controls */ 33 #define MAX_USER_CONTROLS 32 34 35 struct snd_kctl_ioctl { 36 struct list_head list; /* list of all ioctls */ ··· 196 197 if (snd_BUG_ON(!control || !control->count)) 198 return NULL; 199 kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); 200 if (kctl == NULL) { 201 snd_printk(KERN_ERR "Cannot allocate control instance\n");
··· 31 32 /* max number of user-defined controls */ 33 #define MAX_USER_CONTROLS 32 34 + #define MAX_CONTROL_COUNT 1028 35 36 struct snd_kctl_ioctl { 37 struct list_head list; /* list of all ioctls */ ··· 195 196 if (snd_BUG_ON(!control || !control->count)) 197 return NULL; 198 + 199 + if (control->count > MAX_CONTROL_COUNT) 200 + return NULL; 201 + 202 kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); 203 if (kctl == NULL) { 204 snd_printk(KERN_ERR "Cannot allocate control instance\n");
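The new MAX_CONTROL_COUNT bound rejects oversized, user-supplied count values before they reach the kzalloc() size computation, where an unchecked count can wrap the arithmetic on a 32-bit size_t (or demand an enormous allocation). A toy sketch of the wraparound, using 32-bit unsigned arithmetic and made-up sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t count = 0x20000000u;          /* user-controlled element count */
	uint32_t elem  = 64;                   /* stand-in for sizeof(struct snd_kcontrol_volatile) */
	uint32_t base  = 128;                  /* stand-in for sizeof(*kctl) */
	uint32_t size  = base + elem * count;  /* wraps: a tiny allocation is requested */

	printf("count %u -> allocation size %u bytes\n", count, size);
	return 0;
}

Here the multiplication wraps to zero and only 128 bytes would be allocated for what the caller believes are half a billion elements, which is exactly the situation the count check forbids.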
+1 -1
sound/i2c/other/ak4xxx-adda.c
··· 900 return 0; 901 } 902 #else /* !CONFIG_PROC_FS */ 903 - static int proc_init(struct snd_akm4xxx *ak) {} 904 #endif 905 906 int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
··· 900 return 0; 901 } 902 #else /* !CONFIG_PROC_FS */ 903 + static int proc_init(struct snd_akm4xxx *ak) { return 0; } 904 #endif 905 906 int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
+1 -1
tools/perf/Makefile
··· 1017 # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So 1018 # we depend the various files onto their directories. 1019 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h 1020 - $(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS))) 1021 # In the second step, we make a rule to actually create these directories 1022 $(sort $(dir $(DIRECTORY_DEPS))): 1023 $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
··· 1017 # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So 1018 # we depend the various files onto their directories. 1019 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h 1020 + $(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS))) 1021 # In the second step, we make a rule to actually create these directories 1022 $(sort $(dir $(DIRECTORY_DEPS))): 1023 $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
+2 -2
tools/perf/util/trace-event-scripting.c
··· 97 register_python_scripting(&python_scripting_unsupported_ops); 98 } 99 #else 100 - struct scripting_ops python_scripting_ops; 101 102 void setup_python_scripting(void) 103 { ··· 158 register_perl_scripting(&perl_scripting_unsupported_ops); 159 } 160 #else 161 - struct scripting_ops perl_scripting_ops; 162 163 void setup_perl_scripting(void) 164 {
··· 97 register_python_scripting(&python_scripting_unsupported_ops); 98 } 99 #else 100 + extern struct scripting_ops python_scripting_ops; 101 102 void setup_python_scripting(void) 103 { ··· 158 register_perl_scripting(&perl_scripting_unsupported_ops); 159 } 160 #else 161 + extern struct scripting_ops perl_scripting_ops; 162 163 void setup_perl_scripting(void) 164 {
+1 -1
tools/perf/util/ui/browsers/hists.c
··· 773 774 switch (key) { 775 case 'a': 776 - if (browser->selection->map == NULL && 777 browser->selection->map->dso->annotate_warned) 778 continue; 779 goto do_annotate;
··· 773 774 switch (key) { 775 case 'a': 776 + if (browser->selection->map == NULL || 777 browser->selection->map->dso->annotate_warned) 778 continue; 779 goto do_annotate;
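The hists browser fix swaps '&&' for '||': with '&&' the second operand was evaluated precisely when browser->selection->map was NULL, dereferencing a NULL pointer, and annotation was never skipped for maps whose DSO had already warned. A standalone sketch of the corrected guard, with hypothetical struct names mirroring the ones above:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct dso { bool annotate_warned; };
struct map { struct dso *dso; };

static bool skip_annotation(const struct map *map)
{
	/* correct form: the NULL test short-circuits '||' before the dereference */
	return map == NULL || map->dso->annotate_warned;
}

int main(void)
{
	struct dso d = { .annotate_warned = true };
	struct map m = { .dso = &d };

	printf("%d %d\n", skip_annotation(NULL), skip_annotation(&m));
	return 0;
}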