···
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
 D: Small contributions to ncpfs
 D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
 
 N: Thibaut Varene
 E: T-Bone@parisc-linux.org
MAINTAINERS (+10, -4)

···
 S:	Maintained
 F:	arch/arm/mach-s3c6410/
 
+ARM/S5P ARM ARCHITECTURES
+M:	Kukjin Kim <kgene.kim@samsung.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-s5p*/
+
 ARM/SHMOBILE ARM ARCHITECTURE
 M:	Paul Mundt <lethal@linux-sh.org>
 M:	Magnus Damm <magnus.damm@gmail.com>
···
 S:	Supported
 
 MATROX FRAMEBUFFER DRIVER
-M:	Petr Vandrovec <vandrove@vc.cvut.cz>
 L:	linux-fbdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/video/matrox/matroxfb_*
 F:	include/linux/matroxfb.h
···
 F:	drivers/net/natsemi.c
 
 NCP FILESYSTEM
-M:	Petr Vandrovec <vandrove@vc.cvut.cz>
-S:	Maintained
+M:	Petr Vandrovec <petr@vandrovec.name>
+S:	Odd Fixes
 F:	fs/ncpfs/
 
 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
···
 	sigset_t mask;
 	unsigned long res;
 
-	siginitset(&mask, newmask & ~_BLOCKABLE);
+	siginitset(&mask, newmask & _BLOCKABLE);
 	res = sigprocmask(how, &mask, &oldmask);
 	if (!res) {
 		force_successful_syscall_return();
arch/arm/oprofile/common.c (+5, -2)

···
 	if (IS_ERR(pevent)) {
 		ret = PTR_ERR(pevent);
 	} else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+		perf_event_release_kernel(pevent);
 		pr_warning("oprofile: failed to enable event %d "
 			   "on CPU %d\n", event, cpu);
 		ret = -EBUSY;
···
 	ret = init_driverfs();
 	if (ret) {
 		kfree(counter_config);
+		counter_config = NULL;
 		return ret;
 	}
···
 	struct perf_event *event;
 
 	if (*perf_events) {
-		exit_driverfs();
 		for_each_possible_cpu(cpu) {
 			for (id = 0; id < perf_num_counters; ++id) {
 				event = perf_events[cpu][id];
···
 		}
 	}
 
-	if (counter_config)
+	if (counter_config) {
 		kfree(counter_config);
+		exit_driverfs();
+	}
 }
 #else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
arch/arm/plat-omap/Kconfig (+1, -1)

···
 config OMAP_DEBUG_LEDS
 	bool
 	depends on OMAP_DEBUG_DEVICES
-	default y if LEDS
+	default y if LEDS_CLASS
 
 config OMAP_RESET_CLOCKS
 	bool "Reset unused clocks during boot"
arch/arm/plat-omap/mcbsp.c (+1, -1)

···
 		/* Writing zero to RSYNC_ERR clears the IRQ */
 		MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
 	} else {
-		complete(&mcbsp_rx->tx_irq_completion);
+		complete(&mcbsp_rx->rx_irq_completion);
 	}
 
 	return IRQ_HANDLED;
···
 void mac_mksound( unsigned int freq, unsigned int length )
 {
 	__u32 cfreq = ( freq << 5 ) / 468;
-	__u32 flags;
+	unsigned long flags;
 	int i;
 
 	if ( mac_special_bell == NULL )
···
  */
 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
 {
-	__u32 flags;
+	unsigned long flags;
 
 	/* if the bell is already ringing, ring longer */
 	if ( mac_bell_duration > 0 )
···
 static void mac_quadra_ring_bell( unsigned long ignored )
 {
 	int i, count = mac_asc_samplespersec / HZ;
-	__u32 flags;
+	unsigned long flags;
 
 	/*
 	 * we neither want a sound buffer overflow nor underflow, so we need to match
arch/mips/Kconfig (+19, -2)

···
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select RTC_LIB if !MACH_LOONGSON
+	select GENERIC_ATOMIC64 if !64BIT
 
 mainmenu "Linux/MIPS Kernel Configuration"
···
 	select SYS_SUPPORTS_SMP
 	select SMP_UP
 	help
-	  This is a kernel model which is also known a VSMP or lately
-	  has been marketesed into SMVP.
+	  This is a kernel model which is known a VSMP but lately has been
+	  marketesed into SMVP.
+	  Virtual SMP uses the processor's VPEs to implement virtual
+	  processors. In currently available configuration of the 34K processor
+	  this allows for a dual processor. Both processors will share the same
+	  primary caches; each will obtain the half of the TLB for it's own
+	  exclusive use. For a layman this model can be described as similar to
+	  what Intel calls Hyperthreading.
+
+	  For further information see http://www.linux-mips.org/wiki/34K#VSMP
 
 config MIPS_MT_SMTC
 	bool "SMTC: Use all TCs on all VPEs for SMP"
···
 	help
 	  This is a kernel model which is known a SMTC or lately has been
 	  marketesed into SMVP.
+	  is presenting the available TC's of the core as processors to Linux.
+	  On currently available 34K processors this means a Linux system will
+	  see up to 5 processors. The implementation of the SMTC kernel differs
+	  significantly from VSMP and cannot efficiently coexist in the same
+	  kernel binary so the choice between VSMP and SMTC is a compile time
+	  decision.
+
+	  For further information see http://www.linux-mips.org/wiki/34K#SMTC
 
 endchoice
···
  */
 struct gic_intr_map {
 	unsigned int cpunum;	/* Directed to this CPU */
+#define GIC_UNUSED		0xdead		/* Dummy data */
 	unsigned int pin;	/* Directed to this Pin */
 	unsigned int polarity;	/* Polarity : +/- */
 	unsigned int trigtype;	/* Trigger  : Edge/Levl */
···
 
 #define GIC_EXT_INTR(x)		x
 
-/* Dummy data */
-#define X			0xdead
-
 /* External Interrupts used for IPI */
 #define GIC_IPI_EXT_INTR_RESCHED_VPE0	16
 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0	17
···
 		((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)	((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear if the misscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
+ * until GCC 3.x has been retired before we can apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
 #define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
 
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
arch/mips/include/asm/thread_info.h (+2, -1)

···
 #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK		(0x0000ffef & ~_TIF_SECCOMP)
+#define _TIF_WORK_MASK		(0x0000ffef &				\
+					~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(0x8000ffff & ~_TIF_SECCOMP)
arch/mips/include/asm/unistd.h (+15, -6)

···
 #define __NR_perf_event_open		(__NR_Linux + 333)
 #define __NR_accept4			(__NR_Linux + 334)
 #define __NR_recvmmsg			(__NR_Linux + 335)
+#define __NR_fanotify_init		(__NR_Linux + 336)
+#define __NR_fanotify_mark		(__NR_Linux + 337)
+#define __NR_prlimit64			(__NR_Linux + 338)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		335
+#define __NR_Linux_syscalls		338
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		335
+#define __NR_O32_Linux_syscalls		338
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
···
 #define __NR_perf_event_open		(__NR_Linux + 292)
 #define __NR_accept4			(__NR_Linux + 293)
 #define __NR_recvmmsg			(__NR_Linux + 294)
+#define __NR_fanotify_init		(__NR_Linux + 295)
+#define __NR_fanotify_mark		(__NR_Linux + 296)
+#define __NR_prlimit64			(__NR_Linux + 297)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		294
+#define __NR_Linux_syscalls		297
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		294
+#define __NR_64_Linux_syscalls		297
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
···
 #define __NR_accept4			(__NR_Linux + 297)
 #define __NR_recvmmsg			(__NR_Linux + 298)
 #define __NR_getdents64			(__NR_Linux + 299)
+#define __NR_fanotify_init		(__NR_Linux + 300)
+#define __NR_fanotify_mark		(__NR_Linux + 301)
+#define __NR_prlimit64			(__NR_Linux + 302)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		299
+#define __NR_Linux_syscalls		302
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		299
+#define __NR_N32_Linux_syscalls		302
 
 #ifdef __KERNEL__
···
 	sys	sys_rt_tgsigqueueinfo	4
 	sys	sys_perf_event_open	5
 	sys	sys_accept4		4
-	sys	sys_recvmmsg		5
+	sys	sys_recvmmsg		5	/* 4335 */
+	sys	sys_fanotify_init	2
+	sys	sys_fanotify_mark	6
+	sys	sys_prlimit64		4
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
···
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_MN10300_CACHE_WBACK
-	unsigned long addr, size, off;
+	unsigned long addr, size, base, off;
 	struct page *page;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *ppte, pte;
+
+	if (end > 0x80000000UL) {
+		/* addresses above 0xa0000000 do not go through the cache */
+		if (end > 0xa0000000UL) {
+			end = 0xa0000000UL;
+			if (start >= end)
+				return;
+		}
+
+		/* kernel addresses between 0x80000000 and 0x9fffffff do not
+		 * require page tables, so we just map such addresses directly */
+		base = (start >= 0x80000000UL) ? start : 0x80000000UL;
+		mn10300_dcache_flush_range(base, end);
+		if (base == start)
+			goto invalidate;
+		end = base;
+	}
 
 	for (; start < end; start += size) {
 		/* work out how much of the page to flush */
···
 	}
 #endif
 
+invalidate:
 	mn10300_icache_inv();
 }
 EXPORT_SYMBOL(flush_icache_range);
···
 		netif_wake_queue(dev);
 }
 
 static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
 {
 	dev->mtu = new_mtu;
···
 	.ndo_start_xmit		= uml_net_start_xmit,
 	.ndo_set_multicast_list	= uml_net_set_multicast_list,
 	.ndo_tx_timeout		= uml_net_tx_timeout,
+	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_change_mtu		= uml_net_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 };
···
 	    ((*transport->user->init)(&lp->user, dev) != 0))
 		goto out_unregister;
 
+	/* don't use eth_mac_addr, it will not work here */
+	memcpy(dev->dev_addr, device->mac, ETH_ALEN);
 	dev->mtu = transport->user->mtu;
 	dev->netdev_ops = &uml_netdev_ops;
 	dev->ethtool_ops = &uml_net_ethtool_ops;
arch/x86/kernel/acpi/cstate.c (+1, -1)

···
 		unsigned int ecx;
 	} states[ACPI_PROCESSOR_MAX_POWER];
 };
-static struct cstate_entry *cpu_cstate_entry;	/* per CPU ptr */
+static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */
 
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
···
 	case 0x0f:
 	case 0x16:
 	case 0x17:
+	case 0x1d:
 		*cpu_type = "i386/core_2";
 		break;
 	case 0x1a:
arch/x86/xen/time.c (+3, -2)

···
 __init void xen_hvm_init_time_ops(void)
 {
 	/* vector callback is needed otherwise we cannot receive interrupts
-	 * on cpu > 0 */
-	if (!xen_have_vector_callback && num_present_cpus() > 1)
+	 * on cpu > 0 and at this point we don't know how many cpus are
+	 * available */
+	if (!xen_have_vector_callback)
 		return;
 	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
 		printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
drivers/acpi/Kconfig (+1, -1)

···
 
 	  Be aware that using this interface can confuse your Embedded
 	  Controller in a way that a normal reboot is not enough. You then
-	  have to power of your system, and remove the laptop battery for
+	  have to power off your system, and remove the laptop battery for
 	  some seconds.
 	  An Embedded Controller typically is available on laptops and reads
 	  sensor values like battery state and temperature.
drivers/acpi/acpi_pad.c (+18, -16)

···
 	device_remove_file(&device->dev, &dev_attr_rrtime);
 }
 
-/* Query firmware how many CPUs should be idle */
-static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+/*
+ * Query firmware how many CPUs should be idle
+ * return -1 on failure
+ */
+static int acpi_pad_pur(acpi_handle handle)
 {
 	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 	union acpi_object *package;
-	int rev, num, ret = -EINVAL;
+	int num = -1;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
-		return -EINVAL;
+		return num;
 
 	if (!buffer.length || !buffer.pointer)
-		return -EINVAL;
+		return num;
 
 	package = buffer.pointer;
-	if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
-		goto out;
-	rev = package->package.elements[0].integer.value;
-	num = package->package.elements[1].integer.value;
-	if (rev != 1 || num < 0)
-		goto out;
-	*num_cpus = num;
-	ret = 0;
-out:
+
+	if (package->type == ACPI_TYPE_PACKAGE &&
+		package->package.count == 2 &&
+		package->package.elements[0].integer.value == 1) /* rev 1 */
+
+		num = package->package.elements[1].integer.value;
+
 	kfree(buffer.pointer);
-	return ret;
+	return num;
 }
 
 /* Notify firmware how many CPUs are idle */
···
 	uint32_t idle_cpus;
 
 	mutex_lock(&isolated_cpus_lock);
-	if (acpi_pad_pur(handle, &num_cpus)) {
+	num_cpus = acpi_pad_pur(handle);
+	if (num_cpus < 0) {
 		mutex_unlock(&isolated_cpus_lock);
 		return;
 	}
···
  *
  * DESCRIPTION: Reacquire the interpreter execution region from within the
  *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in conjuction with
+ *              fatal system error. Used in conjunction with
  *              relinquish_interpreter
  *
  ******************************************************************************/
drivers/acpi/acpica/rsutils.c (+1, -1)

···
 
 	/*
 	 * 16-, 32-, and 64-bit cases must use the move macros that perform
-	 * endian conversion and/or accomodate hardware that cannot perform
+	 * endian conversion and/or accommodate hardware that cannot perform
 	 * misaligned memory transfers
 	 */
 	case ACPI_RSC_MOVE16:
drivers/acpi/apei/Kconfig (+1, -1)

···
 	depends on ACPI_APEI
 	help
 	  ERST is a way provided by APEI to save and retrieve hardware
-	  error infomation to and from a persistent store. Enable this
+	  error information to and from a persistent store. Enable this
 	  if you want to debugging and testing the ERST kernel support
 	  and firmware implementation.
···
 
 static int einj_check_table(struct acpi_table_einj *einj_tab)
 {
-	if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+	if ((einj_tab->header_length !=
+	     (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
+	    && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
 		return -EINVAL;
 	if (einj_tab->header.length < sizeof(struct acpi_table_einj))
 		return -EINVAL;
drivers/acpi/apei/erst-dbg.c (+11, -7)

···
  * APEI Error Record Serialization Table debug support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store. This file provide the
+ * information to and from a persistent store. This file provide the
  * debugging/testing support for ERST kernel support and firmware
  * implementation.
  *
···
 		goto out;
 	}
 	if (len > erst_dbg_buf_len) {
-		kfree(erst_dbg_buf);
+		void *p;
 		rc = -ENOMEM;
-		erst_dbg_buf = kmalloc(len, GFP_KERNEL);
-		if (!erst_dbg_buf)
+		p = kmalloc(len, GFP_KERNEL);
+		if (!p)
 			goto out;
+		kfree(erst_dbg_buf);
+		erst_dbg_buf = p;
 		erst_dbg_buf_len = len;
 		goto retry;
 	}
···
 	if (mutex_lock_interruptible(&erst_dbg_mutex))
 		return -EINTR;
 	if (usize > erst_dbg_buf_len) {
-		kfree(erst_dbg_buf);
+		void *p;
 		rc = -ENOMEM;
-		erst_dbg_buf = kmalloc(usize, GFP_KERNEL);
-		if (!erst_dbg_buf)
+		p = kmalloc(usize, GFP_KERNEL);
+		if (!p)
 			goto out;
+		kfree(erst_dbg_buf);
+		erst_dbg_buf = p;
 		erst_dbg_buf_len = usize;
 	}
 	rc = copy_from_user(erst_dbg_buf, ubuf, usize);
drivers/acpi/apei/erst.c (+24, -5)

···
  * APEI Error Record Serialization Table support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store.
+ * information to and from a persistent store.
  *
  * For more information about ERST, please refer to ACPI Specification
  * version 4.0, section 17.4.
···
 {
 	int rc;
 	u64 offset;
+	void *src, *dst;
+
+	/* ioremap does not work in interrupt context */
+	if (in_interrupt()) {
+		pr_warning(ERST_PFX
+			   "MOVE_DATA can not be used in interrupt context");
+		return -EBUSY;
+	}
 
 	rc = __apei_exec_read_register(entry, &offset);
 	if (rc)
 		return rc;
-	memmove((void *)ctx->dst_base + offset,
-		(void *)ctx->src_base + offset,
-		ctx->var2);
+
+	src = ioremap(ctx->src_base + offset, ctx->var2);
+	if (!src)
+		return -ENOMEM;
+	dst = ioremap(ctx->dst_base + offset, ctx->var2);
+	if (!dst)
+		return -ENOMEM;
+
+	memmove(dst, src, ctx->var2);
+
+	iounmap(src);
+	iounmap(dst);
 
 	return 0;
 }
···
 
 static int erst_check_table(struct acpi_table_erst *erst_tab)
 {
-	if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+	if ((erst_tab->header_length !=
+	     (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
+	    && (erst_tab->header_length != sizeof(struct acpi_table_einj)))
 		return -EINVAL;
 	if (erst_tab->header.length < sizeof(struct acpi_table_erst))
 		return -EINVAL;
drivers/acpi/apei/ghes.c (+1, -1)

···
 	struct ghes *ghes = NULL;
 	int rc = -EINVAL;
 
-	generic = ghes_dev->dev.platform_data;
+	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
 	if (!generic->enabled)
 		return -ENODEV;
drivers/acpi/apei/hest.c (+7, -4)

···
 
 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
-	struct acpi_hest_generic *generic;
 	struct platform_device *ghes_dev;
 	struct ghes_arr *ghes_arr = data;
 	int rc;
 
 	if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
 		return 0;
-	generic = (struct acpi_hest_generic *)hest_hdr;
-	if (!generic->enabled)
+
+	if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
 		return 0;
 	ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
 	if (!ghes_dev)
 		return -ENOMEM;
-	ghes_dev->dev.platform_data = generic;
+
+	rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
+	if (rc)
+		goto err;
+
 	rc = platform_device_add(ghes_dev);
 	if (rc)
 		goto err;
···
 static int set_power_nocheck(const struct dmi_system_id *id)
 {
 	printk(KERN_NOTICE PREFIX "%s detected - "
-		"disable power check in power transistion\n", id->ident);
+		"disable power check in power transition\n", id->ident);
 	acpi_power_nocheck = 1;
 	return 0;
 }
···
 
 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
 	/*
-	 * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+	 * Invoke DSDT corruption work-around on all Toshiba Satellite.
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
 	 */
 	{
 	 .callback = set_copy_dsdt,
-	 .ident = "TOSHIBA Satellite A505",
+	 .ident = "TOSHIBA Satellite",
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
-		},
-	},
-	{
-	 .callback = set_copy_dsdt,
-	 .ident = "TOSHIBA Satellite L505D",
-	 .matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
 		},
 	},
 	{}
···
 
 	/*
 	 * If the laptop falls into the DMI check table, the power state check
-	 * will be disabled in the course of device power transistion.
+	 * will be disabled in the course of device power transition.
 	 */
 	dmi_check_system(power_nocheck_dmi_table);
···
 		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
 		       acpi_idle_driver.name);
 	} else {
-		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
 		       cpuidle_get_driver()->name);
 	}
drivers/acpi/processor_perflib.c (+2, -2)

···
 	if (!try_module_get(calling_module))
 		return -EINVAL;
 
-	/* is_done is set to negative if an error occured,
-	 * and to postitive if _no_ error occured, but SMM
+	/* is_done is set to negative if an error occurred,
+	 * and to postitive if _no_ error occurred, but SMM
 	 * was already notified. This avoids double notification
 	 * which might lead to unexpected results...
 	 */
···
 	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
 };
 
-static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
+static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
 {
 	int result = 0;
 	int i;
···
 	return result;
 }
 
-static int param_get_debug_level(char *buffer, struct kernel_param *kp)
+static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 {
 	int result = 0;
 	int i;
···
 	return result;
 }
 
-module_param_call(debug_layer, param_set_uint, param_get_debug_layer,
-		  &acpi_dbg_layer, 0644);
-module_param_call(debug_level, param_set_uint, param_get_debug_level,
-		  &acpi_dbg_level, 0644);
+static struct kernel_param_ops param_ops_debug_layer = {
+	.set = param_set_uint,
+	.get = param_get_debug_layer,
+};
+
+static struct kernel_param_ops param_ops_debug_level = {
+	.set = param_set_uint,
+	.get = param_get_debug_level,
+};
+
+module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
+module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 
 static char trace_method_name[6];
 module_param_string(trace_method_name, trace_method_name, 6, 0644);
drivers/acpi/video_detect.c (+2, -2)

···
 				  "support\n"));
 		*cap |= ACPI_VIDEO_BACKLIGHT;
 		if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
-			printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness "
-					"control misses _BQC function\n");
+			printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
+				"cannot determine initial brightness\n");
 		/* We have backlight support, no need to scan further */
 		return AE_CTRL_TERMINATE;
 	}
drivers/cpuidle/governors/menu.c (+1, -1)

···
  * Limiting Performance Impact
  * ---------------------------
  * C states, especially those with large exit latencies, can have a real
- * noticable impact on workloads, which is not acceptable for most sysadmins,
+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
  * and in addition, less performance has a power price of its own.
  *
  * As a general rule of thumb, menu assumes that the following heuristic
drivers/dma/shdma.c (+2, -1)

···
 
 	sh_chan = to_sh_chan(chan);
 	param = chan->private;
-	slave_addr = param->config->addr;
 
 	/* Someone calling slave DMA on a public channel? */
 	if (!param || !sg_len) {
···
 			 __func__, param, sg_len, param ? param->slave_id : -1);
 		return NULL;
 	}
+
+	slave_addr = param->config->addr;
 
 	/*
 	 * if (param != NULL), this is a successfully requested slave channel,
···
 		return -ENOMEM;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
 	if (ret) {
-		drm_gem_object_unreference_unlocked(obj);
 		return ret;
 	}
-
-	/* Sink the floating reference from kref_init(handlecount) */
-	drm_gem_object_handle_unreference_unlocked(obj);
 
 	args->handle = handle;
 	return 0;
···
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check source.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check source. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!access_ok(VERIFY_WRITE,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto err;
 	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
···
 			file_priv);
 	}
 
+err:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
···
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
-	if (!access_ok(VERIFY_READ, user_data, remain))
-		return -EFAULT;
 
 
 	mutex_lock(&dev->struct_mutex);
···
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check destination.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check destination. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!access_ok(VERIFY_READ,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto err;
 	}
 
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
···
 		DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
+err:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
···
 				  (int) reloc->offset,
 				  reloc->read_domains,
 				  reloc->write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
drivers/gpu/drm/i915/i915_gem_evict.c (+22, -27)

···
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
-	struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+	struct drm_i915_gem_object *obj_priv;
 	struct list_head *render_iter, *bsd_iter;
 	int ret = 0;
···
 		return -ENOSPC;
 
 found:
+	/* drm_mm doesn't allow any other other operations while
+	 * scanning, therefore store to be evicted objects on a
+	 * temporary list. */
 	INIT_LIST_HEAD(&eviction_list);
-	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-				 &unwind_list, evict_list) {
+	while (!list_empty(&unwind_list)) {
+		obj_priv = list_first_entry(&unwind_list,
+					    struct drm_i915_gem_object,
+					    evict_list);
 		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-			/* drm_mm doesn't allow any other other operations while
-			 * scanning, therefore store to be evicted objects on a
-			 * temporary list. */
 			list_move(&obj_priv->evict_list, &eviction_list);
-		} else
-			drm_gem_object_unreference(&obj_priv->base);
-	}
-
-	/* Unbinding will emit any required flushes */
-	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-				 &eviction_list, evict_list) {
-#if WATCH_LRU
-		DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
-#endif
-		ret = i915_gem_object_unbind(&obj_priv->base);
-		if (ret)
-			return ret;
-
+			continue;
+		}
+		list_del(&obj_priv->evict_list);
 		drm_gem_object_unreference(&obj_priv->base);
 	}
 
-	/* The just created free hole should be on the top of the free stack
-	 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
-	 * Furthermore all accessed data has just recently been used, so it
-	 * should be really fast, too. */
-	BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
-				   alignment, 0));
+	/* Unbinding will emit any required flushes */
+	while (!list_empty(&eviction_list)) {
+		obj_priv = list_first_entry(&eviction_list,
+					    struct drm_i915_gem_object,
+					    evict_list);
+		if (ret == 0)
+			ret = i915_gem_object_unbind(&obj_priv->base);
+		list_del(&obj_priv->evict_list);
+		drm_gem_object_unreference(&obj_priv->base);
+	}
 
-	return 0;
+	return ret;
 }
 
 int
drivers/gpu/drm/i915/intel_display.c (+36, -22)

···
 	DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
  * @dev: drm device
  * @pipe: pipe to wait for
  *
···
  * spinning on the vblank interrupt status bit, since we won't actually
  * see an interrupt when the pipe is disabled.
  *
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
+ * On Gen4 and above:
+ *   wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ *   wait for the display line value to settle (it usually
+ *   ends up stopping at the start of the next frame).
+ *
  */
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
-	unsigned long timeout = jiffies + msecs_to_jiffies(100);
-	u32 last_line;
 
-	/* Wait for the display line to settle */
-	do {
-		last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
-		mdelay(5);
-	} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
-		 time_after(timeout, jiffies));
+	if (INTEL_INFO(dev)->gen >= 4) {
+		int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
 
-	if (time_after(jiffies, timeout))
-		DRM_DEBUG_KMS("vblank wait timed out\n");
+		/* Wait for the Pipe State to go off */
+		if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+			     100, 0))
+			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+	} else {
+		u32 last_line;
+		int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+		unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+		/* Wait for the display line to settle */
+		do {
+			last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+			mdelay(5);
+		} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+			 time_after(timeout, jiffies));
+		if (time_after(jiffies, timeout))
+			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+	}
 }
 
 /* Parameters have changed, update FBC info */
···
 		I915_READ(dspbase_reg);
 	}
 
-	/* Wait for vblank for the disable to take effect */
-	intel_wait_for_vblank_off(dev, pipe);
-
 	/* Don't disable pipe A or pipe A PLLs if needed */
 	if (pipeconf_reg == PIPEACONF &&
-	    (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+	    (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+		/* Wait for vblank for the disable to take effect */
+		intel_wait_for_vblank(dev, pipe);
 		goto skip_pipe_off;
+	}
 
 	/* Next, disable display pipes */
 	temp = I915_READ(pipeconf_reg);
···
 		I915_READ(pipeconf_reg);
 	}
 
-	/* Wait for vblank for the disable to take effect. */
-	intel_wait_for_vblank_off(dev, pipe);
+	/* Wait for the pipe to turn off */
+	intel_wait_for_pipe_off(dev, pipe);
 
 	temp = I915_READ(dpll_reg);
 	if ((temp & DPLL_VCO_ENABLE) != 0) {
drivers/gpu/drm/i915/intel_dp.c (+9, -10)

···
 intel_dp_set_link_train(struct intel_dp *intel_dp,
 			uint32_t dp_reg_value,
 			uint8_t dp_train_pat,
-			uint8_t train_set[4],
-			bool first)
+			uint8_t train_set[4])
 {
 	struct drm_device *dev = intel_dp->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
 	int ret;
 
 	I915_WRITE(intel_dp->output_reg, dp_reg_value);
 	POSTING_READ(intel_dp->output_reg);
-	if (first)
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	intel_dp_aux_native_write_1(intel_dp,
 				    DP_TRAINING_PATTERN_SET,
···
 	uint8_t voltage;
 	bool clock_recovery = false;
 	bool channel_eq = false;
-	bool first = true;
 	int tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+
+	/* Enable output, wait for it to become active */
+	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+	POSTING_READ(intel_dp->output_reg);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	/* Write the link configuration data */
 	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
···
 	reg = DP | DP_LINK_TRAIN_PAT_1;
 
 	if (!intel_dp_set_link_train(intel_dp, reg,
-				     DP_TRAINING_PATTERN_1, train_set, first))
+				     DP_TRAINING_PATTERN_1, train_set))
 		break;
-	first = false;
 	/* Set training pattern 1 */
 
 	udelay(100);
···
 
 		/* channel eq pattern */
 		if (!intel_dp_set_link_train(intel_dp, reg,
-					     DP_TRAINING_PATTERN_2, train_set,
-					     false))
+					     DP_TRAINING_PATTERN_2, train_set))
 			break;
 
 		udelay(400);
···
 		goto out;
 
 	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-	drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
-	if (ret)
-		drm_gem_object_unreference_unlocked(nvbo->gem);
 	return ret;
 }
···
 	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
 	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
 	 */
-	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    rdev->vram_scratch.ptr) {
 		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
 		u32 tmp;
drivers/gpu/drm/radeon/radeon_atombios.c (+9)

···
 		*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
+	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+	if ((dev->pdev->device == 0x796e) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x7302)) {
+		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			return false;
+	}
+
 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
 	if ((dev->pdev->device == 0x7941) &&
 	    (dev->pdev->subsystem_vendor == 0x147b) &&
drivers/gpu/drm/radeon/radeon_display.c (+4, -1)

···
 		DRM_INFO("  DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
 	if (devices & ATOM_DEVICE_DFP5_SUPPORT)
 		DRM_INFO("  DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+	if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+		DRM_INFO("  DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
 	if (devices & ATOM_DEVICE_TV1_SUPPORT)
 		DRM_INFO("  TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
 	if (devices & ATOM_DEVICE_CV_SUPPORT)
···
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 
-	if (radeon_fb->obj)
+	if (radeon_fb->obj) {
 		drm_gem_object_unreference_unlocked(radeon_fb->obj);
+	}
 	drm_framebuffer_cleanup(fb);
 	kfree(radeon_fb);
 }
drivers/gpu/drm/radeon/radeon_fb.c (+4, -10)

···
 	ret = radeon_bo_reserve(rbo, false);
 	if (likely(ret == 0)) {
 		radeon_bo_kunmap(rbo);
+		radeon_bo_unpin(rbo);
 		radeon_bo_unreserve(rbo);
 	}
+	drm_gem_object_handle_unreference(gobj);
 	drm_gem_object_unreference_unlocked(gobj);
 }
···
 {
 	struct fb_info *info;
 	struct radeon_framebuffer *rfb = &rfbdev->rfb;
-	struct radeon_bo *rbo;
-	int r;
 
 	if (rfbdev->helper.fbdev) {
 		info = rfbdev->helper.fbdev;
···
 	}
 
 	if (rfb->obj) {
-		rbo = rfb->obj->driver_private;
-		r = radeon_bo_reserve(rbo, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rbo);
-			radeon_bo_unpin(rbo);
-			radeon_bo_unreserve(rbo);
-		}
-		drm_gem_object_unreference_unlocked(rfb->obj);
+		radeonfb_destroy_pinned_object(rfb->obj);
+		rfb->obj = NULL;
 	}
 	drm_fb_helper_fini(&rfbdev->helper);
 	drm_framebuffer_cleanup(&rfb->base);
···
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.mm_node->start < bo->num_pages)
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+				       false, false);
+
 	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 
 	/* Could probably bug on */
···
 /* Super-I/O Function prototypes */
 static inline int superio_inb(int base, int reg);
 static inline int superio_inw(int base, int reg);
-static inline void superio_enter(int base);
+static inline int superio_enter(int base);
 static inline void superio_select(int base, int ld);
 static inline void superio_exit(int base);
···
 	return val;
 }
 
-static inline void superio_enter(int base)
+static inline int superio_enter(int base)
 {
+	/* Don't step on other drivers' I/O space by accident */
+	if (!request_muxed_region(base, 2, DRVNAME)) {
+		printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+		       base);
+		return -EBUSY;
+	}
+
 	/* according to the datasheet the key must be send twice! */
 	outb(SIO_UNLOCK_KEY, base);
 	outb(SIO_UNLOCK_KEY, base);
+
+	return 0;
 }
 
 static inline void superio_select(int base, int ld)
···
 static inline void superio_exit(int base)
 {
 	outb(SIO_LOCK_KEY, base);
+	release_region(base, 2);
 }
 
 static inline int fan_from_reg(u16 reg)
···
 static int __init f71882fg_find(int sioaddr, unsigned short *address,
 	struct f71882fg_sio_data *sio_data)
 {
-	int err = -ENODEV;
 	u16 devid;
-
-	/* Don't step on other drivers' I/O space by accident */
-	if (!request_region(sioaddr, 2, DRVNAME)) {
-		printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
-		       (int)sioaddr);
-		return -EBUSY;
-	}
-
-	superio_enter(sioaddr);
+	int err = superio_enter(sioaddr);
+	if (err)
+		return err;
 
 	devid = superio_inw(sioaddr, SIO_REG_MANID);
 	if (devid != SIO_FINTEK_ID) {
 		pr_debug(DRVNAME ": Not a Fintek device\n");
+		err = -ENODEV;
 		goto exit;
 	}
···
 	default:
 		printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
 		       (unsigned int)devid);
+		err = -ENODEV;
 		goto exit;
 	}
···
 
 	if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
 		printk(KERN_WARNING DRVNAME ": Device not activated\n");
+		err = -ENODEV;
 		goto exit;
 	}
 
 	*address = superio_inw(sioaddr, SIO_REG_ADDR);
 	if (*address == 0) {
 		printk(KERN_WARNING DRVNAME ": Base address not set\n");
+		err = -ENODEV;
 		goto exit;
 	}
 	*address &= ~(REGION_LENGTH - 1);	/* Ignore 3 LSB */
···
 	       (int)superio_inb(sioaddr, SIO_REG_DEVREV));
 exit:
 	superio_exit(sioaddr);
-	release_region(sioaddr, 2);
 	return err;
 }
drivers/i2c/busses/i2c-davinci.c (+3, -3)

···
 
 	dev->terminate = 0;
 
-	/* write the data into mode register */
-	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
-
 	/*
 	 * First byte should be set here, not after interrupt,
 	 * because transmit-data-ready interrupt can come before
···
 		davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
 		dev->buf_len--;
 	}
+
+	/* write the data into mode register; start transmitting */
+	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
 
 	r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
 						      dev->adapter.timeout);
drivers/i2c/busses/i2c-octeon.c (+1, -1)

···
 		return result;
 	} else if (result == 0) {
 		dev_dbg(i2c->dev, "%s: timeout\n", __func__);
-		result = -ETIMEDOUT;
+		return -ETIMEDOUT;
 	}
 
 	return 0;
···
 {
 	int ret = 0;
 
-	blocking_notifier_chain_register(&xenstore_chain, nb);
+	if (xenstored_ready > 0)
+		ret = nb->notifier_call(nb, 0, NULL);
+	else
+		blocking_notifier_chain_register(&xenstore_chain, nb);
 
 	return ret;
 }
···
 
 void xenbus_probe(struct work_struct *unused)
 {
-	BUG_ON((xenstored_ready <= 0));
+	xenstored_ready = 1;
 
 	/* Enumerate devices in xenstore and watch for changes. */
 	xenbus_probe_devices(&xenbus_frontend);
···
 		xen_store_evtchn = xen_start_info->store_evtchn;
 		xen_store_mfn = xen_start_info->store_mfn;
 		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xenstored_ready = 1;
 	}
-	xenstored_ready = 1;
 }
 
 	/* Initialize the interface to xenstore. */
+33-16
fs/cifs/cifssmb.c
···
 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
 		void **request_buf)
 {
-	int rc = 0;
+	int rc;
 
 	rc = cifs_reconnect_tcon(tcon, smb_command);
 	if (rc)
···
 	if (tcon != NULL)
 		cifs_stats_inc(&tcon->num_smbs_sent);
 
-	return rc;
+	return 0;
 }
 
 int
···
 
 /* If the return code is zero, this function must fill in request_buf pointer */
 static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
-	 void **request_buf /* returned */ ,
-	 void **response_buf /* returned */ )
+__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+	   void **request_buf, void **response_buf)
 {
-	int rc = 0;
-
-	rc = cifs_reconnect_tcon(tcon, smb_command);
-	if (rc)
-		return rc;
-
 	*request_buf = cifs_buf_get();
 	if (*request_buf == NULL) {
 		/* BB should we add a retry in here if not a writepage? */
···
 	if (tcon != NULL)
 		cifs_stats_inc(&tcon->num_smbs_sent);
 
-	return rc;
+	return 0;
+}
+
+/* If the return code is zero, this function must fill in request_buf pointer */
+static int
+smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+	 void **request_buf, void **response_buf)
+{
+	int rc;
+
+	rc = cifs_reconnect_tcon(tcon, smb_command);
+	if (rc)
+		return rc;
+
+	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
+}
+
+static int
+smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+		      void **request_buf, void **response_buf)
+{
+	if (tcon->ses->need_reconnect || tcon->need_reconnect)
+		return -EHOSTDOWN;
+
+	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
 }
 
 static int validate_t2(struct smb_t2_rsp *pSMB)
···
 
 	cFYI(1, "In QFSUnixInfo");
 QFSUnixRetry:
-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-		      (void **) &pSMBr);
+	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+				   (void **) &pSMB, (void **) &pSMBr);
 	if (rc)
 		return rc;
···
 	cFYI(1, "In SETFSUnixInfo");
 SETFSUnixRetry:
 	/* BB switch to small buf init to save memory */
-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-		      (void **) &pSMBr);
+	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+				   (void **) &pSMB, (void **) &pSMBr);
 	if (rc)
 		return rc;
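Editor's note: QFSUnixInfo and SETFSUnixInfo are themselves issued while a tcon is being reconnected. Routing them through the ordinary smb_init() let them call cifs_reconnect_tcon() recursively from inside the reconnect path. Splitting the buffer setup out into __smb_init() lets the new smb_init_no_reconnect() wrapper fail fast with -EHOSTDOWN while the session or tcon is still marked for reconnect, breaking the loop without changing any other caller.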
+1-1
fs/fuse/dev.c
···
 	loff_t file_size;
 	unsigned int num;
 	unsigned int offset;
-	size_t total_len;
+	size_t total_len = 0;
 
 	req = fuse_get_req(fc);
 	if (IS_ERR(req))
+1-1
fs/ocfs2/symlink.c
···
 	}
 
 	/* Fast symlinks can't be large */
-	len = strlen(target);
+	len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
 	link = kzalloc(len + 1, GFP_NOFS);
 	if (!link) {
 		status = -ENOMEM;
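Editor's note: `target` comes straight from the on-disk inode, and a crafted or corrupted filesystem image may omit the terminating NUL, sending strlen() off the end of the buffer. Bounding the scan by the field's capacity closes that hole. A minimal sketch of the pattern, where `cap` stands in for ocfs2_fast_symlink_chars(sb):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Duplicate a possibly unterminated on-disk string of at most `cap`
     * bytes; the NUL terminator comes from kzalloc(). */
    static char *dup_disk_string(const char *raw, size_t cap)
    {
            size_t len = strnlen(raw, cap);        /* never scans past cap */
            char *s = kzalloc(len + 1, GFP_NOFS);

            if (s)
                    memcpy(s, raw, len);
            return s;
    }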
+4-3
fs/reiserfs/ioctl.c
···
 int reiserfs_unpack(struct inode *inode, struct file *filp)
 {
 	int retval = 0;
+	int depth;
 	int index;
 	struct page *page;
 	struct address_space *mapping;
···
 	/* we need to make sure nobody is changing the file size beneath
 	** us
 	*/
-	mutex_lock(&inode->i_mutex);
-	reiserfs_write_lock(inode->i_sb);
+	reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+	depth = reiserfs_write_lock_once(inode->i_sb);
 
 	write_from = inode->i_size & (blocksize - 1);
 	/* if we are on a block boundary, we are already unpacked. */
···
 
  out:
 	mutex_unlock(&inode->i_mutex);
-	reiserfs_write_unlock(inode->i_sb);
+	reiserfs_write_unlock_once(inode->i_sb, depth);
 	return retval;
 }
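Editor's note: reiserfs_unpack() took i_mutex and then the per-superblock write lock, while other paths nest them the other way around; reiserfs_mutex_lock_safe() relaxes the write lock around the mutex acquisition to avoid the inversion. The `_once` variants handle the write lock's recursive nature: they take it only if it is not already held and return a depth token for the matching unlock. In sketch form (the reiserfs helpers are real, the surrounding body is illustrative):

    int depth = reiserfs_write_lock_once(sb);  /* no-op if already held */
    /* ... work that may sleep or take other locks ... */
    reiserfs_write_unlock_once(sb, depth);     /* restores the previous depth */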
+9-3
fs/xfs/xfs_log_cil.c
···
 	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
 	new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
-	/* lock out transaction commit, but don't block on background push */
+	/*
+	 * Lock out transaction commit, but don't block for background pushes
+	 * unless we are well over the CIL space limit. See the definition of
+	 * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
+	 * used here.
+	 */
 	if (!down_write_trylock(&cil->xc_ctx_lock)) {
-		if (!push_seq)
+		if (!push_seq &&
+		    cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
 			goto out_free_ticket;
 		down_write(&cil->xc_ctx_lock);
 	}
···
 		goto out_skip;
 
 	/* check for a previously pushed sequence */
-	if (push_seq < cil->xc_ctx->sequence)
+	if (push_seq && push_seq < cil->xc_ctx->sequence)
 		goto out_skip;
 
 	/*
+21-16
fs/xfs/xfs_log_priv.h
···
 };
 
 /*
- * The amount of log space we should the CIL to aggregate is difficult to size.
- * Whatever we chose we have to make we can get a reservation for the log space
- * effectively, that it is large enough to capture sufficient relogging to
- * reduce log buffer IO significantly, but it is not too large for the log or
- * induces too much latency when writing out through the iclogs. We track both
- * space consumed and the number of vectors in the checkpoint context, so we
- * need to decide which to use for limiting.
+ * The amount of log space we allow the CIL to aggregate is difficult to size.
+ * Whatever we choose, we have to make sure we can get a reservation for the
+ * log space effectively, that it is large enough to capture sufficient
+ * relogging to reduce log buffer IO significantly, but it is not too large for
+ * the log or induces too much latency when writing out through the iclogs. We
+ * track both space consumed and the number of vectors in the checkpoint
+ * context, so we need to decide which to use for limiting.
  *
  * Every log buffer we write out during a push needs a header reserved, which
  * is at least one sector and more for v2 logs. Hence we need a reservation of
···
  * checkpoint transaction ticket is specific to the checkpoint context, rather
  * than the CIL itself.
  *
- * With dynamic reservations, we can basically make up arbitrary limits for the
- * checkpoint size so long as they don't violate any other size rules. Hence
- * the initial maximum size for the checkpoint transaction will be set to a
- * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit
- * right now based on the latency of writing out a large amount of data through
- * the circular iclog buffers.
+ * With dynamic reservations, we can effectively make up arbitrary limits for
+ * the checkpoint size so long as they don't violate any other size rules.
+ * Recovery imposes a rule that no transaction exceed half the log, so we are
+ * limited by that. Furthermore, the log transaction reservation subsystem
+ * tries to keep 25% of the log free, so we need to keep below that limit or we
+ * risk running out of free log space to start any new transactions.
+ *
+ * In order to keep background CIL push efficient, we will set a lower
+ * threshold at which background pushing is attempted without blocking current
+ * transaction commits. A separate, higher bound defines when CIL pushes are
+ * enforced to ensure we stay within our maximum checkpoint size bounds; both
+ * stay safely below the 25% free-space threshold, yet give us plenty of space
+ * for aggregation on large logs.
  */
-
-#define XLOG_CIL_SPACE_LIMIT(log) \
-	(min((log->l_logsize >> 2), (8 * 1024 * 1024)))
+#define XLOG_CIL_SPACE_LIMIT(log)	(log->l_logsize >> 3)
+#define XLOG_CIL_HARD_SPACE_LIMIT(log)	(3 * (log->l_logsize >> 4))
 
 /*
  * The reservation head lsn is not made up of a cycle number and block number.
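Editor's note, with worked numbers: for a 128 MB log, XLOG_CIL_SPACE_LIMIT is 128 MB >> 3 = 16 MB (12.5% of the log) and XLOG_CIL_HARD_SPACE_LIMIT is 3 * (128 MB >> 4) = 24 MB (18.75%), both under the 32 MB (25%) the reservation subsystem tries to keep free. The previous single limit would have been min(128/4, 8) = 8 MB. Together with the xfs_log_cil.c hunk above, background pushes are triggered at the soft limit but only start blocking transaction commits once the hard limit is crossed.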
+1-1
include/acpi/acpixf.h
···
 extern u8 acpi_gbl_permanent_mmap;
 
 /*
- * Globals that are publically available, allowing for
+ * Globals that are publicly available, allowing for
  * run time configuration
  */
 extern u32 acpi_dbg_level;
+20-9
include/drm/drmP.h
···
 	struct kref refcount;
 
 	/** Handle count of this object. Each handle also holds a reference */
-	struct kref handlecount;
+	atomic_t handle_count; /* number of handles on this object */
 
 	/** Related drm device */
 	struct drm_device *dev;
···
 	 */
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
-	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
···
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
 extern void drm_vm_open_locked(struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct vm_area_struct *vma);
 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
···
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
···
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-	if (obj != NULL)
-		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+	if (obj != NULL) {
+		struct drm_device *dev = obj->dev;
+		mutex_lock(&dev->struct_mutex);
+		kref_put(&obj->refcount, drm_gem_object_free);
+		mutex_unlock(&dev->struct_mutex);
+	}
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
···
 drm_gem_object_handle_reference(struct drm_gem_object *obj)
 {
 	drm_gem_object_reference(obj);
-	kref_get(&obj->handlecount);
+	atomic_inc(&obj->handle_count);
 }
 
 static inline void
···
 	if (obj == NULL)
 		return;
 
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
 	/*
 	 * Must bump handle count first as this may be the last
 	 * ref, in which case the object would disappear before we
 	 * checked for a name
 	 */
-	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
 	drm_gem_object_unreference(obj);
 }
···
 	if (obj == NULL)
 		return;
 
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
+
 	/*
 	 * Must bump handle count first as this may be the last
 	 * ref, in which case the object would disappear before we
 	 * checked for a name
 	 */
-	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
 	drm_gem_object_unreference_unlocked(obj);
 }
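Editor's note: freeing a GEM object requires dev->struct_mutex, which made the lock-free kref release path (drm_gem_object_free_unlocked) unsafe; the unlocked unreference now simply takes the mutex around the normal kref_put(). The handle count, by contrast, is not a lifetime count at all, so it becomes a plain atomic_t with an explicit zero check guarding against underflow. The teardown idiom in sketch form, mirroring the patch; `release()` stands in for drm_gem_object_handle_free():

    #include <linux/atomic.h>

    /* Drop one handle; only the thread that takes the count to zero
     * performs the name teardown. */
    static void put_handle(atomic_t *handle_count, void (*release)(void))
    {
            if (atomic_read(handle_count) == 0)    /* already torn down */
                    return;
            if (atomic_dec_and_test(handle_count)) /* true only for final put */
                    release();
    }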
+1
include/linux/cpuidle.h
···
 #define CPUIDLE_FLAG_BALANCED	(0x40) /* medium latency, moderate savings */
 #define CPUIDLE_FLAG_DEEP	(0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
+#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)
-2
kernel/kfifo.c
···
 	n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
 	n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
 
-	if (n)
-		sg_mark_end(sgl + n - 1);
 	return n;
 }
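Editor's note: the scatterlist built here is consumed by its returned entry count, not by an end marker, and the two setup_sgl_buf() calls cover the wrap-around case where the fifo contents are split in two segments; marking an end element inside the helper broke callers that treat the table by count. The samples/kfifo hunk below is the matching caller-side change: it now loops over the returned number of entries instead of ARRAY_SIZE(sg), since trailing slots are left uninitialized.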
+4
kernel/module.c
···
 {
 	struct module *mod = _mod;
 	list_del(&mod->list);
+	module_bug_cleanup(mod);
 	return 0;
 }
···
 	if (err < 0)
 		goto ddebug;
 
+	module_bug_finalize(info.hdr, info.sechdrs, mod);
 	list_add_rcu(&mod->list, &modules);
 	mutex_unlock(&module_mutex);
···
 	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
+	module_bug_cleanup(mod);
+
 ddebug:
 	if (!mod->taints)
 		dynamic_debug_remove(info.debug);
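Editor's note: the BUG-table list was previously maintained from the arch finalize path without module_mutex held, so an oops raised on one CPU could walk module_bug_list while a load (or a failed load being unwound) was splicing it. Registering and unregistering the table right next to the `modules` list updates, under the same mutex, keeps the two lists consistent with each other; and since module_bug_finalize() can no longer fail or race, the lib/bug.c hunk below drops its return value to void.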
+14-3
kernel/smp.c
···
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on another CPU
+ * __smp_call_function_single(): Run a function on a specific CPU
  * @cpu: The CPU to run on.
  * @data: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
  *
  * Like smp_call_function_single(), but allow caller to pass in a
  * pre-allocated data structure. Useful for embedding @data inside
···
 void __smp_call_function_single(int cpu, struct call_single_data *data,
 				int wait)
 {
-	csd_lock(data);
+	unsigned int this_cpu;
+	unsigned long flags;
 
+	this_cpu = get_cpu();
 	/*
 	 * Can deadlock when called with interrupts disabled.
 	 * We allow cpu's that are not yet online though, as no one else can
···
 	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
 		     && !oops_in_progress);
 
-	generic_exec_single(cpu, data, wait);
+	if (cpu == this_cpu) {
+		local_irq_save(flags);
+		data->func(data->info);
+		local_irq_restore(flags);
+	} else {
+		csd_lock(data);
+		generic_exec_single(cpu, data, wait);
+	}
+	put_cpu();
 }
 
 /**
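Editor's note: generic_exec_single() assumes a remote target, so calling __smp_call_function_single() on the current CPU meant queueing an IPI to self and could deadlock on the csd lock. The local case now just runs the callback with interrupts disabled, matching IPI context. A hedged usage sketch, with an embedding structure of our own invention:

    #include <linux/smp.h>

    struct my_probe {
            struct call_single_data csd;   /* embedded, pre-allocated */
            int cpu_seen;
    };

    static void my_probe_func(void *info)
    {
            struct my_probe *p = info;
            p->cpu_seen = smp_processor_id();  /* runs with IRQs off */
    }

    static void probe_cpu(struct my_probe *p, int cpu)
    {
            p->csd.func = my_probe_func;
            p->csd.info = p;
            /* After this fix, `cpu` may be the calling CPU as well. */
            __smp_call_function_single(cpu, &p->csd, 1);
    }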
+2-4
lib/bug.c
···
 	return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-			struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+			 struct module *mod)
 {
 	char *secstrings;
 	unsigned int i;
···
 	 * could potentially lead to deadlock and thus be counter-productive.
 	 */
 	list_add(&mod->bug_list, &module_bug_list);
-
-	return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
+1-1
lib/list_sort.c
···
 		 * element comparison is needed, so the client's cmp()
 		 * routine can invoke cond_resched() periodically.
 		 */
-		(*cmp)(priv, tail, tail);
+		(*cmp)(priv, tail->next, tail->next);
 
 		tail->next->prev = tail;
 		tail = tail->next;
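Editor's note: during the back-link restoration pass, `tail` can still be the merged list's head sentinel, which is not embedded in a client element; a cmp() routine that does container_of() on its arguments then dereferences garbage. Passing `tail->next` (a real element, twice) keeps the call a no-op comparison while staying safe. A sketch of a client that uses the guaranteed callback to yield on huge lists; `struct item` is illustrative, and yielding is only legal when no locks are held:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/sched.h>

    struct item { struct list_head node; int key; };

    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
            cond_resched();        /* invoked at least once per element */
            if (a == b)            /* the bookkeeping call: result unused */
                    return 0;
            return container_of(a, struct item, node)->key -
                   container_of(b, struct item, node)->key;
    }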
+7-1
mm/rmap.c
···
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	if (PageAnon(page)) {
-		if (vma->anon_vma->root != page_anon_vma(page)->root)
+		struct anon_vma *page__anon_vma = page_anon_vma(page);
+		/*
+		 * Note: swapoff's unuse_vma() is more efficient with this
+		 * check, and needs it to match anon_vma when KSM is active.
+		 */
+		if (!vma->anon_vma || !page__anon_vma ||
+		    vma->anon_vma->root != page__anon_vma->root)
 			return -EFAULT;
 	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 		if (!vma->vm_file ||
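Editor's note: the old test dereferenced both vma->anon_vma and page_anon_vma(page) unconditionally, yet either may legitimately be NULL: the vma may never have faulted anonymous pages in, and the page's anon_vma association can be torn down concurrently. Treating either NULL as "not mapped here" (-EFAULT) avoids the oops, and comparing the anon_vma roots rather than the anon_vmas themselves is what lets swapoff's unuse_vma() match pages that KSM has merged across VMAs.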
+10-4
net/8021q/vlan_core.c
···
 
 	if (vlan_dev)
 		skb->dev = vlan_dev;
-	else if (vlan_id)
-		goto drop;
+	else if (vlan_id) {
+		if (!(skb->dev->flags & IFF_PROMISC))
+			goto drop;
+		skb->pkt_type = PACKET_OTHERHOST;
+	}
 
 	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
···
 
 	if (vlan_dev)
 		skb->dev = vlan_dev;
-	else if (vlan_id)
-		goto drop;
+	else if (vlan_id) {
+		if (!(skb->dev->flags & IFF_PROMISC))
+			goto drop;
+		skb->pkt_type = PACKET_OTHERHOST;
+	}
 
 	for (p = napi->gro_list; p; p = p->next) {
 		NAPI_GRO_CB(p)->same_flow =
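Editor's note: with hardware VLAN acceleration, a tagged frame whose VID has no configured vlan device was freed in this path before packet taps ever ran, so a sniffer on the underlying NIC in promiscuous mode never saw it. Keeping the frame but marking it PACKET_OTHERHOST lets packet sockets capture it while the protocol layers still discard it; when the device is not promiscuous, the old drop is preserved.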
+1
net/ipv4/Kconfig
···
 
 config NET_IPGRE
 	tristate "IP: GRE tunnels over IP"
+	depends on IPV6 || IPV6=n
 	help
 	  Tunneling means encapsulating data of one protocol type within
 	  another protocol and sending it over a channel that understands the
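Editor's note on the idiom: for a tristate symbol, `depends on IPV6 || IPV6=n` permits every combination except NET_IPGRE=y with IPV6=m. With IPV6=y or IPV6=n the clause is trivially satisfied; with IPV6=m the first alternative evaluates to m, capping NET_IPGRE at m. That is exactly the constraint needed when code that can be built in would otherwise reference symbols provided by the ipv6 module.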
+14-10
net/ipv4/tcp_timer.c
···
 
 /* This function calculates a "timeout" which is equivalent to the timeout of a
  * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
  */
 static bool retransmits_timed_out(struct sock *sk,
-				  unsigned int boundary)
+				  unsigned int boundary,
+				  bool syn_set)
 {
 	unsigned int timeout, linear_backoff_thresh;
 	unsigned int start_ts;
+	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
···
 	else
 		start_ts = tcp_sk(sk)->retrans_stamp;
 
-	linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+	linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 
 	if (boundary <= linear_backoff_thresh)
-		timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+		timeout = ((2 << boundary) - 1) * rto_base;
 	else
-		timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 			  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 
 	return (tcp_time_stamp - start_ts) >= timeout;
···
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int retry_until;
-	bool do_reset;
+	bool do_reset, syn_set = 0;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
 			dst_negative_advice(sk);
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+		syn_set = 1;
 	} else {
-		if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
+		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
···
 
 		retry_until = tcp_orphan_retries(sk, alive);
 		do_reset = alive ||
-			   !retransmits_timed_out(sk, retry_until);
+			   !retransmits_timed_out(sk, retry_until, 0);
 
 		if (tcp_out_of_resources(sk, do_reset))
 			return 1;
 		}
 	}
 
-	if (retransmits_timed_out(sk, retry_until)) {
+	if (retransmits_timed_out(sk, retry_until, syn_set)) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);
 		return 1;
···
 		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 	}
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
+	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
 		__sk_dst_reset(sk);
 
 out:;
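Editor's note, with the constants of this kernel generation (TCP_RTO_MIN about 200 ms, TCP_TIMEOUT_INIT = 3 s, TCP_RTO_MAX = 120 s): SYN retransmissions back off from the 3 s base, so the default sysctl_tcp_syn_retries = 5 should allow 3+6+12+24+48+96 = 189 s, and indeed ilog2(120/3) = 5 >= boundary gives ((2 << 5) - 1) * 3 s = 189 s. Measured against the old 200 ms base, the same boundary yielded only ((2 << 5) - 1) * 0.2 s, about 12.6 s, so sockets in SYN_SENT/SYN_RECV were declared timed out after a couple of retransmissions. The new syn_set flag selects the correct base for those states.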
+2-1
net/phonet/pep.c
···
 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 {
 	struct pep_sock *pn = pep_sk(sk);
-	struct pnpipehdr *hdr = pnp_hdr(skb);
+	struct pnpipehdr *hdr;
 	int wake = 0;
 
 	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 		return -EINVAL;
 
+	hdr = pnp_hdr(skb);
 	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
 		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
 			       (unsigned)hdr->data[0]);
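Editor's note: pskb_may_pull() may have to linearize more of the packet and can reallocate the skb head in the process, so a header pointer computed before the call can be left dangling exactly when the function reports that the bytes are now accessible. The general shape of the fix, with `struct foohdr` as an illustrative placeholder:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    struct foohdr { unsigned char type; };    /* placeholder header */

    static int parse(struct sk_buff *skb)
    {
            struct foohdr *hdr;

            if (!pskb_may_pull(skb, sizeof(*hdr)))
                    return -EINVAL;
            hdr = (struct foohdr *)skb->data; /* take the pointer afterwards */
            return hdr->type ? 0 : -EPROTO;
    }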
+9-8
samples/kfifo/dma-example.c
···
 {
 	int i;
 	unsigned int ret;
+	unsigned int nents;
 	struct scatterlist sg[10];
 
 	printk(KERN_INFO "DMA fifo test start\n");
···
 	 * byte at the beginning, after the kfifo_skip().
 	 */
 	sg_init_table(sg, ARRAY_SIZE(sg));
-	ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
-	printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-	if (!ret) {
+	nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
+	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+	if (!nents) {
 		/* fifo is full and no sgl was created */
 		printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
 		return -EIO;
···
 
 	/* receive data */
 	printk(KERN_INFO "scatterlist for receive:\n");
-	for (i = 0; i < ARRAY_SIZE(sg); i++) {
+	for (i = 0; i < nents; i++) {
 		printk(KERN_INFO
 		"sg[%d] -> "
 		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
···
 	kfifo_dma_in_finish(&fifo, ret);
 
 	/* Prepare to transmit data, example: 8 bytes */
-	ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
-	printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-	if (!ret) {
+	nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
+	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+	if (!nents) {
 		/* no data was available and no sgl was created */
 		printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
 		return -EIO;
 	}
 
 	printk(KERN_INFO "scatterlist for transmit:\n");
-	for (i = 0; i < ARRAY_SIZE(sg); i++) {
+	for (i = 0; i < nents; i++) {
 		printk(KERN_INFO
 		"sg[%d] -> "
 		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+5
sound/core/control.c
···
 
 /* max number of user-defined controls */
 #define MAX_USER_CONTROLS	32
+#define MAX_CONTROL_COUNT	1028
 
 struct snd_kctl_ioctl {
 	struct list_head list;	/* list of all ioctls */
···
 
 	if (snd_BUG_ON(!control || !control->count))
 		return NULL;
+
+	if (control->count > MAX_CONTROL_COUNT)
+		return NULL;
+
 	kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
 	if (kctl == NULL) {
 		snd_printk(KERN_ERR "Cannot allocate control instance\n");
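Editor's note: control->count arrives from userspace and multiplies into the allocation size; a large enough value wraps the size arithmetic so that kzalloc() returns a short buffer which the per-control initialization then overruns. A blunt upper bound closes the hole. The pattern in sketch form, with `struct elem` and MAX_ELEMS as illustrative stand-ins:

    #include <linux/slab.h>

    #define MAX_ELEMS 1024

    struct elem { unsigned int flags; };

    /* Reject user-controlled counts before they feed size arithmetic,
     * so hdr_size + count * sizeof(struct elem) cannot wrap. */
    static void *alloc_table(size_t hdr_size, unsigned int count)
    {
            if (count == 0 || count > MAX_ELEMS)
                    return NULL;
            return kzalloc(hdr_size + count * sizeof(struct elem),
                           GFP_KERNEL);
    }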
+1-1
tools/perf/Makefile
···
 # we compile into subdirectories. If the target directory is not the source directory, they might not exist. So
 # we depend the various files onto their directories.
 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
 # In the second step, we make a rule to actually create these directories
 $(sort $(dir $(DIRECTORY_DEPS))):
 	$(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
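Editor's note: everything after `|` in a prerequisite list is order-only. Make still creates the directories before building the targets, but their timestamps are ignored when deciding whether the targets are out of date. That matters for directories, whose mtime changes every time a file is added or removed inside them; as normal prerequisites they forced spurious rebuilds of everything listed in DIRECTORY_DEPS.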