Merge commit '5359533801e3dd3abca5b7d3d985b0b33fd9fe8b' into drm-core-next

This commit changed an internal radeon structure, which meant a new driver
in -next had to be fixed up; merge in the commit and fix up the driver.

Also fixes a trivial nouveau merge conflict.

Conflicts:
drivers/gpu/drm/nouveau/nouveau_mem.c

+1906 -1071
-6
Documentation/networking/00-INDEX
··· 40 - info on using the DECnet networking layer in Linux. 41 depca.txt 42 - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver 43 - dgrs.txt 44 - - the Digi International RightSwitch SE-X Ethernet driver 45 dmfe.txt 46 - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. 47 e100.txt ··· 48 - info on Intel's E1000 line of gigabit ethernet boards 49 eql.txt 50 - serial IP load balancing 51 - ethertap.txt 52 - - the Ethertap user space packet reception and transmission driver 53 ewrk3.txt 54 - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver 55 filter.txt ··· 100 - TUN/TAP device driver, allowing user space Rx/Tx of packets. 101 vortex.txt 102 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. 103 - wavelan.txt 104 - - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver 105 x25.txt 106 - general info on X.25 development. 107 x25-iface.txt
··· 40 - info on using the DECnet networking layer in Linux. 41 depca.txt 42 - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver 43 dmfe.txt 44 - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. 45 e100.txt ··· 50 - info on Intel's E1000 line of gigabit ethernet boards 51 eql.txt 52 - serial IP load balancing 53 ewrk3.txt 54 - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver 55 filter.txt ··· 104 - TUN/TAP device driver, allowing user space Rx/Tx of packets. 105 vortex.txt 106 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. 107 x25.txt 108 - general info on X.25 development. 109 x25-iface.txt
+8 -1
Documentation/networking/dns_resolver.txt
··· 61 create dns_resolver foo:* * /usr/sbin/dns.foo %k 62 63 64 - 65 ===== 66 USAGE 67 ===== ··· 101 102 If _expiry is non-NULL, the expiry time (TTL) of the result will be 103 returned also. 104 105 106 =========
··· 61 create dns_resolver foo:* * /usr/sbin/dns.foo %k 62 63 64 ===== 65 USAGE 66 ===== ··· 102 103 If _expiry is non-NULL, the expiry time (TTL) of the result will be 104 returned also. 105 + 106 + 107 + =============================== 108 + READING DNS KEYS FROM USERSPACE 109 + =============================== 110 + 111 + Keys of dns_resolver type can be read from userspace using keyctl_read() or 112 + "keyctl read/print/pipe". 113 114 115 =========
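(Illustration only, not part of this patch.) The new dns_resolver documentation above notes that keys of this type can be read back from userspace with keyctl_read() or "keyctl read/print/pipe". A minimal userspace sketch using libkeyutils might look like the following; the hostname "example.com" is a placeholder and it assumes /sbin/request-key is configured as described earlier in that file:

/*
 * Illustration only (not from this patch): request and read back a
 * dns_resolver key from userspace.
 * Build with: gcc -o dnskey dnskey.c -lkeyutils
 */
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>

int main(void)
{
	key_serial_t key;
	void *payload;
	long len;

	/* "example.com" is a placeholder; the upcall runs /sbin/request-key */
	key = request_key("dns_resolver", "example.com", NULL,
			  KEY_SPEC_SESSION_KEYRING);
	if (key == -1) {
		perror("request_key");
		return 1;
	}

	/* keyctl_read_alloc() mirrors what "keyctl read/print/pipe" do */
	len = keyctl_read_alloc(key, &payload);
	if (len == -1) {
		perror("keyctl_read_alloc");
		return 1;
	}

	printf("%.*s\n", (int)len, (char *)payload);
	free(payload);
	return 0;
}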
+13 -2
MAINTAINERS
··· 1010 S: Maintained 1011 F: arch/arm/mach-s5p*/ 1012 1013 ARM/SAMSUNG S5P SERIES FIMC SUPPORT 1014 M: Kyungmin Park <kyungmin.park@samsung.com> 1015 M: Sylwester Nawrocki <s.nawrocki@samsung.com> ··· 1476 1477 BONDING DRIVER 1478 M: Jay Vosburgh <fubar@us.ibm.com> 1479 L: netdev@vger.kernel.org 1480 W: http://sourceforge.net/projects/bonding/ 1481 S: Supported ··· 2043 F: drivers/scsi/dc395x.* 2044 2045 DCCP PROTOCOL 2046 - M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 2047 L: dccp@vger.kernel.org 2048 W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp 2049 S: Maintained ··· 3529 F: Documentation/hwmon/jc42 3530 3531 JFS FILESYSTEM 3532 - M: Dave Kleikamp <shaggy@linux.vnet.ibm.com> 3533 L: jfs-discussion@lists.sourceforge.net 3534 W: http://jfs.sourceforge.net/ 3535 T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git ··· 5181 5182 RAPIDIO SUBSYSTEM 5183 M: Matt Porter <mporter@kernel.crashing.org> 5184 S: Maintained 5185 F: drivers/rapidio/ 5186
··· 1010 S: Maintained 1011 F: arch/arm/mach-s5p*/ 1012 1013 + ARM/SAMSUNG MOBILE MACHINE SUPPORT 1014 + M: Kyungmin Park <kyungmin.park@samsung.com> 1015 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1016 + S: Maintained 1017 + F: arch/arm/mach-s5pv210/mach-aquila.c 1018 + F: arch/arm/mach-s5pv210/mach-goni.c 1019 + F: arch/arm/mach-exynos4/mach-universal_c210.c 1020 + F: arch/arm/mach-exynos4/mach-nuri.c 1021 + 1022 ARM/SAMSUNG S5P SERIES FIMC SUPPORT 1023 M: Kyungmin Park <kyungmin.park@samsung.com> 1024 M: Sylwester Nawrocki <s.nawrocki@samsung.com> ··· 1467 1468 BONDING DRIVER 1469 M: Jay Vosburgh <fubar@us.ibm.com> 1470 + M: Andy Gospodarek <andy@greyhouse.net> 1471 L: netdev@vger.kernel.org 1472 W: http://sourceforge.net/projects/bonding/ 1473 S: Supported ··· 2033 F: drivers/scsi/dc395x.* 2034 2035 DCCP PROTOCOL 2036 + M: Gerrit Renker <gerrit@erg.abdn.ac.uk> 2037 L: dccp@vger.kernel.org 2038 W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp 2039 S: Maintained ··· 3519 F: Documentation/hwmon/jc42 3520 3521 JFS FILESYSTEM 3522 + M: Dave Kleikamp <shaggy@kernel.org> 3523 L: jfs-discussion@lists.sourceforge.net 3524 W: http://jfs.sourceforge.net/ 3525 T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git ··· 5171 5172 RAPIDIO SUBSYSTEM 5173 M: Matt Porter <mporter@kernel.crashing.org> 5174 + M: Alexandre Bounine <alexandre.bounine@idt.com> 5175 S: Maintained 5176 F: drivers/rapidio/ 5177
+1 -1
Makefile
··· 1 VERSION = 2 2 PATCHLEVEL = 6 3 SUBLEVEL = 38 4 - EXTRAVERSION = -rc7 5 NAME = Flesh-Eating Bats with Fangs 6 7 # *DOCUMENTATION*
··· 1 VERSION = 2 2 PATCHLEVEL = 6 3 SUBLEVEL = 38 4 + EXTRAVERSION = -rc8 5 NAME = Flesh-Eating Bats with Fangs 6 7 # *DOCUMENTATION*
+1
arch/alpha/Kconfig
··· 11 select HAVE_GENERIC_HARDIRQS 12 select GENERIC_IRQ_PROBE 13 select AUTO_IRQ_AFFINITY if SMP 14 help 15 The Alpha is a 64-bit general-purpose processor designed and 16 marketed by the Digital Equipment Corporation of blessed memory,
··· 11 select HAVE_GENERIC_HARDIRQS 12 select GENERIC_IRQ_PROBE 13 select AUTO_IRQ_AFFINITY if SMP 14 + select GENERIC_HARDIRQS_NO_DEPRECATED 15 help 16 The Alpha is a 64-bit general-purpose processor designed and 17 marketed by the Digital Equipment Corporation of blessed memory,
+9 -4
arch/alpha/kernel/irq.c
··· 44 45 int irq_select_affinity(unsigned int irq) 46 { 47 - struct irq_desc *desc = irq_to_desc[irq]; 48 static int last_cpu; 49 int cpu = last_cpu + 1; 50 51 - if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq]) 52 return 1; 53 54 while (!cpu_possible(cpu) || ··· 61 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 62 last_cpu = cpu; 63 64 - cpumask_copy(desc->affinity, cpumask_of(cpu)); 65 - get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu)); 66 return 0; 67 } 68 #endif /* CONFIG_SMP */
··· 44 45 int irq_select_affinity(unsigned int irq) 46 { 47 + struct irq_data *data = irq_get_irq_data(irq); 48 + struct irq_chip *chip; 49 static int last_cpu; 50 int cpu = last_cpu + 1; 51 52 + if (!data) 53 + return 1; 54 + chip = irq_data_get_irq_chip(data); 55 + 56 + if (!chip->irq_set_affinity || irq_user_affinity[irq]) 57 return 1; 58 59 while (!cpu_possible(cpu) || ··· 56 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 57 last_cpu = cpu; 58 59 + cpumask_copy(data->affinity, cpumask_of(cpu)); 60 + chip->irq_set_affinity(data, cpumask_of(cpu), false); 61 return 0; 62 } 63 #endif /* CONFIG_SMP */
+3 -8
arch/alpha/kernel/irq_alpha.c
··· 228 void __init 229 init_rtc_irq(void) 230 { 231 - struct irq_desc *desc = irq_to_desc(RTC_IRQ); 232 - 233 - if (desc) { 234 - desc->status |= IRQ_DISABLED; 235 - set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, 236 - handle_simple_irq, "RTC"); 237 - setup_irq(RTC_IRQ, &timer_irqaction); 238 - } 239 } 240 241 /* Dummy irqactions. */
··· 228 void __init 229 init_rtc_irq(void) 230 { 231 + set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, 232 + handle_simple_irq, "RTC"); 233 + setup_irq(RTC_IRQ, &timer_irqaction); 234 } 235 236 /* Dummy irqactions. */
+10 -8
arch/alpha/kernel/irq_i8259.c
··· 33 } 34 35 inline void 36 - i8259a_enable_irq(unsigned int irq) 37 { 38 spin_lock(&i8259_irq_lock); 39 - i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 40 spin_unlock(&i8259_irq_lock); 41 } 42 ··· 47 } 48 49 void 50 - i8259a_disable_irq(unsigned int irq) 51 { 52 spin_lock(&i8259_irq_lock); 53 - __i8259a_disable_irq(irq); 54 spin_unlock(&i8259_irq_lock); 55 } 56 57 void 58 - i8259a_mask_and_ack_irq(unsigned int irq) 59 { 60 spin_lock(&i8259_irq_lock); 61 __i8259a_disable_irq(irq); 62 ··· 73 74 struct irq_chip i8259a_irq_type = { 75 .name = "XT-PIC", 76 - .unmask = i8259a_enable_irq, 77 - .mask = i8259a_disable_irq, 78 - .mask_ack = i8259a_mask_and_ack_irq, 79 }; 80 81 void __init
··· 33 } 34 35 inline void 36 + i8259a_enable_irq(struct irq_data *d) 37 { 38 spin_lock(&i8259_irq_lock); 39 + i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); 40 spin_unlock(&i8259_irq_lock); 41 } 42 ··· 47 } 48 49 void 50 + i8259a_disable_irq(struct irq_data *d) 51 { 52 spin_lock(&i8259_irq_lock); 53 + __i8259a_disable_irq(d->irq); 54 spin_unlock(&i8259_irq_lock); 55 } 56 57 void 58 + i8259a_mask_and_ack_irq(struct irq_data *d) 59 { 60 + unsigned int irq = d->irq; 61 + 62 spin_lock(&i8259_irq_lock); 63 __i8259a_disable_irq(irq); 64 ··· 71 72 struct irq_chip i8259a_irq_type = { 73 .name = "XT-PIC", 74 + .irq_unmask = i8259a_enable_irq, 75 + .irq_mask = i8259a_disable_irq, 76 + .irq_mask_ack = i8259a_mask_and_ack_irq, 77 }; 78 79 void __init
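(Not part of this merge, just a hedged illustration.) Most of the alpha changes above and below follow one mechanical pattern: irq_chip callbacks stop taking a bare IRQ number and instead take a struct irq_data *, the .unmask/.mask/.mask_ack hooks are renamed .irq_unmask/.irq_mask/.irq_mask_ack, and the open-coded irq_to_desc(i)->status |= IRQ_LEVEL becomes irq_set_status_flags(i, IRQ_LEVEL). A minimal sketch of the converted shape, using a made-up "foo" controller rather than any real platform:

#include <linux/init.h>
#include <linux/irq.h>

static unsigned long foo_cached_mask;

static void
foo_mask_irq(struct irq_data *d)
{
	/* d->irq replaces the old bare "unsigned int irq" argument */
	foo_cached_mask |= 1UL << (d->irq - 16);
}

static void
foo_unmask_irq(struct irq_data *d)
{
	foo_cached_mask &= ~(1UL << (d->irq - 16));
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	/* .unmask/.mask/.mask_ack become .irq_unmask/.irq_mask/.irq_mask_ack */
	.irq_unmask	= foo_unmask_irq,
	.irq_mask	= foo_mask_irq,
	.irq_mask_ack	= foo_mask_irq,
};

static void __init
foo_init_irqs(void)
{
	long i;

	for (i = 16; i < 32; ++i) {
		set_irq_chip_and_handler(i, &foo_irq_chip, handle_level_irq);
		/* replaces open-coded irq_to_desc(i)->status |= IRQ_LEVEL */
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}

The pyxis, srm, alcor, cabriolet and later hunks are instances of this same transformation; dp264 and titan additionally convert .set_affinity to .irq_set_affinity, which now receives the irq_data pointer plus a bool force argument.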
+3 -5
arch/alpha/kernel/irq_impl.h
··· 31 32 extern void common_init_isa_dma(void); 33 34 - extern void i8259a_enable_irq(unsigned int); 35 - extern void i8259a_disable_irq(unsigned int); 36 - extern void i8259a_mask_and_ack_irq(unsigned int); 37 - extern unsigned int i8259a_startup_irq(unsigned int); 38 - extern void i8259a_end_irq(unsigned int); 39 extern struct irq_chip i8259a_irq_type; 40 extern void init_i8259a_irqs(void); 41
··· 31 32 extern void common_init_isa_dma(void); 33 34 + extern void i8259a_enable_irq(struct irq_data *d); 35 + extern void i8259a_disable_irq(struct irq_data *d); 36 + extern void i8259a_mask_and_ack_irq(struct irq_data *d); 37 extern struct irq_chip i8259a_irq_type; 38 extern void init_i8259a_irqs(void); 39
+10 -10
arch/alpha/kernel/irq_pyxis.c
··· 29 } 30 31 static inline void 32 - pyxis_enable_irq(unsigned int irq) 33 { 34 - pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 35 } 36 37 static void 38 - pyxis_disable_irq(unsigned int irq) 39 { 40 - pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 41 } 42 43 static void 44 - pyxis_mask_and_ack_irq(unsigned int irq) 45 { 46 - unsigned long bit = 1UL << (irq - 16); 47 unsigned long mask = cached_irq_mask &= ~bit; 48 49 /* Disable the interrupt. */ ··· 58 59 static struct irq_chip pyxis_irq_type = { 60 .name = "PYXIS", 61 - .mask_ack = pyxis_mask_and_ack_irq, 62 - .mask = pyxis_disable_irq, 63 - .unmask = pyxis_enable_irq, 64 }; 65 66 void ··· 103 if ((ignore_mask >> i) & 1) 104 continue; 105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); 106 - irq_to_desc(i)->status |= IRQ_LEVEL; 107 } 108 109 setup_irq(16+7, &isa_cascade_irqaction);
··· 29 } 30 31 static inline void 32 + pyxis_enable_irq(struct irq_data *d) 33 { 34 + pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); 35 } 36 37 static void 38 + pyxis_disable_irq(struct irq_data *d) 39 { 40 + pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); 41 } 42 43 static void 44 + pyxis_mask_and_ack_irq(struct irq_data *d) 45 { 46 + unsigned long bit = 1UL << (d->irq - 16); 47 unsigned long mask = cached_irq_mask &= ~bit; 48 49 /* Disable the interrupt. */ ··· 58 59 static struct irq_chip pyxis_irq_type = { 60 .name = "PYXIS", 61 + .irq_mask_ack = pyxis_mask_and_ack_irq, 62 + .irq_mask = pyxis_disable_irq, 63 + .irq_unmask = pyxis_enable_irq, 64 }; 65 66 void ··· 103 if ((ignore_mask >> i) & 1) 104 continue; 105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); 106 + irq_set_status_flags(i, IRQ_LEVEL); 107 } 108 109 setup_irq(16+7, &isa_cascade_irqaction);
+8 -8
arch/alpha/kernel/irq_srm.c
··· 18 DEFINE_SPINLOCK(srm_irq_lock); 19 20 static inline void 21 - srm_enable_irq(unsigned int irq) 22 { 23 spin_lock(&srm_irq_lock); 24 - cserve_ena(irq - 16); 25 spin_unlock(&srm_irq_lock); 26 } 27 28 static void 29 - srm_disable_irq(unsigned int irq) 30 { 31 spin_lock(&srm_irq_lock); 32 - cserve_dis(irq - 16); 33 spin_unlock(&srm_irq_lock); 34 } 35 36 /* Handle interrupts from the SRM, assuming no additional weirdness. */ 37 static struct irq_chip srm_irq_type = { 38 .name = "SRM", 39 - .unmask = srm_enable_irq, 40 - .mask = srm_disable_irq, 41 - .mask_ack = srm_disable_irq, 42 }; 43 44 void __init ··· 52 if (i < 64 && ((ignore_mask >> i) & 1)) 53 continue; 54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); 55 - irq_to_desc(i)->status |= IRQ_LEVEL; 56 } 57 } 58
··· 18 DEFINE_SPINLOCK(srm_irq_lock); 19 20 static inline void 21 + srm_enable_irq(struct irq_data *d) 22 { 23 spin_lock(&srm_irq_lock); 24 + cserve_ena(d->irq - 16); 25 spin_unlock(&srm_irq_lock); 26 } 27 28 static void 29 + srm_disable_irq(struct irq_data *d) 30 { 31 spin_lock(&srm_irq_lock); 32 + cserve_dis(d->irq - 16); 33 spin_unlock(&srm_irq_lock); 34 } 35 36 /* Handle interrupts from the SRM, assuming no additional weirdness. */ 37 static struct irq_chip srm_irq_type = { 38 .name = "SRM", 39 + .irq_unmask = srm_enable_irq, 40 + .irq_mask = srm_disable_irq, 41 + .irq_mask_ack = srm_disable_irq, 42 }; 43 44 void __init ··· 52 if (i < 64 && ((ignore_mask >> i) & 1)) 53 continue; 54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); 55 + irq_set_status_flags(i, IRQ_LEVEL); 56 } 57 } 58
+14 -14
arch/alpha/kernel/sys_alcor.c
··· 44 } 45 46 static inline void 47 - alcor_enable_irq(unsigned int irq) 48 { 49 - alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 50 } 51 52 static void 53 - alcor_disable_irq(unsigned int irq) 54 { 55 - alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 56 } 57 58 static void 59 - alcor_mask_and_ack_irq(unsigned int irq) 60 { 61 - alcor_disable_irq(irq); 62 63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 64 - *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); 65 *(vuip)GRU_INT_CLEAR = 0; mb(); 66 } 67 68 static void 69 - alcor_isa_mask_and_ack_irq(unsigned int irq) 70 { 71 - i8259a_mask_and_ack_irq(irq); 72 73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); ··· 77 78 static struct irq_chip alcor_irq_type = { 79 .name = "ALCOR", 80 - .unmask = alcor_enable_irq, 81 - .mask = alcor_disable_irq, 82 - .mask_ack = alcor_mask_and_ack_irq, 83 }; 84 85 static void ··· 126 if (i >= 16+20 && i <= 16+30) 127 continue; 128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); 129 - irq_to_desc(i)->status |= IRQ_LEVEL; 130 } 131 - i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; 132 133 init_i8259a_irqs(); 134 common_init_isa_dma();
··· 44 } 45 46 static inline void 47 + alcor_enable_irq(struct irq_data *d) 48 { 49 + alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); 50 } 51 52 static void 53 + alcor_disable_irq(struct irq_data *d) 54 { 55 + alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); 56 } 57 58 static void 59 + alcor_mask_and_ack_irq(struct irq_data *d) 60 { 61 + alcor_disable_irq(d); 62 63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 64 + *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb(); 65 *(vuip)GRU_INT_CLEAR = 0; mb(); 66 } 67 68 static void 69 + alcor_isa_mask_and_ack_irq(struct irq_data *d) 70 { 71 + i8259a_mask_and_ack_irq(d); 72 73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); ··· 77 78 static struct irq_chip alcor_irq_type = { 79 .name = "ALCOR", 80 + .irq_unmask = alcor_enable_irq, 81 + .irq_mask = alcor_disable_irq, 82 + .irq_mask_ack = alcor_mask_and_ack_irq, 83 }; 84 85 static void ··· 126 if (i >= 16+20 && i <= 16+30) 127 continue; 128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); 129 + irq_set_status_flags(i, IRQ_LEVEL); 130 } 131 + i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; 132 133 init_i8259a_irqs(); 134 common_init_isa_dma();
+8 -8
arch/alpha/kernel/sys_cabriolet.c
··· 46 } 47 48 static inline void 49 - cabriolet_enable_irq(unsigned int irq) 50 { 51 - cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); 52 } 53 54 static void 55 - cabriolet_disable_irq(unsigned int irq) 56 { 57 - cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); 58 } 59 60 static struct irq_chip cabriolet_irq_type = { 61 .name = "CABRIOLET", 62 - .unmask = cabriolet_enable_irq, 63 - .mask = cabriolet_disable_irq, 64 - .mask_ack = cabriolet_disable_irq, 65 }; 66 67 static void ··· 107 for (i = 16; i < 35; ++i) { 108 set_irq_chip_and_handler(i, &cabriolet_irq_type, 109 handle_level_irq); 110 - irq_to_desc(i)->status |= IRQ_LEVEL; 111 } 112 } 113
··· 46 } 47 48 static inline void 49 + cabriolet_enable_irq(struct irq_data *d) 50 { 51 + cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq)); 52 } 53 54 static void 55 + cabriolet_disable_irq(struct irq_data *d) 56 { 57 + cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq); 58 } 59 60 static struct irq_chip cabriolet_irq_type = { 61 .name = "CABRIOLET", 62 + .irq_unmask = cabriolet_enable_irq, 63 + .irq_mask = cabriolet_disable_irq, 64 + .irq_mask_ack = cabriolet_disable_irq, 65 }; 66 67 static void ··· 107 for (i = 16; i < 35; ++i) { 108 set_irq_chip_and_handler(i, &cabriolet_irq_type, 109 handle_level_irq); 110 + irq_set_status_flags(i, IRQ_LEVEL); 111 } 112 } 113
+27 -25
arch/alpha/kernel/sys_dp264.c
··· 98 } 99 100 static void 101 - dp264_enable_irq(unsigned int irq) 102 { 103 spin_lock(&dp264_irq_lock); 104 - cached_irq_mask |= 1UL << irq; 105 tsunami_update_irq_hw(cached_irq_mask); 106 spin_unlock(&dp264_irq_lock); 107 } 108 109 static void 110 - dp264_disable_irq(unsigned int irq) 111 { 112 spin_lock(&dp264_irq_lock); 113 - cached_irq_mask &= ~(1UL << irq); 114 tsunami_update_irq_hw(cached_irq_mask); 115 spin_unlock(&dp264_irq_lock); 116 } 117 118 static void 119 - clipper_enable_irq(unsigned int irq) 120 { 121 spin_lock(&dp264_irq_lock); 122 - cached_irq_mask |= 1UL << (irq - 16); 123 tsunami_update_irq_hw(cached_irq_mask); 124 spin_unlock(&dp264_irq_lock); 125 } 126 127 static void 128 - clipper_disable_irq(unsigned int irq) 129 { 130 spin_lock(&dp264_irq_lock); 131 - cached_irq_mask &= ~(1UL << (irq - 16)); 132 tsunami_update_irq_hw(cached_irq_mask); 133 spin_unlock(&dp264_irq_lock); 134 } ··· 149 } 150 151 static int 152 - dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) 153 - { 154 spin_lock(&dp264_irq_lock); 155 - cpu_set_irq_affinity(irq, *affinity); 156 tsunami_update_irq_hw(cached_irq_mask); 157 spin_unlock(&dp264_irq_lock); 158 ··· 161 } 162 163 static int 164 - clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) 165 - { 166 spin_lock(&dp264_irq_lock); 167 - cpu_set_irq_affinity(irq - 16, *affinity); 168 tsunami_update_irq_hw(cached_irq_mask); 169 spin_unlock(&dp264_irq_lock); 170 ··· 173 } 174 175 static struct irq_chip dp264_irq_type = { 176 - .name = "DP264", 177 - .unmask = dp264_enable_irq, 178 - .mask = dp264_disable_irq, 179 - .mask_ack = dp264_disable_irq, 180 - .set_affinity = dp264_set_affinity, 181 }; 182 183 static struct irq_chip clipper_irq_type = { 184 - .name = "CLIPPER", 185 - .unmask = clipper_enable_irq, 186 - .mask = clipper_disable_irq, 187 - .mask_ack = clipper_disable_irq, 188 - .set_affinity = clipper_set_affinity, 189 }; 190 191 static void ··· 270 { 271 long i; 272 for (i = imin; i <= imax; ++i) { 273 - irq_to_desc(i)->status |= IRQ_LEVEL; 274 set_irq_chip_and_handler(i, ops, handle_level_irq); 275 } 276 } 277
··· 98 } 99 100 static void 101 + dp264_enable_irq(struct irq_data *d) 102 { 103 spin_lock(&dp264_irq_lock); 104 + cached_irq_mask |= 1UL << d->irq; 105 tsunami_update_irq_hw(cached_irq_mask); 106 spin_unlock(&dp264_irq_lock); 107 } 108 109 static void 110 + dp264_disable_irq(struct irq_data *d) 111 { 112 spin_lock(&dp264_irq_lock); 113 + cached_irq_mask &= ~(1UL << d->irq); 114 tsunami_update_irq_hw(cached_irq_mask); 115 spin_unlock(&dp264_irq_lock); 116 } 117 118 static void 119 + clipper_enable_irq(struct irq_data *d) 120 { 121 spin_lock(&dp264_irq_lock); 122 + cached_irq_mask |= 1UL << (d->irq - 16); 123 tsunami_update_irq_hw(cached_irq_mask); 124 spin_unlock(&dp264_irq_lock); 125 } 126 127 static void 128 + clipper_disable_irq(struct irq_data *d) 129 { 130 spin_lock(&dp264_irq_lock); 131 + cached_irq_mask &= ~(1UL << (d->irq - 16)); 132 tsunami_update_irq_hw(cached_irq_mask); 133 spin_unlock(&dp264_irq_lock); 134 } ··· 149 } 150 151 static int 152 + dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity, 153 + bool force) 154 + { 155 spin_lock(&dp264_irq_lock); 156 + cpu_set_irq_affinity(d->irq, *affinity); 157 tsunami_update_irq_hw(cached_irq_mask); 158 spin_unlock(&dp264_irq_lock); 159 ··· 160 } 161 162 static int 163 + clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity, 164 + bool force) 165 + { 166 spin_lock(&dp264_irq_lock); 167 + cpu_set_irq_affinity(d->irq - 16, *affinity); 168 tsunami_update_irq_hw(cached_irq_mask); 169 spin_unlock(&dp264_irq_lock); 170 ··· 171 } 172 173 static struct irq_chip dp264_irq_type = { 174 + .name = "DP264", 175 + .irq_unmask = dp264_enable_irq, 176 + .irq_mask = dp264_disable_irq, 177 + .irq_mask_ack = dp264_disable_irq, 178 + .irq_set_affinity = dp264_set_affinity, 179 }; 180 181 static struct irq_chip clipper_irq_type = { 182 + .name = "CLIPPER", 183 + .irq_unmask = clipper_enable_irq, 184 + .irq_mask = clipper_disable_irq, 185 + .irq_mask_ack = clipper_disable_irq, 186 + .irq_set_affinity = clipper_set_affinity, 187 }; 188 189 static void ··· 268 { 269 long i; 270 for (i = imin; i <= imax; ++i) { 271 set_irq_chip_and_handler(i, ops, handle_level_irq); 272 + irq_set_status_flags(i, IRQ_LEVEL); 273 } 274 } 275
+9 -9
arch/alpha/kernel/sys_eb64p.c
··· 44 } 45 46 static inline void 47 - eb64p_enable_irq(unsigned int irq) 48 { 49 - eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 50 } 51 52 static void 53 - eb64p_disable_irq(unsigned int irq) 54 { 55 - eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); 56 } 57 58 static struct irq_chip eb64p_irq_type = { 59 .name = "EB64P", 60 - .unmask = eb64p_enable_irq, 61 - .mask = eb64p_disable_irq, 62 - .mask_ack = eb64p_disable_irq, 63 }; 64 65 static void ··· 118 init_i8259a_irqs(); 119 120 for (i = 16; i < 32; ++i) { 121 - irq_to_desc(i)->status |= IRQ_LEVEL; 122 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); 123 - } 124 125 common_init_isa_dma(); 126 setup_irq(16+5, &isa_cascade_irqaction);
··· 44 } 45 46 static inline void 47 + eb64p_enable_irq(struct irq_data *d) 48 { 49 + eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); 50 } 51 52 static void 53 + eb64p_disable_irq(struct irq_data *d) 54 { 55 + eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq); 56 } 57 58 static struct irq_chip eb64p_irq_type = { 59 .name = "EB64P", 60 + .irq_unmask = eb64p_enable_irq, 61 + .irq_mask = eb64p_disable_irq, 62 + .irq_mask_ack = eb64p_disable_irq, 63 }; 64 65 static void ··· 118 init_i8259a_irqs(); 119 120 for (i = 16; i < 32; ++i) { 121 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); 122 + irq_set_status_flags(i, IRQ_LEVEL); 123 + } 124 125 common_init_isa_dma(); 126 setup_irq(16+5, &isa_cascade_irqaction);
+8 -6
arch/alpha/kernel/sys_eiger.c
··· 51 } 52 53 static inline void 54 - eiger_enable_irq(unsigned int irq) 55 { 56 unsigned long mask; 57 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 58 eiger_update_irq_hw(irq, mask); 59 } 60 61 static void 62 - eiger_disable_irq(unsigned int irq) 63 { 64 unsigned long mask; 65 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 66 eiger_update_irq_hw(irq, mask); ··· 70 71 static struct irq_chip eiger_irq_type = { 72 .name = "EIGER", 73 - .unmask = eiger_enable_irq, 74 - .mask = eiger_disable_irq, 75 - .mask_ack = eiger_disable_irq, 76 }; 77 78 static void ··· 138 init_i8259a_irqs(); 139 140 for (i = 16; i < 128; ++i) { 141 - irq_to_desc(i)->status |= IRQ_LEVEL; 142 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); 143 } 144 } 145
··· 51 } 52 53 static inline void 54 + eiger_enable_irq(struct irq_data *d) 55 { 56 + unsigned int irq = d->irq; 57 unsigned long mask; 58 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 59 eiger_update_irq_hw(irq, mask); 60 } 61 62 static void 63 + eiger_disable_irq(struct irq_data *d) 64 { 65 + unsigned int irq = d->irq; 66 unsigned long mask; 67 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 68 eiger_update_irq_hw(irq, mask); ··· 68 69 static struct irq_chip eiger_irq_type = { 70 .name = "EIGER", 71 + .irq_unmask = eiger_enable_irq, 72 + .irq_mask = eiger_disable_irq, 73 + .irq_mask_ack = eiger_disable_irq, 74 }; 75 76 static void ··· 136 init_i8259a_irqs(); 137 138 for (i = 16; i < 128; ++i) { 139 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); 140 + irq_set_status_flags(i, IRQ_LEVEL); 141 } 142 } 143
+12 -12
arch/alpha/kernel/sys_jensen.c
··· 63 */ 64 65 static void 66 - jensen_local_enable(unsigned int irq) 67 { 68 /* the parport is really hw IRQ 1, silly Jensen. */ 69 - if (irq == 7) 70 - i8259a_enable_irq(1); 71 } 72 73 static void 74 - jensen_local_disable(unsigned int irq) 75 { 76 /* the parport is really hw IRQ 1, silly Jensen. */ 77 - if (irq == 7) 78 - i8259a_disable_irq(1); 79 } 80 81 static void 82 - jensen_local_mask_ack(unsigned int irq) 83 { 84 /* the parport is really hw IRQ 1, silly Jensen. */ 85 - if (irq == 7) 86 - i8259a_mask_and_ack_irq(1); 87 } 88 89 static struct irq_chip jensen_local_irq_type = { 90 .name = "LOCAL", 91 - .unmask = jensen_local_enable, 92 - .mask = jensen_local_disable, 93 - .mask_ack = jensen_local_mask_ack, 94 }; 95 96 static void
··· 63 */ 64 65 static void 66 + jensen_local_enable(struct irq_data *d) 67 { 68 /* the parport is really hw IRQ 1, silly Jensen. */ 69 + if (d->irq == 7) 70 + i8259a_enable_irq(d); 71 } 72 73 static void 74 + jensen_local_disable(struct irq_data *d) 75 { 76 /* the parport is really hw IRQ 1, silly Jensen. */ 77 + if (d->irq == 7) 78 + i8259a_disable_irq(d); 79 } 80 81 static void 82 + jensen_local_mask_ack(struct irq_data *d) 83 { 84 /* the parport is really hw IRQ 1, silly Jensen. */ 85 + if (d->irq == 7) 86 + i8259a_mask_and_ack_irq(d); 87 } 88 89 static struct irq_chip jensen_local_irq_type = { 90 .name = "LOCAL", 91 + .irq_unmask = jensen_local_enable, 92 + .irq_mask = jensen_local_disable, 93 + .irq_mask_ack = jensen_local_mask_ack, 94 }; 95 96 static void
+19 -23
arch/alpha/kernel/sys_marvel.c
··· 104 } 105 106 static void 107 - io7_enable_irq(unsigned int irq) 108 { 109 volatile unsigned long *ctl; 110 struct io7 *io7; 111 112 ctl = io7_get_irq_ctl(irq, &io7); ··· 116 __func__, irq); 117 return; 118 } 119 - 120 spin_lock(&io7->irq_lock); 121 *ctl |= 1UL << 24; 122 mb(); ··· 125 } 126 127 static void 128 - io7_disable_irq(unsigned int irq) 129 { 130 volatile unsigned long *ctl; 131 struct io7 *io7; 132 133 ctl = io7_get_irq_ctl(irq, &io7); ··· 137 __func__, irq); 138 return; 139 } 140 - 141 spin_lock(&io7->irq_lock); 142 *ctl &= ~(1UL << 24); 143 mb(); ··· 146 } 147 148 static void 149 - marvel_irq_noop(unsigned int irq) 150 - { 151 - return; 152 - } 153 - 154 - static unsigned int 155 - marvel_irq_noop_return(unsigned int irq) 156 - { 157 - return 0; 158 } 159 160 static struct irq_chip marvel_legacy_irq_type = { 161 .name = "LEGACY", 162 - .mask = marvel_irq_noop, 163 - .unmask = marvel_irq_noop, 164 }; 165 166 static struct irq_chip io7_lsi_irq_type = { 167 .name = "LSI", 168 - .unmask = io7_enable_irq, 169 - .mask = io7_disable_irq, 170 - .mask_ack = io7_disable_irq, 171 }; 172 173 static struct irq_chip io7_msi_irq_type = { 174 .name = "MSI", 175 - .unmask = io7_enable_irq, 176 - .mask = io7_disable_irq, 177 - .ack = marvel_irq_noop, 178 }; 179 180 static void ··· 276 277 /* Set up the lsi irqs. */ 278 for (i = 0; i < 128; ++i) { 279 - irq_to_desc(base + i)->status |= IRQ_LEVEL; 280 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); 281 } 282 283 /* Disable the implemented irqs in hardware. */ ··· 290 291 /* Set up the msi irqs. */ 292 for (i = 128; i < (128 + 512); ++i) { 293 - irq_to_desc(base + i)->status |= IRQ_LEVEL; 294 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); 295 } 296 297 for (i = 0; i < 16; ++i)
··· 104 } 105 106 static void 107 + io7_enable_irq(struct irq_data *d) 108 { 109 volatile unsigned long *ctl; 110 + unsigned int irq = d->irq; 111 struct io7 *io7; 112 113 ctl = io7_get_irq_ctl(irq, &io7); ··· 115 __func__, irq); 116 return; 117 } 118 + 119 spin_lock(&io7->irq_lock); 120 *ctl |= 1UL << 24; 121 mb(); ··· 124 } 125 126 static void 127 + io7_disable_irq(struct irq_data *d) 128 { 129 volatile unsigned long *ctl; 130 + unsigned int irq = d->irq; 131 struct io7 *io7; 132 133 ctl = io7_get_irq_ctl(irq, &io7); ··· 135 __func__, irq); 136 return; 137 } 138 + 139 spin_lock(&io7->irq_lock); 140 *ctl &= ~(1UL << 24); 141 mb(); ··· 144 } 145 146 static void 147 + marvel_irq_noop(struct irq_data *d) 148 + { 149 + return; 150 } 151 152 static struct irq_chip marvel_legacy_irq_type = { 153 .name = "LEGACY", 154 + .irq_mask = marvel_irq_noop, 155 + .irq_unmask = marvel_irq_noop, 156 }; 157 158 static struct irq_chip io7_lsi_irq_type = { 159 .name = "LSI", 160 + .irq_unmask = io7_enable_irq, 161 + .irq_mask = io7_disable_irq, 162 + .irq_mask_ack = io7_disable_irq, 163 }; 164 165 static struct irq_chip io7_msi_irq_type = { 166 .name = "MSI", 167 + .irq_unmask = io7_enable_irq, 168 + .irq_mask = io7_disable_irq, 169 + .irq_ack = marvel_irq_noop, 170 }; 171 172 static void ··· 280 281 /* Set up the lsi irqs. */ 282 for (i = 0; i < 128; ++i) { 283 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); 284 + irq_set_status_flags(i, IRQ_LEVEL); 285 } 286 287 /* Disable the implemented irqs in hardware. */ ··· 294 295 /* Set up the msi irqs. */ 296 for (i = 128; i < (128 + 512); ++i) { 297 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); 298 + irq_set_status_flags(i, IRQ_LEVEL); 299 } 300 301 for (i = 0; i < 16; ++i)
+8 -8
arch/alpha/kernel/sys_mikasa.c
··· 43 } 44 45 static inline void 46 - mikasa_enable_irq(unsigned int irq) 47 { 48 - mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); 49 } 50 51 static void 52 - mikasa_disable_irq(unsigned int irq) 53 { 54 - mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); 55 } 56 57 static struct irq_chip mikasa_irq_type = { 58 .name = "MIKASA", 59 - .unmask = mikasa_enable_irq, 60 - .mask = mikasa_disable_irq, 61 - .mask_ack = mikasa_disable_irq, 62 }; 63 64 static void ··· 98 mikasa_update_irq_hw(0); 99 100 for (i = 16; i < 32; ++i) { 101 - irq_to_desc(i)->status |= IRQ_LEVEL; 102 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); 103 } 104 105 init_i8259a_irqs();
··· 43 } 44 45 static inline void 46 + mikasa_enable_irq(struct irq_data *d) 47 { 48 + mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16)); 49 } 50 51 static void 52 + mikasa_disable_irq(struct irq_data *d) 53 { 54 + mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16))); 55 } 56 57 static struct irq_chip mikasa_irq_type = { 58 .name = "MIKASA", 59 + .irq_unmask = mikasa_enable_irq, 60 + .irq_mask = mikasa_disable_irq, 61 + .irq_mask_ack = mikasa_disable_irq, 62 }; 63 64 static void ··· 98 mikasa_update_irq_hw(0); 99 100 for (i = 16; i < 32; ++i) { 101 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); 102 + irq_set_status_flags(i, IRQ_LEVEL); 103 } 104 105 init_i8259a_irqs();
+8 -8
arch/alpha/kernel/sys_noritake.c
··· 48 } 49 50 static void 51 - noritake_enable_irq(unsigned int irq) 52 { 53 - noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); 54 } 55 56 static void 57 - noritake_disable_irq(unsigned int irq) 58 { 59 - noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); 60 } 61 62 static struct irq_chip noritake_irq_type = { 63 .name = "NORITAKE", 64 - .unmask = noritake_enable_irq, 65 - .mask = noritake_disable_irq, 66 - .mask_ack = noritake_disable_irq, 67 }; 68 69 static void ··· 127 outw(0, 0x54c); 128 129 for (i = 16; i < 48; ++i) { 130 - irq_to_desc(i)->status |= IRQ_LEVEL; 131 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); 132 } 133 134 init_i8259a_irqs();
··· 48 } 49 50 static void 51 + noritake_enable_irq(struct irq_data *d) 52 { 53 + noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16)); 54 } 55 56 static void 57 + noritake_disable_irq(struct irq_data *d) 58 { 59 + noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16))); 60 } 61 62 static struct irq_chip noritake_irq_type = { 63 .name = "NORITAKE", 64 + .irq_unmask = noritake_enable_irq, 65 + .irq_mask = noritake_disable_irq, 66 + .irq_mask_ack = noritake_disable_irq, 67 }; 68 69 static void ··· 127 outw(0, 0x54c); 128 129 for (i = 16; i < 48; ++i) { 130 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); 131 + irq_set_status_flags(i, IRQ_LEVEL); 132 } 133 134 init_i8259a_irqs();
+10 -7
arch/alpha/kernel/sys_rawhide.c
··· 56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) 57 58 static inline void 59 - rawhide_enable_irq(unsigned int irq) 60 { 61 unsigned int mask, hose; 62 63 irq -= 16; 64 hose = irq / 24; ··· 77 } 78 79 static void 80 - rawhide_disable_irq(unsigned int irq) 81 { 82 unsigned int mask, hose; 83 84 irq -= 16; 85 hose = irq / 24; ··· 98 } 99 100 static void 101 - rawhide_mask_and_ack_irq(unsigned int irq) 102 { 103 unsigned int mask, mask1, hose; 104 105 irq -= 16; 106 hose = irq / 24; ··· 126 127 static struct irq_chip rawhide_irq_type = { 128 .name = "RAWHIDE", 129 - .unmask = rawhide_enable_irq, 130 - .mask = rawhide_disable_irq, 131 - .mask_ack = rawhide_mask_and_ack_irq, 132 }; 133 134 static void ··· 180 } 181 182 for (i = 16; i < 128; ++i) { 183 - irq_to_desc(i)->status |= IRQ_LEVEL; 184 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); 185 } 186 187 init_i8259a_irqs();
··· 56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) 57 58 static inline void 59 + rawhide_enable_irq(struct irq_data *d) 60 { 61 unsigned int mask, hose; 62 + unsigned int irq = d->irq; 63 64 irq -= 16; 65 hose = irq / 24; ··· 76 } 77 78 static void 79 + rawhide_disable_irq(struct irq_data *d) 80 { 81 unsigned int mask, hose; 82 + unsigned int irq = d->irq; 83 84 irq -= 16; 85 hose = irq / 24; ··· 96 } 97 98 static void 99 + rawhide_mask_and_ack_irq(struct irq_data *d) 100 { 101 unsigned int mask, mask1, hose; 102 + unsigned int irq = d->irq; 103 104 irq -= 16; 105 hose = irq / 24; ··· 123 124 static struct irq_chip rawhide_irq_type = { 125 .name = "RAWHIDE", 126 + .irq_unmask = rawhide_enable_irq, 127 + .irq_mask = rawhide_disable_irq, 128 + .irq_mask_ack = rawhide_mask_and_ack_irq, 129 }; 130 131 static void ··· 177 } 178 179 for (i = 16; i < 128; ++i) { 180 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); 181 + irq_set_status_flags(i, IRQ_LEVEL); 182 } 183 184 init_i8259a_irqs();
+8 -8
arch/alpha/kernel/sys_rx164.c
··· 47 } 48 49 static inline void 50 - rx164_enable_irq(unsigned int irq) 51 { 52 - rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 53 } 54 55 static void 56 - rx164_disable_irq(unsigned int irq) 57 { 58 - rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 59 } 60 61 static struct irq_chip rx164_irq_type = { 62 .name = "RX164", 63 - .unmask = rx164_enable_irq, 64 - .mask = rx164_disable_irq, 65 - .mask_ack = rx164_disable_irq, 66 }; 67 68 static void ··· 99 100 rx164_update_irq_hw(0); 101 for (i = 16; i < 40; ++i) { 102 - irq_to_desc(i)->status |= IRQ_LEVEL; 103 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); 104 } 105 106 init_i8259a_irqs();
··· 47 } 48 49 static inline void 50 + rx164_enable_irq(struct irq_data *d) 51 { 52 + rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); 53 } 54 55 static void 56 + rx164_disable_irq(struct irq_data *d) 57 { 58 + rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); 59 } 60 61 static struct irq_chip rx164_irq_type = { 62 .name = "RX164", 63 + .irq_unmask = rx164_enable_irq, 64 + .irq_mask = rx164_disable_irq, 65 + .irq_mask_ack = rx164_disable_irq, 66 }; 67 68 static void ··· 99 100 rx164_update_irq_hw(0); 101 for (i = 16; i < 40; ++i) { 102 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); 103 + irq_set_status_flags(i, IRQ_LEVEL); 104 } 105 106 init_i8259a_irqs();
+10 -10
arch/alpha/kernel/sys_sable.c
··· 443 /* GENERIC irq routines */ 444 445 static inline void 446 - sable_lynx_enable_irq(unsigned int irq) 447 { 448 unsigned long bit, mask; 449 450 - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 451 spin_lock(&sable_lynx_irq_lock); 452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 459 } 460 461 static void 462 - sable_lynx_disable_irq(unsigned int irq) 463 { 464 unsigned long bit, mask; 465 466 - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 467 spin_lock(&sable_lynx_irq_lock); 468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 475 } 476 477 static void 478 - sable_lynx_mask_and_ack_irq(unsigned int irq) 479 { 480 unsigned long bit, mask; 481 482 - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 483 spin_lock(&sable_lynx_irq_lock); 484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 489 490 static struct irq_chip sable_lynx_irq_type = { 491 .name = "SABLE/LYNX", 492 - .unmask = sable_lynx_enable_irq, 493 - .mask = sable_lynx_disable_irq, 494 - .mask_ack = sable_lynx_mask_and_ack_irq, 495 }; 496 497 static void ··· 518 long i; 519 520 for (i = 0; i < nr_of_irqs; ++i) { 521 - irq_to_desc(i)->status |= IRQ_LEVEL; 522 set_irq_chip_and_handler(i, &sable_lynx_irq_type, 523 handle_level_irq); 524 } 525 526 common_init_isa_dma();
··· 443 /* GENERIC irq routines */ 444 445 static inline void 446 + sable_lynx_enable_irq(struct irq_data *d) 447 { 448 unsigned long bit, mask; 449 450 + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; 451 spin_lock(&sable_lynx_irq_lock); 452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 459 } 460 461 static void 462 + sable_lynx_disable_irq(struct irq_data *d) 463 { 464 unsigned long bit, mask; 465 466 + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; 467 spin_lock(&sable_lynx_irq_lock); 468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 475 } 476 477 static void 478 + sable_lynx_mask_and_ack_irq(struct irq_data *d) 479 { 480 unsigned long bit, mask; 481 482 + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; 483 spin_lock(&sable_lynx_irq_lock); 484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 489 490 static struct irq_chip sable_lynx_irq_type = { 491 .name = "SABLE/LYNX", 492 + .irq_unmask = sable_lynx_enable_irq, 493 + .irq_mask = sable_lynx_disable_irq, 494 + .irq_mask_ack = sable_lynx_mask_and_ack_irq, 495 }; 496 497 static void ··· 518 long i; 519 520 for (i = 0; i < nr_of_irqs; ++i) { 521 set_irq_chip_and_handler(i, &sable_lynx_irq_type, 522 handle_level_irq); 523 + irq_set_status_flags(i, IRQ_LEVEL); 524 } 525 526 common_init_isa_dma();
+8 -6
arch/alpha/kernel/sys_takara.c
··· 45 } 46 47 static inline void 48 - takara_enable_irq(unsigned int irq) 49 { 50 unsigned long mask; 51 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 52 takara_update_irq_hw(irq, mask); 53 } 54 55 static void 56 - takara_disable_irq(unsigned int irq) 57 { 58 unsigned long mask; 59 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 60 takara_update_irq_hw(irq, mask); ··· 64 65 static struct irq_chip takara_irq_type = { 66 .name = "TAKARA", 67 - .unmask = takara_enable_irq, 68 - .mask = takara_disable_irq, 69 - .mask_ack = takara_disable_irq, 70 }; 71 72 static void ··· 138 takara_update_irq_hw(i, -1); 139 140 for (i = 16; i < 128; ++i) { 141 - irq_to_desc(i)->status |= IRQ_LEVEL; 142 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); 143 } 144 145 common_init_isa_dma();
··· 45 } 46 47 static inline void 48 + takara_enable_irq(struct irq_data *d) 49 { 50 + unsigned int irq = d->irq; 51 unsigned long mask; 52 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 53 takara_update_irq_hw(irq, mask); 54 } 55 56 static void 57 + takara_disable_irq(struct irq_data *d) 58 { 59 + unsigned int irq = d->irq; 60 unsigned long mask; 61 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 62 takara_update_irq_hw(irq, mask); ··· 62 63 static struct irq_chip takara_irq_type = { 64 .name = "TAKARA", 65 + .irq_unmask = takara_enable_irq, 66 + .irq_mask = takara_disable_irq, 67 + .irq_mask_ack = takara_disable_irq, 68 }; 69 70 static void ··· 136 takara_update_irq_hw(i, -1); 137 138 for (i = 16; i < 128; ++i) { 139 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); 140 + irq_set_status_flags(i, IRQ_LEVEL); 141 } 142 143 common_init_isa_dma();
+13 -9
arch/alpha/kernel/sys_titan.c
··· 112 } 113 114 static inline void 115 - titan_enable_irq(unsigned int irq) 116 { 117 spin_lock(&titan_irq_lock); 118 titan_cached_irq_mask |= 1UL << (irq - 16); 119 titan_update_irq_hw(titan_cached_irq_mask); ··· 122 } 123 124 static inline void 125 - titan_disable_irq(unsigned int irq) 126 { 127 spin_lock(&titan_irq_lock); 128 titan_cached_irq_mask &= ~(1UL << (irq - 16)); 129 titan_update_irq_hw(titan_cached_irq_mask); ··· 146 } 147 148 static int 149 - titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 150 { 151 spin_lock(&titan_irq_lock); 152 titan_cpu_set_irq_affinity(irq - 16, *affinity); 153 titan_update_irq_hw(titan_cached_irq_mask); ··· 179 { 180 long i; 181 for (i = imin; i <= imax; ++i) { 182 - irq_to_desc(i)->status |= IRQ_LEVEL; 183 set_irq_chip_and_handler(i, ops, handle_level_irq); 184 } 185 } 186 187 static struct irq_chip titan_irq_type = { 188 - .name = "TITAN", 189 - .unmask = titan_enable_irq, 190 - .mask = titan_disable_irq, 191 - .mask_ack = titan_disable_irq, 192 - .set_affinity = titan_set_irq_affinity, 193 }; 194 195 static irqreturn_t
··· 112 } 113 114 static inline void 115 + titan_enable_irq(struct irq_data *d) 116 { 117 + unsigned int irq = d->irq; 118 spin_lock(&titan_irq_lock); 119 titan_cached_irq_mask |= 1UL << (irq - 16); 120 titan_update_irq_hw(titan_cached_irq_mask); ··· 121 } 122 123 static inline void 124 + titan_disable_irq(struct irq_data *d) 125 { 126 + unsigned int irq = d->irq; 127 spin_lock(&titan_irq_lock); 128 titan_cached_irq_mask &= ~(1UL << (irq - 16)); 129 titan_update_irq_hw(titan_cached_irq_mask); ··· 144 } 145 146 static int 147 + titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, 148 + bool force) 149 { 150 + unsigned int irq = d->irq; 151 spin_lock(&titan_irq_lock); 152 titan_cpu_set_irq_affinity(irq - 16, *affinity); 153 titan_update_irq_hw(titan_cached_irq_mask); ··· 175 { 176 long i; 177 for (i = imin; i <= imax; ++i) { 178 set_irq_chip_and_handler(i, ops, handle_level_irq); 179 + irq_set_status_flags(i, IRQ_LEVEL); 180 } 181 } 182 183 static struct irq_chip titan_irq_type = { 184 + .name = "TITAN", 185 + .irq_unmask = titan_enable_irq, 186 + .irq_mask = titan_disable_irq, 187 + .irq_mask_ack = titan_disable_irq, 188 + .irq_set_affinity = titan_set_irq_affinity, 189 }; 190 191 static irqreturn_t
+19 -13
arch/alpha/kernel/sys_wildfire.c
··· 104 } 105 106 static void 107 - wildfire_enable_irq(unsigned int irq) 108 { 109 if (irq < 16) 110 - i8259a_enable_irq(irq); 111 112 spin_lock(&wildfire_irq_lock); 113 set_bit(irq, &cached_irq_mask); ··· 118 } 119 120 static void 121 - wildfire_disable_irq(unsigned int irq) 122 { 123 if (irq < 16) 124 - i8259a_disable_irq(irq); 125 126 spin_lock(&wildfire_irq_lock); 127 clear_bit(irq, &cached_irq_mask); ··· 132 } 133 134 static void 135 - wildfire_mask_and_ack_irq(unsigned int irq) 136 { 137 if (irq < 16) 138 - i8259a_mask_and_ack_irq(irq); 139 140 spin_lock(&wildfire_irq_lock); 141 clear_bit(irq, &cached_irq_mask); ··· 147 148 static struct irq_chip wildfire_irq_type = { 149 .name = "WILDFIRE", 150 - .unmask = wildfire_enable_irq, 151 - .mask = wildfire_disable_irq, 152 - .mask_ack = wildfire_mask_and_ack_irq, 153 }; 154 155 static void __init ··· 183 for (i = 0; i < 16; ++i) { 184 if (i == 2) 185 continue; 186 - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; 187 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 188 handle_level_irq); 189 } 190 191 - irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL; 192 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, 193 handle_level_irq); 194 for (i = 40; i < 64; ++i) { 195 - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; 196 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 197 handle_level_irq); 198 } 199 200 - setup_irq(32+irq_bias, &isa_enable); 201 } 202 203 static void __init
··· 104 } 105 106 static void 107 + wildfire_enable_irq(struct irq_data *d) 108 { 109 + unsigned int irq = d->irq; 110 + 111 if (irq < 16) 112 + i8259a_enable_irq(d); 113 114 spin_lock(&wildfire_irq_lock); 115 set_bit(irq, &cached_irq_mask); ··· 116 } 117 118 static void 119 + wildfire_disable_irq(struct irq_data *d) 120 { 121 + unsigned int irq = d->irq; 122 + 123 if (irq < 16) 124 + i8259a_disable_irq(d); 125 126 spin_lock(&wildfire_irq_lock); 127 clear_bit(irq, &cached_irq_mask); ··· 128 } 129 130 static void 131 + wildfire_mask_and_ack_irq(struct irq_data *d) 132 { 133 + unsigned int irq = d->irq; 134 + 135 if (irq < 16) 136 + i8259a_mask_and_ack_irq(d); 137 138 spin_lock(&wildfire_irq_lock); 139 clear_bit(irq, &cached_irq_mask); ··· 141 142 static struct irq_chip wildfire_irq_type = { 143 .name = "WILDFIRE", 144 + .irq_unmask = wildfire_enable_irq, 145 + .irq_mask = wildfire_disable_irq, 146 + .irq_mask_ack = wildfire_mask_and_ack_irq, 147 }; 148 149 static void __init ··· 177 for (i = 0; i < 16; ++i) { 178 if (i == 2) 179 continue; 180 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 181 handle_level_irq); 182 + irq_set_status_flags(i + irq_bias, IRQ_LEVEL); 183 } 184 185 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, 186 handle_level_irq); 187 + irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); 188 for (i = 40; i < 64; ++i) { 189 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 190 handle_level_irq); 191 + irq_set_status_flags(i + irq_bias, IRQ_LEVEL); 192 } 193 194 + setup_irq(32+irq_bias, &isa_enable); 195 } 196 197 static void __init
+2
arch/arm/common/Kconfig
··· 6 7 config ARM_VIC_NR 8 int 9 default 2 10 depends on ARM_VIC 11 help
··· 6 7 config ARM_VIC_NR 8 int 9 + default 4 if ARCH_S5PV210 10 + default 3 if ARCH_S5P6442 || ARCH_S5PC100 11 default 2 12 depends on ARM_VIC 13 help
-4
arch/arm/include/asm/mach/arch.h
··· 15 struct sys_timer; 16 17 struct machine_desc { 18 - /* 19 - * Note! The first two elements are used 20 - * by assembler code in head.S, head-common.S 21 - */ 22 unsigned int nr; /* architecture number */ 23 const char *name; /* architecture name */ 24 unsigned long boot_params; /* tagged list */
··· 15 struct sys_timer; 16 17 struct machine_desc { 18 unsigned int nr; /* architecture number */ 19 const char *name; /* architecture name */ 20 unsigned long boot_params; /* tagged list */
+2
arch/arm/include/asm/pgalloc.h
··· 10 #ifndef _ASMARM_PGALLOC_H 11 #define _ASMARM_PGALLOC_H 12 13 #include <asm/domain.h> 14 #include <asm/pgtable-hwdef.h> 15 #include <asm/processor.h>
··· 10 #ifndef _ASMARM_PGALLOC_H 11 #define _ASMARM_PGALLOC_H 12 13 + #include <linux/pagemap.h> 14 + 15 #include <asm/domain.h> 16 #include <asm/pgtable-hwdef.h> 17 #include <asm/processor.h>
+23 -3
arch/arm/kernel/hw_breakpoint.c
··· 836 /* 837 * One-time initialisation. 838 */ 839 - static void reset_ctrl_regs(void *unused) 840 { 841 - int i; 842 843 /* 844 * v7 debug contains save and restore registers so that debug state ··· 851 * later on. 852 */ 853 if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { 854 /* 855 * Unconditionally clear the lock by writing a value 856 * other than 0xC5ACCE55 to the access register. ··· 900 static int __init arch_hw_breakpoint_init(void) 901 { 902 u32 dscr; 903 904 debug_arch = get_debug_arch(); 905 ··· 925 * Reset the breakpoint resources. We assume that a halting 926 * debugger will leave the world in a nice state for us. 927 */ 928 - on_each_cpu(reset_ctrl_regs, NULL, 1); 929 930 ARM_DBG_READ(c1, 0, dscr); 931 if (dscr & ARM_DSCR_HDBGEN) {
··· 836 /* 837 * One-time initialisation. 838 */ 839 + static void reset_ctrl_regs(void *info) 840 { 841 + int i, cpu = smp_processor_id(); 842 + u32 dbg_power; 843 + cpumask_t *cpumask = info; 844 845 /* 846 * v7 debug contains save and restore registers so that debug state ··· 849 * later on. 850 */ 851 if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { 852 + /* 853 + * Ensure sticky power-down is clear (i.e. debug logic is 854 + * powered up). 855 + */ 856 + asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); 857 + if ((dbg_power & 0x1) == 0) { 858 + pr_warning("CPU %d debug is powered down!\n", cpu); 859 + cpumask_or(cpumask, cpumask, cpumask_of(cpu)); 860 + return; 861 + } 862 + 863 /* 864 * Unconditionally clear the lock by writing a value 865 * other than 0xC5ACCE55 to the access register. ··· 887 static int __init arch_hw_breakpoint_init(void) 888 { 889 u32 dscr; 890 + cpumask_t cpumask = { CPU_BITS_NONE }; 891 892 debug_arch = get_debug_arch(); 893 ··· 911 * Reset the breakpoint resources. We assume that a halting 912 * debugger will leave the world in a nice state for us. 913 */ 914 + on_each_cpu(reset_ctrl_regs, &cpumask, 1); 915 + if (!cpumask_empty(&cpumask)) { 916 + core_num_brps = 0; 917 + core_num_reserved_brps = 0; 918 + core_num_wrps = 0; 919 + return 0; 920 + } 921 922 ARM_DBG_READ(c1, 0, dscr); 923 if (dscr & ARM_DSCR_HDBGEN) {
+3 -3
arch/arm/kernel/ptrace.c
··· 996 while (!(arch_ctrl.len & 0x1)) 997 arch_ctrl.len >>= 1; 998 999 - if (idx & 0x1) 1000 - reg = encode_ctrl_reg(arch_ctrl); 1001 - else 1002 reg = bp->attr.bp_addr; 1003 } 1004 1005 put:
··· 996 while (!(arch_ctrl.len & 0x1)) 997 arch_ctrl.len >>= 1; 998 999 + if (num & 0x1) 1000 reg = bp->attr.bp_addr; 1001 + else 1002 + reg = encode_ctrl_reg(arch_ctrl); 1003 } 1004 1005 put:
+1 -1
arch/arm/mach-davinci/cpufreq.c
··· 132 return ret; 133 } 134 135 - static int __init davinci_cpu_init(struct cpufreq_policy *policy) 136 { 137 int result = 0; 138 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
··· 132 return ret; 133 } 134 135 + static int davinci_cpu_init(struct cpufreq_policy *policy) 136 { 137 int result = 0; 138 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+7
arch/arm/mach-davinci/devices-da8xx.c
··· 480 .resource = da850_mcasp_resources, 481 }; 482 483 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) 484 { 485 /* DA830/OMAP-L137 has 3 instances of McASP */ 486 if (cpu_is_davinci_da830() && id == 1) { 487 da830_mcasp1_device.dev.platform_data = pdata;
··· 480 .resource = da850_mcasp_resources, 481 }; 482 483 + struct platform_device davinci_pcm_device = { 484 + .name = "davinci-pcm-audio", 485 + .id = -1, 486 + }; 487 + 488 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) 489 { 490 + platform_device_register(&davinci_pcm_device); 491 + 492 /* DA830/OMAP-L137 has 3 instances of McASP */ 493 if (cpu_is_davinci_da830() && id == 1) { 494 da830_mcasp1_device.dev.platform_data = pdata;
+9 -9
arch/arm/mach-davinci/gpio-tnetv107x.c
··· 58 59 spin_lock_irqsave(&ctlr->lock, flags); 60 61 - gpio_reg_set_bit(&regs->enable, gpio); 62 63 spin_unlock_irqrestore(&ctlr->lock, flags); 64 ··· 74 75 spin_lock_irqsave(&ctlr->lock, flags); 76 77 - gpio_reg_clear_bit(&regs->enable, gpio); 78 79 spin_unlock_irqrestore(&ctlr->lock, flags); 80 } ··· 88 89 spin_lock_irqsave(&ctlr->lock, flags); 90 91 - gpio_reg_set_bit(&regs->direction, gpio); 92 93 spin_unlock_irqrestore(&ctlr->lock, flags); 94 ··· 106 spin_lock_irqsave(&ctlr->lock, flags); 107 108 if (value) 109 - gpio_reg_set_bit(&regs->data_out, gpio); 110 else 111 - gpio_reg_clear_bit(&regs->data_out, gpio); 112 113 - gpio_reg_clear_bit(&regs->direction, gpio); 114 115 spin_unlock_irqrestore(&ctlr->lock, flags); 116 ··· 124 unsigned gpio = chip->base + offset; 125 int ret; 126 127 - ret = gpio_reg_get_bit(&regs->data_in, gpio); 128 129 return ret ? 1 : 0; 130 } ··· 140 spin_lock_irqsave(&ctlr->lock, flags); 141 142 if (value) 143 - gpio_reg_set_bit(&regs->data_out, gpio); 144 else 145 - gpio_reg_clear_bit(&regs->data_out, gpio); 146 147 spin_unlock_irqrestore(&ctlr->lock, flags); 148 }
··· 58 59 spin_lock_irqsave(&ctlr->lock, flags); 60 61 + gpio_reg_set_bit(regs->enable, gpio); 62 63 spin_unlock_irqrestore(&ctlr->lock, flags); 64 ··· 74 75 spin_lock_irqsave(&ctlr->lock, flags); 76 77 + gpio_reg_clear_bit(regs->enable, gpio); 78 79 spin_unlock_irqrestore(&ctlr->lock, flags); 80 } ··· 88 89 spin_lock_irqsave(&ctlr->lock, flags); 90 91 + gpio_reg_set_bit(regs->direction, gpio); 92 93 spin_unlock_irqrestore(&ctlr->lock, flags); 94 ··· 106 spin_lock_irqsave(&ctlr->lock, flags); 107 108 if (value) 109 + gpio_reg_set_bit(regs->data_out, gpio); 110 else 111 + gpio_reg_clear_bit(regs->data_out, gpio); 112 113 + gpio_reg_clear_bit(regs->direction, gpio); 114 115 spin_unlock_irqrestore(&ctlr->lock, flags); 116 ··· 124 unsigned gpio = chip->base + offset; 125 int ret; 126 127 + ret = gpio_reg_get_bit(regs->data_in, gpio); 128 129 return ret ? 1 : 0; 130 } ··· 140 spin_lock_irqsave(&ctlr->lock, flags); 141 142 if (value) 143 + gpio_reg_set_bit(regs->data_out, gpio); 144 else 145 + gpio_reg_clear_bit(regs->data_out, gpio); 146 147 spin_unlock_irqrestore(&ctlr->lock, flags); 148 }
+2
arch/arm/mach-davinci/include/mach/clkdev.h
··· 1 #ifndef __MACH_CLKDEV_H 2 #define __MACH_CLKDEV_H 3 4 static inline int __clk_get(struct clk *clk) 5 { 6 return 1;
··· 1 #ifndef __MACH_CLKDEV_H 2 #define __MACH_CLKDEV_H 3 4 + struct clk; 5 + 6 static inline int __clk_get(struct clk *clk) 7 { 8 return 1;
+6 -4
arch/arm/mach-omap2/mailbox.c
··· 193 omap_mbox_type_t irq) 194 { 195 struct omap_mbox2_priv *p = mbox->priv; 196 - u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; 197 - l = mbox_read_reg(p->irqdisable); 198 - l &= ~bit; 199 - mbox_write_reg(l, p->irqdisable); 200 } 201 202 static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
··· 193 omap_mbox_type_t irq) 194 { 195 struct omap_mbox2_priv *p = mbox->priv; 196 + u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; 197 + 198 + if (!cpu_is_omap44xx()) 199 + bit = mbox_read_reg(p->irqdisable) & ~bit; 200 + 201 + mbox_write_reg(bit, p->irqdisable); 202 } 203 204 static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
+15 -18
arch/arm/mach-omap2/smartreflex.c
··· 282 dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" 283 "interrupt handler. Smartreflex will" 284 "not function as desired\n", __func__); 285 kfree(sr_info); 286 return ret; 287 } ··· 880 ret = sr_late_init(sr_info); 881 if (ret) { 882 pr_warning("%s: Error in SR late init\n", __func__); 883 - return ret; 884 } 885 } 886 ··· 891 * not try to create rest of the debugfs entries. 892 */ 893 vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); 894 - if (!vdd_dbg_dir) 895 - return -EINVAL; 896 897 dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); 898 if (IS_ERR(dbg_dir)) { 899 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", 900 __func__); 901 - return PTR_ERR(dbg_dir); 902 } 903 904 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, ··· 917 if (IS_ERR(nvalue_dir)) { 918 dev_err(&pdev->dev, "%s: Unable to create debugfs directory" 919 "for n-values\n", __func__); 920 - return PTR_ERR(nvalue_dir); 921 } 922 923 omap_voltage_get_volttable(sr_info->voltdm, &volt_data); ··· 927 " corresponding vdd vdd_%s. Cannot create debugfs" 928 "entries for n-values\n", 929 __func__, sr_info->voltdm->name); 930 - return -ENODATA; 931 } 932 933 for (i = 0; i < sr_info->nvalue_count; i++) { 934 - char *name; 935 - char volt_name[32]; 936 937 - name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL); 938 - if (!name) { 939 - dev_err(&pdev->dev, "%s: Unable to allocate memory" 940 - " for n-value directory name\n", __func__); 941 - return -ENOMEM; 942 - } 943 - 944 - strcpy(name, "volt_"); 945 - sprintf(volt_name, "%d", volt_data[i].volt_nominal); 946 - strcat(name, volt_name); 947 (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, 948 &(sr_info->nvalue_table[i].nvalue)); 949 }
··· 282 dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" 283 "interrupt handler. Smartreflex will" 284 "not function as desired\n", __func__); 285 + kfree(name); 286 kfree(sr_info); 287 return ret; 288 } ··· 879 ret = sr_late_init(sr_info); 880 if (ret) { 881 pr_warning("%s: Error in SR late init\n", __func__); 882 + goto err_release_region; 883 } 884 } 885 ··· 890 * not try to create rest of the debugfs entries. 891 */ 892 vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); 893 + if (!vdd_dbg_dir) { 894 + ret = -EINVAL; 895 + goto err_release_region; 896 + } 897 898 dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); 899 if (IS_ERR(dbg_dir)) { 900 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", 901 __func__); 902 + ret = PTR_ERR(dbg_dir); 903 + goto err_release_region; 904 } 905 906 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, ··· 913 if (IS_ERR(nvalue_dir)) { 914 dev_err(&pdev->dev, "%s: Unable to create debugfs directory" 915 "for n-values\n", __func__); 916 + ret = PTR_ERR(nvalue_dir); 917 + goto err_release_region; 918 } 919 920 omap_voltage_get_volttable(sr_info->voltdm, &volt_data); ··· 922 " corresponding vdd vdd_%s. Cannot create debugfs" 923 "entries for n-values\n", 924 __func__, sr_info->voltdm->name); 925 + ret = -ENODATA; 926 + goto err_release_region; 927 } 928 929 for (i = 0; i < sr_info->nvalue_count; i++) { 930 + char name[NVALUE_NAME_LEN + 1]; 931 932 + snprintf(name, sizeof(name), "volt_%d", 933 + volt_data[i].volt_nominal); 934 (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, 935 &(sr_info->nvalue_table[i].nvalue)); 936 }
+1
arch/arm/mach-pxa/pxa25x.c
··· 347 &pxa25x_device_assp, 348 &pxa25x_device_pwm0, 349 &pxa25x_device_pwm1, 350 }; 351 352 static struct sys_device pxa25x_sysdev[] = {
··· 347 &pxa25x_device_assp, 348 &pxa25x_device_pwm0, 349 &pxa25x_device_pwm1, 350 + &pxa_device_asoc_platform, 351 }; 352 353 static struct sys_device pxa25x_sysdev[] = {
-2
arch/arm/mach-pxa/tosa-bt.c
··· 81 goto err_rfk_alloc; 82 } 83 84 - rfkill_set_led_trigger_name(rfk, "tosa-bt"); 85 - 86 rc = rfkill_register(rfk); 87 if (rc) 88 goto err_rfkill;
··· 81 goto err_rfk_alloc; 82 } 83 84 rc = rfkill_register(rfk); 85 if (rc) 86 goto err_rfkill;
+6
arch/arm/mach-pxa/tosa.c
··· 875 .dev.platform_data = &sharpsl_rom_data, 876 }; 877 878 static struct platform_device *devices[] __initdata = { 879 &tosascoop_device, 880 &tosascoop_jc_device, ··· 890 &tosaled_device, 891 &tosa_bt_device, 892 &sharpsl_rom_device, 893 }; 894 895 static void tosa_poweroff(void)
··· 875 .dev.platform_data = &sharpsl_rom_data, 876 }; 877 878 + static struct platform_device wm9712_device = { 879 + .name = "wm9712-codec", 880 + .id = -1, 881 + }; 882 + 883 static struct platform_device *devices[] __initdata = { 884 &tosascoop_device, 885 &tosascoop_jc_device, ··· 885 &tosaled_device, 886 &tosa_bt_device, 887 &sharpsl_rom_device, 888 + &wm9712_device, 889 }; 890 891 static void tosa_poweroff(void)
+1
arch/arm/mach-s3c2440/Kconfig
··· 99 select POWER_SUPPLY 100 select MACH_NEO1973 101 select S3C2410_PWM 102 help 103 Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone 104
··· 99 select POWER_SUPPLY 100 select MACH_NEO1973 101 select S3C2410_PWM 102 + select S3C_DEV_USB_HOST 103 help 104 Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone 105
+13 -13
arch/arm/mach-s3c2440/include/mach/gta02.h
··· 44 #define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */ 45 #define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */ 46 47 - #define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */ 48 - #define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2 49 - #define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */ 50 - #define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */ 51 - #define GTA02_GPIO_nGSM_EN S3C2440_GPJ4 52 - #define GTA02_GPIO_3D_RESET S3C2440_GPJ5 53 - #define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */ 54 - #define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7 55 - #define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8 56 - #define GTA02_GPIO_KEEPACT S3C2440_GPJ8 57 - #define GTA02v1_GPIO_HP_IN S3C2440_GPJ10 58 - #define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */ 59 - #define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */ 60 61 #define GTA02_IRQ_GSENSOR_1 IRQ_EINT0 62 #define GTA02_IRQ_MODEM IRQ_EINT1
··· 44 #define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */ 45 #define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */ 46 47 + #define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */ 48 + #define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2) 49 + #define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */ 50 + #define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */ 51 + #define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4) 52 + #define GTA02_GPIO_3D_RESET S3C2410_GPJ(5) 53 + #define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */ 54 + #define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7) 55 + #define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8) 56 + #define GTA02_GPIO_KEEPACT S3C2410_GPJ(8) 57 + #define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10) 58 + #define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */ 59 + #define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */ 60 61 #define GTA02_IRQ_GSENSOR_1 IRQ_EINT0 62 #define GTA02_IRQ_MODEM IRQ_EINT1
+6
arch/arm/mach-s3c64xx/clock.c
··· 151 .enable = s3c64xx_pclk_ctrl, 152 .ctrlbit = S3C_CLKCON_PCLK_IIC, 153 }, { 154 .name = "iis", 155 .id = 0, 156 .parent = &clk_p,
··· 151 .enable = s3c64xx_pclk_ctrl, 152 .ctrlbit = S3C_CLKCON_PCLK_IIC, 153 }, { 154 + .name = "i2c", 155 + .id = 1, 156 + .parent = &clk_p, 157 + .enable = s3c64xx_pclk_ctrl, 158 + .ctrlbit = S3C6410_CLKCON_PCLK_I2C1, 159 + }, { 160 .name = "iis", 161 .id = 0, 162 .parent = &clk_p,
+6 -5
arch/arm/mach-s3c64xx/dma.c
··· 690 691 regptr = regs + PL080_Cx_BASE(0); 692 693 - for (ch = 0; ch < 8; ch++, chno++, chptr++) { 694 - printk(KERN_INFO "%s: registering DMA %d (%p)\n", 695 - __func__, chno, regptr); 696 697 chptr->bit = 1 << ch; 698 - chptr->number = chno; 699 chptr->dmac = dmac; 700 chptr->regs = regptr; 701 regptr += PL080_Cx_STRIDE; ··· 704 /* for the moment, permanently enable the controller */ 705 writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); 706 707 - printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs); 708 709 return 0; 710
··· 690 691 regptr = regs + PL080_Cx_BASE(0); 692 693 + for (ch = 0; ch < 8; ch++, chptr++) { 694 + pr_debug("%s: registering DMA %d (%p)\n", 695 + __func__, chno + ch, regptr); 696 697 chptr->bit = 1 << ch; 698 + chptr->number = chno + ch; 699 chptr->dmac = dmac; 700 chptr->regs = regptr; 701 regptr += PL080_Cx_STRIDE; ··· 704 /* for the moment, permanently enable the controller */ 705 writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); 706 707 + printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n", 708 + irq, regs, chno, chno+8); 709 710 return 0; 711
+2 -2
arch/arm/mach-s3c64xx/gpiolib.c
··· 72 .get_pull = s3c_gpio_getpull_updown, 73 }; 74 75 - int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) 76 { 77 return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO; 78 } ··· 138 }, 139 }; 140 141 - int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) 142 { 143 return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO; 144 }
··· 72 .get_pull = s3c_gpio_getpull_updown, 73 }; 74 75 + static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) 76 { 77 return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO; 78 } ··· 138 }, 139 }; 140 141 + static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) 142 { 143 return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO; 144 }
+7 -6
arch/arm/mach-s3c64xx/mach-smdk6410.c
··· 28 #include <linux/delay.h> 29 #include <linux/smsc911x.h> 30 #include <linux/regulator/fixed.h> 31 32 #ifdef CONFIG_SMDK6410_WM1190_EV1 33 #include <linux/mfd/wm8350/core.h> ··· 352 /* VDD_UH_MMC, LDO5 on J5 */ 353 static struct regulator_init_data smdk6410_vdduh_mmc = { 354 .constraints = { 355 - .name = "PVDD_UH/PVDD_MMC", 356 .always_on = 1, 357 }, 358 }; ··· 418 /* S3C64xx internal logic & PLL */ 419 static struct regulator_init_data wm8350_dcdc1_data = { 420 .constraints = { 421 - .name = "PVDD_INT/PVDD_PLL", 422 .min_uV = 1200000, 423 .max_uV = 1200000, 424 .always_on = 1, ··· 453 454 static struct regulator_init_data wm8350_dcdc4_data = { 455 .constraints = { 456 - .name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV", 457 .min_uV = 3000000, 458 .max_uV = 3000000, 459 .always_on = 1, ··· 465 /* OTGi/1190-EV1 HPVDD & AVDD */ 466 static struct regulator_init_data wm8350_ldo4_data = { 467 .constraints = { 468 - .name = "PVDD_OTGI/HPVDD/AVDD", 469 .min_uV = 1200000, 470 .max_uV = 1200000, 471 .apply_uV = 1, ··· 553 554 static struct regulator_init_data wm1192_dcdc3 = { 555 .constraints = { 556 - .name = "PVDD_MEM/PVDD_GPS", 557 .always_on = 1, 558 }, 559 }; ··· 564 565 static struct regulator_init_data wm1192_ldo1 = { 566 .constraints = { 567 - .name = "PVDD_LCD/PVDD_EXT", 568 .always_on = 1, 569 }, 570 .consumer_supplies = wm1192_ldo1_consumers,
··· 28 #include <linux/delay.h> 29 #include <linux/smsc911x.h> 30 #include <linux/regulator/fixed.h> 31 + #include <linux/regulator/machine.h> 32 33 #ifdef CONFIG_SMDK6410_WM1190_EV1 34 #include <linux/mfd/wm8350/core.h> ··· 351 /* VDD_UH_MMC, LDO5 on J5 */ 352 static struct regulator_init_data smdk6410_vdduh_mmc = { 353 .constraints = { 354 + .name = "PVDD_UH+PVDD_MMC", 355 .always_on = 1, 356 }, 357 }; ··· 417 /* S3C64xx internal logic & PLL */ 418 static struct regulator_init_data wm8350_dcdc1_data = { 419 .constraints = { 420 + .name = "PVDD_INT+PVDD_PLL", 421 .min_uV = 1200000, 422 .max_uV = 1200000, 423 .always_on = 1, ··· 452 453 static struct regulator_init_data wm8350_dcdc4_data = { 454 .constraints = { 455 + .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV", 456 .min_uV = 3000000, 457 .max_uV = 3000000, 458 .always_on = 1, ··· 464 /* OTGi/1190-EV1 HPVDD & AVDD */ 465 static struct regulator_init_data wm8350_ldo4_data = { 466 .constraints = { 467 + .name = "PVDD_OTGI+HPVDD+AVDD", 468 .min_uV = 1200000, 469 .max_uV = 1200000, 470 .apply_uV = 1, ··· 552 553 static struct regulator_init_data wm1192_dcdc3 = { 554 .constraints = { 555 + .name = "PVDD_MEM+PVDD_GPS", 556 .always_on = 1, 557 }, 558 }; ··· 563 564 static struct regulator_init_data wm1192_ldo1 = { 565 .constraints = { 566 + .name = "PVDD_LCD+PVDD_EXT", 567 .always_on = 1, 568 }, 569 .consumer_supplies = wm1192_ldo1_consumers,
+1 -1
arch/arm/mach-s3c64xx/setup-keypad.c
··· 17 void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) 18 { 19 /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */ 20 - s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3)); 21 22 /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */ 23 s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
··· 17 void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) 18 { 19 /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */ 20 + s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3)); 21 22 /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */ 23 s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
+1 -1
arch/arm/mach-s3c64xx/setup-sdhci.c
··· 56 else 57 ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); 58 59 - printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); 60 writel(ctrl2, r + S3C_SDHCI_CONTROL2); 61 writel(ctrl3, r + S3C_SDHCI_CONTROL3); 62 }
··· 56 else 57 ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); 58 59 + pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); 60 writel(ctrl2, r + S3C_SDHCI_CONTROL2); 61 writel(ctrl3, r + S3C_SDHCI_CONTROL3); 62 }
+2 -2
arch/arm/mach-s5p64x0/include/mach/gpio.h
··· 23 #define S5P6440_GPIO_A_NR (6) 24 #define S5P6440_GPIO_B_NR (7) 25 #define S5P6440_GPIO_C_NR (8) 26 - #define S5P6440_GPIO_F_NR (2) 27 #define S5P6440_GPIO_G_NR (7) 28 #define S5P6440_GPIO_H_NR (10) 29 #define S5P6440_GPIO_I_NR (16) ··· 36 #define S5P6450_GPIO_B_NR (7) 37 #define S5P6450_GPIO_C_NR (8) 38 #define S5P6450_GPIO_D_NR (8) 39 - #define S5P6450_GPIO_F_NR (2) 40 #define S5P6450_GPIO_G_NR (14) 41 #define S5P6450_GPIO_H_NR (10) 42 #define S5P6450_GPIO_I_NR (16)
··· 23 #define S5P6440_GPIO_A_NR (6) 24 #define S5P6440_GPIO_B_NR (7) 25 #define S5P6440_GPIO_C_NR (8) 26 + #define S5P6440_GPIO_F_NR (16) 27 #define S5P6440_GPIO_G_NR (7) 28 #define S5P6440_GPIO_H_NR (10) 29 #define S5P6440_GPIO_I_NR (16) ··· 36 #define S5P6450_GPIO_B_NR (7) 37 #define S5P6450_GPIO_C_NR (8) 38 #define S5P6450_GPIO_D_NR (8) 39 + #define S5P6450_GPIO_F_NR (16) 40 #define S5P6450_GPIO_G_NR (14) 41 #define S5P6450_GPIO_H_NR (10) 42 #define S5P6450_GPIO_I_NR (16)
+1
arch/arm/mach-shmobile/board-ag5evm.c
··· 454 gpio_direction_output(GPIO_PORT217, 0); 455 mdelay(1); 456 gpio_set_value(GPIO_PORT217, 1); 457 458 /* LCD backlight controller */ 459 gpio_request(GPIO_PORT235, NULL); /* RESET */
··· 454 gpio_direction_output(GPIO_PORT217, 0); 455 mdelay(1); 456 gpio_set_value(GPIO_PORT217, 1); 457 + mdelay(100); 458 459 /* LCD backlight controller */ 460 gpio_request(GPIO_PORT235, NULL); /* RESET */
+1 -1
arch/arm/mach-shmobile/board-ap4evb.c
··· 1303 1304 lcdc_info.clock_source = LCDC_CLK_BUS; 1305 lcdc_info.ch[0].interface_type = RGB18; 1306 - lcdc_info.ch[0].clock_divider = 2; 1307 lcdc_info.ch[0].flags = 0; 1308 lcdc_info.ch[0].lcd_size_cfg.width = 152; 1309 lcdc_info.ch[0].lcd_size_cfg.height = 91;
··· 1303 1304 lcdc_info.clock_source = LCDC_CLK_BUS; 1305 lcdc_info.ch[0].interface_type = RGB18; 1306 + lcdc_info.ch[0].clock_divider = 3; 1307 lcdc_info.ch[0].flags = 0; 1308 lcdc_info.ch[0].lcd_size_cfg.width = 152; 1309 lcdc_info.ch[0].lcd_size_cfg.height = 91;
+1 -1
arch/arm/mach-shmobile/board-mackerel.c
··· 303 .lcd_cfg = mackerel_lcdc_modes, 304 .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), 305 .interface_type = RGB24, 306 - .clock_divider = 2, 307 .flags = 0, 308 .lcd_size_cfg.width = 152, 309 .lcd_size_cfg.height = 91,
··· 303 .lcd_cfg = mackerel_lcdc_modes, 304 .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), 305 .interface_type = RGB24, 306 + .clock_divider = 3, 307 .flags = 0, 308 .lcd_size_cfg.width = 152, 309 .lcd_size_cfg.height = 91,
+14 -3
arch/arm/mach-shmobile/clock-sh73a0.c
··· 263 }; 264 265 enum { MSTP001, 266 - MSTP125, MSTP118, MSTP116, MSTP100, 267 MSTP219, 268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, ··· 275 276 static struct clk mstp_clks[MSTP_NR] = { 277 [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ 278 [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ 279 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ 280 [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ ··· 310 CLKDEV_CON_ID("r_clk", &r_clk), 311 312 /* DIV6 clocks */ 313 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), 314 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), 315 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), ··· 320 321 /* MSTP32 clocks */ 322 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ 323 - CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 324 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ 325 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ 326 - CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ 327 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 328 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ 329 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 330 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
··· 263 }; 264 265 enum { MSTP001, 266 + MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, 267 MSTP219, 268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, ··· 275 276 static struct clk mstp_clks[MSTP_NR] = { 277 [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ 278 + [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */ 279 + [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */ 280 + [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */ 281 + [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */ 282 [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ 283 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ 284 [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ ··· 306 CLKDEV_CON_ID("r_clk", &r_clk), 307 308 /* DIV6 clocks */ 309 + CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), 310 + CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), 311 + CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), 312 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), 313 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), 314 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), ··· 313 314 /* MSTP32 clocks */ 315 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ 316 + CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */ 317 + CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */ 318 + CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */ 319 + CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */ 320 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ 321 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ 322 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 323 + CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ 324 + CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 325 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ 326 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 327 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
+5 -5
arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
··· 6 EW 0xE6020004, 0xA500 7 EW 0xE6030004, 0xA500 8 9 - DD 0x01001000, 0x01001000 10 - 11 LIST "GPIO Setting" 12 EB 0xE6051013, 0xA2 13 14 LIST "CPG" 15 - ED 0xE6150080, 0x00000180 16 ED 0xE61500C0, 0x00000002 17 18 WAIT 1, 0xFE40009C ··· 34 35 WAIT 1, 0xFE40009C 36 37 LIST "BSC" 38 ED 0xFEC10000, 0x00E0001B 39 ··· 53 ED 0xFE40004C, 0x00110209 54 ED 0xFE400010, 0x00000087 55 56 - WAIT 10, 0xFE40009C 57 58 ED 0xFE400084, 0x0000003F 59 EB 0xFE500000, 0x00 ··· 84 85 WAIT 1, 0xFE40009C 86 87 - ED 0xE6150354, 0x00000002 88 89 LIST "SCIF0 - Serial port for earlyprintk" 90 EB 0xE6053098, 0x11
··· 6 EW 0xE6020004, 0xA500 7 EW 0xE6030004, 0xA500 8 9 LIST "GPIO Setting" 10 EB 0xE6051013, 0xA2 11 12 LIST "CPG" 13 ED 0xE61500C0, 0x00000002 14 15 WAIT 1, 0xFE40009C ··· 37 38 WAIT 1, 0xFE40009C 39 40 + LIST "SUB/USBClk" 41 + ED 0xE6150080, 0x00000180 42 + 43 LIST "BSC" 44 ED 0xFEC10000, 0x00E0001B 45 ··· 53 ED 0xFE40004C, 0x00110209 54 ED 0xFE400010, 0x00000087 55 56 + WAIT 30, 0xFE40009C 57 58 ED 0xFE400084, 0x0000003F 59 EB 0xFE500000, 0x00 ··· 84 85 WAIT 1, 0xFE40009C 86 87 + ED 0xFE400354, 0x01AD8002 88 89 LIST "SCIF0 - Serial port for earlyprintk" 90 EB 0xE6053098, 0x11
+5 -5
arch/arm/mach-shmobile/include/mach/head-mackerel.txt
··· 6 EW 0xE6020004, 0xA500 7 EW 0xE6030004, 0xA500 8 9 - DD 0x01001000, 0x01001000 10 - 11 LIST "GPIO Setting" 12 EB 0xE6051013, 0xA2 13 14 LIST "CPG" 15 - ED 0xE6150080, 0x00000180 16 ED 0xE61500C0, 0x00000002 17 18 WAIT 1, 0xFE40009C ··· 34 35 WAIT 1, 0xFE40009C 36 37 LIST "BSC" 38 ED 0xFEC10000, 0x00E0001B 39 ··· 53 ED 0xFE40004C, 0x00110209 54 ED 0xFE400010, 0x00000087 55 56 - WAIT 10, 0xFE40009C 57 58 ED 0xFE400084, 0x0000003F 59 EB 0xFE500000, 0x00 ··· 84 85 WAIT 1, 0xFE40009C 86 87 - ED 0xE6150354, 0x00000002 88 89 LIST "SCIF0 - Serial port for earlyprintk" 90 EB 0xE6053098, 0x11
··· 6 EW 0xE6020004, 0xA500 7 EW 0xE6030004, 0xA500 8 9 LIST "GPIO Setting" 10 EB 0xE6051013, 0xA2 11 12 LIST "CPG" 13 ED 0xE61500C0, 0x00000002 14 15 WAIT 1, 0xFE40009C ··· 37 38 WAIT 1, 0xFE40009C 39 40 + LIST "SUB/USBClk" 41 + ED 0xE6150080, 0x00000180 42 + 43 LIST "BSC" 44 ED 0xFEC10000, 0x00E0001B 45 ··· 53 ED 0xFE40004C, 0x00110209 54 ED 0xFE400010, 0x00000087 55 56 + WAIT 30, 0xFE40009C 57 58 ED 0xFE400084, 0x0000003F 59 EB 0xFE500000, 0x00 ··· 84 85 WAIT 1, 0xFE40009C 86 87 + ED 0xFE400354, 0x01AD8002 88 89 LIST "SCIF0 - Serial port for earlyprintk" 90 EB 0xE6053098, 0x11
+2
arch/arm/plat-samsung/dev-uart.c
··· 15 #include <linux/kernel.h> 16 #include <linux/platform_device.h> 17 18 /* uart devices */ 19 20 static struct platform_device s3c24xx_uart_device0 = {
··· 15 #include <linux/kernel.h> 16 #include <linux/platform_device.h> 17 18 + #include <plat/devs.h> 19 + 20 /* uart devices */ 21 22 static struct platform_device s3c24xx_uart_device0 = {
+12 -4
arch/blackfin/lib/outs.S
··· 13 .align 2 14 15 ENTRY(_outsl) 16 P0 = R0; /* P0 = port */ 17 P1 = R1; /* P1 = address */ 18 P2 = R2; /* P2 = count */ ··· 22 LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; 23 .Llong_loop_s: R0 = [P1++]; 24 .Llong_loop_e: [P0] = R0; 25 - RTS; 26 ENDPROC(_outsl) 27 28 ENTRY(_outsw) 29 P0 = R0; /* P0 = port */ 30 P1 = R1; /* P1 = address */ 31 P2 = R2; /* P2 = count */ ··· 35 LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; 36 .Lword_loop_s: R0 = W[P1++]; 37 .Lword_loop_e: W[P0] = R0; 38 - RTS; 39 ENDPROC(_outsw) 40 41 ENTRY(_outsb) 42 P0 = R0; /* P0 = port */ 43 P1 = R1; /* P1 = address */ 44 P2 = R2; /* P2 = count */ ··· 48 LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; 49 .Lbyte_loop_s: R0 = B[P1++]; 50 .Lbyte_loop_e: B[P0] = R0; 51 - RTS; 52 ENDPROC(_outsb) 53 54 ENTRY(_outsw_8) 55 P0 = R0; /* P0 = port */ 56 P1 = R1; /* P1 = address */ 57 P2 = R2; /* P2 = count */ ··· 64 R0 = R0 << 8; 65 R0 = R0 + R1; 66 .Lword8_loop_e: W[P0] = R0; 67 - RTS; 68 ENDPROC(_outsw_8)
··· 13 .align 2 14 15 ENTRY(_outsl) 16 + CC = R2 == 0; 17 + IF CC JUMP 1f; 18 P0 = R0; /* P0 = port */ 19 P1 = R1; /* P1 = address */ 20 P2 = R2; /* P2 = count */ ··· 20 LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; 21 .Llong_loop_s: R0 = [P1++]; 22 .Llong_loop_e: [P0] = R0; 23 + 1: RTS; 24 ENDPROC(_outsl) 25 26 ENTRY(_outsw) 27 + CC = R2 == 0; 28 + IF CC JUMP 1f; 29 P0 = R0; /* P0 = port */ 30 P1 = R1; /* P1 = address */ 31 P2 = R2; /* P2 = count */ ··· 31 LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; 32 .Lword_loop_s: R0 = W[P1++]; 33 .Lword_loop_e: W[P0] = R0; 34 + 1: RTS; 35 ENDPROC(_outsw) 36 37 ENTRY(_outsb) 38 + CC = R2 == 0; 39 + IF CC JUMP 1f; 40 P0 = R0; /* P0 = port */ 41 P1 = R1; /* P1 = address */ 42 P2 = R2; /* P2 = count */ ··· 42 LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; 43 .Lbyte_loop_s: R0 = B[P1++]; 44 .Lbyte_loop_e: B[P0] = R0; 45 + 1: RTS; 46 ENDPROC(_outsb) 47 48 ENTRY(_outsw_8) 49 + CC = R2 == 0; 50 + IF CC JUMP 1f; 51 P0 = R0; /* P0 = port */ 52 P1 = R1; /* P1 = address */ 53 P2 = R2; /* P2 = count */ ··· 56 R0 = R0 << 8; 57 R0 = R0 + R1; 58 .Lword8_loop_e: W[P0] = R0; 59 + 1: RTS; 60 ENDPROC(_outsw_8)
+2
arch/blackfin/mach-common/cache.S
··· 58 1: 59 .ifeqs "\flushins", BROK_FLUSH_INST 60 \flushins [P0++]; 61 2: nop; 62 .else 63 2: \flushins [P0++];
··· 58 1: 59 .ifeqs "\flushins", BROK_FLUSH_INST 60 \flushins [P0++]; 61 + nop; 62 + nop; 63 2: nop; 64 .else 65 2: \flushins [P0++];
+16
arch/powerpc/include/asm/lppaca.h
··· 33 // 34 //---------------------------------------------------------------------------- 35 #include <linux/cache.h> 36 #include <asm/types.h> 37 #include <asm/mmu.h> 38 39 /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k 40 * alignment is sufficient to prevent this */
··· 33 // 34 //---------------------------------------------------------------------------- 35 #include <linux/cache.h> 36 + #include <linux/threads.h> 37 #include <asm/types.h> 38 #include <asm/mmu.h> 39 + 40 + /* 41 + * We only have to have statically allocated lppaca structs on 42 + * legacy iSeries, which supports at most 64 cpus. 43 + */ 44 + #ifdef CONFIG_PPC_ISERIES 45 + #if NR_CPUS < 64 46 + #define NR_LPPACAS NR_CPUS 47 + #else 48 + #define NR_LPPACAS 64 49 + #endif 50 + #else /* not iSeries */ 51 + #define NR_LPPACAS 1 52 + #endif 53 + 54 55 /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k 56 * alignment is sufficient to prevent this */
+6
arch/powerpc/include/asm/machdep.h
··· 240 * claims to support kexec. 241 */ 242 int (*machine_kexec_prepare)(struct kimage *image); 243 #endif /* CONFIG_KEXEC */ 244 245 #ifdef CONFIG_SUSPEND
··· 240 * claims to support kexec. 241 */ 242 int (*machine_kexec_prepare)(struct kimage *image); 243 + 244 + /* Called to perform the _real_ kexec. 245 + * Do NOT allocate memory or fail here. We are past the point of 246 + * no return. 247 + */ 248 + void (*machine_kexec)(struct kimage *image); 249 #endif /* CONFIG_KEXEC */ 250 251 #ifdef CONFIG_SUSPEND
+4 -1
arch/powerpc/kernel/machine_kexec.c
··· 87 88 save_ftrace_enabled = __ftrace_enabled_save(); 89 90 - default_machine_kexec(image); 91 92 __ftrace_enabled_restore(save_ftrace_enabled); 93
··· 87 88 save_ftrace_enabled = __ftrace_enabled_save(); 89 90 + if (ppc_md.machine_kexec) 91 + ppc_md.machine_kexec(image); 92 + else 93 + default_machine_kexec(image); 94 95 __ftrace_enabled_restore(save_ftrace_enabled); 96
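Note: the machine_kexec hunk gives platforms an override hook in ppc_md while keeping default_machine_kexec() as the fallback. Below is a small hedged C sketch of that "optional ops callback with a default" pattern; struct machine_ops and the function names here are made up for illustration, not taken from the kernel.

#include <stdio.h>

/* Hypothetical ops table; in the hunk above the real one is ppc_md. */
struct machine_ops {
	void (*shutdown)(void);			/* NULL means "use the default" */
};

static void default_shutdown(void)
{
	puts("generic shutdown path");
}

static void quirky_shutdown(void)
{
	puts("platform-specific shutdown path");
}

static void do_shutdown(const struct machine_ops *ops)
{
	/* Prefer the platform hook when one is installed, otherwise
	 * fall back to the generic implementation. */
	if (ops->shutdown)
		ops->shutdown();
	else
		default_shutdown();
}

int main(void)
{
	struct machine_ops plain  = { .shutdown = NULL };
	struct machine_ops quirky = { .shutdown = quirky_shutdown };

	do_shutdown(&plain);			/* generic shutdown path */
	do_shutdown(&quirky);			/* platform-specific shutdown path */
	return 0;
}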
-14
arch/powerpc/kernel/paca.c
··· 27 #ifdef CONFIG_PPC_BOOK3S 28 29 /* 30 - * We only have to have statically allocated lppaca structs on 31 - * legacy iSeries, which supports at most 64 cpus. 32 - */ 33 - #ifdef CONFIG_PPC_ISERIES 34 - #if NR_CPUS < 64 35 - #define NR_LPPACAS NR_CPUS 36 - #else 37 - #define NR_LPPACAS 64 38 - #endif 39 - #else /* not iSeries */ 40 - #define NR_LPPACAS 1 41 - #endif 42 - 43 - /* 44 * The structure which the hypervisor knows about - this structure 45 * should not cross a page boundary. The vpa_init/register_vpa call 46 * is now known to fail if the lppaca structure crosses a page
··· 27 #ifdef CONFIG_PPC_BOOK3S 28 29 /* 30 * The structure which the hypervisor knows about - this structure 31 * should not cross a page boundary. The vpa_init/register_vpa call 32 * is now known to fail if the lppaca structure crosses a page
+5 -3
arch/powerpc/kernel/process.c
··· 353 prime_debug_regs(new_thread); 354 } 355 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 356 static void set_debug_reg_defaults(struct thread_struct *thread) 357 { 358 if (thread->dabr) { ··· 361 set_dabr(0); 362 } 363 } 364 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 365 366 int set_dabr(unsigned long dabr) ··· 672 { 673 discard_lazy_cpu_state(); 674 675 - #ifdef CONFIG_HAVE_HW_BREAKPOINTS 676 flush_ptrace_hw_breakpoint(current); 677 - #else /* CONFIG_HAVE_HW_BREAKPOINTS */ 678 set_debug_reg_defaults(&current->thread); 679 - #endif /* CONFIG_HAVE_HW_BREAKPOINTS */ 680 } 681 682 void
··· 353 prime_debug_regs(new_thread); 354 } 355 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 356 + #ifndef CONFIG_HAVE_HW_BREAKPOINT 357 static void set_debug_reg_defaults(struct thread_struct *thread) 358 { 359 if (thread->dabr) { ··· 360 set_dabr(0); 361 } 362 } 363 + #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ 364 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 365 366 int set_dabr(unsigned long dabr) ··· 670 { 671 discard_lazy_cpu_state(); 672 673 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 674 flush_ptrace_hw_breakpoint(current); 675 + #else /* CONFIG_HAVE_HW_BREAKPOINT */ 676 set_debug_reg_defaults(&current->thread); 677 + #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 678 } 679 680 void
+2 -1
arch/powerpc/mm/numa.c
··· 1516 { 1517 int rc = 0; 1518 1519 - if (firmware_has_feature(FW_FEATURE_VPHN) && 1520 get_lppaca()->shared_proc) { 1521 vphn_enabled = 1; 1522 setup_cpu_associativity_change_counters();
··· 1516 { 1517 int rc = 0; 1518 1519 + /* Disabled until races with load balancing are fixed */ 1520 + if (0 && firmware_has_feature(FW_FEATURE_VPHN) && 1521 get_lppaca()->shared_proc) { 1522 vphn_enabled = 1; 1523 setup_cpu_associativity_change_counters();
+3 -3
arch/powerpc/mm/tlb_hash64.c
··· 38 * neesd to be flushed. This function will either perform the flush 39 * immediately or will batch it up if the current CPU has an active 40 * batch on it. 41 - * 42 - * Must be called from within some kind of spinlock/non-preempt region... 43 */ 44 void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 45 pte_t *ptep, unsigned long pte, int huge) 46 { 47 - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 48 unsigned long vsid, vaddr; 49 unsigned int psize; 50 int ssize; ··· 97 */ 98 if (!batch->active) { 99 flush_hash_page(vaddr, rpte, psize, ssize, 0); 100 return; 101 } 102 ··· 126 batch->index = ++i; 127 if (i >= PPC64_TLB_BATCH_NR) 128 __flush_tlb_pending(batch); 129 } 130 131 /*
··· 38 * neesd to be flushed. This function will either perform the flush 39 * immediately or will batch it up if the current CPU has an active 40 * batch on it. 41 */ 42 void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 43 pte_t *ptep, unsigned long pte, int huge) 44 { 45 + struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); 46 unsigned long vsid, vaddr; 47 unsigned int psize; 48 int ssize; ··· 99 */ 100 if (!batch->active) { 101 flush_hash_page(vaddr, rpte, psize, ssize, 0); 102 + put_cpu_var(ppc64_tlb_batch); 103 return; 104 } 105 ··· 127 batch->index = ++i; 128 if (i >= PPC64_TLB_BATCH_NR) 129 __flush_tlb_pending(batch); 130 + put_cpu_var(ppc64_tlb_batch); 131 } 132 133 /*
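Note: the tlb_hash64 hunk replaces __get_cpu_var() with the get_cpu_var()/put_cpu_var() pair, so the per-CPU flush batch is accessed with preemption disabled rather than relying on the caller's locking, and every return path now drops the reference. A rough kernel-style sketch of the pairing, using a throwaway per-CPU counter (demo_count) instead of the real batch structure.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo_bump(void)
{
	/* get_cpu_var() disables preemption and hands back this CPU's
	 * instance; every exit path must balance it with put_cpu_var(). */
	unsigned long *cnt = &get_cpu_var(demo_count);

	(*cnt)++;
	put_cpu_var(demo_count);
}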
+3 -3
arch/powerpc/platforms/iseries/dt.c
··· 242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ 243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); 244 245 - for (i = 0; i < NR_CPUS; i++) { 246 - if (lppaca_of(i).dyn_proc_status >= 2) 247 continue; 248 249 snprintf(p, 32 - (p - buf), "@%d", i); ··· 251 252 dt_prop_str(dt, "device_type", device_type_cpu); 253 254 - index = lppaca_of(i).dyn_hv_phys_proc_index; 255 d = &xIoHriProcessorVpd[index]; 256 257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
··· 242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ 243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); 244 245 + for (i = 0; i < NR_LPPACAS; i++) { 246 + if (lppaca[i].dyn_proc_status >= 2) 247 continue; 248 249 snprintf(p, 32 - (p - buf), "@%d", i); ··· 251 252 dt_prop_str(dt, "device_type", device_type_cpu); 253 254 + index = lppaca[i].dyn_hv_phys_proc_index; 255 d = &xIoHriProcessorVpd[index]; 256 257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
+1
arch/powerpc/platforms/iseries/setup.c
··· 680 * on but calling this function multiple times is fine. 681 */ 682 identify_cpu(0, mfspr(SPRN_PVR)); 683 684 powerpc_firmware_features |= FW_FEATURE_ISERIES; 685 powerpc_firmware_features |= FW_FEATURE_LPAR;
··· 680 * on but calling this function multiple times is fine. 681 */ 682 identify_cpu(0, mfspr(SPRN_PVR)); 683 + initialise_paca(&boot_paca, 0); 684 685 powerpc_firmware_features |= FW_FEATURE_ISERIES; 686 powerpc_firmware_features |= FW_FEATURE_LPAR;
+1 -1
arch/sh/include/asm/sections.h
··· 3 4 #include <asm-generic/sections.h> 5 6 - extern void __nosave_begin, __nosave_end; 7 extern long __machvec_start, __machvec_end; 8 extern char __uncached_start, __uncached_end; 9 extern char _ebss[];
··· 3 4 #include <asm-generic/sections.h> 5 6 + extern long __nosave_begin, __nosave_end; 7 extern long __machvec_start, __machvec_end; 8 extern char __uncached_start, __uncached_end; 9 extern char _ebss[];
+9 -4
arch/sh/kernel/cpu/sh4/setup-sh7750.c
··· 14 #include <linux/io.h> 15 #include <linux/sh_timer.h> 16 #include <linux/serial_sci.h> 17 - #include <asm/machtypes.h> 18 19 static struct resource rtc_resources[] = { 20 [0] = { ··· 255 256 void __init plat_early_device_setup(void) 257 { 258 if (mach_is_rts7751r2d()) { 259 scif_platform_data.scscr |= SCSCR_CKE1; 260 - early_platform_add_devices(&scif_device, 1); 261 } else { 262 - early_platform_add_devices(&sci_device, 1); 263 - early_platform_add_devices(&scif_device, 1); 264 } 265 266 early_platform_add_devices(sh7750_early_devices,
··· 14 #include <linux/io.h> 15 #include <linux/sh_timer.h> 16 #include <linux/serial_sci.h> 17 + #include <generated/machtypes.h> 18 19 static struct resource rtc_resources[] = { 20 [0] = { ··· 255 256 void __init plat_early_device_setup(void) 257 { 258 + struct platform_device *dev[1]; 259 + 260 if (mach_is_rts7751r2d()) { 261 scif_platform_data.scscr |= SCSCR_CKE1; 262 + dev[0] = &scif_device; 263 + early_platform_add_devices(dev, 1); 264 } else { 265 + dev[0] = &sci_device; 266 + early_platform_add_devices(dev, 1); 267 + dev[0] = &scif_device; 268 + early_platform_add_devices(dev, 1); 269 } 270 271 early_platform_add_devices(sh7750_early_devices,
+10
arch/sh/lib/delay.c
··· 10 void __delay(unsigned long loops) 11 { 12 __asm__ __volatile__( 13 "tst %0, %0\n\t" 14 "1:\t" 15 "bf/s 1b\n\t"
··· 10 void __delay(unsigned long loops) 11 { 12 __asm__ __volatile__( 13 + /* 14 + * ST40-300 appears to have an issue with this code, 15 + * normally taking two cycles each loop, as with all 16 + * other SH variants. If however the branch and the 17 + * delay slot straddle an 8 byte boundary, this increases 18 + * to 3 cycles. 19 + * This align directive ensures this doesn't occur. 20 + */ 21 + ".balign 8\n\t" 22 + 23 "tst %0, %0\n\t" 24 "1:\t" 25 "bf/s 1b\n\t"
+2 -1
arch/sh/mm/cache.c
··· 108 kunmap_atomic(vfrom, KM_USER0); 109 } 110 111 - if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) 112 __flush_purge_region(vto, PAGE_SIZE); 113 114 kunmap_atomic(vto, KM_USER1);
··· 108 kunmap_atomic(vfrom, KM_USER0); 109 } 110 111 + if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || 112 + (vma->vm_flags & VM_EXEC)) 113 __flush_purge_region(vto, PAGE_SIZE); 114 115 kunmap_atomic(vto, KM_USER1);
+6 -1
arch/x86/boot/compressed/mkpiggy.c
··· 62 if (fseek(f, -4L, SEEK_END)) { 63 perror(argv[1]); 64 } 65 - fread(&olen, sizeof olen, 1, f); 66 ilen = ftell(f); 67 olen = getle32(&olen); 68 fclose(f);
··· 62 if (fseek(f, -4L, SEEK_END)) { 63 perror(argv[1]); 64 } 65 + 66 + if (fread(&olen, sizeof(olen), 1, f) != 1) { 67 + perror(argv[1]); 68 + return 1; 69 + } 70 + 71 ilen = ftell(f); 72 olen = getle32(&olen); 73 fclose(f);
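Note: the mkpiggy fix checks fread()'s return value instead of silently using an uninitialized length after a short read. mkpiggy is a host-side build tool, so the same pattern is plain userspace C; the sketch below reads a trailing 32-bit word with equivalent error handling (the file name is whatever the caller passes).

#include <stdio.h>
#include <stdint.h>

int main(int argc, char *argv[])
{
	FILE *f;
	uint32_t olen;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}

	/* The length word lives in the last four bytes of the file;
	 * a short read must be treated as a hard error. */
	if (fseek(f, -4L, SEEK_END) ||
	    fread(&olen, sizeof(olen), 1, f) != 1) {
		perror(argv[1]);
		fclose(f);
		return 1;
	}

	printf("trailer word: %u\n", (unsigned)olen);
	fclose(f);
	return 0;
}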
+5
arch/x86/include/asm/msr-index.h
··· 36 #define MSR_IA32_PERFCTR1 0x000000c2 37 #define MSR_FSB_FREQ 0x000000cd 38 39 #define MSR_MTRRcap 0x000000fe 40 #define MSR_IA32_BBL_CR_CTL 0x00000119 41
··· 36 #define MSR_IA32_PERFCTR1 0x000000c2 37 #define MSR_FSB_FREQ 0x000000cd 38 39 + #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 40 + #define NHM_C3_AUTO_DEMOTE (1UL << 25) 41 + #define NHM_C1_AUTO_DEMOTE (1UL << 26) 42 + #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) 43 + 44 #define MSR_MTRRcap 0x000000fe 45 #define MSR_IA32_BBL_CR_CTL 0x00000119 46
+1 -1
arch/x86/include/asm/uv/uv_bau.h
··· 176 struct bau_msg_header { 177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 178 /* bits 5:0 */ 179 - unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ 180 /* bits 20:6 */ /* first bit in uvhub map */ 181 unsigned int command:8; /* message type */ 182 /* bits 28:21 */
··· 176 struct bau_msg_header { 177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 178 /* bits 5:0 */ 179 + unsigned int base_dest_nodeid:15; /* nasid of the */ 180 /* bits 20:6 */ /* first bit in uvhub map */ 181 unsigned int command:8; /* message type */ 182 /* bits 28:21 */
+3 -3
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
··· 158 { 159 if (c->x86 == 0x06) { 160 if (cpu_has(c, X86_FEATURE_EST)) 161 - printk(KERN_WARNING PFX "Warning: EST-capable CPU " 162 - "detected. The acpi-cpufreq module offers " 163 - "voltage scaling in addition of frequency " 164 "scaling. You should use that instead of " 165 "p4-clockmod, if possible.\n"); 166 switch (c->x86_model) {
··· 158 { 159 if (c->x86 == 0x06) { 160 if (cpu_has(c, X86_FEATURE_EST)) 161 + printk_once(KERN_WARNING PFX "Warning: EST-capable " 162 + "CPU detected. The acpi-cpufreq module offers " 163 + "voltage scaling in addition to frequency " 164 "scaling. You should use that instead of " 165 "p4-clockmod, if possible.\n"); 166 switch (c->x86_model) {
+1 -1
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
··· 195 cmd_incomplete: 196 iowrite16(0, &pcch_hdr->status); 197 spin_unlock(&pcc_lock); 198 - return -EINVAL; 199 } 200 201 static int pcc_cpufreq_target(struct cpufreq_policy *policy,
··· 195 cmd_incomplete: 196 iowrite16(0, &pcch_hdr->status); 197 spin_unlock(&pcc_lock); 198 + return 0; 199 } 200 201 static int pcc_cpufreq_target(struct cpufreq_policy *policy,
+10 -3
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 1537 static int __cpuinit powernowk8_init(void) 1538 { 1539 unsigned int i, supported_cpus = 0, cpu; 1540 1541 for_each_online_cpu(i) { 1542 int rc; ··· 1556 1557 cpb_capable = true; 1558 1559 - register_cpu_notifier(&cpb_nb); 1560 - 1561 msrs = msrs_alloc(); 1562 if (!msrs) { 1563 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); 1564 return -ENOMEM; 1565 } 1566 1567 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1568 ··· 1575 (cpb_enabled ? "on" : "off")); 1576 } 1577 1578 - return cpufreq_register_driver(&cpufreq_amd64_driver); 1579 } 1580 1581 /* driver entry point for term */
··· 1537 static int __cpuinit powernowk8_init(void) 1538 { 1539 unsigned int i, supported_cpus = 0, cpu; 1540 + int rv; 1541 1542 for_each_online_cpu(i) { 1543 int rc; ··· 1555 1556 cpb_capable = true; 1557 1558 msrs = msrs_alloc(); 1559 if (!msrs) { 1560 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); 1561 return -ENOMEM; 1562 } 1563 + 1564 + register_cpu_notifier(&cpb_nb); 1565 1566 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1567 ··· 1574 (cpb_enabled ? "on" : "off")); 1575 } 1576 1577 + rv = cpufreq_register_driver(&cpufreq_amd64_driver); 1578 + if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { 1579 + unregister_cpu_notifier(&cpb_nb); 1580 + msrs_free(msrs); 1581 + msrs = NULL; 1582 + } 1583 + return rv; 1584 } 1585 1586 /* driver entry point for term */
+1 -5
arch/x86/mm/numa_64.c
··· 780 int physnid; 781 int nid = NUMA_NO_NODE; 782 783 - apicid = early_per_cpu(x86_cpu_to_apicid, cpu); 784 - if (apicid != BAD_APICID) 785 - nid = apicid_to_node[apicid]; 786 - if (nid == NUMA_NO_NODE) 787 - nid = early_cpu_to_node(cpu); 788 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); 789 790 /*
··· 780 int physnid; 781 int nid = NUMA_NO_NODE; 782 783 + nid = early_cpu_to_node(cpu); 784 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); 785 786 /*
+1 -2
arch/x86/platform/olpc/olpc_dt.c
··· 140 * wasted bootmem) and hand off chunks of it to callers. 141 */ 142 res = alloc_bootmem(chunk_size); 143 - if (!res) 144 - return NULL; 145 prom_early_allocated += chunk_size; 146 memset(res, 0, chunk_size); 147 free_mem = chunk_size;
··· 140 * wasted bootmem) and hand off chunks of it to callers. 141 */ 142 res = alloc_bootmem(chunk_size); 143 + BUG_ON(!res); 144 prom_early_allocated += chunk_size; 145 memset(res, 0, chunk_size); 146 free_mem = chunk_size;
+2 -2
arch/x86/platform/uv/tlb_uv.c
··· 1364 memset(bd2, 0, sizeof(struct bau_desc)); 1365 bd2->header.sw_ack_flag = 1; 1366 /* 1367 - * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub 1368 * in the partition. The bit map will indicate uvhub numbers, 1369 * which are 0-N in a partition. Pnodes are unique system-wide. 1370 */ 1371 - bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; 1372 bd2->header.dest_subnodeid = 0x10; /* the LB */ 1373 bd2->header.command = UV_NET_ENDPOINT_INTD; 1374 bd2->header.int_both = 1;
··· 1364 memset(bd2, 0, sizeof(struct bau_desc)); 1365 bd2->header.sw_ack_flag = 1; 1366 /* 1367 + * base_dest_nodeid is the nasid of the first uvhub 1368 * in the partition. The bit map will indicate uvhub numbers, 1369 * which are 0-N in a partition. Pnodes are unique system-wide. 1370 */ 1371 + bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); 1372 bd2->header.dest_subnodeid = 0x10; /* the LB */ 1373 bd2->header.command = UV_NET_ENDPOINT_INTD; 1374 bd2->header.int_both = 1;
+6 -12
block/blk-core.c
··· 352 WARN_ON(!irqs_disabled()); 353 354 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 355 - __blk_run_queue(q); 356 } 357 EXPORT_SYMBOL(blk_start_queue); 358 ··· 403 /** 404 * __blk_run_queue - run a single device queue 405 * @q: The queue to run 406 * 407 * Description: 408 * See @blk_run_queue. This variant must be called with the queue lock 409 * held and interrupts disabled. 410 * 411 */ 412 - void __blk_run_queue(struct request_queue *q) 413 { 414 blk_remove_plug(q); 415 ··· 424 * Only recurse once to avoid overrunning the stack, let the unplug 425 * handling reinvoke the handler shortly if we already got there. 426 */ 427 - if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 428 q->request_fn(q); 429 queue_flag_clear(QUEUE_FLAG_REENTER, q); 430 } else { ··· 447 unsigned long flags; 448 449 spin_lock_irqsave(q->queue_lock, flags); 450 - __blk_run_queue(q); 451 spin_unlock_irqrestore(q->queue_lock, flags); 452 } 453 EXPORT_SYMBOL(blk_run_queue); ··· 1054 1055 drive_stat_acct(rq, 1); 1056 __elv_add_request(q, rq, where, 0); 1057 - __blk_run_queue(q); 1058 spin_unlock_irqrestore(q->queue_lock, flags); 1059 } 1060 EXPORT_SYMBOL(blk_insert_request); ··· 2610 return queue_work(kblockd_workqueue, work); 2611 } 2612 EXPORT_SYMBOL(kblockd_schedule_work); 2613 - 2614 - int kblockd_schedule_delayed_work(struct request_queue *q, 2615 - struct delayed_work *dwork, unsigned long delay) 2616 - { 2617 - return queue_delayed_work(kblockd_workqueue, dwork, delay); 2618 - } 2619 - EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2620 2621 int __init blk_dev_init(void) 2622 {
··· 352 WARN_ON(!irqs_disabled()); 353 354 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 355 + __blk_run_queue(q, false); 356 } 357 EXPORT_SYMBOL(blk_start_queue); 358 ··· 403 /** 404 * __blk_run_queue - run a single device queue 405 * @q: The queue to run 406 + * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. 407 * 408 * Description: 409 * See @blk_run_queue. This variant must be called with the queue lock 410 * held and interrupts disabled. 411 * 412 */ 413 + void __blk_run_queue(struct request_queue *q, bool force_kblockd) 414 { 415 blk_remove_plug(q); 416 ··· 423 * Only recurse once to avoid overrunning the stack, let the unplug 424 * handling reinvoke the handler shortly if we already got there. 425 */ 426 + if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 427 q->request_fn(q); 428 queue_flag_clear(QUEUE_FLAG_REENTER, q); 429 } else { ··· 446 unsigned long flags; 447 448 spin_lock_irqsave(q->queue_lock, flags); 449 + __blk_run_queue(q, false); 450 spin_unlock_irqrestore(q->queue_lock, flags); 451 } 452 EXPORT_SYMBOL(blk_run_queue); ··· 1053 1054 drive_stat_acct(rq, 1); 1055 __elv_add_request(q, rq, where, 0); 1056 + __blk_run_queue(q, false); 1057 spin_unlock_irqrestore(q->queue_lock, flags); 1058 } 1059 EXPORT_SYMBOL(blk_insert_request); ··· 2609 return queue_work(kblockd_workqueue, work); 2610 } 2611 EXPORT_SYMBOL(kblockd_schedule_work); 2612 2613 int __init blk_dev_init(void) 2614 {
+5 -3
block/blk-flush.c
··· 66 67 /* 68 * Moving a request silently to empty queue_head may stall the 69 - * queue. Kick the queue in those cases. 70 */ 71 if (was_empty && next_rq) 72 - __blk_run_queue(q); 73 } 74 75 static void pre_flush_end_io(struct request *rq, int error) ··· 132 BUG(); 133 } 134 135 - elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 136 return rq; 137 } 138
··· 66 67 /* 68 * Moving a request silently to empty queue_head may stall the 69 + * queue. Kick the queue in those cases. This function is called 70 + * from request completion path and calling directly into 71 + * request_fn may confuse the driver. Always use kblockd. 72 */ 73 if (was_empty && next_rq) 74 + __blk_run_queue(q, true); 75 } 76 77 static void pre_flush_end_io(struct request *rq, int error) ··· 130 BUG(); 131 } 132 133 + elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); 134 return rq; 135 } 136
+1 -1
block/blk-lib.c
··· 132 } 133 134 /** 135 - * blkdev_issue_zeroout generate number of zero filed write bios 136 * @bdev: blockdev to issue 137 * @sector: start sector 138 * @nr_sects: number of sectors to write
··· 132 } 133 134 /** 135 + * blkdev_issue_zeroout - generate number of zero filed write bios 136 * @bdev: blockdev to issue 137 * @sector: start sector 138 * @nr_sects: number of sectors to write
+18 -11
block/blk-throttle.c
··· 20 /* Throttling is performed over 100ms slice and after that slice is renewed */ 21 static unsigned long throtl_slice = HZ/10; /* 100 ms */ 22 23 struct throtl_rb_root { 24 struct rb_root rb; 25 struct rb_node *left; ··· 350 update_min_dispatch_time(st); 351 352 if (time_before_eq(st->min_disptime, jiffies)) 353 - throtl_schedule_delayed_work(td->queue, 0); 354 else 355 - throtl_schedule_delayed_work(td->queue, 356 - (st->min_disptime - jiffies)); 357 } 358 359 static inline void ··· 819 } 820 821 /* Call with queue lock held */ 822 - void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) 823 { 824 825 - struct throtl_data *td = q->td; 826 struct delayed_work *dwork = &td->throtl_work; 827 828 if (total_nr_queued(td) > 0) { ··· 831 * Cancel that and schedule a new one. 832 */ 833 __cancel_delayed_work(dwork); 834 - kblockd_schedule_delayed_work(q, dwork, delay); 835 throtl_log(td, "schedule work. delay=%lu jiffies=%lu", 836 delay, jiffies); 837 } 838 } 839 - EXPORT_SYMBOL(throtl_schedule_delayed_work); 840 841 static void 842 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) ··· 923 smp_mb__after_atomic_inc(); 924 925 /* Schedule a work now to process the limit change */ 926 - throtl_schedule_delayed_work(td->queue, 0); 927 } 928 929 static void throtl_update_blkio_group_write_bps(void *key, ··· 937 smp_mb__before_atomic_inc(); 938 atomic_inc(&td->limits_changed); 939 smp_mb__after_atomic_inc(); 940 - throtl_schedule_delayed_work(td->queue, 0); 941 } 942 943 static void throtl_update_blkio_group_read_iops(void *key, ··· 951 smp_mb__before_atomic_inc(); 952 atomic_inc(&td->limits_changed); 953 smp_mb__after_atomic_inc(); 954 - throtl_schedule_delayed_work(td->queue, 0); 955 } 956 957 static void throtl_update_blkio_group_write_iops(void *key, ··· 965 smp_mb__before_atomic_inc(); 966 atomic_inc(&td->limits_changed); 967 smp_mb__after_atomic_inc(); 968 - throtl_schedule_delayed_work(td->queue, 0); 969 } 970 971 void throtl_shutdown_timer_wq(struct request_queue *q) ··· 1138 1139 static int __init throtl_init(void) 1140 { 1141 blkio_policy_register(&blkio_policy_throtl); 1142 return 0; 1143 }
··· 20 /* Throttling is performed over 100ms slice and after that slice is renewed */ 21 static unsigned long throtl_slice = HZ/10; /* 100 ms */ 22 23 + /* A workqueue to queue throttle related work */ 24 + static struct workqueue_struct *kthrotld_workqueue; 25 + static void throtl_schedule_delayed_work(struct throtl_data *td, 26 + unsigned long delay); 27 + 28 struct throtl_rb_root { 29 struct rb_root rb; 30 struct rb_node *left; ··· 345 update_min_dispatch_time(st); 346 347 if (time_before_eq(st->min_disptime, jiffies)) 348 + throtl_schedule_delayed_work(td, 0); 349 else 350 + throtl_schedule_delayed_work(td, (st->min_disptime - jiffies)); 351 } 352 353 static inline void ··· 815 } 816 817 /* Call with queue lock held */ 818 + static void 819 + throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay) 820 { 821 822 struct delayed_work *dwork = &td->throtl_work; 823 824 if (total_nr_queued(td) > 0) { ··· 827 * Cancel that and schedule a new one. 828 */ 829 __cancel_delayed_work(dwork); 830 + queue_delayed_work(kthrotld_workqueue, dwork, delay); 831 throtl_log(td, "schedule work. delay=%lu jiffies=%lu", 832 delay, jiffies); 833 } 834 } 835 836 static void 837 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) ··· 920 smp_mb__after_atomic_inc(); 921 922 /* Schedule a work now to process the limit change */ 923 + throtl_schedule_delayed_work(td, 0); 924 } 925 926 static void throtl_update_blkio_group_write_bps(void *key, ··· 934 smp_mb__before_atomic_inc(); 935 atomic_inc(&td->limits_changed); 936 smp_mb__after_atomic_inc(); 937 + throtl_schedule_delayed_work(td, 0); 938 } 939 940 static void throtl_update_blkio_group_read_iops(void *key, ··· 948 smp_mb__before_atomic_inc(); 949 atomic_inc(&td->limits_changed); 950 smp_mb__after_atomic_inc(); 951 + throtl_schedule_delayed_work(td, 0); 952 } 953 954 static void throtl_update_blkio_group_write_iops(void *key, ··· 962 smp_mb__before_atomic_inc(); 963 atomic_inc(&td->limits_changed); 964 smp_mb__after_atomic_inc(); 965 + throtl_schedule_delayed_work(td, 0); 966 } 967 968 void throtl_shutdown_timer_wq(struct request_queue *q) ··· 1135 1136 static int __init throtl_init(void) 1137 { 1138 + kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); 1139 + if (!kthrotld_workqueue) 1140 + panic("Failed to create kthrotld\n"); 1141 + 1142 blkio_policy_register(&blkio_policy_throtl); 1143 return 0; 1144 }
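Note: blk-throttle now owns a dedicated workqueue created with WQ_MEM_RECLAIM instead of borrowing kblockd (the kblockd_schedule_delayed_work() export it relied on is removed in the blk-core hunk further up). A hedged, module-style sketch of creating such a queue and putting delayed work on it; demo_wq, demo_work and demo_fn are placeholder names.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	/* deferred work runs here, possibly while memory is being reclaimed */
}
static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread, so queued work can
	 * still make forward progress under memory pressure. */
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	queue_delayed_work(demo_wq, &demo_work, HZ / 10);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");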
+3 -3
block/cfq-iosched.c
··· 3355 cfqd->busy_queues > 1) { 3356 cfq_del_timer(cfqd, cfqq); 3357 cfq_clear_cfqq_wait_request(cfqq); 3358 - __blk_run_queue(cfqd->queue); 3359 } else { 3360 cfq_blkiocg_update_idle_time_stats( 3361 &cfqq->cfqg->blkg); ··· 3370 * this new queue is RT and the current one is BE 3371 */ 3372 cfq_preempt_queue(cfqd, cfqq); 3373 - __blk_run_queue(cfqd->queue); 3374 } 3375 } 3376 ··· 3731 struct request_queue *q = cfqd->queue; 3732 3733 spin_lock_irq(q->queue_lock); 3734 - __blk_run_queue(cfqd->queue); 3735 spin_unlock_irq(q->queue_lock); 3736 } 3737
··· 3355 cfqd->busy_queues > 1) { 3356 cfq_del_timer(cfqd, cfqq); 3357 cfq_clear_cfqq_wait_request(cfqq); 3358 + __blk_run_queue(cfqd->queue, false); 3359 } else { 3360 cfq_blkiocg_update_idle_time_stats( 3361 &cfqq->cfqg->blkg); ··· 3370 * this new queue is RT and the current one is BE 3371 */ 3372 cfq_preempt_queue(cfqd, cfqq); 3373 + __blk_run_queue(cfqd->queue, false); 3374 } 3375 } 3376 ··· 3731 struct request_queue *q = cfqd->queue; 3732 3733 spin_lock_irq(q->queue_lock); 3734 + __blk_run_queue(cfqd->queue, false); 3735 spin_unlock_irq(q->queue_lock); 3736 } 3737
+2 -2
block/elevator.c
··· 602 */ 603 elv_drain_elevator(q); 604 while (q->rq.elvpriv) { 605 - __blk_run_queue(q); 606 spin_unlock_irq(q->queue_lock); 607 msleep(10); 608 spin_lock_irq(q->queue_lock); ··· 651 * with anything. There's no point in delaying queue 652 * processing. 653 */ 654 - __blk_run_queue(q); 655 break; 656 657 case ELEVATOR_INSERT_SORT:
··· 602 */ 603 elv_drain_elevator(q); 604 while (q->rq.elvpriv) { 605 + __blk_run_queue(q, false); 606 spin_unlock_irq(q->queue_lock); 607 msleep(10); 608 spin_lock_irq(q->queue_lock); ··· 651 * with anything. There's no point in delaying queue 652 * processing. 653 */ 654 + __blk_run_queue(q, false); 655 break; 656 657 case ELEVATOR_INSERT_SORT:
+6 -1
drivers/acpi/acpica/aclocal.h
··· 416 u8 originally_enabled; /* True if GPE was originally enabled */ 417 }; 418 419 union acpi_gpe_dispatch_info { 420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 421 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ 422 - struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ 423 }; 424 425 /*
··· 416 u8 originally_enabled; /* True if GPE was originally enabled */ 417 }; 418 419 + struct acpi_gpe_notify_object { 420 + struct acpi_namespace_node *node; 421 + struct acpi_gpe_notify_object *next; 422 + }; 423 + 424 union acpi_gpe_dispatch_info { 425 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 426 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ 427 + struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */ 428 }; 429 430 /*
+13 -4
drivers/acpi/acpica/evgpe.c
··· 457 acpi_status status; 458 struct acpi_gpe_event_info *local_gpe_event_info; 459 struct acpi_evaluate_info *info; 460 461 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 462 ··· 509 * from this thread -- because handlers may in turn run other 510 * control methods. 511 */ 512 - status = 513 - acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. 514 - device_node, 515 - ACPI_NOTIFY_DEVICE_WAKE); 516 break; 517 518 case ACPI_GPE_DISPATCH_METHOD:
··· 457 acpi_status status; 458 struct acpi_gpe_event_info *local_gpe_event_info; 459 struct acpi_evaluate_info *info; 460 + struct acpi_gpe_notify_object *notify_object; 461 462 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 463 ··· 508 * from this thread -- because handlers may in turn run other 509 * control methods. 510 */ 511 + status = acpi_ev_queue_notify_request( 512 + local_gpe_event_info->dispatch.device.node, 513 + ACPI_NOTIFY_DEVICE_WAKE); 514 + 515 + notify_object = local_gpe_event_info->dispatch.device.next; 516 + while (ACPI_SUCCESS(status) && notify_object) { 517 + status = acpi_ev_queue_notify_request( 518 + notify_object->node, 519 + ACPI_NOTIFY_DEVICE_WAKE); 520 + notify_object = notify_object->next; 521 + } 522 + 523 break; 524 525 case ACPI_GPE_DISPATCH_METHOD:
+37 -13
drivers/acpi/acpica/evxfgpe.c
··· 198 acpi_status status = AE_BAD_PARAMETER; 199 struct acpi_gpe_event_info *gpe_event_info; 200 struct acpi_namespace_node *device_node; 201 acpi_cpu_flags flags; 202 203 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); 204 ··· 223 goto unlock_and_exit; 224 } 225 226 /* 227 * If there is no method or handler for this GPE, then the 228 * wake_device will be notified whenever this GPE fires (aka 229 * "implicit notify") Note: The GPE is assumed to be 230 * level-triggered (for windows compatibility). 231 */ 232 - if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 233 - ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { 234 - 235 - /* Validate wake_device is of type Device */ 236 - 237 - device_node = ACPI_CAST_PTR(struct acpi_namespace_node, 238 - wake_device); 239 - if (device_node->type != ACPI_TYPE_DEVICE) { 240 - goto unlock_and_exit; 241 - } 242 - gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | 243 - ACPI_GPE_LEVEL_TRIGGERED); 244 - gpe_event_info->dispatch.device_node = device_node; 245 } 246 247 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 248 status = AE_OK; 249
··· 198 acpi_status status = AE_BAD_PARAMETER; 199 struct acpi_gpe_event_info *gpe_event_info; 200 struct acpi_namespace_node *device_node; 201 + struct acpi_gpe_notify_object *notify_object; 202 acpi_cpu_flags flags; 203 + u8 gpe_dispatch_mask; 204 205 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); 206 ··· 221 goto unlock_and_exit; 222 } 223 224 + if (wake_device == ACPI_ROOT_OBJECT) { 225 + goto out; 226 + } 227 + 228 /* 229 * If there is no method or handler for this GPE, then the 230 * wake_device will be notified whenever this GPE fires (aka 231 * "implicit notify") Note: The GPE is assumed to be 232 * level-triggered (for windows compatibility). 233 */ 234 + gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK; 235 + if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE 236 + && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) { 237 + goto out; 238 } 239 240 + /* Validate wake_device is of type Device */ 241 + 242 + device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); 243 + if (device_node->type != ACPI_TYPE_DEVICE) { 244 + goto unlock_and_exit; 245 + } 246 + 247 + if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) { 248 + gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | 249 + ACPI_GPE_LEVEL_TRIGGERED); 250 + gpe_event_info->dispatch.device.node = device_node; 251 + gpe_event_info->dispatch.device.next = NULL; 252 + } else { 253 + /* There are multiple devices to notify implicitly. */ 254 + 255 + notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object)); 256 + if (!notify_object) { 257 + status = AE_NO_MEMORY; 258 + goto unlock_and_exit; 259 + } 260 + 261 + notify_object->node = device_node; 262 + notify_object->next = gpe_event_info->dispatch.device.next; 263 + gpe_event_info->dispatch.device.next = notify_object; 264 + } 265 + 266 + out: 267 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 268 status = AE_OK; 269
+14 -6
drivers/acpi/debugfs.c
··· 26 size_t count, loff_t *ppos) 27 { 28 static char *buf; 29 - static int uncopied_bytes; 30 struct acpi_table_header table; 31 acpi_status status; 32 ··· 39 if (copy_from_user(&table, user_buf, 40 sizeof(struct acpi_table_header))) 41 return -EFAULT; 42 - uncopied_bytes = table.length; 43 - buf = kzalloc(uncopied_bytes, GFP_KERNEL); 44 if (!buf) 45 return -ENOMEM; 46 } 47 48 - if (uncopied_bytes < count) { 49 - kfree(buf); 50 return -EINVAL; 51 - } 52 53 if (copy_from_user(buf + (*ppos), user_buf, count)) { 54 kfree(buf); 55 return -EFAULT; 56 } 57 ··· 66 if (!uncopied_bytes) { 67 status = acpi_install_method(buf); 68 kfree(buf); 69 if (ACPI_FAILURE(status)) 70 return -EINVAL; 71 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
··· 26 size_t count, loff_t *ppos) 27 { 28 static char *buf; 29 + static u32 max_size; 30 + static u32 uncopied_bytes; 31 + 32 struct acpi_table_header table; 33 acpi_status status; 34 ··· 37 if (copy_from_user(&table, user_buf, 38 sizeof(struct acpi_table_header))) 39 return -EFAULT; 40 + uncopied_bytes = max_size = table.length; 41 + buf = kzalloc(max_size, GFP_KERNEL); 42 if (!buf) 43 return -ENOMEM; 44 } 45 46 + if (buf == NULL) 47 return -EINVAL; 48 + 49 + if ((*ppos > max_size) || 50 + (*ppos + count > max_size) || 51 + (*ppos + count < count) || 52 + (count > uncopied_bytes)) 53 + return -EINVAL; 54 55 if (copy_from_user(buf + (*ppos), user_buf, count)) { 56 kfree(buf); 57 + buf = NULL; 58 return -EFAULT; 59 } 60 ··· 59 if (!uncopied_bytes) { 60 status = acpi_install_method(buf); 61 kfree(buf); 62 + buf = NULL; 63 if (ACPI_FAILURE(status)) 64 return -EINVAL; 65 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
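Note: the debugfs custom_method fix remembers the table length declared in the ACPI header and rejects any chunk whose offset or length falls outside it, including the case where offset + count wraps around. The same arithmetic as a small runnable C helper; max_size and uncopied stand in for the driver's static state.

#include <stdio.h>
#include <stddef.h>

/* Returns 0 when writing 'count' bytes at offset 'pos' stays inside a
 * buffer of 'max_size' bytes with 'uncopied' bytes still expected. */
static int check_write(size_t pos, size_t count, size_t max_size,
		       size_t uncopied)
{
	if (pos > max_size ||		/* starts past the end */
	    pos + count > max_size ||	/* runs past the end */
	    pos + count < count ||	/* offset + length wrapped around */
	    count > uncopied)		/* more than was declared up front */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_write(0, 16, 64, 64));	/*  0: fits */
	printf("%d\n", check_write(60, 16, 64, 64));	/* -1: runs past end */
	printf("%d\n", check_write(0, 128, 64, 64));	/* -1: longer than table */
	return 0;
}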
-5
drivers/block/loop.c
··· 78 79 #include <asm/uaccess.h> 80 81 - static DEFINE_MUTEX(loop_mutex); 82 static LIST_HEAD(loop_devices); 83 static DEFINE_MUTEX(loop_devices_mutex); 84 ··· 1500 { 1501 struct loop_device *lo = bdev->bd_disk->private_data; 1502 1503 - mutex_lock(&loop_mutex); 1504 mutex_lock(&lo->lo_ctl_mutex); 1505 lo->lo_refcnt++; 1506 mutex_unlock(&lo->lo_ctl_mutex); 1507 - mutex_unlock(&loop_mutex); 1508 1509 return 0; 1510 } ··· 1512 struct loop_device *lo = disk->private_data; 1513 int err; 1514 1515 - mutex_lock(&loop_mutex); 1516 mutex_lock(&lo->lo_ctl_mutex); 1517 1518 if (--lo->lo_refcnt) ··· 1536 out: 1537 mutex_unlock(&lo->lo_ctl_mutex); 1538 out_unlocked: 1539 - mutex_unlock(&loop_mutex); 1540 return 0; 1541 } 1542
··· 78 79 #include <asm/uaccess.h> 80 81 static LIST_HEAD(loop_devices); 82 static DEFINE_MUTEX(loop_devices_mutex); 83 ··· 1501 { 1502 struct loop_device *lo = bdev->bd_disk->private_data; 1503 1504 mutex_lock(&lo->lo_ctl_mutex); 1505 lo->lo_refcnt++; 1506 mutex_unlock(&lo->lo_ctl_mutex); 1507 1508 return 0; 1509 } ··· 1515 struct loop_device *lo = disk->private_data; 1516 int err; 1517 1518 mutex_lock(&lo->lo_ctl_mutex); 1519 1520 if (--lo->lo_refcnt) ··· 1540 out: 1541 mutex_unlock(&lo->lo_ctl_mutex); 1542 out_unlocked: 1543 return 0; 1544 } 1545
+8
drivers/char/ipmi/ipmi_si_intf.c
··· 900 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 901 #endif 902 903 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); 904 905 if (smi_info->thread)
··· 900 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 901 #endif 902 903 + /* 904 + * last_timeout_jiffies is updated here to avoid 905 + * smi_timeout() handler passing very large time_diff 906 + * value to smi_event_handler() that causes 907 + * the send command to abort. 908 + */ 909 + smi_info->last_timeout_jiffies = jiffies; 910 + 911 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); 912 913 if (smi_info->thread)
+8
drivers/char/virtio_console.c
··· 388 unsigned int len; 389 int ret; 390 391 vq = port->in_vq; 392 if (port->inbuf) 393 buf = port->inbuf; ··· 474 void *buf; 475 unsigned int len; 476 477 while ((buf = virtqueue_get_buf(port->out_vq, &len))) { 478 kfree(buf); 479 port->outvq_full = false;
··· 388 unsigned int len; 389 int ret; 390 391 + if (!port->portdev) { 392 + /* Device has been unplugged. vqs are already gone. */ 393 + return; 394 + } 395 vq = port->in_vq; 396 if (port->inbuf) 397 buf = port->inbuf; ··· 470 void *buf; 471 unsigned int len; 472 473 + if (!port->portdev) { 474 + /* Device has been unplugged. vqs are already gone. */ 475 + return; 476 + } 477 while ((buf = virtqueue_get_buf(port->out_vq, &len))) { 478 kfree(buf); 479 port->outvq_full = false;
+15 -12
drivers/cpufreq/cpufreq.c
··· 1919 1920 ret = sysdev_driver_register(&cpu_sysdev_class, 1921 &cpufreq_sysdev_driver); 1922 1923 - if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1924 int i; 1925 ret = -ENODEV; 1926 ··· 1937 if (ret) { 1938 dprintk("no CPU initialized for driver %s\n", 1939 driver_data->name); 1940 - sysdev_driver_unregister(&cpu_sysdev_class, 1941 - &cpufreq_sysdev_driver); 1942 - 1943 - spin_lock_irqsave(&cpufreq_driver_lock, flags); 1944 - cpufreq_driver = NULL; 1945 - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1946 } 1947 } 1948 1949 - if (!ret) { 1950 - register_hotcpu_notifier(&cpufreq_cpu_notifier); 1951 - dprintk("driver %s up and running\n", driver_data->name); 1952 - cpufreq_debug_enable_ratelimit(); 1953 - } 1954 1955 return ret; 1956 } 1957 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
··· 1919 1920 ret = sysdev_driver_register(&cpu_sysdev_class, 1921 &cpufreq_sysdev_driver); 1922 + if (ret) 1923 + goto err_null_driver; 1924 1925 + if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1926 int i; 1927 ret = -ENODEV; 1928 ··· 1935 if (ret) { 1936 dprintk("no CPU initialized for driver %s\n", 1937 driver_data->name); 1938 + goto err_sysdev_unreg; 1939 } 1940 } 1941 1942 + register_hotcpu_notifier(&cpufreq_cpu_notifier); 1943 + dprintk("driver %s up and running\n", driver_data->name); 1944 + cpufreq_debug_enable_ratelimit(); 1945 1946 + return 0; 1947 + err_sysdev_unreg: 1948 + sysdev_driver_unregister(&cpu_sysdev_class, 1949 + &cpufreq_sysdev_driver); 1950 + err_null_driver: 1951 + spin_lock_irqsave(&cpufreq_driver_lock, flags); 1952 + cpufreq_driver = NULL; 1953 + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1954 return ret; 1955 } 1956 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
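
The cpufreq rework above folds the failure handling into a single exit ladder so each error path unwinds exactly what was set up before it. A small standalone sketch of that goto-unwind idiom follows; the resource names are made up for illustration and stand in for the sysdev registration and driver pointer.

#include <stdio.h>

/* Hypothetical resources standing in for sysdev registration etc. */
static int acquire_a(void) { return 0; }
static void release_a(void) { puts("release_a"); }
static int acquire_b(void) { return -1; }   /* force the error path */

static int register_thing(void)
{
    int ret;

    ret = acquire_a();
    if (ret)
        goto err_out;

    ret = acquire_b();
    if (ret)
        goto err_release_a;    /* unwind only what was acquired */

    return 0;

err_release_a:
    release_a();
err_out:
    return ret;
}

int main(void)
{
    printf("register_thing() = %d\n", register_thing());
    return 0;
}
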
+2 -2
drivers/gpu/drm/drm_fb_helper.c
··· 677 struct drm_crtc_helper_funcs *crtc_funcs; 678 u16 *red, *green, *blue, *transp; 679 struct drm_crtc *crtc; 680 - int i, rc = 0; 681 int start; 682 683 for (i = 0; i < fb_helper->crtc_count; i++) { ··· 690 transp = cmap->transp; 691 start = cmap->start; 692 693 - for (i = 0; i < cmap->len; i++) { 694 u16 hred, hgreen, hblue, htransp = 0xffff; 695 696 hred = *red++;
··· 677 struct drm_crtc_helper_funcs *crtc_funcs; 678 u16 *red, *green, *blue, *transp; 679 struct drm_crtc *crtc; 680 + int i, j, rc = 0; 681 int start; 682 683 for (i = 0; i < fb_helper->crtc_count; i++) { ··· 690 transp = cmap->transp; 691 start = cmap->start; 692 693 + for (j = 0; j < cmap->len; j++) { 694 u16 hred, hgreen, hblue, htransp = 0xffff; 695 696 hred = *red++;
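
The drm_fb_helper fix above introduces a separate inner index because the gamma loop was reusing the outer CRTC counter, which terminates the outer loop after one pass. A quick runnable demonstration of why that matters; the counts here are arbitrary.

#include <stdio.h>

int main(void)
{
    int crtc_count = 3, cmap_len = 5;
    int outer_runs;

    /* Buggy pattern: the inner loop reuses i, so the outer loop sees
     * i == cmap_len afterwards and exits after a single pass. */
    outer_runs = 0;
    for (int i = 0; i < crtc_count; i++) {
        outer_runs++;
        for (i = 0; i < cmap_len; i++)
            ;
    }
    printf("shared index:   %d outer pass(es)\n", outer_runs);  /* 1 */

    /* Fixed pattern: a separate j leaves the outer index alone. */
    outer_runs = 0;
    for (int i = 0; i < crtc_count; i++) {
        outer_runs++;
        for (int j = 0; j < cmap_len; j++)
            ;
    }
    printf("separate index: %d outer pass(es)\n", outer_runs);  /* 3 */
    return 0;
}
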
+10
drivers/gpu/drm/i915/i915_reg.h
··· 1566 1567 /* Backlight control */ 1568 #define BLC_PWM_CTL 0x61254 1569 #define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1570 /* 1571 * This is the number of cycles out of the backlight modulation cycle for which 1572 * the backlight is on.
··· 1566 1567 /* Backlight control */ 1568 #define BLC_PWM_CTL 0x61254 1569 + #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 1570 #define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1571 + #define BLM_COMBINATION_MODE (1 << 30) 1572 + /* 1573 + * This is the most significant 15 bits of the number of backlight cycles in a 1574 + * complete cycle of the modulated backlight control. 1575 + * 1576 + * The actual value is this field multiplied by two. 1577 + */ 1578 + #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) 1579 + #define BLM_LEGACY_MODE (1 << 16) 1580 /* 1581 * This is the number of cycles out of the backlight modulation cycle for which 1582 * the backlight is on.
+36
drivers/gpu/drm/i915/intel_panel.c
··· 30 31 #include "intel_drv.h" 32 33 void 34 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 35 struct drm_display_mode *adjusted_mode) ··· 112 dev_priv->pch_pf_size = (width << 16) | height; 113 } 114 115 static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 116 { 117 u32 val; ··· 181 if (INTEL_INFO(dev)->gen < 4) 182 max &= ~1; 183 } 184 } 185 186 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); ··· 201 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 202 if (IS_PINEVIEW(dev)) 203 val >>= 1; 204 } 205 206 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); ··· 231 232 if (HAS_PCH_SPLIT(dev)) 233 return intel_pch_panel_set_backlight(dev, level); 234 tmp = I915_READ(BLC_PWM_CTL); 235 if (IS_PINEVIEW(dev)) { 236 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
··· 30 31 #include "intel_drv.h" 32 33 + #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 34 + 35 void 36 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 37 struct drm_display_mode *adjusted_mode) ··· 110 dev_priv->pch_pf_size = (width << 16) | height; 111 } 112 113 + static int is_backlight_combination_mode(struct drm_device *dev) 114 + { 115 + struct drm_i915_private *dev_priv = dev->dev_private; 116 + 117 + if (INTEL_INFO(dev)->gen >= 4) 118 + return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; 119 + 120 + if (IS_GEN2(dev)) 121 + return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; 122 + 123 + return 0; 124 + } 125 + 126 static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 127 { 128 u32 val; ··· 166 if (INTEL_INFO(dev)->gen < 4) 167 max &= ~1; 168 } 169 + 170 + if (is_backlight_combination_mode(dev)) 171 + max *= 0xff; 172 } 173 174 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); ··· 183 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 184 if (IS_PINEVIEW(dev)) 185 val >>= 1; 186 + 187 + if (is_backlight_combination_mode(dev)){ 188 + u8 lbpc; 189 + 190 + val &= ~1; 191 + pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); 192 + val *= lbpc; 193 + } 194 } 195 196 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); ··· 205 206 if (HAS_PCH_SPLIT(dev)) 207 return intel_pch_panel_set_backlight(dev, level); 208 + 209 + if (is_backlight_combination_mode(dev)){ 210 + u32 max = intel_panel_get_max_backlight(dev); 211 + u8 lbpc; 212 + 213 + lbpc = level * 0xfe / max + 1; 214 + level /= lbpc; 215 + pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); 216 + } 217 + 218 tmp = I915_READ(BLC_PWM_CTL); 219 if (IS_PINEVIEW(dev)) { 220 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+2 -1
drivers/gpu/drm/nouveau/nouveau_dma.c
··· 83 return ret; 84 85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 86 - ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); 87 if (ret) 88 return ret; 89
··· 83 return ret; 84 85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 86 + ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, 87 + &chan->m2mf_ntfy); 88 if (ret) 89 return ret; 90
+2 -1
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 853 extern int nouveau_notifier_init_channel(struct nouveau_channel *); 854 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 855 extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, 856 - int cout, uint32_t *offset); 857 extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); 858 extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, 859 struct drm_file *);
··· 853 extern int nouveau_notifier_init_channel(struct nouveau_channel *); 854 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 855 extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, 856 + int cout, uint32_t start, uint32_t end, 857 + uint32_t *offset); 858 extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); 859 extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, 860 struct drm_file *);
+4 -2
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 759 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 760 mem->page_alignment << PAGE_SHIFT, size_nc, 761 (nvbo->tile_flags >> 8) & 0x3ff, &node); 762 - if (ret) 763 - return ret; 764 765 node->page_shift = 12; 766 if (nvbo->vma.node)
··· 759 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 760 mem->page_alignment << PAGE_SHIFT, size_nc, 761 (nvbo->tile_flags >> 8) & 0x3ff, &node); 762 + if (ret) { 763 + mem->mm_node = NULL; 764 + return (ret == -ENOSPC) ? 0 : ret; 765 + } 766 767 node->page_shift = 12; 768 if (nvbo->vma.node)
+1 -1
drivers/gpu/drm/nouveau/nouveau_mm.c
··· 123 return 0; 124 } 125 126 - return -ENOMEM; 127 } 128 129 int
··· 123 return 0; 124 } 125 126 + return -ENOSPC; 127 } 128 129 int
+7 -4
drivers/gpu/drm/nouveau/nouveau_notifier.c
··· 95 96 int 97 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, 98 - int size, uint32_t *b_offset) 99 { 100 struct drm_device *dev = chan->dev; 101 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 105 uint32_t offset; 106 int target, ret; 107 108 - mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); 109 if (mem) 110 - mem = drm_mm_get_block(mem, size, 0); 111 if (!mem) { 112 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); 113 return -ENOMEM; ··· 184 if (IS_ERR(chan)) 185 return PTR_ERR(chan); 186 187 - ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); 188 nouveau_channel_put(&chan); 189 return ret; 190 }
··· 95 96 int 97 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, 98 + int size, uint32_t start, uint32_t end, 99 + uint32_t *b_offset) 100 { 101 struct drm_device *dev = chan->dev; 102 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 104 uint32_t offset; 105 int target, ret; 106 107 + mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, 108 + start, end, 0); 109 if (mem) 110 + mem = drm_mm_get_block_range(mem, size, 0, start, end); 111 if (!mem) { 112 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); 113 return -ENOMEM; ··· 182 if (IS_ERR(chan)) 183 return PTR_ERR(chan); 184 185 + ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, 186 + &na->offset); 187 nouveau_channel_put(&chan); 188 return ret; 189 }
+8
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 403 void 404 nv50_instmem_flush(struct drm_device *dev) 405 { 406 nv_wr32(dev, 0x00330c, 0x00000001); 407 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 408 NV_ERROR(dev, "PRAMIN flush timeout\n"); 409 } 410 411 void 412 nv84_instmem_flush(struct drm_device *dev) 413 { 414 nv_wr32(dev, 0x070000, 0x00000001); 415 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 416 NV_ERROR(dev, "PRAMIN flush timeout\n"); 417 } 418
··· 403 void 404 nv50_instmem_flush(struct drm_device *dev) 405 { 406 + struct drm_nouveau_private *dev_priv = dev->dev_private; 407 + 408 + spin_lock(&dev_priv->ramin_lock); 409 nv_wr32(dev, 0x00330c, 0x00000001); 410 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 411 NV_ERROR(dev, "PRAMIN flush timeout\n"); 412 + spin_unlock(&dev_priv->ramin_lock); 413 } 414 415 void 416 nv84_instmem_flush(struct drm_device *dev) 417 { 418 + struct drm_nouveau_private *dev_priv = dev->dev_private; 419 + 420 + spin_lock(&dev_priv->ramin_lock); 421 nv_wr32(dev, 0x070000, 0x00000001); 422 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 423 NV_ERROR(dev, "PRAMIN flush timeout\n"); 424 + spin_unlock(&dev_priv->ramin_lock); 425 } 426
+4
drivers/gpu/drm/nouveau/nv50_vm.c
··· 173 void 174 nv50_vm_flush_engine(struct drm_device *dev, int engine) 175 { 176 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 177 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 178 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 179 }
··· 173 void 174 nv50_vm_flush_engine(struct drm_device *dev, int engine) 175 { 176 + struct drm_nouveau_private *dev_priv = dev->dev_private; 177 + 178 + spin_lock(&dev_priv->ramin_lock); 179 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 180 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 181 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 182 + spin_unlock(&dev_priv->ramin_lock); 183 }
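
Both nv50 flush paths above now take the same ramin_lock around their trigger-write-then-poll sequence, so two callers cannot interleave on the shared register pair. A rough userspace analogue using a pthread mutex is sketched below; the lock and register names are stand-ins, and the "hardware" completion is simulated.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ramin_lock = PTHREAD_MUTEX_INITIALIZER;
static int trigger_reg;

static void flush(const char *who)
{
    pthread_mutex_lock(&ramin_lock);
    trigger_reg = 1;                 /* "write the flush trigger" */
    while (trigger_reg)              /* "poll until the flush completes" */
        trigger_reg = 0;             /* pretend hardware finished */
    pthread_mutex_unlock(&ramin_lock);
    printf("%s: flush completed without interleaving\n", who);
}

static void *worker(void *arg) { flush(arg); return NULL; }

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, "thread");
    flush("main");
    pthread_join(t, NULL);
    return 0;
}
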
+1 -2
drivers/gpu/drm/radeon/evergreen.c
··· 2194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2195 } 2196 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2197 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2198 r700_vram_gtt_location(rdev, &rdev->mc); 2199 radeon_update_bandwidth_info(rdev); 2200 ··· 2933 /* XXX: ontario has problems blitting to gart at the moment */ 2934 if (rdev->family == CHIP_PALM) { 2935 rdev->asic->copy = NULL; 2936 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2937 } 2938 2939 /* allocate wb buffer */
··· 2194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2195 } 2196 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2197 r700_vram_gtt_location(rdev, &rdev->mc); 2198 radeon_update_bandwidth_info(rdev); 2199 ··· 2934 /* XXX: ontario has problems blitting to gart at the moment */ 2935 if (rdev->family == CHIP_PALM) { 2936 rdev->asic->copy = NULL; 2937 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2938 } 2939 2940 /* allocate wb buffer */
+2 -2
drivers/gpu/drm/radeon/evergreen_blit_kms.c
··· 623 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 624 return r; 625 } 626 - rdev->mc.active_vram_size = rdev->mc.real_vram_size; 627 return 0; 628 } 629 ··· 631 { 632 int r; 633 634 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 635 if (rdev->r600_blit.shader_obj == NULL) 636 return; 637 /* If we can't reserve the bo, unref should be enough to destroy
··· 623 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 624 return r; 625 } 626 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 627 return 0; 628 } 629 ··· 631 { 632 int r; 633 634 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 635 if (rdev->r600_blit.shader_obj == NULL) 636 return; 637 /* If we can't reserve the bo, unref should be enough to destroy
+1 -1
drivers/gpu/drm/radeon/ni.c
··· 1039 if (enable) 1040 WREG32(CP_ME_CNTL, 0); 1041 else { 1042 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1043 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 1044 WREG32(SCRATCH_UMSK, 0); 1045 }
··· 1039 if (enable) 1040 WREG32(CP_ME_CNTL, 0); 1041 else { 1042 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1043 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 1044 WREG32(SCRATCH_UMSK, 0); 1045 }
+2 -20
drivers/gpu/drm/radeon/r100.c
··· 70 71 void r100_pre_page_flip(struct radeon_device *rdev, int crtc) 72 { 73 - struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; 74 - u32 tmp; 75 - 76 - /* make sure flip is at vb rather than hb */ 77 - tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); 78 - tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; 79 - /* make sure pending bit is asserted */ 80 - tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; 81 - WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); 82 - 83 - /* set pageflip to happen as late as possible in the vblank interval. 84 - * same field for crtc1/2 85 - */ 86 - tmp = RREG32(RADEON_CRTC_GEN_CNTL); 87 - tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK; 88 - WREG32(RADEON_CRTC_GEN_CNTL, tmp); 89 - 90 /* enable the pflip int */ 91 radeon_irq_kms_pflip_irq_get(rdev, crtc); 92 } ··· 1024 return r; 1025 } 1026 rdev->cp.ready = true; 1027 - rdev->mc.active_vram_size = rdev->mc.real_vram_size; 1028 return 0; 1029 } 1030 ··· 1042 void r100_cp_disable(struct radeon_device *rdev) 1043 { 1044 /* Disable ring */ 1045 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1046 rdev->cp.ready = false; 1047 WREG32(RADEON_CP_CSQ_MODE, 0); 1048 WREG32(RADEON_CP_CSQ_CNTL, 0); ··· 2312 /* FIXME we don't use the second aperture yet when we could use it */ 2313 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2314 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2315 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2316 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2317 if (rdev->flags & RADEON_IS_IGP) { 2318 uint32_t tom;
··· 70 71 void r100_pre_page_flip(struct radeon_device *rdev, int crtc) 72 { 73 /* enable the pflip int */ 74 radeon_irq_kms_pflip_irq_get(rdev, crtc); 75 } ··· 1041 return r; 1042 } 1043 rdev->cp.ready = true; 1044 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1045 return 0; 1046 } 1047 ··· 1059 void r100_cp_disable(struct radeon_device *rdev) 1060 { 1061 /* Disable ring */ 1062 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1063 rdev->cp.ready = false; 1064 WREG32(RADEON_CP_CSQ_MODE, 0); 1065 WREG32(RADEON_CP_CSQ_CNTL, 0); ··· 2329 /* FIXME we don't use the second aperture yet when we could use it */ 2330 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2331 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2332 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2333 if (rdev->flags & RADEON_IS_IGP) { 2334 uint32_t tom;
+1 -2
drivers/gpu/drm/radeon/r600.c
··· 1256 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1257 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1258 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1259 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1260 r600_vram_gtt_location(rdev, &rdev->mc); 1261 1262 if (rdev->flags & RADEON_IS_IGP) { ··· 1937 */ 1938 void r600_cp_stop(struct radeon_device *rdev) 1939 { 1940 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1941 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1942 WREG32(SCRATCH_UMSK, 0); 1943 }
··· 1256 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1257 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1258 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1259 r600_vram_gtt_location(rdev, &rdev->mc); 1260 1261 if (rdev->flags & RADEON_IS_IGP) { ··· 1938 */ 1939 void r600_cp_stop(struct radeon_device *rdev) 1940 { 1941 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1942 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1943 WREG32(SCRATCH_UMSK, 0); 1944 }
+2 -2
drivers/gpu/drm/radeon/r600_blit_kms.c
··· 558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 559 return r; 560 } 561 - rdev->mc.active_vram_size = rdev->mc.real_vram_size; 562 return 0; 563 } 564 ··· 566 { 567 int r; 568 569 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 570 if (rdev->r600_blit.shader_obj == NULL) 571 return; 572 /* If we can't reserve the bo, unref should be enough to destroy
··· 558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 559 return r; 560 } 561 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 562 return 0; 563 } 564 ··· 566 { 567 int r; 568 569 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 570 if (rdev->r600_blit.shader_obj == NULL) 571 return; 572 /* If we can't reserve the bo, unref should be enough to destroy
+1 -1
drivers/gpu/drm/radeon/radeon.h
··· 357 * about vram size near mc fb location */ 358 u64 mc_vram_size; 359 u64 visible_vram_size; 360 - u64 active_vram_size; 361 u64 gtt_size; 362 u64 gtt_start; 363 u64 gtt_end; ··· 1491 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1492 extern int radeon_resume_kms(struct drm_device *dev); 1493 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1494 1495 /* 1496 * r600 functions used by radeon_encoder.c
··· 357 * about vram size near mc fb location */ 358 u64 mc_vram_size; 359 u64 visible_vram_size; 360 u64 gtt_size; 361 u64 gtt_start; 362 u64 gtt_end; ··· 1492 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1493 extern int radeon_resume_kms(struct drm_device *dev); 1494 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1495 + extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1496 1497 /* 1498 * r600 functions used by radeon_encoder.c
+3
drivers/gpu/drm/radeon/radeon_asic.c
··· 834 .pm_finish = &evergreen_pm_finish, 835 .pm_init_profile = &rs780_pm_init_profile, 836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 837 }; 838 839 static struct radeon_asic btc_asic = {
··· 834 .pm_finish = &evergreen_pm_finish, 835 .pm_init_profile = &rs780_pm_init_profile, 836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 837 + .pre_page_flip = &evergreen_pre_page_flip, 838 + .page_flip = &evergreen_page_flip, 839 + .post_page_flip = &evergreen_post_page_flip, 840 }; 841 842 static struct radeon_asic btc_asic = {
+4 -1
drivers/gpu/drm/radeon/radeon_gem.c
··· 151 { 152 struct radeon_device *rdev = dev->dev_private; 153 struct drm_radeon_gem_info *args = data; 154 155 args->vram_size = rdev->mc.real_vram_size; 156 - args->vram_visible = rdev->mc.real_vram_size; 157 if (rdev->stollen_vga_memory) 158 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 159 args->vram_visible -= radeon_fbdev_total_size(rdev);
··· 151 { 152 struct radeon_device *rdev = dev->dev_private; 153 struct drm_radeon_gem_info *args = data; 154 + struct ttm_mem_type_manager *man; 155 + 156 + man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 157 158 args->vram_size = rdev->mc.real_vram_size; 159 + args->vram_visible = (u64)man->size << PAGE_SHIFT; 160 if (rdev->stollen_vga_memory) 161 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 162 args->vram_visible -= radeon_fbdev_total_size(rdev);
+2 -1
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
··· 443 (target_fb->bits_per_pixel * 8)); 444 crtc_pitch |= crtc_pitch << 16; 445 446 - 447 if (tiling_flags & RADEON_TILING_MACRO) { 448 if (ASIC_IS_R300(rdev)) 449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | ··· 502 gen_cntl_val = RREG32(gen_cntl_reg); 503 gen_cntl_val &= ~(0xf << 8); 504 gen_cntl_val |= (format << 8); 505 WREG32(gen_cntl_reg, gen_cntl_val); 506 507 crtc_offset = (u32)base;
··· 443 (target_fb->bits_per_pixel * 8)); 444 crtc_pitch |= crtc_pitch << 16; 445 446 + crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; 447 if (tiling_flags & RADEON_TILING_MACRO) { 448 if (ASIC_IS_R300(rdev)) 449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | ··· 502 gen_cntl_val = RREG32(gen_cntl_reg); 503 gen_cntl_val &= ~(0xf << 8); 504 gen_cntl_val |= (format << 8); 505 + gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK; 506 WREG32(gen_cntl_reg, gen_cntl_val); 507 508 crtc_offset = (u32)base;
+14
drivers/gpu/drm/radeon/radeon_ttm.c
··· 589 DRM_INFO("radeon: ttm finalized\n"); 590 } 591 592 static struct vm_operations_struct radeon_ttm_vm_ops; 593 static const struct vm_operations_struct *ttm_vm_ops = NULL; 594
··· 589 DRM_INFO("radeon: ttm finalized\n"); 590 } 591 592 + /* this should only be called at bootup or when userspace 593 + * isn't running */ 594 + void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) 595 + { 596 + struct ttm_mem_type_manager *man; 597 + 598 + if (!rdev->mman.initialized) 599 + return; 600 + 601 + man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 602 + /* this just adjusts TTM size idea, which sets lpfn to the correct value */ 603 + man->size = size >> PAGE_SHIFT; 604 + } 605 + 606 static struct vm_operations_struct radeon_ttm_vm_ops; 607 static const struct vm_operations_struct *ttm_vm_ops = NULL; 608
-1
drivers/gpu/drm/radeon/rs600.c
··· 751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 753 rdev->mc.visible_vram_size = rdev->mc.aper_size; 754 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 755 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 756 base = RREG32_MC(R_000004_MC_FB_LOCATION); 757 base = G_000004_MC_FB_START(base) << 16;
··· 751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 753 rdev->mc.visible_vram_size = rdev->mc.aper_size; 754 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 755 base = RREG32_MC(R_000004_MC_FB_LOCATION); 756 base = G_000004_MC_FB_START(base) << 16;
-1
drivers/gpu/drm/radeon/rs690.c
··· 157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 159 rdev->mc.visible_vram_size = rdev->mc.aper_size; 160 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 161 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 162 base = G_000100_MC_FB_START(base) << 16; 163 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
··· 157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 159 rdev->mc.visible_vram_size = rdev->mc.aper_size; 160 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 161 base = G_000100_MC_FB_START(base) << 16; 162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+1 -2
drivers/gpu/drm/radeon/rv770.c
··· 307 */ 308 void r700_cp_stop(struct radeon_device *rdev) 309 { 310 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 311 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 312 WREG32(SCRATCH_UMSK, 0); 313 } ··· 1123 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1124 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1125 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1126 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1127 r700_vram_gtt_location(rdev, &rdev->mc); 1128 radeon_update_bandwidth_info(rdev); 1129
··· 307 */ 308 void r700_cp_stop(struct radeon_device *rdev) 309 { 310 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 311 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 312 WREG32(SCRATCH_UMSK, 0); 313 } ··· 1123 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1124 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1125 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1126 r700_vram_gtt_location(rdev, &rdev->mc); 1127 radeon_update_bandwidth_info(rdev); 1128
+1
drivers/i2c/busses/i2c-eg20t.c
··· 29 #include <linux/pci.h> 30 #include <linux/mutex.h> 31 #include <linux/ktime.h> 32 33 #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ 34 #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
··· 29 #include <linux/pci.h> 30 #include <linux/mutex.h> 31 #include <linux/ktime.h> 32 + #include <linux/slab.h> 33 34 #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ 35 #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
+1 -1
drivers/i2c/busses/i2c-ocores.c
··· 249 static int ocores_i2c_of_probe(struct platform_device* pdev, 250 struct ocores_i2c* i2c) 251 { 252 - __be32* val; 253 254 val = of_get_property(pdev->dev.of_node, "regstep", NULL); 255 if (!val) {
··· 249 static int ocores_i2c_of_probe(struct platform_device* pdev, 250 struct ocores_i2c* i2c) 251 { 252 + const __be32* val; 253 254 val = of_get_property(pdev->dev.of_node, "regstep", NULL); 255 if (!val) {
+1 -3
drivers/i2c/busses/i2c-omap.c
··· 378 * REVISIT: Some wkup sources might not be needed. 379 */ 380 dev->westate = OMAP_I2C_WE_ALL; 381 - if (dev->rev < OMAP_I2C_REV_ON_4430) 382 - omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, 383 - dev->westate); 384 } 385 } 386 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
··· 378 * REVISIT: Some wkup sources might not be needed. 379 */ 380 dev->westate = OMAP_I2C_WE_ALL; 381 + omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); 382 } 383 } 384 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+24
drivers/idle/intel_idle.c
··· 62 #include <linux/notifier.h> 63 #include <linux/cpu.h> 64 #include <asm/mwait.h> 65 66 #define INTEL_IDLE_VERSION "0.4" 67 #define PREFIX "intel_idle: " ··· 84 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 85 86 static struct cpuidle_state *cpuidle_state_table; 87 88 /* 89 * Set this flag for states where the HW flushes the TLB for us ··· 288 .notifier_call = setup_broadcast_cpuhp_notify, 289 }; 290 291 /* 292 * intel_idle_probe() 293 */ ··· 340 case 0x25: /* Westmere */ 341 case 0x2C: /* Westmere */ 342 cpuidle_state_table = nehalem_cstates; 343 break; 344 345 case 0x1C: /* 28 - Atom Processor */ 346 case 0x26: /* 38 - Lincroft Atom Processor */ 347 cpuidle_state_table = atom_cstates; 348 break; 349 350 case 0x2A: /* SNB */ ··· 458 return -EIO; 459 } 460 } 461 462 return 0; 463 }
··· 62 #include <linux/notifier.h> 63 #include <linux/cpu.h> 64 #include <asm/mwait.h> 65 + #include <asm/msr.h> 66 67 #define INTEL_IDLE_VERSION "0.4" 68 #define PREFIX "intel_idle: " ··· 83 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 84 85 static struct cpuidle_state *cpuidle_state_table; 86 + 87 + /* 88 + * Hardware C-state auto-demotion may not always be optimal. 89 + * Indicate which enable bits to clear here. 90 + */ 91 + static unsigned long long auto_demotion_disable_flags; 92 93 /* 94 * Set this flag for states where the HW flushes the TLB for us ··· 281 .notifier_call = setup_broadcast_cpuhp_notify, 282 }; 283 284 + static void auto_demotion_disable(void *dummy) 285 + { 286 + unsigned long long msr_bits; 287 + 288 + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); 289 + msr_bits &= ~auto_demotion_disable_flags; 290 + wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); 291 + } 292 + 293 /* 294 * intel_idle_probe() 295 */ ··· 324 case 0x25: /* Westmere */ 325 case 0x2C: /* Westmere */ 326 cpuidle_state_table = nehalem_cstates; 327 + auto_demotion_disable_flags = 328 + (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); 329 break; 330 331 case 0x1C: /* 28 - Atom Processor */ 332 + cpuidle_state_table = atom_cstates; 333 + break; 334 + 335 case 0x26: /* 38 - Lincroft Atom Processor */ 336 cpuidle_state_table = atom_cstates; 337 + auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; 338 break; 339 340 case 0x2A: /* SNB */ ··· 436 return -EIO; 437 } 438 } 439 + if (auto_demotion_disable_flags) 440 + smp_call_function(auto_demotion_disable, NULL, 1); 441 442 return 0; 443 }
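
The intel_idle change above clears the package C-state auto-demotion enable bits with a read-modify-write of the MSR, leaving every other bit untouched. The sketch below mimics that pattern in userspace; the bit positions and the rdmsr/wrmsr stand-ins are placeholders for illustration, not the real MSR layout.

#include <stdint.h>
#include <stdio.h>

#define C1_AUTO_DEMOTE (1ULL << 26)   /* placeholder bit positions */
#define C3_AUTO_DEMOTE (1ULL << 25)

/* Stand-ins for rdmsrl()/wrmsrl() on the package C-state config MSR. */
static uint64_t fake_msr = 0x1234ULL | C1_AUTO_DEMOTE | C3_AUTO_DEMOTE;
static void rdmsr_sim(uint64_t *val) { *val = fake_msr; }
static void wrmsr_sim(uint64_t val)  { fake_msr = val; }

int main(void)
{
    uint64_t disable_flags = C1_AUTO_DEMOTE | C3_AUTO_DEMOTE;
    uint64_t bits;

    /* Read-modify-write: clear only the auto-demotion enable bits. */
    rdmsr_sim(&bits);
    bits &= ~disable_flags;
    wrmsr_sim(bits);

    printf("msr after clearing auto-demotion bits: %#llx\n",
           (unsigned long long)fake_msr);
    return 0;
}
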
+1 -1
drivers/isdn/hardware/eicon/istream.c
··· 62 stream interface. 63 If synchronous service was requested, then function 64 does return amount of data written to stream. 65 - 'final' does indicate that pice of data to be written is 66 final part of frame (necessary only by structured datatransfer) 67 return 0 if zero lengh packet was written 68 return -1 if stream is full
··· 62 stream interface. 63 If synchronous service was requested, then function 64 does return amount of data written to stream. 65 + 'final' does indicate that piece of data to be written is 66 final part of frame (necessary only by structured datatransfer) 67 return 0 if zero lengh packet was written 68 return -1 if stream is full
+7 -7
drivers/media/common/tuners/tda8290.c
··· 658 #define TDA8290_ID 0x89 659 u8 reg = 0x1f, id; 660 struct i2c_msg msg_read[] = { 661 - { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 662 - { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 663 }; 664 665 /* detect tda8290 */ 666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 667 - printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 668 __func__, reg); 669 return -ENODEV; 670 } ··· 685 #define TDA8295C2_ID 0x8b 686 u8 reg = 0x2f, id; 687 struct i2c_msg msg_read[] = { 688 - { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 689 - { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 690 }; 691 692 - /* detect tda8290 */ 693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 694 - printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 695 __func__, reg); 696 return -ENODEV; 697 }
··· 658 #define TDA8290_ID 0x89 659 u8 reg = 0x1f, id; 660 struct i2c_msg msg_read[] = { 661 + { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, 662 + { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, 663 }; 664 665 /* detect tda8290 */ 666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 667 + printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", 668 __func__, reg); 669 return -ENODEV; 670 } ··· 685 #define TDA8295C2_ID 0x8b 686 u8 reg = 0x2f, id; 687 struct i2c_msg msg_read[] = { 688 + { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, 689 + { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, 690 }; 691 692 + /* detect tda8295 */ 693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 694 + printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", 695 __func__, reg); 696 return -ENODEV; 697 }
+19 -2
drivers/media/dvb/dvb-usb/dib0700_devices.c
··· 870 return 0; 871 } 872 873 static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) 874 { 875 return dib7000p_pid_filter(adapter->fe, index, pid, onoff); ··· 1892 { 1893 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 1894 .pid_filter_count = 32, 1895 - .pid_filter = stk70x0p_pid_filter, 1896 - .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, 1897 .frontend_attach = stk7700p_frontend_attach, 1898 .tuner_attach = stk7700p_tuner_attach, 1899
··· 870 return 0; 871 } 872 873 + static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index, 874 + u16 pid, int onoff) 875 + { 876 + struct dib0700_state *st = adapter->dev->priv; 877 + if (st->is_dib7000pc) 878 + return dib7000p_pid_filter(adapter->fe, index, pid, onoff); 879 + return dib7000m_pid_filter(adapter->fe, index, pid, onoff); 880 + } 881 + 882 + static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) 883 + { 884 + struct dib0700_state *st = adapter->dev->priv; 885 + if (st->is_dib7000pc) 886 + return dib7000p_pid_filter_ctrl(adapter->fe, onoff); 887 + return dib7000m_pid_filter_ctrl(adapter->fe, onoff); 888 + } 889 + 890 static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) 891 { 892 return dib7000p_pid_filter(adapter->fe, index, pid, onoff); ··· 1875 { 1876 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 1877 .pid_filter_count = 32, 1878 + .pid_filter = stk7700p_pid_filter, 1879 + .pid_filter_ctrl = stk7700p_pid_filter_ctrl, 1880 .frontend_attach = stk7700p_frontend_attach, 1881 .tuner_attach = stk7700p_tuner_attach, 1882
+3 -3
drivers/media/dvb/dvb-usb/lmedm04.c
··· 659 } 660 661 /* Default firmware for LME2510C */ 662 - const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; 663 664 static void lme_coldreset(struct usb_device *dev) 665 { ··· 1006 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 1007 .usb_ctrl = DEVICE_SPECIFIC, 1008 .download_firmware = lme2510_download_firmware, 1009 - .firmware = lme_firmware, 1010 .size_of_priv = sizeof(struct lme2510_state), 1011 .num_adapters = 1, 1012 .adapter = { ··· 1109 1110 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); 1111 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); 1112 - MODULE_VERSION("1.74"); 1113 MODULE_LICENSE("GPL");
··· 659 } 660 661 /* Default firmware for LME2510C */ 662 + char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; 663 664 static void lme_coldreset(struct usb_device *dev) 665 { ··· 1006 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 1007 .usb_ctrl = DEVICE_SPECIFIC, 1008 .download_firmware = lme2510_download_firmware, 1009 + .firmware = (const char *)&lme_firmware, 1010 .size_of_priv = sizeof(struct lme2510_state), 1011 .num_adapters = 1, 1012 .adapter = { ··· 1109 1110 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); 1111 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); 1112 + MODULE_VERSION("1.75"); 1113 MODULE_LICENSE("GPL");
+19
drivers/media/dvb/frontends/dib7000m.c
··· 1285 } 1286 EXPORT_SYMBOL(dib7000m_get_i2c_master); 1287 1288 #if 0 1289 /* used with some prototype boards */ 1290 int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
··· 1285 } 1286 EXPORT_SYMBOL(dib7000m_get_i2c_master); 1287 1288 + int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) 1289 + { 1290 + struct dib7000m_state *state = fe->demodulator_priv; 1291 + u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef; 1292 + val |= (onoff & 0x1) << 4; 1293 + dprintk("PID filter enabled %d", onoff); 1294 + return dib7000m_write_word(state, 294 + state->reg_offs, val); 1295 + } 1296 + EXPORT_SYMBOL(dib7000m_pid_filter_ctrl); 1297 + 1298 + int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) 1299 + { 1300 + struct dib7000m_state *state = fe->demodulator_priv; 1301 + dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff); 1302 + return dib7000m_write_word(state, 300 + state->reg_offs + id, 1303 + onoff ? (1 << 13) | pid : 0); 1304 + } 1305 + EXPORT_SYMBOL(dib7000m_pid_filter); 1306 + 1307 #if 0 1308 /* used with some prototype boards */ 1309 int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
+15
drivers/media/dvb/frontends/dib7000m.h
··· 46 extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, 47 enum dibx000_i2c_interface, 48 int); 49 #else 50 static inline 51 struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, ··· 64 { 65 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 66 return NULL; 67 } 68 #endif 69
··· 46 extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, 47 enum dibx000_i2c_interface, 48 int); 49 + extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff); 50 + extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff); 51 #else 52 static inline 53 struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, ··· 62 { 63 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 64 return NULL; 65 + } 66 + static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, 67 + u16 pid, u8 onoff) 68 + { 69 + printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 70 + return -ENODEV; 71 + } 72 + 73 + static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, 74 + uint8_t onoff) 75 + { 76 + printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 77 + return -ENODEV; 78 } 79 #endif 80
-1
drivers/media/dvb/mantis/mantis_pci.c
··· 22 #include <linux/moduleparam.h> 23 #include <linux/kernel.h> 24 #include <asm/io.h> 25 - #include <asm/pgtable.h> 26 #include <asm/page.h> 27 #include <linux/kmod.h> 28 #include <linux/vmalloc.h>
··· 22 #include <linux/moduleparam.h> 23 #include <linux/kernel.h> 24 #include <asm/io.h> 25 #include <asm/page.h> 26 #include <linux/kmod.h> 27 #include <linux/vmalloc.h>
+1 -2
drivers/media/rc/ir-raw.c
··· 112 { 113 ktime_t now; 114 s64 delta; /* ns */ 115 - struct ir_raw_event ev; 116 int rc = 0; 117 118 if (!dev->raw) ··· 125 * being called for the first time, note that delta can't 126 * possibly be negative. 127 */ 128 - ev.duration = 0; 129 if (delta > IR_MAX_DURATION || !dev->raw->last_type) 130 type |= IR_START_EVENT; 131 else
··· 112 { 113 ktime_t now; 114 s64 delta; /* ns */ 115 + DEFINE_IR_RAW_EVENT(ev); 116 int rc = 0; 117 118 if (!dev->raw) ··· 125 * being called for the first time, note that delta can't 126 * possibly be negative. 127 */ 128 if (delta > IR_MAX_DURATION || !dev->raw->last_type) 129 type |= IR_START_EVENT; 130 else
+15 -12
drivers/media/rc/mceusb.c
··· 148 MCE_GEN2_TX_INV, 149 POLARIS_EVK, 150 CX_HYBRID_TV, 151 }; 152 153 struct mceusb_model { ··· 156 u32 mce_gen2:1; 157 u32 mce_gen3:1; 158 u32 tx_mask_normal:1; 159 - u32 is_polaris:1; 160 u32 no_tx:1; 161 162 const char *rc_map; /* Allow specify a per-board map */ 163 const char *name; /* per-board name */ ··· 181 .tx_mask_normal = 1, 182 }, 183 [POLARIS_EVK] = { 184 - .is_polaris = 1, 185 /* 186 * In fact, the EVK is shipped without 187 * remotes, but we should have something handy, ··· 190 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 191 }, 192 [CX_HYBRID_TV] = { 193 - .is_polaris = 1, 194 .no_tx = 1, /* tx isn't wired up at all */ 195 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 196 }, 197 }; 198 ··· 220 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, 221 /* Philips/Spinel plus IR transceiver for ASUS */ 222 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, 223 - /* Realtek MCE IR Receiver */ 224 - { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, 225 /* SMK/Toshiba G83C0004D410 */ 226 { USB_DEVICE(VENDOR_SMK, 0x031d), 227 .driver_info = MCE_GEN2_TX_INV }, ··· 1106 bool is_gen3; 1107 bool is_microsoft_gen1; 1108 bool tx_mask_normal; 1109 - bool is_polaris; 1110 1111 dev_dbg(&intf->dev, "%s called\n", __func__); 1112 ··· 1115 is_gen3 = mceusb_model[model].mce_gen3; 1116 is_microsoft_gen1 = mceusb_model[model].mce_gen1; 1117 tx_mask_normal = mceusb_model[model].tx_mask_normal; 1118 - is_polaris = mceusb_model[model].is_polaris; 1119 1120 - if (is_polaris) { 1121 - /* Interface 0 is IR */ 1122 - if (idesc->desc.bInterfaceNumber) 1123 - return -ENODEV; 1124 - } 1125 1126 /* step through the endpoints to find first bulk in and out endpoint */ 1127 for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
··· 148 MCE_GEN2_TX_INV, 149 POLARIS_EVK, 150 CX_HYBRID_TV, 151 + MULTIFUNCTION, 152 }; 153 154 struct mceusb_model { ··· 155 u32 mce_gen2:1; 156 u32 mce_gen3:1; 157 u32 tx_mask_normal:1; 158 u32 no_tx:1; 159 + 160 + int ir_intfnum; 161 162 const char *rc_map; /* Allow specify a per-board map */ 163 const char *name; /* per-board name */ ··· 179 .tx_mask_normal = 1, 180 }, 181 [POLARIS_EVK] = { 182 /* 183 * In fact, the EVK is shipped without 184 * remotes, but we should have something handy, ··· 189 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 190 }, 191 [CX_HYBRID_TV] = { 192 .no_tx = 1, /* tx isn't wired up at all */ 193 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 194 + }, 195 + [MULTIFUNCTION] = { 196 + .mce_gen2 = 1, 197 + .ir_intfnum = 2, 198 }, 199 }; 200 ··· 216 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, 217 /* Philips/Spinel plus IR transceiver for ASUS */ 218 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, 219 + /* Realtek MCE IR Receiver and card reader */ 220 + { USB_DEVICE(VENDOR_REALTEK, 0x0161), 221 + .driver_info = MULTIFUNCTION }, 222 /* SMK/Toshiba G83C0004D410 */ 223 { USB_DEVICE(VENDOR_SMK, 0x031d), 224 .driver_info = MCE_GEN2_TX_INV }, ··· 1101 bool is_gen3; 1102 bool is_microsoft_gen1; 1103 bool tx_mask_normal; 1104 + int ir_intfnum; 1105 1106 dev_dbg(&intf->dev, "%s called\n", __func__); 1107 ··· 1110 is_gen3 = mceusb_model[model].mce_gen3; 1111 is_microsoft_gen1 = mceusb_model[model].mce_gen1; 1112 tx_mask_normal = mceusb_model[model].tx_mask_normal; 1113 + ir_intfnum = mceusb_model[model].ir_intfnum; 1114 1115 + /* There are multi-function devices with non-IR interfaces */ 1116 + if (idesc->desc.bInterfaceNumber != ir_intfnum) 1117 + return -ENODEV; 1118 1119 /* step through the endpoints to find first bulk in and out endpoint */ 1120 for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
+3 -2
drivers/media/rc/nuvoton-cir.c
··· 385 386 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) 387 { 388 - /* set number of bytes needed for wake key comparison (default 67) */ 389 - nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP); 390 391 /* set tolerance/variance allowed per byte during wake compare */ 392 nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
··· 385 386 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) 387 { 388 + /* set number of bytes needed for wake from s3 (default 65) */ 389 + nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES, 390 + CIR_WAKE_FIFO_CMP_DEEP); 391 392 /* set tolerance/variance allowed per byte during wake compare */ 393 nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
+5 -2
drivers/media/rc/nuvoton-cir.h
··· 305 #define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 306 #define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 307 308 - /* CIR Wake FIFO buffer is 67 bytes long */ 309 - #define CIR_WAKE_FIFO_LEN 67 310 /* CIR Wake byte comparison tolerance */ 311 #define CIR_WAKE_CMP_TOLERANCE 5 312
··· 305 #define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 306 #define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 307 308 + /* 309 + * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes 310 + * the system comparing only 65 bytes (fails with this set to 67) 311 + */ 312 + #define CIR_WAKE_FIFO_CMP_BYTES 65 313 /* CIR Wake byte comparison tolerance */ 314 #define CIR_WAKE_CMP_TOLERANCE 5 315
+1 -1
drivers/media/rc/rc-main.c
··· 850 count++; 851 } else { 852 for (i = 0; i < ARRAY_SIZE(proto_names); i++) { 853 - if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) { 854 tmp += strlen(proto_names[i].name); 855 mask = proto_names[i].type; 856 break;
··· 850 count++; 851 } else { 852 for (i = 0; i < ARRAY_SIZE(proto_names); i++) { 853 + if (!strcasecmp(tmp, proto_names[i].name)) { 854 tmp += strlen(proto_names[i].name); 855 mask = proto_names[i].type; 856 break;
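
The rc-main change above swaps a length-limited prefix comparison for an exact match, so a longer protocol name that shares a shorter entry's prefix (such as "rc-5-sz" next to "rc-5") can no longer be swallowed by the shorter one. A quick userspace demonstration; the protocol strings are chosen only for illustration.

#include <stdio.h>
#include <string.h>
#include <strings.h>

int main(void)
{
    const char *table_entry = "rc-5";      /* shorter name in the table */
    const char *user_input  = "rc-5-sz";   /* longer, distinct protocol */

    /* Prefix match: wrongly treats "rc-5-sz" as "rc-5". */
    printf("strncasecmp: %s\n",
           !strncasecmp(user_input, table_entry, strlen(table_entry))
           ? "match" : "no match");

    /* Exact match: only an identical name matches. */
    printf("strcasecmp:  %s\n",
           !strcasecmp(user_input, table_entry) ? "match" : "no match");
    return 0;
}
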
+24 -4
drivers/media/video/au0828/au0828-video.c
··· 1758 if (rc < 0) 1759 return rc; 1760 1761 - return videobuf_reqbufs(&fh->vb_vidq, rb); 1762 } 1763 1764 static int vidioc_querybuf(struct file *file, void *priv, ··· 1777 if (rc < 0) 1778 return rc; 1779 1780 - return videobuf_querybuf(&fh->vb_vidq, b); 1781 } 1782 1783 static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) ··· 1795 if (rc < 0) 1796 return rc; 1797 1798 - return videobuf_qbuf(&fh->vb_vidq, b); 1799 } 1800 1801 static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) ··· 1821 dev->greenscreen_detected = 0; 1822 } 1823 1824 - return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); 1825 } 1826 1827 static struct v4l2_file_operations au0828_v4l_fops = {
··· 1758 if (rc < 0) 1759 return rc; 1760 1761 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1762 + rc = videobuf_reqbufs(&fh->vb_vidq, rb); 1763 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1764 + rc = videobuf_reqbufs(&fh->vb_vbiq, rb); 1765 + 1766 + return rc; 1767 } 1768 1769 static int vidioc_querybuf(struct file *file, void *priv, ··· 1772 if (rc < 0) 1773 return rc; 1774 1775 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1776 + rc = videobuf_querybuf(&fh->vb_vidq, b); 1777 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1778 + rc = videobuf_querybuf(&fh->vb_vbiq, b); 1779 + 1780 + return rc; 1781 } 1782 1783 static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) ··· 1785 if (rc < 0) 1786 return rc; 1787 1788 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1789 + rc = videobuf_qbuf(&fh->vb_vidq, b); 1790 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1791 + rc = videobuf_qbuf(&fh->vb_vbiq, b); 1792 + 1793 + return rc; 1794 } 1795 1796 static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) ··· 1806 dev->greenscreen_detected = 0; 1807 } 1808 1809 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1810 + rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); 1811 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1812 + rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK); 1813 + 1814 + return rc; 1815 } 1816 1817 static struct v4l2_file_operations au0828_v4l_fops = {
+49 -1
drivers/media/video/cx18/cx18-cards.c
··· 95 .i2c = &cx18_i2c_std, 96 }; 97 98 static const struct cx18_card cx18_card_hvr1600_samsung = { 99 .type = CX18_CARD_HVR_1600_SAMSUNG, 100 .name = "Hauppauge HVR-1600 (Preproduction)", ··· 570 &cx18_card_toshiba_qosmio_dvbt, 571 &cx18_card_leadtek_pvr2100, 572 &cx18_card_leadtek_dvr3100h, 573 - &cx18_card_gotview_dvd3 574 }; 575 576 const struct cx18_card *cx18_get_card(u16 index)
··· 95 .i2c = &cx18_i2c_std, 96 }; 97 98 + static const struct cx18_card cx18_card_hvr1600_s5h1411 = { 99 + .type = CX18_CARD_HVR_1600_S5H1411, 100 + .name = "Hauppauge HVR-1600", 101 + .comment = "Simultaneous Digital and Analog TV capture supported\n", 102 + .v4l2_capabilities = CX18_CAP_ENCODER, 103 + .hw_audio_ctrl = CX18_HW_418_AV, 104 + .hw_muxer = CX18_HW_CS5345, 105 + .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | 106 + CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | 107 + CX18_HW_Z8F0811_IR_HAUP, 108 + .video_inputs = { 109 + { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, 110 + { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, 111 + { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, 112 + { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, 113 + { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, 114 + }, 115 + .audio_inputs = { 116 + { CX18_CARD_INPUT_AUD_TUNER, 117 + CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, 118 + { CX18_CARD_INPUT_LINE_IN1, 119 + CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, 120 + { CX18_CARD_INPUT_LINE_IN2, 121 + CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, 122 + }, 123 + .radio_input = { CX18_CARD_INPUT_AUD_TUNER, 124 + CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, 125 + .ddr = { 126 + /* ESMT M13S128324A-5B memory */ 127 + .chip_config = 0x003, 128 + .refresh = 0x30c, 129 + .timing1 = 0x44220e82, 130 + .timing2 = 0x08, 131 + .tune_lane = 0, 132 + .initial_emrs = 0, 133 + }, 134 + .gpio_init.initial_value = 0x3001, 135 + .gpio_init.direction = 0x3001, 136 + .gpio_i2c_slave_reset = { 137 + .active_lo_mask = 0x3001, 138 + .msecs_asserted = 10, 139 + .msecs_recovery = 40, 140 + .ir_reset_mask = 0x0001, 141 + }, 142 + .i2c = &cx18_i2c_std, 143 + }; 144 + 145 static const struct cx18_card cx18_card_hvr1600_samsung = { 146 .type = CX18_CARD_HVR_1600_SAMSUNG, 147 .name = "Hauppauge HVR-1600 (Preproduction)", ··· 523 &cx18_card_toshiba_qosmio_dvbt, 524 &cx18_card_leadtek_pvr2100, 525 &cx18_card_leadtek_dvr3100h, 526 + &cx18_card_gotview_dvd3, 527 + &cx18_card_hvr1600_s5h1411 528 }; 529 530 const struct cx18_card *cx18_get_card(u16 index)
+23 -2
drivers/media/video/cx18/cx18-driver.c
··· 157 "\t\t\t 7 = Leadtek WinFast PVR2100\n" 158 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" 159 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" 160 "\t\t\t 0 = Autodetect (default)\n" 161 "\t\t\t-1 = Ignore this card\n\t\t"); 162 MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); ··· 338 switch (cx->card->type) { 339 case CX18_CARD_HVR_1600_ESMT: 340 case CX18_CARD_HVR_1600_SAMSUNG: 341 tveeprom_hauppauge_analog(&c, tv, eedata); 342 break; 343 case CX18_CARD_YUAN_MPC718: ··· 367 from the model number. Use the cardtype module option if you 368 have one of these preproduction models. */ 369 switch (tv.model) { 370 - case 74000 ... 74999: 371 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 372 break; 373 case 0x718: ··· 397 CX18_ERR("Invalid EEPROM\n"); 398 return; 399 default: 400 - CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model); 401 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 402 break; 403 }
··· 157 "\t\t\t 7 = Leadtek WinFast PVR2100\n" 158 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" 159 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" 160 + "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n" 161 "\t\t\t 0 = Autodetect (default)\n" 162 "\t\t\t-1 = Ignore this card\n\t\t"); 163 MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); ··· 337 switch (cx->card->type) { 338 case CX18_CARD_HVR_1600_ESMT: 339 case CX18_CARD_HVR_1600_SAMSUNG: 340 + case CX18_CARD_HVR_1600_S5H1411: 341 tveeprom_hauppauge_analog(&c, tv, eedata); 342 break; 343 case CX18_CARD_YUAN_MPC718: ··· 365 from the model number. Use the cardtype module option if you 366 have one of these preproduction models. */ 367 switch (tv.model) { 368 + case 74301: /* Retail models */ 369 + case 74321: 370 + case 74351: /* OEM models */ 371 + case 74361: 372 + /* Digital side is s5h1411/tda18271 */ 373 + cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411); 374 + break; 375 + case 74021: /* Retail models */ 376 + case 74031: 377 + case 74041: 378 + case 74141: 379 + case 74541: /* OEM models */ 380 + case 74551: 381 + case 74591: 382 + case 74651: 383 + case 74691: 384 + case 74751: 385 + case 74891: 386 + /* Digital side is s5h1409/mxl5005s */ 387 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 388 break; 389 case 0x718: ··· 377 CX18_ERR("Invalid EEPROM\n"); 378 return; 379 default: 380 + CX18_ERR("Unknown model %d, defaulting to original HVR-1600 " 381 + "(cardtype=1)\n", tv.model); 382 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 383 break; 384 }
+2 -1
drivers/media/video/cx18/cx18-driver.h
··· 85 #define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */ 86 #define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */ 87 #define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */ 88 - #define CX18_CARD_LAST 8 89 90 #define CX18_ENC_STREAM_TYPE_MPG 0 91 #define CX18_ENC_STREAM_TYPE_TS 1
··· 85 #define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */ 86 #define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */ 87 #define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */ 88 + #define CX18_CARD_HVR_1600_S5H1411 9 /* Hauppauge HVR 1600 s5h1411/tda18271*/ 89 + #define CX18_CARD_LAST 9 90 91 #define CX18_ENC_STREAM_TYPE_MPG 0 92 #define CX18_ENC_STREAM_TYPE_TS 1
+38
drivers/media/video/cx18/cx18-dvb.c
··· 29 #include "cx18-gpio.h" 30 #include "s5h1409.h" 31 #include "mxl5005s.h" 32 #include "zl10353.h" 33 34 #include <linux/firmware.h> ··· 76 .status_mode = S5H1409_DEMODLOCKING, 77 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, 78 .hvr1600_opt = S5H1409_HVR1600_OPTIMIZE 79 }; 80 81 /* ··· 272 switch (cx->card->type) { 273 case CX18_CARD_HVR_1600_ESMT: 274 case CX18_CARD_HVR_1600_SAMSUNG: 275 v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); 276 v |= 0x00400000; /* Serial Mode */ 277 v |= 0x00002000; /* Data Length - Byte */ ··· 483 &hauppauge_hvr1600_tuner); 484 ret = 0; 485 } 486 break; 487 case CX18_CARD_LEADTEK_DVR3100H: 488 dvb->fe = dvb_attach(zl10353_attach,
··· 29 #include "cx18-gpio.h" 30 #include "s5h1409.h" 31 #include "mxl5005s.h" 32 + #include "s5h1411.h" 33 + #include "tda18271.h" 34 #include "zl10353.h" 35 36 #include <linux/firmware.h> ··· 74 .status_mode = S5H1409_DEMODLOCKING, 75 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, 76 .hvr1600_opt = S5H1409_HVR1600_OPTIMIZE 77 + }; 78 + 79 + /* 80 + * CX18_CARD_HVR_1600_S5H1411 81 + */ 82 + static struct s5h1411_config hcw_s5h1411_config = { 83 + .output_mode = S5H1411_SERIAL_OUTPUT, 84 + .gpio = S5H1411_GPIO_OFF, 85 + .vsb_if = S5H1411_IF_44000, 86 + .qam_if = S5H1411_IF_4000, 87 + .inversion = S5H1411_INVERSION_ON, 88 + .status_mode = S5H1411_DEMODLOCKING, 89 + .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, 90 + }; 91 + 92 + static struct tda18271_std_map hauppauge_tda18271_std_map = { 93 + .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3, 94 + .if_lvl = 6, .rfagc_top = 0x37 }, 95 + .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0, 96 + .if_lvl = 6, .rfagc_top = 0x37 }, 97 + }; 98 + 99 + static struct tda18271_config hauppauge_tda18271_config = { 100 + .std_map = &hauppauge_tda18271_std_map, 101 + .gate = TDA18271_GATE_DIGITAL, 102 + .output_opt = TDA18271_OUTPUT_LT_OFF, 103 }; 104 105 /* ··· 244 switch (cx->card->type) { 245 case CX18_CARD_HVR_1600_ESMT: 246 case CX18_CARD_HVR_1600_SAMSUNG: 247 + case CX18_CARD_HVR_1600_S5H1411: 248 v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); 249 v |= 0x00400000; /* Serial Mode */ 250 v |= 0x00002000; /* Data Length - Byte */ ··· 454 &hauppauge_hvr1600_tuner); 455 ret = 0; 456 } 457 + break; 458 + case CX18_CARD_HVR_1600_S5H1411: 459 + dvb->fe = dvb_attach(s5h1411_attach, 460 + &hcw_s5h1411_config, 461 + &cx->i2c_adap[0]); 462 + if (dvb->fe != NULL) 463 + dvb_attach(tda18271_attach, dvb->fe, 464 + 0x60, &cx->i2c_adap[0], 465 + &hauppauge_tda18271_config); 466 break; 467 case CX18_CARD_LEADTEK_DVR3100H: 468 dvb->fe = dvb_attach(zl10353_attach,
-10
drivers/media/video/cx23885/cx23885-i2c.c
··· 122 123 if (!i2c_wait_done(i2c_adap)) 124 goto eio; 125 - if (!i2c_slave_did_ack(i2c_adap)) { 126 - retval = -ENXIO; 127 - goto err; 128 - } 129 if (i2c_debug) { 130 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); 131 if (!(ctrl & I2C_NOSTOP)) ··· 154 155 eio: 156 retval = -EIO; 157 - err: 158 if (i2c_debug) 159 printk(KERN_ERR " ERR: %d\n", retval); 160 return retval; ··· 204 205 if (!i2c_wait_done(i2c_adap)) 206 goto eio; 207 - if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) { 208 - retval = -ENXIO; 209 - goto err; 210 - } 211 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; 212 if (i2c_debug) { 213 dprintk(1, " %02x", msg->buf[cnt]); ··· 215 216 eio: 217 retval = -EIO; 218 - err: 219 if (i2c_debug) 220 printk(KERN_ERR " ERR: %d\n", retval); 221 return retval;
··· 122 123 if (!i2c_wait_done(i2c_adap)) 124 goto eio; 125 if (i2c_debug) { 126 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); 127 if (!(ctrl & I2C_NOSTOP)) ··· 158 159 eio: 160 retval = -EIO; 161 if (i2c_debug) 162 printk(KERN_ERR " ERR: %d\n", retval); 163 return retval; ··· 209 210 if (!i2c_wait_done(i2c_adap)) 211 goto eio; 212 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; 213 if (i2c_debug) { 214 dprintk(1, " %02x", msg->buf[cnt]); ··· 224 225 eio: 226 retval = -EIO; 227 if (i2c_debug) 228 printk(KERN_ERR " ERR: %d\n", retval); 229 return retval;
+2 -1
drivers/media/video/cx25840/cx25840-core.c
··· 2015 kfree(state); 2016 return err; 2017 } 2018 - v4l2_ctrl_cluster(2, &state->volume); 2019 v4l2_ctrl_handler_setup(&state->hdl); 2020 2021 if (client->dev.platform_data) {
··· 2015 kfree(state); 2016 return err; 2017 } 2018 + if (!is_cx2583x(state)) 2019 + v4l2_ctrl_cluster(2, &state->volume); 2020 v4l2_ctrl_handler_setup(&state->hdl); 2021 2022 if (client->dev.platform_data) {
+51 -7
drivers/media/video/ivtv/ivtv-irq.c
··· 628 static void ivtv_irq_dma_err(struct ivtv *itv) 629 { 630 u32 data[CX2341X_MBOX_MAX_DATA]; 631 632 del_timer(&itv->dma_timer); 633 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 634 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], 635 - read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); 636 - write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); 637 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && 638 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { 639 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; 640 641 - /* retry */ 642 - if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) 643 ivtv_dma_dec_start(s); 644 - else 645 - ivtv_dma_enc_start(s); 646 - return; 647 } 648 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { 649 ivtv_udma_start(itv);
··· 628 static void ivtv_irq_dma_err(struct ivtv *itv) 629 { 630 u32 data[CX2341X_MBOX_MAX_DATA]; 631 + u32 status; 632 633 del_timer(&itv->dma_timer); 634 + 635 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 636 + status = read_reg(IVTV_REG_DMASTATUS); 637 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], 638 + status, itv->cur_dma_stream); 639 + /* 640 + * We do *not* write back to the IVTV_REG_DMASTATUS register to 641 + * clear the error status, if either the encoder write (0x02) or 642 + * decoder read (0x01) bus master DMA operation do not indicate 643 + * completed. We can race with the DMA engine, which may have 644 + * transitioned to completed status *after* we read the register. 645 + * Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the 646 + * DMA engine has completed, will cause the DMA engine to stop working. 647 + */ 648 + status &= 0x3; 649 + if (status == 0x3) 650 + write_reg(status, IVTV_REG_DMASTATUS); 651 + 652 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && 653 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { 654 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; 655 656 + if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { 657 + /* retry */ 658 + /* 659 + * FIXME - handle cases of DMA error similar to 660 + * encoder below, except conditioned on status & 0x1 661 + */ 662 ivtv_dma_dec_start(s); 663 + return; 664 + } else { 665 + if ((status & 0x2) == 0) { 666 + /* 667 + * CX2341x Bus Master DMA write is ongoing. 668 + * Reset the timer and let it complete. 669 + */ 670 + itv->dma_timer.expires = 671 + jiffies + msecs_to_jiffies(600); 672 + add_timer(&itv->dma_timer); 673 + return; 674 + } 675 + 676 + if (itv->dma_retries < 3) { 677 + /* 678 + * CX2341x Bus Master DMA write has ended. 679 + * Retry the write, starting with the first 680 + * xfer segment. Just retrying the current 681 + * segment is not sufficient. 682 + */ 683 + s->sg_processed = 0; 684 + itv->dma_retries++; 685 + ivtv_dma_enc_start_xfer(s); 686 + return; 687 + } 688 + /* Too many retries, give up on this one */ 689 + } 690 + 691 } 692 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { 693 ivtv_udma_start(itv);
-1
drivers/media/video/mem2mem_testdev.c
··· 1011 v4l2_m2m_release(dev->m2m_dev); 1012 del_timer_sync(&dev->timer); 1013 video_unregister_device(dev->vfd); 1014 - video_device_release(dev->vfd); 1015 v4l2_device_unregister(&dev->v4l2_dev); 1016 kfree(dev); 1017
··· 1011 v4l2_m2m_release(dev->m2m_dev); 1012 del_timer_sync(&dev->timer); 1013 video_unregister_device(dev->vfd); 1014 v4l2_device_unregister(&dev->v4l2_dev); 1015 kfree(dev); 1016
+6 -4
drivers/media/video/s2255drv.c
··· 57 #include <linux/usb.h> 58 59 #define S2255_MAJOR_VERSION 1 60 - #define S2255_MINOR_VERSION 20 61 #define S2255_RELEASE 0 62 #define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ 63 S2255_MINOR_VERSION, \ ··· 312 }; 313 314 /* current cypress EEPROM firmware version */ 315 - #define S2255_CUR_USB_FWVER ((3 << 8) | 6) 316 /* current DSP FW version */ 317 - #define S2255_CUR_DSP_FWVER 8 318 /* Need DSP version 5+ for video status feature */ 319 #define S2255_MIN_DSP_STATUS 5 320 #define S2255_MIN_DSP_COLORFILTER 8 ··· 492 493 static void s2255_reset_dsppower(struct s2255_dev *dev) 494 { 495 - s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1); 496 msleep(10); 497 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); 498 return; 499 } 500
··· 57 #include <linux/usb.h> 58 59 #define S2255_MAJOR_VERSION 1 60 + #define S2255_MINOR_VERSION 21 61 #define S2255_RELEASE 0 62 #define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ 63 S2255_MINOR_VERSION, \ ··· 312 }; 313 314 /* current cypress EEPROM firmware version */ 315 + #define S2255_CUR_USB_FWVER ((3 << 8) | 11) 316 /* current DSP FW version */ 317 + #define S2255_CUR_DSP_FWVER 10102 318 /* Need DSP version 5+ for video status feature */ 319 #define S2255_MIN_DSP_STATUS 5 320 #define S2255_MIN_DSP_COLORFILTER 8 ··· 492 493 static void s2255_reset_dsppower(struct s2255_dev *dev) 494 { 495 + s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1); 496 msleep(10); 497 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); 498 + msleep(600); 499 + s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1); 500 return; 501 } 502
+2 -2
drivers/mfd/asic3.c
··· 143 unsigned long flags; 144 struct asic3 *asic; 145 146 - desc->chip->ack(irq); 147 148 - asic = desc->handler_data; 149 150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 151 u32 status;
··· 143 unsigned long flags; 144 struct asic3 *asic; 145 146 + desc->irq_data.chip->irq_ack(&desc->irq_data); 147 148 + asic = get_irq_data(irq); 149 150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 151 u32 status;
+2 -2
drivers/mfd/davinci_voicecodec.c
··· 118 119 /* Voice codec interface client */ 120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; 121 - cell->name = "davinci_vcif"; 122 cell->driver_data = davinci_vc; 123 124 /* Voice codec CQ93VC client */ 125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; 126 - cell->name = "cq93vc"; 127 cell->driver_data = davinci_vc; 128 129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
··· 118 119 /* Voice codec interface client */ 120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; 121 + cell->name = "davinci-vcif"; 122 cell->driver_data = davinci_vc; 123 124 /* Voice codec CQ93VC client */ 125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; 126 + cell->name = "cq93vc-codec"; 127 cell->driver_data = davinci_vc; 128 129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
+5 -5
drivers/mfd/tps6586x.c
··· 150 static inline int __tps6586x_writes(struct i2c_client *client, int reg, 151 int len, uint8_t *val) 152 { 153 - int ret; 154 155 - ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); 156 - if (ret < 0) { 157 - dev_err(&client->dev, "failed writings to 0x%02x\n", reg); 158 - return ret; 159 } 160 161 return 0;
··· 150 static inline int __tps6586x_writes(struct i2c_client *client, int reg, 151 int len, uint8_t *val) 152 { 153 + int ret, i; 154 155 + for (i = 0; i < len; i++) { 156 + ret = __tps6586x_write(client, reg + i, *(val + i)); 157 + if (ret < 0) 158 + return ret; 159 } 160 161 return 0;
+9 -3
drivers/mfd/ucb1x00-ts.c
··· 385 idev->close = ucb1x00_ts_close; 386 387 __set_bit(EV_ABS, idev->evbit); 388 - __set_bit(ABS_X, idev->absbit); 389 - __set_bit(ABS_Y, idev->absbit); 390 - __set_bit(ABS_PRESSURE, idev->absbit); 391 392 input_set_drvdata(idev, ts); 393 394 err = input_register_device(idev); 395 if (err)
··· 385 idev->close = ucb1x00_ts_close; 386 387 __set_bit(EV_ABS, idev->evbit); 388 389 input_set_drvdata(idev, ts); 390 + 391 + ucb1x00_adc_enable(ts->ucb); 392 + ts->x_res = ucb1x00_ts_read_xres(ts); 393 + ts->y_res = ucb1x00_ts_read_yres(ts); 394 + ucb1x00_adc_disable(ts->ucb); 395 + 396 + input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); 397 + input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); 398 + input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); 399 400 err = input_register_device(idev); 401 if (err)
+18
drivers/mfd/wm8994-core.c
··· 246 struct wm8994 *wm8994 = dev_get_drvdata(dev); 247 int ret; 248 249 /* GPIO configuration state is saved here since we may be configuring 250 * the GPIO alternate functions even if we're not using the gpiolib 251 * driver for them. ··· 271 if (ret < 0) 272 dev_err(dev, "Failed to save LDO registers: %d\n", ret); 273 274 ret = regulator_bulk_disable(wm8994->num_supplies, 275 wm8994->supplies); 276 if (ret != 0) { ··· 287 { 288 struct wm8994 *wm8994 = dev_get_drvdata(dev); 289 int ret; 290 291 ret = regulator_bulk_enable(wm8994->num_supplies, 292 wm8994->supplies); ··· 313 &wm8994->gpio_regs); 314 if (ret < 0) 315 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); 316 317 return 0; 318 }
··· 246 struct wm8994 *wm8994 = dev_get_drvdata(dev); 247 int ret; 248 249 + /* Don't actually go through with the suspend if the CODEC is 250 + * still active (eg, for audio passthrough from CP. */ 251 + ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1); 252 + if (ret < 0) { 253 + dev_err(dev, "Failed to read power status: %d\n", ret); 254 + } else if (ret & WM8994_VMID_SEL_MASK) { 255 + dev_dbg(dev, "CODEC still active, ignoring suspend\n"); 256 + return 0; 257 + } 258 + 259 /* GPIO configuration state is saved here since we may be configuring 260 * the GPIO alternate functions even if we're not using the gpiolib 261 * driver for them. ··· 261 if (ret < 0) 262 dev_err(dev, "Failed to save LDO registers: %d\n", ret); 263 264 + wm8994->suspended = true; 265 + 266 ret = regulator_bulk_disable(wm8994->num_supplies, 267 wm8994->supplies); 268 if (ret != 0) { ··· 275 { 276 struct wm8994 *wm8994 = dev_get_drvdata(dev); 277 int ret; 278 + 279 + /* We may have lied to the PM core about suspending */ 280 + if (!wm8994->suspended) 281 + return 0; 282 283 ret = regulator_bulk_enable(wm8994->num_supplies, 284 wm8994->supplies); ··· 297 &wm8994->gpio_regs); 298 if (ret < 0) 299 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); 300 + 301 + wm8994->suspended = false; 302 303 return 0; 304 }
+1
drivers/misc/bmp085.c
··· 449 { "bmp085", 0 }, 450 { } 451 }; 452 453 static struct i2c_driver bmp085_driver = { 454 .driver = {
··· 449 { "bmp085", 0 }, 450 { } 451 }; 452 + MODULE_DEVICE_TABLE(i2c, bmp085_id); 453 454 static struct i2c_driver bmp085_driver = { 455 .driver = {
+1 -1
drivers/mmc/core/core.c
··· 1529 * still present 1530 */ 1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1532 - && mmc_card_is_removable(host)) 1533 host->bus_ops->detect(host); 1534 1535 /*
··· 1529 * still present 1530 */ 1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1532 + && !(host->caps & MMC_CAP_NONREMOVABLE)) 1533 host->bus_ops->detect(host); 1534 1535 /*
+1 -2
drivers/mmc/core/sdio.c
··· 792 */ 793 mmc_release_host(host); 794 err = mmc_add_card(host->card); 795 - mmc_claim_host(host); 796 if (err) 797 goto remove_added; 798 ··· 804 goto remove_added; 805 } 806 807 return 0; 808 809 810 remove_added: 811 /* Remove without lock if the device has been added. */ 812 - mmc_release_host(host); 813 mmc_sdio_remove(host); 814 mmc_claim_host(host); 815 remove:
··· 792 */ 793 mmc_release_host(host); 794 err = mmc_add_card(host->card); 795 if (err) 796 goto remove_added; 797 ··· 805 goto remove_added; 806 } 807 808 + mmc_claim_host(host); 809 return 0; 810 811 812 remove_added: 813 /* Remove without lock if the device has been added. */ 814 mmc_sdio_remove(host); 815 mmc_claim_host(host); 816 remove:
+16 -12
drivers/net/bnx2x/bnx2x.h
··· 22 * (you will need to reboot afterwards) */ 23 /* #define BNX2X_STOP_ON_ERROR */ 24 25 - #define DRV_MODULE_VERSION "1.62.00-5" 26 #define DRV_MODULE_RELDATE "2011/01/30" 27 #define BNX2X_BC_VER 0x040200 28 ··· 1613 #define BNX2X_BTR 4 1614 #define MAX_SPQ_PENDING 8 1615 1616 - 1617 - /* CMNG constants 1618 - derived from lab experiments, and not from system spec calculations !!! */ 1619 - #define DEF_MIN_RATE 100 1620 /* resolution of the rate shaping timer - 100 usec */ 1621 - #define RS_PERIODIC_TIMEOUT_USEC 100 1622 - /* resolution of fairness algorithm in usecs - 1623 - coefficient for calculating the actual t fair */ 1624 - #define T_FAIR_COEF 10000000 1625 /* number of bytes in single QM arbitration cycle - 1626 - coefficient for calculating the fairness timer */ 1627 - #define QM_ARB_BYTES 40000 1628 - #define FAIR_MEM 2 1629 1630 1631 #define ATTN_NIG_FOR_FUNC (1L << 8)
··· 22 * (you will need to reboot afterwards) */ 23 /* #define BNX2X_STOP_ON_ERROR */ 24 25 + #define DRV_MODULE_VERSION "1.62.00-6" 26 #define DRV_MODULE_RELDATE "2011/01/30" 27 #define BNX2X_BC_VER 0x040200 28 ··· 1613 #define BNX2X_BTR 4 1614 #define MAX_SPQ_PENDING 8 1615 1616 + /* CMNG constants, as derived from system spec calculations */ 1617 + /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ 1618 + #define DEF_MIN_RATE 100 1619 /* resolution of the rate shaping timer - 100 usec */ 1620 + #define RS_PERIODIC_TIMEOUT_USEC 100 1621 /* number of bytes in single QM arbitration cycle - 1622 + * coefficient for calculating the fairness timer */ 1623 + #define QM_ARB_BYTES 160000 1624 + /* resolution of Min algorithm 1:100 */ 1625 + #define MIN_RES 100 1626 + /* how many bytes above threshold for the minimal credit of Min algorithm*/ 1627 + #define MIN_ABOVE_THRESH 32768 1628 + /* Fairness algorithm integration time coefficient - 1629 + * for calculating the actual Tfair */ 1630 + #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) 1631 + /* Memory of fairness algorithm . 2 cycles */ 1632 + #define FAIR_MEM 2 1633 1634 1635 #define ATTN_NIG_FOR_FUNC (1L << 8)
+51 -14
drivers/net/bnx2x/bnx2x_cmn.c
··· 259 #endif 260 } 261 262 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 263 struct sk_buff *skb, 264 struct eth_fast_path_rx_cqe *fp_cqe, 265 - u16 cqe_idx) 266 { 267 struct sw_rx_page *rx_pg, old_rx_pg; 268 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); ··· 309 310 /* This is needed in order to enable forwarding support */ 311 if (frag_size) 312 - skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, 313 - max(frag_size, (u32)len_on_bd)); 314 315 #ifdef BNX2X_STOP_ON_ERROR 316 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { ··· 378 if (likely(new_skb)) { 379 /* fix ip xsum and give it to the stack */ 380 /* (no need to map the new skb) */ 381 382 prefetch(skb); 383 prefetch(((char *)(skb)) + L1_CACHE_BYTES); ··· 409 } 410 411 if (!bnx2x_fill_frag_skb(bp, fp, skb, 412 - &cqe->fast_path_cqe, cqe_idx)) { 413 - if ((le16_to_cpu(cqe->fast_path_cqe. 414 - pars_flags.flags) & PARSING_FLAGS_VLAN)) 415 __vlan_hwaccel_put_tag(skb, 416 le16_to_cpu(cqe->fast_path_cqe. 417 vlan_tag)); ··· 739 { 740 u16 line_speed = bp->link_vars.line_speed; 741 if (IS_MF(bp)) { 742 - u16 maxCfg = (bp->mf_config[BP_VN(bp)] & 743 - FUNC_MF_CFG_MAX_BW_MASK) >> 744 - FUNC_MF_CFG_MAX_BW_SHIFT; 745 - /* Calculate the current MAX line speed limit for the DCC 746 - * capable devices 747 */ 748 - if (IS_MF_SD(bp)) { 749 u16 vn_max_rate = maxCfg * 100; 750 751 if (vn_max_rate < line_speed) 752 line_speed = vn_max_rate; 753 - } else /* IS_MF_SI(bp)) */ 754 - line_speed = (line_speed * maxCfg) / 100; 755 } 756 757 return line_speed;
··· 259 #endif 260 } 261 262 + /* Timestamp option length allowed for TPA aggregation: 263 + * 264 + * nop nop kind length echo val 265 + */ 266 + #define TPA_TSTAMP_OPT_LEN 12 267 + /** 268 + * Calculate the approximate value of the MSS for this 269 + * aggregation using the first packet of it. 270 + * 271 + * @param bp 272 + * @param parsing_flags Parsing flags from the START CQE 273 + * @param len_on_bd Total length of the first packet for the 274 + * aggregation. 275 + */ 276 + static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, 277 + u16 len_on_bd) 278 + { 279 + /* TPA arrgregation won't have an IP options and TCP options 280 + * other than timestamp. 281 + */ 282 + u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); 283 + 284 + 285 + /* Check if there was a TCP timestamp, if there is it's will 286 + * always be 12 bytes length: nop nop kind length echo val. 287 + * 288 + * Otherwise FW would close the aggregation. 289 + */ 290 + if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) 291 + hdrs_len += TPA_TSTAMP_OPT_LEN; 292 + 293 + return len_on_bd - hdrs_len; 294 + } 295 + 296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 297 struct sk_buff *skb, 298 struct eth_fast_path_rx_cqe *fp_cqe, 299 + u16 cqe_idx, u16 parsing_flags) 300 { 301 struct sw_rx_page *rx_pg, old_rx_pg; 302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); ··· 275 276 /* This is needed in order to enable forwarding support */ 277 if (frag_size) 278 + skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, 279 + len_on_bd); 280 281 #ifdef BNX2X_STOP_ON_ERROR 282 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { ··· 344 if (likely(new_skb)) { 345 /* fix ip xsum and give it to the stack */ 346 /* (no need to map the new skb) */ 347 + u16 parsing_flags = 348 + le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); 349 350 prefetch(skb); 351 prefetch(((char *)(skb)) + L1_CACHE_BYTES); ··· 373 } 374 375 if (!bnx2x_fill_frag_skb(bp, fp, skb, 376 + &cqe->fast_path_cqe, cqe_idx, 377 + parsing_flags)) { 378 + if (parsing_flags & PARSING_FLAGS_VLAN) 379 __vlan_hwaccel_put_tag(skb, 380 le16_to_cpu(cqe->fast_path_cqe. 381 vlan_tag)); ··· 703 { 704 u16 line_speed = bp->link_vars.line_speed; 705 if (IS_MF(bp)) { 706 + u16 maxCfg = bnx2x_extract_max_cfg(bp, 707 + bp->mf_config[BP_VN(bp)]); 708 + 709 + /* Calculate the current MAX line speed limit for the MF 710 + * devices 711 */ 712 + if (IS_MF_SI(bp)) 713 + line_speed = (line_speed * maxCfg) / 100; 714 + else { /* SD mode */ 715 u16 vn_max_rate = maxCfg * 100; 716 717 if (vn_max_rate < line_speed) 718 line_speed = vn_max_rate; 719 + } 720 } 721 722 return line_speed;
+20
drivers/net/bnx2x/bnx2x_cmn.h
··· 1044 void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1045 void bnx2x_release_phy_lock(struct bnx2x *bp); 1046 1047 #endif /* BNX2X_CMN_H */
··· 1044 void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1045 void bnx2x_release_phy_lock(struct bnx2x *bp); 1046 1047 + /** 1048 + * Extracts MAX BW part from MF configuration. 1049 + * 1050 + * @param bp 1051 + * @param mf_cfg 1052 + * 1053 + * @return u16 1054 + */ 1055 + static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) 1056 + { 1057 + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1058 + FUNC_MF_CFG_MAX_BW_SHIFT; 1059 + if (!max_cfg) { 1060 + BNX2X_ERR("Illegal configuration detected for Max BW - " 1061 + "using 100 instead\n"); 1062 + max_cfg = 100; 1063 + } 1064 + return max_cfg; 1065 + } 1066 + 1067 #endif /* BNX2X_CMN_H */
+12 -13
drivers/net/bnx2x/bnx2x_ethtool.c
··· 238 speed |= (cmd->speed_hi << 16); 239 240 if (IS_MF_SI(bp)) { 241 - u32 param = 0; 242 u32 line_speed = bp->link_vars.line_speed; 243 244 /* use 10G if no link detected */ ··· 251 REQ_BC_VER_4_SET_MF_BW); 252 return -EINVAL; 253 } 254 - if (line_speed < speed) { 255 - BNX2X_DEV_INFO("New speed should be less or equal " 256 - "to actual line speed\n"); 257 return -EINVAL; 258 } 259 /* load old values */ ··· 265 param &= FUNC_MF_CFG_MIN_BW_MASK; 266 267 /* set new MAX value */ 268 - param |= (((speed * 100) / line_speed) 269 - << FUNC_MF_CFG_MAX_BW_SHIFT) 270 & FUNC_MF_CFG_MAX_BW_MASK; 271 272 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); ··· 1782 { 0x100, 0x350 }, /* manuf_info */ 1783 { 0x450, 0xf0 }, /* feature_info */ 1784 { 0x640, 0x64 }, /* upgrade_key_info */ 1785 - { 0x6a4, 0x64 }, 1786 { 0x708, 0x70 }, /* manuf_key_info */ 1787 - { 0x778, 0x70 }, 1788 { 0, 0 } 1789 }; 1790 __be32 buf[0x350 / 4]; ··· 1932 buf[4] = 1; 1933 etest->flags |= ETH_TEST_FL_FAILED; 1934 } 1935 - if (bp->port.pmf) 1936 - if (bnx2x_link_test(bp, is_serdes) != 0) { 1937 - buf[5] = 1; 1938 - etest->flags |= ETH_TEST_FL_FAILED; 1939 - } 1940 1941 #ifdef BNX2X_EXTRA_DEBUG 1942 bnx2x_panic_dump(bp);
··· 238 speed |= (cmd->speed_hi << 16); 239 240 if (IS_MF_SI(bp)) { 241 + u32 param = 0, part; 242 u32 line_speed = bp->link_vars.line_speed; 243 244 /* use 10G if no link detected */ ··· 251 REQ_BC_VER_4_SET_MF_BW); 252 return -EINVAL; 253 } 254 + part = (speed * 100) / line_speed; 255 + if (line_speed < speed || !part) { 256 + BNX2X_DEV_INFO("Speed setting should be in a range " 257 + "from 1%% to 100%% " 258 + "of actual line speed\n"); 259 return -EINVAL; 260 } 261 /* load old values */ ··· 263 param &= FUNC_MF_CFG_MIN_BW_MASK; 264 265 /* set new MAX value */ 266 + param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT) 267 & FUNC_MF_CFG_MAX_BW_MASK; 268 269 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); ··· 1781 { 0x100, 0x350 }, /* manuf_info */ 1782 { 0x450, 0xf0 }, /* feature_info */ 1783 { 0x640, 0x64 }, /* upgrade_key_info */ 1784 { 0x708, 0x70 }, /* manuf_key_info */ 1785 { 0, 0 } 1786 }; 1787 __be32 buf[0x350 / 4]; ··· 1933 buf[4] = 1; 1934 etest->flags |= ETH_TEST_FL_FAILED; 1935 } 1936 + 1937 + if (bnx2x_link_test(bp, is_serdes) != 0) { 1938 + buf[5] = 1; 1939 + etest->flags |= ETH_TEST_FL_FAILED; 1940 + } 1941 1942 #ifdef BNX2X_EXTRA_DEBUG 1943 bnx2x_panic_dump(bp);
+1 -1
drivers/net/bnx2x/bnx2x_init.h
··· 241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 242 * want to handle "system kill" flow at the moment. 243 */ 244 - BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), 245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
··· 241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 242 * want to handle "system kill" flow at the moment. 243 */ 244 + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), 245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+14 -4
drivers/net/bnx2x/bnx2x_main.c
··· 1974 vn_max_rate = 0; 1975 1976 } else { 1977 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1978 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1979 - /* If min rate is zero - set it to 1 */ 1980 if (bp->vn_weight_sum && (vn_min_rate == 0)) 1981 vn_min_rate = DEF_MIN_RATE; 1982 - vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1983 - FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1984 } 1985 1986 DP(NETIF_MSG_IFUP, ··· 2015 m_fair_vn.vn_credit_delta = 2016 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2017 (8 * bp->vn_weight_sum))), 2018 - (bp->cmng.fair_vars.fair_threshold * 2)); 2019 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2020 m_fair_vn.vn_credit_delta); 2021 }
··· 1974 vn_max_rate = 0; 1975 1976 } else { 1977 + u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 1978 + 1979 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1980 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1981 + /* If fairness is enabled (not all min rates are zeroes) and 1982 + if current min rate is zero - set it to 1. 1983 + This is a requirement of the algorithm. */ 1984 if (bp->vn_weight_sum && (vn_min_rate == 0)) 1985 vn_min_rate = DEF_MIN_RATE; 1986 + 1987 + if (IS_MF_SI(bp)) 1988 + /* maxCfg in percents of linkspeed */ 1989 + vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 1990 + else 1991 + /* maxCfg is absolute in 100Mb units */ 1992 + vn_max_rate = maxCfg * 100; 1993 } 1994 1995 DP(NETIF_MSG_IFUP, ··· 2006 m_fair_vn.vn_credit_delta = 2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2008 (8 * bp->vn_weight_sum))), 2009 + (bp->cmng.fair_vars.fair_threshold + 2010 + MIN_ABOVE_THRESH)); 2011 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2012 m_fair_vn.vn_credit_delta); 2013 }
+2 -2
drivers/net/bnx2x/bnx2x_stats.c
··· 1239 if (unlikely(bp->panic)) 1240 return; 1241 1242 /* Protect a state change flow */ 1243 spin_lock_bh(&bp->stats_lock); 1244 state = bp->stats_state; 1245 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1246 spin_unlock_bh(&bp->stats_lock); 1247 - 1248 - bnx2x_stats_stm[state][event].action(bp); 1249 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
··· 1239 if (unlikely(bp->panic)) 1240 return; 1241 1242 + bnx2x_stats_stm[bp->stats_state][event].action(bp); 1243 + 1244 /* Protect a state change flow */ 1245 spin_lock_bh(&bp->stats_lock); 1246 state = bp->stats_state; 1247 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1248 spin_unlock_bh(&bp->stats_lock); 1249 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+1
drivers/net/can/softing/softing_main.c
··· 633 }; 634 635 static const struct can_bittiming_const softing_btr_const = { 636 .tseg1_min = 1, 637 .tseg1_max = 16, 638 .tseg2_min = 1,
··· 633 }; 634 635 static const struct can_bittiming_const softing_btr_const = { 636 + .name = "softing", 637 .tseg1_min = 1, 638 .tseg1_max = 16, 639 .tseg2_min = 1,
+25 -8
drivers/net/cnic.c
··· 2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2761 int kcqe_cnt; 2762 2763 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2764 2765 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { ··· 2772 barrier(); 2773 if (status_idx != *cp->kcq1.status_idx_ptr) { 2774 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2775 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2776 } else 2777 break; ··· 2892 u32 last_status = *info->status_idx_ptr; 2893 int kcqe_cnt; 2894 2895 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2896 2897 service_kcqes(dev, kcqe_cnt); ··· 2904 break; 2905 2906 last_status = *info->status_idx_ptr; 2907 } 2908 return last_status; 2909 } ··· 2914 { 2915 struct cnic_dev *dev = (struct cnic_dev *) data; 2916 struct cnic_local *cp = dev->cnic_priv; 2917 - u32 status_idx; 2918 2919 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2920 return; 2921 2922 - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2923 2924 - CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2925 2926 - if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 2927 - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2928 2929 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2930 MAX_KCQ_IDX); 2931 2932 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2933 status_idx, IGU_INT_ENABLE, 1); 2934 - } else { 2935 - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2936 - status_idx, IGU_INT_ENABLE, 1); 2937 } 2938 } 2939
··· 2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2761 int kcqe_cnt; 2762 2763 + /* status block index must be read before reading other fields */ 2764 + rmb(); 2765 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2766 2767 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { ··· 2770 barrier(); 2771 if (status_idx != *cp->kcq1.status_idx_ptr) { 2772 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2773 + /* status block index must be read first */ 2774 + rmb(); 2775 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2776 } else 2777 break; ··· 2888 u32 last_status = *info->status_idx_ptr; 2889 int kcqe_cnt; 2890 2891 + /* status block index must be read before reading the KCQ */ 2892 + rmb(); 2893 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2894 2895 service_kcqes(dev, kcqe_cnt); ··· 2898 break; 2899 2900 last_status = *info->status_idx_ptr; 2901 + /* status block index must be read before reading the KCQ */ 2902 + rmb(); 2903 } 2904 return last_status; 2905 } ··· 2906 { 2907 struct cnic_dev *dev = (struct cnic_dev *) data; 2908 struct cnic_local *cp = dev->cnic_priv; 2909 + u32 status_idx, new_status_idx; 2910 2911 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2912 return; 2913 2914 + while (1) { 2915 + status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2916 2917 + CNIC_WR16(dev, cp->kcq1.io_addr, 2918 + cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2919 2920 + if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { 2921 + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2922 + status_idx, IGU_INT_ENABLE, 1); 2923 + break; 2924 + } 2925 + 2926 + new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2927 + 2928 + if (new_status_idx != status_idx) 2929 + continue; 2930 2931 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2932 MAX_KCQ_IDX); 2933 2934 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2935 status_idx, IGU_INT_ENABLE, 1); 2936 + 2937 + break; 2938 } 2939 } 2940
+1 -1
drivers/net/davinci_emac.c
··· 1008 int ret; 1009 1010 /* free and bail if we are shutting down */ 1011 - if (unlikely(!netif_running(ndev))) { 1012 dev_kfree_skb_any(skb); 1013 return; 1014 }
··· 1008 int ret; 1009 1010 /* free and bail if we are shutting down */ 1011 + if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { 1012 dev_kfree_skb_any(skb); 1013 return; 1014 }
+1 -2
drivers/net/dnet.c
··· 337 for (i = 0; i < PHY_MAX_ADDR; i++) 338 bp->mii_bus->irq[i] = PHY_POLL; 339 340 - platform_set_drvdata(bp->dev, bp->mii_bus); 341 - 342 if (mdiobus_register(bp->mii_bus)) { 343 err = -ENXIO; 344 goto err_out_free_mdio_irq; ··· 861 bp = netdev_priv(dev); 862 bp->dev = dev; 863 864 SET_NETDEV_DEV(dev, &pdev->dev); 865 866 spin_lock_init(&bp->lock);
··· 337 for (i = 0; i < PHY_MAX_ADDR; i++) 338 bp->mii_bus->irq[i] = PHY_POLL; 339 340 if (mdiobus_register(bp->mii_bus)) { 341 err = -ENXIO; 342 goto err_out_free_mdio_irq; ··· 863 bp = netdev_priv(dev); 864 bp->dev = dev; 865 866 + platform_set_drvdata(pdev, dev); 867 SET_NETDEV_DEV(dev, &pdev->dev); 868 869 spin_lock_init(&bp->lock);
+2 -1
drivers/net/e1000/e1000_osdep.h
··· 42 #define GBE_CONFIG_RAM_BASE \ 43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 44 45 - #define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) 46 47 #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 48 (iowrite16_rep(base + offset, data, count))
··· 42 #define GBE_CONFIG_RAM_BASE \ 43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 44 45 + #define GBE_CONFIG_BASE_VIRT \ 46 + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) 47 48 #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 49 (iowrite16_rep(base + offset, data, count))
+2 -1
drivers/net/e1000e/netdev.c
··· 5967 /* APME bit in EEPROM is mapped to WUC.APME */ 5968 eeprom_data = er32(WUC); 5969 eeprom_apme_mask = E1000_WUC_APME; 5970 - if (eeprom_data & E1000_WUC_PHY_WAKE) 5971 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 5972 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 5973 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
··· 5967 /* APME bit in EEPROM is mapped to WUC.APME */ 5968 eeprom_data = er32(WUC); 5969 eeprom_apme_mask = E1000_WUC_APME; 5970 + if ((hw->mac.type > e1000_ich10lan) && 5971 + (eeprom_data & E1000_WUC_PHY_WAKE)) 5972 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 5973 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 5974 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
+2 -1
drivers/net/fec.c
··· 74 }, { 75 .name = "imx28-fec", 76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 77 - } 78 }; 79 80 static unsigned char macaddr[ETH_ALEN];
··· 74 }, { 75 .name = "imx28-fec", 76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 77 + }, 78 + { } 79 }; 80 81 static unsigned char macaddr[ETH_ALEN];
+1 -1
drivers/net/igbvf/vf.c
··· 220 * The parameter rar_count will usually be hw->mac.rar_entry_count 221 * unless there are workarounds that change this. 222 **/ 223 - void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 224 u8 *mc_addr_list, u32 mc_addr_count, 225 u32 rar_used_count, u32 rar_count) 226 {
··· 220 * The parameter rar_count will usually be hw->mac.rar_entry_count 221 * unless there are workarounds that change this. 222 **/ 223 + static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 224 u8 *mc_addr_list, u32 mc_addr_count, 225 u32 rar_used_count, u32 rar_count) 226 {
+1 -1
drivers/net/macb.c
··· 260 for (i = 0; i < PHY_MAX_ADDR; i++) 261 bp->mii_bus->irq[i] = PHY_POLL; 262 263 - platform_set_drvdata(bp->dev, bp->mii_bus); 264 265 if (mdiobus_register(bp->mii_bus)) 266 goto err_out_free_mdio_irq;
··· 260 for (i = 0; i < PHY_MAX_ADDR; i++) 261 bp->mii_bus->irq[i] = PHY_POLL; 262 263 + dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 264 265 if (mdiobus_register(bp->mii_bus)) 266 goto err_out_free_mdio_irq;
+1
drivers/net/pcmcia/fmvj18x_cs.c
··· 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), 693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 694 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 695 PCMCIA_DEVICE_NULL, 696 };
··· 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), 693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 694 + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), 695 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 696 PCMCIA_DEVICE_NULL, 697 };
+6
drivers/net/r8169.c
··· 25 #include <linux/dma-mapping.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/firmware.h> 28 29 #include <asm/system.h> 30 #include <asm/io.h> ··· 3020 mii->phy_id_mask = 0x1f; 3021 mii->reg_num_mask = 0x1f; 3022 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); 3023 3024 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 3025 rc = pci_enable_device(pdev);
··· 25 #include <linux/dma-mapping.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/firmware.h> 28 + #include <linux/pci-aspm.h> 29 30 #include <asm/system.h> 31 #include <asm/io.h> ··· 3019 mii->phy_id_mask = 0x1f; 3020 mii->reg_num_mask = 0x1f; 3021 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); 3022 + 3023 + /* disable ASPM completely as that cause random device stop working 3024 + * problems as well as full system hangs for some PCIe devices users */ 3025 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | 3026 + PCIE_LINK_STATE_CLKPM); 3027 3028 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 3029 rc = pci_enable_device(pdev);
-3
drivers/net/skge.c
··· 3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3858 3859 - /* device is off until link detection */ 3860 - netif_carrier_off(dev); 3861 - 3862 return dev; 3863 } 3864
··· 3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3858 3859 return dev; 3860 } 3861
+5 -4
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 219 struct tx_buf *tx_buf = NULL; 220 struct sk_buff *nskb = NULL; 221 int ret = 0, i; 222 - u16 *hdr, tx_skb_cnt = 0; 223 u8 *buf; 224 225 if (hif_dev->tx.tx_skb_cnt == 0) 226 return 0; ··· 246 247 buf = tx_buf->buf; 248 buf += tx_buf->offset; 249 - hdr = (u16 *)buf; 250 - *hdr++ = nskb->len; 251 - *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; 252 buf += 4; 253 memcpy(buf, nskb->data, nskb->len); 254 tx_buf->len = nskb->len + 4;
··· 219 struct tx_buf *tx_buf = NULL; 220 struct sk_buff *nskb = NULL; 221 int ret = 0, i; 222 + u16 tx_skb_cnt = 0; 223 u8 *buf; 224 + __le16 *hdr; 225 226 if (hif_dev->tx.tx_skb_cnt == 0) 227 return 0; ··· 245 246 buf = tx_buf->buf; 247 buf += tx_buf->offset; 248 + hdr = (__le16 *)buf; 249 + *hdr++ = cpu_to_le16(nskb->len); 250 + *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); 251 buf += 4; 252 memcpy(buf, nskb->data, nskb->len); 253 tx_buf->len = nskb->len + 4;
+3 -2
drivers/net/wireless/ath/ath9k/mac.c
··· 885 struct ath_common *common = ath9k_hw_common(ah); 886 887 if (!(ints & ATH9K_INT_GLOBAL)) 888 - ath9k_hw_enable_interrupts(ah); 889 890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 891 ··· 963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 964 } 965 966 - ath9k_hw_enable_interrupts(ah); 967 968 return; 969 }
··· 885 struct ath_common *common = ath9k_hw_common(ah); 886 887 if (!(ints & ATH9K_INT_GLOBAL)) 888 + ath9k_hw_disable_interrupts(ah); 889 890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 891 ··· 963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 964 } 965 966 + if (ints & ATH9K_INT_GLOBAL) 967 + ath9k_hw_enable_interrupts(ah); 968 969 return; 970 }
+2
drivers/net/wireless/ath/carl9170/usb.c
··· 118 { USB_DEVICE(0x057c, 0x8402) }, 119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ 120 { USB_DEVICE(0x1668, 0x1200) }, 121 122 /* terminate */ 123 {}
··· 118 { USB_DEVICE(0x057c, 0x8402) }, 119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ 120 { USB_DEVICE(0x1668, 0x1200) }, 121 + /* Airlive X.USB a/b/g/n */ 122 + { USB_DEVICE(0x1b75, 0x9170) }, 123 124 /* terminate */ 125 {}
+1 -1
drivers/net/wireless/iwlwifi/iwl-5000.c
··· 51 #include "iwl-agn-debugfs.h" 52 53 /* Highest firmware API version supported */ 54 - #define IWL5000_UCODE_API_MAX 2 55 #define IWL5150_UCODE_API_MAX 2 56 57 /* Lowest firmware API version supported */
··· 51 #include "iwl-agn-debugfs.h" 52 53 /* Highest firmware API version supported */ 54 + #define IWL5000_UCODE_API_MAX 5 55 #define IWL5150_UCODE_API_MAX 2 56 57 /* Lowest firmware API version supported */
+1
drivers/net/wireless/p54/p54usb.c
··· 98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ 99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 101 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 102 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ 103 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
··· 98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ 99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 101 + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ 102 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 103 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ 104 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
+3
drivers/net/wireless/rndis_wlan.c
··· 2597 __le32 mode; 2598 int ret; 2599 2600 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, 2601 enabled ? "enabled" : "disabled", 2602 timeout);
··· 2597 __le32 mode; 2598 int ret; 2599 2600 + if (priv->device_type != RNDIS_BCM4320B) 2601 + return -ENOTSUPP; 2602 + 2603 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, 2604 enabled ? "enabled" : "disabled", 2605 timeout);
+42 -70
drivers/of/pdt.c
··· 36 (p)->unique_id = of_pdt_unique_id++; \ 37 } while (0) 38 39 - static inline const char *of_pdt_node_name(struct device_node *dp) 40 { 41 - return dp->path_component_name; 42 } 43 44 - #else 45 46 static inline void of_pdt_incr_unique_id(void *p) { } 47 static inline void irq_trans_init(struct device_node *dp) { } 48 49 - static inline const char *of_pdt_node_name(struct device_node *dp) 50 { 51 - return dp->name; 52 } 53 54 #endif /* !CONFIG_SPARC */ ··· 168 return buf; 169 } 170 171 - static char * __init of_pdt_try_pkg2path(phandle node) 172 - { 173 - char *res, *buf = NULL; 174 - int len; 175 - 176 - if (!of_pdt_prom_ops->pkg2path) 177 - return NULL; 178 - 179 - if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len)) 180 - return NULL; 181 - buf = prom_early_alloc(len + 1); 182 - if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) { 183 - pr_err("%s: package-to-path failed\n", __func__); 184 - return NULL; 185 - } 186 - 187 - res = strrchr(buf, '/'); 188 - if (!res) { 189 - pr_err("%s: couldn't find / in %s\n", __func__, buf); 190 - return NULL; 191 - } 192 - return res+1; 193 - } 194 - 195 - /* 196 - * When fetching the node's name, first try using package-to-path; if 197 - * that fails (either because the arch hasn't supplied a PROM callback, 198 - * or some other random failure), fall back to just looking at the node's 199 - * 'name' property. 200 - */ 201 - static char * __init of_pdt_build_name(phandle node) 202 - { 203 - char *buf; 204 - 205 - buf = of_pdt_try_pkg2path(node); 206 - if (!buf) 207 - buf = of_pdt_get_one_property(node, "name"); 208 - 209 - return buf; 210 - } 211 - 212 static struct device_node * __init of_pdt_create_node(phandle node, 213 struct device_node *parent) 214 { ··· 182 183 kref_init(&dp->kref); 184 185 - dp->name = of_pdt_build_name(node); 186 dp->type = of_pdt_get_one_property(node, "device_type"); 187 dp->phandle = node; 188 ··· 191 irq_trans_init(dp); 192 193 return dp; 194 - } 195 - 196 - static char * __init of_pdt_build_full_name(struct device_node *dp) 197 - { 198 - int len, ourlen, plen; 199 - char *n; 200 - 201 - plen = strlen(dp->parent->full_name); 202 - ourlen = strlen(of_pdt_node_name(dp)); 203 - len = ourlen + plen + 2; 204 - 205 - n = prom_early_alloc(len); 206 - strcpy(n, dp->parent->full_name); 207 - if (!of_node_is_root(dp->parent)) { 208 - strcpy(n + plen, "/"); 209 - plen++; 210 - } 211 - strcpy(n + plen, of_pdt_node_name(dp)); 212 - 213 - return n; 214 } 215 216 static struct device_node * __init of_pdt_build_tree(struct device_node *parent, ··· 215 *(*nextp) = dp; 216 *nextp = &dp->allnext; 217 218 - #if defined(CONFIG_SPARC) 219 - dp->path_component_name = build_path_component(dp); 220 - #endif 221 dp->full_name = of_pdt_build_full_name(dp); 222 223 dp->child = of_pdt_build_tree(dp,
··· 36 (p)->unique_id = of_pdt_unique_id++; \ 37 } while (0) 38 39 + static char * __init of_pdt_build_full_name(struct device_node *dp) 40 { 41 + int len, ourlen, plen; 42 + char *n; 43 + 44 + dp->path_component_name = build_path_component(dp); 45 + 46 + plen = strlen(dp->parent->full_name); 47 + ourlen = strlen(dp->path_component_name); 48 + len = ourlen + plen + 2; 49 + 50 + n = prom_early_alloc(len); 51 + strcpy(n, dp->parent->full_name); 52 + if (!of_node_is_root(dp->parent)) { 53 + strcpy(n + plen, "/"); 54 + plen++; 55 + } 56 + strcpy(n + plen, dp->path_component_name); 57 + 58 + return n; 59 } 60 61 + #else /* CONFIG_SPARC */ 62 63 static inline void of_pdt_incr_unique_id(void *p) { } 64 static inline void irq_trans_init(struct device_node *dp) { } 65 66 + static char * __init of_pdt_build_full_name(struct device_node *dp) 67 { 68 + static int failsafe_id = 0; /* for generating unique names on failure */ 69 + char *buf; 70 + int len; 71 + 72 + if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len)) 73 + goto failsafe; 74 + 75 + buf = prom_early_alloc(len + 1); 76 + if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len)) 77 + goto failsafe; 78 + return buf; 79 + 80 + failsafe: 81 + buf = prom_early_alloc(strlen(dp->parent->full_name) + 82 + strlen(dp->name) + 16); 83 + sprintf(buf, "%s/%s@unknown%i", 84 + of_node_is_root(dp->parent) ? "" : dp->parent->full_name, 85 + dp->name, failsafe_id++); 86 + pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf); 87 + return buf; 88 } 89 90 #endif /* !CONFIG_SPARC */ ··· 132 return buf; 133 } 134 135 static struct device_node * __init of_pdt_create_node(phandle node, 136 struct device_node *parent) 137 { ··· 187 188 kref_init(&dp->kref); 189 190 + dp->name = of_pdt_get_one_property(node, "name"); 191 dp->type = of_pdt_get_one_property(node, "device_type"); 192 dp->phandle = node; 193 ··· 196 irq_trans_init(dp); 197 198 return dp; 199 } 200 201 static struct device_node * __init of_pdt_build_tree(struct device_node *parent, ··· 240 *(*nextp) = dp; 241 *nextp = &dp->allnext; 242 243 dp->full_name = of_pdt_build_full_name(dp); 244 245 dp->child = of_pdt_build_tree(dp,
+3
drivers/pcmcia/pxa2xx_colibri.c
··· 181 { 182 int ret; 183 184 colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 185 if (!colibri_pcmcia_device) 186 return -ENOMEM;
··· 181 { 182 int ret; 183 184 + if (!machine_is_colibri() && !machine_is_colibri320()) 185 + return -ENODEV; 186 + 187 colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 188 if (!colibri_pcmcia_device) 189 return -ENOMEM;
+1 -1
drivers/pps/generators/Kconfig
··· 6 7 config PPS_GENERATOR_PARPORT 8 tristate "Parallel port PPS signal generator" 9 - depends on PARPORT 10 help 11 If you say yes here you get support for a PPS signal generator which 12 utilizes STROBE pin of a parallel port to send PPS signals. It uses
··· 6 7 config PPS_GENERATOR_PARPORT 8 tristate "Parallel port PPS signal generator" 9 + depends on PARPORT && BROKEN 10 help 11 If you say yes here you get support for a PPS signal generator which 12 utilizes STROBE pin of a parallel port to send PPS signals. It uses
+7 -5
drivers/rtc/rtc-s3c.c
··· 77 } 78 79 /* Update control registers */ 80 - static void s3c_rtc_setaie(int to) 81 { 82 unsigned int tmp; 83 84 - pr_debug("%s: aie=%d\n", __func__, to); 85 86 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 87 88 - if (to) 89 tmp |= S3C2410_RTCALM_ALMEN; 90 91 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 92 } 93 94 static int s3c_rtc_setpie(struct device *dev, int enabled) ··· 310 311 writeb(alrm_en, base + S3C2410_RTCALM); 312 313 - s3c_rtc_setaie(alrm->enabled); 314 315 return 0; 316 } ··· 442 rtc_device_unregister(rtc); 443 444 s3c_rtc_setpie(&dev->dev, 0); 445 - s3c_rtc_setaie(0); 446 447 clk_disable(rtc_clk); 448 clk_put(rtc_clk);
··· 77 } 78 79 /* Update control registers */ 80 + static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) 81 { 82 unsigned int tmp; 83 84 + pr_debug("%s: aie=%d\n", __func__, enabled); 85 86 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 87 88 + if (enabled) 89 tmp |= S3C2410_RTCALM_ALMEN; 90 91 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 92 + 93 + return 0; 94 } 95 96 static int s3c_rtc_setpie(struct device *dev, int enabled) ··· 308 309 writeb(alrm_en, base + S3C2410_RTCALM); 310 311 + s3c_rtc_setaie(dev, alrm->enabled); 312 313 return 0; 314 } ··· 440 rtc_device_unregister(rtc); 441 442 s3c_rtc_setpie(&dev->dev, 0); 443 + s3c_rtc_setaie(&dev->dev, 0); 444 445 clk_disable(rtc_clk); 446 clk_put(rtc_clk);
+2 -2
drivers/s390/block/xpram.c
··· 62 /* 63 * Parameter parsing functions. 64 */ 65 - static int __initdata devs = XPRAM_DEVS; 66 - static char __initdata *sizes[XPRAM_MAX_DEVS]; 67 68 module_param(devs, int, 0); 69 module_param_array(sizes, charp, NULL, 0);
··· 62 /* 63 * Parameter parsing functions. 64 */ 65 + static int devs = XPRAM_DEVS; 66 + static char *sizes[XPRAM_MAX_DEVS]; 67 68 module_param(devs, int, 0); 69 module_param_array(sizes, charp, NULL, 0);
+2 -1
drivers/s390/char/keyboard.c
··· 460 unsigned int cmd, unsigned long arg) 461 { 462 void __user *argp; 463 - int ct, perm; 464 465 argp = (void __user *)arg; 466
··· 460 unsigned int cmd, unsigned long arg) 461 { 462 void __user *argp; 463 + unsigned int ct; 464 + int perm; 465 466 argp = (void __user *)arg; 467
+8
drivers/s390/char/tape.h
··· 280 return rc; 281 } 282 283 extern int tape_oper_handler(int irq, int status); 284 extern void tape_noper_handler(int irq, int status); 285 extern int tape_open(struct tape_device *);
··· 280 return rc; 281 } 282 283 + static inline void 284 + tape_do_io_async_free(struct tape_device *device, struct tape_request *request) 285 + { 286 + request->callback = (void *) tape_free_request; 287 + request->callback_data = NULL; 288 + tape_do_io_async(device, request); 289 + } 290 + 291 extern int tape_oper_handler(int irq, int status); 292 extern void tape_noper_handler(int irq, int status); 293 extern int tape_open(struct tape_device *);
+41 -18
drivers/s390/char/tape_34xx.c
··· 53 * Medium sense for 34xx tapes. There is no 'real' medium sense call. 54 * So we just do a normal sense. 55 */ 56 - static int 57 - tape_34xx_medium_sense(struct tape_device *device) 58 { 59 - struct tape_request *request; 60 - unsigned char *sense; 61 - int rc; 62 63 - request = tape_alloc_request(1, 32); 64 - if (IS_ERR(request)) { 65 - DBF_EXCEPTION(6, "MSEN fail\n"); 66 - return PTR_ERR(request); 67 - } 68 - 69 - request->op = TO_MSEN; 70 - tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); 71 - 72 - rc = tape_do_io_interruptible(device, request); 73 if (request->rc == 0) { 74 sense = request->cpdata; 75 ··· 76 device->tape_generic_status |= GMT_WR_PROT(~0); 77 else 78 device->tape_generic_status &= ~GMT_WR_PROT(~0); 79 - } else { 80 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", 81 request->rc); 82 - } 83 tape_free_request(request); 84 85 return rc; 86 } 87 88 struct tape_34xx_work { ··· 129 * is inserted but cannot call tape_do_io* from an interrupt context. 130 * Maybe that's useful for other actions we want to start from the 131 * interrupt handler. 132 */ 133 static void 134 tape_34xx_work_handler(struct work_struct *work) ··· 142 143 switch(p->op) { 144 case TO_MSEN: 145 - tape_34xx_medium_sense(device); 146 break; 147 default: 148 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
··· 53 * Medium sense for 34xx tapes. There is no 'real' medium sense call. 54 * So we just do a normal sense. 55 */ 56 + static void __tape_34xx_medium_sense(struct tape_request *request) 57 { 58 + struct tape_device *device = request->device; 59 + unsigned char *sense; 60 61 if (request->rc == 0) { 62 sense = request->cpdata; 63 ··· 88 device->tape_generic_status |= GMT_WR_PROT(~0); 89 else 90 device->tape_generic_status &= ~GMT_WR_PROT(~0); 91 + } else 92 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", 93 request->rc); 94 tape_free_request(request); 95 + } 96 97 + static int tape_34xx_medium_sense(struct tape_device *device) 98 + { 99 + struct tape_request *request; 100 + int rc; 101 + 102 + request = tape_alloc_request(1, 32); 103 + if (IS_ERR(request)) { 104 + DBF_EXCEPTION(6, "MSEN fail\n"); 105 + return PTR_ERR(request); 106 + } 107 + 108 + request->op = TO_MSEN; 109 + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); 110 + rc = tape_do_io_interruptible(device, request); 111 + __tape_34xx_medium_sense(request); 112 return rc; 113 + } 114 + 115 + static void tape_34xx_medium_sense_async(struct tape_device *device) 116 + { 117 + struct tape_request *request; 118 + 119 + request = tape_alloc_request(1, 32); 120 + if (IS_ERR(request)) { 121 + DBF_EXCEPTION(6, "MSEN fail\n"); 122 + return; 123 + } 124 + 125 + request->op = TO_MSEN; 126 + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); 127 + request->callback = (void *) __tape_34xx_medium_sense; 128 + request->callback_data = NULL; 129 + tape_do_io_async(device, request); 130 } 131 132 struct tape_34xx_work { ··· 109 * is inserted but cannot call tape_do_io* from an interrupt context. 110 * Maybe that's useful for other actions we want to start from the 111 * interrupt handler. 112 + * Note: the work handler is called by the system work queue. The tape 113 + * commands started by the handler need to be asynchrounous, otherwise 114 + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). 115 */ 116 static void 117 tape_34xx_work_handler(struct work_struct *work) ··· 119 120 switch(p->op) { 121 case TO_MSEN: 122 + tape_34xx_medium_sense_async(device); 123 break; 124 default: 125 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
+67 -16
drivers/s390/char/tape_3590.c
··· 329 /* 330 * Enable encryption 331 */ 332 - static int tape_3592_enable_crypt(struct tape_device *device) 333 { 334 struct tape_request *request; 335 char *data; 336 337 DBF_EVENT(6, "tape_3592_enable_crypt\n"); 338 if (!crypt_supported(device)) 339 - return -ENOSYS; 340 request = tape_alloc_request(2, 72); 341 if (IS_ERR(request)) 342 - return PTR_ERR(request); 343 data = request->cpdata; 344 memset(data,0,72); 345 ··· 354 request->op = TO_CRYPT_ON; 355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 357 return tape_do_io_free(device, request); 358 } 359 360 /* 361 * Disable encryption 362 */ 363 - static int tape_3592_disable_crypt(struct tape_device *device) 364 { 365 struct tape_request *request; 366 char *data; 367 368 DBF_EVENT(6, "tape_3592_disable_crypt\n"); 369 if (!crypt_supported(device)) 370 - return -ENOSYS; 371 request = tape_alloc_request(2, 72); 372 if (IS_ERR(request)) 373 - return PTR_ERR(request); 374 data = request->cpdata; 375 memset(data,0,72); 376 ··· 402 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 403 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 404 405 return tape_do_io_free(device, request); 406 } 407 408 /* ··· 495 /* 496 * SENSE Medium: Get Sense data about medium state 497 */ 498 - static int 499 - tape_3590_sense_medium(struct tape_device *device) 500 { 501 struct tape_request *request; 502 ··· 505 request->op = TO_MSEN; 506 tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); 507 return tape_do_io_free(device, request); 508 } 509 510 /* ··· 595 * 2. The attention msg is written to the "read subsystem data" buffer. 596 * In this case we probably should print it to the console. 597 */ 598 - static int 599 - tape_3590_read_attmsg(struct tape_device *device) 600 { 601 struct tape_request *request; 602 char *buf; 603 604 request = tape_alloc_request(3, 4096); 605 if (IS_ERR(request)) 606 - return PTR_ERR(request); 607 request->op = TO_READ_ATTMSG; 608 buf = request->cpdata; 609 buf[0] = PREP_RD_SS_DATA; ··· 610 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); 611 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); 612 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); 613 - return tape_do_io_free(device, request); 614 } 615 616 /* 617 * These functions are used to schedule follow-up actions from within an 618 * interrupt context (like unsolicited interrupts). 619 */ 620 struct work_handler_data { 621 struct tape_device *device; ··· 634 635 switch (p->op) { 636 case TO_MSEN: 637 - tape_3590_sense_medium(p->device); 638 break; 639 case TO_READ_ATTMSG: 640 - tape_3590_read_attmsg(p->device); 641 break; 642 case TO_CRYPT_ON: 643 - tape_3592_enable_crypt(p->device); 644 break; 645 case TO_CRYPT_OFF: 646 - tape_3592_disable_crypt(p->device); 647 break; 648 default: 649 DBF_EVENT(3, "T3590: work handler undefined for "
··· 329 /* 330 * Enable encryption 331 */ 332 + static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device) 333 { 334 struct tape_request *request; 335 char *data; 336 337 DBF_EVENT(6, "tape_3592_enable_crypt\n"); 338 if (!crypt_supported(device)) 339 + return ERR_PTR(-ENOSYS); 340 request = tape_alloc_request(2, 72); 341 if (IS_ERR(request)) 342 + return request; 343 data = request->cpdata; 344 memset(data,0,72); 345 ··· 354 request->op = TO_CRYPT_ON; 355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 357 + return request; 358 + } 359 + 360 + static int tape_3592_enable_crypt(struct tape_device *device) 361 + { 362 + struct tape_request *request; 363 + 364 + request = __tape_3592_enable_crypt(device); 365 + if (IS_ERR(request)) 366 + return PTR_ERR(request); 367 return tape_do_io_free(device, request); 368 + } 369 + 370 + static void tape_3592_enable_crypt_async(struct tape_device *device) 371 + { 372 + struct tape_request *request; 373 + 374 + request = __tape_3592_enable_crypt(device); 375 + if (!IS_ERR(request)) 376 + tape_do_io_async_free(device, request); 377 } 378 379 /* 380 * Disable encryption 381 */ 382 + static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device) 383 { 384 struct tape_request *request; 385 char *data; 386 387 DBF_EVENT(6, "tape_3592_disable_crypt\n"); 388 if (!crypt_supported(device)) 389 + return ERR_PTR(-ENOSYS); 390 request = tape_alloc_request(2, 72); 391 if (IS_ERR(request)) 392 + return request; 393 data = request->cpdata; 394 memset(data,0,72); 395 ··· 383 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 384 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 385 386 + return request; 387 + } 388 + 389 + static int tape_3592_disable_crypt(struct tape_device *device) 390 + { 391 + struct tape_request *request; 392 + 393 + request = __tape_3592_disable_crypt(device); 394 + if (IS_ERR(request)) 395 + return PTR_ERR(request); 396 return tape_do_io_free(device, request); 397 + } 398 + 399 + static void tape_3592_disable_crypt_async(struct tape_device *device) 400 + { 401 + struct tape_request *request; 402 + 403 + request = __tape_3592_disable_crypt(device); 404 + if (!IS_ERR(request)) 405 + tape_do_io_async_free(device, request); 406 } 407 408 /* ··· 457 /* 458 * SENSE Medium: Get Sense data about medium state 459 */ 460 + static int tape_3590_sense_medium(struct tape_device *device) 461 { 462 struct tape_request *request; 463 ··· 468 request->op = TO_MSEN; 469 tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); 470 return tape_do_io_free(device, request); 471 + } 472 + 473 + static void tape_3590_sense_medium_async(struct tape_device *device) 474 + { 475 + struct tape_request *request; 476 + 477 + request = tape_alloc_request(1, 128); 478 + if (IS_ERR(request)) 479 + return; 480 + request->op = TO_MSEN; 481 + tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); 482 + tape_do_io_async_free(device, request); 483 + } 484 485 /* ··· 546 * 2. The attention msg is written to the "read subsystem data" buffer. 547 * In this case we probably should print it to the console.
548 */ 549 + static void tape_3590_read_attmsg_async(struct tape_device *device) 550 { 551 struct tape_request *request; 552 char *buf; 553 554 request = tape_alloc_request(3, 4096); 555 if (IS_ERR(request)) 556 + return; 557 request->op = TO_READ_ATTMSG; 558 buf = request->cpdata; 559 buf[0] = PREP_RD_SS_DATA; ··· 562 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); 563 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); 564 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); 565 + tape_do_io_async_free(device, request); 566 } 567 568 /* 569 * These functions are used to schedule follow-up actions from within an 570 * interrupt context (like unsolicited interrupts). 571 + * Note: the work handler is called by the system work queue. The tape 572 + * commands started by the handler need to be asynchronous, otherwise 573 + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). 574 */ 575 struct work_handler_data { 576 struct tape_device *device; ··· 583 584 switch (p->op) { 585 case TO_MSEN: 586 + tape_3590_sense_medium_async(p->device); 587 break; 588 case TO_READ_ATTMSG: 589 + tape_3590_read_attmsg_async(p->device); 590 break; 591 case TO_CRYPT_ON: 592 + tape_3592_enable_crypt_async(p->device); 593 break; 594 case TO_CRYPT_OFF: 595 + tape_3592_disable_crypt_async(p->device); 596 break; 597 default: 598 DBF_EVENT(3, "T3590: work handler undefined for "
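The tape_3590 change above splits each channel-program builder out of its submit path: a __tape_3592_*() / tape_3590_*() helper only builds the request, the existing synchronous entry points still submit it with tape_do_io_free() and wait, and the new *_async() variants hand it to tape_do_io_async_free() and return at once. The added comment is the reason for the split: the work handler runs on the system workqueue, so if it waited for an I/O completion that itself needs further deferred work (the deferred cc=1 case), it would deadlock. A minimal sketch of the shape only; the helper names below are illustrative, not the driver's:

    static struct tape_request *__build_request(struct tape_device *device)
    {
            struct tape_request *request;

            request = tape_alloc_request(2, 72);
            if (IS_ERR(request))
                    return request;
            /* ... set request->op and chain the CCWs here ... */
            return request;
    }

    /* process context: submit and sleep until the request completes */
    static int do_sync(struct tape_device *device)
    {
            struct tape_request *request = __build_request(device);

            if (IS_ERR(request))
                    return PTR_ERR(request);
            return tape_do_io_free(device, request);
    }

    /* work-queue context: fire and forget, never wait for the interrupt */
    static void do_async(struct tape_device *device)
    {
            struct tape_request *request = __build_request(device);

            if (!IS_ERR(request))
                    tape_do_io_async_free(device, request);
    }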
+1 -1
drivers/scsi/scsi_lib.c
··· 443 &sdev->request_queue->queue_flags); 444 if (flagset) 445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 446 - __blk_run_queue(sdev->request_queue); 447 if (flagset) 448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 449 spin_unlock(sdev->request_queue->queue_lock);
··· 443 &sdev->request_queue->queue_flags); 444 if (flagset) 445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 446 + __blk_run_queue(sdev->request_queue, false); 447 if (flagset) 448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 449 spin_unlock(sdev->request_queue->queue_lock);
+1 -1
drivers/scsi/scsi_transport_fc.c
··· 3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3830 if (flagset) 3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); 3832 - __blk_run_queue(rport->rqst_q); 3833 if (flagset) 3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
··· 3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3830 if (flagset) 3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); 3832 + __blk_run_queue(rport->rqst_q, false); 3833 if (flagset) 3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
+1
drivers/tty/serial/serial_cs.c
··· 712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), 714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 715 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 716 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 717 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
··· 712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), 714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 715 + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), 716 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 717 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 718 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
+10 -5
drivers/usb/gadget/f_phonet.c
··· 346 347 if (unlikely(!skb)) 348 break; 349 - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, 350 - req->actual); 351 page = NULL; 352 353 if (req->actual < req->length) { /* Last fragment */ 354 - skb->protocol = htons(ETH_P_PHONET); 355 - skb_reset_mac_header(skb); 356 - pskb_pull(skb, 1); 357 skb->dev = dev; 358 dev->stats.rx_packets++; 359 dev->stats.rx_bytes += skb->len;
··· 346 347 if (unlikely(!skb)) 348 break; 349 + 350 + if (skb->len == 0) { /* First fragment */ 351 + skb->protocol = htons(ETH_P_PHONET); 352 + skb_reset_mac_header(skb); 353 + /* Can't use pskb_pull() on page in IRQ */ 354 + memcpy(skb_put(skb, 1), page_address(page), 1); 355 + } 356 + 357 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 358 + skb->len == 0, req->actual); 359 page = NULL; 360 361 if (req->actual < req->length) { /* Last fragment */ 362 skb->dev = dev; 363 dev->stats.rx_packets++; 364 dev->stats.rx_bytes += skb->len;
+1
drivers/usb/host/ehci-xilinx-of.c
··· 29 30 #include <linux/of.h> 31 #include <linux/of_platform.h> 32 33 /** 34 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
··· 29 30 #include <linux/of.h> 31 #include <linux/of_platform.h> 32 + #include <linux/of_address.h> 33 34 /** 35 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
+8 -1
drivers/video/backlight/ltv350qv.c
··· 239 lcd->spi = spi; 240 lcd->power = FB_BLANK_POWERDOWN; 241 lcd->buffer = kzalloc(8, GFP_KERNEL); 242 243 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); 244 if (IS_ERR(ld)) { 245 ret = PTR_ERR(ld); 246 - goto out_free_lcd; 247 } 248 lcd->ld = ld; 249 ··· 261 262 out_unregister: 263 lcd_device_unregister(ld); 264 out_free_lcd: 265 kfree(lcd); 266 return ret; ··· 274 275 ltv350qv_power(lcd, FB_BLANK_POWERDOWN); 276 lcd_device_unregister(lcd->ld); 277 kfree(lcd); 278 279 return 0;
··· 239 lcd->spi = spi; 240 lcd->power = FB_BLANK_POWERDOWN; 241 lcd->buffer = kzalloc(8, GFP_KERNEL); 242 + if (!lcd->buffer) { 243 + ret = -ENOMEM; 244 + goto out_free_lcd; 245 + } 246 247 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); 248 if (IS_ERR(ld)) { 249 ret = PTR_ERR(ld); 250 + goto out_free_buffer; 251 } 252 lcd->ld = ld; 253 ··· 257 258 out_unregister: 259 lcd_device_unregister(ld); 260 + out_free_buffer: 261 + kfree(lcd->buffer); 262 out_free_lcd: 263 kfree(lcd); 264 return ret; ··· 268 269 ltv350qv_power(lcd, FB_BLANK_POWERDOWN); 270 lcd_device_unregister(lcd->ld); 271 + kfree(lcd->buffer); 272 kfree(lcd); 273 274 return 0;
+5 -2
drivers/watchdog/sbc_fitpc2_wdt.c
··· 201 static int __init fitpc2_wdt_init(void) 202 { 203 int err; 204 205 - if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) 206 return -ENODEV; 207 208 - pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); 209 210 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { 211 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
··· 201 static int __init fitpc2_wdt_init(void) 202 { 203 int err; 204 + const char *brd_name; 205 206 + brd_name = dmi_get_system_info(DMI_BOARD_NAME); 207 + 208 + if (!brd_name || !strstr(brd_name, "SBC-FITPC2")) 209 return -ENODEV; 210 211 + pr_info("%s found\n", brd_name); 212 213 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { 214 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
+4 -26
fs/ceph/dir.c
··· 60 } 61 di->dentry = dentry; 62 di->lease_session = NULL; 63 - di->parent_inode = igrab(dentry->d_parent->d_inode); 64 dentry->d_fsdata = di; 65 dentry->d_time = jiffies; 66 ceph_dentry_lru_add(dentry); ··· 409 spin_lock(&inode->i_lock); 410 if (ci->i_release_count == fi->dir_release_count) { 411 dout(" marking %p complete\n", inode); 412 - ci->i_ceph_flags |= CEPH_I_COMPLETE; 413 ci->i_max_offset = filp->f_pos; 414 } 415 spin_unlock(&inode->i_lock); ··· 496 497 /* .snap dir? */ 498 if (err == -ENOENT && 499 strcmp(dentry->d_name.name, 500 fsc->mount_options->snapdir_name) == 0) { 501 struct inode *inode = ceph_get_snapdir(parent); ··· 993 { 994 struct inode *dir; 995 996 - if (nd->flags & LOOKUP_RCU) 997 return -ECHILD; 998 999 dir = dentry->d_parent->d_inode; ··· 1030 static void ceph_dentry_release(struct dentry *dentry) 1031 { 1032 struct ceph_dentry_info *di = ceph_dentry(dentry); 1033 - struct inode *parent_inode = NULL; 1034 - u64 snapid = CEPH_NOSNAP; 1035 1036 - if (!IS_ROOT(dentry)) { 1037 - parent_inode = di->parent_inode; 1038 - if (parent_inode) 1039 - snapid = ceph_snap(parent_inode); 1040 - } 1041 - dout("dentry_release %p parent %p\n", dentry, parent_inode); 1042 - if (parent_inode && snapid != CEPH_SNAPDIR) { 1043 - struct ceph_inode_info *ci = ceph_inode(parent_inode); 1044 - 1045 - spin_lock(&parent_inode->i_lock); 1046 - if (ci->i_shared_gen == di->lease_shared_gen || 1047 - snapid <= CEPH_MAXSNAP) { 1048 - dout(" clearing %p complete (d_release)\n", 1049 - parent_inode); 1050 - ci->i_ceph_flags &= ~CEPH_I_COMPLETE; 1051 - ci->i_release_count++; 1052 - } 1053 - spin_unlock(&parent_inode->i_lock); 1054 - } 1055 if (di) { 1056 ceph_dentry_lru_del(dentry); 1057 if (di->lease_session) ··· 1039 kmem_cache_free(ceph_dentry_cachep, di); 1040 dentry->d_fsdata = NULL; 1041 } 1042 - if (parent_inode) 1043 - iput(parent_inode); 1044 } 1045 1046 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
··· 60 } 61 di->dentry = dentry; 62 di->lease_session = NULL; 63 dentry->d_fsdata = di; 64 dentry->d_time = jiffies; 65 ceph_dentry_lru_add(dentry); ··· 410 spin_lock(&inode->i_lock); 411 if (ci->i_release_count == fi->dir_release_count) { 412 dout(" marking %p complete\n", inode); 413 + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ 414 ci->i_max_offset = filp->f_pos; 415 } 416 spin_unlock(&inode->i_lock); ··· 497 498 /* .snap dir? */ 499 if (err == -ENOENT && 500 + ceph_snap(parent) == CEPH_NOSNAP && 501 strcmp(dentry->d_name.name, 502 fsc->mount_options->snapdir_name) == 0) { 503 struct inode *inode = ceph_get_snapdir(parent); ··· 993 { 994 struct inode *dir; 995 996 + if (nd && nd->flags & LOOKUP_RCU) 997 return -ECHILD; 998 999 dir = dentry->d_parent->d_inode; ··· 1030 static void ceph_dentry_release(struct dentry *dentry) 1031 { 1032 struct ceph_dentry_info *di = ceph_dentry(dentry); 1033 1034 + dout("dentry_release %p\n", dentry); 1035 if (di) { 1036 ceph_dentry_lru_del(dentry); 1037 if (di->lease_session) ··· 1059 kmem_cache_free(ceph_dentry_cachep, di); 1060 dentry->d_fsdata = NULL; 1061 } 1062 } 1063 1064 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
+1 -1
fs/ceph/inode.c
··· 707 (issued & CEPH_CAP_FILE_EXCL) == 0 && 708 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 709 dout(" marking %p complete (empty)\n", inode); 710 - ci->i_ceph_flags |= CEPH_I_COMPLETE; 711 ci->i_max_offset = 2; 712 } 713 break;
··· 707 (issued & CEPH_CAP_FILE_EXCL) == 0 && 708 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 709 dout(" marking %p complete (empty)\n", inode); 710 + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ 711 ci->i_max_offset = 2; 712 } 713 break;
-1
fs/ceph/super.h
··· 207 struct dentry *dentry; 208 u64 time; 209 u64 offset; 210 - struct inode *parent_inode; 211 }; 212 213 struct ceph_inode_xattrs_info {
··· 207 struct dentry *dentry; 208 u64 time; 209 u64 offset; 210 }; 211 212 struct ceph_inode_xattrs_info {
+24 -2
fs/dcache.c
··· 1523 } 1524 EXPORT_SYMBOL(d_alloc_root); 1525 1526 /** 1527 * d_obtain_alias - find or allocate a dentry for a given inode 1528 * @inode: inode to allocate the dentry for ··· 1574 if (IS_ERR(inode)) 1575 return ERR_CAST(inode); 1576 1577 - res = d_find_alias(inode); 1578 if (res) 1579 goto out_iput; 1580 ··· 1587 1588 1589 spin_lock(&inode->i_lock); 1590 - res = __d_find_alias(inode, 0); 1591 if (res) { 1592 spin_unlock(&inode->i_lock); 1593 dput(tmp);
··· 1523 } 1524 EXPORT_SYMBOL(d_alloc_root); 1525 1526 + static struct dentry * __d_find_any_alias(struct inode *inode) 1527 + { 1528 + struct dentry *alias; 1529 + 1530 + if (list_empty(&inode->i_dentry)) 1531 + return NULL; 1532 + alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); 1533 + __dget(alias); 1534 + return alias; 1535 + } 1536 + 1537 + static struct dentry * d_find_any_alias(struct inode *inode) 1538 + { 1539 + struct dentry *de; 1540 + 1541 + spin_lock(&inode->i_lock); 1542 + de = __d_find_any_alias(inode); 1543 + spin_unlock(&inode->i_lock); 1544 + return de; 1545 + } 1546 + 1547 + 1548 /** 1549 * d_obtain_alias - find or allocate a dentry for a given inode 1550 * @inode: inode to allocate the dentry for ··· 1552 if (IS_ERR(inode)) 1553 return ERR_CAST(inode); 1554 1555 + res = d_find_any_alias(inode); 1556 if (res) 1557 goto out_iput; 1558 ··· 1565 1566 1567 spin_lock(&inode->i_lock); 1568 + res = __d_find_any_alias(inode); 1569 if (res) { 1570 spin_unlock(&inode->i_lock); 1571 dput(tmp);
+2 -6
fs/exofs/namei.c
··· 272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page); 273 if (!new_de) 274 goto out_dir; 275 - inode_inc_link_count(old_inode); 276 err = exofs_set_link(new_dir, new_de, new_page, old_inode); 277 new_inode->i_ctime = CURRENT_TIME; 278 if (dir_de) ··· 285 if (new_dir->i_nlink >= EXOFS_LINK_MAX) 286 goto out_dir; 287 } 288 - inode_inc_link_count(old_inode); 289 err = exofs_add_link(new_dentry, old_inode); 290 - if (err) { 291 - inode_dec_link_count(old_inode); 292 goto out_dir; 293 - } 294 if (dir_de) 295 inode_inc_link_count(new_dir); 296 } ··· 295 old_inode->i_ctime = CURRENT_TIME; 296 297 exofs_delete_entry(old_de, old_page); 298 - inode_dec_link_count(old_inode); 299 300 if (dir_de) { 301 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
··· 272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page); 273 if (!new_de) 274 goto out_dir; 275 err = exofs_set_link(new_dir, new_de, new_page, old_inode); 276 new_inode->i_ctime = CURRENT_TIME; 277 if (dir_de) ··· 286 if (new_dir->i_nlink >= EXOFS_LINK_MAX) 287 goto out_dir; 288 } 289 err = exofs_add_link(new_dentry, old_inode); 290 + if (err) 291 goto out_dir; 292 if (dir_de) 293 inode_inc_link_count(new_dir); 294 } ··· 299 old_inode->i_ctime = CURRENT_TIME; 300 301 exofs_delete_entry(old_de, old_page); 302 + mark_inode_dirty(old_inode); 303 304 if (dir_de) { 305 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
+2 -7
fs/ext2/namei.c
··· 344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); 345 if (!new_de) 346 goto out_dir; 347 - inode_inc_link_count(old_inode); 348 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); 349 new_inode->i_ctime = CURRENT_TIME_SEC; 350 if (dir_de) ··· 355 if (new_dir->i_nlink >= EXT2_LINK_MAX) 356 goto out_dir; 357 } 358 - inode_inc_link_count(old_inode); 359 err = ext2_add_link(new_dentry, old_inode); 360 - if (err) { 361 - inode_dec_link_count(old_inode); 362 goto out_dir; 363 - } 364 if (dir_de) 365 inode_inc_link_count(new_dir); 366 } ··· 365 /* 366 * Like most other Unix systems, set the ctime for inodes on a 367 * rename. 368 - * inode_dec_link_count() will mark the inode dirty. 369 */ 370 old_inode->i_ctime = CURRENT_TIME_SEC; 371 372 ext2_delete_entry (old_de, old_page); 373 - inode_dec_link_count(old_inode); 374 375 if (dir_de) { 376 if (old_dir != new_dir)
··· 344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); 345 if (!new_de) 346 goto out_dir; 347 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); 348 new_inode->i_ctime = CURRENT_TIME_SEC; 349 if (dir_de) ··· 356 if (new_dir->i_nlink >= EXT2_LINK_MAX) 357 goto out_dir; 358 } 359 err = ext2_add_link(new_dentry, old_inode); 360 + if (err) 361 goto out_dir; 362 if (dir_de) 363 inode_inc_link_count(new_dir); 364 } ··· 369 /* 370 * Like most other Unix systems, set the ctime for inodes on a 371 * rename. 372 */ 373 old_inode->i_ctime = CURRENT_TIME_SEC; 374 + mark_inode_dirty(old_inode); 375 376 ext2_delete_entry (old_de, old_page); 377 378 if (dir_de) { 379 if (old_dir != new_dir)
+2 -2
fs/fat/namei_vfat.c
··· 43 44 static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) 45 { 46 - if (nd->flags & LOOKUP_RCU) 47 return -ECHILD; 48 49 /* This is not negative dentry. Always valid. */ ··· 54 55 static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) 56 { 57 - if (nd->flags & LOOKUP_RCU) 58 return -ECHILD; 59 60 /*
··· 43 44 static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) 45 { 46 + if (nd && nd->flags & LOOKUP_RCU) 47 return -ECHILD; 48 49 /* This is not negative dentry. Always valid. */ ··· 54 55 static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) 56 { 57 + if (nd && nd->flags & LOOKUP_RCU) 58 return -ECHILD; 59 60 /*
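The vfat hunk above is one instance of a pattern repeated across this merge (ceph, fuse, gfs2, jfs and ocfs2 below, while reiserfs xattr drops the check outright): ->d_revalidate() may be called without a nameidata, so the rcu-walk test has to be guarded before dereferencing nd. A minimal sketch of the guard, using a made-up filesystem name:

    /* sketch only; "examplefs" is a placeholder */
    static int examplefs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
    {
            /* some callers pass no nameidata at all */
            if (nd && nd->flags & LOOKUP_RCU)
                    return -ECHILD;         /* ask the VFS to retry in ref-walk mode */

            /* ... normal, possibly sleeping, validation of the dentry ... */
            return 1;
    }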
+1 -1
fs/fuse/dir.c
··· 158 { 159 struct inode *inode; 160 161 - if (nd->flags & LOOKUP_RCU) 162 return -ECHILD; 163 164 inode = entry->d_inode;
··· 158 { 159 struct inode *inode; 160 161 + if (nd && nd->flags & LOOKUP_RCU) 162 return -ECHILD; 163 164 inode = entry->d_inode;
+1 -1
fs/gfs2/dentry.c
··· 44 int error; 45 int had_lock = 0; 46 47 - if (nd->flags & LOOKUP_RCU) 48 return -ECHILD; 49 50 parent = dget_parent(dentry);
··· 44 int error; 45 int had_lock = 0; 46 47 + if (nd && nd->flags & LOOKUP_RCU) 48 return -ECHILD; 49 50 parent = dget_parent(dentry);
+14 -38
fs/hfs/dir.c
··· 238 } 239 240 /* 241 - * hfs_unlink() 242 * 243 - * This is the unlink() entry in the inode_operations structure for 244 - * regular HFS directories. The purpose is to delete an existing 245 - * file, given the inode for the parent directory and the name 246 - * (and its length) of the existing file. 247 */ 248 - static int hfs_unlink(struct inode *dir, struct dentry *dentry) 249 { 250 - struct inode *inode; 251 int res; 252 253 - inode = dentry->d_inode; 254 - res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 255 - if (res) 256 - return res; 257 - 258 - drop_nlink(inode); 259 - hfs_delete_inode(inode); 260 - inode->i_ctime = CURRENT_TIME_SEC; 261 - mark_inode_dirty(inode); 262 - 263 - return res; 264 - } 265 - 266 - /* 267 - * hfs_rmdir() 268 - * 269 - * This is the rmdir() entry in the inode_operations structure for 270 - * regular HFS directories. The purpose is to delete an existing 271 - * directory, given the inode for the parent directory and the name 272 - * (and its length) of the existing directory. 273 - */ 274 - static int hfs_rmdir(struct inode *dir, struct dentry *dentry) 275 - { 276 - struct inode *inode; 277 - int res; 278 - 279 - inode = dentry->d_inode; 280 - if (inode->i_size != 2) 281 return -ENOTEMPTY; 282 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 283 if (res) ··· 283 284 /* Unlink destination if it already exists */ 285 if (new_dentry->d_inode) { 286 - res = hfs_unlink(new_dir, new_dentry); 287 if (res) 288 return res; 289 } ··· 308 const struct inode_operations hfs_dir_inode_operations = { 309 .create = hfs_create, 310 .lookup = hfs_lookup, 311 - .unlink = hfs_unlink, 312 .mkdir = hfs_mkdir, 313 - .rmdir = hfs_rmdir, 314 .rename = hfs_rename, 315 .setattr = hfs_inode_setattr, 316 };
··· 238 } 239 240 /* 241 + * hfs_remove() 242 * 243 + * This serves as both unlink() and rmdir() in the inode_operations 244 + * structure for regular HFS directories. The purpose is to delete 245 + * an existing child, given the inode for the parent directory and 246 + * the name (and its length) of the existing directory. 247 + * 248 + * HFS does not have hardlinks, so both rmdir and unlink set the 249 + * link count to 0. The only difference is the emptiness check. 250 */ 251 + static int hfs_remove(struct inode *dir, struct dentry *dentry) 252 { 253 + struct inode *inode = dentry->d_inode; 254 int res; 255 256 + if (S_ISDIR(inode->i_mode) && inode->i_size != 2) 257 return -ENOTEMPTY; 258 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 259 if (res) ··· 307 308 /* Unlink destination if it already exists */ 309 if (new_dentry->d_inode) { 310 + res = hfs_remove(new_dir, new_dentry); 311 if (res) 312 return res; 313 } ··· 332 const struct inode_operations hfs_dir_inode_operations = { 333 .create = hfs_create, 334 .lookup = hfs_lookup, 335 + .unlink = hfs_remove, 336 .mkdir = hfs_mkdir, 337 + .rmdir = hfs_remove, 338 .rename = hfs_rename, 339 .setattr = hfs_inode_setattr, 340 };
+1 -1
fs/jfs/namei.c
··· 1600 1601 static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) 1602 { 1603 - if (nd->flags & LOOKUP_RCU) 1604 return -ECHILD; 1605 /* 1606 * This is not negative dentry. Always valid.
··· 1600 1601 static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) 1602 { 1603 + if (nd && nd->flags & LOOKUP_RCU) 1604 return -ECHILD; 1605 /* 1606 * This is not negative dentry. Always valid.
+2 -6
fs/minix/namei.c
··· 213 new_de = minix_find_entry(new_dentry, &new_page); 214 if (!new_de) 215 goto out_dir; 216 - inode_inc_link_count(old_inode); 217 minix_set_link(new_de, new_page, old_inode); 218 new_inode->i_ctime = CURRENT_TIME_SEC; 219 if (dir_de) ··· 224 if (new_dir->i_nlink >= info->s_link_max) 225 goto out_dir; 226 } 227 - inode_inc_link_count(old_inode); 228 err = minix_add_link(new_dentry, old_inode); 229 - if (err) { 230 - inode_dec_link_count(old_inode); 231 goto out_dir; 232 - } 233 if (dir_de) 234 inode_inc_link_count(new_dir); 235 } 236 237 minix_delete_entry(old_de, old_page); 238 - inode_dec_link_count(old_inode); 239 240 if (dir_de) { 241 minix_set_link(dir_de, dir_page, new_dir);
··· 213 new_de = minix_find_entry(new_dentry, &new_page); 214 if (!new_de) 215 goto out_dir; 216 minix_set_link(new_de, new_page, old_inode); 217 new_inode->i_ctime = CURRENT_TIME_SEC; 218 if (dir_de) ··· 225 if (new_dir->i_nlink >= info->s_link_max) 226 goto out_dir; 227 } 228 err = minix_add_link(new_dentry, old_inode); 229 + if (err) 230 goto out_dir; 231 if (dir_de) 232 inode_inc_link_count(new_dir); 233 } 234 235 minix_delete_entry(old_de, old_page); 236 + mark_inode_dirty(old_inode); 237 238 if (dir_de) { 239 minix_set_link(dir_de, dir_page, new_dir);
+11 -3
fs/namei.c
··· 1546 /* nd->path had been dropped */ 1547 current->total_link_count = 0; 1548 nd->path = save; 1549 path_get(&nd->path); 1550 nd->flags |= LOOKUP_REVAL; 1551 result = link_path_walk(name, nd); ··· 2456 /* !O_CREAT, simple open */ 2457 error = do_path_lookup(dfd, pathname, flags, &nd); 2458 if (unlikely(error)) 2459 - goto out_filp; 2460 error = -ELOOP; 2461 if (!(nd.flags & LOOKUP_FOLLOW)) { 2462 if (nd.inode->i_op->follow_link) 2463 - goto out_path; 2464 } 2465 error = -ENOTDIR; 2466 if (nd.flags & LOOKUP_DIRECTORY) { 2467 if (!nd.inode->i_op->lookup) 2468 - goto out_path; 2469 } 2470 audit_inode(pathname, nd.path.dentry); 2471 filp = finish_open(&nd, open_flag, acc_mode); 2472 release_open_intent(&nd); 2473 return filp; 2474 2475 creat: 2476 /* OK, have to create the file. Find the parent. */
··· 1546 /* nd->path had been dropped */ 1547 current->total_link_count = 0; 1548 nd->path = save; 1549 + nd->inode = save.dentry->d_inode; 1550 path_get(&nd->path); 1551 nd->flags |= LOOKUP_REVAL; 1552 result = link_path_walk(name, nd); ··· 2455 /* !O_CREAT, simple open */ 2456 error = do_path_lookup(dfd, pathname, flags, &nd); 2457 if (unlikely(error)) 2458 + goto out_filp2; 2459 error = -ELOOP; 2460 if (!(nd.flags & LOOKUP_FOLLOW)) { 2461 if (nd.inode->i_op->follow_link) 2462 + goto out_path2; 2463 } 2464 error = -ENOTDIR; 2465 if (nd.flags & LOOKUP_DIRECTORY) { 2466 if (!nd.inode->i_op->lookup) 2467 + goto out_path2; 2468 } 2469 audit_inode(pathname, nd.path.dentry); 2470 filp = finish_open(&nd, open_flag, acc_mode); 2471 + out2: 2472 release_open_intent(&nd); 2473 return filp; 2474 + 2475 + out_path2: 2476 + path_put(&nd.path); 2477 + out_filp2: 2478 + filp = ERR_PTR(error); 2479 + goto out2; 2480 2481 creat: 2482 /* OK, have to create the file. Find the parent. */
+42 -2
fs/nfs/nfs4proc.c
··· 51 #include <linux/sunrpc/bc_xprt.h> 52 #include <linux/xattr.h> 53 #include <linux/utsname.h> 54 55 #include "nfs4_fs.h" 56 #include "delegation.h" ··· 3253 } 3254 } 3255 3256 struct nfs4_cached_acl { 3257 int cached; 3258 size_t len; ··· 3450 .rpc_argp = &arg, 3451 .rpc_resp = &res, 3452 }; 3453 - int ret; 3454 3455 if (!nfs4_server_supports_acls(server)) 3456 return -EOPNOTSUPP; 3457 nfs_inode_return_delegation(inode); 3458 - buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3459 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3460 /* 3461 * Acl update can result in inode attribute update. 3462 * so mark the attribute cache invalid.
··· 51 #include <linux/sunrpc/bc_xprt.h> 52 #include <linux/xattr.h> 53 #include <linux/utsname.h> 54 + #include <linux/mm.h> 55 56 #include "nfs4_fs.h" 57 #include "delegation.h" ··· 3252 } 3253 } 3254 3255 + static int buf_to_pages_noslab(const void *buf, size_t buflen, 3256 + struct page **pages, unsigned int *pgbase) 3257 + { 3258 + struct page *newpage, **spages; 3259 + int rc = 0; 3260 + size_t len; 3261 + spages = pages; 3262 + 3263 + do { 3264 + len = min(PAGE_CACHE_SIZE, buflen); 3265 + newpage = alloc_page(GFP_KERNEL); 3266 + 3267 + if (newpage == NULL) 3268 + goto unwind; 3269 + memcpy(page_address(newpage), buf, len); 3270 + buf += len; 3271 + buflen -= len; 3272 + *pages++ = newpage; 3273 + rc++; 3274 + } while (buflen != 0); 3275 + 3276 + return rc; 3277 + 3278 + unwind: 3279 + for(; rc > 0; rc--) 3280 + __free_page(spages[rc-1]); 3281 + return -ENOMEM; 3282 + } 3283 + 3284 struct nfs4_cached_acl { 3285 int cached; 3286 size_t len; ··· 3420 .rpc_argp = &arg, 3421 .rpc_resp = &res, 3422 }; 3423 + int ret, i; 3424 3425 if (!nfs4_server_supports_acls(server)) 3426 return -EOPNOTSUPP; 3427 + i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3428 + if (i < 0) 3429 + return i; 3430 nfs_inode_return_delegation(inode); 3431 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3432 + 3433 + /* 3434 + * Free each page after tx, so the only ref left is 3435 + * held by the network stack 3436 + */ 3437 + for (; i > 0; i--) 3438 + put_page(pages[i-1]); 3439 + 3440 /* 3441 * Acl update can result in inode attribute update. 3442 * so mark the attribute cache invalid.
+1 -1
fs/nfsd/nfs4callback.c
··· 432 * If the server returns different values for sessionID, slotID or 433 * sequence number, the server is looney tunes. 434 */ 435 - p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4); 436 if (unlikely(p == NULL)) 437 goto out_overflow; 438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
··· 432 * If the server returns different values for sessionID, slotID or 433 * sequence number, the server is looney tunes. 434 */ 435 + p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4); 436 if (unlikely(p == NULL)) 437 goto out_overflow; 438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
+7 -6
fs/nfsd/nfs4state.c
··· 2445 static struct nfs4_delegation * 2446 find_delegation_file(struct nfs4_file *fp, stateid_t *stid) 2447 { 2448 - struct nfs4_delegation *dp = NULL; 2449 2450 spin_lock(&recall_lock); 2451 - list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { 2452 - if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) 2453 - break; 2454 - } 2455 spin_unlock(&recall_lock); 2456 - return dp; 2457 } 2458 2459 int share_access_to_flags(u32 share_access)
··· 2445 static struct nfs4_delegation * 2446 find_delegation_file(struct nfs4_file *fp, stateid_t *stid) 2447 { 2448 + struct nfs4_delegation *dp; 2449 2450 spin_lock(&recall_lock); 2451 + list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 2452 + if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) { 2453 + spin_unlock(&recall_lock); 2454 + return dp; 2455 + } 2456 spin_unlock(&recall_lock); 2457 + return NULL; 2458 } 2459 2460 int share_access_to_flags(u32 share_access)
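The find_delegation_file() fix above is a classic list_for_each_entry() pitfall: when the loop runs to completion without a break, the cursor does not become NULL, it ends up aliasing the list head, so returning it hands the caller a bogus pointer. Returning from inside the loop (or keeping a separate result variable) is the safe shape. A small sketch with a made-up item type:

    struct item {
            int key;
            struct list_head node;
    };

    /* buggy: after a full traversal "it" points at the list head, not NULL */
    static struct item *find_buggy(struct list_head *head, int key)
    {
            struct item *it = NULL;

            list_for_each_entry(it, head, node)
                    if (it->key == key)
                            break;
            return it;
    }

    /* fixed: only ever return a cursor that was seen inside the loop */
    static struct item *find_fixed(struct list_head *head, int key)
    {
            struct item *it;

            list_for_each_entry(it, head, node)
                    if (it->key == key)
                            return it;
            return NULL;
    }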
+2 -2
fs/nfsd/nfs4xdr.c
··· 1142 1143 u32 dummy; 1144 char *machine_name; 1145 - int i; 1146 int nr_secflavs; 1147 1148 READ_BUF(16); ··· 1215 READ_BUF(4); 1216 READ32(dummy); 1217 READ_BUF(dummy * 4); 1218 - for (i = 0; i < dummy; ++i) 1219 READ32(dummy); 1220 break; 1221 case RPC_AUTH_GSS:
··· 1142 1143 u32 dummy; 1144 char *machine_name; 1145 + int i, j; 1146 int nr_secflavs; 1147 1148 READ_BUF(16); ··· 1215 READ_BUF(4); 1216 READ32(dummy); 1217 READ_BUF(dummy * 4); 1218 + for (j = 0; j < dummy; ++j) 1219 READ32(dummy); 1220 break; 1221 case RPC_AUTH_GSS:
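The one-character nfs4xdr.c change above matters because the skip loop sits inside the decoder's larger loop over nr_secflavs, which (from the surrounding context) already uses i as its counter; reusing i for the inner loop corrupted the outer iteration. Roughly, the intended shape is:

    /* sketch: nested loops must not share a counter */
    for (i = 0; i < nr_secflavs; i++) {
            /* ... per-flavour decoding ... */
            READ32(dummy);                  /* count of entries to skip */
            READ_BUF(dummy * 4);
            for (j = 0; j < dummy; ++j)     /* was "i", clobbering the outer loop */
                    READ32(dummy);
    }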
+1 -7
fs/nilfs2/namei.c
··· 397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); 398 if (!new_de) 399 goto out_dir; 400 - inc_nlink(old_inode); 401 nilfs_set_link(new_dir, new_de, new_page, old_inode); 402 nilfs_mark_inode_dirty(new_dir); 403 new_inode->i_ctime = CURRENT_TIME; ··· 410 if (new_dir->i_nlink >= NILFS_LINK_MAX) 411 goto out_dir; 412 } 413 - inc_nlink(old_inode); 414 err = nilfs_add_link(new_dentry, old_inode); 415 - if (err) { 416 - drop_nlink(old_inode); 417 - nilfs_mark_inode_dirty(old_inode); 418 goto out_dir; 419 - } 420 if (dir_de) { 421 inc_nlink(new_dir); 422 nilfs_mark_inode_dirty(new_dir); ··· 426 old_inode->i_ctime = CURRENT_TIME; 427 428 nilfs_delete_entry(old_de, old_page); 429 - drop_nlink(old_inode); 430 431 if (dir_de) { 432 nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
··· 397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); 398 if (!new_de) 399 goto out_dir; 400 nilfs_set_link(new_dir, new_de, new_page, old_inode); 401 nilfs_mark_inode_dirty(new_dir); 402 new_inode->i_ctime = CURRENT_TIME; ··· 411 if (new_dir->i_nlink >= NILFS_LINK_MAX) 412 goto out_dir; 413 } 414 err = nilfs_add_link(new_dentry, old_inode); 415 + if (err) 416 goto out_dir; 417 if (dir_de) { 418 inc_nlink(new_dir); 419 nilfs_mark_inode_dirty(new_dir); ··· 431 old_inode->i_ctime = CURRENT_TIME; 432 433 nilfs_delete_entry(old_de, old_page); 434 435 if (dir_de) { 436 nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+2 -1
fs/nilfs2/segment.c
··· 430 nilfs_segctor_map_segsum_entry( 431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); 432 433 - if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) 434 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 435 /* skip finfo */ 436 }
··· 430 nilfs_segctor_map_segsum_entry( 431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); 432 433 + if (NILFS_I(inode)->i_root && 434 + !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) 435 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 436 /* skip finfo */ 437 }
+1 -1
fs/ocfs2/dcache.c
··· 56 int ret = 0; /* if all else fails, just return false */ 57 struct ocfs2_super *osb; 58 59 - if (nd->flags & LOOKUP_RCU) 60 return -ECHILD; 61 62 inode = dentry->d_inode;
··· 56 int ret = 0; /* if all else fails, just return false */ 57 struct ocfs2_super *osb; 58 59 + if (nd && nd->flags & LOOKUP_RCU) 60 return -ECHILD; 61 62 inode = dentry->d_inode;
+8
fs/open.c
··· 233 234 if (!(file->f_mode & FMODE_WRITE)) 235 return -EBADF; 236 /* 237 * Revalidate the write permissions, in case security policy has 238 * changed since the files were opened.
··· 233 234 if (!(file->f_mode & FMODE_WRITE)) 235 return -EBADF; 236 + 237 + /* It's not possible punch hole on append only file */ 238 + if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode)) 239 + return -EPERM; 240 + 241 + if (IS_IMMUTABLE(inode)) 242 + return -EPERM; 243 + 244 /* 245 * Revalidate the write permissions, in case security policy has 246 * changed since the files were opened.
-30
fs/proc/base.c
··· 2620 &proc_self_inode_operations, NULL, {}), 2621 }; 2622 2623 - /* 2624 - * Exceptional case: normally we are not allowed to unhash a busy 2625 - * directory. In this case, however, we can do it - no aliasing problems 2626 - * due to the way we treat inodes. 2627 - */ 2628 - static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd) 2629 - { 2630 - struct inode *inode; 2631 - struct task_struct *task; 2632 - 2633 - if (nd->flags & LOOKUP_RCU) 2634 - return -ECHILD; 2635 - 2636 - inode = dentry->d_inode; 2637 - task = get_proc_task(inode); 2638 - if (task) { 2639 - put_task_struct(task); 2640 - return 1; 2641 - } 2642 - d_drop(dentry); 2643 - return 0; 2644 - } 2645 - 2646 - static const struct dentry_operations proc_base_dentry_operations = 2647 - { 2648 - .d_revalidate = proc_base_revalidate, 2649 - .d_delete = pid_delete_dentry, 2650 - }; 2651 - 2652 static struct dentry *proc_base_instantiate(struct inode *dir, 2653 struct dentry *dentry, struct task_struct *task, const void *ptr) 2654 { ··· 2656 if (p->fop) 2657 inode->i_fop = p->fop; 2658 ei->op = p->op; 2659 - d_set_d_op(dentry, &proc_base_dentry_operations); 2660 d_add(dentry, inode); 2661 error = NULL; 2662 out:
··· 2620 &proc_self_inode_operations, NULL, {}), 2621 }; 2622 2623 static struct dentry *proc_base_instantiate(struct inode *dir, 2624 struct dentry *dentry, struct task_struct *task, const void *ptr) 2625 { ··· 2685 if (p->fop) 2686 inode->i_fop = p->fop; 2687 ei->op = p->op; 2688 d_add(dentry, inode); 2689 error = NULL; 2690 out:
+6 -2
fs/proc/inode.c
··· 27 static void proc_evict_inode(struct inode *inode) 28 { 29 struct proc_dir_entry *de; 30 31 truncate_inode_pages(&inode->i_data, 0); 32 end_writeback(inode); ··· 39 de = PROC_I(inode)->pde; 40 if (de) 41 pde_put(de); 42 - if (PROC_I(inode)->sysctl) 43 - sysctl_head_put(PROC_I(inode)->sysctl); 44 } 45 46 struct vfsmount *proc_mnt;
··· 27 static void proc_evict_inode(struct inode *inode) 28 { 29 struct proc_dir_entry *de; 30 + struct ctl_table_header *head; 31 32 truncate_inode_pages(&inode->i_data, 0); 33 end_writeback(inode); ··· 38 de = PROC_I(inode)->pde; 39 if (de) 40 pde_put(de); 41 + head = PROC_I(inode)->sysctl; 42 + if (head) { 43 + rcu_assign_pointer(PROC_I(inode)->sysctl, NULL); 44 + sysctl_head_put(head); 45 + } 46 } 47 48 struct vfsmount *proc_mnt;
+1 -1
fs/proc/proc_devtree.c
··· 233 return; 234 root = of_find_node_by_path("/"); 235 if (root == NULL) { 236 - printk(KERN_ERR "/proc/device-tree: can't find root\n"); 237 return; 238 } 239 proc_device_tree_add_node(root, proc_device_tree);
··· 233 return; 234 root = of_find_node_by_path("/"); 235 if (root == NULL) { 236 + pr_debug("/proc/device-tree: can't find root\n"); 237 return; 238 } 239 proc_device_tree_add_node(root, proc_device_tree);
+5 -2
fs/proc/proc_sysctl.c
··· 408 const struct dentry *dentry, const struct inode *inode, 409 unsigned int len, const char *str, const struct qstr *name) 410 { 411 /* Although proc doesn't have negative dentries, rcu-walk means 412 * that inode here can be NULL */ 413 if (!inode) 414 - return 0; 415 if (name->len != len) 416 return 1; 417 if (memcmp(name->name, str, len)) 418 return 1; 419 - return !sysctl_is_seen(PROC_I(inode)->sysctl); 420 } 421 422 static const struct dentry_operations proc_sys_dentry_operations = {
··· 408 const struct dentry *dentry, const struct inode *inode, 409 unsigned int len, const char *str, const struct qstr *name) 410 { 411 + struct ctl_table_header *head; 412 /* Although proc doesn't have negative dentries, rcu-walk means 413 * that inode here can be NULL */ 414 + /* AV: can it, indeed? */ 415 if (!inode) 416 + return 1; 417 if (name->len != len) 418 return 1; 419 if (memcmp(name->name, str, len)) 420 return 1; 421 + head = rcu_dereference(PROC_I(inode)->sysctl); 422 + return !head || !sysctl_is_seen(head); 423 } 424 425 static const struct dentry_operations proc_sys_dentry_operations = {
+1 -1
fs/reiserfs/namei.c
··· 771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, 772 dentry, inode, &security); 773 if (retval) { 774 - dir->i_nlink--; 775 goto out_failed; 776 } 777
··· 771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, 772 dentry, inode, &security); 773 if (retval) { 774 + DEC_DIR_INODE_NLINK(dir) 775 goto out_failed; 776 } 777
-2
fs/reiserfs/xattr.c
··· 978 979 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) 980 { 981 - if (nd->flags & LOOKUP_RCU) 982 - return -ECHILD; 983 return -EPERM; 984 } 985
··· 978 979 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) 980 { 981 return -EPERM; 982 } 983
+2 -6
fs/sysv/namei.c
··· 245 new_de = sysv_find_entry(new_dentry, &new_page); 246 if (!new_de) 247 goto out_dir; 248 - inode_inc_link_count(old_inode); 249 sysv_set_link(new_de, new_page, old_inode); 250 new_inode->i_ctime = CURRENT_TIME_SEC; 251 if (dir_de) ··· 256 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 257 goto out_dir; 258 } 259 - inode_inc_link_count(old_inode); 260 err = sysv_add_link(new_dentry, old_inode); 261 - if (err) { 262 - inode_dec_link_count(old_inode); 263 goto out_dir; 264 - } 265 if (dir_de) 266 inode_inc_link_count(new_dir); 267 } 268 269 sysv_delete_entry(old_de, old_page); 270 - inode_dec_link_count(old_inode); 271 272 if (dir_de) { 273 sysv_set_link(dir_de, dir_page, new_dir);
··· 245 new_de = sysv_find_entry(new_dentry, &new_page); 246 if (!new_de) 247 goto out_dir; 248 sysv_set_link(new_de, new_page, old_inode); 249 new_inode->i_ctime = CURRENT_TIME_SEC; 250 if (dir_de) ··· 257 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 258 goto out_dir; 259 } 260 err = sysv_add_link(new_dentry, old_inode); 261 + if (err) 262 goto out_dir; 263 if (dir_de) 264 inode_inc_link_count(new_dir); 265 } 266 267 sysv_delete_entry(old_de, old_page); 268 + mark_inode_dirty(old_inode); 269 270 if (dir_de) { 271 sysv_set_link(dir_de, dir_page, new_dir);
+5 -6
fs/udf/namei.c
··· 32 #include <linux/crc-itu-t.h> 33 #include <linux/exportfs.h> 34 35 static inline int udf_match(int len1, const unsigned char *name1, int len2, 36 const unsigned char *name2) 37 { ··· 652 struct udf_inode_info *iinfo; 653 654 err = -EMLINK; 655 - if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) 656 goto out; 657 658 err = -EIO; ··· 1036 struct fileIdentDesc cfi, *fi; 1037 int err; 1038 1039 - if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1040 return -EMLINK; 1041 - } 1042 1043 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); 1044 if (!fi) { ··· 1132 goto end_rename; 1133 1134 retval = -EMLINK; 1135 - if (!new_inode && 1136 - new_dir->i_nlink >= 1137 - (256 << sizeof(new_dir->i_nlink)) - 1) 1138 goto end_rename; 1139 } 1140 if (!nfi) {
··· 32 #include <linux/crc-itu-t.h> 33 #include <linux/exportfs.h> 34 35 + enum { UDF_MAX_LINKS = 0xffff }; 36 + 37 static inline int udf_match(int len1, const unsigned char *name1, int len2, 38 const unsigned char *name2) 39 { ··· 650 struct udf_inode_info *iinfo; 651 652 err = -EMLINK; 653 + if (dir->i_nlink >= UDF_MAX_LINKS) 654 goto out; 655 656 err = -EIO; ··· 1034 struct fileIdentDesc cfi, *fi; 1035 int err; 1036 1037 + if (inode->i_nlink >= UDF_MAX_LINKS) 1038 return -EMLINK; 1039 1040 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); 1041 if (!fi) { ··· 1131 goto end_rename; 1132 1133 retval = -EMLINK; 1134 + if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS) 1135 goto end_rename; 1136 } 1137 if (!nfi) {
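The UDF_MAX_LINKS constant replaces an expression that never computed the on-disk limit: (256 << sizeof(i_nlink)) - 1 shifts by the field's size in bytes, so with a 4-byte i_nlink it evaluates to 4095, well below the 16-bit link count UDF stores on disk, hence the explicit 0xffff. A tiny standalone check of the arithmetic (assuming a 4-byte link count, as in this kernel's struct inode):

    #include <stdio.h>

    int main(void)
    {
            unsigned int i_nlink = 0;       /* stand-in for the 4-byte field */

            printf("%d\n", (int)((256 << sizeof(i_nlink)) - 1));   /* prints 4095 */
            printf("%d\n", 0xffff);                                 /* UDF_MAX_LINKS */
            return 0;
    }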
+2 -7
fs/ufs/namei.c
··· 306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); 307 if (!new_de) 308 goto out_dir; 309 - inode_inc_link_count(old_inode); 310 ufs_set_link(new_dir, new_de, new_page, old_inode); 311 new_inode->i_ctime = CURRENT_TIME_SEC; 312 if (dir_de) ··· 317 if (new_dir->i_nlink >= UFS_LINK_MAX) 318 goto out_dir; 319 } 320 - inode_inc_link_count(old_inode); 321 err = ufs_add_link(new_dentry, old_inode); 322 - if (err) { 323 - inode_dec_link_count(old_inode); 324 goto out_dir; 325 - } 326 if (dir_de) 327 inode_inc_link_count(new_dir); 328 } ··· 327 /* 328 * Like most other Unix systems, set the ctime for inodes on a 329 * rename. 330 - * inode_dec_link_count() will mark the inode dirty. 331 */ 332 old_inode->i_ctime = CURRENT_TIME_SEC; 333 334 ufs_delete_entry(old_dir, old_de, old_page); 335 - inode_dec_link_count(old_inode); 336 337 if (dir_de) { 338 ufs_set_link(old_inode, dir_de, dir_page, new_dir);
··· 306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); 307 if (!new_de) 308 goto out_dir; 309 ufs_set_link(new_dir, new_de, new_page, old_inode); 310 new_inode->i_ctime = CURRENT_TIME_SEC; 311 if (dir_de) ··· 318 if (new_dir->i_nlink >= UFS_LINK_MAX) 319 goto out_dir; 320 } 321 err = ufs_add_link(new_dentry, old_inode); 322 + if (err) 323 goto out_dir; 324 if (dir_de) 325 inode_inc_link_count(new_dir); 326 } ··· 331 /* 332 * Like most other Unix systems, set the ctime for inodes on a 333 * rename. 334 */ 335 old_inode->i_ctime = CURRENT_TIME_SEC; 336 337 ufs_delete_entry(old_dir, old_de, old_page); 338 + mark_inode_dirty(old_inode); 339 340 if (dir_de) { 341 ufs_set_link(old_inode, dir_de, dir_page, new_dir);
+8 -3
fs/xfs/linux-2.6/xfs_ioctl.c
··· 695 xfs_mount_t *mp, 696 void __user *arg) 697 { 698 - xfs_fsop_geom_v1_t fsgeo; 699 int error; 700 701 - error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); 702 if (error) 703 return -error; 704 705 - if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) 706 return -XFS_ERROR(EFAULT); 707 return 0; 708 }
··· 695 xfs_mount_t *mp, 696 void __user *arg) 697 { 698 + xfs_fsop_geom_t fsgeo; 699 int error; 700 701 + error = xfs_fs_geometry(mp, &fsgeo, 3); 702 if (error) 703 return -error; 704 705 + /* 706 + * Caller should have passed an argument of type 707 + * xfs_fsop_geom_v1_t. This is a proper subset of the 708 + * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 709 + */ 710 + if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) 711 return -XFS_ERROR(EFAULT); 712 return 0; 713 }
-1
include/keys/rxrpc-type.h
··· 99 * structure of raw payloads passed to add_key() or instantiate key 100 */ 101 struct rxrpc_key_data_v1 { 102 - u32 kif_version; /* 1 */ 103 u16 security_index; 104 u16 ticket_length; 105 u32 expiry; /* time_t */
··· 99 * structure of raw payloads passed to add_key() or instantiate key 100 */ 101 struct rxrpc_key_data_v1 { 102 u16 security_index; 103 u16 ticket_length; 104 u32 expiry; /* time_t */
+1 -4
include/linux/blkdev.h
··· 699 extern void blk_stop_queue(struct request_queue *q); 700 extern void blk_sync_queue(struct request_queue *q); 701 extern void __blk_stop_queue(struct request_queue *q); 702 - extern void __blk_run_queue(struct request_queue *); 703 extern void blk_run_queue(struct request_queue *); 704 extern int blk_rq_map_user(struct request_queue *, struct request *, 705 struct rq_map_data *, void __user *, unsigned long, ··· 1088 1089 struct work_struct; 1090 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1091 - int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); 1092 1093 #ifdef CONFIG_BLK_CGROUP 1094 /* ··· 1135 extern int blk_throtl_init(struct request_queue *q); 1136 extern void blk_throtl_exit(struct request_queue *q); 1137 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); 1138 - extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); 1139 extern void throtl_shutdown_timer_wq(struct request_queue *q); 1140 #else /* CONFIG_BLK_DEV_THROTTLING */ 1141 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) ··· 1144 1145 static inline int blk_throtl_init(struct request_queue *q) { return 0; } 1146 static inline int blk_throtl_exit(struct request_queue *q) { return 0; } 1147 - static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} 1148 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} 1149 #endif /* CONFIG_BLK_DEV_THROTTLING */ 1150
··· 699 extern void blk_stop_queue(struct request_queue *q); 700 extern void blk_sync_queue(struct request_queue *q); 701 extern void __blk_stop_queue(struct request_queue *q); 702 + extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); 703 extern void blk_run_queue(struct request_queue *); 704 extern int blk_rq_map_user(struct request_queue *, struct request *, 705 struct rq_map_data *, void __user *, unsigned long, ··· 1088 1089 struct work_struct; 1090 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1091 1092 #ifdef CONFIG_BLK_CGROUP 1093 /* ··· 1136 extern int blk_throtl_init(struct request_queue *q); 1137 extern void blk_throtl_exit(struct request_queue *q); 1138 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); 1139 extern void throtl_shutdown_timer_wq(struct request_queue *q); 1140 #else /* CONFIG_BLK_DEV_THROTTLING */ 1141 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) ··· 1146 1147 static inline int blk_throtl_init(struct request_queue *q) { return 0; } 1148 static inline int blk_throtl_exit(struct request_queue *q) { return 0; } 1149 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} 1150 #endif /* CONFIG_BLK_DEV_THROTTLING */ 1151
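This blkdev.h hunk is the prototype change behind the scsi_lib.c and scsi_transport_fc.c fixups earlier in the merge: __blk_run_queue() grew a force_kblockd flag, so every caller now passes an explicit second argument, with false keeping the old behaviour of running the queue in the current context and true asking the block layer to run it from the kblockd workqueue instead. A minimal caller sketch (the queue pointer name is illustrative, and the queue lock must be held as before):

    spin_lock_irq(q->queue_lock);
    __blk_run_queue(q, false);      /* run here, as the old single-argument call did */
    spin_unlock_irq(q->queue_lock);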
-1
include/linux/blktrace_api.h
··· 245 246 extern void blk_dump_cmd(char *buf, struct request *rq); 247 extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); 248 - extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); 249 250 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 251
··· 245 246 extern void blk_dump_cmd(char *buf, struct request *rq); 247 extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); 248 249 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 250
+1 -1
include/linux/ceph/messenger.h
··· 123 #define SOCK_CLOSED 11 /* socket state changed to closed */ 124 #define OPENING 13 /* open connection w/ (possibly new) peer */ 125 #define DEAD 14 /* dead, about to kfree */ 126 127 /* 128 * A single connection with another host. ··· 161 struct list_head out_queue; 162 struct list_head out_sent; /* sending or sent but unacked */ 163 u64 out_seq; /* last message queued for send */ 164 - bool out_keepalive_pending; 165 166 u64 in_seq, in_seq_acked; /* last message received, acked */ 167
··· 123 #define SOCK_CLOSED 11 /* socket state changed to closed */ 124 #define OPENING 13 /* open connection w/ (possibly new) peer */ 125 #define DEAD 14 /* dead, about to kfree */ 126 + #define BACKOFF 15 127 128 /* 129 * A single connection with another host. ··· 160 struct list_head out_queue; 161 struct list_head out_sent; /* sending or sent but unacked */ 162 u64 out_seq; /* last message queued for send */ 163 164 u64 in_seq, in_seq_acked; /* last message received, acked */ 165
+7 -4
include/linux/gfp.h
··· 332 return alloc_pages_current(gfp_mask, order); 333 } 334 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 335 - struct vm_area_struct *vma, unsigned long addr); 336 #else 337 #define alloc_pages(gfp_mask, order) \ 338 alloc_pages_node(numa_node_id(), gfp_mask, order) 339 - #define alloc_pages_vma(gfp_mask, order, vma, addr) \ 340 alloc_pages(gfp_mask, order) 341 #endif 342 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 343 - #define alloc_page_vma(gfp_mask, vma, addr) \ 344 - alloc_pages_vma(gfp_mask, 0, vma, addr) 345 346 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 347 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
··· 332 return alloc_pages_current(gfp_mask, order); 333 } 334 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 335 + struct vm_area_struct *vma, unsigned long addr, 336 + int node); 337 #else 338 #define alloc_pages(gfp_mask, order) \ 339 alloc_pages_node(numa_node_id(), gfp_mask, order) 340 + #define alloc_pages_vma(gfp_mask, order, vma, addr, node) \ 341 alloc_pages(gfp_mask, order) 342 #endif 343 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 344 + #define alloc_page_vma(gfp_mask, vma, addr) \ 345 + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) 346 + #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ 347 + alloc_pages_vma(gfp_mask, 0, vma, addr, node) 348 349 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 350 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+1
include/linux/mfd/wm8994/core.h
··· 71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; 72 73 /* Used over suspend/resume */ 74 u16 ldo_regs[WM8994_NUM_LDO_REGS]; 75 u16 gpio_regs[WM8994_NUM_GPIO_REGS]; 76
··· 71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; 72 73 /* Used over suspend/resume */ 74 + bool suspended; 75 u16 ldo_regs[WM8994_NUM_LDO_REGS]; 76 u16 gpio_regs[WM8994_NUM_GPIO_REGS]; 77
+3
include/linux/netdevice.h
··· 2392 extern int netdev_info(const struct net_device *dev, const char *format, ...) 2393 __attribute__ ((format (printf, 2, 3))); 2394 2395 #if defined(DEBUG) 2396 #define netdev_dbg(__dev, format, args...) \ 2397 netdev_printk(KERN_DEBUG, __dev, format, ##args)
··· 2392 extern int netdev_info(const struct net_device *dev, const char *format, ...) 2393 __attribute__ ((format (printf, 2, 3))); 2394 2395 + #define MODULE_ALIAS_NETDEV(device) \ 2396 + MODULE_ALIAS("netdev-" device) 2397 + 2398 #if defined(DEBUG) 2399 #define netdev_dbg(__dev, format, args...) \ 2400 netdev_printk(KERN_DEBUG, __dev, format, ##args)
-3
include/linux/ptrace.h
··· 102 103 extern long arch_ptrace(struct task_struct *child, long request, 104 unsigned long addr, unsigned long data); 105 - extern int ptrace_traceme(void); 106 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 107 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 108 - extern int ptrace_attach(struct task_struct *tsk); 109 - extern int ptrace_detach(struct task_struct *, unsigned int); 110 extern void ptrace_disable(struct task_struct *); 111 extern int ptrace_check_attach(struct task_struct *task, int kill); 112 extern int ptrace_request(struct task_struct *child, long request,
··· 102 103 extern long arch_ptrace(struct task_struct *child, long request, 104 unsigned long addr, unsigned long data); 105 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 106 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 107 extern void ptrace_disable(struct task_struct *); 108 extern int ptrace_check_attach(struct task_struct *task, int kill); 109 extern int ptrace_request(struct task_struct *child, long request,
+10 -4
include/linux/sysctl.h
··· 930 931 #ifdef __KERNEL__ 932 #include <linux/list.h> 933 934 /* For the /proc/sys support */ 935 struct ctl_table; ··· 1038 struct ctl_table trees. */ 1039 struct ctl_table_header 1040 { 1041 - struct ctl_table *ctl_table; 1042 - struct list_head ctl_entry; 1043 - int used; 1044 - int count; 1045 struct completion *unregistering; 1046 struct ctl_table *ctl_table_arg; 1047 struct ctl_table_root *root;
··· 930 931 #ifdef __KERNEL__ 932 #include <linux/list.h> 933 + #include <linux/rcupdate.h> 934 935 /* For the /proc/sys support */ 936 struct ctl_table; ··· 1037 struct ctl_table trees. */ 1038 struct ctl_table_header 1039 { 1040 + union { 1041 + struct { 1042 + struct ctl_table *ctl_table; 1043 + struct list_head ctl_entry; 1044 + int used; 1045 + int count; 1046 + }; 1047 + struct rcu_head rcu; 1048 + }; 1049 struct completion *unregistering; 1050 struct ctl_table *ctl_table_arg; 1051 struct ctl_table_root *root;
+3 -3
include/trace/events/block.h
··· 31 0 : blk_rq_sectors(rq); 32 __entry->errors = rq->errors; 33 34 - blk_fill_rwbs_rq(__entry->rwbs, rq); 35 blk_dump_cmd(__get_str(cmd), rq); 36 ), 37 ··· 118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 119 blk_rq_bytes(rq) : 0; 120 121 - blk_fill_rwbs_rq(__entry->rwbs, rq); 122 blk_dump_cmd(__get_str(cmd), rq); 123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 124 ), ··· 563 __entry->nr_sector = blk_rq_sectors(rq); 564 __entry->old_dev = dev; 565 __entry->old_sector = from; 566 - blk_fill_rwbs_rq(__entry->rwbs, rq); 567 ), 568 569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
··· 31 0 : blk_rq_sectors(rq); 32 __entry->errors = rq->errors; 33 34 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 35 blk_dump_cmd(__get_str(cmd), rq); 36 ), 37 ··· 118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 119 blk_rq_bytes(rq) : 0; 120 121 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 122 blk_dump_cmd(__get_str(cmd), rq); 123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 124 ), ··· 563 __entry->nr_sector = blk_rq_sectors(rq); 564 __entry->old_dev = dev; 565 __entry->old_sector = from; 566 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 567 ), 568 569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+5 -2
kernel/cpuset.c
··· 1575 return -ENODEV; 1576 1577 trialcs = alloc_trial_cpuset(cs); 1578 - if (!trialcs) 1579 - return -ENOMEM; 1580 1581 switch (cft->private) { 1582 case FILE_CPULIST: ··· 1593 } 1594 1595 free_trial_cpuset(trialcs); 1596 cgroup_unlock(); 1597 return retval; 1598 }
··· 1575 return -ENODEV; 1576 1577 trialcs = alloc_trial_cpuset(cs); 1578 + if (!trialcs) { 1579 + retval = -ENOMEM; 1580 + goto out; 1581 + } 1582 1583 switch (cft->private) { 1584 case FILE_CPULIST: ··· 1591 } 1592 1593 free_trial_cpuset(trialcs); 1594 + out: 1595 cgroup_unlock(); 1596 return retval; 1597 }
+3 -3
kernel/ptrace.c
··· 163 return !err; 164 } 165 166 - int ptrace_attach(struct task_struct *task) 167 { 168 int retval; 169 ··· 219 * Performs checks and sets PT_PTRACED. 220 * Should be used by all ptrace implementations for PTRACE_TRACEME. 221 */ 222 - int ptrace_traceme(void) 223 { 224 int ret = -EPERM; 225 ··· 293 return false; 294 } 295 296 - int ptrace_detach(struct task_struct *child, unsigned int data) 297 { 298 bool dead = false; 299
··· 163 return !err; 164 } 165 166 + static int ptrace_attach(struct task_struct *task) 167 { 168 int retval; 169 ··· 219 * Performs checks and sets PT_PTRACED. 220 * Should be used by all ptrace implementations for PTRACE_TRACEME. 221 */ 222 + static int ptrace_traceme(void) 223 { 224 int ret = -EPERM; 225 ··· 293 return false; 294 } 295 296 + static int ptrace_detach(struct task_struct *child, unsigned int data) 297 { 298 bool dead = false; 299
+9 -5
kernel/sched_rt.c
··· 210 211 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 212 { 213 - int this_cpu = smp_processor_id(); 214 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 215 struct sched_rt_entity *rt_se; 216 217 - rt_se = rt_rq->tg->rt_se[this_cpu]; 218 219 if (rt_rq->rt_nr_running) { 220 if (rt_se && !on_rt_rq(rt_se)) ··· 227 228 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 229 { 230 - int this_cpu = smp_processor_id(); 231 struct sched_rt_entity *rt_se; 232 233 - rt_se = rt_rq->tg->rt_se[this_cpu]; 234 235 if (rt_se && on_rt_rq(rt_se)) 236 dequeue_rt_entity(rt_se); ··· 566 if (rt_rq->rt_time || rt_rq->rt_nr_running) 567 idle = 0; 568 raw_spin_unlock(&rt_rq->rt_runtime_lock); 569 - } else if (rt_rq->rt_nr_running) 570 idle = 0; 571 572 if (enqueue) 573 sched_rt_rq_enqueue(rt_rq);
··· 210 211 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 212 { 213 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 214 struct sched_rt_entity *rt_se; 215 216 + int cpu = cpu_of(rq_of_rt_rq(rt_rq)); 217 + 218 + rt_se = rt_rq->tg->rt_se[cpu]; 219 220 if (rt_rq->rt_nr_running) { 221 if (rt_se && !on_rt_rq(rt_se)) ··· 226 227 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 228 { 229 struct sched_rt_entity *rt_se; 230 + int cpu = cpu_of(rq_of_rt_rq(rt_rq)); 231 232 + rt_se = rt_rq->tg->rt_se[cpu]; 233 234 if (rt_se && on_rt_rq(rt_se)) 235 dequeue_rt_entity(rt_se); ··· 565 if (rt_rq->rt_time || rt_rq->rt_nr_running) 566 idle = 0; 567 raw_spin_unlock(&rt_rq->rt_runtime_lock); 568 + } else if (rt_rq->rt_nr_running) { 569 idle = 0; 570 + if (!rt_rq_throttled(rt_rq)) 571 + enqueue = 1; 572 + } 573 574 if (enqueue) 575 sched_rt_rq_enqueue(rt_rq);
+10 -5
kernel/sysctl.c
··· 194 static struct ctl_table root_table[]; 195 static struct ctl_table_root sysctl_table_root; 196 static struct ctl_table_header root_table_header = { 197 - .count = 1, 198 .ctl_table = root_table, 199 - .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list), 200 .root = &sysctl_table_root, 201 .set = &sysctl_table_root.default_set, 202 }; ··· 1567 spin_unlock(&sysctl_lock); 1568 } 1569 1570 void sysctl_head_put(struct ctl_table_header *head) 1571 { 1572 spin_lock(&sysctl_lock); 1573 if (!--head->count) 1574 - kfree(head); 1575 spin_unlock(&sysctl_lock); 1576 } 1577 ··· 1953 start_unregistering(header); 1954 if (!--header->parent->count) { 1955 WARN_ON(1); 1956 - kfree(header->parent); 1957 } 1958 if (!--header->count) 1959 - kfree(header); 1960 spin_unlock(&sysctl_lock); 1961 } 1962
··· 194 static struct ctl_table root_table[]; 195 static struct ctl_table_root sysctl_table_root; 196 static struct ctl_table_header root_table_header = { 197 + {{.count = 1, 198 .ctl_table = root_table, 199 + .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, 200 .root = &sysctl_table_root, 201 .set = &sysctl_table_root.default_set, 202 }; ··· 1567 spin_unlock(&sysctl_lock); 1568 } 1569 1570 + static void free_head(struct rcu_head *rcu) 1571 + { 1572 + kfree(container_of(rcu, struct ctl_table_header, rcu)); 1573 + } 1574 + 1575 void sysctl_head_put(struct ctl_table_header *head) 1576 { 1577 spin_lock(&sysctl_lock); 1578 if (!--head->count) 1579 + call_rcu(&head->rcu, free_head); 1580 spin_unlock(&sysctl_lock); 1581 } 1582 ··· 1948 start_unregistering(header); 1949 if (!--header->parent->count) { 1950 WARN_ON(1); 1951 + call_rcu(&header->parent->rcu, free_head); 1952 } 1953 if (!--header->count) 1954 + call_rcu(&header->rcu, free_head); 1955 spin_unlock(&sysctl_lock); 1956 } 1957
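The sysctl.c, sysctl.h and proc_sysctl.c hunks in this merge belong together: the header's bookkeeping fields now share storage with a struct rcu_head (the union above), /proc readers pick the header up through rcu_dereference(), and freeing goes through call_rcu(), so a header can only be reclaimed after any rcu-walk reader that saw the pointer has finished. A minimal sketch of the pairing, not the exact call sites:

    /* reader side (rcu-walk path, e.g. proc_sys_compare) */
    head = rcu_dereference(PROC_I(inode)->sysctl);
    use_it = head && sysctl_is_seen(head);

    /* updater side: unpublish first, free only after a grace period */
    rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
    call_rcu(&head->rcu, free_head);        /* free_head() does the kfree() */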
-16
kernel/trace/blktrace.c
··· 1827 rwbs[i] = '\0'; 1828 } 1829 1830 - void blk_fill_rwbs_rq(char *rwbs, struct request *rq) 1831 - { 1832 - int rw = rq->cmd_flags & 0x03; 1833 - int bytes; 1834 - 1835 - if (rq->cmd_flags & REQ_DISCARD) 1836 - rw |= REQ_DISCARD; 1837 - 1838 - if (rq->cmd_flags & REQ_SECURE) 1839 - rw |= REQ_SECURE; 1840 - 1841 - bytes = blk_rq_bytes(rq); 1842 - 1843 - blk_fill_rwbs(rwbs, rw, bytes); 1844 - } 1845 - 1846 #endif /* CONFIG_EVENT_TRACING */ 1847
··· 1827 rwbs[i] = '\0'; 1828 } 1829 1830 #endif /* CONFIG_EVENT_TRACING */ 1831
+1 -1
lib/nlattr.c
··· 148 { 149 int i, len = 0; 150 151 - for (i = 0; i < n; i++) { 152 if (p->len) 153 len += nla_total_size(p->len); 154 else if (nla_attr_minlen[p->type])
··· 148 { 149 int i, len = 0; 150 151 + for (i = 0; i < n; i++, p++) { 152 if (p->len) 153 len += nla_total_size(p->len); 154 else if (nla_attr_minlen[p->type])
+19 -9
mm/huge_memory.c
··· 650 651 static inline struct page *alloc_hugepage_vma(int defrag, 652 struct vm_area_struct *vma, 653 - unsigned long haddr) 654 { 655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), 656 - HPAGE_PMD_ORDER, vma, haddr); 657 } 658 659 #ifndef CONFIG_NUMA ··· 678 if (unlikely(khugepaged_enter(vma))) 679 return VM_FAULT_OOM; 680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 681 - vma, haddr); 682 if (unlikely(!page)) 683 goto out; 684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { ··· 799 } 800 801 for (i = 0; i < HPAGE_PMD_NR; i++) { 802 - pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 803 - vma, address); 804 if (unlikely(!pages[i] || 805 mem_cgroup_newpage_charge(pages[i], mm, 806 GFP_KERNEL))) { ··· 902 if (transparent_hugepage_enabled(vma) && 903 !transparent_hugepage_debug_cow()) 904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 905 - vma, haddr); 906 else 907 new_page = NULL; 908 ··· 1745 static void collapse_huge_page(struct mm_struct *mm, 1746 unsigned long address, 1747 struct page **hpage, 1748 - struct vm_area_struct *vma) 1749 { 1750 pgd_t *pgd; 1751 pud_t *pud; ··· 1774 * mmap_sem in read mode is good idea also to allow greater 1775 * scalability. 1776 */ 1777 - new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); 1778 if (unlikely(!new_page)) { 1779 up_read(&mm->mmap_sem); 1780 *hpage = ERR_PTR(-ENOMEM); ··· 1921 struct page *page; 1922 unsigned long _address; 1923 spinlock_t *ptl; 1924 1925 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1926 ··· 1952 page = vm_normal_page(vma, _address, pteval); 1953 if (unlikely(!page)) 1954 goto out_unmap; 1955 VM_BUG_ON(PageCompound(page)); 1956 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 1957 goto out_unmap; ··· 1975 pte_unmap_unlock(pte, ptl); 1976 if (ret) 1977 /* collapse_huge_page will return with the mmap_sem released */ 1978 - collapse_huge_page(mm, address, hpage, vma); 1979 out: 1980 return ret; 1981 }
··· 650 651 static inline struct page *alloc_hugepage_vma(int defrag, 652 struct vm_area_struct *vma, 653 + unsigned long haddr, int nd) 654 { 655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), 656 + HPAGE_PMD_ORDER, vma, haddr, nd); 657 } 658 659 #ifndef CONFIG_NUMA ··· 678 if (unlikely(khugepaged_enter(vma))) 679 return VM_FAULT_OOM; 680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 681 + vma, haddr, numa_node_id()); 682 if (unlikely(!page)) 683 goto out; 684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { ··· 799 } 800 801 for (i = 0; i < HPAGE_PMD_NR; i++) { 802 + pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, 803 + vma, address, page_to_nid(page)); 804 if (unlikely(!pages[i] || 805 mem_cgroup_newpage_charge(pages[i], mm, 806 GFP_KERNEL))) { ··· 902 if (transparent_hugepage_enabled(vma) && 903 !transparent_hugepage_debug_cow()) 904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 905 + vma, haddr, numa_node_id()); 906 else 907 new_page = NULL; 908 ··· 1745 static void collapse_huge_page(struct mm_struct *mm, 1746 unsigned long address, 1747 struct page **hpage, 1748 + struct vm_area_struct *vma, 1749 + int node) 1750 { 1751 pgd_t *pgd; 1752 pud_t *pud; ··· 1773 * mmap_sem in read mode is good idea also to allow greater 1774 * scalability. 1775 */ 1776 + new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, 1777 + node); 1778 if (unlikely(!new_page)) { 1779 up_read(&mm->mmap_sem); 1780 *hpage = ERR_PTR(-ENOMEM); ··· 1919 struct page *page; 1920 unsigned long _address; 1921 spinlock_t *ptl; 1922 + int node = -1; 1923 1924 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1925 ··· 1949 page = vm_normal_page(vma, _address, pteval); 1950 if (unlikely(!page)) 1951 goto out_unmap; 1952 + /* 1953 + * Chose the node of the first page. This could 1954 + * be more sophisticated and look at more pages, 1955 + * but isn't for now. 1956 + */ 1957 + if (node == -1) 1958 + node = page_to_nid(page); 1959 VM_BUG_ON(PageCompound(page)); 1960 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 1961 goto out_unmap; ··· 1965 pte_unmap_unlock(pte, ptl); 1966 if (ret) 1967 /* collapse_huge_page will return with the mmap_sem released */ 1968 + collapse_huge_page(mm, address, hpage, vma, node); 1969 out: 1970 return ret; 1971 }
+7 -7
mm/mempolicy.c
··· 1524 } 1525 1526 /* Return a zonelist indicated by gfp for node representing a mempolicy */ 1527 - static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) 1528 { 1529 - int nd = numa_node_id(); 1530 - 1531 switch (policy->mode) { 1532 case MPOL_PREFERRED: 1533 if (!(policy->flags & MPOL_F_LOCAL)) ··· 1678 zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1679 huge_page_shift(hstate_vma(vma))), gfp_flags); 1680 } else { 1681 - zl = policy_zonelist(gfp_flags, *mpol); 1682 if ((*mpol)->mode == MPOL_BIND) 1683 *nodemask = &(*mpol)->v.nodes; 1684 } ··· 1819 */ 1820 struct page * 1821 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1822 - unsigned long addr) 1823 { 1824 struct mempolicy *pol = get_vma_policy(current, vma, addr); 1825 struct zonelist *zl; ··· 1835 put_mems_allowed(); 1836 return page; 1837 } 1838 - zl = policy_zonelist(gfp, pol); 1839 if (unlikely(mpol_needs_cond_ref(pol))) { 1840 /* 1841 * slow path: ref counted shared policy ··· 1891 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 1892 else 1893 page = __alloc_pages_nodemask(gfp, order, 1894 - policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); 1895 put_mems_allowed(); 1896 return page; 1897 }
··· 1524 } 1525 1526 /* Return a zonelist indicated by gfp for node representing a mempolicy */ 1527 + static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 1528 + int nd) 1529 { 1530 switch (policy->mode) { 1531 case MPOL_PREFERRED: 1532 if (!(policy->flags & MPOL_F_LOCAL)) ··· 1679 zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1680 huge_page_shift(hstate_vma(vma))), gfp_flags); 1681 } else { 1682 + zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); 1683 if ((*mpol)->mode == MPOL_BIND) 1684 *nodemask = &(*mpol)->v.nodes; 1685 } ··· 1820 */ 1821 struct page * 1822 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1823 + unsigned long addr, int node) 1824 { 1825 struct mempolicy *pol = get_vma_policy(current, vma, addr); 1826 struct zonelist *zl; ··· 1836 put_mems_allowed(); 1837 return page; 1838 } 1839 + zl = policy_zonelist(gfp, pol, node); 1840 if (unlikely(mpol_needs_cond_ref(pol))) { 1841 /* 1842 * slow path: ref counted shared policy ··· 1892 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 1893 else 1894 page = __alloc_pages_nodemask(gfp, order, 1895 + policy_zonelist(gfp, pol, numa_node_id()), 1896 + policy_nodemask(gfp, pol)); 1897 put_mems_allowed(); 1898 return page; 1899 }
+54 -17
net/ceph/messenger.c
··· 336 ceph_msg_put(con->out_msg); 337 con->out_msg = NULL; 338 } 339 - con->out_keepalive_pending = false; 340 con->in_seq = 0; 341 con->in_seq_acked = 0; 342 } ··· 1247 con->auth_retry); 1248 if (con->auth_retry == 2) { 1249 con->error_msg = "connect authorization failure"; 1250 - reset_connection(con); 1251 - set_bit(CLOSED, &con->state); 1252 return -1; 1253 } 1254 con->auth_retry = 1; ··· 1712 1713 /* open the socket first? */ 1714 if (con->sock == NULL) { 1715 - /* 1716 - * if we were STANDBY and are reconnecting _this_ 1717 - * connection, bump connect_seq now. Always bump 1718 - * global_seq. 1719 - */ 1720 - if (test_and_clear_bit(STANDBY, &con->state)) 1721 - con->connect_seq++; 1722 - 1723 prepare_write_banner(msgr, con); 1724 prepare_write_connect(msgr, con, 1); 1725 prepare_read_banner(con); ··· 1940 work.work); 1941 1942 mutex_lock(&con->mutex); 1943 1944 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ 1945 dout("con_work CLOSED\n"); 1946 con_close_socket(con); ··· 2014 /* Requeue anything that hasn't been acked */ 2015 list_splice_init(&con->out_sent, &con->out_queue); 2016 2017 - /* If there are no messages in the queue, place the connection 2018 - * in a STANDBY state (i.e., don't try to reconnect just yet). */ 2019 - if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { 2020 - dout("fault setting STANDBY\n"); 2021 set_bit(STANDBY, &con->state); 2022 } else { 2023 /* retry after a delay. */ ··· 2027 con->delay = BASE_DELAY_INTERVAL; 2028 else if (con->delay < MAX_DELAY_INTERVAL) 2029 con->delay *= 2; 2030 - dout("fault queueing %p delay %lu\n", con, con->delay); 2031 con->ops->get(con); 2032 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2033 - round_jiffies_relative(con->delay)) == 0) 2034 con->ops->put(con); 2035 } 2036 2037 out_unlock: ··· 2115 } 2116 EXPORT_SYMBOL(ceph_messenger_destroy); 2117 2118 /* 2119 * Queue up an outgoing message on the given connection. 2120 */ ··· 2160 2161 /* if there wasn't anything waiting to send before, queue 2162 * new work */ 2163 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2164 queue_con(con); 2165 } ··· 2226 */ 2227 void ceph_con_keepalive(struct ceph_connection *con) 2228 { 2229 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && 2230 test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2231 queue_con(con);
··· 336 ceph_msg_put(con->out_msg); 337 con->out_msg = NULL; 338 } 339 con->in_seq = 0; 340 con->in_seq_acked = 0; 341 } ··· 1248 con->auth_retry); 1249 if (con->auth_retry == 2) { 1250 con->error_msg = "connect authorization failure"; 1251 return -1; 1252 } 1253 con->auth_retry = 1; ··· 1715 1716 /* open the socket first? */ 1717 if (con->sock == NULL) { 1718 prepare_write_banner(msgr, con); 1719 prepare_write_connect(msgr, con, 1); 1720 prepare_read_banner(con); ··· 1951 work.work); 1952 1953 mutex_lock(&con->mutex); 1954 + if (test_and_clear_bit(BACKOFF, &con->state)) { 1955 + dout("con_work %p backing off\n", con); 1956 + if (queue_delayed_work(ceph_msgr_wq, &con->work, 1957 + round_jiffies_relative(con->delay))) { 1958 + dout("con_work %p backoff %lu\n", con, con->delay); 1959 + mutex_unlock(&con->mutex); 1960 + return; 1961 + } else { 1962 + con->ops->put(con); 1963 + dout("con_work %p FAILED to back off %lu\n", con, 1964 + con->delay); 1965 + } 1966 + } 1967 1968 + if (test_bit(STANDBY, &con->state)) { 1969 + dout("con_work %p STANDBY\n", con); 1970 + goto done; 1971 + } 1972 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ 1973 dout("con_work CLOSED\n"); 1974 con_close_socket(con); ··· 2008 /* Requeue anything that hasn't been acked */ 2009 list_splice_init(&con->out_sent, &con->out_queue); 2010 2011 + /* If there are no messages queued or keepalive pending, place 2012 + * the connection in a STANDBY state */ 2013 + if (list_empty(&con->out_queue) && 2014 + !test_bit(KEEPALIVE_PENDING, &con->state)) { 2015 + dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2016 + clear_bit(WRITE_PENDING, &con->state); 2017 set_bit(STANDBY, &con->state); 2018 } else { 2019 /* retry after a delay. */ ··· 2019 con->delay = BASE_DELAY_INTERVAL; 2020 else if (con->delay < MAX_DELAY_INTERVAL) 2021 con->delay *= 2; 2022 con->ops->get(con); 2023 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2024 + round_jiffies_relative(con->delay))) { 2025 + dout("fault queued %p delay %lu\n", con, con->delay); 2026 + } else { 2027 con->ops->put(con); 2028 + dout("fault failed to queue %p delay %lu, backoff\n", 2029 + con, con->delay); 2030 + /* 2031 + * In many cases we see a socket state change 2032 + * while con_work is running and end up 2033 + * queuing (non-delayed) work, such that we 2034 + * can't backoff with a delay. Set a flag so 2035 + * that when con_work restarts we schedule the 2036 + * delay then. 2037 + */ 2038 + set_bit(BACKOFF, &con->state); 2039 + } 2040 } 2041 2042 out_unlock: ··· 2094 } 2095 EXPORT_SYMBOL(ceph_messenger_destroy); 2096 2097 + static void clear_standby(struct ceph_connection *con) 2098 + { 2099 + /* come back from STANDBY? */ 2100 + if (test_and_clear_bit(STANDBY, &con->state)) { 2101 + mutex_lock(&con->mutex); 2102 + dout("clear_standby %p and ++connect_seq\n", con); 2103 + con->connect_seq++; 2104 + WARN_ON(test_bit(WRITE_PENDING, &con->state)); 2105 + WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); 2106 + mutex_unlock(&con->mutex); 2107 + } 2108 + } 2109 + 2110 /* 2111 * Queue up an outgoing message on the given connection. 
2112 */ ··· 2126 2127 /* if there wasn't anything waiting to send before, queue 2128 * new work */ 2129 + clear_standby(con); 2130 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2131 queue_con(con); 2132 } ··· 2191 */ 2192 void ceph_con_keepalive(struct ceph_connection *con) 2193 { 2194 + dout("con_keepalive %p\n", con); 2195 + clear_standby(con); 2196 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && 2197 test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2198 queue_con(con);
+13 -5
net/ceph/pagevec.c
··· 16 int num_pages, bool write_page) 17 { 18 struct page **pages; 19 - int rc; 20 21 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); 22 if (!pages) 23 return ERR_PTR(-ENOMEM); 24 25 down_read(&current->mm->mmap_sem); 26 - rc = get_user_pages(current, current->mm, (unsigned long)data, 27 - num_pages, write_page, 0, pages, NULL); 28 up_read(&current->mm->mmap_sem); 29 - if (rc < num_pages) 30 goto fail; 31 return pages; 32 33 fail: 34 - ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); 35 return ERR_PTR(rc); 36 } 37 EXPORT_SYMBOL(ceph_get_direct_page_vector);
··· 16 int num_pages, bool write_page) 17 { 18 struct page **pages; 19 + int got = 0; 20 + int rc = 0; 21 22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); 23 if (!pages) 24 return ERR_PTR(-ENOMEM); 25 26 down_read(&current->mm->mmap_sem); 27 + while (got < num_pages) { 28 + rc = get_user_pages(current, current->mm, 29 + (unsigned long)data + ((unsigned long)got * PAGE_SIZE), 30 + num_pages - got, write_page, 0, pages + got, NULL); 31 + if (rc < 0) 32 + break; 33 + BUG_ON(rc == 0); 34 + got += rc; 35 + } 36 up_read(&current->mm->mmap_sem); 37 + if (rc < 0) 38 goto fail; 39 return pages; 40 41 fail: 42 + ceph_put_page_vector(pages, got, false); 43 return ERR_PTR(rc); 44 } 45 EXPORT_SYMBOL(ceph_get_direct_page_vector);
+10 -2
net/core/dev.c
··· 1114 void dev_load(struct net *net, const char *name) 1115 { 1116 struct net_device *dev; 1117 1118 rcu_read_lock(); 1119 dev = dev_get_by_name_rcu(net, name); 1120 rcu_read_unlock(); 1121 1122 - if (!dev && capable(CAP_NET_ADMIN)) 1123 - request_module("%s", name); 1124 } 1125 EXPORT_SYMBOL(dev_load); 1126
··· 1114 void dev_load(struct net *net, const char *name) 1115 { 1116 struct net_device *dev; 1117 + int no_module; 1118 1119 rcu_read_lock(); 1120 dev = dev_get_by_name_rcu(net, name); 1121 rcu_read_unlock(); 1122 1123 + no_module = !dev; 1124 + if (no_module && capable(CAP_NET_ADMIN)) 1125 + no_module = request_module("netdev-%s", name); 1126 + if (no_module && capable(CAP_SYS_MODULE)) { 1127 + if (!request_module("%s", name)) 1128 + pr_err("Loading kernel module for a network device " 1129 + "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " 1130 + "instead\n", name); 1131 + } 1132 } 1133 EXPORT_SYMBOL(dev_load); 1134
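Note: the dev_load() rework above asks modprobe for a capability-scoped "netdev-<name>" alias under CAP_NET_ADMIN and only falls back to the bare interface name (with a deprecation warning) under CAP_SYS_MODULE; the tunnel drivers later in this merge switch their aliases to match. A minimal, hypothetical module tail showing how a driver opts in — the "mytun0" name is purely illustrative, and MODULE_ALIAS_NETDEV() is assumed to expand to a "netdev-"-prefixed MODULE_ALIAS():

#include <linux/module.h>
#include <linux/netdevice.h>

static int __init mytun_init(void)
{
	pr_info("mytun: loaded\n");
	return 0;
}

static void __exit mytun_exit(void)
{
}

module_init(mytun_init);
module_exit(mytun_exit);

MODULE_LICENSE("GPL");
/* Resolves the request_module("netdev-mytun0") issued by dev_load()
 * for CAP_NET_ADMIN callers, replacing a bare "mytun0" alias. */
MODULE_ALIAS_NETDEV("mytun0");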
+1 -1
net/core/dev_addr_lists.c
··· 144 145 list_for_each_entry(ha, &from_list->list, list) { 146 type = addr_type ? addr_type : ha->type; 147 - __hw_addr_del(to_list, ha->addr, addr_len, addr_type); 148 } 149 } 150 EXPORT_SYMBOL(__hw_addr_del_multiple);
··· 144 145 list_for_each_entry(ha, &from_list->list, list) { 146 type = addr_type ? addr_type : ha->type; 147 + __hw_addr_del(to_list, ha->addr, addr_len, type); 148 } 149 } 150 EXPORT_SYMBOL(__hw_addr_del_multiple);
+1 -1
net/dcb/dcbnl.c
··· 1193 goto err; 1194 } 1195 1196 - if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { 1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1198 err = ops->ieee_setpfc(netdev, pfc); 1199 if (err)
··· 1193 goto err; 1194 } 1195 1196 + if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { 1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1198 err = ops->ieee_setpfc(netdev, pfc); 1199 if (err)
+3 -4
net/dccp/input.c
··· 614 /* Caller (dccp_v4_do_rcv) will send Reset */ 615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 616 return 1; 617 } 618 619 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { ··· 671 } 672 673 switch (sk->sk_state) { 674 - case DCCP_CLOSED: 675 - dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 676 - return 1; 677 - 678 case DCCP_REQUESTING: 679 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); 680 if (queued >= 0)
··· 614 /* Caller (dccp_v4_do_rcv) will send Reset */ 615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 616 return 1; 617 + } else if (sk->sk_state == DCCP_CLOSED) { 618 + dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 619 + return 1; 620 } 621 622 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { ··· 668 } 669 670 switch (sk->sk_state) { 671 case DCCP_REQUESTING: 672 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); 673 if (queued >= 0)
+17 -3
net/dns_resolver/dns_key.c
··· 67 size_t result_len = 0; 68 const char *data = _data, *end, *opt; 69 70 - kenter("%%%d,%s,'%s',%zu", 71 - key->serial, key->description, data, datalen); 72 73 if (datalen <= 1 || !data || data[datalen - 1] != '\0') 74 return -EINVAL; ··· 218 seq_printf(m, ": %u", key->datalen); 219 } 220 221 struct key_type key_type_dns_resolver = { 222 .name = "dns_resolver", 223 .instantiate = dns_resolver_instantiate, ··· 238 .revoke = user_revoke, 239 .destroy = user_destroy, 240 .describe = dns_resolver_describe, 241 - .read = user_read, 242 }; 243 244 static int __init init_dns_resolver(void)
··· 67 size_t result_len = 0; 68 const char *data = _data, *end, *opt; 69 70 + kenter("%%%d,%s,'%*.*s',%zu", 71 + key->serial, key->description, 72 + (int)datalen, (int)datalen, data, datalen); 73 74 if (datalen <= 1 || !data || data[datalen - 1] != '\0') 75 return -EINVAL; ··· 217 seq_printf(m, ": %u", key->datalen); 218 } 219 220 + /* 221 + * read the DNS data 222 + * - the key's semaphore is read-locked 223 + */ 224 + static long dns_resolver_read(const struct key *key, 225 + char __user *buffer, size_t buflen) 226 + { 227 + if (key->type_data.x[0]) 228 + return key->type_data.x[0]; 229 + 230 + return user_read(key, buffer, buflen); 231 + } 232 + 233 struct key_type key_type_dns_resolver = { 234 .name = "dns_resolver", 235 .instantiate = dns_resolver_instantiate, ··· 224 .revoke = user_revoke, 225 .destroy = user_destroy, 226 .describe = dns_resolver_describe, 227 + .read = dns_resolver_read, 228 }; 229 230 static int __init init_dns_resolver(void)
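Note: with dns_resolver_read() in place, a dns_resolver key's payload (or an error cached in type_data.x[0] when the lookup failed) can be retrieved through the ordinary key-reading interfaces. A small userspace sketch using libkeyutils; the hostname and the choice of the session keyring are illustrative assumptions:

/* gcc -o dnskey dnskey.c -lkeyutils
 * Assumes request-key(8) is configured for the dns_resolver key type. */
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>

int main(void)
{
	void *payload = NULL;
	long len;
	key_serial_t key;

	/* Trigger (or reuse) a dns_resolver lookup for an example name. */
	key = request_key("dns_resolver", "foo.example.com", NULL,
			  KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("request_key");
		return 1;
	}

	/* Read the stored result; a negatively instantiated key makes
	 * this fail with the cached error instead of returning data. */
	len = keyctl_read_alloc(key, &payload);
	if (len < 0) {
		perror("keyctl_read_alloc");
		return 1;
	}

	printf("%.*s\n", (int)len, (char *)payload);
	free(payload);
	return 0;
}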
+1 -1
net/ipv4/ip_gre.c
··· 1765 MODULE_LICENSE("GPL"); 1766 MODULE_ALIAS_RTNL_LINK("gre"); 1767 MODULE_ALIAS_RTNL_LINK("gretap"); 1768 - MODULE_ALIAS("gre0");
··· 1765 MODULE_LICENSE("GPL"); 1766 MODULE_ALIAS_RTNL_LINK("gre"); 1767 MODULE_ALIAS_RTNL_LINK("gretap"); 1768 + MODULE_ALIAS_NETDEV("gre0");
+1 -1
net/ipv4/ipip.c
··· 913 module_init(ipip_init); 914 module_exit(ipip_fini); 915 MODULE_LICENSE("GPL"); 916 - MODULE_ALIAS("tunl0");
··· 913 module_init(ipip_init); 914 module_exit(ipip_fini); 915 MODULE_LICENSE("GPL"); 916 + MODULE_ALIAS_NETDEV("tunl0");
+10 -7
net/ipv6/route.c
··· 2557 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, 2558 void __user *buffer, size_t *lenp, loff_t *ppos) 2559 { 2560 - struct net *net = current->nsproxy->net_ns; 2561 - int delay = net->ipv6.sysctl.flush_delay; 2562 - if (write) { 2563 - proc_dointvec(ctl, write, buffer, lenp, ppos); 2564 - fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); 2565 - return 0; 2566 - } else 2567 return -EINVAL; 2568 } 2569 2570 ctl_table ipv6_route_table_template[] = { ··· 2653 2654 if (table) { 2655 table[0].data = &net->ipv6.sysctl.flush_delay; 2656 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 2657 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2658 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
··· 2557 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, 2558 void __user *buffer, size_t *lenp, loff_t *ppos) 2559 { 2560 + struct net *net; 2561 + int delay; 2562 + if (!write) 2563 return -EINVAL; 2564 + 2565 + net = (struct net *)ctl->extra1; 2566 + delay = net->ipv6.sysctl.flush_delay; 2567 + proc_dointvec(ctl, write, buffer, lenp, ppos); 2568 + fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); 2569 + return 0; 2570 } 2571 2572 ctl_table ipv6_route_table_template[] = { ··· 2651 2652 if (table) { 2653 table[0].data = &net->ipv6.sysctl.flush_delay; 2654 + table[0].extra1 = net; 2655 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 2656 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2657 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
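Note: the rewritten flush handler takes the namespace from the table's ->extra1 (set up in the same hunk) rather than from current->nsproxy, and keeps the read path returning -EINVAL. A minimal userspace sketch that exercises the write path; the /proc path is the usual location of this per-namespace table, the rest is illustrative:

/* gcc -o flush6 flush6.c ; needs root. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* The sysctl is write-only: writing a value triggers an IPv6
	 * routing garbage-collection pass in the namespace that owns
	 * this table. */
	int fd = open("/proc/sys/net/ipv6/route/flush", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1\n", 2) != 2) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}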
+1 -1
net/ipv6/sit.c
··· 1290 module_init(sit_init); 1291 module_exit(sit_cleanup); 1292 MODULE_LICENSE("GPL"); 1293 - MODULE_ALIAS("sit0");
··· 1290 module_init(sit_init); 1291 module_exit(sit_cleanup); 1292 MODULE_LICENSE("GPL"); 1293 + MODULE_ALIAS_NETDEV("sit0");
+2 -2
net/netfilter/ipvs/ip_vs_ctl.c
··· 808 dest->u_threshold = udest->u_threshold; 809 dest->l_threshold = udest->l_threshold; 810 811 - spin_lock(&dest->dst_lock); 812 ip_vs_dst_reset(dest); 813 - spin_unlock(&dest->dst_lock); 814 815 if (add) 816 ip_vs_new_estimator(&dest->stats);
··· 808 dest->u_threshold = udest->u_threshold; 809 dest->l_threshold = udest->l_threshold; 810 811 + spin_lock_bh(&dest->dst_lock); 812 ip_vs_dst_reset(dest); 813 + spin_unlock_bh(&dest->dst_lock); 814 815 if (add) 816 ip_vs_new_estimator(&dest->stats);
+4
net/netfilter/nf_log.c
··· 85 86 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 87 { 88 mutex_lock(&nf_log_mutex); 89 if (__find_logger(pf, logger->name) == NULL) { 90 mutex_unlock(&nf_log_mutex); ··· 100 101 void nf_log_unbind_pf(u_int8_t pf) 102 { 103 mutex_lock(&nf_log_mutex); 104 rcu_assign_pointer(nf_loggers[pf], NULL); 105 mutex_unlock(&nf_log_mutex);
··· 85 86 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 87 { 88 + if (pf >= ARRAY_SIZE(nf_loggers)) 89 + return -EINVAL; 90 mutex_lock(&nf_log_mutex); 91 if (__find_logger(pf, logger->name) == NULL) { 92 mutex_unlock(&nf_log_mutex); ··· 98 99 void nf_log_unbind_pf(u_int8_t pf) 100 { 101 + if (pf >= ARRAY_SIZE(nf_loggers)) 102 + return; 103 mutex_lock(&nf_log_mutex); 104 rcu_assign_pointer(nf_loggers[pf], NULL); 105 mutex_unlock(&nf_log_mutex);
+14 -4
net/netlink/af_netlink.c
··· 1407 int noblock = flags&MSG_DONTWAIT; 1408 size_t copied; 1409 struct sk_buff *skb, *data_skb; 1410 - int err; 1411 1412 if (flags&MSG_OOB) 1413 return -EOPNOTSUPP; ··· 1470 1471 skb_free_datagram(sk, skb); 1472 1473 - if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1474 - netlink_dump(sk); 1475 1476 scm_recv(sock, msg, siocb->scm, flags); 1477 out: ··· 1741 struct netlink_callback *cb; 1742 struct sock *sk; 1743 struct netlink_sock *nlk; 1744 1745 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1746 if (cb == NULL) ··· 1770 nlk->cb = cb; 1771 mutex_unlock(nlk->cb_mutex); 1772 1773 - netlink_dump(sk); 1774 sock_put(sk); 1775 1776 /* We successfully started a dump, by returning -EINTR we 1777 * signal not to send ACK even if it was requested.
··· 1407 int noblock = flags&MSG_DONTWAIT; 1408 size_t copied; 1409 struct sk_buff *skb, *data_skb; 1410 + int err, ret; 1411 1412 if (flags&MSG_OOB) 1413 return -EOPNOTSUPP; ··· 1470 1471 skb_free_datagram(sk, skb); 1472 1473 + if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 1474 + ret = netlink_dump(sk); 1475 + if (ret) { 1476 + sk->sk_err = ret; 1477 + sk->sk_error_report(sk); 1478 + } 1479 + } 1480 1481 scm_recv(sock, msg, siocb->scm, flags); 1482 out: ··· 1736 struct netlink_callback *cb; 1737 struct sock *sk; 1738 struct netlink_sock *nlk; 1739 + int ret; 1740 1741 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1742 if (cb == NULL) ··· 1764 nlk->cb = cb; 1765 mutex_unlock(nlk->cb_mutex); 1766 1767 + ret = netlink_dump(sk); 1768 + 1769 sock_put(sk); 1770 + 1771 + if (ret) 1772 + return ret; 1773 1774 /* We successfully started a dump, by returning -EINTR we 1775 * signal not to send ACK even if it was requested.
+1
net/rxrpc/ar-input.c
··· 423 goto protocol_error; 424 } 425 426 case RXRPC_PACKET_TYPE_ACK: 427 /* ACK processing is done in process context */ 428 read_lock_bh(&call->state_lock);
··· 423 goto protocol_error; 424 } 425 426 + case RXRPC_PACKET_TYPE_ACKALL: 427 case RXRPC_PACKET_TYPE_ACK: 428 /* ACK processing is done in process context */ 429 read_lock_bh(&call->state_lock);
+2
sound/pci/hda/patch_cirrus.c
··· 1039 {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, 1040 {0x11, AC_VERB_SET_PROC_STATE, 0x00}, 1041 1042 {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ 1043 {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ 1044 /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ 1045 1046 {} /* terminator */ 1047 };
··· 1039 {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, 1040 {0x11, AC_VERB_SET_PROC_STATE, 0x00}, 1041 1042 + #if 0 /* Don't to set to D3 as we are in power-up sequence */ 1043 {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ 1044 {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ 1045 /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ 1046 + #endif 1047 1048 {} /* terminator */ 1049 };
+5
sound/pci/hda/patch_hdmi.c
··· 1634 { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1635 { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1636 { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1637 { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1638 { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1639 { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, ··· 1679 MODULE_ALIAS("snd-hda-codec-id:10de0012"); 1680 MODULE_ALIAS("snd-hda-codec-id:10de0013"); 1681 MODULE_ALIAS("snd-hda-codec-id:10de0014"); 1682 MODULE_ALIAS("snd-hda-codec-id:10de0018"); 1683 MODULE_ALIAS("snd-hda-codec-id:10de0019"); 1684 MODULE_ALIAS("snd-hda-codec-id:10de001a");
··· 1634 { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1635 { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1636 { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1637 + { .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1638 + { .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1639 + /* 17 is known to be absent */ 1640 { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1641 { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1642 { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, ··· 1676 MODULE_ALIAS("snd-hda-codec-id:10de0012"); 1677 MODULE_ALIAS("snd-hda-codec-id:10de0013"); 1678 MODULE_ALIAS("snd-hda-codec-id:10de0014"); 1679 + MODULE_ALIAS("snd-hda-codec-id:10de0015"); 1680 + MODULE_ALIAS("snd-hda-codec-id:10de0016"); 1681 MODULE_ALIAS("snd-hda-codec-id:10de0018"); 1682 MODULE_ALIAS("snd-hda-codec-id:10de0019"); 1683 MODULE_ALIAS("snd-hda-codec-id:10de001a");
+3 -6
sound/pci/hda/patch_realtek.c
··· 1133 nid = spec->autocfg.hp_pins[i]; 1134 if (!nid) 1135 break; 1136 - if (snd_hda_jack_detect(codec, nid)) { 1137 - spec->jack_present = 1; 1138 - break; 1139 - } 1140 - alc_report_jack(codec, spec->autocfg.hp_pins[i]); 1141 } 1142 1143 mute = spec->jack_present ? HDA_AMP_MUTE : 0; ··· 15012 SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), 15013 SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), 15014 SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), 15015 - SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC), 15016 SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), 15017 SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), 15018 SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
··· 1133 nid = spec->autocfg.hp_pins[i]; 1134 if (!nid) 1135 break; 1136 + alc_report_jack(codec, nid); 1137 + spec->jack_present |= snd_hda_jack_detect(codec, nid); 1138 } 1139 1140 mute = spec->jack_present ? HDA_AMP_MUTE : 0; ··· 15015 SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), 15016 SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), 15017 SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), 15018 + SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC), 15019 SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), 15020 SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), 15021 SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
+42 -9
sound/soc/codecs/wm8994.c
··· 110 111 unsigned int aif1clk_enable:1; 112 unsigned int aif2clk_enable:1; 113 }; 114 115 static int wm8994_readable(unsigned int reg) ··· 1018 1019 switch (event) { 1020 case SND_SOC_DAPM_PRE_PMU: 1021 - if (wm8994->aif1clk_enable) 1022 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1023 WM8994_AIF1CLK_ENA_MASK, 1024 WM8994_AIF1CLK_ENA); 1025 - if (wm8994->aif2clk_enable) 1026 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1027 WM8994_AIF2CLK_ENA_MASK, 1028 WM8994_AIF2CLK_ENA); 1029 break; 1030 } 1031 ··· 1044 1045 switch (event) { 1046 case SND_SOC_DAPM_POST_PMD: 1047 - if (wm8994->aif1clk_enable) { 1048 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1049 WM8994_AIF1CLK_ENA_MASK, 0); 1050 - wm8994->aif1clk_enable = 0; 1051 } 1052 - if (wm8994->aif2clk_enable) { 1053 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1054 WM8994_AIF2CLK_ENA_MASK, 0); 1055 - wm8994->aif2clk_enable = 0; 1056 } 1057 break; 1058 } ··· 1070 case SND_SOC_DAPM_PRE_PMU: 1071 wm8994->aif1clk_enable = 1; 1072 break; 1073 } 1074 1075 return 0; ··· 1088 case SND_SOC_DAPM_PRE_PMU: 1089 wm8994->aif2clk_enable = 1; 1090 break; 1091 } 1092 1093 return 0; 1094 } 1095 ··· 1423 SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), 1424 }; 1425 1426 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { 1427 SND_SOC_DAPM_INPUT("DMIC1DAT"), 1428 SND_SOC_DAPM_INPUT("DMIC2DAT"), ··· 1528 */ 1529 SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), 1530 SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), 1531 - 1532 - SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), 1533 - SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), 1534 1535 SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), 1536 SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), ··· 3309 if (wm8994->revision < 4) { 3310 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, 3311 ARRAY_SIZE(wm8994_lateclk_revd_widgets)); 3312 snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, 3313 ARRAY_SIZE(wm8994_dac_revd_widgets)); 3314 } else { 3315 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, 3316 ARRAY_SIZE(wm8994_lateclk_widgets)); 3317 snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, 3318 ARRAY_SIZE(wm8994_dac_widgets)); 3319 }
··· 110 111 unsigned int aif1clk_enable:1; 112 unsigned int aif2clk_enable:1; 113 + 114 + unsigned int aif1clk_disable:1; 115 + unsigned int aif2clk_disable:1; 116 }; 117 118 static int wm8994_readable(unsigned int reg) ··· 1015 1016 switch (event) { 1017 case SND_SOC_DAPM_PRE_PMU: 1018 + if (wm8994->aif1clk_enable) { 1019 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1020 WM8994_AIF1CLK_ENA_MASK, 1021 WM8994_AIF1CLK_ENA); 1022 + wm8994->aif1clk_enable = 0; 1023 + } 1024 + if (wm8994->aif2clk_enable) { 1025 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1026 WM8994_AIF2CLK_ENA_MASK, 1027 WM8994_AIF2CLK_ENA); 1028 + wm8994->aif2clk_enable = 0; 1029 + } 1030 break; 1031 } 1032 ··· 1037 1038 switch (event) { 1039 case SND_SOC_DAPM_POST_PMD: 1040 + if (wm8994->aif1clk_disable) { 1041 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1042 WM8994_AIF1CLK_ENA_MASK, 0); 1043 + wm8994->aif1clk_disable = 0; 1044 } 1045 + if (wm8994->aif2clk_disable) { 1046 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1047 WM8994_AIF2CLK_ENA_MASK, 0); 1048 + wm8994->aif2clk_disable = 0; 1049 } 1050 break; 1051 } ··· 1063 case SND_SOC_DAPM_PRE_PMU: 1064 wm8994->aif1clk_enable = 1; 1065 break; 1066 + case SND_SOC_DAPM_POST_PMD: 1067 + wm8994->aif1clk_disable = 1; 1068 + break; 1069 } 1070 1071 return 0; ··· 1078 case SND_SOC_DAPM_PRE_PMU: 1079 wm8994->aif2clk_enable = 1; 1080 break; 1081 + case SND_SOC_DAPM_POST_PMD: 1082 + wm8994->aif2clk_disable = 1; 1083 + break; 1084 } 1085 1086 + return 0; 1087 + } 1088 + 1089 + static int adc_mux_ev(struct snd_soc_dapm_widget *w, 1090 + struct snd_kcontrol *kcontrol, int event) 1091 + { 1092 + late_enable_ev(w, kcontrol, event); 1093 return 0; 1094 } 1095 ··· 1403 SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), 1404 }; 1405 1406 + static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = { 1407 + SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux, 1408 + adc_mux_ev, SND_SOC_DAPM_PRE_PMU), 1409 + SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux, 1410 + adc_mux_ev, SND_SOC_DAPM_PRE_PMU), 1411 + }; 1412 + 1413 + static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = { 1414 + SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), 1415 + SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), 1416 + }; 1417 + 1418 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { 1419 SND_SOC_DAPM_INPUT("DMIC1DAT"), 1420 SND_SOC_DAPM_INPUT("DMIC2DAT"), ··· 1496 */ 1497 SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), 1498 SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), 1499 1500 SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), 1501 SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), ··· 3280 if (wm8994->revision < 4) { 3281 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, 3282 ARRAY_SIZE(wm8994_lateclk_revd_widgets)); 3283 + snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets, 3284 + ARRAY_SIZE(wm8994_adc_revd_widgets)); 3285 snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, 3286 ARRAY_SIZE(wm8994_dac_revd_widgets)); 3287 } else { 3288 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, 3289 ARRAY_SIZE(wm8994_lateclk_widgets)); 3290 + snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets, 3291 + ARRAY_SIZE(wm8994_adc_widgets)); 3292 snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, 3293 ARRAY_SIZE(wm8994_dac_widgets)); 3294 }
+5
sound/soc/codecs/wm9081.c
··· 15 #include <linux/moduleparam.h> 16 #include <linux/init.h> 17 #include <linux/delay.h> 18 #include <linux/pm.h> 19 #include <linux/i2c.h> 20 #include <linux/platform_device.h> ··· 1341 i2c_set_clientdata(i2c, wm9081); 1342 wm9081->control_type = SND_SOC_I2C; 1343 wm9081->control_data = i2c; 1344 1345 ret = snd_soc_register_codec(&i2c->dev, 1346 &soc_codec_dev_wm9081, &wm9081_dai, 1);
··· 15 #include <linux/moduleparam.h> 16 #include <linux/init.h> 17 #include <linux/delay.h> 18 + #include <linux/device.h> 19 #include <linux/pm.h> 20 #include <linux/i2c.h> 21 #include <linux/platform_device.h> ··· 1340 i2c_set_clientdata(i2c, wm9081); 1341 wm9081->control_type = SND_SOC_I2C; 1342 wm9081->control_data = i2c; 1343 + 1344 + if (dev_get_platdata(&i2c->dev)) 1345 + memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev), 1346 + sizeof(wm9081->retune)); 1347 1348 ret = snd_soc_register_codec(&i2c->dev, 1349 &soc_codec_dev_wm9081, &wm9081_dai, 1);
+8 -3
tools/perf/util/header.c
··· 270 const char *name, bool is_kallsyms) 271 { 272 const size_t size = PATH_MAX; 273 - char *realname = realpath(name, NULL), 274 - *filename = malloc(size), 275 *linkname = malloc(size), *targetname; 276 int len, err = -1; 277 278 if (realname == NULL || filename == NULL || linkname == NULL) 279 goto out_free; ··· 310 if (symlink(targetname, linkname) == 0) 311 err = 0; 312 out_free: 313 - free(realname); 314 free(filename); 315 free(linkname); 316 return err;
··· 270 const char *name, bool is_kallsyms) 271 { 272 const size_t size = PATH_MAX; 273 + char *realname, *filename = malloc(size), 274 *linkname = malloc(size), *targetname; 275 int len, err = -1; 276 + 277 + if (is_kallsyms) 278 + realname = (char *)name; 279 + else 280 + realname = realpath(name, NULL); 281 282 if (realname == NULL || filename == NULL || linkname == NULL) 283 goto out_free; ··· 306 if (symlink(targetname, linkname) == 0) 307 err = 0; 308 out_free: 309 + if (!is_kallsyms) 310 + free(realname); 311 free(filename); 312 free(linkname); 313 return err;
+1 -1
tools/perf/util/symbol.c
··· 1836 int err = -1, fd; 1837 char symfs_vmlinux[PATH_MAX]; 1838 1839 - snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s", 1840 symbol_conf.symfs, vmlinux); 1841 fd = open(symfs_vmlinux, O_RDONLY); 1842 if (fd < 0)
··· 1836 int err = -1, fd; 1837 char symfs_vmlinux[PATH_MAX]; 1838 1839 + snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", 1840 symbol_conf.symfs, vmlinux); 1841 fd = open(symfs_vmlinux, O_RDONLY); 1842 if (fd < 0)