Merge commit '5359533801e3dd3abca5b7d3d985b0b33fd9fe8b' into drm-core-next

This commit changed an internal radeon structure, which meant that a new
driver in -next had to be fixed up. Merge in the commit and fix up the
driver.

Also fixes a trivial nouveau merge conflict.

Conflicts:
drivers/gpu/drm/nouveau/nouveau_mem.c

+1906 -1071
-6
Documentation/networking/00-INDEX
···
 40  40      - info on using the DECnet networking layer in Linux.
 41  41   depca.txt
 42  42      - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
 43   -   dgrs.txt
 44   -      - the Digi International RightSwitch SE-X Ethernet driver
 45  43   dmfe.txt
 46  44      - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 47  45   e100.txt
···
 48  50      - info on Intel's E1000 line of gigabit ethernet boards
 49  51   eql.txt
 50  52      - serial IP load balancing
 51   -   ethertap.txt
 52   -      - the Ethertap user space packet reception and transmission driver
 53  53   ewrk3.txt
 54  54      - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 55  55   filter.txt
···
100 104      - TUN/TAP device driver, allowing user space Rx/Tx of packets.
101 105   vortex.txt
102 106      - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
103   -   wavelan.txt
104   -      - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
105 107   x25.txt
106 108      - general info on X.25 development.
107 109   x25-iface.txt
+8 -1
Documentation/networking/dns_resolver.txt
···
 61  61   create	dns_resolver  	foo:*	*	/usr/sbin/dns.foo %k
 62  62
 63  63
 64   -
 65  64   =====
 66  65   USAGE
 67  66   =====
···
101 102
102 103   If _expiry is non-NULL, the expiry time (TTL) of the result will be
103 104   returned also.
105   +
106   +
107   +   ===============================
108   +   READING DNS KEYS FROM USERSPACE
109   +   ===============================
110   +
111   +   Keys of dns_resolver type can be read from userspace using keyctl_read() or
112   +   "keyctl read/print/pipe".
104 113
105 114
106 115   =========
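As an illustration of the read path the added documentation describes, a
minimal userspace sketch with libkeyutils might look like the following.
This is not part of the commit; the hostname is an arbitrary example and
the program assumes linking with -lkeyutils:

	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		long len;

		/* request_key() upcalls /sbin/request-key to instantiate
		 * the dns_resolver key on a cache miss. */
		key_serial_t key = request_key("dns_resolver", "example.com",
					       NULL, KEY_SPEC_SESSION_KEYRING);
		if (key == -1) {
			perror("request_key");
			return 1;
		}

		/* keyctl_read() copies the key payload into buf. */
		len = keyctl_read(key, buf, sizeof(buf));
		if (len == -1) {
			perror("keyctl_read");
			return 1;
		}
		printf("payload is %ld bytes\n", len);
		return 0;
	}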
+13 -2
MAINTAINERS
···
1010 1010   S:	Maintained
1011 1011   F:	arch/arm/mach-s5p*/
1012 1012
1013    +   ARM/SAMSUNG MOBILE MACHINE SUPPORT
1014    +   M:	Kyungmin Park <kyungmin.park@samsung.com>
1015    +   L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1016    +   S:	Maintained
1017    +   F:	arch/arm/mach-s5pv210/mach-aquila.c
1018    +   F:	arch/arm/mach-s5pv210/mach-goni.c
1019    +   F:	arch/arm/mach-exynos4/mach-universal_c210.c
1020    +   F:	arch/arm/mach-exynos4/mach-nuri.c
1021    +
1013 1022   ARM/SAMSUNG S5P SERIES FIMC SUPPORT
1014 1023   M:	Kyungmin Park <kyungmin.park@samsung.com>
1015 1024   M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
···
1476 1467
1477 1468   BONDING DRIVER
1478 1469   M:	Jay Vosburgh <fubar@us.ibm.com>
1470    +   M:	Andy Gospodarek <andy@greyhouse.net>
1479 1471   L:	netdev@vger.kernel.org
1480 1472   W:	http://sourceforge.net/projects/bonding/
1481 1473   S:	Supported
···
2043 2033   F:	drivers/scsi/dc395x.*
2044 2034
2045 2035   DCCP PROTOCOL
2046    -   M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
2036    +   M:	Gerrit Renker <gerrit@erg.abdn.ac.uk>
2047 2037   L:	dccp@vger.kernel.org
2048 2038   W:	http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
2049 2039   S:	Maintained
···
3529 3519   F:	Documentation/hwmon/jc42
3530 3520
3531 3521   JFS FILESYSTEM
3532    -   M:	Dave Kleikamp <shaggy@linux.vnet.ibm.com>
3522    +   M:	Dave Kleikamp <shaggy@kernel.org>
3533 3523   L:	jfs-discussion@lists.sourceforge.net
3534 3524   W:	http://jfs.sourceforge.net/
3535 3525   T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
···
5181 5171
5182 5172   RAPIDIO SUBSYSTEM
5183 5173   M:	Matt Porter <mporter@kernel.crashing.org>
5174    +   M:	Alexandre Bounine <alexandre.bounine@idt.com>
5184 5175   S:	Maintained
5185 5176   F:	drivers/rapidio/
5186 5177
+1 -1
Makefile
···
 1  1   VERSION = 2
 2  2   PATCHLEVEL = 6
 3  3   SUBLEVEL = 38
 4  -   EXTRAVERSION = -rc7
 4  +   EXTRAVERSION = -rc8
 5  5   NAME = Flesh-Eating Bats with Fangs
 6  6
 7  7   # *DOCUMENTATION*
+1
arch/alpha/Kconfig
···
11 11   	select HAVE_GENERIC_HARDIRQS
12 12   	select GENERIC_IRQ_PROBE
13 13   	select AUTO_IRQ_AFFINITY if SMP
14  +   	select GENERIC_HARDIRQS_NO_DEPRECATED
14 15   	help
15 16   	  The Alpha is a 64-bit general-purpose processor designed and
16 17   	  marketed by the Digital Equipment Corporation of blessed memory,
+9 -4
arch/alpha/kernel/irq.c
···
44 44
45 45   int irq_select_affinity(unsigned int irq)
46 46   {
47  -   	struct irq_desc *desc = irq_to_desc[irq];
47  +   	struct irq_data *data = irq_get_irq_data(irq);
48  +   	struct irq_chip *chip;
48 49   	static int last_cpu;
49 50   	int cpu = last_cpu + 1;
50 51
51  -   	if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
52  +   	if (!data)
53  +   		return 1;
54  +   	chip = irq_data_get_irq_chip(data);
55  +
56  +   	if (!chip->irq_set_affinity || irq_user_affinity[irq])
52 57   		return 1;
53 58
54 59   	while (!cpu_possible(cpu) ||
···
61 56   		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
62 57   	last_cpu = cpu;
63 58
64  -   	cpumask_copy(desc->affinity, cpumask_of(cpu));
65  -   	get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
59  +   	cpumask_copy(data->affinity, cpumask_of(cpu));
60  +   	chip->irq_set_affinity(data, cpumask_of(cpu), false);
66 61   	return 0;
67 62   }
68 63   #endif /* CONFIG_SMP */
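The alpha changes below all follow the same mechanical genirq conversion
seen here: irq_chip callbacks stop taking a bare IRQ number and instead
receive a struct irq_data *, recovering the number as d->irq; the chip
methods gain an irq_ prefix (.mask becomes .irq_mask, and so on); and
open-coded irq_to_desc(i)->status pokes become irq_set_status_flags(i,
IRQ_LEVEL). A minimal sketch of the pattern, with hypothetical foo_*
names and a made-up mask register:

	/* hypothetical shadow copy of the controller's mask register */
	static unsigned long cached_mask;

	static void foo_update_hw(unsigned long mask)
	{
		/* write mask to the (hypothetical) interrupt controller */
	}

	/* old style was: static void foo_unmask(unsigned int irq),
	 * wired up as .unmask; new style reads d->irq instead */
	static void foo_unmask(struct irq_data *d)
	{
		foo_update_hw(cached_mask &= ~(1UL << (d->irq - 16)));
	}

	static void foo_mask(struct irq_data *d)
	{
		foo_update_hw(cached_mask |= 1UL << (d->irq - 16));
	}

	static struct irq_chip foo_irq_type = {
		.name		= "FOO",
		.irq_unmask	= foo_unmask,
		.irq_mask	= foo_mask,
		.irq_mask_ack	= foo_mask,
	};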
+3 -8
arch/alpha/kernel/irq_alpha.c
···
228 228   void __init
229 229   init_rtc_irq(void)
230 230   {
231  -   	struct irq_desc *desc = irq_to_desc(RTC_IRQ);
232  -
233  -   	if (desc) {
234  -   		desc->status |= IRQ_DISABLED;
235  -   		set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
236  -   					      handle_simple_irq, "RTC");
237  -   		setup_irq(RTC_IRQ, &timer_irqaction);
238  -   	}
231  +   	set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
232  +   				      handle_simple_irq, "RTC");
233  +   	setup_irq(RTC_IRQ, &timer_irqaction);
239 234   }
240 235
241 236   /* Dummy irqactions.  */
+10 -8
arch/alpha/kernel/irq_i8259.c
···
33 33   }
34 34
35 35   inline void
36  -   i8259a_enable_irq(unsigned int irq)
36  +   i8259a_enable_irq(struct irq_data *d)
37 37   {
38 38   	spin_lock(&i8259_irq_lock);
39  -   	i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
39  +   	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
40 40   	spin_unlock(&i8259_irq_lock);
41 41   }
42 42
···
47 47   }
48 48
49 49   void
50  -   i8259a_disable_irq(unsigned int irq)
50  +   i8259a_disable_irq(struct irq_data *d)
51 51   {
52 52   	spin_lock(&i8259_irq_lock);
53  -   	__i8259a_disable_irq(irq);
53  +   	__i8259a_disable_irq(d->irq);
54 54   	spin_unlock(&i8259_irq_lock);
55 55   }
56 56
57 57   void
58  -   i8259a_mask_and_ack_irq(unsigned int irq)
58  +   i8259a_mask_and_ack_irq(struct irq_data *d)
59 59   {
60  +   	unsigned int irq = d->irq;
61  +
60 62   	spin_lock(&i8259_irq_lock);
61 63   	__i8259a_disable_irq(irq);
62 64
···
73 71
74 72   struct irq_chip i8259a_irq_type = {
75 73   	.name		= "XT-PIC",
76  -   	.unmask		= i8259a_enable_irq,
77  -   	.mask		= i8259a_disable_irq,
78  -   	.mask_ack	= i8259a_mask_and_ack_irq,
74  +   	.irq_unmask	= i8259a_enable_irq,
75  +   	.irq_mask	= i8259a_disable_irq,
76  +   	.irq_mask_ack	= i8259a_mask_and_ack_irq,
79 77   };
80 78
81 79   void __init
+3 -5
arch/alpha/kernel/irq_impl.h
···
31 31
32 32   extern void common_init_isa_dma(void);
33 33
34  -   extern void i8259a_enable_irq(unsigned int);
35  -   extern void i8259a_disable_irq(unsigned int);
36  -   extern void i8259a_mask_and_ack_irq(unsigned int);
37  -   extern unsigned int i8259a_startup_irq(unsigned int);
38  -   extern void i8259a_end_irq(unsigned int);
34  +   extern void i8259a_enable_irq(struct irq_data *d);
35  +   extern void i8259a_disable_irq(struct irq_data *d);
36  +   extern void i8259a_mask_and_ack_irq(struct irq_data *d);
39 37   extern struct irq_chip i8259a_irq_type;
40 38   extern void init_i8259a_irqs(void);
41 39
+10 -10
arch/alpha/kernel/irq_pyxis.c
···
 29  29   }
 30  30
 31  31   static inline void
 32   -   pyxis_enable_irq(unsigned int irq)
 32   +   pyxis_enable_irq(struct irq_data *d)
 33  33   {
 34   -   	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
 34   +   	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 35  35   }
 36  36
 37  37   static void
 38   -   pyxis_disable_irq(unsigned int irq)
 38   +   pyxis_disable_irq(struct irq_data *d)
 39  39   {
 40   -   	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
 40   +   	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 41  41   }
 42  42
 43  43   static void
 44   -   pyxis_mask_and_ack_irq(unsigned int irq)
 44   +   pyxis_mask_and_ack_irq(struct irq_data *d)
 45  45   {
 46   -   	unsigned long bit = 1UL << (irq - 16);
 46   +   	unsigned long bit = 1UL << (d->irq - 16);
 47  47   	unsigned long mask = cached_irq_mask &= ~bit;
 48  48
 49  49   	/* Disable the interrupt.  */
···
 58  58
 59  59   static struct irq_chip pyxis_irq_type = {
 60  60   	.name		= "PYXIS",
 61   -   	.mask_ack	= pyxis_mask_and_ack_irq,
 62   -   	.mask		= pyxis_disable_irq,
 63   -   	.unmask		= pyxis_enable_irq,
 61   +   	.irq_mask_ack	= pyxis_mask_and_ack_irq,
 62   +   	.irq_mask	= pyxis_disable_irq,
 63   +   	.irq_unmask	= pyxis_enable_irq,
 64  64   };
 65  65
 66  66   void
···
103 103   		if ((ignore_mask >> i) & 1)
104 104   			continue;
105 105   		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
106   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
106   +   		irq_set_status_flags(i, IRQ_LEVEL);
107 107   	}
108 108
109 109   	setup_irq(16+7, &isa_cascade_irqaction);
+8 -8
arch/alpha/kernel/irq_srm.c
···
18 18   DEFINE_SPINLOCK(srm_irq_lock);
19 19
20 20   static inline void
21  -   srm_enable_irq(unsigned int irq)
21  +   srm_enable_irq(struct irq_data *d)
22 22   {
23 23   	spin_lock(&srm_irq_lock);
24  -   	cserve_ena(irq - 16);
24  +   	cserve_ena(d->irq - 16);
25 25   	spin_unlock(&srm_irq_lock);
26 26   }
27 27
28 28   static void
29  -   srm_disable_irq(unsigned int irq)
29  +   srm_disable_irq(struct irq_data *d)
30 30   {
31 31   	spin_lock(&srm_irq_lock);
32  -   	cserve_dis(irq - 16);
32  +   	cserve_dis(d->irq - 16);
33 33   	spin_unlock(&srm_irq_lock);
34 34   }
35 35
36 36   /* Handle interrupts from the SRM, assuming no additional weirdness.  */
37 37   static struct irq_chip srm_irq_type = {
38 38   	.name		= "SRM",
39  -   	.unmask		= srm_enable_irq,
40  -   	.mask		= srm_disable_irq,
41  -   	.mask_ack	= srm_disable_irq,
39  +   	.irq_unmask	= srm_enable_irq,
40  +   	.irq_mask	= srm_disable_irq,
41  +   	.irq_mask_ack	= srm_disable_irq,
42 42   };
43 43
44 44   void __init
···
52 52   		if (i < 64 && ((ignore_mask >> i) & 1))
53 53   			continue;
54 54   		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
55  -   		irq_to_desc(i)->status |= IRQ_LEVEL;
55  +   		irq_set_status_flags(i, IRQ_LEVEL);
56 56   	}
57 57   }
58 58
+14 -14
arch/alpha/kernel/sys_alcor.c
···
 44  44   }
 45  45
 46  46   static inline void
 47   -   alcor_enable_irq(unsigned int irq)
 47   +   alcor_enable_irq(struct irq_data *d)
 48  48   {
 49   -   	alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
 49   +   	alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 50  50   }
 51  51
 52  52   static void
 53   -   alcor_disable_irq(unsigned int irq)
 53   +   alcor_disable_irq(struct irq_data *d)
 54  54   {
 55   -   	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
 55   +   	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 56  56   }
 57  57
 58  58   static void
 59   -   alcor_mask_and_ack_irq(unsigned int irq)
 59   +   alcor_mask_and_ack_irq(struct irq_data *d)
 60  60   {
 61   -   	alcor_disable_irq(irq);
 61   +   	alcor_disable_irq(d);
 62  62
 63  63   	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
 64   -   	*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
 64   +   	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
 65  65   	*(vuip)GRU_INT_CLEAR = 0; mb();
 66  66   }
 67  67
 68  68   static void
 69   -   alcor_isa_mask_and_ack_irq(unsigned int irq)
 69   +   alcor_isa_mask_and_ack_irq(struct irq_data *d)
 70  70   {
 71   -   	i8259a_mask_and_ack_irq(irq);
 71   +   	i8259a_mask_and_ack_irq(d);
 72  72
 73  73   	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
 74  74   	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
···
 77  77
 78  78   static struct irq_chip alcor_irq_type = {
 79  79   	.name		= "ALCOR",
 80   -   	.unmask		= alcor_enable_irq,
 81   -   	.mask		= alcor_disable_irq,
 82   -   	.mask_ack	= alcor_mask_and_ack_irq,
 80   +   	.irq_unmask	= alcor_enable_irq,
 81   +   	.irq_mask	= alcor_disable_irq,
 82   +   	.irq_mask_ack	= alcor_mask_and_ack_irq,
 83  83   };
 84  84
 85  85   static void
···
126 126   		if (i >= 16+20 && i <= 16+30)
127 127   			continue;
128 128   		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
129   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
129   +   		irq_set_status_flags(i, IRQ_LEVEL);
130 130   	}
131   -   	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
131   +   	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
132 132
133 133   	init_i8259a_irqs();
134 134   	common_init_isa_dma();
+8 -8
arch/alpha/kernel/sys_cabriolet.c
···
 46  46   }
 47  47
 48  48   static inline void
 49   -   cabriolet_enable_irq(unsigned int irq)
 49   +   cabriolet_enable_irq(struct irq_data *d)
 50  50   {
 51   -   	cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
 51   +   	cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 52  52   }
 53  53
 54  54   static void
 55   -   cabriolet_disable_irq(unsigned int irq)
 55   +   cabriolet_disable_irq(struct irq_data *d)
 56  56   {
 57   -   	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
 57   +   	cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 58  58   }
 59  59
 60  60   static struct irq_chip cabriolet_irq_type = {
 61  61   	.name		= "CABRIOLET",
 62   -   	.unmask		= cabriolet_enable_irq,
 63   -   	.mask		= cabriolet_disable_irq,
 64   -   	.mask_ack	= cabriolet_disable_irq,
 62   +   	.irq_unmask	= cabriolet_enable_irq,
 63   +   	.irq_mask	= cabriolet_disable_irq,
 64   +   	.irq_mask_ack	= cabriolet_disable_irq,
 65  65   };
 66  66
 67  67   static void
···
107 107   		for (i = 16; i < 35; ++i) {
108 108   			set_irq_chip_and_handler(i, &cabriolet_irq_type,
109 109   						 handle_level_irq);
110   -   			irq_to_desc(i)->status |= IRQ_LEVEL;
110   +   			irq_set_status_flags(i, IRQ_LEVEL);
111 111   		}
112 112   	}
+27 -25
arch/alpha/kernel/sys_dp264.c
···
 98  98   }
 99  99
100 100   static void
101   -   dp264_enable_irq(unsigned int irq)
101   +   dp264_enable_irq(struct irq_data *d)
102 102   {
103 103   	spin_lock(&dp264_irq_lock);
104   -   	cached_irq_mask |= 1UL << irq;
104   +   	cached_irq_mask |= 1UL << d->irq;
105 105   	tsunami_update_irq_hw(cached_irq_mask);
106 106   	spin_unlock(&dp264_irq_lock);
107 107   }
108 108
109 109   static void
110   -   dp264_disable_irq(unsigned int irq)
110   +   dp264_disable_irq(struct irq_data *d)
111 111   {
112 112   	spin_lock(&dp264_irq_lock);
113   -   	cached_irq_mask &= ~(1UL << irq);
113   +   	cached_irq_mask &= ~(1UL << d->irq);
114 114   	tsunami_update_irq_hw(cached_irq_mask);
115 115   	spin_unlock(&dp264_irq_lock);
116 116   }
117 117
118 118   static void
119   -   clipper_enable_irq(unsigned int irq)
119   +   clipper_enable_irq(struct irq_data *d)
120 120   {
121 121   	spin_lock(&dp264_irq_lock);
122   -   	cached_irq_mask |= 1UL << (irq - 16);
122   +   	cached_irq_mask |= 1UL << (d->irq - 16);
123 123   	tsunami_update_irq_hw(cached_irq_mask);
124 124   	spin_unlock(&dp264_irq_lock);
125 125   }
126 126
127 127   static void
128   -   clipper_disable_irq(unsigned int irq)
128   +   clipper_disable_irq(struct irq_data *d)
129 129   {
130 130   	spin_lock(&dp264_irq_lock);
131   -   	cached_irq_mask &= ~(1UL << (irq - 16));
131   +   	cached_irq_mask &= ~(1UL << (d->irq - 16));
132 132   	tsunami_update_irq_hw(cached_irq_mask);
133 133   	spin_unlock(&dp264_irq_lock);
134 134   }
···
149 149   }
150 150
151 151   static int
152   -   dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
153   -   {
152   +   dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
153   +   		   bool force)
154   +   {
154 155   	spin_lock(&dp264_irq_lock);
155   -   	cpu_set_irq_affinity(irq, *affinity);
156   +   	cpu_set_irq_affinity(d->irq, *affinity);
156 157   	tsunami_update_irq_hw(cached_irq_mask);
157 158   	spin_unlock(&dp264_irq_lock);
158 159
···
161 160   }
162 161
163 162   static int
164   -   clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
165   -   {
163   +   clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
164   +   		     bool force)
165   +   {
166 166   	spin_lock(&dp264_irq_lock);
167   -   	cpu_set_irq_affinity(irq - 16, *affinity);
167   +   	cpu_set_irq_affinity(d->irq - 16, *affinity);
168 168   	tsunami_update_irq_hw(cached_irq_mask);
169 169   	spin_unlock(&dp264_irq_lock);
170 170
···
173 171   }
174 172
175 173   static struct irq_chip dp264_irq_type = {
176   -   	.name = "DP264",
177   -   	.unmask = dp264_enable_irq,
178   -   	.mask = dp264_disable_irq,
179   -   	.mask_ack = dp264_disable_irq,
180   -   	.set_affinity = dp264_set_affinity,
174   +   	.name			= "DP264",
175   +   	.irq_unmask		= dp264_enable_irq,
176   +   	.irq_mask		= dp264_disable_irq,
177   +   	.irq_mask_ack		= dp264_disable_irq,
178   +   	.irq_set_affinity	= dp264_set_affinity,
181 179   };
182 180
183 181   static struct irq_chip clipper_irq_type = {
184   -   	.name = "CLIPPER",
185   -   	.unmask = clipper_enable_irq,
186   -   	.mask = clipper_disable_irq,
187   -   	.mask_ack = clipper_disable_irq,
188   -   	.set_affinity = clipper_set_affinity,
182   +   	.name			= "CLIPPER",
183   +   	.irq_unmask		= clipper_enable_irq,
184   +   	.irq_mask		= clipper_disable_irq,
185   +   	.irq_mask_ack		= clipper_disable_irq,
186   +   	.irq_set_affinity	= clipper_set_affinity,
189 187   };
190 188
191 189   static void
···
270 268   {
271 269   	long i;
272 270   	for (i = imin; i <= imax; ++i) {
273   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
274 271   		set_irq_chip_and_handler(i, ops, handle_level_irq);
272   +   		irq_set_status_flags(i, IRQ_LEVEL);
275 273   	}
276 274   }
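dp264 and titan also carry an SMP affinity hook; under the new API
.irq_set_affinity receives the irq_data plus a force flag rather than a
bare interrupt number. A sketch of the new shape, again with hypothetical
foo_* names:

	static int foo_set_affinity(struct irq_data *d,
				    const struct cpumask *affinity, bool force)
	{
		/* reroute the (hypothetical) controller for d->irq */
		return 0;
	}

	static struct irq_chip foo_irq_type = {
		.name			= "FOO",
		.irq_set_affinity	= foo_set_affinity,
	};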
+9 -9
arch/alpha/kernel/sys_eb64p.c
···
 44  44   }
 45  45
 46  46   static inline void
 47   -   eb64p_enable_irq(unsigned int irq)
 47   +   eb64p_enable_irq(struct irq_data *d)
 48  48   {
 49   -   	eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
 49   +   	eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 50  50   }
 51  51
 52  52   static void
 53   -   eb64p_disable_irq(unsigned int irq)
 53   +   eb64p_disable_irq(struct irq_data *d)
 54  54   {
 55   -   	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
 55   +   	eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 56  56   }
 57  57
 58  58   static struct irq_chip eb64p_irq_type = {
 59  59   	.name		= "EB64P",
 60   -   	.unmask		= eb64p_enable_irq,
 61   -   	.mask		= eb64p_disable_irq,
 62   -   	.mask_ack	= eb64p_disable_irq,
 60   +   	.irq_unmask	= eb64p_enable_irq,
 61   +   	.irq_mask	= eb64p_disable_irq,
 62   +   	.irq_mask_ack	= eb64p_disable_irq,
 63  63   };
 64  64
 65  65   static void
···
118 118   	init_i8259a_irqs();
119 119
120 120   	for (i = 16; i < 32; ++i) {
121   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
122 121   		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
122   +   		irq_set_status_flags(i, IRQ_LEVEL);
123 123   	}
124 124
125 125   	common_init_isa_dma();
126 126   	setup_irq(16+5, &isa_cascade_irqaction);
+8 -6
arch/alpha/kernel/sys_eiger.c
···
 51  51   }
 52  52
 53  53   static inline void
 54   -   eiger_enable_irq(unsigned int irq)
 54   +   eiger_enable_irq(struct irq_data *d)
 55  55   {
 56   +   	unsigned int irq = d->irq;
 56  57   	unsigned long mask;
 57  58   	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 58  59   	eiger_update_irq_hw(irq, mask);
 59  60   }
 60  61
 61  62   static void
 62   -   eiger_disable_irq(unsigned int irq)
 63   +   eiger_disable_irq(struct irq_data *d)
 63  64   {
 65   +   	unsigned int irq = d->irq;
 64  66   	unsigned long mask;
 65  67   	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 66  68   	eiger_update_irq_hw(irq, mask);
···
 70  68
 71  69   static struct irq_chip eiger_irq_type = {
 72  70   	.name		= "EIGER",
 73   -   	.unmask		= eiger_enable_irq,
 74   -   	.mask		= eiger_disable_irq,
 75   -   	.mask_ack	= eiger_disable_irq,
 71   +   	.irq_unmask	= eiger_enable_irq,
 72   +   	.irq_mask	= eiger_disable_irq,
 73   +   	.irq_mask_ack	= eiger_disable_irq,
 76  74   };
 77  75
 78  76   static void
···
138 136   	init_i8259a_irqs();
139 137
140 138   	for (i = 16; i < 128; ++i) {
141   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
142 139   		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
140   +   		irq_set_status_flags(i, IRQ_LEVEL);
143 141   	}
144 142   }
145 143
+12 -12
arch/alpha/kernel/sys_jensen.c
···
63 63    */
64 64
65 65   static void
66  -   jensen_local_enable(unsigned int irq)
66  +   jensen_local_enable(struct irq_data *d)
67 67   {
68 68   	/* the parport is really hw IRQ 1, silly Jensen.  */
69  -   	if (irq == 7)
70  -   		i8259a_enable_irq(1);
69  +   	if (d->irq == 7)
70  +   		i8259a_enable_irq(d);
71 71   }
72 72
73 73   static void
74  -   jensen_local_disable(unsigned int irq)
74  +   jensen_local_disable(struct irq_data *d)
75 75   {
76 76   	/* the parport is really hw IRQ 1, silly Jensen.  */
77  -   	if (irq == 7)
78  -   		i8259a_disable_irq(1);
77  +   	if (d->irq == 7)
78  +   		i8259a_disable_irq(d);
79 79   }
80 80
81 81   static void
82  -   jensen_local_mask_ack(unsigned int irq)
82  +   jensen_local_mask_ack(struct irq_data *d)
83 83   {
84 84   	/* the parport is really hw IRQ 1, silly Jensen.  */
85  -   	if (irq == 7)
86  -   		i8259a_mask_and_ack_irq(1);
85  +   	if (d->irq == 7)
86  +   		i8259a_mask_and_ack_irq(d);
87 87   }
88 88
89 89   static struct irq_chip jensen_local_irq_type = {
90 90   	.name		= "LOCAL",
91  -   	.unmask		= jensen_local_enable,
92  -   	.mask		= jensen_local_disable,
93  -   	.mask_ack	= jensen_local_mask_ack,
91  +   	.irq_unmask	= jensen_local_enable,
92  +   	.irq_mask	= jensen_local_disable,
93  +   	.irq_mask_ack	= jensen_local_mask_ack,
94 94   };
95 95
96 96   static void
+19 -23
arch/alpha/kernel/sys_marvel.c
···
104 104   }
105 105
106 106   static void
107   -   io7_enable_irq(unsigned int irq)
107   +   io7_enable_irq(struct irq_data *d)
108 108   {
109 109   	volatile unsigned long *ctl;
110   +   	unsigned int irq = d->irq;
110 111   	struct io7 *io7;
111 112
112 113   	ctl = io7_get_irq_ctl(irq, &io7);
···
116 115   		       __func__, irq);
117 116   		return;
118 117   	}
119   -
118   +
120 119   	spin_lock(&io7->irq_lock);
121 120   	*ctl |= 1UL << 24;
122 121   	mb();
···
125 124   }
126 125
127 126   static void
128   -   io7_disable_irq(unsigned int irq)
127   +   io7_disable_irq(struct irq_data *d)
129 128   {
130 129   	volatile unsigned long *ctl;
130   +   	unsigned int irq = d->irq;
131 131   	struct io7 *io7;
132 132
133 133   	ctl = io7_get_irq_ctl(irq, &io7);
···
137 135   		       __func__, irq);
138 136   		return;
139 137   	}
140   -
138   +
141 139   	spin_lock(&io7->irq_lock);
142 140   	*ctl &= ~(1UL << 24);
143 141   	mb();
···
146 144   }
147 145
148 146   static void
149   -   marvel_irq_noop(unsigned int irq)
150   -   {
151   -   	return;
152   -   }
153   -
154   -   static unsigned int
155   -   marvel_irq_noop_return(unsigned int irq)
156   -   {
157   -   	return 0;
147   +   marvel_irq_noop(struct irq_data *d)
148   +   {
149   +   	return;
158 150   }
159 151
160 152   static struct irq_chip marvel_legacy_irq_type = {
161 153   	.name		= "LEGACY",
162   -   	.mask		= marvel_irq_noop,
163   -   	.unmask		= marvel_irq_noop,
154   +   	.irq_mask	= marvel_irq_noop,
155   +   	.irq_unmask	= marvel_irq_noop,
164 156   };
165 157
166 158   static struct irq_chip io7_lsi_irq_type = {
167 159   	.name		= "LSI",
168   -   	.unmask		= io7_enable_irq,
169   -   	.mask		= io7_disable_irq,
170   -   	.mask_ack	= io7_disable_irq,
160   +   	.irq_unmask	= io7_enable_irq,
161   +   	.irq_mask	= io7_disable_irq,
162   +   	.irq_mask_ack	= io7_disable_irq,
171 163   };
172 164
173 165   static struct irq_chip io7_msi_irq_type = {
174 166   	.name		= "MSI",
175   -   	.unmask		= io7_enable_irq,
176   -   	.mask		= io7_disable_irq,
177   -   	.ack		= marvel_irq_noop,
167   +   	.irq_unmask	= io7_enable_irq,
168   +   	.irq_mask	= io7_disable_irq,
169   +   	.irq_ack	= marvel_irq_noop,
178 170   };
179 171
180 172   static void
···
276 280
277 281   	/* Set up the lsi irqs.  */
278 282   	for (i = 0; i < 128; ++i) {
279   -   		irq_to_desc(base + i)->status |= IRQ_LEVEL;
280 283   		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
284   +   		irq_set_status_flags(i, IRQ_LEVEL);
281 285   	}
282 286
283 287   	/* Disable the implemented irqs in hardware.  */
···
290 294
291 295   	/* Set up the msi irqs.  */
292 296   	for (i = 128; i < (128 + 512); ++i) {
293   -   		irq_to_desc(base + i)->status |= IRQ_LEVEL;
294 297   		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
298   +   		irq_set_status_flags(i, IRQ_LEVEL);
295 299   	}
296 300
297 301   	for (i = 0; i < 16; ++i)
+8 -8
arch/alpha/kernel/sys_mikasa.c
···
 43  43   }
 44  44
 45  45   static inline void
 46   -   mikasa_enable_irq(unsigned int irq)
 46   +   mikasa_enable_irq(struct irq_data *d)
 47  47   {
 48   -   	mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
 48   +   	mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 49  49   }
 50  50
 51  51   static void
 52   -   mikasa_disable_irq(unsigned int irq)
 52   +   mikasa_disable_irq(struct irq_data *d)
 53  53   {
 54   -   	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
 54   +   	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 55  55   }
 56  56
 57  57   static struct irq_chip mikasa_irq_type = {
 58  58   	.name		= "MIKASA",
 59   -   	.unmask		= mikasa_enable_irq,
 60   -   	.mask		= mikasa_disable_irq,
 61   -   	.mask_ack	= mikasa_disable_irq,
 59   +   	.irq_unmask	= mikasa_enable_irq,
 60   +   	.irq_mask	= mikasa_disable_irq,
 61   +   	.irq_mask_ack	= mikasa_disable_irq,
 62  62   };
 63  63
 64  64   static void
···
 98  98   	mikasa_update_irq_hw(0);
 99  99
100 100   	for (i = 16; i < 32; ++i) {
101   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
102 101   		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
102   +   		irq_set_status_flags(i, IRQ_LEVEL);
103 103   	}
104 104
105 105   	init_i8259a_irqs();
+8 -8
arch/alpha/kernel/sys_noritake.c
···
 48  48   }
 49  49
 50  50   static void
 51   -   noritake_enable_irq(unsigned int irq)
 51   +   noritake_enable_irq(struct irq_data *d)
 52  52   {
 53   -   	noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
 53   +   	noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 54  54   }
 55  55
 56  56   static void
 57   -   noritake_disable_irq(unsigned int irq)
 57   +   noritake_disable_irq(struct irq_data *d)
 58  58   {
 59   -   	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
 59   +   	noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 60  60   }
 61  61
 62  62   static struct irq_chip noritake_irq_type = {
 63  63   	.name		= "NORITAKE",
 64   -   	.unmask		= noritake_enable_irq,
 65   -   	.mask		= noritake_disable_irq,
 66   -   	.mask_ack	= noritake_disable_irq,
 64   +   	.irq_unmask	= noritake_enable_irq,
 65   +   	.irq_mask	= noritake_disable_irq,
 66   +   	.irq_mask_ack	= noritake_disable_irq,
 67  67   };
 68  68
 69  69   static void
···
127 127   	outw(0, 0x54c);
128 128
129 129   	for (i = 16; i < 48; ++i) {
130   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
131 130   		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
131   +   		irq_set_status_flags(i, IRQ_LEVEL);
132 132   	}
133 133
134 134   	init_i8259a_irqs();
+10 -7
arch/alpha/kernel/sys_rawhide.c
···
 56  56   	(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 57  57
 58  58   static inline void
 59   -   rawhide_enable_irq(unsigned int irq)
 59   +   rawhide_enable_irq(struct irq_data *d)
 60  60   {
 61  61   	unsigned int mask, hose;
 62   +   	unsigned int irq = d->irq;
 62  63
 63  64   	irq -= 16;
 64  65   	hose = irq / 24;
···
 77  76   }
 78  77
 79  78   static void
 80   -   rawhide_disable_irq(unsigned int irq)
 79   +   rawhide_disable_irq(struct irq_data *d)
 81  80   {
 82  81   	unsigned int mask, hose;
 82   +   	unsigned int irq = d->irq;
 83  83
 84  84   	irq -= 16;
 85  85   	hose = irq / 24;
···
 98  96   }
 99  97
100  98   static void
101   -   rawhide_mask_and_ack_irq(unsigned int irq)
 99   +   rawhide_mask_and_ack_irq(struct irq_data *d)
102 100   {
103 101   	unsigned int mask, mask1, hose;
102   +   	unsigned int irq = d->irq;
104 103
105 104   	irq -= 16;
106 105   	hose = irq / 24;
···
126 123
127 124   static struct irq_chip rawhide_irq_type = {
128 125   	.name		= "RAWHIDE",
129   -   	.unmask		= rawhide_enable_irq,
130   -   	.mask		= rawhide_disable_irq,
131   -   	.mask_ack	= rawhide_mask_and_ack_irq,
126   +   	.irq_unmask	= rawhide_enable_irq,
127   +   	.irq_mask	= rawhide_disable_irq,
128   +   	.irq_mask_ack	= rawhide_mask_and_ack_irq,
132 129   };
133 130
134 131   static void
···
180 177   	}
181 178
182 179   	for (i = 16; i < 128; ++i) {
183   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
184 180   		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
181   +   		irq_set_status_flags(i, IRQ_LEVEL);
185 182   	}
186 183
187 184   	init_i8259a_irqs();
+8 -8
arch/alpha/kernel/sys_rx164.c
···
 47  47   }
 48  48
 49  49   static inline void
 50   -   rx164_enable_irq(unsigned int irq)
 50   +   rx164_enable_irq(struct irq_data *d)
 51  51   {
 52   -   	rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
 52   +   	rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 53  53   }
 54  54
 55  55   static void
 56   -   rx164_disable_irq(unsigned int irq)
 56   +   rx164_disable_irq(struct irq_data *d)
 57  57   {
 58   -   	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
 58   +   	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 59  59   }
 60  60
 61  61   static struct irq_chip rx164_irq_type = {
 62  62   	.name		= "RX164",
 63   -   	.unmask		= rx164_enable_irq,
 64   -   	.mask		= rx164_disable_irq,
 65   -   	.mask_ack	= rx164_disable_irq,
 63   +   	.irq_unmask	= rx164_enable_irq,
 64   +   	.irq_mask	= rx164_disable_irq,
 65   +   	.irq_mask_ack	= rx164_disable_irq,
 66  66   };
 67  67
 68  68   static void
···
 99  99
100 100   	rx164_update_irq_hw(0);
101 101   	for (i = 16; i < 40; ++i) {
102   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
103 102   		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
103   +   		irq_set_status_flags(i, IRQ_LEVEL);
104 104   	}
105 105
106 106   	init_i8259a_irqs();
+10 -10
arch/alpha/kernel/sys_sable.c
···
443 443   /* GENERIC irq routines */
444 444
445 445   static inline void
446   -   sable_lynx_enable_irq(unsigned int irq)
446   +   sable_lynx_enable_irq(struct irq_data *d)
447 447   {
448 448   	unsigned long bit, mask;
449 449
450   -   	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
450   +   	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
451 451   	spin_lock(&sable_lynx_irq_lock);
452 452   	mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
453 453   	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
···
459 459   }
460 460
461 461   static void
462   -   sable_lynx_disable_irq(unsigned int irq)
462   +   sable_lynx_disable_irq(struct irq_data *d)
463 463   {
464 464   	unsigned long bit, mask;
465 465
466   -   	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
466   +   	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
467 467   	spin_lock(&sable_lynx_irq_lock);
468 468   	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
469 469   	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
···
475 475   }
476 476
477 477   static void
478   -   sable_lynx_mask_and_ack_irq(unsigned int irq)
478   +   sable_lynx_mask_and_ack_irq(struct irq_data *d)
479 479   {
480 480   	unsigned long bit, mask;
481 481
482   -   	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
482   +   	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
483 483   	spin_lock(&sable_lynx_irq_lock);
484 484   	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
485 485   	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
···
489 489
490 490   static struct irq_chip sable_lynx_irq_type = {
491 491   	.name		= "SABLE/LYNX",
492   -   	.unmask		= sable_lynx_enable_irq,
493   -   	.mask		= sable_lynx_disable_irq,
494   -   	.mask_ack	= sable_lynx_mask_and_ack_irq,
492   +   	.irq_unmask	= sable_lynx_enable_irq,
493   +   	.irq_mask	= sable_lynx_disable_irq,
494   +   	.irq_mask_ack	= sable_lynx_mask_and_ack_irq,
495 495   };
496 496
497 497   static void
···
518 518   	long i;
519 519
520 520   	for (i = 0; i < nr_of_irqs; ++i) {
521   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
522 521   		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
523 522   					 handle_level_irq);
523   +   		irq_set_status_flags(i, IRQ_LEVEL);
524 524   	}
525 525
526 526   	common_init_isa_dma();
+8 -6
arch/alpha/kernel/sys_takara.c
···
 45  45   }
 46  46
 47  47   static inline void
 48   -   takara_enable_irq(unsigned int irq)
 48   +   takara_enable_irq(struct irq_data *d)
 49  49   {
 50   +   	unsigned int irq = d->irq;
 50  51   	unsigned long mask;
 51  52   	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 52  53   	takara_update_irq_hw(irq, mask);
 53  54   }
 54  55
 55  56   static void
 56   -   takara_disable_irq(unsigned int irq)
 57   +   takara_disable_irq(struct irq_data *d)
 57  58   {
 59   +   	unsigned int irq = d->irq;
 58  60   	unsigned long mask;
 59  61   	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 60  62   	takara_update_irq_hw(irq, mask);
···
 64  62
 65  63   static struct irq_chip takara_irq_type = {
 66  64   	.name		= "TAKARA",
 67   -   	.unmask		= takara_enable_irq,
 68   -   	.mask		= takara_disable_irq,
 69   -   	.mask_ack	= takara_disable_irq,
 65   +   	.irq_unmask	= takara_enable_irq,
 66   +   	.irq_mask	= takara_disable_irq,
 67   +   	.irq_mask_ack	= takara_disable_irq,
 70  68   };
 71  69
 72  70   static void
···
138 136   		takara_update_irq_hw(i, -1);
139 137
140 138   	for (i = 16; i < 128; ++i) {
141   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
142 139   		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
140   +   		irq_set_status_flags(i, IRQ_LEVEL);
143 141   	}
144 142
145 143   	common_init_isa_dma();
+13 -9
arch/alpha/kernel/sys_titan.c
···
112 112   }
113 113
114 114   static inline void
115   -   titan_enable_irq(unsigned int irq)
115   +   titan_enable_irq(struct irq_data *d)
116 116   {
117   +   	unsigned int irq = d->irq;
117 118   	spin_lock(&titan_irq_lock);
118 119   	titan_cached_irq_mask |= 1UL << (irq - 16);
119 120   	titan_update_irq_hw(titan_cached_irq_mask);
···
122 121   }
123 122
124 123   static inline void
125   -   titan_disable_irq(unsigned int irq)
124   +   titan_disable_irq(struct irq_data *d)
126 125   {
126   +   	unsigned int irq = d->irq;
127 127   	spin_lock(&titan_irq_lock);
128 128   	titan_cached_irq_mask &= ~(1UL << (irq - 16));
129 129   	titan_update_irq_hw(titan_cached_irq_mask);
···
146 144   }
147 145
148 146   static int
149   -   titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
147   +   titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
148   +   		       bool force)
150 149   {
151   +   	unsigned int irq = d->irq;
151 152   	spin_lock(&titan_irq_lock);
152 153   	titan_cpu_set_irq_affinity(irq - 16, *affinity);
153 154   	titan_update_irq_hw(titan_cached_irq_mask);
···
179 175   {
180 176   	long i;
181 177   	for (i = imin; i <= imax; ++i) {
182   -   		irq_to_desc(i)->status |= IRQ_LEVEL;
183 178   		set_irq_chip_and_handler(i, ops, handle_level_irq);
179   +   		irq_set_status_flags(i, IRQ_LEVEL);
184 180   	}
185 181   }
186 182
187 183   static struct irq_chip titan_irq_type = {
188   -          .name = "TITAN",
189   -          .unmask = titan_enable_irq,
190   -          .mask = titan_disable_irq,
191   -          .mask_ack = titan_disable_irq,
192   -          .set_affinity = titan_set_irq_affinity,
184   +   	.name			= "TITAN",
185   +   	.irq_unmask		= titan_enable_irq,
186   +   	.irq_mask		= titan_disable_irq,
187   +   	.irq_mask_ack		= titan_disable_irq,
188   +   	.irq_set_affinity	= titan_set_irq_affinity,
193 189   };
194 190
195 191   static irqreturn_t
+19 -13
arch/alpha/kernel/sys_wildfire.c
···
104 104   }
105 105
106 106   static void
107   -   wildfire_enable_irq(unsigned int irq)
107   +   wildfire_enable_irq(struct irq_data *d)
108 108   {
109   +   	unsigned int irq = d->irq;
110   +
109 111   	if (irq < 16)
110   -   		i8259a_enable_irq(irq);
112   +   		i8259a_enable_irq(d);
111 113
112 114   	spin_lock(&wildfire_irq_lock);
113 115   	set_bit(irq, &cached_irq_mask);
···
118 116   }
119 117
120 118   static void
121   -   wildfire_disable_irq(unsigned int irq)
119   +   wildfire_disable_irq(struct irq_data *d)
122 120   {
121   +   	unsigned int irq = d->irq;
122   +
123 123   	if (irq < 16)
124   -   		i8259a_disable_irq(irq);
124   +   		i8259a_disable_irq(d);
125 125
126 126   	spin_lock(&wildfire_irq_lock);
127 127   	clear_bit(irq, &cached_irq_mask);
···
132 128   }
133 129
134 130   static void
135   -   wildfire_mask_and_ack_irq(unsigned int irq)
131   +   wildfire_mask_and_ack_irq(struct irq_data *d)
136 132   {
133   +   	unsigned int irq = d->irq;
134   +
137 135   	if (irq < 16)
138   -   		i8259a_mask_and_ack_irq(irq);
136   +   		i8259a_mask_and_ack_irq(d);
139 137
140 138   	spin_lock(&wildfire_irq_lock);
141 139   	clear_bit(irq, &cached_irq_mask);
···
147 141
148 142   static struct irq_chip wildfire_irq_type = {
149 143   	.name		= "WILDFIRE",
150   -   	.unmask		= wildfire_enable_irq,
151   -   	.mask		= wildfire_disable_irq,
152   -   	.mask_ack	= wildfire_mask_and_ack_irq,
144   +   	.irq_unmask	= wildfire_enable_irq,
145   +   	.irq_mask	= wildfire_disable_irq,
146   +   	.irq_mask_ack	= wildfire_mask_and_ack_irq,
153 147   };
154 148
155 149   static void __init
···
183 177   	for (i = 0; i < 16; ++i) {
184 178   		if (i == 2)
185 179   			continue;
186   -   		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
187 180   		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
188 181   					 handle_level_irq);
182   +   		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
189 183   	}
190 184
191   -   	irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
192 185   	set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
193 186   				 handle_level_irq);
187   +   	irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
194 188   	for (i = 40; i < 64; ++i) {
195   -   		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
196 189   		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
197 190   					 handle_level_irq);
191   +   		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
198 192   	}
199 193
200   -           setup_irq(32+irq_bias, &isa_enable);
194   +   	setup_irq(32+irq_bias, &isa_enable);
201 195   }
202 196
203 197   static void __init
+2
arch/arm/common/Kconfig
···
 6  6
 7  7   config ARM_VIC_NR
 8  8   	int
 9  +   	default 4 if ARCH_S5PV210
10  +   	default 3 if ARCH_S5P6442 || ARCH_S5PC100
 9 11   	default 2
10 12   	depends on ARM_VIC
11 13   	help
-4
arch/arm/include/asm/mach/arch.h
···
15 15   struct sys_timer;
16 16
17 17   struct machine_desc {
18  -   	/*
19  -   	 * Note! The first two elements are used
20  -   	 * by assembler code in head.S, head-common.S
21  -   	 */
22 18   	unsigned int		nr;		/* architecture number	*/
23 19   	const char		*name;		/* architecture name	*/
24 20   	unsigned long		boot_params;	/* tagged list		*/
+2
arch/arm/include/asm/pgalloc.h
···
10 10   #ifndef _ASMARM_PGALLOC_H
11 11   #define _ASMARM_PGALLOC_H
12 12
13  +   #include <linux/pagemap.h>
14  +
13 15   #include <asm/domain.h>
14 16   #include <asm/pgtable-hwdef.h>
15 17   #include <asm/processor.h>
+23 -3
arch/arm/kernel/hw_breakpoint.c
···
836 836   /*
837 837    * One-time initialisation.
838 838    */
839   -   static void reset_ctrl_regs(void *unused)
839   +   static void reset_ctrl_regs(void *info)
840 840   {
841   -   	int i;
841   +   	int i, cpu = smp_processor_id();
842   +   	u32 dbg_power;
843   +   	cpumask_t *cpumask = info;
842 844
843 845   	/*
844 846   	 * v7 debug contains save and restore registers so that debug state
···
851 849   	 * later on.
852 850   	 */
853 851   	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
852   +   		/*
853   +   		 * Ensure sticky power-down is clear (i.e. debug logic is
854   +   		 * powered up).
855   +   		 */
856   +   		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
857   +   		if ((dbg_power & 0x1) == 0) {
858   +   			pr_warning("CPU %d debug is powered down!\n", cpu);
859   +   			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
860   +   			return;
861   +   		}
862   +
854 863   		/*
855 864   		 * Unconditionally clear the lock by writing a value
856 865   		 * other than 0xC5ACCE55 to the access register.
···
900 887   static int __init arch_hw_breakpoint_init(void)
901 888   {
902 889   	u32 dscr;
890   +   	cpumask_t cpumask = { CPU_BITS_NONE };
903 891
904 892   	debug_arch = get_debug_arch();
···
925 911   	 * Reset the breakpoint resources. We assume that a halting
926 912   	 * debugger will leave the world in a nice state for us.
927 913   	 */
928   -   	on_each_cpu(reset_ctrl_regs, NULL, 1);
914   +   	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
915   +   	if (!cpumask_empty(&cpumask)) {
916   +   		core_num_brps = 0;
917   +   		core_num_reserved_brps = 0;
918   +   		core_num_wrps = 0;
919   +   		return 0;
920   +   	}
929 921
930 922   	ARM_DBG_READ(c1, 0, dscr);
931 923   	if (dscr & ARM_DSCR_HDBGEN) {
+3 -3
arch/arm/kernel/ptrace.c
···
 996  996   		while (!(arch_ctrl.len & 0x1))
 997  997   			arch_ctrl.len >>= 1;
 998  998
 999    -   		if (idx & 0x1)
1000    -   			reg = encode_ctrl_reg(arch_ctrl);
1001    -   		else
 999    +   		if (num & 0x1)
1002 1000   			reg = bp->attr.bp_addr;
1001    +   		else
1002    +   			reg = encode_ctrl_reg(arch_ctrl);
1003 1003   	}
1004 1004
1005 1005   put:
+1 -1
arch/arm/mach-davinci/cpufreq.c
···
132 132   	return ret;
133 133   }
134 134
135   -   static int __init davinci_cpu_init(struct cpufreq_policy *policy)
135   +   static int davinci_cpu_init(struct cpufreq_policy *policy)
136 136   {
137 137   	int result = 0;
138 138   	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+7
arch/arm/mach-davinci/devices-da8xx.c
···
480 480   	.resource	= da850_mcasp_resources,
481 481   };
482 482
483   +   struct platform_device davinci_pcm_device = {
484   +   	.name	= "davinci-pcm-audio",
485   +   	.id	= -1,
486   +   };
487   +
483 488   void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
489 489   {
490   +   	platform_device_register(&davinci_pcm_device);
491   +
485 492   	/* DA830/OMAP-L137 has 3 instances of McASP */
486 493   	if (cpu_is_davinci_da830() && id == 1) {
487 494   		da830_mcasp1_device.dev.platform_data = pdata;
+9 -9
arch/arm/mach-davinci/gpio-tnetv107x.c
···
 58  58
 59  59   	spin_lock_irqsave(&ctlr->lock, flags);
 60  60
 61   -   	gpio_reg_set_bit(&regs->enable, gpio);
 61   +   	gpio_reg_set_bit(regs->enable, gpio);
 62  62
 63  63   	spin_unlock_irqrestore(&ctlr->lock, flags);
 64  64
···
 74  74
 75  75   	spin_lock_irqsave(&ctlr->lock, flags);
 76  76
 77   -   	gpio_reg_clear_bit(&regs->enable, gpio);
 77   +   	gpio_reg_clear_bit(regs->enable, gpio);
 78  78
 79  79   	spin_unlock_irqrestore(&ctlr->lock, flags);
 80  80   }
···
 88  88
 89  89   	spin_lock_irqsave(&ctlr->lock, flags);
 90  90
 91   -   	gpio_reg_set_bit(&regs->direction, gpio);
 91   +   	gpio_reg_set_bit(regs->direction, gpio);
 92  92
 93  93   	spin_unlock_irqrestore(&ctlr->lock, flags);
 94  94
···
106 106   	spin_lock_irqsave(&ctlr->lock, flags);
107 107
108 108   	if (value)
109   -   		gpio_reg_set_bit(&regs->data_out, gpio);
109   +   		gpio_reg_set_bit(regs->data_out, gpio);
110 110   	else
111   -   		gpio_reg_clear_bit(&regs->data_out, gpio);
111   +   		gpio_reg_clear_bit(regs->data_out, gpio);
112 112
113   -   	gpio_reg_clear_bit(&regs->direction, gpio);
113   +   	gpio_reg_clear_bit(regs->direction, gpio);
114 114
115 115   	spin_unlock_irqrestore(&ctlr->lock, flags);
116 116
···
124 124   	unsigned gpio = chip->base + offset;
125 125   	int ret;
126 126
127   -   	ret = gpio_reg_get_bit(&regs->data_in, gpio);
127   +   	ret = gpio_reg_get_bit(regs->data_in, gpio);
128 128
129 129   	return ret ? 1 : 0;
130 130   }
···
140 140
141 141   	spin_lock_irqsave(&ctlr->lock, flags);
142 142
143 143   	if (value)
143   -   		gpio_reg_set_bit(&regs->data_out, gpio);
143   +   		gpio_reg_set_bit(regs->data_out, gpio);
144 144   	else
145   -   		gpio_reg_clear_bit(&regs->data_out, gpio);
145   +   		gpio_reg_clear_bit(regs->data_out, gpio);
146 146
147 147   	spin_unlock_irqrestore(&ctlr->lock, flags);
148 148   }
+2
arch/arm/mach-davinci/include/mach/clkdev.h
···
1 1   #ifndef __MACH_CLKDEV_H
2 2   #define __MACH_CLKDEV_H
3 3
4 +   struct clk;
5 +
4 6   static inline int __clk_get(struct clk *clk)
5 7   {
6 8   	return 1;
+6 -4
arch/arm/mach-omap2/mailbox.c
···
193 193   				   omap_mbox_type_t irq)
194 194   {
195 195   	struct omap_mbox2_priv *p = mbox->priv;
196   -   	u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
197   -   	l = mbox_read_reg(p->irqdisable);
198   -   	l &= ~bit;
199   -   	mbox_write_reg(l, p->irqdisable);
196   +   	u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
197   +
198   +   	if (!cpu_is_omap44xx())
199   +   		bit = mbox_read_reg(p->irqdisable) & ~bit;
200   +
201   +   	mbox_write_reg(bit, p->irqdisable);
200 202   }
201 203
202 204   static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
+15 -18
arch/arm/mach-omap2/smartreflex.c
···
282 282   		dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
283 283   			"interrupt handler. Smartreflex will"
284 284   			"not function as desired\n", __func__);
285   +   		kfree(name);
285 286   		kfree(sr_info);
286 287   		return ret;
287 288   	}
···
880 879   		ret = sr_late_init(sr_info);
881 880   		if (ret) {
882 881   			pr_warning("%s: Error in SR late init\n", __func__);
883   -   			return ret;
882   +   			goto err_release_region;
884 883   		}
885 884   	}
886 885
···
891 890   	 * not try to create rest of the debugfs entries.
892 891   	 */
893 892   	vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
894   -   	if (!vdd_dbg_dir)
895   -   		return -EINVAL;
893   +   	if (!vdd_dbg_dir) {
894   +   		ret = -EINVAL;
895   +   		goto err_release_region;
896   +   	}
896 897
897 898   	dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
898 899   	if (IS_ERR(dbg_dir)) {
899 900   		dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
900 901   			__func__);
901   -   		return PTR_ERR(dbg_dir);
902   +   		ret = PTR_ERR(dbg_dir);
903   +   		goto err_release_region;
902 904   	}
903 905
904 906   	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
···
917 913   	if (IS_ERR(nvalue_dir)) {
918 914   		dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
919 915   			"for n-values\n", __func__);
920   -   		return PTR_ERR(nvalue_dir);
916   +   		ret = PTR_ERR(nvalue_dir);
917   +   		goto err_release_region;
921 918   	}
922 919
923 920   	omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
···
927 922   			" corresponding vdd vdd_%s. Cannot create debugfs"
928 923   			"entries for n-values\n",
929 924   			__func__, sr_info->voltdm->name);
930   -   		return -ENODATA;
925   +   		ret = -ENODATA;
926   +   		goto err_release_region;
931 927   	}
932 928
933 929   	for (i = 0; i < sr_info->nvalue_count; i++) {
934   -   		char *name;
935   -   		char volt_name[32];
930   +   		char name[NVALUE_NAME_LEN + 1];
936 931
937   -   		name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
938   -   		if (!name) {
939   -   			dev_err(&pdev->dev, "%s: Unable to allocate memory"
940   -   				" for n-value directory name\n", __func__);
941   -   			return -ENOMEM;
942   -   		}
943   -
944   -   		strcpy(name, "volt_");
945   -   		sprintf(volt_name, "%d", volt_data[i].volt_nominal);
946   -   		strcat(name, volt_name);
932   +   		snprintf(name, sizeof(name), "volt_%d",
933   +   			 volt_data[i].volt_nominal);
947 934   		(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
948 935   					  &(sr_info->nvalue_table[i].nvalue));
949 936   	}
+1
arch/arm/mach-pxa/pxa25x.c
···
347 347   	&pxa25x_device_assp,
348 348   	&pxa25x_device_pwm0,
349 349   	&pxa25x_device_pwm1,
350   +   	&pxa_device_asoc_platform,
350 351   };
351 352
352 353   static struct sys_device pxa25x_sysdev[] = {
-2
arch/arm/mach-pxa/tosa-bt.c
···
81 81   		goto err_rfk_alloc;
82 82   	}
83 83
84  -   	rfkill_set_led_trigger_name(rfk, "tosa-bt");
85  -
86 84   	rc = rfkill_register(rfk);
87 85   	if (rc)
88 86   		goto err_rfkill;
+6
arch/arm/mach-pxa/tosa.c
···
875 875   	.dev.platform_data	= &sharpsl_rom_data,
876 876   };
877 877
878   +   static struct platform_device wm9712_device = {
879   +   	.name	= "wm9712-codec",
880   +   	.id	= -1,
881   +   };
882   +
878 883   static struct platform_device *devices[] __initdata = {
879 884   	&tosascoop_device,
880 885   	&tosascoop_jc_device,
···
890 885   	&tosaled_device,
891 886   	&tosa_bt_device,
892 887   	&sharpsl_rom_device,
888   +   	&wm9712_device,
893 889   };
894 890
895 891   static void tosa_poweroff(void)
+1
arch/arm/mach-s3c2440/Kconfig
···
 99  99   	select POWER_SUPPLY
100 100   	select MACH_NEO1973
101 101   	select S3C2410_PWM
102   +   	select S3C_DEV_USB_HOST
102 103   	help
103 104   	  Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
104 105
+13 -13
arch/arm/mach-s3c2440/include/mach/gta02.h
···
44 44   #define GTA02v3_GPIO_nUSB_FLT	S3C2410_GPG(10)	/* v3 + v4 only */
45 45   #define GTA02v3_GPIO_nGSM_OC	S3C2410_GPG(11)	/* v3 + v4 only */
46 46
47  -   #define GTA02_GPIO_AMP_SHUT	S3C2440_GPJ1	/* v2 + v3 + v4 only */
48  -   #define GTA02v1_GPIO_WLAN_GPIO10	S3C2440_GPJ2
49  -   #define GTA02_GPIO_HP_IN	S3C2440_GPJ2	/* v2 + v3 + v4 only */
50  -   #define GTA02_GPIO_INT0		S3C2440_GPJ3	/* v2 + v3 + v4 only */
51  -   #define GTA02_GPIO_nGSM_EN	S3C2440_GPJ4
52  -   #define GTA02_GPIO_3D_RESET	S3C2440_GPJ5
53  -   #define GTA02_GPIO_nDL_GSM	S3C2440_GPJ6	/* v4 + v5 only */
54  -   #define GTA02_GPIO_WLAN_GPIO0	S3C2440_GPJ7
55  -   #define GTA02v1_GPIO_BAT_ID	S3C2440_GPJ8
56  -   #define GTA02_GPIO_KEEPACT	S3C2440_GPJ8
57  -   #define GTA02v1_GPIO_HP_IN	S3C2440_GPJ10
58  -   #define GTA02_CHIP_PWD		S3C2440_GPJ11	/* v2 + v3 + v4 only */
59  -   #define GTA02_GPIO_nWLAN_RESET	S3C2440_GPJ12	/* v2 + v3 + v4 only */
47  +   #define GTA02_GPIO_AMP_SHUT	S3C2410_GPJ(1)	/* v2 + v3 + v4 only */
48  +   #define GTA02v1_GPIO_WLAN_GPIO10	S3C2410_GPJ(2)
49  +   #define GTA02_GPIO_HP_IN	S3C2410_GPJ(2)	/* v2 + v3 + v4 only */
50  +   #define GTA02_GPIO_INT0		S3C2410_GPJ(3)	/* v2 + v3 + v4 only */
51  +   #define GTA02_GPIO_nGSM_EN	S3C2410_GPJ(4)
52  +   #define GTA02_GPIO_3D_RESET	S3C2410_GPJ(5)
53  +   #define GTA02_GPIO_nDL_GSM	S3C2410_GPJ(6)	/* v4 + v5 only */
54  +   #define GTA02_GPIO_WLAN_GPIO0	S3C2410_GPJ(7)
55  +   #define GTA02v1_GPIO_BAT_ID	S3C2410_GPJ(8)
56  +   #define GTA02_GPIO_KEEPACT	S3C2410_GPJ(8)
57  +   #define GTA02v1_GPIO_HP_IN	S3C2410_GPJ(10)
58  +   #define GTA02_CHIP_PWD		S3C2410_GPJ(11)	/* v2 + v3 + v4 only */
59  +   #define GTA02_GPIO_nWLAN_RESET	S3C2410_GPJ(12)	/* v2 + v3 + v4 only */
60 60
61 61   #define GTA02_IRQ_GSENSOR_1	IRQ_EINT0
62 62   #define GTA02_IRQ_MODEM		IRQ_EINT1
+6
arch/arm/mach-s3c64xx/clock.c
···
151 151   		.enable		= s3c64xx_pclk_ctrl,
152 152   		.ctrlbit	= S3C_CLKCON_PCLK_IIC,
153 153   	}, {
154   +   		.name		= "i2c",
155   +   		.id		= 1,
156   +   		.parent		= &clk_p,
157   +   		.enable		= s3c64xx_pclk_ctrl,
158   +   		.ctrlbit	= S3C6410_CLKCON_PCLK_I2C1,
159   +   	}, {
154 160   		.name		= "iis",
155 161   		.id		= 0,
156 162   		.parent		= &clk_p,
+6 -5
arch/arm/mach-s3c64xx/dma.c
···
690 690
691 691   	regptr = regs + PL080_Cx_BASE(0);
692 692
693   -   	for (ch = 0; ch < 8; ch++, chno++, chptr++) {
694   -   		printk(KERN_INFO "%s: registering DMA %d (%p)\n",
695   -   		       __func__, chno, regptr);
693   +   	for (ch = 0; ch < 8; ch++, chptr++) {
694   +   		pr_debug("%s: registering DMA %d (%p)\n",
695   +   			 __func__, chno + ch, regptr);
696 696
697 697   		chptr->bit = 1 << ch;
698   -   		chptr->number = chno;
698   +   		chptr->number = chno + ch;
699 699   		chptr->dmac = dmac;
700 700   		chptr->regs = regptr;
701 701   		regptr += PL080_Cx_STRIDE;
···
704 704   	/* for the moment, permanently enable the controller */
705 705   	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
706 706
707   -   	printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);
707   +   	printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
708   +   	       irq, regs, chno, chno+8);
708 709
709 710   	return 0;
710 711
+2 -2
arch/arm/mach-s3c64xx/gpiolib.c
···
 72  72   	.get_pull	= s3c_gpio_getpull_updown,
 73  73   };
 74  74
 75   -   int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
 75   +   static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
 76  76   {
 77  77   	return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
 78  78   }
···
138 138   	},
139 139   };
140 140
141   -   int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
141   +   static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
142 142   {
143 143   	return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
144 144   }
+7 -6
arch/arm/mach-s3c64xx/mach-smdk6410.c
···
 28  28   #include <linux/delay.h>
 29  29   #include <linux/smsc911x.h>
 30  30   #include <linux/regulator/fixed.h>
 31   +   #include <linux/regulator/machine.h>
 31  32
 32  33   #ifdef CONFIG_SMDK6410_WM1190_EV1
 33  34   #include <linux/mfd/wm8350/core.h>
···
352 351   /* VDD_UH_MMC, LDO5 on J5 */
353 352   static struct regulator_init_data smdk6410_vdduh_mmc = {
354 353   	.constraints = {
355   -   		.name = "PVDD_UH/PVDD_MMC",
354   +   		.name = "PVDD_UH+PVDD_MMC",
356 355   		.always_on = 1,
357 356   	},
358 357   };
···
418 417   /* S3C64xx internal logic & PLL */
419 418   static struct regulator_init_data wm8350_dcdc1_data = {
420 419   	.constraints = {
421   -   		.name = "PVDD_INT/PVDD_PLL",
420   +   		.name = "PVDD_INT+PVDD_PLL",
422 421   		.min_uV = 1200000,
423 422   		.max_uV = 1200000,
424 423   		.always_on = 1,
···
453 452
454 453   static struct regulator_init_data wm8350_dcdc4_data = {
455 454   	.constraints = {
456   -   		.name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV",
455   +   		.name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
457 456   		.min_uV = 3000000,
458 457   		.max_uV = 3000000,
459 458   		.always_on = 1,
···
465 464   /* OTGi/1190-EV1 HPVDD & AVDD */
466 465   static struct regulator_init_data wm8350_ldo4_data = {
467 466   	.constraints = {
468   -   		.name = "PVDD_OTGI/HPVDD/AVDD",
467   +   		.name = "PVDD_OTGI+HPVDD+AVDD",
469 468   		.min_uV = 1200000,
470 469   		.max_uV = 1200000,
471 470   		.apply_uV = 1,
···
553 552
554 553   static struct regulator_init_data wm1192_dcdc3 = {
555 554   	.constraints = {
556   -   		.name = "PVDD_MEM/PVDD_GPS",
555   +   		.name = "PVDD_MEM+PVDD_GPS",
557 556   		.always_on = 1,
558 557   	},
559 558   };
···
564 563
565 564   static struct regulator_init_data wm1192_ldo1 = {
566 565   	.constraints = {
567   -   		.name = "PVDD_LCD/PVDD_EXT",
566   +   		.name = "PVDD_LCD+PVDD_EXT",
568 567   		.always_on = 1,
569 568   	},
570 569   	.consumer_supplies = wm1192_ldo1_consumers,
+1 -1
arch/arm/mach-s3c64xx/setup-keypad.c
···
17 17   void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
18 18   {
19 19   	/* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
20  -   	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3));
20  +   	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
21 21
22 22   	/* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
23 23   	s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
+1 -1
arch/arm/mach-s3c64xx/setup-sdhci.c
···
56 56   	else
57 57   		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
58 58
59  -   	printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
59  +   	pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
60 60   	writel(ctrl2, r + S3C_SDHCI_CONTROL2);
61 61   	writel(ctrl3, r + S3C_SDHCI_CONTROL3);
62 62   }
+2 -2
arch/arm/mach-s5p64x0/include/mach/gpio.h
···
23 23   #define S5P6440_GPIO_A_NR	(6)
24 24   #define S5P6440_GPIO_B_NR	(7)
25 25   #define S5P6440_GPIO_C_NR	(8)
26  -   #define S5P6440_GPIO_F_NR	(2)
26  +   #define S5P6440_GPIO_F_NR	(16)
27 27   #define S5P6440_GPIO_G_NR	(7)
28 28   #define S5P6440_GPIO_H_NR	(10)
29 29   #define S5P6440_GPIO_I_NR	(16)
···
36 36   #define S5P6450_GPIO_B_NR	(7)
37 37   #define S5P6450_GPIO_C_NR	(8)
38 38   #define S5P6450_GPIO_D_NR	(8)
39  -   #define S5P6450_GPIO_F_NR	(2)
39  +   #define S5P6450_GPIO_F_NR	(16)
40 40   #define S5P6450_GPIO_G_NR	(14)
41 41   #define S5P6450_GPIO_H_NR	(10)
42 42   #define S5P6450_GPIO_I_NR	(16)
+1
arch/arm/mach-shmobile/board-ag5evm.c
···
454 454   	gpio_direction_output(GPIO_PORT217, 0);
455 455   	mdelay(1);
456 456   	gpio_set_value(GPIO_PORT217, 1);
457   +   	mdelay(100);
457 458
458 459   	/* LCD backlight controller */
459 460   	gpio_request(GPIO_PORT235, NULL); /* RESET */
+1 -1
arch/arm/mach-shmobile/board-ap4evb.c
···
1303 1303
1304 1304   	lcdc_info.clock_source			= LCDC_CLK_BUS;
1305 1305   	lcdc_info.ch[0].interface_type		= RGB18;
1306    -   	lcdc_info.ch[0].clock_divider		= 2;
1306    +   	lcdc_info.ch[0].clock_divider		= 3;
1307 1307   	lcdc_info.ch[0].flags			= 0;
1308 1308   	lcdc_info.ch[0].lcd_size_cfg.width	= 152;
1309 1309   	lcdc_info.ch[0].lcd_size_cfg.height	= 91;
+1 -1
arch/arm/mach-shmobile/board-mackerel.c
···
303 303   		.lcd_cfg = mackerel_lcdc_modes,
304 304   		.num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
305 305   		.interface_type		= RGB24,
306   -   		.clock_divider		= 2,
306   +   		.clock_divider		= 3,
307 307   		.flags			= 0,
308 308   		.lcd_size_cfg.width	= 152,
309 309   		.lcd_size_cfg.height	= 91,
+14 -3
arch/arm/mach-shmobile/clock-sh73a0.c
···
263 263   };
264 264
265 265   enum { MSTP001,
266   -          MSTP125, MSTP118, MSTP116, MSTP100,
266   +          MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
267 267          MSTP219,
268 268          MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
269 269          MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
···
275 275
276 276   static struct clk mstp_clks[MSTP_NR] = {
277 277   	[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
278   +   	[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
279   +   	[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
280   +   	[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
281   +   	[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
278 282   	[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
279 283   	[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
280 284   	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
···
310 306   	CLKDEV_CON_ID("r_clk", &r_clk),
311 307
312 308   	/* DIV6 clocks */
309   +   	CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
310   +   	CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
311   +   	CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
313 312   	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
314 313   	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
315 314   	CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
···
320 313
321 314   	/* MSTP32 clocks */
322 315   	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
323   -   	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
316   +   	CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
317   +   	CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
318   +   	CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
319   +   	CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
324 320   	CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
325 321   	CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
326   -   	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
327 322   	CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
323   +   	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
324   +   	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
328 325   	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
329 326   	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
330 327   	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
+5 -5
arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
···
 6  6   EW 0xE6020004, 0xA500
 7  7   EW 0xE6030004, 0xA500
 8  8
 9  -   DD 0x01001000, 0x01001000
10  -
11  9   LIST "GPIO Setting"
12 10   EB 0xE6051013, 0xA2
13 11
14 12   LIST "CPG"
15  -   ED 0xE6150080, 0x00000180
16 13   ED 0xE61500C0, 0x00000002
17 14
18 15   WAIT 1, 0xFE40009C
···
34 37
35 38   WAIT 1, 0xFE40009C
36 39
40  +   LIST "SUB/USBClk"
41  +   ED 0xE6150080, 0x00000180
42  +
37 43   LIST "BSC"
38 44   ED 0xFEC10000, 0x00E0001B
39 45
···
53 53   ED 0xFE40004C, 0x00110209
54 54   ED 0xFE400010, 0x00000087
55 55
56  -   WAIT 10, 0xFE40009C
56  +   WAIT 30, 0xFE40009C
57 57
58 58   ED 0xFE400084, 0x0000003F
59 59   EB 0xFE500000, 0x00
···
84 84
85 85   WAIT 1, 0xFE40009C
86 86
87  -   ED 0xE6150354, 0x00000002
87  +   ED 0xFE400354, 0x01AD8002
88 88
89 89   LIST "SCIF0 - Serial port for earlyprintk"
90 90   EB 0xE6053098, 0x11
+5 -5
arch/arm/mach-shmobile/include/mach/head-mackerel.txt
··· 6 6 EW 0xE6020004, 0xA500 7 7 EW 0xE6030004, 0xA500 8 8 9 - DD 0x01001000, 0x01001000 10 - 11 9 LIST "GPIO Setting" 12 10 EB 0xE6051013, 0xA2 13 11 14 12 LIST "CPG" 15 - ED 0xE6150080, 0x00000180 16 13 ED 0xE61500C0, 0x00000002 17 14 18 15 WAIT 1, 0xFE40009C ··· 34 37 35 38 WAIT 1, 0xFE40009C 36 39 40 + LIST "SUB/USBClk" 41 + ED 0xE6150080, 0x00000180 42 + 37 43 LIST "BSC" 38 44 ED 0xFEC10000, 0x00E0001B 39 45 ··· 53 53 ED 0xFE40004C, 0x00110209 54 54 ED 0xFE400010, 0x00000087 55 55 56 - WAIT 10, 0xFE40009C 56 + WAIT 30, 0xFE40009C 57 57 58 58 ED 0xFE400084, 0x0000003F 59 59 EB 0xFE500000, 0x00 ··· 84 84 85 85 WAIT 1, 0xFE40009C 86 86 87 - ED 0xE6150354, 0x00000002 87 + ED 0xFE400354, 0x01AD8002 88 88 89 89 LIST "SCIF0 - Serial port for earlyprintk" 90 90 EB 0xE6053098, 0x11
+2
arch/arm/plat-samsung/dev-uart.c
··· 15 15 #include <linux/kernel.h> 16 16 #include <linux/platform_device.h> 17 17 18 + #include <plat/devs.h> 19 + 18 20 /* uart devices */ 19 21 20 22 static struct platform_device s3c24xx_uart_device0 = {
+12 -4
arch/blackfin/lib/outs.S
··· 13 13 .align 2 14 14 15 15 ENTRY(_outsl) 16 + CC = R2 == 0; 17 + IF CC JUMP 1f; 16 18 P0 = R0; /* P0 = port */ 17 19 P1 = R1; /* P1 = address */ 18 20 P2 = R2; /* P2 = count */ ··· 22 20 LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; 23 21 .Llong_loop_s: R0 = [P1++]; 24 22 .Llong_loop_e: [P0] = R0; 25 - RTS; 23 + 1: RTS; 26 24 ENDPROC(_outsl) 27 25 28 26 ENTRY(_outsw) 27 + CC = R2 == 0; 28 + IF CC JUMP 1f; 29 29 P0 = R0; /* P0 = port */ 30 30 P1 = R1; /* P1 = address */ 31 31 P2 = R2; /* P2 = count */ ··· 35 31 LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; 36 32 .Lword_loop_s: R0 = W[P1++]; 37 33 .Lword_loop_e: W[P0] = R0; 38 - RTS; 34 + 1: RTS; 39 35 ENDPROC(_outsw) 40 36 41 37 ENTRY(_outsb) 38 + CC = R2 == 0; 39 + IF CC JUMP 1f; 42 40 P0 = R0; /* P0 = port */ 43 41 P1 = R1; /* P1 = address */ 44 42 P2 = R2; /* P2 = count */ ··· 48 42 LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; 49 43 .Lbyte_loop_s: R0 = B[P1++]; 50 44 .Lbyte_loop_e: B[P0] = R0; 51 - RTS; 45 + 1: RTS; 52 46 ENDPROC(_outsb) 53 47 54 48 ENTRY(_outsw_8) 49 + CC = R2 == 0; 50 + IF CC JUMP 1f; 55 51 P0 = R0; /* P0 = port */ 56 52 P1 = R1; /* P1 = address */ 57 53 P2 = R2; /* P2 = count */ ··· 64 56 R0 = R0 << 8; 65 57 R0 = R0 + R1; 66 58 .Lword8_loop_e: W[P0] = R0; 67 - RTS; 59 + 1: RTS; 68 60 ENDPROC(_outsw_8)
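The four hunks above add the same guard: bail out when the caller passes a count of zero, before the hardware loop is armed with LC0 = 0. A minimal C sketch of the corrected behavior, using illustrative names rather than the kernel's I/O accessors:

#include <stdint.h>

/* Sketch of the guarded string-output loop: a zero count returns
 * immediately, mirroring the new "CC = R2 == 0; IF CC JUMP 1f". */
static void outsl_sketch(volatile uint32_t *port, const uint32_t *buf,
                         uint32_t count)
{
        if (count == 0)
                return;
        while (count--)
                *port = *buf++; /* fixed port address, advancing source */
}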
+2
arch/blackfin/mach-common/cache.S
··· 58 58 1: 59 59 .ifeqs "\flushins", BROK_FLUSH_INST 60 60 \flushins [P0++]; 61 + nop; 62 + nop; 61 63 2: nop; 62 64 .else 63 65 2: \flushins [P0++];
+16
arch/powerpc/include/asm/lppaca.h
··· 33 33 // 34 34 //---------------------------------------------------------------------------- 35 35 #include <linux/cache.h> 36 + #include <linux/threads.h> 36 37 #include <asm/types.h> 37 38 #include <asm/mmu.h> 39 + 40 + /* 41 + * We only have to have statically allocated lppaca structs on 42 + * legacy iSeries, which supports at most 64 cpus. 43 + */ 44 + #ifdef CONFIG_PPC_ISERIES 45 + #if NR_CPUS < 64 46 + #define NR_LPPACAS NR_CPUS 47 + #else 48 + #define NR_LPPACAS 64 49 + #endif 50 + #else /* not iSeries */ 51 + #define NR_LPPACAS 1 52 + #endif 53 + 38 54 39 55 /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k 40 56 * alignment is sufficient to prevent this */
+6
arch/powerpc/include/asm/machdep.h
··· 240 240 * claims to support kexec. 241 241 */ 242 242 int (*machine_kexec_prepare)(struct kimage *image); 243 + 244 + /* Called to perform the _real_ kexec. 245 + * Do NOT allocate memory or fail here. We are past the point of 246 + * no return. 247 + */ 248 + void (*machine_kexec)(struct kimage *image); 243 249 #endif /* CONFIG_KEXEC */ 244 250 245 251 #ifdef CONFIG_SUSPEND
+4 -1
arch/powerpc/kernel/machine_kexec.c
··· 87 87 88 88 save_ftrace_enabled = __ftrace_enabled_save(); 89 89 90 - default_machine_kexec(image); 90 + if (ppc_md.machine_kexec) 91 + ppc_md.machine_kexec(image); 92 + else 93 + default_machine_kexec(image); 91 94 92 95 __ftrace_enabled_restore(save_ftrace_enabled); 93 96
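Together with the machdep.h hunk above, this establishes an optional per-platform override with a generic fallback. A sketch of the dispatch pattern, with illustrative types standing in for the real machdep_calls:

struct kimage;  /* opaque here */

struct machdep_hooks_sketch {
        void (*machine_kexec)(struct kimage *image);    /* may be NULL */
};

static void default_machine_kexec_sketch(struct kimage *image)
{
        (void)image;    /* architecture-generic path, elided */
}

static void kexec_dispatch_sketch(struct machdep_hooks_sketch *md,
                                  struct kimage *image)
{
        if (md->machine_kexec)
                md->machine_kexec(image);       /* platform override */
        else
                default_machine_kexec_sketch(image);
}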
-14
arch/powerpc/kernel/paca.c
··· 27 27 #ifdef CONFIG_PPC_BOOK3S 28 28 29 29 /* 30 - * We only have to have statically allocated lppaca structs on 31 - * legacy iSeries, which supports at most 64 cpus. 32 - */ 33 - #ifdef CONFIG_PPC_ISERIES 34 - #if NR_CPUS < 64 35 - #define NR_LPPACAS NR_CPUS 36 - #else 37 - #define NR_LPPACAS 64 38 - #endif 39 - #else /* not iSeries */ 40 - #define NR_LPPACAS 1 41 - #endif 42 - 43 - /* 44 30 * The structure which the hypervisor knows about - this structure 45 31 * should not cross a page boundary. The vpa_init/register_vpa call 46 32 * is now known to fail if the lppaca structure crosses a page
+5 -3
arch/powerpc/kernel/process.c
··· 353 353 prime_debug_regs(new_thread); 354 354 } 355 355 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 356 + #ifndef CONFIG_HAVE_HW_BREAKPOINT 356 357 static void set_debug_reg_defaults(struct thread_struct *thread) 357 358 { 358 359 if (thread->dabr) { ··· 361 360 set_dabr(0); 362 361 } 363 362 } 363 + #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ 364 364 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 365 365 366 366 int set_dabr(unsigned long dabr) ··· 672 670 { 673 671 discard_lazy_cpu_state(); 674 672 675 - #ifdef CONFIG_HAVE_HW_BREAKPOINTS 673 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 676 674 flush_ptrace_hw_breakpoint(current); 677 - #else /* CONFIG_HAVE_HW_BREAKPOINTS */ 675 + #else /* CONFIG_HAVE_HW_BREAKPOINT */ 678 676 set_debug_reg_defaults(&current->thread); 679 - #endif /* CONFIG_HAVE_HW_BREAKPOINTS */ 677 + #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 680 678 } 681 679 682 680 void
+2 -1
arch/powerpc/mm/numa.c
··· 1516 1516 { 1517 1517 int rc = 0; 1518 1518 1519 - if (firmware_has_feature(FW_FEATURE_VPHN) && 1519 + /* Disabled until races with load balancing are fixed */ 1520 + if (0 && firmware_has_feature(FW_FEATURE_VPHN) && 1520 1521 get_lppaca()->shared_proc) { 1521 1522 vphn_enabled = 1; 1522 1523 setup_cpu_associativity_change_counters();
+3 -3
arch/powerpc/mm/tlb_hash64.c
··· 38 38 * needs to be flushed. This function will either perform the flush
 39 39 * immediately or will batch it up if the current CPU has an active
 40 40 * batch on it.
 41 - *
 42 - * Must be called from within some kind of spinlock/non-preempt region...
 43 41 */
 44 42 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 45 43 pte_t *ptep, unsigned long pte, int huge)
 46 44 {
 47 - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 45 + struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 48 46 unsigned long vsid, vaddr;
 49 47 unsigned int psize;
 50 48 int ssize;
··· 97 99 */
 98 100 if (!batch->active) {
 99 101 flush_hash_page(vaddr, rpte, psize, ssize, 0);
 102 + put_cpu_var(ppc64_tlb_batch);
 100 103 return;
 101 104 }
··· 126 127 batch->index = ++i;
 127 128 if (i >= PPC64_TLB_BATCH_NR)
 128 129 __flush_tlb_pending(batch);
 130 + put_cpu_var(ppc64_tlb_batch);
 129 131 }
 130 132 
 131 133 /*
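With get_cpu_var() the function now disables preemption itself instead of relying on the caller's spinlock, which is why the old locking comment is removed; the price is that every exit path must issue the matching put_cpu_var(). A sketch of the pairing, kernel-context, with the flush details elided:

static void batch_add_sketch(void)
{
        struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

        if (!batch->active) {
                /* flush the single entry directly (elided) */
                put_cpu_var(ppc64_tlb_batch);   /* early return needs the put */
                return;
        }
        /* ... queue the entry, flush when the batch is full ... */
        put_cpu_var(ppc64_tlb_batch);
}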
+3 -3
arch/powerpc/platforms/iseries/dt.c
··· 242 242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ 243 243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); 244 244 245 - for (i = 0; i < NR_CPUS; i++) { 246 - if (lppaca_of(i).dyn_proc_status >= 2) 245 + for (i = 0; i < NR_LPPACAS; i++) { 246 + if (lppaca[i].dyn_proc_status >= 2) 247 247 continue; 248 248 249 249 snprintf(p, 32 - (p - buf), "@%d", i); ··· 251 251 252 252 dt_prop_str(dt, "device_type", device_type_cpu); 253 253 254 - index = lppaca_of(i).dyn_hv_phys_proc_index; 254 + index = lppaca[i].dyn_hv_phys_proc_index; 255 255 d = &xIoHriProcessorVpd[index]; 256 256 257 257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
+1
arch/powerpc/platforms/iseries/setup.c
··· 680 680 * on but calling this function multiple times is fine. 681 681 */ 682 682 identify_cpu(0, mfspr(SPRN_PVR)); 683 + initialise_paca(&boot_paca, 0); 683 684 684 685 powerpc_firmware_features |= FW_FEATURE_ISERIES; 685 686 powerpc_firmware_features |= FW_FEATURE_LPAR;
+1 -1
arch/sh/include/asm/sections.h
··· 3 3 4 4 #include <asm-generic/sections.h> 5 5 6 - extern void __nosave_begin, __nosave_end; 6 + extern long __nosave_begin, __nosave_end; 7 7 extern long __machvec_start, __machvec_end; 8 8 extern char __uncached_start, __uncached_end; 9 9 extern char _ebss[];
+9 -4
arch/sh/kernel/cpu/sh4/setup-sh7750.c
··· 14 14 #include <linux/io.h> 15 15 #include <linux/sh_timer.h> 16 16 #include <linux/serial_sci.h> 17 - #include <asm/machtypes.h> 17 + #include <generated/machtypes.h> 18 18 19 19 static struct resource rtc_resources[] = { 20 20 [0] = { ··· 255 255 256 256 void __init plat_early_device_setup(void) 257 257 { 258 + struct platform_device *dev[1]; 259 + 258 260 if (mach_is_rts7751r2d()) { 259 261 scif_platform_data.scscr |= SCSCR_CKE1; 260 - early_platform_add_devices(&scif_device, 1); 262 + dev[0] = &scif_device; 263 + early_platform_add_devices(dev, 1); 261 264 } else { 262 - early_platform_add_devices(&sci_device, 1); 263 - early_platform_add_devices(&scif_device, 1); 265 + dev[0] = &sci_device; 266 + early_platform_add_devices(dev, 1); 267 + dev[0] = &scif_device; 268 + early_platform_add_devices(dev, 1); 264 269 } 265 270 266 271 early_platform_add_devices(sh7750_early_devices,
+10
arch/sh/lib/delay.c
··· 10 10 void __delay(unsigned long loops) 11 11 { 12 12 __asm__ __volatile__( 13 + /* 14 + * ST40-300 appears to have an issue with this code, 15 + * normally taking two cycles each loop, as with all 16 + * other SH variants. If however the branch and the 17 + * delay slot straddle an 8 byte boundary, this increases 18 + * to 3 cycles. 19 + * This align directive ensures this doesn't occur. 20 + */ 21 + ".balign 8\n\t" 22 + 13 23 "tst %0, %0\n\t" 14 24 "1:\t" 15 25 "bf/s 1b\n\t"
+2 -1
arch/sh/mm/cache.c
··· 108 108 kunmap_atomic(vfrom, KM_USER0); 109 109 } 110 110 111 - if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) 111 + if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || 112 + (vma->vm_flags & VM_EXEC)) 112 113 __flush_purge_region(vto, PAGE_SIZE); 113 114 114 115 kunmap_atomic(vto, KM_USER1);
+6 -1
arch/x86/boot/compressed/mkpiggy.c
··· 62 62 if (fseek(f, -4L, SEEK_END)) { 63 63 perror(argv[1]); 64 64 } 65 - fread(&olen, sizeof olen, 1, f); 65 + 66 + if (fread(&olen, sizeof(olen), 1, f) != 1) { 67 + perror(argv[1]); 68 + return 1; 69 + } 70 + 66 71 ilen = ftell(f); 67 72 olen = getle32(&olen); 68 73 fclose(f);
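fread() returns the number of complete items transferred, so any value other than 1 means the 4-byte trailer holding the uncompressed length was not read and the tool must fail instead of using garbage. The same defensive read in isolation, with an illustrative helper name:

#include <stdio.h>
#include <stdint.h>

static int read_len_trailer(FILE *f, uint32_t *olen)
{
        if (fseek(f, -4L, SEEK_END))
                return -1;      /* cannot locate the trailer */
        if (fread(olen, sizeof(*olen), 1, f) != 1)
                return -1;      /* short or failed read */
        return 0;
}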
+5
arch/x86/include/asm/msr-index.h
··· 36 36 #define MSR_IA32_PERFCTR1 0x000000c2 37 37 #define MSR_FSB_FREQ 0x000000cd 38 38 39 + #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 40 + #define NHM_C3_AUTO_DEMOTE (1UL << 25) 41 + #define NHM_C1_AUTO_DEMOTE (1UL << 26) 42 + #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) 43 + 39 44 #define MSR_MTRRcap 0x000000fe 40 45 #define MSR_IA32_BBL_CR_CTL 0x00000119 41 46
+1 -1
arch/x86/include/asm/uv/uv_bau.h
··· 176 176 struct bau_msg_header { 177 177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 178 178 /* bits 5:0 */ 179 - unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ 179 + unsigned int base_dest_nodeid:15; /* nasid of the */ 180 180 /* bits 20:6 */ /* first bit in uvhub map */ 181 181 unsigned int command:8; /* message type */ 182 182 /* bits 28:21 */
+3 -3
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
··· 158 158 { 159 159 if (c->x86 == 0x06) { 160 160 if (cpu_has(c, X86_FEATURE_EST)) 161 - printk(KERN_WARNING PFX "Warning: EST-capable CPU " 162 - "detected. The acpi-cpufreq module offers " 163 - "voltage scaling in addition of frequency " 161 + printk_once(KERN_WARNING PFX "Warning: EST-capable " 162 + "CPU detected. The acpi-cpufreq module offers " 163 + "voltage scaling in addition to frequency " 164 164 "scaling. You should use that instead of " 165 165 "p4-clockmod, if possible.\n"); 166 166 switch (c->x86_model) {
+1 -1
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
··· 195 195 cmd_incomplete: 196 196 iowrite16(0, &pcch_hdr->status); 197 197 spin_unlock(&pcc_lock); 198 - return -EINVAL; 198 + return 0; 199 199 } 200 200 201 201 static int pcc_cpufreq_target(struct cpufreq_policy *policy,
+10 -3
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 1537 1537 static int __cpuinit powernowk8_init(void) 1538 1538 { 1539 1539 unsigned int i, supported_cpus = 0, cpu; 1540 + int rv; 1540 1541 1541 1542 for_each_online_cpu(i) { 1542 1543 int rc; ··· 1556 1555 1557 1556 cpb_capable = true; 1558 1557 1559 - register_cpu_notifier(&cpb_nb); 1560 - 1561 1558 msrs = msrs_alloc(); 1562 1559 if (!msrs) { 1563 1560 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); 1564 1561 return -ENOMEM; 1565 1562 } 1563 + 1564 + register_cpu_notifier(&cpb_nb); 1566 1565 1567 1566 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1568 1567 ··· 1575 1574 (cpb_enabled ? "on" : "off")); 1576 1575 } 1577 1576 1578 - return cpufreq_register_driver(&cpufreq_amd64_driver); 1577 + rv = cpufreq_register_driver(&cpufreq_amd64_driver); 1578 + if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { 1579 + unregister_cpu_notifier(&cpb_nb); 1580 + msrs_free(msrs); 1581 + msrs = NULL; 1582 + } 1583 + return rv; 1579 1584 } 1580 1585 1581 1586 /* driver entry point for term */
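The reordering matters: the CPU notifier must not be registered before the MSR array it depends on exists, and if the final driver registration fails, both must be unwound in reverse order. A compilable skeleton of that shape with stubbed-out helpers (all names hypothetical):

#include <errno.h>

static int  alloc_private_state(void)        { return 0; }      /* stub */
static void free_private_state(void)         { }                /* stub */
static void register_notifier_sketch(void)   { }                /* stub */
static void unregister_notifier_sketch(void) { }                /* stub */
static int  register_driver_sketch(void)     { return 0; }      /* stub */

static int module_init_sketch(void)
{
        int rv;

        if (alloc_private_state())              /* e.g. msrs_alloc() */
                return -ENOMEM;
        register_notifier_sketch();             /* only after alloc succeeds */

        rv = register_driver_sketch();
        if (rv < 0) {
                unregister_notifier_sketch();   /* tear down in reverse order */
                free_private_state();
        }
        return rv;
}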
+1 -5
arch/x86/mm/numa_64.c
··· 780 780 int physnid; 781 781 int nid = NUMA_NO_NODE; 782 782 783 - apicid = early_per_cpu(x86_cpu_to_apicid, cpu); 784 - if (apicid != BAD_APICID) 785 - nid = apicid_to_node[apicid]; 786 - if (nid == NUMA_NO_NODE) 787 - nid = early_cpu_to_node(cpu); 783 + nid = early_cpu_to_node(cpu); 788 784 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); 789 785 790 786 /*
+1 -2
arch/x86/platform/olpc/olpc_dt.c
··· 140 140 * wasted bootmem) and hand off chunks of it to callers. 141 141 */ 142 142 res = alloc_bootmem(chunk_size); 143 - if (!res) 144 - return NULL; 143 + BUG_ON(!res); 145 144 prom_early_allocated += chunk_size; 146 145 memset(res, 0, chunk_size); 147 146 free_mem = chunk_size;
+2 -2
arch/x86/platform/uv/tlb_uv.c
··· 1364 1364 memset(bd2, 0, sizeof(struct bau_desc)); 1365 1365 bd2->header.sw_ack_flag = 1; 1366 1366 /* 1367 - * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub 1367 + * base_dest_nodeid is the nasid of the first uvhub 1368 1368 * in the partition. The bit map will indicate uvhub numbers, 1369 1369 * which are 0-N in a partition. Pnodes are unique system-wide. 1370 1370 */ 1371 - bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; 1371 + bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); 1372 1372 bd2->header.dest_subnodeid = 0x10; /* the LB */ 1373 1373 bd2->header.command = UV_NET_ENDPOINT_INTD; 1374 1374 bd2->header.int_both = 1;
+6 -12
block/blk-core.c
··· 352 352 WARN_ON(!irqs_disabled()); 353 353 354 354 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 355 - __blk_run_queue(q); 355 + __blk_run_queue(q, false); 356 356 } 357 357 EXPORT_SYMBOL(blk_start_queue); 358 358 ··· 403 403 /** 404 404 * __blk_run_queue - run a single device queue 405 405 * @q: The queue to run 406 + * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. 406 407 * 407 408 * Description: 408 409 * See @blk_run_queue. This variant must be called with the queue lock 409 410 * held and interrupts disabled. 410 411 * 411 412 */ 412 - void __blk_run_queue(struct request_queue *q) 413 + void __blk_run_queue(struct request_queue *q, bool force_kblockd) 413 414 { 414 415 blk_remove_plug(q); 415 416 ··· 424 423 * Only recurse once to avoid overrunning the stack, let the unplug 425 424 * handling reinvoke the handler shortly if we already got there. 426 425 */ 427 - if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 426 + if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 428 427 q->request_fn(q); 429 428 queue_flag_clear(QUEUE_FLAG_REENTER, q); 430 429 } else { ··· 447 446 unsigned long flags; 448 447 449 448 spin_lock_irqsave(q->queue_lock, flags); 450 - __blk_run_queue(q); 449 + __blk_run_queue(q, false); 451 450 spin_unlock_irqrestore(q->queue_lock, flags); 452 451 } 453 452 EXPORT_SYMBOL(blk_run_queue); ··· 1054 1053 1055 1054 drive_stat_acct(rq, 1); 1056 1055 __elv_add_request(q, rq, where, 0); 1057 - __blk_run_queue(q); 1056 + __blk_run_queue(q, false); 1058 1057 spin_unlock_irqrestore(q->queue_lock, flags); 1059 1058 } 1060 1059 EXPORT_SYMBOL(blk_insert_request); ··· 2610 2609 return queue_work(kblockd_workqueue, work); 2611 2610 } 2612 2611 EXPORT_SYMBOL(kblockd_schedule_work); 2613 - 2614 - int kblockd_schedule_delayed_work(struct request_queue *q, 2615 - struct delayed_work *dwork, unsigned long delay) 2616 - { 2617 - return queue_delayed_work(kblockd_workqueue, dwork, delay); 2618 - } 2619 - EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2620 2612 2621 2613 int __init blk_dev_init(void) 2622 2614 {
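The new flag gives callers in completion context a way to force deferral instead of recursing into the driver. A sketch of the resulting dispatch, reassembled from the visible hunk; the diff does not show the else branch, so the kblockd call below is an assumption about the pre-existing deferral path:

static void run_queue_sketch(struct request_queue *q, bool force_kblockd)
{
        blk_remove_plug(q);

        if (!force_kblockd &&
            !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);       /* direct call, recursion bounded */
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
                /* assumed deferral path: punt the work to kblockd */
                kblockd_schedule_work(q, &q->unplug_work);
        }
}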
+5 -3
block/blk-flush.c
··· 66 66 67 67 /* 68 68 * Moving a request silently to empty queue_head may stall the 69 - * queue. Kick the queue in those cases. 69 + * queue. Kick the queue in those cases. This function is called 70 + * from request completion path and calling directly into 71 + * request_fn may confuse the driver. Always use kblockd. 70 72 */ 71 73 if (was_empty && next_rq) 72 - __blk_run_queue(q); 74 + __blk_run_queue(q, true); 73 75 } 74 76 75 77 static void pre_flush_end_io(struct request *rq, int error) ··· 132 130 BUG(); 133 131 } 134 132 135 - elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 133 + elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); 136 134 return rq; 137 135 } 138 136
+1 -1
block/blk-lib.c
··· 132 132 }
 133 133 
 134 134 /**
 135 - * blkdev_issue_zeroout generate number of zero filled write bios
 135 + * blkdev_issue_zeroout - generate number of zero filled write bios
 136 136 * @bdev: blockdev to issue
 137 137 * @sector: start sector
 138 138 * @nr_sects: number of sectors to write
+18 -11
block/blk-throttle.c
··· 20 20 /* Throttling is performed over 100ms slice and after that slice is renewed */ 21 21 static unsigned long throtl_slice = HZ/10; /* 100 ms */ 22 22 23 + /* A workqueue to queue throttle related work */ 24 + static struct workqueue_struct *kthrotld_workqueue; 25 + static void throtl_schedule_delayed_work(struct throtl_data *td, 26 + unsigned long delay); 27 + 23 28 struct throtl_rb_root { 24 29 struct rb_root rb; 25 30 struct rb_node *left; ··· 350 345 update_min_dispatch_time(st); 351 346 352 347 if (time_before_eq(st->min_disptime, jiffies)) 353 - throtl_schedule_delayed_work(td->queue, 0); 348 + throtl_schedule_delayed_work(td, 0); 354 349 else 355 - throtl_schedule_delayed_work(td->queue, 356 - (st->min_disptime - jiffies)); 350 + throtl_schedule_delayed_work(td, (st->min_disptime - jiffies)); 357 351 } 358 352 359 353 static inline void ··· 819 815 } 820 816 821 817 /* Call with queue lock held */ 822 - void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) 818 + static void 819 + throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay) 823 820 { 824 821 825 - struct throtl_data *td = q->td; 826 822 struct delayed_work *dwork = &td->throtl_work; 827 823 828 824 if (total_nr_queued(td) > 0) { ··· 831 827 * Cancel that and schedule a new one. 832 828 */ 833 829 __cancel_delayed_work(dwork); 834 - kblockd_schedule_delayed_work(q, dwork, delay); 830 + queue_delayed_work(kthrotld_workqueue, dwork, delay); 835 831 throtl_log(td, "schedule work. delay=%lu jiffies=%lu", 836 832 delay, jiffies); 837 833 } 838 834 } 839 - EXPORT_SYMBOL(throtl_schedule_delayed_work); 840 835 841 836 static void 842 837 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) ··· 923 920 smp_mb__after_atomic_inc(); 924 921 925 922 /* Schedule a work now to process the limit change */ 926 - throtl_schedule_delayed_work(td->queue, 0); 923 + throtl_schedule_delayed_work(td, 0); 927 924 } 928 925 929 926 static void throtl_update_blkio_group_write_bps(void *key, ··· 937 934 smp_mb__before_atomic_inc(); 938 935 atomic_inc(&td->limits_changed); 939 936 smp_mb__after_atomic_inc(); 940 - throtl_schedule_delayed_work(td->queue, 0); 937 + throtl_schedule_delayed_work(td, 0); 941 938 } 942 939 943 940 static void throtl_update_blkio_group_read_iops(void *key, ··· 951 948 smp_mb__before_atomic_inc(); 952 949 atomic_inc(&td->limits_changed); 953 950 smp_mb__after_atomic_inc(); 954 - throtl_schedule_delayed_work(td->queue, 0); 951 + throtl_schedule_delayed_work(td, 0); 955 952 } 956 953 957 954 static void throtl_update_blkio_group_write_iops(void *key, ··· 965 962 smp_mb__before_atomic_inc(); 966 963 atomic_inc(&td->limits_changed); 967 964 smp_mb__after_atomic_inc(); 968 - throtl_schedule_delayed_work(td->queue, 0); 965 + throtl_schedule_delayed_work(td, 0); 969 966 } 970 967 971 968 void throtl_shutdown_timer_wq(struct request_queue *q) ··· 1138 1135 1139 1136 static int __init throtl_init(void) 1140 1137 { 1138 + kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); 1139 + if (!kthrotld_workqueue) 1140 + panic("Failed to create kthrotld\n"); 1141 + 1141 1142 blkio_policy_register(&blkio_policy_throtl); 1142 1143 return 0; 1143 1144 }
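Moving off kblockd onto a private kthrotld workqueue (and dropping the now-unused kblockd_schedule_delayed_work() in the blk-core.c hunk above) follows a standard pattern; here is a minimal kernel-style sketch with illustrative names, where WQ_MEM_RECLAIM guarantees a rescuer thread so the queue can make forward progress under memory pressure:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
        /* periodic throttle-style processing */
}
static DECLARE_DELAYED_WORK(example_work, example_fn);

static int __init example_init(void)
{
        example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
        if (!example_wq)
                return -ENOMEM;
        queue_delayed_work(example_wq, &example_work, HZ / 10);
        return 0;
}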
+3 -3
block/cfq-iosched.c
··· 3355 3355 cfqd->busy_queues > 1) { 3356 3356 cfq_del_timer(cfqd, cfqq); 3357 3357 cfq_clear_cfqq_wait_request(cfqq); 3358 - __blk_run_queue(cfqd->queue); 3358 + __blk_run_queue(cfqd->queue, false); 3359 3359 } else { 3360 3360 cfq_blkiocg_update_idle_time_stats( 3361 3361 &cfqq->cfqg->blkg); ··· 3370 3370 * this new queue is RT and the current one is BE 3371 3371 */ 3372 3372 cfq_preempt_queue(cfqd, cfqq); 3373 - __blk_run_queue(cfqd->queue); 3373 + __blk_run_queue(cfqd->queue, false); 3374 3374 } 3375 3375 } 3376 3376 ··· 3731 3731 struct request_queue *q = cfqd->queue; 3732 3732 3733 3733 spin_lock_irq(q->queue_lock); 3734 - __blk_run_queue(cfqd->queue); 3734 + __blk_run_queue(cfqd->queue, false); 3735 3735 spin_unlock_irq(q->queue_lock); 3736 3736 } 3737 3737
+2 -2
block/elevator.c
··· 602 602 */ 603 603 elv_drain_elevator(q); 604 604 while (q->rq.elvpriv) { 605 - __blk_run_queue(q); 605 + __blk_run_queue(q, false); 606 606 spin_unlock_irq(q->queue_lock); 607 607 msleep(10); 608 608 spin_lock_irq(q->queue_lock); ··· 651 651 * with anything. There's no point in delaying queue 652 652 * processing. 653 653 */ 654 - __blk_run_queue(q); 654 + __blk_run_queue(q, false); 655 655 break; 656 656 657 657 case ELEVATOR_INSERT_SORT:
+6 -1
drivers/acpi/acpica/aclocal.h
··· 416 416 u8 originally_enabled; /* True if GPE was originally enabled */ 417 417 }; 418 418 419 + struct acpi_gpe_notify_object { 420 + struct acpi_namespace_node *node; 421 + struct acpi_gpe_notify_object *next; 422 + }; 423 + 419 424 union acpi_gpe_dispatch_info { 420 425 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 421 426 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ 422 - struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ 427 + struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */ 423 428 }; 424 429 425 430 /*
+13 -4
drivers/acpi/acpica/evgpe.c
··· 457 457 acpi_status status; 458 458 struct acpi_gpe_event_info *local_gpe_event_info; 459 459 struct acpi_evaluate_info *info; 460 + struct acpi_gpe_notify_object *notify_object; 460 461 461 462 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 462 463 ··· 509 508 * from this thread -- because handlers may in turn run other 510 509 * control methods. 511 510 */ 512 - status = 513 - acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. 514 - device_node, 515 - ACPI_NOTIFY_DEVICE_WAKE); 511 + status = acpi_ev_queue_notify_request( 512 + local_gpe_event_info->dispatch.device.node, 513 + ACPI_NOTIFY_DEVICE_WAKE); 514 + 515 + notify_object = local_gpe_event_info->dispatch.device.next; 516 + while (ACPI_SUCCESS(status) && notify_object) { 517 + status = acpi_ev_queue_notify_request( 518 + notify_object->node, 519 + ACPI_NOTIFY_DEVICE_WAKE); 520 + notify_object = notify_object->next; 521 + } 522 + 516 523 break; 517 524 518 525 case ACPI_GPE_DISPATCH_METHOD:
+37 -13
drivers/acpi/acpica/evxfgpe.c
··· 198 198 acpi_status status = AE_BAD_PARAMETER; 199 199 struct acpi_gpe_event_info *gpe_event_info; 200 200 struct acpi_namespace_node *device_node; 201 + struct acpi_gpe_notify_object *notify_object; 201 202 acpi_cpu_flags flags; 203 + u8 gpe_dispatch_mask; 202 204 203 205 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); 204 206 ··· 223 221 goto unlock_and_exit; 224 222 } 225 223 224 + if (wake_device == ACPI_ROOT_OBJECT) { 225 + goto out; 226 + } 227 + 226 228 /* 227 229 * If there is no method or handler for this GPE, then the 228 230 * wake_device will be notified whenever this GPE fires (aka 229 231 * "implicit notify") Note: The GPE is assumed to be 230 232 * level-triggered (for windows compatibility). 231 233 */ 232 - if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 233 - ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { 234 - 235 - /* Validate wake_device is of type Device */ 236 - 237 - device_node = ACPI_CAST_PTR(struct acpi_namespace_node, 238 - wake_device); 239 - if (device_node->type != ACPI_TYPE_DEVICE) { 240 - goto unlock_and_exit; 241 - } 242 - gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | 243 - ACPI_GPE_LEVEL_TRIGGERED); 244 - gpe_event_info->dispatch.device_node = device_node; 234 + gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK; 235 + if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE 236 + && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) { 237 + goto out; 245 238 } 246 239 240 + /* Validate wake_device is of type Device */ 241 + 242 + device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); 243 + if (device_node->type != ACPI_TYPE_DEVICE) { 244 + goto unlock_and_exit; 245 + } 246 + 247 + if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) { 248 + gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | 249 + ACPI_GPE_LEVEL_TRIGGERED); 250 + gpe_event_info->dispatch.device.node = device_node; 251 + gpe_event_info->dispatch.device.next = NULL; 252 + } else { 253 + /* There are multiple devices to notify implicitly. */ 254 + 255 + notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object)); 256 + if (!notify_object) { 257 + status = AE_NO_MEMORY; 258 + goto unlock_and_exit; 259 + } 260 + 261 + notify_object->node = device_node; 262 + notify_object->next = gpe_event_info->dispatch.device.next; 263 + gpe_event_info->dispatch.device.next = notify_object; 264 + } 265 + 266 + out: 247 267 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 248 268 status = AE_OK; 249 269
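The structural change is that dispatch.device is now the head of a singly linked list: the first implicit-notify device lives in the embedded node, and each additional device is prepended behind it. The generic insertion in isolation, with simplified stand-in types and calloc standing in for ACPI_ALLOCATE_ZEROED:

#include <stdlib.h>

struct notify_node {
        void *device;
        struct notify_node *next;
};

/* head is the embedded first entry; returns 0 on success, -1 on OOM */
static int add_implicit_notify(struct notify_node *head, void *device)
{
        struct notify_node *n = calloc(1, sizeof(*n));

        if (!n)
                return -1;
        n->device = device;
        n->next = head->next;   /* insert right behind the embedded entry */
        head->next = n;
        return 0;
}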
+14 -6
drivers/acpi/debugfs.c
··· 26 26 size_t count, loff_t *ppos) 27 27 { 28 28 static char *buf; 29 - static int uncopied_bytes; 29 + static u32 max_size; 30 + static u32 uncopied_bytes; 31 + 30 32 struct acpi_table_header table; 31 33 acpi_status status; 32 34 ··· 39 37 if (copy_from_user(&table, user_buf, 40 38 sizeof(struct acpi_table_header))) 41 39 return -EFAULT; 42 - uncopied_bytes = table.length; 43 - buf = kzalloc(uncopied_bytes, GFP_KERNEL); 40 + uncopied_bytes = max_size = table.length; 41 + buf = kzalloc(max_size, GFP_KERNEL); 44 42 if (!buf) 45 43 return -ENOMEM; 46 44 } 47 45 48 - if (uncopied_bytes < count) { 49 - kfree(buf); 46 + if (buf == NULL) 50 47 return -EINVAL; 51 - } 48 + 49 + if ((*ppos > max_size) || 50 + (*ppos + count > max_size) || 51 + (*ppos + count < count) || 52 + (count > uncopied_bytes)) 53 + return -EINVAL; 52 54 53 55 if (copy_from_user(buf + (*ppos), user_buf, count)) { 54 56 kfree(buf); 57 + buf = NULL; 55 58 return -EFAULT; 56 59 } 57 60 ··· 66 59 if (!uncopied_bytes) { 67 60 status = acpi_install_method(buf); 68 61 kfree(buf); 62 + buf = NULL; 69 63 if (ACPI_FAILURE(status)) 70 64 return -EINVAL; 71 65 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
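The added checks validate a user-controlled (*ppos, count) pair on every chunked write: within the table length declared in the header, no integer wraparound, and no more than the bytes still expected. The predicate extracted in isolation (helper name illustrative):

#include <stddef.h>
#include <stdint.h>

static int write_range_ok(size_t pos, size_t count,
                          uint32_t max_size, uint32_t uncopied)
{
        if (pos > max_size || pos + count > max_size)
                return 0;       /* beyond the declared table length */
        if (pos + count < count)
                return 0;       /* arithmetic wrapped around */
        if (count > uncopied)
                return 0;       /* more than the remaining expected bytes */
        return 1;
}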
-5
drivers/block/loop.c
··· 78 78 79 79 #include <asm/uaccess.h> 80 80 81 - static DEFINE_MUTEX(loop_mutex); 82 81 static LIST_HEAD(loop_devices); 83 82 static DEFINE_MUTEX(loop_devices_mutex); 84 83 ··· 1500 1501 { 1501 1502 struct loop_device *lo = bdev->bd_disk->private_data; 1502 1503 1503 - mutex_lock(&loop_mutex); 1504 1504 mutex_lock(&lo->lo_ctl_mutex); 1505 1505 lo->lo_refcnt++; 1506 1506 mutex_unlock(&lo->lo_ctl_mutex); 1507 - mutex_unlock(&loop_mutex); 1508 1507 1509 1508 return 0; 1510 1509 } ··· 1512 1515 struct loop_device *lo = disk->private_data; 1513 1516 int err; 1514 1517 1515 - mutex_lock(&loop_mutex); 1516 1518 mutex_lock(&lo->lo_ctl_mutex); 1517 1519 1518 1520 if (--lo->lo_refcnt) ··· 1536 1540 out: 1537 1541 mutex_unlock(&lo->lo_ctl_mutex); 1538 1542 out_unlocked: 1539 - mutex_unlock(&loop_mutex); 1540 1543 return 0; 1541 1544 } 1542 1545
+8
drivers/char/ipmi/ipmi_si_intf.c
··· 900 900 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 901 901 #endif
 902 902 
 903 + /*
 904 + * last_timeout_jiffies is updated here to avoid the
 905 + * smi_timeout() handler passing a very large time_diff
 906 + * value to smi_event_handler(), which causes
 907 + * the send command to abort.
 908 + */
 909 + smi_info->last_timeout_jiffies = jiffies;
 910 + 
 903 911 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
 904 912 
 905 913 if (smi_info->thread)
+8
drivers/char/virtio_console.c
··· 388 388 unsigned int len; 389 389 int ret; 390 390 391 + if (!port->portdev) { 392 + /* Device has been unplugged. vqs are already gone. */ 393 + return; 394 + } 391 395 vq = port->in_vq; 392 396 if (port->inbuf) 393 397 buf = port->inbuf; ··· 474 470 void *buf; 475 471 unsigned int len; 476 472 473 + if (!port->portdev) { 474 + /* Device has been unplugged. vqs are already gone. */ 475 + return; 476 + } 477 477 while ((buf = virtqueue_get_buf(port->out_vq, &len))) { 478 478 kfree(buf); 479 479 port->outvq_full = false;
+15 -12
drivers/cpufreq/cpufreq.c
··· 1919 1919 1920 1920 ret = sysdev_driver_register(&cpu_sysdev_class, 1921 1921 &cpufreq_sysdev_driver); 1922 + if (ret) 1923 + goto err_null_driver; 1922 1924 1923 - if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1925 + if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1924 1926 int i; 1925 1927 ret = -ENODEV; 1926 1928 ··· 1937 1935 if (ret) { 1938 1936 dprintk("no CPU initialized for driver %s\n", 1939 1937 driver_data->name); 1940 - sysdev_driver_unregister(&cpu_sysdev_class, 1941 - &cpufreq_sysdev_driver); 1942 - 1943 - spin_lock_irqsave(&cpufreq_driver_lock, flags); 1944 - cpufreq_driver = NULL; 1945 - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1938 + goto err_sysdev_unreg; 1946 1939 } 1947 1940 } 1948 1941 1949 - if (!ret) { 1950 - register_hotcpu_notifier(&cpufreq_cpu_notifier); 1951 - dprintk("driver %s up and running\n", driver_data->name); 1952 - cpufreq_debug_enable_ratelimit(); 1953 - } 1942 + register_hotcpu_notifier(&cpufreq_cpu_notifier); 1943 + dprintk("driver %s up and running\n", driver_data->name); 1944 + cpufreq_debug_enable_ratelimit(); 1954 1945 1946 + return 0; 1947 + err_sysdev_unreg: 1948 + sysdev_driver_unregister(&cpu_sysdev_class, 1949 + &cpufreq_sysdev_driver); 1950 + err_null_driver: 1951 + spin_lock_irqsave(&cpufreq_driver_lock, flags); 1952 + cpufreq_driver = NULL; 1953 + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1955 1954 return ret; 1956 1955 } 1957 1956 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
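The rewrite replaces nested "if (!ret)" success chains with the kernel's usual goto unwind ladder, where each label undoes exactly the steps completed so far. Reduced to a compilable skeleton with stubbed helpers (all names hypothetical):

static int  step_register_sysdev(void) { return 0; }    /* stub */
static void undo_register_sysdev(void) { }              /* stub */
static int  step_init_cpus(void)       { return 0; }    /* stub */
static void clear_driver_pointer(void) { }              /* stub */

static int register_driver_ladder(void)
{
        int ret;

        ret = step_register_sysdev();
        if (ret)
                goto err_null_driver;

        ret = step_init_cpus();
        if (ret)
                goto err_sysdev_unreg;

        return 0;

err_sysdev_unreg:
        undo_register_sysdev();
err_null_driver:
        clear_driver_pointer();
        return ret;
}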
+2 -2
drivers/gpu/drm/drm_fb_helper.c
··· 677 677 struct drm_crtc_helper_funcs *crtc_funcs; 678 678 u16 *red, *green, *blue, *transp; 679 679 struct drm_crtc *crtc; 680 - int i, rc = 0; 680 + int i, j, rc = 0; 681 681 int start; 682 682 683 683 for (i = 0; i < fb_helper->crtc_count; i++) { ··· 690 690 transp = cmap->transp; 691 691 start = cmap->start; 692 692 693 - for (i = 0; i < cmap->len; i++) { 693 + for (j = 0; j < cmap->len; j++) { 694 694 u16 hred, hgreen, hblue, htransp = 0xffff; 695 695 696 696 hred = *red++;
+10
drivers/gpu/drm/i915/i915_reg.h
··· 1566 1566 1567 1567 /* Backlight control */ 1568 1568 #define BLC_PWM_CTL 0x61254 1569 + #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 1569 1570 #define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1571 + #define BLM_COMBINATION_MODE (1 << 30) 1572 + /* 1573 + * This is the most significant 15 bits of the number of backlight cycles in a 1574 + * complete cycle of the modulated backlight control. 1575 + * 1576 + * The actual value is this field multiplied by two. 1577 + */ 1578 + #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) 1579 + #define BLM_LEGACY_MODE (1 << 16) 1570 1580 /* 1571 1581 * This is the number of cycles out of the backlight modulation cycle for which 1572 1582 * the backlight is on.
+36
drivers/gpu/drm/i915/intel_panel.c
··· 30 30 31 31 #include "intel_drv.h" 32 32 33 + #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 34 + 33 35 void 34 36 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 35 37 struct drm_display_mode *adjusted_mode) ··· 112 110 dev_priv->pch_pf_size = (width << 16) | height; 113 111 } 114 112 113 + static int is_backlight_combination_mode(struct drm_device *dev) 114 + { 115 + struct drm_i915_private *dev_priv = dev->dev_private; 116 + 117 + if (INTEL_INFO(dev)->gen >= 4) 118 + return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; 119 + 120 + if (IS_GEN2(dev)) 121 + return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; 122 + 123 + return 0; 124 + } 125 + 115 126 static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 116 127 { 117 128 u32 val; ··· 181 166 if (INTEL_INFO(dev)->gen < 4) 182 167 max &= ~1; 183 168 } 169 + 170 + if (is_backlight_combination_mode(dev)) 171 + max *= 0xff; 184 172 } 185 173 186 174 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); ··· 201 183 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 202 184 if (IS_PINEVIEW(dev)) 203 185 val >>= 1; 186 + 187 + if (is_backlight_combination_mode(dev)){ 188 + u8 lbpc; 189 + 190 + val &= ~1; 191 + pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); 192 + val *= lbpc; 193 + } 204 194 } 205 195 206 196 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); ··· 231 205 232 206 if (HAS_PCH_SPLIT(dev)) 233 207 return intel_pch_panel_set_backlight(dev, level); 208 + 209 + if (is_backlight_combination_mode(dev)){ 210 + u32 max = intel_panel_get_max_backlight(dev); 211 + u8 lbpc; 212 + 213 + lbpc = level * 0xfe / max + 1; 214 + level /= lbpc; 215 + pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); 216 + } 217 + 234 218 tmp = I915_READ(BLC_PWM_CTL); 235 219 if (IS_PINEVIEW(dev)) { 236 220 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
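In combination mode the effective brightness is the product of the PCI LBPC byte and the PWM duty cycle, so setting a level means factoring it into the two fields. The arithmetic from the hunk in isolation; mapping lbpc into 1..0xff keeps the later division well defined (assumes max > 0, names illustrative):

#include <stdint.h>

static void split_backlight_level(uint32_t level, uint32_t max,
                                  uint8_t *lbpc, uint32_t *pwm_duty)
{
        *lbpc = level * 0xfe / max + 1; /* coarse scale, never zero */
        *pwm_duty = level / *lbpc;      /* fine remainder for the PWM field */
}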
+2 -1
drivers/gpu/drm/nouveau/nouveau_dma.c
··· 83 83 return ret; 84 84 85 85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 86 - ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); 86 + ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, 87 + &chan->m2mf_ntfy); 87 88 if (ret) 88 89 return ret; 89 90
+2 -1
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 853 853 extern int nouveau_notifier_init_channel(struct nouveau_channel *); 854 854 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 855 855 extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, 856 - int cout, uint32_t *offset); 856 + int cout, uint32_t start, uint32_t end, 857 + uint32_t *offset); 857 858 extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); 858 859 extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, 859 860 struct drm_file *);
+4 -2
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 759 759 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 760 760 mem->page_alignment << PAGE_SHIFT, size_nc, 761 761 (nvbo->tile_flags >> 8) & 0x3ff, &node); 762 - if (ret) 763 - return ret; 762 + if (ret) { 763 + mem->mm_node = NULL; 764 + return (ret == -ENOSPC) ? 0 : ret; 765 + } 764 766 765 767 node->page_shift = 12; 766 768 if (nvbo->vma.node)
+1 -1
drivers/gpu/drm/nouveau/nouveau_mm.c
··· 123 123 return 0; 124 124 } 125 125 126 - return -ENOMEM; 126 + return -ENOSPC; 127 127 } 128 128 129 129 int
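Taken together, the two nouveau hunks define an error convention: the low-level allocator reports -ENOSPC when the heap is merely full, and the TTM-facing layer translates that into success with mem->mm_node left NULL so TTM can evict and retry, while genuine errors still propagate. A sketch with simplified types (allocator_get is a hypothetical stand-in):

#include <errno.h>

struct mem_reg_sketch {
        void *mm_node;
};

extern int allocator_get(struct mem_reg_sketch *mem); /* -ENOSPC when full */

static int backend_alloc_sketch(struct mem_reg_sketch *mem)
{
        int ret = allocator_get(mem);

        if (ret) {
                mem->mm_node = NULL;                    /* nothing placed here */
                return (ret == -ENOSPC) ? 0 : ret;      /* full is not failure */
        }
        return 0;
}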
+7 -4
drivers/gpu/drm/nouveau/nouveau_notifier.c
··· 95 95 96 96 int 97 97 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, 98 - int size, uint32_t *b_offset) 98 + int size, uint32_t start, uint32_t end, 99 + uint32_t *b_offset) 99 100 { 100 101 struct drm_device *dev = chan->dev; 101 102 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 105 104 uint32_t offset; 106 105 int target, ret; 107 106 108 - mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); 107 + mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, 108 + start, end, 0); 109 109 if (mem) 110 - mem = drm_mm_get_block(mem, size, 0); 110 + mem = drm_mm_get_block_range(mem, size, 0, start, end); 111 111 if (!mem) { 112 112 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); 113 113 return -ENOMEM; ··· 184 182 if (IS_ERR(chan)) 185 183 return PTR_ERR(chan); 186 184 187 - ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); 185 + ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, 186 + &na->offset); 188 187 nouveau_channel_put(&chan); 189 188 return ret; 190 189 }
+8
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 403 403 void 404 404 nv50_instmem_flush(struct drm_device *dev) 405 405 { 406 + struct drm_nouveau_private *dev_priv = dev->dev_private; 407 + 408 + spin_lock(&dev_priv->ramin_lock); 406 409 nv_wr32(dev, 0x00330c, 0x00000001); 407 410 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 408 411 NV_ERROR(dev, "PRAMIN flush timeout\n"); 412 + spin_unlock(&dev_priv->ramin_lock); 409 413 } 410 414 411 415 void 412 416 nv84_instmem_flush(struct drm_device *dev) 413 417 { 418 + struct drm_nouveau_private *dev_priv = dev->dev_private; 419 + 420 + spin_lock(&dev_priv->ramin_lock); 414 421 nv_wr32(dev, 0x070000, 0x00000001); 415 422 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 416 423 NV_ERROR(dev, "PRAMIN flush timeout\n"); 424 + spin_unlock(&dev_priv->ramin_lock); 417 425 } 418 426
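Each flush is a trigger write followed by a poll on a completion bit; without the lock, two threads could interleave their trigger writes and then time out waiting on each other's completion. The serialized shape, reusing the nouveau helpers visible in the hunk (lock name illustrative):

static DEFINE_SPINLOCK(flush_lock_sketch);

static void flush_sketch(struct drm_device *dev, u32 reg)
{
        spin_lock(&flush_lock_sketch);
        nv_wr32(dev, reg, 0x00000001);          /* trigger the flush */
        if (!nv_wait(dev, reg, 0x00000002, 0x00000000))
                NV_ERROR(dev, "flush timeout\n");       /* bounded poll */
        spin_unlock(&flush_lock_sketch);
}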
+4
drivers/gpu/drm/nouveau/nv50_vm.c
··· 173 173 void 174 174 nv50_vm_flush_engine(struct drm_device *dev, int engine) 175 175 { 176 + struct drm_nouveau_private *dev_priv = dev->dev_private; 177 + 178 + spin_lock(&dev_priv->ramin_lock); 176 179 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 177 180 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 178 181 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 182 + spin_unlock(&dev_priv->ramin_lock); 179 183 }
+1 -2
drivers/gpu/drm/radeon/evergreen.c
··· 2194 2194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2195 2195 } 2196 2196 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2197 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2198 2197 r700_vram_gtt_location(rdev, &rdev->mc); 2199 2198 radeon_update_bandwidth_info(rdev); 2200 2199 ··· 2933 2934 /* XXX: ontario has problems blitting to gart at the moment */ 2934 2935 if (rdev->family == CHIP_PALM) { 2935 2936 rdev->asic->copy = NULL; 2936 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2937 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2937 2938 } 2938 2939 2939 2940 /* allocate wb buffer */
+2 -2
drivers/gpu/drm/radeon/evergreen_blit_kms.c
··· 623 623 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 624 624 return r; 625 625 } 626 - rdev->mc.active_vram_size = rdev->mc.real_vram_size; 626 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 627 627 return 0; 628 628 } 629 629 ··· 631 631 { 632 632 int r; 633 633 634 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 634 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 635 635 if (rdev->r600_blit.shader_obj == NULL) 636 636 return; 637 637 /* If we can't reserve the bo, unref should be enough to destroy
+1 -1
drivers/gpu/drm/radeon/ni.c
··· 1039 1039 if (enable) 1040 1040 WREG32(CP_ME_CNTL, 0); 1041 1041 else { 1042 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1042 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1043 1043 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 1044 1044 WREG32(SCRATCH_UMSK, 0); 1045 1045 }
+2 -20
drivers/gpu/drm/radeon/r100.c
··· 70 70 71 71 void r100_pre_page_flip(struct radeon_device *rdev, int crtc) 72 72 { 73 - struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; 74 - u32 tmp; 75 - 76 - /* make sure flip is at vb rather than hb */ 77 - tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); 78 - tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; 79 - /* make sure pending bit is asserted */ 80 - tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; 81 - WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); 82 - 83 - /* set pageflip to happen as late as possible in the vblank interval. 84 - * same field for crtc1/2 85 - */ 86 - tmp = RREG32(RADEON_CRTC_GEN_CNTL); 87 - tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK; 88 - WREG32(RADEON_CRTC_GEN_CNTL, tmp); 89 - 90 73 /* enable the pflip int */ 91 74 radeon_irq_kms_pflip_irq_get(rdev, crtc); 92 75 } ··· 1024 1041 return r; 1025 1042 } 1026 1043 rdev->cp.ready = true; 1027 - rdev->mc.active_vram_size = rdev->mc.real_vram_size; 1044 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1028 1045 return 0; 1029 1046 } 1030 1047 ··· 1042 1059 void r100_cp_disable(struct radeon_device *rdev) 1043 1060 { 1044 1061 /* Disable ring */ 1045 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1062 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1046 1063 rdev->cp.ready = false; 1047 1064 WREG32(RADEON_CP_CSQ_MODE, 0); 1048 1065 WREG32(RADEON_CP_CSQ_CNTL, 0); ··· 2312 2329 /* FIXME we don't use the second aperture yet when we could use it */ 2313 2330 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2314 2331 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2315 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2316 2332 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2317 2333 if (rdev->flags & RADEON_IS_IGP) { 2318 2334 uint32_t tom;
+1 -2
drivers/gpu/drm/radeon/r600.c
··· 1256 1256 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1257 1257 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1258 1258 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1259 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1260 1259 r600_vram_gtt_location(rdev, &rdev->mc); 1261 1260 1262 1261 if (rdev->flags & RADEON_IS_IGP) { ··· 1937 1938 */ 1938 1939 void r600_cp_stop(struct radeon_device *rdev) 1939 1940 { 1940 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1941 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1941 1942 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1942 1943 WREG32(SCRATCH_UMSK, 0); 1943 1944 }
+2 -2
drivers/gpu/drm/radeon/r600_blit_kms.c
··· 558 558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 559 559 return r; 560 560 } 561 - rdev->mc.active_vram_size = rdev->mc.real_vram_size; 561 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 562 562 return 0; 563 563 } 564 564 ··· 566 566 { 567 567 int r; 568 568 569 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 569 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 570 570 if (rdev->r600_blit.shader_obj == NULL) 571 571 return; 572 572 /* If we can't reserve the bo, unref should be enough to destroy
+1 -1
drivers/gpu/drm/radeon/radeon.h
··· 357 357 * about vram size near mc fb location */ 358 358 u64 mc_vram_size; 359 359 u64 visible_vram_size; 360 - u64 active_vram_size; 361 360 u64 gtt_size; 362 361 u64 gtt_start; 363 362 u64 gtt_end; ··· 1491 1492 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1492 1493 extern int radeon_resume_kms(struct drm_device *dev); 1493 1494 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1495 + extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1494 1496 1495 1497 /* 1496 1498 * r600 functions used by radeon_encoder.c
+3
drivers/gpu/drm/radeon/radeon_asic.c
··· 834 834 .pm_finish = &evergreen_pm_finish, 835 835 .pm_init_profile = &rs780_pm_init_profile, 836 836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 837 + .pre_page_flip = &evergreen_pre_page_flip, 838 + .page_flip = &evergreen_page_flip, 839 + .post_page_flip = &evergreen_post_page_flip, 837 840 }; 838 841 839 842 static struct radeon_asic btc_asic = {
+4 -1
drivers/gpu/drm/radeon/radeon_gem.c
··· 151 151 { 152 152 struct radeon_device *rdev = dev->dev_private; 153 153 struct drm_radeon_gem_info *args = data; 154 + struct ttm_mem_type_manager *man; 155 + 156 + man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 154 157 155 158 args->vram_size = rdev->mc.real_vram_size; 156 - args->vram_visible = rdev->mc.real_vram_size; 159 + args->vram_visible = (u64)man->size << PAGE_SHIFT; 157 160 if (rdev->stollen_vga_memory) 158 161 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 159 162 args->vram_visible -= radeon_fbdev_total_size(rdev);
+2 -1
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
··· 443 443 (target_fb->bits_per_pixel * 8)); 444 444 crtc_pitch |= crtc_pitch << 16; 445 445 446 - 446 + crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; 447 447 if (tiling_flags & RADEON_TILING_MACRO) { 448 448 if (ASIC_IS_R300(rdev)) 449 449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | ··· 502 502 gen_cntl_val = RREG32(gen_cntl_reg); 503 503 gen_cntl_val &= ~(0xf << 8); 504 504 gen_cntl_val |= (format << 8); 505 + gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK; 505 506 WREG32(gen_cntl_reg, gen_cntl_val); 506 507 507 508 crtc_offset = (u32)base;
+14
drivers/gpu/drm/radeon/radeon_ttm.c
··· 589 589 DRM_INFO("radeon: ttm finalized\n"); 590 590 } 591 591 592 + /* this should only be called at bootup or when userspace 593 + * isn't running */ 594 + void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) 595 + { 596 + struct ttm_mem_type_manager *man; 597 + 598 + if (!rdev->mman.initialized) 599 + return; 600 + 601 + man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 602 + /* this just adjusts TTM size idea, which sets lpfn to the correct value */ 603 + man->size = size >> PAGE_SHIFT; 604 + } 605 + 592 606 static struct vm_operations_struct radeon_ttm_vm_ops; 593 607 static const struct vm_operations_struct *ttm_vm_ops = NULL; 594 608
-1
drivers/gpu/drm/radeon/rs600.c
··· 751 751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 752 752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 753 753 rdev->mc.visible_vram_size = rdev->mc.aper_size; 754 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 755 754 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 756 755 base = RREG32_MC(R_000004_MC_FB_LOCATION); 757 756 base = G_000004_MC_FB_START(base) << 16;
-1
drivers/gpu/drm/radeon/rs690.c
··· 157 157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 158 158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 159 159 rdev->mc.visible_vram_size = rdev->mc.aper_size; 160 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 161 160 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 162 161 base = G_000100_MC_FB_START(base) << 16; 163 162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+1 -2
drivers/gpu/drm/radeon/rv770.c
··· 307 307 */ 308 308 void r700_cp_stop(struct radeon_device *rdev) 309 309 { 310 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 310 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 311 311 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 312 312 WREG32(SCRATCH_UMSK, 0); 313 313 } ··· 1123 1123 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1124 1124 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1125 1125 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1126 - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1127 1126 r700_vram_gtt_location(rdev, &rdev->mc); 1128 1127 radeon_update_bandwidth_info(rdev); 1129 1128
+1
drivers/i2c/busses/i2c-eg20t.c
··· 29 29 #include <linux/pci.h> 30 30 #include <linux/mutex.h> 31 31 #include <linux/ktime.h> 32 + #include <linux/slab.h> 32 33 33 34 #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ 34 35 #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
+1 -1
drivers/i2c/busses/i2c-ocores.c
··· 249 249 static int ocores_i2c_of_probe(struct platform_device* pdev, 250 250 struct ocores_i2c* i2c) 251 251 { 252 - __be32* val; 252 + const __be32* val; 253 253 254 254 val = of_get_property(pdev->dev.of_node, "regstep", NULL); 255 255 if (!val) {
+1 -3
drivers/i2c/busses/i2c-omap.c
··· 378 378 * REVISIT: Some wkup sources might not be needed. 379 379 */ 380 380 dev->westate = OMAP_I2C_WE_ALL; 381 - if (dev->rev < OMAP_I2C_REV_ON_4430) 382 - omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, 383 - dev->westate); 381 + omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); 384 382 } 385 383 } 386 384 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+24
drivers/idle/intel_idle.c
··· 62 62 #include <linux/notifier.h> 63 63 #include <linux/cpu.h> 64 64 #include <asm/mwait.h> 65 + #include <asm/msr.h> 65 66 66 67 #define INTEL_IDLE_VERSION "0.4" 67 68 #define PREFIX "intel_idle: " ··· 84 83 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 85 84 86 85 static struct cpuidle_state *cpuidle_state_table; 86 + 87 + /* 88 + * Hardware C-state auto-demotion may not always be optimal. 89 + * Indicate which enable bits to clear here. 90 + */ 91 + static unsigned long long auto_demotion_disable_flags; 87 92 88 93 /* 89 94 * Set this flag for states where the HW flushes the TLB for us ··· 288 281 .notifier_call = setup_broadcast_cpuhp_notify, 289 282 }; 290 283 284 + static void auto_demotion_disable(void *dummy) 285 + { 286 + unsigned long long msr_bits; 287 + 288 + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); 289 + msr_bits &= ~auto_demotion_disable_flags; 290 + wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); 291 + } 292 + 291 293 /* 292 294 * intel_idle_probe() 293 295 */ ··· 340 324 case 0x25: /* Westmere */ 341 325 case 0x2C: /* Westmere */ 342 326 cpuidle_state_table = nehalem_cstates; 327 + auto_demotion_disable_flags = 328 + (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); 343 329 break; 344 330 345 331 case 0x1C: /* 28 - Atom Processor */ 332 + cpuidle_state_table = atom_cstates; 333 + break; 334 + 346 335 case 0x26: /* 38 - Lincroft Atom Processor */ 347 336 cpuidle_state_table = atom_cstates; 337 + auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; 348 338 break; 349 339 350 340 case 0x2A: /* SNB */ ··· 458 436 return -EIO; 459 437 } 460 438 } 439 + if (auto_demotion_disable_flags) 440 + smp_call_function(auto_demotion_disable, NULL, 1); 461 441 462 442 return 0; 463 443 }
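The disable path is a per-CPU read-modify-write of the package C-state config MSR, broadcast with smp_call_function() as the hunk shows. The clear-bits helper in isolation, using the rdmsrl/wrmsrl accessors from <asm/msr.h> (helper name illustrative):

static void msr_clear_bits_sketch(u32 msr, u64 clear_mask)
{
        u64 val;

        rdmsrl(msr, val);       /* read the MSR on this CPU */
        val &= ~clear_mask;
        wrmsrl(msr, val);       /* write back with the bits cleared */
}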
+1 -1
drivers/isdn/hardware/eicon/istream.c
··· 62 62 stream interface.
 63 63 If synchronous service was requested, then function
 64 64 does return amount of data written to stream.
 65 - 'final' does indicate that pice of data to be written is
 65 + 'final' does indicate that piece of data to be written is
 66 66 final part of frame (necessary only for structured data transfer)
 67 67 return 0 if zero length packet was written
 68 68 return -1 if stream is full
+7 -7
drivers/media/common/tuners/tda8290.c
··· 658 658 #define TDA8290_ID 0x89 659 659 u8 reg = 0x1f, id; 660 660 struct i2c_msg msg_read[] = { 661 - { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 662 - { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 661 + { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, 662 + { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, 663 663 }; 664 664 665 665 /* detect tda8290 */ 666 666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 667 - printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 667 + printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", 668 668 __func__, reg); 669 669 return -ENODEV; 670 670 } ··· 685 685 #define TDA8295C2_ID 0x8b 686 686 u8 reg = 0x2f, id; 687 687 struct i2c_msg msg_read[] = { 688 - { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 689 - { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 688 + { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, 689 + { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, 690 690 }; 691 691 692 - /* detect tda8290 */ 692 + /* detect tda8295 */ 693 693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 694 - printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 694 + printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", 695 695 __func__, reg); 696 696 return -ENODEV; 697 697 }
+19 -2
drivers/media/dvb/dvb-usb/dib0700_devices.c
··· 870 870 return 0; 871 871 } 872 872 873 + static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index, 874 + u16 pid, int onoff) 875 + { 876 + struct dib0700_state *st = adapter->dev->priv; 877 + if (st->is_dib7000pc) 878 + return dib7000p_pid_filter(adapter->fe, index, pid, onoff); 879 + return dib7000m_pid_filter(adapter->fe, index, pid, onoff); 880 + } 881 + 882 + static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) 883 + { 884 + struct dib0700_state *st = adapter->dev->priv; 885 + if (st->is_dib7000pc) 886 + return dib7000p_pid_filter_ctrl(adapter->fe, onoff); 887 + return dib7000m_pid_filter_ctrl(adapter->fe, onoff); 888 + } 889 + 873 890 static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) 874 891 { 875 892 return dib7000p_pid_filter(adapter->fe, index, pid, onoff); ··· 1892 1875 { 1893 1876 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 1894 1877 .pid_filter_count = 32, 1895 - .pid_filter = stk70x0p_pid_filter, 1896 - .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, 1878 + .pid_filter = stk7700p_pid_filter, 1879 + .pid_filter_ctrl = stk7700p_pid_filter_ctrl, 1897 1880 .frontend_attach = stk7700p_frontend_attach, 1898 1881 .tuner_attach = stk7700p_tuner_attach, 1899 1882
+3 -3
drivers/media/dvb/dvb-usb/lmedm04.c
··· 659 659 } 660 660 661 661 /* Default firmware for LME2510C */ 662 - const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; 662 + char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; 663 663 664 664 static void lme_coldreset(struct usb_device *dev) 665 665 { ··· 1006 1006 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 1007 1007 .usb_ctrl = DEVICE_SPECIFIC, 1008 1008 .download_firmware = lme2510_download_firmware, 1009 - .firmware = lme_firmware, 1009 + .firmware = (const char *)&lme_firmware, 1010 1010 .size_of_priv = sizeof(struct lme2510_state), 1011 1011 .num_adapters = 1, 1012 1012 .adapter = { ··· 1109 1109 1110 1110 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); 1111 1111 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); 1112 - MODULE_VERSION("1.74"); 1112 + MODULE_VERSION("1.75"); 1113 1113 MODULE_LICENSE("GPL");
+19
drivers/media/dvb/frontends/dib7000m.c
··· 1285 1285 } 1286 1286 EXPORT_SYMBOL(dib7000m_get_i2c_master); 1287 1287 1288 + int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) 1289 + { 1290 + struct dib7000m_state *state = fe->demodulator_priv; 1291 + u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef; 1292 + val |= (onoff & 0x1) << 4; 1293 + dprintk("PID filter enabled %d", onoff); 1294 + return dib7000m_write_word(state, 294 + state->reg_offs, val); 1295 + } 1296 + EXPORT_SYMBOL(dib7000m_pid_filter_ctrl); 1297 + 1298 + int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) 1299 + { 1300 + struct dib7000m_state *state = fe->demodulator_priv; 1301 + dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff); 1302 + return dib7000m_write_word(state, 300 + state->reg_offs + id, 1303 + onoff ? (1 << 13) | pid : 0); 1304 + } 1305 + EXPORT_SYMBOL(dib7000m_pid_filter); 1306 + 1288 1307 #if 0 1289 1308 /* used with some prototype boards */ 1290 1309 int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
+15
drivers/media/dvb/frontends/dib7000m.h
··· 46 46 extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, 47 47 enum dibx000_i2c_interface, 48 48 int); 49 + extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff); 50 + extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff); 49 51 #else 50 52 static inline 51 53 struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, ··· 64 62 { 65 63 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 66 64 return NULL; 65 + } 66 + static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, 67 + u16 pid, u8 onoff) 68 + { 69 + printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 70 + return -ENODEV; 71 + } 72 + 73 + static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, 74 + uint8_t onoff) 75 + { 76 + printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 77 + return -ENODEV; 67 78 } 68 79 #endif 69 80
-1
drivers/media/dvb/mantis/mantis_pci.c
··· 22 22 #include <linux/moduleparam.h> 23 23 #include <linux/kernel.h> 24 24 #include <asm/io.h> 25 - #include <asm/pgtable.h> 26 25 #include <asm/page.h> 27 26 #include <linux/kmod.h> 28 27 #include <linux/vmalloc.h>
+1 -2
drivers/media/rc/ir-raw.c
··· 112 112 { 113 113 ktime_t now; 114 114 s64 delta; /* ns */ 115 - struct ir_raw_event ev; 115 + DEFINE_IR_RAW_EVENT(ev); 116 116 int rc = 0; 117 117 118 118 if (!dev->raw) ··· 125 125 * being called for the first time, note that delta can't 126 126 * possibly be negative. 127 127 */ 128 - ev.duration = 0; 129 128 if (delta > IR_MAX_DURATION || !dev->raw->last_type) 130 129 type |= IR_START_EVENT; 131 130 else
+15 -12
drivers/media/rc/mceusb.c
··· 148 148 MCE_GEN2_TX_INV, 149 149 POLARIS_EVK, 150 150 CX_HYBRID_TV, 151 + MULTIFUNCTION, 151 152 }; 152 153 153 154 struct mceusb_model { ··· 156 155 u32 mce_gen2:1; 157 156 u32 mce_gen3:1; 158 157 u32 tx_mask_normal:1; 159 - u32 is_polaris:1; 160 158 u32 no_tx:1; 159 + 160 + int ir_intfnum; 161 161 162 162 const char *rc_map; /* Allow specify a per-board map */ 163 163 const char *name; /* per-board name */ ··· 181 179 .tx_mask_normal = 1, 182 180 }, 183 181 [POLARIS_EVK] = { 184 - .is_polaris = 1, 185 182 /* 186 183 * In fact, the EVK is shipped without 187 184 * remotes, but we should have something handy, ··· 190 189 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 191 190 }, 192 191 [CX_HYBRID_TV] = { 193 - .is_polaris = 1, 194 192 .no_tx = 1, /* tx isn't wired up at all */ 195 193 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 194 + }, 195 + [MULTIFUNCTION] = { 196 + .mce_gen2 = 1, 197 + .ir_intfnum = 2, 196 198 }, 197 199 }; 198 200 ··· 220 216 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, 221 217 /* Philips/Spinel plus IR transceiver for ASUS */ 222 218 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, 223 - /* Realtek MCE IR Receiver */ 224 - { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, 219 + /* Realtek MCE IR Receiver and card reader */ 220 + { USB_DEVICE(VENDOR_REALTEK, 0x0161), 221 + .driver_info = MULTIFUNCTION }, 225 222 /* SMK/Toshiba G83C0004D410 */ 226 223 { USB_DEVICE(VENDOR_SMK, 0x031d), 227 224 .driver_info = MCE_GEN2_TX_INV }, ··· 1106 1101 bool is_gen3; 1107 1102 bool is_microsoft_gen1; 1108 1103 bool tx_mask_normal; 1109 - bool is_polaris; 1104 + int ir_intfnum; 1110 1105 1111 1106 dev_dbg(&intf->dev, "%s called\n", __func__); 1112 1107 ··· 1115 1110 is_gen3 = mceusb_model[model].mce_gen3; 1116 1111 is_microsoft_gen1 = mceusb_model[model].mce_gen1; 1117 1112 tx_mask_normal = mceusb_model[model].tx_mask_normal; 1118 - is_polaris = mceusb_model[model].is_polaris; 1113 + ir_intfnum = mceusb_model[model].ir_intfnum; 1119 1114 1120 - if (is_polaris) { 1121 - /* Interface 0 is IR */ 1122 - if (idesc->desc.bInterfaceNumber) 1123 - return -ENODEV; 1124 - } 1115 + /* There are multi-function devices with non-IR interfaces */ 1116 + if (idesc->desc.bInterfaceNumber != ir_intfnum) 1117 + return -ENODEV; 1125 1118 1126 1119 /* step through the endpoints to find first bulk in and out endpoint */ 1127 1120 for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
+3 -2
drivers/media/rc/nuvoton-cir.c
··· 385 385 386 386 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) 387 387 { 388 - /* set number of bytes needed for wake key comparison (default 67) */ 389 - nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP); 388 + /* set number of bytes needed for wake from s3 (default 65) */ 389 + nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES, 390 + CIR_WAKE_FIFO_CMP_DEEP); 390 391 391 392 /* set tolerance/variance allowed per byte during wake compare */ 392 393 nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
+5 -2
drivers/media/rc/nuvoton-cir.h
··· 305 305 #define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 306 306 #define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 307 307 308 - /* CIR Wake FIFO buffer is 67 bytes long */ 309 - #define CIR_WAKE_FIFO_LEN 67 308 + /* 309 + * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes 310 + * the system comparing only 65 bytes (fails with this set to 67) 311 + */ 312 + #define CIR_WAKE_FIFO_CMP_BYTES 65 310 313 /* CIR Wake byte comparison tolerance */ 311 314 #define CIR_WAKE_CMP_TOLERANCE 5 312 315
+1 -1
drivers/media/rc/rc-main.c
··· 850 850 count++; 851 851 } else { 852 852 for (i = 0; i < ARRAY_SIZE(proto_names); i++) { 853 - if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) { 853 + if (!strcasecmp(tmp, proto_names[i].name)) { 854 854 tmp += strlen(proto_names[i].name); 855 855 mask = proto_names[i].type; 856 856 break;
+24 -4
drivers/media/video/au0828/au0828-video.c
··· 1758 1758 if (rc < 0) 1759 1759 return rc; 1760 1760 1761 - return videobuf_reqbufs(&fh->vb_vidq, rb); 1761 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1762 + rc = videobuf_reqbufs(&fh->vb_vidq, rb); 1763 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1764 + rc = videobuf_reqbufs(&fh->vb_vbiq, rb); 1765 + 1766 + return rc; 1762 1767 } 1763 1768 1764 1769 static int vidioc_querybuf(struct file *file, void *priv, ··· 1777 1772 if (rc < 0) 1778 1773 return rc; 1779 1774 1780 - return videobuf_querybuf(&fh->vb_vidq, b); 1775 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1776 + rc = videobuf_querybuf(&fh->vb_vidq, b); 1777 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1778 + rc = videobuf_querybuf(&fh->vb_vbiq, b); 1779 + 1780 + return rc; 1781 1781 } 1782 1782 1783 1783 static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) ··· 1795 1785 if (rc < 0) 1796 1786 return rc; 1797 1787 1798 - return videobuf_qbuf(&fh->vb_vidq, b); 1788 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1789 + rc = videobuf_qbuf(&fh->vb_vidq, b); 1790 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1791 + rc = videobuf_qbuf(&fh->vb_vbiq, b); 1792 + 1793 + return rc; 1799 1794 } 1800 1795 1801 1796 static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) ··· 1821 1806 dev->greenscreen_detected = 0; 1822 1807 } 1823 1808 1824 - return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); 1809 + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1810 + rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); 1811 + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) 1812 + rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK); 1813 + 1814 + return rc; 1825 1815 } 1826 1816 1827 1817 static struct v4l2_file_operations au0828_v4l_fops = {
+49 -1
drivers/media/video/cx18/cx18-cards.c
··· 95 95 .i2c = &cx18_i2c_std, 96 96 }; 97 97 98 + static const struct cx18_card cx18_card_hvr1600_s5h1411 = { 99 + .type = CX18_CARD_HVR_1600_S5H1411, 100 + .name = "Hauppauge HVR-1600", 101 + .comment = "Simultaneous Digital and Analog TV capture supported\n", 102 + .v4l2_capabilities = CX18_CAP_ENCODER, 103 + .hw_audio_ctrl = CX18_HW_418_AV, 104 + .hw_muxer = CX18_HW_CS5345, 105 + .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | 106 + CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | 107 + CX18_HW_Z8F0811_IR_HAUP, 108 + .video_inputs = { 109 + { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, 110 + { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, 111 + { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, 112 + { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, 113 + { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, 114 + }, 115 + .audio_inputs = { 116 + { CX18_CARD_INPUT_AUD_TUNER, 117 + CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, 118 + { CX18_CARD_INPUT_LINE_IN1, 119 + CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, 120 + { CX18_CARD_INPUT_LINE_IN2, 121 + CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, 122 + }, 123 + .radio_input = { CX18_CARD_INPUT_AUD_TUNER, 124 + CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, 125 + .ddr = { 126 + /* ESMT M13S128324A-5B memory */ 127 + .chip_config = 0x003, 128 + .refresh = 0x30c, 129 + .timing1 = 0x44220e82, 130 + .timing2 = 0x08, 131 + .tune_lane = 0, 132 + .initial_emrs = 0, 133 + }, 134 + .gpio_init.initial_value = 0x3001, 135 + .gpio_init.direction = 0x3001, 136 + .gpio_i2c_slave_reset = { 137 + .active_lo_mask = 0x3001, 138 + .msecs_asserted = 10, 139 + .msecs_recovery = 40, 140 + .ir_reset_mask = 0x0001, 141 + }, 142 + .i2c = &cx18_i2c_std, 143 + }; 144 + 98 145 static const struct cx18_card cx18_card_hvr1600_samsung = { 99 146 .type = CX18_CARD_HVR_1600_SAMSUNG, 100 147 .name = "Hauppauge HVR-1600 (Preproduction)", ··· 570 523 &cx18_card_toshiba_qosmio_dvbt, 571 524 &cx18_card_leadtek_pvr2100, 572 525 &cx18_card_leadtek_dvr3100h, 573 - &cx18_card_gotview_dvd3 526 + &cx18_card_gotview_dvd3, 527 + &cx18_card_hvr1600_s5h1411 574 528 }; 575 529 576 530 const struct cx18_card *cx18_get_card(u16 index)
+23 -2
drivers/media/video/cx18/cx18-driver.c
··· 157 157 "\t\t\t 7 = Leadtek WinFast PVR2100\n" 158 158 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" 159 159 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" 160 + "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n" 160 161 "\t\t\t 0 = Autodetect (default)\n" 161 162 "\t\t\t-1 = Ignore this card\n\t\t"); 162 163 MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); ··· 338 337 switch (cx->card->type) { 339 338 case CX18_CARD_HVR_1600_ESMT: 340 339 case CX18_CARD_HVR_1600_SAMSUNG: 340 + case CX18_CARD_HVR_1600_S5H1411: 341 341 tveeprom_hauppauge_analog(&c, tv, eedata); 342 342 break; 343 343 case CX18_CARD_YUAN_MPC718: ··· 367 365 from the model number. Use the cardtype module option if you 368 366 have one of these preproduction models. */ 369 367 switch (tv.model) { 370 - case 74000 ... 74999: 368 + case 74301: /* Retail models */ 369 + case 74321: 370 + case 74351: /* OEM models */ 371 + case 74361: 372 + /* Digital side is s5h1411/tda18271 */ 373 + cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411); 374 + break; 375 + case 74021: /* Retail models */ 376 + case 74031: 377 + case 74041: 378 + case 74141: 379 + case 74541: /* OEM models */ 380 + case 74551: 381 + case 74591: 382 + case 74651: 383 + case 74691: 384 + case 74751: 385 + case 74891: 386 + /* Digital side is s5h1409/mxl5005s */ 371 387 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 372 388 break; 373 389 case 0x718: ··· 397 377 CX18_ERR("Invalid EEPROM\n"); 398 378 return; 399 379 default: 400 - CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model); 380 + CX18_ERR("Unknown model %d, defaulting to original HVR-1600 " 381 + "(cardtype=1)\n", tv.model); 401 382 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 402 383 break; 403 384 }
+2 -1
drivers/media/video/cx18/cx18-driver.h
··· 85 85 #define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */
 86 86 #define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */
 87 87 #define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */
 88 - #define CX18_CARD_LAST 8
 88 + #define CX18_CARD_HVR_1600_S5H1411 9 /* Hauppauge HVR 1600 s5h1411/tda18271 */
 89 + #define CX18_CARD_LAST 9
 89 90 
 90 91 #define CX18_ENC_STREAM_TYPE_MPG 0
 91 92 #define CX18_ENC_STREAM_TYPE_TS 1
+38
drivers/media/video/cx18/cx18-dvb.c
··· 29 29 #include "cx18-gpio.h" 30 30 #include "s5h1409.h" 31 31 #include "mxl5005s.h" 32 + #include "s5h1411.h" 33 + #include "tda18271.h" 32 34 #include "zl10353.h" 33 35 34 36 #include <linux/firmware.h> ··· 76 74 .status_mode = S5H1409_DEMODLOCKING, 77 75 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, 78 76 .hvr1600_opt = S5H1409_HVR1600_OPTIMIZE 77 + }; 78 + 79 + /* 80 + * CX18_CARD_HVR_1600_S5H1411 81 + */ 82 + static struct s5h1411_config hcw_s5h1411_config = { 83 + .output_mode = S5H1411_SERIAL_OUTPUT, 84 + .gpio = S5H1411_GPIO_OFF, 85 + .vsb_if = S5H1411_IF_44000, 86 + .qam_if = S5H1411_IF_4000, 87 + .inversion = S5H1411_INVERSION_ON, 88 + .status_mode = S5H1411_DEMODLOCKING, 89 + .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, 90 + }; 91 + 92 + static struct tda18271_std_map hauppauge_tda18271_std_map = { 93 + .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3, 94 + .if_lvl = 6, .rfagc_top = 0x37 }, 95 + .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0, 96 + .if_lvl = 6, .rfagc_top = 0x37 }, 97 + }; 98 + 99 + static struct tda18271_config hauppauge_tda18271_config = { 100 + .std_map = &hauppauge_tda18271_std_map, 101 + .gate = TDA18271_GATE_DIGITAL, 102 + .output_opt = TDA18271_OUTPUT_LT_OFF, 79 103 }; 80 104 81 105 /* ··· 272 244 switch (cx->card->type) { 273 245 case CX18_CARD_HVR_1600_ESMT: 274 246 case CX18_CARD_HVR_1600_SAMSUNG: 247 + case CX18_CARD_HVR_1600_S5H1411: 275 248 v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); 276 249 v |= 0x00400000; /* Serial Mode */ 277 250 v |= 0x00002000; /* Data Length - Byte */ ··· 483 454 &hauppauge_hvr1600_tuner); 484 455 ret = 0; 485 456 } 457 + break; 458 + case CX18_CARD_HVR_1600_S5H1411: 459 + dvb->fe = dvb_attach(s5h1411_attach, 460 + &hcw_s5h1411_config, 461 + &cx->i2c_adap[0]); 462 + if (dvb->fe != NULL) 463 + dvb_attach(tda18271_attach, dvb->fe, 464 + 0x60, &cx->i2c_adap[0], 465 + &hauppauge_tda18271_config); 486 466 break; 487 467 case CX18_CARD_LEADTEK_DVR3100H: 488 468 dvb->fe = dvb_attach(zl10353_attach,
-10
drivers/media/video/cx23885/cx23885-i2c.c
··· 122 122 123 123 if (!i2c_wait_done(i2c_adap)) 124 124 goto eio; 125 - if (!i2c_slave_did_ack(i2c_adap)) { 126 - retval = -ENXIO; 127 - goto err; 128 - } 129 125 if (i2c_debug) { 130 126 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); 131 127 if (!(ctrl & I2C_NOSTOP)) ··· 154 158 155 159 eio: 156 160 retval = -EIO; 157 - err: 158 161 if (i2c_debug) 159 162 printk(KERN_ERR " ERR: %d\n", retval); 160 163 return retval; ··· 204 209 205 210 if (!i2c_wait_done(i2c_adap)) 206 211 goto eio; 207 - if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) { 208 - retval = -ENXIO; 209 - goto err; 210 - } 211 212 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; 212 213 if (i2c_debug) { 213 214 dprintk(1, " %02x", msg->buf[cnt]); ··· 215 224 216 225 eio: 217 226 retval = -EIO; 218 - err: 219 227 if (i2c_debug) 220 228 printk(KERN_ERR " ERR: %d\n", retval); 221 229 return retval;
+2 -1
drivers/media/video/cx25840/cx25840-core.c
··· 2015 2015 kfree(state); 2016 2016 return err; 2017 2017 } 2018 - v4l2_ctrl_cluster(2, &state->volume); 2018 + if (!is_cx2583x(state)) 2019 + v4l2_ctrl_cluster(2, &state->volume); 2019 2020 v4l2_ctrl_handler_setup(&state->hdl); 2020 2021 2021 2022 if (client->dev.platform_data) {
+51 -7
drivers/media/video/ivtv/ivtv-irq.c
··· 628 628 static void ivtv_irq_dma_err(struct ivtv *itv)
 629 629 {
 630 630 u32 data[CX2341X_MBOX_MAX_DATA];
 631 + u32 status;
 631 632 
 632 633 del_timer(&itv->dma_timer);
 633 634 
 634 + 
 633 635 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
 636 + status = read_reg(IVTV_REG_DMASTATUS);
 634 637 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
 635 - read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
 636 - write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
 638 + status, itv->cur_dma_stream);
 639 + /*
 640 + * We do *not* write back to the IVTV_REG_DMASTATUS register to
 641 + * clear the error status if either the encoder write (0x02) or
 642 + * the decoder read (0x01) bus master DMA operation does not
 643 + * indicate completion. We can race with the DMA engine, which may
 644 + * have transitioned to completed status *after* we read the register.
 645 + * Setting an IVTV_REG_DMASTATUS flag back to "busy" status, after the
 646 + * DMA engine has completed, will cause the DMA engine to stop working.
 647 + */
 648 + status &= 0x3;
 649 + if (status == 0x3)
 650 + write_reg(status, IVTV_REG_DMASTATUS);
 651 + 
 637 652 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
 638 653 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
 639 654 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
 640 655 
 641 - /* retry */
 642 - if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
 656 + if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
 657 + /* retry */
 658 + /*
 659 + * FIXME - handle cases of DMA error similar to
 660 + * encoder below, except conditioned on status & 0x1
 661 + */
 643 662 ivtv_dma_dec_start(s);
 644 - else
 645 - ivtv_dma_enc_start(s);
 646 - return;
 663 + return;
 664 + } else {
 665 + if ((status & 0x2) == 0) {
 666 + /*
 667 + * CX2341x Bus Master DMA write is ongoing.
 668 + * Reset the timer and let it complete.
 669 + */
 670 + itv->dma_timer.expires =
 671 + jiffies + msecs_to_jiffies(600);
 672 + add_timer(&itv->dma_timer);
 673 + return;
 674 + }
 675 + 
 676 + if (itv->dma_retries < 3) {
 677 + /*
 678 + * CX2341x Bus Master DMA write has ended.
 679 + * Retry the write, starting with the first
 680 + * xfer segment. Just retrying the current
 681 + * segment is not sufficient.
 682 + */
 683 + s->sg_processed = 0;
 684 + itv->dma_retries++;
 685 + ivtv_dma_enc_start_xfer(s);
 686 + return;
 687 + }
 688 + /* Too many retries, give up on this one */
 689 + }
 690 + 
 647 691 }
 648 692 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
 649 693 ivtv_udma_start(itv);
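Aside on the error path above: the handler separates "encoder DMA still busy" (re-arm the watchdog timer and wait) from "encoder DMA stopped" (restart from the first scatter-gather segment, at most three times). Below is a self-contained sketch of that decision logic; every name in it is invented for illustration and is not taken from the ivtv driver.

#include <stdbool.h>

#define MAX_DMA_RETRIES 3

/* Illustrative policy only: 'still_busy' stands in for the encoder
 * write bit of the status register, 'retries' for itv->dma_retries. */
bool handle_enc_dma_error(bool still_busy, int *retries,
			  void (*rearm_watchdog)(void),
			  void (*restart_whole_transfer)(void))
{
	if (still_busy) {
		/* The engine may yet finish; give it more time. */
		rearm_watchdog();
		return true;
	}
	if (*retries < MAX_DMA_RETRIES) {
		/* Restart from segment 0; resuming mid-transfer is
		 * not sufficient on this hardware. */
		(*retries)++;
		restart_whole_transfer();
		return true;
	}
	return false;	/* out of retries: fall through to recovery */
}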
-1
drivers/media/video/mem2mem_testdev.c
··· 1011 1011 v4l2_m2m_release(dev->m2m_dev); 1012 1012 del_timer_sync(&dev->timer); 1013 1013 video_unregister_device(dev->vfd); 1014 - video_device_release(dev->vfd); 1015 1014 v4l2_device_unregister(&dev->v4l2_dev); 1016 1015 kfree(dev); 1017 1016
+6 -4
drivers/media/video/s2255drv.c
··· 57 57 #include <linux/usb.h> 58 58 59 59 #define S2255_MAJOR_VERSION 1 60 - #define S2255_MINOR_VERSION 20 60 + #define S2255_MINOR_VERSION 21 61 61 #define S2255_RELEASE 0 62 62 #define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ 63 63 S2255_MINOR_VERSION, \ ··· 312 312 }; 313 313 314 314 /* current cypress EEPROM firmware version */ 315 - #define S2255_CUR_USB_FWVER ((3 << 8) | 6) 315 + #define S2255_CUR_USB_FWVER ((3 << 8) | 11) 316 316 /* current DSP FW version */ 317 - #define S2255_CUR_DSP_FWVER 8 317 + #define S2255_CUR_DSP_FWVER 10102 318 318 /* Need DSP version 5+ for video status feature */ 319 319 #define S2255_MIN_DSP_STATUS 5 320 320 #define S2255_MIN_DSP_COLORFILTER 8 ··· 492 492 493 493 static void s2255_reset_dsppower(struct s2255_dev *dev) 494 494 { 495 - s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1); 495 + s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1); 496 496 msleep(10); 497 497 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); 498 + msleep(600); 499 + s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1); 498 500 return; 499 501 } 500 502
+2 -2
drivers/mfd/asic3.c
··· 143 143 unsigned long flags; 144 144 struct asic3 *asic; 145 145 146 - desc->chip->ack(irq); 146 + desc->irq_data.chip->irq_ack(&desc->irq_data); 147 147 148 - asic = desc->handler_data; 148 + asic = get_irq_data(irq); 149 149 150 150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 151 151 u32 status;
+2 -2
drivers/mfd/davinci_voicecodec.c
··· 118 118 119 119 /* Voice codec interface client */ 120 120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; 121 - cell->name = "davinci_vcif"; 121 + cell->name = "davinci-vcif"; 122 122 cell->driver_data = davinci_vc; 123 123 124 124 /* Voice codec CQ93VC client */ 125 125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; 126 - cell->name = "cq93vc"; 126 + cell->name = "cq93vc-codec"; 127 127 cell->driver_data = davinci_vc; 128 128 129 129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
+5 -5
drivers/mfd/tps6586x.c
··· 150 150 static inline int __tps6586x_writes(struct i2c_client *client, int reg, 151 151 int len, uint8_t *val) 152 152 { 153 - int ret; 153 + int ret, i; 154 154 155 - ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); 156 - if (ret < 0) { 157 - dev_err(&client->dev, "failed writings to 0x%02x\n", reg); 158 - return ret; 155 + for (i = 0; i < len; i++) { 156 + ret = __tps6586x_write(client, reg + i, *(val + i)); 157 + if (ret < 0) 158 + return ret; 159 159 } 160 160 161 161 return 0;
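The tps6586x fix above replaces a single i2c_smbus_write_i2c_block_data() call with a loop of single-register writes. A minimal userspace sketch of the same fallback shape follows; bus_write8() is a hypothetical stand-in for the driver's __tps6586x_write(), not a real API.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical single-byte register write; returns 0 on success or
 * a negative error code, like the driver's __tps6586x_write(). */
extern int bus_write8(uint8_t reg, uint8_t val);

/* Emulate a block write as len consecutive single-register writes,
 * stopping at the first failure so the caller still sees the error. */
int bus_write_block(uint8_t reg, size_t len, const uint8_t *val)
{
	size_t i;
	int ret;

	for (i = 0; i < len; i++) {
		ret = bus_write8(reg + i, val[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}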
+9 -3
drivers/mfd/ucb1x00-ts.c
··· 385 385 idev->close = ucb1x00_ts_close; 386 386 387 387 __set_bit(EV_ABS, idev->evbit); 388 - __set_bit(ABS_X, idev->absbit); 389 - __set_bit(ABS_Y, idev->absbit); 390 - __set_bit(ABS_PRESSURE, idev->absbit); 391 388 392 389 input_set_drvdata(idev, ts); 390 + 391 + ucb1x00_adc_enable(ts->ucb); 392 + ts->x_res = ucb1x00_ts_read_xres(ts); 393 + ts->y_res = ucb1x00_ts_read_yres(ts); 394 + ucb1x00_adc_disable(ts->ucb); 395 + 396 + input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); 397 + input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); 398 + input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); 393 399 394 400 err = input_register_device(idev); 395 401 if (err)
+18
drivers/mfd/wm8994-core.c
··· 246 246 struct wm8994 *wm8994 = dev_get_drvdata(dev);
 247 247 int ret;
 248 248 
 249 + /* Don't actually go through with the suspend if the CODEC is
 250 + * still active (e.g., for audio passthrough from the CP). */
 251 + ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
 252 + if (ret < 0) {
 253 + dev_err(dev, "Failed to read power status: %d\n", ret);
 254 + } else if (ret & WM8994_VMID_SEL_MASK) {
 255 + dev_dbg(dev, "CODEC still active, ignoring suspend\n");
 256 + return 0;
 257 + }
 258 + 
 249 259 /* GPIO configuration state is saved here since we may be configuring
 250 260 * the GPIO alternate functions even if we're not using the gpiolib
 251 261 * driver for them.
 ··· 271 261 if (ret < 0)
 272 262 dev_err(dev, "Failed to save LDO registers: %d\n", ret);
 273 263 
 264 + wm8994->suspended = true;
 265 + 
 274 266 ret = regulator_bulk_disable(wm8994->num_supplies,
 275 267 wm8994->supplies);
 276 268 if (ret != 0) {
 ··· 287 275 {
 288 276 struct wm8994 *wm8994 = dev_get_drvdata(dev);
 289 277 int ret;
 278 + 
 279 + /* We may have lied to the PM core about suspending */
 280 + if (!wm8994->suspended)
 281 + return 0;
 290 282 
 291 283 ret = regulator_bulk_enable(wm8994->num_supplies,
 292 284 wm8994->supplies);
 ··· 313 297 &wm8994->gpio_regs);
 314 298 if (ret < 0)
 315 299 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
 300 + 
 301 + wm8994->suspended = false;
 316 302 
 317 303 return 0;
 318 304 }
+1
drivers/misc/bmp085.c
··· 449 449 { "bmp085", 0 }, 450 450 { } 451 451 }; 452 + MODULE_DEVICE_TABLE(i2c, bmp085_id); 452 453 453 454 static struct i2c_driver bmp085_driver = { 454 455 .driver = {
+1 -1
drivers/mmc/core/core.c
··· 1529 1529 * still present 1530 1530 */ 1531 1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1532 - && mmc_card_is_removable(host)) 1532 + && !(host->caps & MMC_CAP_NONREMOVABLE)) 1533 1533 host->bus_ops->detect(host); 1534 1534 1535 1535 /*
+1 -2
drivers/mmc/core/sdio.c
··· 792 792 */ 793 793 mmc_release_host(host); 794 794 err = mmc_add_card(host->card); 795 - mmc_claim_host(host); 796 795 if (err) 797 796 goto remove_added; 798 797 ··· 804 805 goto remove_added; 805 806 } 806 807 808 + mmc_claim_host(host); 807 809 return 0; 808 810 809 811 810 812 remove_added: 811 813 /* Remove without lock if the device has been added. */ 812 - mmc_release_host(host); 813 814 mmc_sdio_remove(host); 814 815 mmc_claim_host(host); 815 816 remove:
+16 -12
drivers/net/bnx2x/bnx2x.h
··· 22 22 * (you will need to reboot afterwards) */
 23 23 /* #define BNX2X_STOP_ON_ERROR */
 24 24 
 25 - #define DRV_MODULE_VERSION "1.62.00-5"
 25 + #define DRV_MODULE_VERSION "1.62.00-6"
 26 26 #define DRV_MODULE_RELDATE "2011/01/30"
 27 27 #define BNX2X_BC_VER 0x040200
 28 28 
 ··· 1613 1613 #define BNX2X_BTR 4
 1614 1614 #define MAX_SPQ_PENDING 8
 1615 1615 
 1616 - 
 1617 - /* CMNG constants
 1618 - derived from lab experiments, and not from system spec calculations !!! */
 1619 - #define DEF_MIN_RATE 100
 1616 + /* CMNG constants, as derived from system spec calculations */
 1617 + /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
 1618 + #define DEF_MIN_RATE 100
 1620 1619 /* resolution of the rate shaping timer - 100 usec */
 1621 - #define RS_PERIODIC_TIMEOUT_USEC 100
 1622 - /* resolution of fairness algorithm in usecs -
 1623 - coefficient for calculating the actual t fair */
 1624 - #define T_FAIR_COEF 10000000
 1619 + #define RS_PERIODIC_TIMEOUT_USEC 100
 1625 1620 /* number of bytes in single QM arbitration cycle -
 1626 - coefficient for calculating the fairness timer */
 1627 - #define QM_ARB_BYTES 40000
 1628 - #define FAIR_MEM 2
 1621 + * coefficient for calculating the fairness timer */
 1622 + #define QM_ARB_BYTES 160000
 1623 + /* resolution of Min algorithm 1:100 */
 1624 + #define MIN_RES 100
 1625 + /* how many bytes above threshold for the minimal credit of Min algorithm */
 1626 + #define MIN_ABOVE_THRESH 32768
 1627 + /* Fairness algorithm integration time coefficient -
 1628 + * for calculating the actual Tfair */
 1629 + #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
 1630 + /* Memory of fairness algorithm - 2 cycles */
 1631 + #define FAIR_MEM 2
 1629 1633 
 1630 1634 
 1631 1635 #define ATTN_NIG_FOR_FUNC (1L << 8)
+51 -14
drivers/net/bnx2x/bnx2x_cmn.c
··· 259 259 #endif
 260 260 }
 261 261 
 262 + /* Timestamp option length allowed for TPA aggregation:
 263 + *
 264 + * nop nop kind length echo val
 265 + */
 266 + #define TPA_TSTAMP_OPT_LEN 12
 267 + /**
 268 + * Calculate the approximate value of the MSS for this
 269 + * aggregation using its first packet.
 270 + *
 271 + * @param bp
 272 + * @param parsing_flags Parsing flags from the START CQE
 273 + * @param len_on_bd Total length of the first packet for the
 274 + * aggregation.
 275 + */
 276 + static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 277 + u16 len_on_bd)
 278 + {
 279 + /* A TPA aggregation won't have IP options or TCP options
 280 + * other than the timestamp.
 281 + */
 282 + u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
 283 + 
 284 + 
 285 + /* Check if there was a TCP timestamp; if there was, it will
 286 + * always be 12 bytes long: nop nop kind length echo val.
 287 + *
 288 + * Otherwise FW would close the aggregation.
 289 + */
 290 + if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 291 + hdrs_len += TPA_TSTAMP_OPT_LEN;
 292 + 
 293 + return len_on_bd - hdrs_len;
 294 + }
 295 + 
 262 296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 263 297 struct sk_buff *skb,
 264 298 struct eth_fast_path_rx_cqe *fp_cqe,
 265 - u16 cqe_idx)
 299 + u16 cqe_idx, u16 parsing_flags)
 266 300 {
 267 301 struct sw_rx_page *rx_pg, old_rx_pg;
 268 302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 ··· 309 275 
 310 276 /* This is needed in order to enable forwarding support */
 311 277 if (frag_size)
 312 - skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
 313 - max(frag_size, (u32)len_on_bd));
 278 + skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
 279 + len_on_bd);
 314 280 
 315 281 #ifdef BNX2X_STOP_ON_ERROR
 316 282 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 ··· 378 344 if (likely(new_skb)) {
 379 345 /* fix ip xsum and give it to the stack */
 380 346 /* (no need to map the new skb) */
 347 + u16 parsing_flags =
 348 + le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
 381 349 
 382 350 prefetch(skb);
 383 351 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 ··· 409 373 }
 410 374 
 411 375 if (!bnx2x_fill_frag_skb(bp, fp, skb,
 412 - &cqe->fast_path_cqe, cqe_idx)) {
 413 - if ((le16_to_cpu(cqe->fast_path_cqe.
 414 - pars_flags.flags) & PARSING_FLAGS_VLAN))
 376 + &cqe->fast_path_cqe, cqe_idx,
 377 + parsing_flags)) {
 378 + if (parsing_flags & PARSING_FLAGS_VLAN)
 415 379 __vlan_hwaccel_put_tag(skb,
 416 380 le16_to_cpu(cqe->fast_path_cqe.
 417 381 vlan_tag));
 ··· 739 703 {
 740 704 u16 line_speed = bp->link_vars.line_speed;
 741 705 if (IS_MF(bp)) {
 742 - u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
 743 - FUNC_MF_CFG_MAX_BW_MASK) >>
 744 - FUNC_MF_CFG_MAX_BW_SHIFT;
 745 - /* Calculate the current MAX line speed limit for the DCC
 746 - * capable devices
 706 + u16 maxCfg = bnx2x_extract_max_cfg(bp,
 707 + bp->mf_config[BP_VN(bp)]);
 708 + 
 709 + /* Calculate the current MAX line speed limit for the MF
 710 + * devices
 711 + */
 712 + if (IS_MF_SI(bp))
 713 + line_speed = (line_speed * maxCfg) / 100;
 714 + else { /* SD mode */
 749 715 u16 vn_max_rate = maxCfg * 100;
 750 716 
 751 717 if (vn_max_rate < line_speed)
 752 718 line_speed = vn_max_rate;
 753 - } else /* IS_MF_SI(bp)) */
 754 - line_speed = (line_speed * maxCfg) / 100;
 719 + }
 755 720 }
 756 721 
 757 722 return line_speed;
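The MSS approximation introduced above is plain header arithmetic: subtract 14 bytes of Ethernet, 20 of IPv4 and 20 of TCP from the first packet's length, plus 12 more when the START CQE flags a TCP timestamp option. A standalone illustration with hypothetical sample values (ordinary userspace C, not driver code):

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN           14	/* Ethernet header */
#define IPV4_HLEN          20	/* struct iphdr, no options */
#define TCP_HLEN           20	/* struct tcphdr, no options */
#define TPA_TSTAMP_OPT_LEN 12	/* nop nop kind length echo val */

/* Mirror of the driver's approximation: header sizes are fixed
 * because a TPA aggregation carries no IP options and no TCP
 * options other than the timestamp. */
static uint16_t lro_mss(uint16_t len_on_bd, int has_tstamp)
{
	uint16_t hdrs = ETH_HLEN + IPV4_HLEN + TCP_HLEN;

	if (has_tstamp)
		hdrs += TPA_TSTAMP_OPT_LEN;
	return len_on_bd - hdrs;
}

int main(void)
{
	/* 1514-byte first frame with a timestamp: 1514 - 66 = 1448 */
	printf("mss = %u\n", (unsigned)lro_mss(1514, 1));
	return 0;
}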
+20
drivers/net/bnx2x/bnx2x_cmn.h
··· 1044 1044 void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1045 1045 void bnx2x_release_phy_lock(struct bnx2x *bp); 1046 1046 1047 + /** 1048 + * Extracts MAX BW part from MF configuration. 1049 + * 1050 + * @param bp 1051 + * @param mf_cfg 1052 + * 1053 + * @return u16 1054 + */ 1055 + static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) 1056 + { 1057 + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1058 + FUNC_MF_CFG_MAX_BW_SHIFT; 1059 + if (!max_cfg) { 1060 + BNX2X_ERR("Illegal configuration detected for Max BW - " 1061 + "using 100 instead\n"); 1062 + max_cfg = 100; 1063 + } 1064 + return max_cfg; 1065 + } 1066 + 1047 1067 #endif /* BNX2X_CMN_H */
+12 -13
drivers/net/bnx2x/bnx2x_ethtool.c
··· 238 238 speed |= (cmd->speed_hi << 16); 239 239 240 240 if (IS_MF_SI(bp)) { 241 - u32 param = 0; 241 + u32 param = 0, part; 242 242 u32 line_speed = bp->link_vars.line_speed; 243 243 244 244 /* use 10G if no link detected */ ··· 251 251 REQ_BC_VER_4_SET_MF_BW); 252 252 return -EINVAL; 253 253 } 254 - if (line_speed < speed) { 255 - BNX2X_DEV_INFO("New speed should be less or equal " 256 - "to actual line speed\n"); 254 + part = (speed * 100) / line_speed; 255 + if (line_speed < speed || !part) { 256 + BNX2X_DEV_INFO("Speed setting should be in a range " 257 + "from 1%% to 100%% " 258 + "of actual line speed\n"); 257 259 return -EINVAL; 258 260 } 259 261 /* load old values */ ··· 265 263 param &= FUNC_MF_CFG_MIN_BW_MASK; 266 264 267 265 /* set new MAX value */ 268 - param |= (((speed * 100) / line_speed) 269 - << FUNC_MF_CFG_MAX_BW_SHIFT) 266 + param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT) 270 267 & FUNC_MF_CFG_MAX_BW_MASK; 271 268 272 269 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); ··· 1782 1781 { 0x100, 0x350 }, /* manuf_info */ 1783 1782 { 0x450, 0xf0 }, /* feature_info */ 1784 1783 { 0x640, 0x64 }, /* upgrade_key_info */ 1785 - { 0x6a4, 0x64 }, 1786 1784 { 0x708, 0x70 }, /* manuf_key_info */ 1787 - { 0x778, 0x70 }, 1788 1785 { 0, 0 } 1789 1786 }; 1790 1787 __be32 buf[0x350 / 4]; ··· 1932 1933 buf[4] = 1; 1933 1934 etest->flags |= ETH_TEST_FL_FAILED; 1934 1935 } 1935 - if (bp->port.pmf) 1936 - if (bnx2x_link_test(bp, is_serdes) != 0) { 1937 - buf[5] = 1; 1938 - etest->flags |= ETH_TEST_FL_FAILED; 1939 - } 1936 + 1937 + if (bnx2x_link_test(bp, is_serdes) != 0) { 1938 + buf[5] = 1; 1939 + etest->flags |= ETH_TEST_FL_FAILED; 1940 + } 1940 1941 1941 1942 #ifdef BNX2X_EXTRA_DEBUG 1942 1943 bnx2x_panic_dump(bp);
+1 -1
drivers/net/bnx2x/bnx2x_init.h
··· 241 241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 242 242 * want to handle "system kill" flow at the moment. 243 243 */ 244 - BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), 244 + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), 245 245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 246 246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 247 247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+14 -4
drivers/net/bnx2x/bnx2x_main.c
··· 1974 1974 vn_max_rate = 0; 1975 1975 1976 1976 } else { 1977 + u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 1978 + 1977 1979 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1978 1980 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1979 - /* If min rate is zero - set it to 1 */ 1981 + /* If fairness is enabled (not all min rates are zeroes) and 1982 + if current min rate is zero - set it to 1. 1983 + This is a requirement of the algorithm. */ 1980 1984 if (bp->vn_weight_sum && (vn_min_rate == 0)) 1981 1985 vn_min_rate = DEF_MIN_RATE; 1982 - vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1983 - FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1986 + 1987 + if (IS_MF_SI(bp)) 1988 + /* maxCfg in percents of linkspeed */ 1989 + vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 1990 + else 1991 + /* maxCfg is absolute in 100Mb units */ 1992 + vn_max_rate = maxCfg * 100; 1984 1993 } 1985 1994 1986 1995 DP(NETIF_MSG_IFUP, ··· 2015 2006 m_fair_vn.vn_credit_delta = 2016 2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2017 2008 (8 * bp->vn_weight_sum))), 2018 - (bp->cmng.fair_vars.fair_threshold * 2)); 2009 + (bp->cmng.fair_vars.fair_threshold + 2010 + MIN_ABOVE_THRESH)); 2019 2011 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2020 2012 m_fair_vn.vn_credit_delta); 2021 2013 }
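One detail worth noting in the hunk above: the same maxCfg field means a percentage of link speed in MF-SI mode but an absolute rate in 100 Mbps units in MF-SD mode. A tiny standalone illustration of the two conversions (sample numbers invented):

#include <stdio.h>

/* SI mode: maxCfg is a percentage of the current line speed.
 * SD mode: maxCfg is an absolute rate in units of 100 Mbps. */
static unsigned int vn_max_rate_mbps(int is_si,
				     unsigned int line_speed_mbps,
				     unsigned int max_cfg)
{
	if (is_si)
		return (line_speed_mbps * max_cfg) / 100;
	return max_cfg * 100;
}

int main(void)
{
	/* On a 10G link with maxCfg = 25:
	 * SI -> 2500 Mbps (25% of 10G), SD -> 2500 Mbps (25 * 100);
	 * equal here only because the link happens to be 10G. */
	printf("SI: %u, SD: %u\n",
	       vn_max_rate_mbps(1, 10000, 25),
	       vn_max_rate_mbps(0, 10000, 25));
	return 0;
}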
+2 -2
drivers/net/bnx2x/bnx2x_stats.c
··· 1239 1239 if (unlikely(bp->panic)) 1240 1240 return; 1241 1241 1242 + bnx2x_stats_stm[bp->stats_state][event].action(bp); 1243 + 1242 1244 /* Protect a state change flow */ 1243 1245 spin_lock_bh(&bp->stats_lock); 1244 1246 state = bp->stats_state; 1245 1247 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1246 1248 spin_unlock_bh(&bp->stats_lock); 1247 - 1248 - bnx2x_stats_stm[state][event].action(bp); 1249 1249 1250 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1251 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+1
drivers/net/can/softing/softing_main.c
··· 633 633 }; 634 634 635 635 static const struct can_bittiming_const softing_btr_const = { 636 + .name = "softing", 636 637 .tseg1_min = 1, 637 638 .tseg1_max = 16, 638 639 .tseg2_min = 1,
+25 -8
drivers/net/cnic.c
··· 2760 2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2761 2761 int kcqe_cnt; 2762 2762 2763 + /* status block index must be read before reading other fields */ 2764 + rmb(); 2763 2765 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2764 2766 2765 2767 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { ··· 2772 2770 barrier(); 2773 2771 if (status_idx != *cp->kcq1.status_idx_ptr) { 2774 2772 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2773 + /* status block index must be read first */ 2774 + rmb(); 2775 2775 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2776 2776 } else 2777 2777 break; ··· 2892 2888 u32 last_status = *info->status_idx_ptr; 2893 2889 int kcqe_cnt; 2894 2890 2891 + /* status block index must be read before reading the KCQ */ 2892 + rmb(); 2895 2893 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2896 2894 2897 2895 service_kcqes(dev, kcqe_cnt); ··· 2904 2898 break; 2905 2899 2906 2900 last_status = *info->status_idx_ptr; 2901 + /* status block index must be read before reading the KCQ */ 2902 + rmb(); 2907 2903 } 2908 2904 return last_status; 2909 2905 } ··· 2914 2906 { 2915 2907 struct cnic_dev *dev = (struct cnic_dev *) data; 2916 2908 struct cnic_local *cp = dev->cnic_priv; 2917 - u32 status_idx; 2909 + u32 status_idx, new_status_idx; 2918 2910 2919 2911 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2920 2912 return; 2921 2913 2922 - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2914 + while (1) { 2915 + status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2923 2916 2924 - CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2917 + CNIC_WR16(dev, cp->kcq1.io_addr, 2918 + cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2925 2919 2926 - if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 2927 - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2920 + if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { 2921 + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2922 + status_idx, IGU_INT_ENABLE, 1); 2923 + break; 2924 + } 2925 + 2926 + new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2927 + 2928 + if (new_status_idx != status_idx) 2929 + continue; 2928 2930 2929 2931 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2930 2932 MAX_KCQ_IDX); 2931 2933 2932 2934 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2933 2935 status_idx, IGU_INT_ENABLE, 1); 2934 - } else { 2935 - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2936 - status_idx, IGU_INT_ENABLE, 1); 2936 + 2937 + break; 2937 2938 } 2938 2939 } 2939 2940
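The rmb() calls added above enforce a single invariant: the status block index must be read, and that read completed, before any queue entries it covers are examined, otherwise the CPU may pair a fresh index with stale entry data. In C11 terms this is an acquire load against the producer's release store; a hedged sketch with invented names (not cnic's):

#include <stdatomic.h>
#include <stdint.h>

#define KCQ_SIZE 256

/* Filled by a producer that writes entries[] first and then
 * publishes the new index with a release store -- the software
 * analogue of "write entries, wmb(), write index" in firmware. */
extern uint32_t entries[KCQ_SIZE];
extern _Atomic uint16_t status_idx;

uint32_t read_latest_entry(void)
{
	/* The acquire load plays the role of "read index, then rmb()":
	 * every entry write that happened before the index was
	 * published is guaranteed visible after this point. */
	uint16_t idx = atomic_load_explicit(&status_idx,
					    memory_order_acquire);
	return entries[idx % KCQ_SIZE];
}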
+1 -1
drivers/net/davinci_emac.c
··· 1008 1008 int ret; 1009 1009 1010 1010 /* free and bail if we are shutting down */ 1011 - if (unlikely(!netif_running(ndev))) { 1011 + if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { 1012 1012 dev_kfree_skb_any(skb); 1013 1013 return; 1014 1014 }
+1 -2
drivers/net/dnet.c
··· 337 337 for (i = 0; i < PHY_MAX_ADDR; i++) 338 338 bp->mii_bus->irq[i] = PHY_POLL; 339 339 340 - platform_set_drvdata(bp->dev, bp->mii_bus); 341 - 342 340 if (mdiobus_register(bp->mii_bus)) { 343 341 err = -ENXIO; 344 342 goto err_out_free_mdio_irq; ··· 861 863 bp = netdev_priv(dev); 862 864 bp->dev = dev; 863 865 866 + platform_set_drvdata(pdev, dev); 864 867 SET_NETDEV_DEV(dev, &pdev->dev); 865 868 866 869 spin_lock_init(&bp->lock);
+2 -1
drivers/net/e1000/e1000_osdep.h
··· 42 42 #define GBE_CONFIG_RAM_BASE \ 43 43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 44 44 45 - #define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) 45 + #define GBE_CONFIG_BASE_VIRT \ 46 + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) 46 47 47 48 #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 48 49 (iowrite16_rep(base + offset, data, count))
+2 -1
drivers/net/e1000e/netdev.c
··· 5967 5967 /* APME bit in EEPROM is mapped to WUC.APME */ 5968 5968 eeprom_data = er32(WUC); 5969 5969 eeprom_apme_mask = E1000_WUC_APME; 5970 - if (eeprom_data & E1000_WUC_PHY_WAKE) 5970 + if ((hw->mac.type > e1000_ich10lan) && 5971 + (eeprom_data & E1000_WUC_PHY_WAKE)) 5971 5972 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 5972 5973 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 5973 5974 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
+2 -1
drivers/net/fec.c
··· 74 74 }, { 75 75 .name = "imx28-fec", 76 76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 77 - } 77 + }, 78 + { } 78 79 }; 79 80 80 81 static unsigned char macaddr[ETH_ALEN];
+1 -1
drivers/net/igbvf/vf.c
··· 220 220 * The parameter rar_count will usually be hw->mac.rar_entry_count 221 221 * unless there are workarounds that change this. 222 222 **/ 223 - void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 223 + static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 224 224 u8 *mc_addr_list, u32 mc_addr_count, 225 225 u32 rar_used_count, u32 rar_count) 226 226 {
+1 -1
drivers/net/macb.c
··· 260 260 for (i = 0; i < PHY_MAX_ADDR; i++) 261 261 bp->mii_bus->irq[i] = PHY_POLL; 262 262 263 - platform_set_drvdata(bp->dev, bp->mii_bus); 263 + dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 264 264 265 265 if (mdiobus_register(bp->mii_bus)) 266 266 goto err_out_free_mdio_irq;
+1
drivers/net/pcmcia/fmvj18x_cs.c
··· 691 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 692 692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), 693 693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 694 + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), 694 695 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 695 696 PCMCIA_DEVICE_NULL, 696 697 };
+6
drivers/net/r8169.c
··· 25 25 #include <linux/dma-mapping.h>
 26 26 #include <linux/pm_runtime.h>
 27 27 #include <linux/firmware.h>
 28 + #include <linux/pci-aspm.h>
 28 29 
 29 30 #include <asm/system.h>
 30 31 #include <asm/io.h>
 ··· 3020 3019 mii->phy_id_mask = 0x1f;
 3021 3020 mii->reg_num_mask = 0x1f;
 3022 3021 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
 3022 + 
 3023 + /* disable ASPM completely as it causes random devices to stop
 3024 + * working, as well as full system hangs, for some PCIe device users */
 3025 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
 3026 + PCIE_LINK_STATE_CLKPM);
 3023 3027 
 3024 3028 /* enable device (incl. PCI PM wakeup and hotplug setup) */
 3025 3029 rc = pci_enable_device(pdev);
-3
drivers/net/skge.c
··· 3856 3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3857 3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3858 3858 3859 - /* device is off until link detection */ 3860 - netif_carrier_off(dev); 3861 - 3862 3859 return dev; 3863 3860 } 3864 3861
+5 -4
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 219 219 struct tx_buf *tx_buf = NULL; 220 220 struct sk_buff *nskb = NULL; 221 221 int ret = 0, i; 222 - u16 *hdr, tx_skb_cnt = 0; 222 + u16 tx_skb_cnt = 0; 223 223 u8 *buf; 224 + __le16 *hdr; 224 225 225 226 if (hif_dev->tx.tx_skb_cnt == 0) 226 227 return 0; ··· 246 245 247 246 buf = tx_buf->buf; 248 247 buf += tx_buf->offset; 249 - hdr = (u16 *)buf; 250 - *hdr++ = nskb->len; 251 - *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; 248 + hdr = (__le16 *)buf; 249 + *hdr++ = cpu_to_le16(nskb->len); 250 + *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); 252 251 buf += 4; 253 252 memcpy(buf, nskb->data, nskb->len); 254 253 tx_buf->len = nskb->len + 4;
+3 -2
drivers/net/wireless/ath/ath9k/mac.c
··· 885 885 struct ath_common *common = ath9k_hw_common(ah); 886 886 887 887 if (!(ints & ATH9K_INT_GLOBAL)) 888 - ath9k_hw_enable_interrupts(ah); 888 + ath9k_hw_disable_interrupts(ah); 889 889 890 890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 891 891 ··· 963 963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 964 964 } 965 965 966 - ath9k_hw_enable_interrupts(ah); 966 + if (ints & ATH9K_INT_GLOBAL) 967 + ath9k_hw_enable_interrupts(ah); 967 968 968 969 return; 969 970 }
+2
drivers/net/wireless/ath/carl9170/usb.c
··· 118 118 { USB_DEVICE(0x057c, 0x8402) }, 119 119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ 120 120 { USB_DEVICE(0x1668, 0x1200) }, 121 + /* Airlive X.USB a/b/g/n */ 122 + { USB_DEVICE(0x1b75, 0x9170) }, 121 123 122 124 /* terminate */ 123 125 {}
+1 -1
drivers/net/wireless/iwlwifi/iwl-5000.c
··· 51 51 #include "iwl-agn-debugfs.h" 52 52 53 53 /* Highest firmware API version supported */ 54 - #define IWL5000_UCODE_API_MAX 2 54 + #define IWL5000_UCODE_API_MAX 5 55 55 #define IWL5150_UCODE_API_MAX 2 56 56 57 57 /* Lowest firmware API version supported */
+1
drivers/net/wireless/p54/p54usb.c
··· 98 98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ 99 99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 100 100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 101 + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ 101 102 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 102 103 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ 103 104 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
+3
drivers/net/wireless/rndis_wlan.c
··· 2597 2597 __le32 mode; 2598 2598 int ret; 2599 2599 2600 + if (priv->device_type != RNDIS_BCM4320B) 2601 + return -ENOTSUPP; 2602 + 2600 2603 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, 2601 2604 enabled ? "enabled" : "disabled", 2602 2605 timeout);
+42 -70
drivers/of/pdt.c
··· 36 36 (p)->unique_id = of_pdt_unique_id++; \
 37 37 } while (0)
 38 38 
 39 - static inline const char *of_pdt_node_name(struct device_node *dp)
 39 + static char * __init of_pdt_build_full_name(struct device_node *dp)
 40 40 {
 41 - return dp->path_component_name;
 41 + int len, ourlen, plen;
 42 + char *n;
 43 + 
 44 + dp->path_component_name = build_path_component(dp);
 45 + 
 46 + plen = strlen(dp->parent->full_name);
 47 + ourlen = strlen(dp->path_component_name);
 48 + len = ourlen + plen + 2;
 49 + 
 50 + n = prom_early_alloc(len);
 51 + strcpy(n, dp->parent->full_name);
 52 + if (!of_node_is_root(dp->parent)) {
 53 + strcpy(n + plen, "/");
 54 + plen++;
 55 + }
 56 + strcpy(n + plen, dp->path_component_name);
 57 + 
 58 + return n;
 42 59 }
 43 60 
 44 - #else
 61 + #else /* CONFIG_SPARC */
 45 62 
 46 63 static inline void of_pdt_incr_unique_id(void *p) { }
 47 64 static inline void irq_trans_init(struct device_node *dp) { }
 48 65 
 49 - static inline const char *of_pdt_node_name(struct device_node *dp)
 66 + static char * __init of_pdt_build_full_name(struct device_node *dp)
 50 67 {
 51 - return dp->name;
 68 + static int failsafe_id = 0; /* for generating unique names on failure */
 69 + char *buf;
 70 + int len;
 71 + 
 72 + if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
 73 + goto failsafe;
 74 + 
 75 + buf = prom_early_alloc(len + 1);
 76 + if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
 77 + goto failsafe;
 78 + return buf;
 79 + 
 80 + failsafe:
 81 + buf = prom_early_alloc(strlen(dp->parent->full_name) +
 82 + strlen(dp->name) + 16);
 83 + sprintf(buf, "%s/%s@unknown%i",
 84 + of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
 85 + dp->name, failsafe_id++);
 86 + pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
 87 + return buf;
 52 88 }
 53 89 
 54 90 #endif /* !CONFIG_SPARC */
 ··· 168 132 return buf;
 169 133 }
 170 134 
 171 - static char * __init of_pdt_try_pkg2path(phandle node)
 172 - {
 173 - char *res, *buf = NULL;
 174 - int len;
 175 - 
 176 - if (!of_pdt_prom_ops->pkg2path)
 177 - return NULL;
 178 - 
 179 - if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
 180 - return NULL;
 181 - buf = prom_early_alloc(len + 1);
 182 - if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
 183 - pr_err("%s: package-to-path failed\n", __func__);
 184 - return NULL;
 185 - }
 186 - 
 187 - res = strrchr(buf, '/');
 188 - if (!res) {
 189 - pr_err("%s: couldn't find / in %s\n", __func__, buf);
 190 - return NULL;
 191 - }
 192 - return res+1;
 193 - }
 194 - 
 195 - /*
 196 - * When fetching the node's name, first try using package-to-path; if
 197 - * that fails (either because the arch hasn't supplied a PROM callback,
 198 - * or some other random failure), fall back to just looking at the node's
 199 - * 'name' property.
 200 - */
 201 - static char * __init of_pdt_build_name(phandle node)
 202 - {
 203 - char *buf;
 204 - 
 205 - buf = of_pdt_try_pkg2path(node);
 206 - if (!buf)
 207 - buf = of_pdt_get_one_property(node, "name");
 208 - 
 209 - return buf;
 210 - }
 211 - 
 212 135 static struct device_node * __init of_pdt_create_node(phandle node,
 213 136 struct device_node *parent)
 214 137 {
 ··· 182 187 
 183 188 kref_init(&dp->kref);
 184 189 
 185 - dp->name = of_pdt_build_name(node);
 190 + dp->name = of_pdt_get_one_property(node, "name");
 186 191 dp->type = of_pdt_get_one_property(node, "device_type");
 187 192 dp->phandle = node;
 ··· 191 196 irq_trans_init(dp);
 192 197 
 193 198 return dp;
 194 - }
 195 - 
 196 - static char * __init of_pdt_build_full_name(struct device_node *dp)
 197 - {
 198 - int len, ourlen, plen;
 199 - char *n;
 200 - 
 201 - plen = strlen(dp->parent->full_name);
 202 - ourlen = strlen(of_pdt_node_name(dp));
 203 - len = ourlen + plen + 2;
 204 - 
 205 - n = prom_early_alloc(len);
 206 - strcpy(n, dp->parent->full_name);
 207 - if (!of_node_is_root(dp->parent)) {
 208 - strcpy(n + plen, "/");
 209 - plen++;
 210 - }
 211 - strcpy(n + plen, of_pdt_node_name(dp));
 212 - 
 213 - return n;
 214 199 }
 215 200 
 216 201 static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
 ··· 215 240 *(*nextp) = dp;
 216 241 *nextp = &dp->allnext;
 217 242 
 218 - #if defined(CONFIG_SPARC)
 219 - dp->path_component_name = build_path_component(dp);
 220 - #endif
 221 243 dp->full_name = of_pdt_build_full_name(dp);
 222 244 
 223 245 dp->child = of_pdt_build_tree(dp,
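of_pdt_build_full_name() concatenates the parent's full path and this node's path component, inserting a '/' unless the parent is the root. A toy userspace analogue (the names and the "/" root convention here are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy the parent's path, add a separator unless the parent is the
 * root (whose full name is just "/"), then append the component. */
static char *build_full_name(const char *parent_full, const char *component)
{
	size_t plen = strlen(parent_full);
	char *n = malloc(plen + strlen(component) + 2);

	if (!n)
		return NULL;
	strcpy(n, parent_full);
	if (strcmp(parent_full, "/") != 0)
		strcat(n, "/");
	strcat(n, component);
	return n;
}

int main(void)
{
	char *bus = build_full_name("/", "pci@1f,0");
	char *dev = build_full_name(bus, "ide@d");

	printf("%s\n%s\n", bus, dev);	/* /pci@1f,0 then /pci@1f,0/ide@d */
	free(bus);
	free(dev);
	return 0;
}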
+3
drivers/pcmcia/pxa2xx_colibri.c
··· 181 181 { 182 182 int ret; 183 183 184 + if (!machine_is_colibri() && !machine_is_colibri320()) 185 + return -ENODEV; 186 + 184 187 colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 185 188 if (!colibri_pcmcia_device) 186 189 return -ENOMEM;
+1 -1
drivers/pps/generators/Kconfig
··· 6 6 7 7 config PPS_GENERATOR_PARPORT 8 8 tristate "Parallel port PPS signal generator" 9 - depends on PARPORT 9 + depends on PARPORT && BROKEN 10 10 help 11 11 If you say yes here you get support for a PPS signal generator which 12 12 utilizes STROBE pin of a parallel port to send PPS signals. It uses
+7 -5
drivers/rtc/rtc-s3c.c
··· 77 77 } 78 78 79 79 /* Update control registers */ 80 - static void s3c_rtc_setaie(int to) 80 + static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) 81 81 { 82 82 unsigned int tmp; 83 83 84 - pr_debug("%s: aie=%d\n", __func__, to); 84 + pr_debug("%s: aie=%d\n", __func__, enabled); 85 85 86 86 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 87 87 88 - if (to) 88 + if (enabled) 89 89 tmp |= S3C2410_RTCALM_ALMEN; 90 90 91 91 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 92 + 93 + return 0; 92 94 } 93 95 94 96 static int s3c_rtc_setpie(struct device *dev, int enabled) ··· 310 308 311 309 writeb(alrm_en, base + S3C2410_RTCALM); 312 310 313 - s3c_rtc_setaie(alrm->enabled); 311 + s3c_rtc_setaie(dev, alrm->enabled); 314 312 315 313 return 0; 316 314 } ··· 442 440 rtc_device_unregister(rtc); 443 441 444 442 s3c_rtc_setpie(&dev->dev, 0); 445 - s3c_rtc_setaie(0); 443 + s3c_rtc_setaie(&dev->dev, 0); 446 444 447 445 clk_disable(rtc_clk); 448 446 clk_put(rtc_clk);
+2 -2
drivers/s390/block/xpram.c
··· 62 62 /* 63 63 * Parameter parsing functions. 64 64 */ 65 - static int __initdata devs = XPRAM_DEVS; 66 - static char __initdata *sizes[XPRAM_MAX_DEVS]; 65 + static int devs = XPRAM_DEVS; 66 + static char *sizes[XPRAM_MAX_DEVS]; 67 67 68 68 module_param(devs, int, 0); 69 69 module_param_array(sizes, charp, NULL, 0);
+2 -1
drivers/s390/char/keyboard.c
··· 460 460 unsigned int cmd, unsigned long arg) 461 461 { 462 462 void __user *argp; 463 - int ct, perm; 463 + unsigned int ct; 464 + int perm; 464 465 465 466 argp = (void __user *)arg; 466 467
+8
drivers/s390/char/tape.h
··· 280 280 return rc; 281 281 } 282 282 283 + static inline void 284 + tape_do_io_async_free(struct tape_device *device, struct tape_request *request) 285 + { 286 + request->callback = (void *) tape_free_request; 287 + request->callback_data = NULL; 288 + tape_do_io_async(device, request); 289 + } 290 + 283 291 extern int tape_oper_handler(int irq, int status); 284 292 extern void tape_noper_handler(int irq, int status); 285 293 extern int tape_open(struct tape_device *);
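tape_do_io_async_free() makes a request self-reaping: the completion callback is pointed at tape_free_request(), so the submitter can start the I/O and never look back. A hedged userspace sketch of the same fire-and-forget pattern, with all types and names made up:

#include <stdlib.h>

struct request {
	void (*callback)(struct request *rq);
	/* ... channel program, payload ... */
};

static void request_free(struct request *rq)
{
	free(rq);
}

/* Whoever completes the request invokes rq->callback; routing the
 * callback to the destructor means nobody waits on this request and
 * nothing leaks when it completes -- fire and forget. */
void submit_async_free(struct request *rq,
		       void (*start_io)(struct request *rq))
{
	rq->callback = request_free;
	start_io(rq);
}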
+41 -18
drivers/s390/char/tape_34xx.c
··· 53 53 * Medium sense for 34xx tapes. There is no 'real' medium sense call.
 54 54 * So we just do a normal sense.
 55 55 */
 56 - static int
 57 - tape_34xx_medium_sense(struct tape_device *device)
 56 + static void __tape_34xx_medium_sense(struct tape_request *request)
 58 57 {
 59 - struct tape_request *request;
 60 - unsigned char *sense;
 61 - int rc;
 58 + struct tape_device *device = request->device;
 59 + unsigned char *sense;
 62 60 
 63 - request = tape_alloc_request(1, 32);
 64 - if (IS_ERR(request)) {
 65 - DBF_EXCEPTION(6, "MSEN fail\n");
 66 - return PTR_ERR(request);
 67 - }
 68 - 
 69 - request->op = TO_MSEN;
 70 - tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
 71 - 
 72 - rc = tape_do_io_interruptible(device, request);
 73 61 if (request->rc == 0) {
 74 62 sense = request->cpdata;
 75 63 
 ··· 76 88 device->tape_generic_status |= GMT_WR_PROT(~0);
 77 89 else
 78 90 device->tape_generic_status &= ~GMT_WR_PROT(~0);
 79 - } else {
 91 + } else
 80 92 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
 81 93 request->rc);
 82 - }
 83 94 tape_free_request(request);
 95 + }
 84 96 
 97 + static int tape_34xx_medium_sense(struct tape_device *device)
 98 + {
 99 + struct tape_request *request;
 100 + int rc;
 101 + 
 102 + request = tape_alloc_request(1, 32);
 103 + if (IS_ERR(request)) {
 104 + DBF_EXCEPTION(6, "MSEN fail\n");
 105 + return PTR_ERR(request);
 106 + }
 107 + 
 108 + request->op = TO_MSEN;
 109 + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
 110 + rc = tape_do_io_interruptible(device, request);
 111 + __tape_34xx_medium_sense(request);
 85 112 return rc;
 113 + }
 114 + 
 115 + static void tape_34xx_medium_sense_async(struct tape_device *device)
 116 + {
 117 + struct tape_request *request;
 118 + 
 119 + request = tape_alloc_request(1, 32);
 120 + if (IS_ERR(request)) {
 121 + DBF_EXCEPTION(6, "MSEN fail\n");
 122 + return;
 123 + }
 124 + 
 125 + request->op = TO_MSEN;
 126 + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
 127 + request->callback = (void *) __tape_34xx_medium_sense;
 128 + request->callback_data = NULL;
 129 + tape_do_io_async(device, request);
 86 130 }
 87 131 
 88 132 struct tape_34xx_work {
 ··· 129 109 * is inserted but cannot call tape_do_io* from an interrupt context.
 130 110 * Maybe that's useful for other actions we want to start from the
 131 111 * interrupt handler.
 112 + * Note: the work handler is called by the system work queue. The tape
 113 + * commands started by the handler need to be asynchronous, otherwise
 114 + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
 132 115 */
 133 116 static void
 134 117 tape_34xx_work_handler(struct work_struct *work)
 ··· 142 119 
 143 120 switch(p->op) {
 144 121 case TO_MSEN:
 145 - tape_34xx_medium_sense(device);
 122 + tape_34xx_medium_sense_async(device);
 146 123 break;
 147 124 default:
 148 125 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
+67 -16
drivers/s390/char/tape_3590.c
··· 329 329 /*
 330 330 * Enable encryption
 331 331 */
 332 - static int tape_3592_enable_crypt(struct tape_device *device)
 332 + static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
 333 333 {
 334 334 struct tape_request *request;
 335 335 char *data;
 336 336 
 337 337 DBF_EVENT(6, "tape_3592_enable_crypt\n");
 338 338 if (!crypt_supported(device))
 339 - return -ENOSYS;
 339 + return ERR_PTR(-ENOSYS);
 340 340 request = tape_alloc_request(2, 72);
 341 341 if (IS_ERR(request))
 342 - return PTR_ERR(request);
 342 + return request;
 343 343 data = request->cpdata;
 344 344 memset(data,0,72);
 345 345 
 ··· 354 354 request->op = TO_CRYPT_ON;
 355 355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
 356 356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
 357 + return request;
 358 + }
 359 + 
 360 + static int tape_3592_enable_crypt(struct tape_device *device)
 361 + {
 362 + struct tape_request *request;
 363 + 
 364 + request = __tape_3592_enable_crypt(device);
 365 + if (IS_ERR(request))
 366 + return PTR_ERR(request);
 357 367 return tape_do_io_free(device, request);
 368 + }
 369 + 
 370 + static void tape_3592_enable_crypt_async(struct tape_device *device)
 371 + {
 372 + struct tape_request *request;
 373 + 
 374 + request = __tape_3592_enable_crypt(device);
 375 + if (!IS_ERR(request))
 376 + tape_do_io_async_free(device, request);
 358 377 }
 359 378 
 360 379 /*
 361 380 * Disable encryption
 362 381 */
 363 - static int tape_3592_disable_crypt(struct tape_device *device)
 382 + static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
 364 383 {
 365 384 struct tape_request *request;
 366 385 char *data;
 367 386 
 368 387 DBF_EVENT(6, "tape_3592_disable_crypt\n");
 369 388 if (!crypt_supported(device))
 370 - return -ENOSYS;
 389 + return ERR_PTR(-ENOSYS);
 371 390 request = tape_alloc_request(2, 72);
 372 391 if (IS_ERR(request))
 373 - return PTR_ERR(request);
 392 + return request;
 374 393 data = request->cpdata;
 375 394 memset(data,0,72);
 376 395 
 ··· 402 383 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
 403 384 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
 404 385 
 386 + return request;
 387 + }
 388 + 
 389 + static int tape_3592_disable_crypt(struct tape_device *device)
 390 + {
 391 + struct tape_request *request;
 392 + 
 393 + request = __tape_3592_disable_crypt(device);
 394 + if (IS_ERR(request))
 395 + return PTR_ERR(request);
 405 396 return tape_do_io_free(device, request);
 397 + }
 398 + 
 399 + static void tape_3592_disable_crypt_async(struct tape_device *device)
 400 + {
 401 + struct tape_request *request;
 402 + 
 403 + request = __tape_3592_disable_crypt(device);
 404 + if (!IS_ERR(request))
 405 + tape_do_io_async_free(device, request);
 406 406 }
 407 407 
 408 408 /*
 ··· 495 457 /*
 496 458 * SENSE Medium: Get Sense data about medium state
 497 459 */
 498 - static int
 499 - tape_3590_sense_medium(struct tape_device *device)
 460 + static int tape_3590_sense_medium(struct tape_device *device)
 500 461 {
 501 462 struct tape_request *request;
 502 463 
 ··· 505 468 request->op = TO_MSEN;
 506 469 tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
 507 470 return tape_do_io_free(device, request);
 471 + }
 472 + 
 473 + static void tape_3590_sense_medium_async(struct tape_device *device)
 474 + {
 475 + struct tape_request *request;
 476 + 
 477 + request = tape_alloc_request(1, 128);
 478 + if (IS_ERR(request))
 479 + return;
 480 + request->op = TO_MSEN;
 481 + tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
 482 + tape_do_io_async_free(device, request);
 508 483 }
 509 484 
 510 485 /*
 ··· 595 546 * 2. The attention msg is written to the "read subsystem data" buffer.
 596 547 * In this case we probably should print it to the console.
 597 548 */
 598 - static int
 599 - tape_3590_read_attmsg(struct tape_device *device)
 549 + static void tape_3590_read_attmsg_async(struct tape_device *device)
 600 550 {
 601 551 struct tape_request *request;
 602 552 char *buf;
 603 553 
 604 554 request = tape_alloc_request(3, 4096);
 605 555 if (IS_ERR(request))
 606 - return PTR_ERR(request);
 556 + return;
 607 557 request->op = TO_READ_ATTMSG;
 608 558 buf = request->cpdata;
 609 559 buf[0] = PREP_RD_SS_DATA;
 ··· 610 562 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
 611 563 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
 612 564 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
 613 - return tape_do_io_free(device, request);
 565 + tape_do_io_async_free(device, request);
 614 566 }
 615 567 
 616 568 /*
 617 569 * These functions are used to schedule follow-up actions from within an
 618 570 * interrupt context (like unsolicited interrupts).
 571 + * Note: the work handler is called by the system work queue. The tape
 572 + * commands started by the handler need to be asynchronous, otherwise
 573 + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
 619 574 */
 620 575 struct work_handler_data {
 621 576 struct tape_device *device;
 ··· 634 583 
 635 584 switch (p->op) {
 636 585 case TO_MSEN:
 637 - tape_3590_sense_medium(p->device);
 586 + tape_3590_sense_medium_async(p->device);
 638 587 break;
 639 588 case TO_READ_ATTMSG:
 640 - tape_3590_read_attmsg(p->device);
 589 + tape_3590_read_attmsg_async(p->device);
 641 590 break;
 642 591 case TO_CRYPT_ON:
 643 - tape_3592_enable_crypt(p->device);
 592 + tape_3592_enable_crypt_async(p->device);
 644 593 break;
 645 594 case TO_CRYPT_OFF:
 646 - tape_3592_disable_crypt(p->device);
 595 + tape_3592_disable_crypt_async(p->device);
 647 596 break;
 648 597 default:
 649 598 DBF_EVENT(3, "T3590: work handler undefined for "
+1 -1
drivers/scsi/scsi_lib.c
··· 443 443 &sdev->request_queue->queue_flags); 444 444 if (flagset) 445 445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 446 - __blk_run_queue(sdev->request_queue); 446 + __blk_run_queue(sdev->request_queue, false); 447 447 if (flagset) 448 448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 449 449 spin_unlock(sdev->request_queue->queue_lock);
+1 -1
drivers/scsi/scsi_transport_fc.c
··· 3829 3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3830 3830 if (flagset) 3831 3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); 3832 - __blk_run_queue(rport->rqst_q); 3832 + __blk_run_queue(rport->rqst_q, false); 3833 3833 if (flagset) 3834 3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3835 3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
+1
drivers/tty/serial/serial_cs.c
··· 712 712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 713 713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), 714 714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 715 + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), 715 716 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 716 717 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 717 718 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
+10 -5
drivers/usb/gadget/f_phonet.c
··· 346 346 347 347 if (unlikely(!skb)) 348 348 break; 349 - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, 350 - req->actual); 349 + 350 + if (skb->len == 0) { /* First fragment */ 351 + skb->protocol = htons(ETH_P_PHONET); 352 + skb_reset_mac_header(skb); 353 + /* Can't use pskb_pull() on page in IRQ */ 354 + memcpy(skb_put(skb, 1), page_address(page), 1); 355 + } 356 + 357 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 358 + skb->len == 0, req->actual); 351 359 page = NULL; 352 360 353 361 if (req->actual < req->length) { /* Last fragment */ 354 - skb->protocol = htons(ETH_P_PHONET); 355 - skb_reset_mac_header(skb); 356 - pskb_pull(skb, 1); 357 362 skb->dev = dev; 358 363 dev->stats.rx_packets++; 359 364 dev->stats.rx_bytes += skb->len;
+1
drivers/usb/host/ehci-xilinx-of.c
··· 29 29 30 30 #include <linux/of.h> 31 31 #include <linux/of_platform.h> 32 + #include <linux/of_address.h> 32 33 33 34 /** 34 35 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
+8 -1
drivers/video/backlight/ltv350qv.c
··· 239 239 lcd->spi = spi; 240 240 lcd->power = FB_BLANK_POWERDOWN; 241 241 lcd->buffer = kzalloc(8, GFP_KERNEL); 242 + if (!lcd->buffer) { 243 + ret = -ENOMEM; 244 + goto out_free_lcd; 245 + } 242 246 243 247 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); 244 248 if (IS_ERR(ld)) { 245 249 ret = PTR_ERR(ld); 246 - goto out_free_lcd; 250 + goto out_free_buffer; 247 251 } 248 252 lcd->ld = ld; 249 253 ··· 261 257 262 258 out_unregister: 263 259 lcd_device_unregister(ld); 260 + out_free_buffer: 261 + kfree(lcd->buffer); 264 262 out_free_lcd: 265 263 kfree(lcd); 266 264 return ret; ··· 274 268 275 269 ltv350qv_power(lcd, FB_BLANK_POWERDOWN); 276 270 lcd_device_unregister(lcd->ld); 271 + kfree(lcd->buffer); 277 272 kfree(lcd); 278 273 279 274 return 0;
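The ltv350qv fix above is the standard goto-unwind idiom: each acquisition gets a matching label, and an error path jumps to the label that releases everything obtained so far, in reverse order. A self-contained sketch with hypothetical resource names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct panel {
	char *buffer;
	char *name;
};

static int panel_probe(struct panel **out)
{
	struct panel *p;
	int ret;

	p = calloc(1, sizeof(*p));
	if (!p)
		return -1;

	p->buffer = calloc(8, 1);
	if (!p->buffer) {
		ret = -1;
		goto out_free_panel;	/* nothing else to undo yet */
	}

	p->name = strdup("ltv350qv");
	if (!p->name) {
		ret = -1;
		goto out_free_buffer;	/* undo the buffer, then the panel */
	}

	*out = p;
	return 0;

out_free_buffer:
	free(p->buffer);
out_free_panel:
	free(p);
	return ret;
}

int main(void)
{
	struct panel *p;

	if (panel_probe(&p) == 0) {
		printf("probed %s\n", p->name);
		free(p->name);
		free(p->buffer);
		free(p);
	}
	return 0;
}

Adding a later allocation needs only one new label at the top of the ladder, which is exactly what the out_free_buffer label in the driver fix does.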
+5 -2
drivers/watchdog/sbc_fitpc2_wdt.c
··· 201 201 static int __init fitpc2_wdt_init(void) 202 202 { 203 203 int err; 204 + const char *brd_name; 204 205 205 - if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) 206 + brd_name = dmi_get_system_info(DMI_BOARD_NAME); 207 + 208 + if (!brd_name || !strstr(brd_name, "SBC-FITPC2")) 206 209 return -ENODEV; 207 210 208 - pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); 211 + pr_info("%s found\n", brd_name); 209 212 210 213 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { 211 214 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
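The watchdog fix checks dmi_get_system_info() for NULL before handing the result to strstr(); on a board with no DMI board name the old code dereferenced NULL. The same defensive shape in plain C, with getenv() standing in for a lookup that can legitimately return NULL:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *brd_name = getenv("BOARD_NAME"); /* may be NULL */

	/* Check for NULL before strstr(); strstr(NULL, ...) crashes. */
	if (!brd_name || !strstr(brd_name, "SBC-FITPC2")) {
		fprintf(stderr, "no matching board\n");
		return 1;
	}
	printf("%s found\n", brd_name);
	return 0;
}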
+4 -26
fs/ceph/dir.c
··· 60 60 } 61 61 di->dentry = dentry; 62 62 di->lease_session = NULL; 63 - di->parent_inode = igrab(dentry->d_parent->d_inode); 64 63 dentry->d_fsdata = di; 65 64 dentry->d_time = jiffies; 66 65 ceph_dentry_lru_add(dentry); ··· 409 410 spin_lock(&inode->i_lock); 410 411 if (ci->i_release_count == fi->dir_release_count) { 411 412 dout(" marking %p complete\n", inode); 412 - ci->i_ceph_flags |= CEPH_I_COMPLETE; 413 + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ 413 414 ci->i_max_offset = filp->f_pos; 414 415 } 415 416 spin_unlock(&inode->i_lock); ··· 496 497 497 498 /* .snap dir? */ 498 499 if (err == -ENOENT && 500 + ceph_snap(parent) == CEPH_NOSNAP && 499 501 strcmp(dentry->d_name.name, 500 502 fsc->mount_options->snapdir_name) == 0) { 501 503 struct inode *inode = ceph_get_snapdir(parent); ··· 993 993 { 994 994 struct inode *dir; 995 995 996 - if (nd->flags & LOOKUP_RCU) 996 + if (nd && nd->flags & LOOKUP_RCU) 997 997 return -ECHILD; 998 998 999 999 dir = dentry->d_parent->d_inode; ··· 1030 1030 static void ceph_dentry_release(struct dentry *dentry) 1031 1031 { 1032 1032 struct ceph_dentry_info *di = ceph_dentry(dentry); 1033 - struct inode *parent_inode = NULL; 1034 - u64 snapid = CEPH_NOSNAP; 1035 1033 1036 - if (!IS_ROOT(dentry)) { 1037 - parent_inode = di->parent_inode; 1038 - if (parent_inode) 1039 - snapid = ceph_snap(parent_inode); 1040 - } 1041 - dout("dentry_release %p parent %p\n", dentry, parent_inode); 1042 - if (parent_inode && snapid != CEPH_SNAPDIR) { 1043 - struct ceph_inode_info *ci = ceph_inode(parent_inode); 1044 - 1045 - spin_lock(&parent_inode->i_lock); 1046 - if (ci->i_shared_gen == di->lease_shared_gen || 1047 - snapid <= CEPH_MAXSNAP) { 1048 - dout(" clearing %p complete (d_release)\n", 1049 - parent_inode); 1050 - ci->i_ceph_flags &= ~CEPH_I_COMPLETE; 1051 - ci->i_release_count++; 1052 - } 1053 - spin_unlock(&parent_inode->i_lock); 1054 - } 1034 + dout("dentry_release %p\n", dentry); 1055 1035 if (di) { 1056 1036 ceph_dentry_lru_del(dentry); 1057 1037 if (di->lease_session) ··· 1039 1059 kmem_cache_free(ceph_dentry_cachep, di); 1040 1060 dentry->d_fsdata = NULL; 1041 1061 } 1042 - if (parent_inode) 1043 - iput(parent_inode); 1044 1062 } 1045 1063 1046 1064 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
+1 -1
fs/ceph/inode.c
··· 707 707 (issued & CEPH_CAP_FILE_EXCL) == 0 && 708 708 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 709 709 dout(" marking %p complete (empty)\n", inode); 710 - ci->i_ceph_flags |= CEPH_I_COMPLETE; 710 + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ 711 711 ci->i_max_offset = 2; 712 712 } 713 713 break;
-1
fs/ceph/super.h
··· 207 207 struct dentry *dentry; 208 208 u64 time; 209 209 u64 offset; 210 - struct inode *parent_inode; 211 210 }; 212 211 213 212 struct ceph_inode_xattrs_info {
+24 -2
fs/dcache.c
··· 1523 1523 } 1524 1524 EXPORT_SYMBOL(d_alloc_root); 1525 1525 1526 + static struct dentry * __d_find_any_alias(struct inode *inode) 1527 + { 1528 + struct dentry *alias; 1529 + 1530 + if (list_empty(&inode->i_dentry)) 1531 + return NULL; 1532 + alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); 1533 + __dget(alias); 1534 + return alias; 1535 + } 1536 + 1537 + static struct dentry * d_find_any_alias(struct inode *inode) 1538 + { 1539 + struct dentry *de; 1540 + 1541 + spin_lock(&inode->i_lock); 1542 + de = __d_find_any_alias(inode); 1543 + spin_unlock(&inode->i_lock); 1544 + return de; 1545 + } 1546 + 1547 + 1526 1548 /** 1527 1549 * d_obtain_alias - find or allocate a dentry for a given inode 1528 1550 * @inode: inode to allocate the dentry for ··· 1574 1552 if (IS_ERR(inode)) 1575 1553 return ERR_CAST(inode); 1576 1554 1577 - res = d_find_alias(inode); 1555 + res = d_find_any_alias(inode); 1578 1556 if (res) 1579 1557 goto out_iput; 1580 1558 ··· 1587 1565 1588 1566 1589 1567 spin_lock(&inode->i_lock); 1590 - res = __d_find_alias(inode, 0); 1568 + res = __d_find_any_alias(inode); 1591 1569 if (res) { 1592 1570 spin_unlock(&inode->i_lock); 1593 1571 dput(tmp);
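d_find_any_alias() above follows the locked-helper convention: the double-underscore variant assumes the lock is already held and pins its result with a reference before returning, and the public wrapper brackets it with lock/unlock so the pointer stays valid after the lock drops. A userspace model with a mutex and an explicit refcount (all type and field names here are hypothetical):

#include <pthread.h>
#include <stdio.h>

struct dentry {
	int refcount;
	struct dentry *next;	/* next alias on the same inode */
};

struct inode {
	pthread_mutex_t i_lock;
	struct dentry *aliases;	/* first alias, or NULL */
};

/* Caller must hold inode->i_lock. */
static struct dentry *__find_any_alias(struct inode *inode)
{
	struct dentry *alias = inode->aliases;

	if (alias)
		alias->refcount++;	/* pin it before the lock is dropped */
	return alias;
}

static struct dentry *find_any_alias(struct inode *inode)
{
	struct dentry *de;

	pthread_mutex_lock(&inode->i_lock);
	de = __find_any_alias(inode);
	pthread_mutex_unlock(&inode->i_lock);
	return de;	/* still valid: we hold a reference */
}

int main(void)
{
	struct dentry d = { .refcount = 1, .next = NULL };
	struct inode ino = { PTHREAD_MUTEX_INITIALIZER, &d };
	struct dentry *de = find_any_alias(&ino);

	printf("alias %p refcount %d\n", (void *)de, de->refcount);
	return 0;
}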
+2 -6
fs/exofs/namei.c
··· 272 272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page); 273 273 if (!new_de) 274 274 goto out_dir; 275 - inode_inc_link_count(old_inode); 276 275 err = exofs_set_link(new_dir, new_de, new_page, old_inode); 277 276 new_inode->i_ctime = CURRENT_TIME; 278 277 if (dir_de) ··· 285 286 if (new_dir->i_nlink >= EXOFS_LINK_MAX) 286 287 goto out_dir; 287 288 } 288 - inode_inc_link_count(old_inode); 289 289 err = exofs_add_link(new_dentry, old_inode); 290 - if (err) { 291 - inode_dec_link_count(old_inode); 290 + if (err) 292 291 goto out_dir; 293 - } 294 292 if (dir_de) 295 293 inode_inc_link_count(new_dir); 296 294 } ··· 295 299 old_inode->i_ctime = CURRENT_TIME; 296 300 297 301 exofs_delete_entry(old_de, old_page); 298 - inode_dec_link_count(old_inode); 302 + mark_inode_dirty(old_inode); 299 303 300 304 if (dir_de) { 301 305 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
+2 -7
fs/ext2/namei.c
··· 344 344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); 345 345 if (!new_de) 346 346 goto out_dir; 347 - inode_inc_link_count(old_inode); 348 347 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); 349 348 new_inode->i_ctime = CURRENT_TIME_SEC; 350 349 if (dir_de) ··· 355 356 if (new_dir->i_nlink >= EXT2_LINK_MAX) 356 357 goto out_dir; 357 358 } 358 - inode_inc_link_count(old_inode); 359 359 err = ext2_add_link(new_dentry, old_inode); 360 - if (err) { 361 - inode_dec_link_count(old_inode); 360 + if (err) 362 361 goto out_dir; 363 - } 364 362 if (dir_de) 365 363 inode_inc_link_count(new_dir); 366 364 } ··· 365 369 /* 366 370 * Like most other Unix systems, set the ctime for inodes on a 367 371 * rename. 368 - * inode_dec_link_count() will mark the inode dirty. 369 372 */ 370 373 old_inode->i_ctime = CURRENT_TIME_SEC; 374 + mark_inode_dirty(old_inode); 371 375 372 376 ext2_delete_entry (old_de, old_page); 373 - inode_dec_link_count(old_inode); 374 377 375 378 if (dir_de) { 376 379 if (old_dir != new_dir)
+2 -2
fs/fat/namei_vfat.c
··· 43 43 44 44 static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) 45 45 { 46 - if (nd->flags & LOOKUP_RCU) 46 + if (nd && nd->flags & LOOKUP_RCU) 47 47 return -ECHILD; 48 48 49 49 /* This is not negative dentry. Always valid. */ ··· 54 54 55 55 static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) 56 56 { 57 - if (nd->flags & LOOKUP_RCU) 57 + if (nd && nd->flags & LOOKUP_RCU) 58 58 return -ECHILD; 59 59 60 60 /*
+1 -1
fs/fuse/dir.c
··· 158 158 { 159 159 struct inode *inode; 160 160 161 - if (nd->flags & LOOKUP_RCU) 161 + if (nd && nd->flags & LOOKUP_RCU) 162 162 return -ECHILD; 163 163 164 164 inode = entry->d_inode;
+1 -1
fs/gfs2/dentry.c
··· 44 44 int error; 45 45 int had_lock = 0; 46 46 47 - if (nd->flags & LOOKUP_RCU) 47 + if (nd && nd->flags & LOOKUP_RCU) 48 48 return -ECHILD; 49 49 50 50 parent = dget_parent(dentry);
+14 -38
fs/hfs/dir.c
··· 238 238 } 239 239 240 240 /* 241 - * hfs_unlink() 241 + * hfs_remove() 242 242 * 243 - * This is the unlink() entry in the inode_operations structure for 244 - * regular HFS directories. The purpose is to delete an existing 245 - * file, given the inode for the parent directory and the name 246 - * (and its length) of the existing file. 243 + * This serves as both unlink() and rmdir() in the inode_operations 244 + * structure for regular HFS directories. The purpose is to delete 245 + * an existing child, given the inode for the parent directory and 246 + * the name (and its length) of the existing directory. 247 + * 248 + * HFS does not have hardlinks, so both rmdir and unlink set the 249 + * link count to 0. The only difference is the emptiness check. 247 250 */ 248 - static int hfs_unlink(struct inode *dir, struct dentry *dentry) 251 + static int hfs_remove(struct inode *dir, struct dentry *dentry) 249 252 { 250 - struct inode *inode; 253 + struct inode *inode = dentry->d_inode; 251 254 int res; 252 255 253 - inode = dentry->d_inode; 254 - res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 255 - if (res) 256 - return res; 257 - 258 - drop_nlink(inode); 259 - hfs_delete_inode(inode); 260 - inode->i_ctime = CURRENT_TIME_SEC; 261 - mark_inode_dirty(inode); 262 - 263 - return res; 264 - } 265 - 266 - /* 267 - * hfs_rmdir() 268 - * 269 - * This is the rmdir() entry in the inode_operations structure for 270 - * regular HFS directories. The purpose is to delete an existing 271 - * directory, given the inode for the parent directory and the name 272 - * (and its length) of the existing directory. 273 - */ 274 - static int hfs_rmdir(struct inode *dir, struct dentry *dentry) 275 - { 276 - struct inode *inode; 277 - int res; 278 - 279 - inode = dentry->d_inode; 280 - if (inode->i_size != 2) 256 + if (S_ISDIR(inode->i_mode) && inode->i_size != 2) 281 257 return -ENOTEMPTY; 282 258 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 283 259 if (res) ··· 283 307 284 308 /* Unlink destination if it already exists */ 285 309 if (new_dentry->d_inode) { 286 - res = hfs_unlink(new_dir, new_dentry); 310 + res = hfs_remove(new_dir, new_dentry); 287 311 if (res) 288 312 return res; 289 313 } ··· 308 332 const struct inode_operations hfs_dir_inode_operations = { 309 333 .create = hfs_create, 310 334 .lookup = hfs_lookup, 311 - .unlink = hfs_unlink, 335 + .unlink = hfs_remove, 312 336 .mkdir = hfs_mkdir, 313 - .rmdir = hfs_rmdir, 337 + .rmdir = hfs_remove, 314 338 .rename = hfs_rename, 315 339 .setattr = hfs_inode_setattr, 316 340 };
+1 -1
fs/jfs/namei.c
··· 1600 1600 1601 1601 static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) 1602 1602 { 1603 - if (nd->flags & LOOKUP_RCU) 1603 + if (nd && nd->flags & LOOKUP_RCU) 1604 1604 return -ECHILD; 1605 1605 /* 1606 1606 * This is not negative dentry. Always valid.
+2 -6
fs/minix/namei.c
··· 213 213 new_de = minix_find_entry(new_dentry, &new_page); 214 214 if (!new_de) 215 215 goto out_dir; 216 - inode_inc_link_count(old_inode); 217 216 minix_set_link(new_de, new_page, old_inode); 218 217 new_inode->i_ctime = CURRENT_TIME_SEC; 219 218 if (dir_de) ··· 224 225 if (new_dir->i_nlink >= info->s_link_max) 225 226 goto out_dir; 226 227 } 227 - inode_inc_link_count(old_inode); 228 228 err = minix_add_link(new_dentry, old_inode); 229 - if (err) { 230 - inode_dec_link_count(old_inode); 229 + if (err) 231 230 goto out_dir; 232 - } 233 231 if (dir_de) 234 232 inode_inc_link_count(new_dir); 235 233 } 236 234 237 235 minix_delete_entry(old_de, old_page); 238 - inode_dec_link_count(old_inode); 236 + mark_inode_dirty(old_inode); 239 237 240 238 if (dir_de) { 241 239 minix_set_link(dir_de, dir_page, new_dir);
+11 -3
fs/namei.c
··· 1546 1546 /* nd->path had been dropped */ 1547 1547 current->total_link_count = 0; 1548 1548 nd->path = save; 1549 + nd->inode = save.dentry->d_inode; 1549 1550 path_get(&nd->path); 1550 1551 nd->flags |= LOOKUP_REVAL; 1551 1552 result = link_path_walk(name, nd); ··· 2456 2455 /* !O_CREAT, simple open */ 2457 2456 error = do_path_lookup(dfd, pathname, flags, &nd); 2458 2457 if (unlikely(error)) 2459 - goto out_filp; 2458 + goto out_filp2; 2460 2459 error = -ELOOP; 2461 2460 if (!(nd.flags & LOOKUP_FOLLOW)) { 2462 2461 if (nd.inode->i_op->follow_link) 2463 - goto out_path; 2462 + goto out_path2; 2464 2463 } 2465 2464 error = -ENOTDIR; 2466 2465 if (nd.flags & LOOKUP_DIRECTORY) { 2467 2466 if (!nd.inode->i_op->lookup) 2468 - goto out_path; 2467 + goto out_path2; 2469 2468 } 2470 2469 audit_inode(pathname, nd.path.dentry); 2471 2470 filp = finish_open(&nd, open_flag, acc_mode); 2471 + out2: 2472 2472 release_open_intent(&nd); 2473 2473 return filp; 2474 + 2475 + out_path2: 2476 + path_put(&nd.path); 2477 + out_filp2: 2478 + filp = ERR_PTR(error); 2479 + goto out2; 2474 2480 2475 2481 creat: 2476 2482 /* OK, have to create the file. Find the parent. */
+42 -2
fs/nfs/nfs4proc.c
··· 51 51 #include <linux/sunrpc/bc_xprt.h> 52 52 #include <linux/xattr.h> 53 53 #include <linux/utsname.h> 54 + #include <linux/mm.h> 54 55 55 56 #include "nfs4_fs.h" 56 57 #include "delegation.h" ··· 3253 3252 } 3254 3253 } 3255 3254 3255 + static int buf_to_pages_noslab(const void *buf, size_t buflen, 3256 + struct page **pages, unsigned int *pgbase) 3257 + { 3258 + struct page *newpage, **spages; 3259 + int rc = 0; 3260 + size_t len; 3261 + spages = pages; 3262 + 3263 + do { 3264 + len = min(PAGE_CACHE_SIZE, buflen); 3265 + newpage = alloc_page(GFP_KERNEL); 3266 + 3267 + if (newpage == NULL) 3268 + goto unwind; 3269 + memcpy(page_address(newpage), buf, len); 3270 + buf += len; 3271 + buflen -= len; 3272 + *pages++ = newpage; 3273 + rc++; 3274 + } while (buflen != 0); 3275 + 3276 + return rc; 3277 + 3278 + unwind: 3279 + for(; rc > 0; rc--) 3280 + __free_page(spages[rc-1]); 3281 + return -ENOMEM; 3282 + } 3283 + 3256 3284 struct nfs4_cached_acl { 3257 3285 int cached; 3258 3286 size_t len; ··· 3450 3420 .rpc_argp = &arg, 3451 3421 .rpc_resp = &res, 3452 3422 }; 3453 - int ret; 3423 + int ret, i; 3454 3424 3455 3425 if (!nfs4_server_supports_acls(server)) 3456 3426 return -EOPNOTSUPP; 3427 + i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3428 + if (i < 0) 3429 + return i; 3457 3430 nfs_inode_return_delegation(inode); 3458 - buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3459 3431 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3432 + 3433 + /* 3434 + * Free each page after tx, so the only ref left is 3435 + * held by the network stack 3436 + */ 3437 + for (; i > 0; i--) 3438 + put_page(pages[i-1]); 3439 + 3460 3440 /* 3461 3441 * Acl update can result in inode attribute update. 3462 3442 * so mark the attribute cache invalid.
+1 -1
fs/nfsd/nfs4callback.c
··· 432 432 * If the server returns different values for sessionID, slotID or 433 433 * sequence number, the server is looney tunes. 434 434 */ 435 - p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4); 435 + p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4); 436 436 if (unlikely(p == NULL)) 437 437 goto out_overflow; 438 438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
+7 -6
fs/nfsd/nfs4state.c
··· 2445 2445 static struct nfs4_delegation * 2446 2446 find_delegation_file(struct nfs4_file *fp, stateid_t *stid) 2447 2447 { 2448 - struct nfs4_delegation *dp = NULL; 2448 + struct nfs4_delegation *dp; 2449 2449 2450 2450 spin_lock(&recall_lock); 2451 - list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { 2452 - if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) 2453 - break; 2454 - } 2451 + list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 2452 + if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) { 2453 + spin_unlock(&recall_lock); 2454 + return dp; 2455 + } 2455 2456 spin_unlock(&recall_lock); 2456 - return dp; 2457 + return NULL; 2457 2458 } 2458 2459 2459 2460 int share_access_to_flags(u32 share_access)
+2 -2
fs/nfsd/nfs4xdr.c
··· 1142 1142 1143 1143 u32 dummy; 1144 1144 char *machine_name; 1145 - int i; 1145 + int i, j; 1146 1146 int nr_secflavs; 1147 1147 1148 1148 READ_BUF(16); ··· 1215 1215 READ_BUF(4); 1216 1216 READ32(dummy); 1217 1217 READ_BUF(dummy * 4); 1218 - for (i = 0; i < dummy; ++i) 1218 + for (j = 0; j < dummy; ++j) 1219 1219 READ32(dummy); 1220 1220 break; 1221 1221 case RPC_AUTH_GSS:
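The nfs4xdr change is a reused-loop-index fix: the inner decode loop counted with i, clobbering the outer iteration over security flavors, so only the first flavor was processed. Giving the inner loop its own j restores the outer count. A compact reproduction of the bug and the fix:

#include <stdio.h>

int main(void)
{
	int outer_buggy = 0, outer_fixed = 0;
	int i, j;

	/* Buggy: the inner loop reuses i, so the outer loop runs once. */
	for (i = 0; i < 3; i++) {
		for (i = 0; i < 4; i++)
			;	/* pretend to decode 4 words */
		outer_buggy++;
	}

	/* Fixed: the inner loop gets its own counter. */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 4; j++)
			;
		outer_fixed++;
	}

	printf("buggy outer iterations: %d, fixed: %d\n",
	       outer_buggy, outer_fixed);	/* prints 1 vs 3 */
	return 0;
}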
+1 -7
fs/nilfs2/namei.c
··· 397 397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); 398 398 if (!new_de) 399 399 goto out_dir; 400 - inc_nlink(old_inode); 401 400 nilfs_set_link(new_dir, new_de, new_page, old_inode); 402 401 nilfs_mark_inode_dirty(new_dir); 403 402 new_inode->i_ctime = CURRENT_TIME; ··· 410 411 if (new_dir->i_nlink >= NILFS_LINK_MAX) 411 412 goto out_dir; 412 413 } 413 - inc_nlink(old_inode); 414 414 err = nilfs_add_link(new_dentry, old_inode); 415 - if (err) { 416 - drop_nlink(old_inode); 417 - nilfs_mark_inode_dirty(old_inode); 415 + if (err) 418 416 goto out_dir; 419 - } 420 417 if (dir_de) { 421 418 inc_nlink(new_dir); 422 419 nilfs_mark_inode_dirty(new_dir); ··· 426 431 old_inode->i_ctime = CURRENT_TIME; 427 432 428 433 nilfs_delete_entry(old_de, old_page); 429 - drop_nlink(old_inode); 430 434 431 435 if (dir_de) { 432 436 nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+2 -1
fs/nilfs2/segment.c
··· 430 430 nilfs_segctor_map_segsum_entry( 431 431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); 432 432 433 - if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) 433 + if (NILFS_I(inode)->i_root && 434 + !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) 434 435 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 435 436 /* skip finfo */ 436 437 }
+1 -1
fs/ocfs2/dcache.c
··· 56 56 int ret = 0; /* if all else fails, just return false */ 57 57 struct ocfs2_super *osb; 58 58 59 - if (nd->flags & LOOKUP_RCU) 59 + if (nd && nd->flags & LOOKUP_RCU) 60 60 return -ECHILD; 61 61 62 62 inode = dentry->d_inode;
+8
fs/open.c
··· 233 233 234 234 if (!(file->f_mode & FMODE_WRITE)) 235 235 return -EBADF; 236 + 237 + /* It is not possible to punch a hole in an append-only file */ 238 + if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode)) 239 + return -EPERM; 240 + 241 + if (IS_IMMUTABLE(inode)) 242 + return -EPERM; 243 + 236 244 /* 237 245 * Revalidate the write permissions, in case security policy has 238 246 * changed since the files were opened.
-30
fs/proc/base.c
··· 2620 2620 &proc_self_inode_operations, NULL, {}), 2621 2621 }; 2622 2622 2623 - /* 2624 - * Exceptional case: normally we are not allowed to unhash a busy 2625 - * directory. In this case, however, we can do it - no aliasing problems 2626 - * due to the way we treat inodes. 2627 - */ 2628 - static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd) 2629 - { 2630 - struct inode *inode; 2631 - struct task_struct *task; 2632 - 2633 - if (nd->flags & LOOKUP_RCU) 2634 - return -ECHILD; 2635 - 2636 - inode = dentry->d_inode; 2637 - task = get_proc_task(inode); 2638 - if (task) { 2639 - put_task_struct(task); 2640 - return 1; 2641 - } 2642 - d_drop(dentry); 2643 - return 0; 2644 - } 2645 - 2646 - static const struct dentry_operations proc_base_dentry_operations = 2647 - { 2648 - .d_revalidate = proc_base_revalidate, 2649 - .d_delete = pid_delete_dentry, 2650 - }; 2651 - 2652 2623 static struct dentry *proc_base_instantiate(struct inode *dir, 2653 2624 struct dentry *dentry, struct task_struct *task, const void *ptr) 2654 2625 { ··· 2656 2685 if (p->fop) 2657 2686 inode->i_fop = p->fop; 2658 2687 ei->op = p->op; 2659 - d_set_d_op(dentry, &proc_base_dentry_operations); 2660 2688 d_add(dentry, inode); 2661 2689 error = NULL; 2662 2690 out:
+6 -2
fs/proc/inode.c
··· 27 27 static void proc_evict_inode(struct inode *inode) 28 28 { 29 29 struct proc_dir_entry *de; 30 + struct ctl_table_header *head; 30 31 31 32 truncate_inode_pages(&inode->i_data, 0); 32 33 end_writeback(inode); ··· 39 38 de = PROC_I(inode)->pde; 40 39 if (de) 41 40 pde_put(de); 42 - if (PROC_I(inode)->sysctl) 43 - sysctl_head_put(PROC_I(inode)->sysctl); 41 + head = PROC_I(inode)->sysctl; 42 + if (head) { 43 + rcu_assign_pointer(PROC_I(inode)->sysctl, NULL); 44 + sysctl_head_put(head); 45 + } 44 46 } 45 47 46 48 struct vfsmount *proc_mnt;
+1 -1
fs/proc/proc_devtree.c
··· 233 233 return; 234 234 root = of_find_node_by_path("/"); 235 235 if (root == NULL) { 236 - printk(KERN_ERR "/proc/device-tree: can't find root\n"); 236 + pr_debug("/proc/device-tree: can't find root\n"); 237 237 return; 238 238 } 239 239 proc_device_tree_add_node(root, proc_device_tree);
+5 -2
fs/proc/proc_sysctl.c
··· 408 408 const struct dentry *dentry, const struct inode *inode, 409 409 unsigned int len, const char *str, const struct qstr *name) 410 410 { 411 + struct ctl_table_header *head; 411 412 /* Although proc doesn't have negative dentries, rcu-walk means 412 413 * that inode here can be NULL */ 414 + /* AV: can it, indeed? */ 413 415 if (!inode) 414 - return 0; 416 + return 1; 415 417 if (name->len != len) 416 418 return 1; 417 419 if (memcmp(name->name, str, len)) 418 420 return 1; 419 - return !sysctl_is_seen(PROC_I(inode)->sysctl); 421 + head = rcu_dereference(PROC_I(inode)->sysctl); 422 + return !head || !sysctl_is_seen(head); 420 423 } 421 424 422 425 static const struct dentry_operations proc_sys_dentry_operations = {
+1 -1
fs/reiserfs/namei.c
··· 771 771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, 772 772 dentry, inode, &security); 773 773 if (retval) { 774 - dir->i_nlink--; 774 + DEC_DIR_INODE_NLINK(dir) 775 775 goto out_failed; 776 776 } 777 777
-2
fs/reiserfs/xattr.c
··· 978 978 979 979 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) 980 980 { 981 - if (nd->flags & LOOKUP_RCU) 982 - return -ECHILD; 983 981 return -EPERM; 984 982 } 985 983
+2 -6
fs/sysv/namei.c
··· 245 245 new_de = sysv_find_entry(new_dentry, &new_page); 246 246 if (!new_de) 247 247 goto out_dir; 248 - inode_inc_link_count(old_inode); 249 248 sysv_set_link(new_de, new_page, old_inode); 250 249 new_inode->i_ctime = CURRENT_TIME_SEC; 251 250 if (dir_de) ··· 256 257 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 257 258 goto out_dir; 258 259 } 259 - inode_inc_link_count(old_inode); 260 260 err = sysv_add_link(new_dentry, old_inode); 261 - if (err) { 262 - inode_dec_link_count(old_inode); 261 + if (err) 263 262 goto out_dir; 264 - } 265 263 if (dir_de) 266 264 inode_inc_link_count(new_dir); 267 265 } 268 266 269 267 sysv_delete_entry(old_de, old_page); 270 - inode_dec_link_count(old_inode); 268 + mark_inode_dirty(old_inode); 271 269 272 270 if (dir_de) { 273 271 sysv_set_link(dir_de, dir_page, new_dir);
+5 -6
fs/udf/namei.c
··· 32 32 #include <linux/crc-itu-t.h> 33 33 #include <linux/exportfs.h> 34 34 35 + enum { UDF_MAX_LINKS = 0xffff }; 36 + 35 37 static inline int udf_match(int len1, const unsigned char *name1, int len2, 36 38 const unsigned char *name2) 37 39 { ··· 652 650 struct udf_inode_info *iinfo; 653 651 654 652 err = -EMLINK; 655 - if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) 653 + if (dir->i_nlink >= UDF_MAX_LINKS) 656 654 goto out; 657 655 658 656 err = -EIO; ··· 1036 1034 struct fileIdentDesc cfi, *fi; 1037 1035 int err; 1038 1036 1039 - if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1037 + if (inode->i_nlink >= UDF_MAX_LINKS) 1040 1038 return -EMLINK; 1041 - } 1042 1039 1043 1040 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); 1044 1041 if (!fi) { ··· 1132 1131 goto end_rename; 1133 1132 1134 1133 retval = -EMLINK; 1135 - if (!new_inode && 1136 - new_dir->i_nlink >= 1137 - (256 << sizeof(new_dir->i_nlink)) - 1) 1134 + if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS) 1138 1135 goto end_rename; 1139 1136 } 1140 1137 if (!nfi) {
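The udf change replaces (256 << sizeof(i_nlink)) - 1 with an explicit UDF_MAX_LINKS = 0xffff. The old expression shifted by a size in bytes rather than bits: with a 4-byte link count it evaluates to 4095, not the 65535 that the on-disk 16-bit link field allows. A one-liner to verify the arithmetic (the 4-byte width is an assumption here, for illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int i_nlink = 0;	/* assume a 4-byte link count */

	/* sizeof() counts bytes, so the shift is far too small. */
	printf("(256 << sizeof) - 1 = %lu\n",
	       (256UL << sizeof(i_nlink)) - 1);	/* 4095 */
	printf("UDF_MAX_LINKS       = %u\n", 0xffff);	/* 65535 */
	return 0;
}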
+2 -7
fs/ufs/namei.c
··· 306 306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); 307 307 if (!new_de) 308 308 goto out_dir; 309 - inode_inc_link_count(old_inode); 310 309 ufs_set_link(new_dir, new_de, new_page, old_inode); 311 310 new_inode->i_ctime = CURRENT_TIME_SEC; 312 311 if (dir_de) ··· 317 318 if (new_dir->i_nlink >= UFS_LINK_MAX) 318 319 goto out_dir; 319 320 } 320 - inode_inc_link_count(old_inode); 321 321 err = ufs_add_link(new_dentry, old_inode); 322 - if (err) { 323 - inode_dec_link_count(old_inode); 322 + if (err) 324 323 goto out_dir; 325 - } 326 324 if (dir_de) 327 325 inode_inc_link_count(new_dir); 328 326 } ··· 327 331 /* 328 332 * Like most other Unix systems, set the ctime for inodes on a 329 333 * rename. 330 - * inode_dec_link_count() will mark the inode dirty. 331 334 */ 332 335 old_inode->i_ctime = CURRENT_TIME_SEC; 333 336 334 337 ufs_delete_entry(old_dir, old_de, old_page); 335 - inode_dec_link_count(old_inode); 338 + mark_inode_dirty(old_inode); 336 339 337 340 if (dir_de) { 338 341 ufs_set_link(old_inode, dir_de, dir_page, new_dir);
+8 -3
fs/xfs/linux-2.6/xfs_ioctl.c
··· 695 695 xfs_mount_t *mp, 696 696 void __user *arg) 697 697 { 698 - xfs_fsop_geom_v1_t fsgeo; 698 + xfs_fsop_geom_t fsgeo; 699 699 int error; 700 700 701 - error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); 701 + error = xfs_fs_geometry(mp, &fsgeo, 3); 702 702 if (error) 703 703 return -error; 704 704 705 - if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) 705 + /* 706 + * Caller should have passed an argument of type 707 + * xfs_fsop_geom_v1_t. This is a proper subset of the 708 + * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 709 + */ 710 + if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) 706 711 return -XFS_ERROR(EFAULT); 707 712 return 0; 708 713 }
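The xfs_ioctl fix depends on xfs_fsop_geom_v1_t being a strict prefix of xfs_fsop_geom_t, as the new comment states: fill the full structure, then copy out only sizeof(v1) bytes to preserve the old ABI instead of casting between mismatched types. A sketch of the prefix-struct convention with hypothetical field names:

#include <stdio.h>
#include <string.h>

/* Old ABI: version 1 of the geometry structure. */
struct geom_v1 {
	unsigned int blocksize;
	unsigned int datablocks;
};

/* Current ABI: must keep v1's fields first, in the same order. */
struct geom {
	unsigned int blocksize;
	unsigned int datablocks;
	unsigned int logblocks;	/* added after v1 */
};

int main(void)
{
	struct geom g = { 4096, 1000000, 2048 };
	struct geom_v1 out;

	/* Copy only the v1 prefix; the newer tail is simply dropped. */
	memcpy(&out, &g, sizeof(out));
	printf("v1 copy: bs=%u blocks=%u\n", out.blocksize, out.datablocks);
	return 0;
}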
-1
include/keys/rxrpc-type.h
··· 99 99 * structure of raw payloads passed to add_key() or instantiate key 100 100 */ 101 101 struct rxrpc_key_data_v1 { 102 - u32 kif_version; /* 1 */ 103 102 u16 security_index; 104 103 u16 ticket_length; 105 104 u32 expiry; /* time_t */
+1 -4
include/linux/blkdev.h
··· 699 699 extern void blk_stop_queue(struct request_queue *q); 700 700 extern void blk_sync_queue(struct request_queue *q); 701 701 extern void __blk_stop_queue(struct request_queue *q); 702 - extern void __blk_run_queue(struct request_queue *); 702 + extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); 703 703 extern void blk_run_queue(struct request_queue *); 704 704 extern int blk_rq_map_user(struct request_queue *, struct request *, 705 705 struct rq_map_data *, void __user *, unsigned long, ··· 1088 1088 1089 1089 struct work_struct; 1090 1090 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1091 - int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); 1092 1091 1093 1092 #ifdef CONFIG_BLK_CGROUP 1094 1093 /* ··· 1135 1136 extern int blk_throtl_init(struct request_queue *q); 1136 1137 extern void blk_throtl_exit(struct request_queue *q); 1137 1138 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); 1138 - extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); 1139 1139 extern void throtl_shutdown_timer_wq(struct request_queue *q); 1140 1140 #else /* CONFIG_BLK_DEV_THROTTLING */ 1141 1141 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) ··· 1144 1146 1145 1147 static inline int blk_throtl_init(struct request_queue *q) { return 0; } 1146 1148 static inline int blk_throtl_exit(struct request_queue *q) { return 0; } 1147 - static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} 1148 1149 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} 1149 1150 #endif /* CONFIG_BLK_DEV_THROTTLING */ 1150 1151
-1
include/linux/blktrace_api.h
··· 245 245 246 246 extern void blk_dump_cmd(char *buf, struct request *rq); 247 247 extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); 248 - extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); 249 248 250 249 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 251 250
+1 -1
include/linux/ceph/messenger.h
··· 123 123 #define SOCK_CLOSED 11 /* socket state changed to closed */ 124 124 #define OPENING 13 /* open connection w/ (possibly new) peer */ 125 125 #define DEAD 14 /* dead, about to kfree */ 126 + #define BACKOFF 15 126 127 127 128 /* 128 129 * A single connection with another host. ··· 161 160 struct list_head out_queue; 162 161 struct list_head out_sent; /* sending or sent but unacked */ 163 162 u64 out_seq; /* last message queued for send */ 164 - bool out_keepalive_pending; 165 163 166 164 u64 in_seq, in_seq_acked; /* last message received, acked */ 167 165
+7 -4
include/linux/gfp.h
··· 332 332 return alloc_pages_current(gfp_mask, order); 333 333 } 334 334 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 335 - struct vm_area_struct *vma, unsigned long addr); 335 + struct vm_area_struct *vma, unsigned long addr, 336 + int node); 336 337 #else 337 338 #define alloc_pages(gfp_mask, order) \ 338 339 alloc_pages_node(numa_node_id(), gfp_mask, order) 339 - #define alloc_pages_vma(gfp_mask, order, vma, addr) \ 340 + #define alloc_pages_vma(gfp_mask, order, vma, addr, node) \ 340 341 alloc_pages(gfp_mask, order) 341 342 #endif 342 343 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 343 - #define alloc_page_vma(gfp_mask, vma, addr) \ 344 - alloc_pages_vma(gfp_mask, 0, vma, addr) 344 + #define alloc_page_vma(gfp_mask, vma, addr) \ 345 + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) 346 + #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ 347 + alloc_pages_vma(gfp_mask, 0, vma, addr, node) 345 348 346 349 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 347 350 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+1
include/linux/mfd/wm8994/core.h
··· 71 71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; 72 72 73 73 /* Used over suspend/resume */ 74 + bool suspended; 74 75 u16 ldo_regs[WM8994_NUM_LDO_REGS]; 75 76 u16 gpio_regs[WM8994_NUM_GPIO_REGS]; 76 77
+3
include/linux/netdevice.h
··· 2392 2392 extern int netdev_info(const struct net_device *dev, const char *format, ...) 2393 2393 __attribute__ ((format (printf, 2, 3))); 2394 2394 2395 + #define MODULE_ALIAS_NETDEV(device) \ 2396 + MODULE_ALIAS("netdev-" device) 2397 + 2395 2398 #if defined(DEBUG) 2396 2399 #define netdev_dbg(__dev, format, args...) \ 2397 2400 netdev_printk(KERN_DEBUG, __dev, format, ##args)
-3
include/linux/ptrace.h
··· 102 102 103 103 extern long arch_ptrace(struct task_struct *child, long request, 104 104 unsigned long addr, unsigned long data); 105 - extern int ptrace_traceme(void); 106 105 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 107 106 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 108 - extern int ptrace_attach(struct task_struct *tsk); 109 - extern int ptrace_detach(struct task_struct *, unsigned int); 110 107 extern void ptrace_disable(struct task_struct *); 111 108 extern int ptrace_check_attach(struct task_struct *task, int kill); 112 109 extern int ptrace_request(struct task_struct *child, long request,
+10 -4
include/linux/sysctl.h
··· 930 930 931 931 #ifdef __KERNEL__ 932 932 #include <linux/list.h> 933 + #include <linux/rcupdate.h> 933 934 934 935 /* For the /proc/sys support */ 935 936 struct ctl_table; ··· 1038 1037 struct ctl_table trees. */ 1039 1038 struct ctl_table_header 1040 1039 { 1041 - struct ctl_table *ctl_table; 1042 - struct list_head ctl_entry; 1043 - int used; 1044 - int count; 1040 + union { 1041 + struct { 1042 + struct ctl_table *ctl_table; 1043 + struct list_head ctl_entry; 1044 + int used; 1045 + int count; 1046 + }; 1047 + struct rcu_head rcu; 1048 + }; 1045 1049 struct completion *unregistering; 1046 1050 struct ctl_table *ctl_table_arg; 1047 1051 struct ctl_table_root *root;
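The sysctl.h change overlays the header's live bookkeeping fields with a struct rcu_head in an anonymous union: once the header is being torn down those fields are dead, so the same storage is reused for the deferred-free callback and the structure does not grow. A userspace model of that overlay (a plain callback struct stands in for the kernel's rcu_head; anonymous struct members need C11 or a GNU extension):

#include <stdio.h>

struct fake_rcu_head {
	void (*func)(struct fake_rcu_head *);
	struct fake_rcu_head *next;
};

struct header {
	union {
		struct {	/* live fields, used while registered */
			int used;
			int count;
		};
		struct fake_rcu_head rcu;	/* reused after teardown */
	};
	int refcount;	/* fields outside the union stay valid */
};

static void free_cb(struct fake_rcu_head *h)
{
	printf("deferred free of header at %p\n", (void *)h);
}

int main(void)
{
	struct header h = { .refcount = 1 };

	h.used = 1;
	h.count = 2;
	/* Teardown: the live fields are dead, reuse their storage. */
	h.rcu.func = free_cb;
	h.rcu.func(&h.rcu);
	printf("sizeof(header) unchanged by the union: %zu\n",
	       sizeof(struct header));
	return 0;
}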
+3 -3
include/trace/events/block.h
··· 31 31 0 : blk_rq_sectors(rq); 32 32 __entry->errors = rq->errors; 33 33 34 - blk_fill_rwbs_rq(__entry->rwbs, rq); 34 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 35 35 blk_dump_cmd(__get_str(cmd), rq); 36 36 ), 37 37 ··· 118 118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 119 119 blk_rq_bytes(rq) : 0; 120 120 121 - blk_fill_rwbs_rq(__entry->rwbs, rq); 121 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 122 122 blk_dump_cmd(__get_str(cmd), rq); 123 123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 124 124 ), ··· 563 563 __entry->nr_sector = blk_rq_sectors(rq); 564 564 __entry->old_dev = dev; 565 565 __entry->old_sector = from; 566 - blk_fill_rwbs_rq(__entry->rwbs, rq); 566 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 567 567 ), 568 568 569 569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+5 -2
kernel/cpuset.c
··· 1575 1575 return -ENODEV; 1576 1576 1577 1577 trialcs = alloc_trial_cpuset(cs); 1578 - if (!trialcs) 1579 - return -ENOMEM; 1578 + if (!trialcs) { 1579 + retval = -ENOMEM; 1580 + goto out; 1581 + } 1580 1582 1581 1583 switch (cft->private) { 1582 1584 case FILE_CPULIST: ··· 1593 1591 } 1594 1592 1595 1593 free_trial_cpuset(trialcs); 1594 + out: 1596 1595 cgroup_unlock(); 1597 1596 return retval; 1598 1597 }
+3 -3
kernel/ptrace.c
··· 163 163 return !err; 164 164 } 165 165 166 - int ptrace_attach(struct task_struct *task) 166 + static int ptrace_attach(struct task_struct *task) 167 167 { 168 168 int retval; 169 169 ··· 219 219 * Performs checks and sets PT_PTRACED. 220 220 * Should be used by all ptrace implementations for PTRACE_TRACEME. 221 221 */ 222 - int ptrace_traceme(void) 222 + static int ptrace_traceme(void) 223 223 { 224 224 int ret = -EPERM; 225 225 ··· 293 293 return false; 294 294 } 295 295 296 - int ptrace_detach(struct task_struct *child, unsigned int data) 296 + static int ptrace_detach(struct task_struct *child, unsigned int data) 297 297 { 298 298 bool dead = false; 299 299
+9 -5
kernel/sched_rt.c
··· 210 210 211 211 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 212 212 { 213 - int this_cpu = smp_processor_id(); 214 213 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 215 214 struct sched_rt_entity *rt_se; 216 215 217 - rt_se = rt_rq->tg->rt_se[this_cpu]; 216 + int cpu = cpu_of(rq_of_rt_rq(rt_rq)); 217 + 218 + rt_se = rt_rq->tg->rt_se[cpu]; 218 219 219 220 if (rt_rq->rt_nr_running) { 220 221 if (rt_se && !on_rt_rq(rt_se)) ··· 227 226 228 227 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 229 228 { 230 - int this_cpu = smp_processor_id(); 231 229 struct sched_rt_entity *rt_se; 230 + int cpu = cpu_of(rq_of_rt_rq(rt_rq)); 232 231 233 - rt_se = rt_rq->tg->rt_se[this_cpu]; 232 + rt_se = rt_rq->tg->rt_se[cpu]; 234 233 235 234 if (rt_se && on_rt_rq(rt_se)) 236 235 dequeue_rt_entity(rt_se); ··· 566 565 if (rt_rq->rt_time || rt_rq->rt_nr_running) 567 566 idle = 0; 568 567 raw_spin_unlock(&rt_rq->rt_runtime_lock); 569 - } else if (rt_rq->rt_nr_running) 568 + } else if (rt_rq->rt_nr_running) { 570 569 idle = 0; 570 + if (!rt_rq_throttled(rt_rq)) 571 + enqueue = 1; 572 + } 571 573 572 574 if (enqueue) 573 575 sched_rt_rq_enqueue(rt_rq);
+10 -5
kernel/sysctl.c
··· 194 194 static struct ctl_table root_table[]; 195 195 static struct ctl_table_root sysctl_table_root; 196 196 static struct ctl_table_header root_table_header = { 197 - .count = 1, 197 + {{.count = 1, 198 198 .ctl_table = root_table, 199 - .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list), 199 + .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, 200 200 .root = &sysctl_table_root, 201 201 .set = &sysctl_table_root.default_set, 202 202 }; ··· 1567 1567 spin_unlock(&sysctl_lock); 1568 1568 } 1569 1569 1570 + static void free_head(struct rcu_head *rcu) 1571 + { 1572 + kfree(container_of(rcu, struct ctl_table_header, rcu)); 1573 + } 1574 + 1570 1575 void sysctl_head_put(struct ctl_table_header *head) 1571 1576 { 1572 1577 spin_lock(&sysctl_lock); 1573 1578 if (!--head->count) 1574 - kfree(head); 1579 + call_rcu(&head->rcu, free_head); 1575 1580 spin_unlock(&sysctl_lock); 1576 1581 } 1577 1582 ··· 1953 1948 start_unregistering(header); 1954 1949 if (!--header->parent->count) { 1955 1950 WARN_ON(1); 1956 - kfree(header->parent); 1951 + call_rcu(&header->parent->rcu, free_head); 1957 1952 } 1958 1953 if (!--header->count) 1959 - kfree(header); 1954 + call_rcu(&header->rcu, free_head); 1960 1955 spin_unlock(&sysctl_lock); 1961 1956 } 1962 1957
-16
kernel/trace/blktrace.c
··· 1827 1827 rwbs[i] = '\0'; 1828 1828 } 1829 1829 1830 - void blk_fill_rwbs_rq(char *rwbs, struct request *rq) 1831 - { 1832 - int rw = rq->cmd_flags & 0x03; 1833 - int bytes; 1834 - 1835 - if (rq->cmd_flags & REQ_DISCARD) 1836 - rw |= REQ_DISCARD; 1837 - 1838 - if (rq->cmd_flags & REQ_SECURE) 1839 - rw |= REQ_SECURE; 1840 - 1841 - bytes = blk_rq_bytes(rq); 1842 - 1843 - blk_fill_rwbs(rwbs, rw, bytes); 1844 - } 1845 - 1846 1830 #endif /* CONFIG_EVENT_TRACING */ 1847 1831
+1 -1
lib/nlattr.c
··· 148 148 { 149 149 int i, len = 0; 150 150 151 - for (i = 0; i < n; i++) { 151 + for (i = 0; i < n; i++, p++) { 152 152 if (p->len) 153 153 len += nla_total_size(p->len); 154 154 else if (nla_attr_minlen[p->type])
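The nlattr fix adds the missing p++: the loop examined p->len n times without ever advancing p, so every policy entry after the first was ignored. A tiny reproduction of the broken and the corrected walk:

#include <stdio.h>

struct policy { int len; };

int main(void)
{
	struct policy tab[3] = { {4}, {8}, {16} };
	const struct policy *p;
	int i, total;

	/* Buggy walk: p never moves, so tab[0] is counted three times. */
	for (i = 0, total = 0, p = tab; i < 3; i++)
		total += p->len;
	printf("buggy total: %d\n", total);	/* 12 */

	/* Fixed walk: advance p alongside i. */
	for (i = 0, total = 0, p = tab; i < 3; i++, p++)
		total += p->len;
	printf("fixed total: %d\n", total);	/* 28 */
	return 0;
}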
+19 -9
mm/huge_memory.c
··· 650 650 651 651 static inline struct page *alloc_hugepage_vma(int defrag, 652 652 struct vm_area_struct *vma, 653 - unsigned long haddr) 653 + unsigned long haddr, int nd) 654 654 { 655 655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), 656 - HPAGE_PMD_ORDER, vma, haddr); 656 + HPAGE_PMD_ORDER, vma, haddr, nd); 657 657 } 658 658 659 659 #ifndef CONFIG_NUMA ··· 678 678 if (unlikely(khugepaged_enter(vma))) 679 679 return VM_FAULT_OOM; 680 680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 681 - vma, haddr); 681 + vma, haddr, numa_node_id()); 682 682 if (unlikely(!page)) 683 683 goto out; 684 684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { ··· 799 799 } 800 800 801 801 for (i = 0; i < HPAGE_PMD_NR; i++) { 802 - pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 803 - vma, address); 802 + pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, 803 + vma, address, page_to_nid(page)); 804 804 if (unlikely(!pages[i] || 805 805 mem_cgroup_newpage_charge(pages[i], mm, 806 806 GFP_KERNEL))) { ··· 902 902 if (transparent_hugepage_enabled(vma) && 903 903 !transparent_hugepage_debug_cow()) 904 904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 905 - vma, haddr); 905 + vma, haddr, numa_node_id()); 906 906 else 907 907 new_page = NULL; 908 908 ··· 1745 1745 static void collapse_huge_page(struct mm_struct *mm, 1746 1746 unsigned long address, 1747 1747 struct page **hpage, 1748 - struct vm_area_struct *vma) 1748 + struct vm_area_struct *vma, 1749 + int node) 1749 1750 { 1750 1751 pgd_t *pgd; 1751 1752 pud_t *pud; ··· 1774 1773 * mmap_sem in read mode is a good idea also to allow greater 1775 1774 * scalability. 1776 1775 */ 1777 - new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); 1776 + new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, 1777 + node); 1778 1778 if (unlikely(!new_page)) { 1779 1779 up_read(&mm->mmap_sem); 1780 1780 *hpage = ERR_PTR(-ENOMEM); ··· 1921 1919 struct page *page; 1922 1920 unsigned long _address; 1923 1921 spinlock_t *ptl; 1922 + int node = -1; 1924 1923 1925 1924 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1926 1925 ··· 1952 1949 page = vm_normal_page(vma, _address, pteval); 1953 1950 if (unlikely(!page)) 1954 1951 goto out_unmap; 1952 + /* 1953 + * Choose the node of the first page. This could 1954 + * be more sophisticated and look at more pages, 1955 + * but isn't for now. 1956 + */ 1957 + if (node == -1) 1958 + node = page_to_nid(page); 1955 1959 VM_BUG_ON(PageCompound(page)); 1956 1960 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 1957 1961 goto out_unmap; ··· 1975 1965 pte_unmap_unlock(pte, ptl); 1976 1966 if (ret) 1977 1967 /* collapse_huge_page will return with the mmap_sem released */ 1978 - collapse_huge_page(mm, address, hpage, vma); 1968 + collapse_huge_page(mm, address, hpage, vma, node); 1979 1969 out: 1980 1970 return ret; 1981 1971 }
+7 -7
mm/mempolicy.c
··· 1524 1524 } 1525 1525 1526 1526 /* Return a zonelist indicated by gfp for node representing a mempolicy */ 1527 - static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) 1527 + static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 1528 + int nd) 1528 1529 { 1529 - int nd = numa_node_id(); 1530 - 1531 1530 switch (policy->mode) { 1532 1531 case MPOL_PREFERRED: 1533 1532 if (!(policy->flags & MPOL_F_LOCAL)) ··· 1678 1679 zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1679 1680 huge_page_shift(hstate_vma(vma))), gfp_flags); 1680 1681 } else { 1681 - zl = policy_zonelist(gfp_flags, *mpol); 1682 + zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); 1682 1683 if ((*mpol)->mode == MPOL_BIND) 1683 1684 *nodemask = &(*mpol)->v.nodes; 1684 1685 } ··· 1819 1820 */ 1820 1821 struct page * 1821 1822 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1822 - unsigned long addr) 1823 + unsigned long addr, int node) 1823 1824 { 1824 1825 struct mempolicy *pol = get_vma_policy(current, vma, addr); 1825 1826 struct zonelist *zl; ··· 1835 1836 put_mems_allowed(); 1836 1837 return page; 1837 1838 } 1838 - zl = policy_zonelist(gfp, pol); 1839 + zl = policy_zonelist(gfp, pol, node); 1839 1840 if (unlikely(mpol_needs_cond_ref(pol))) { 1840 1841 /* 1841 1842 * slow path: ref counted shared policy ··· 1891 1892 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 1892 1893 else 1893 1894 page = __alloc_pages_nodemask(gfp, order, 1894 - policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); 1895 + policy_zonelist(gfp, pol, numa_node_id()), 1896 + policy_nodemask(gfp, pol)); 1895 1897 put_mems_allowed(); 1896 1898 return page; 1897 1899 }
+54 -17
net/ceph/messenger.c
··· 336 336 ceph_msg_put(con->out_msg); 337 337 con->out_msg = NULL; 338 338 } 339 - con->out_keepalive_pending = false; 340 339 con->in_seq = 0; 341 340 con->in_seq_acked = 0; 342 341 } ··· 1247 1248 con->auth_retry); 1248 1249 if (con->auth_retry == 2) { 1249 1250 con->error_msg = "connect authorization failure"; 1250 - reset_connection(con); 1251 - set_bit(CLOSED, &con->state); 1252 1251 return -1; 1253 1252 } 1254 1253 con->auth_retry = 1; ··· 1712 1715 1713 1716 /* open the socket first? */ 1714 1717 if (con->sock == NULL) { 1715 - /* 1716 - * if we were STANDBY and are reconnecting _this_ 1717 - * connection, bump connect_seq now. Always bump 1718 - * global_seq. 1719 - */ 1720 - if (test_and_clear_bit(STANDBY, &con->state)) 1721 - con->connect_seq++; 1722 - 1723 1718 prepare_write_banner(msgr, con); 1724 1719 prepare_write_connect(msgr, con, 1); 1725 1720 prepare_read_banner(con); ··· 1940 1951 work.work); 1941 1952 1942 1953 mutex_lock(&con->mutex); 1954 + if (test_and_clear_bit(BACKOFF, &con->state)) { 1955 + dout("con_work %p backing off\n", con); 1956 + if (queue_delayed_work(ceph_msgr_wq, &con->work, 1957 + round_jiffies_relative(con->delay))) { 1958 + dout("con_work %p backoff %lu\n", con, con->delay); 1959 + mutex_unlock(&con->mutex); 1960 + return; 1961 + } else { 1962 + con->ops->put(con); 1963 + dout("con_work %p FAILED to back off %lu\n", con, 1964 + con->delay); 1965 + } 1966 + } 1943 1967 1968 + if (test_bit(STANDBY, &con->state)) { 1969 + dout("con_work %p STANDBY\n", con); 1970 + goto done; 1971 + } 1944 1972 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ 1945 1973 dout("con_work CLOSED\n"); 1946 1974 con_close_socket(con); ··· 2014 2008 /* Requeue anything that hasn't been acked */ 2015 2009 list_splice_init(&con->out_sent, &con->out_queue); 2016 2010 2017 - /* If there are no messages in the queue, place the connection 2018 - * in a STANDBY state (i.e., don't try to reconnect just yet). */ 2019 - if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { 2020 - dout("fault setting STANDBY\n"); 2011 + /* If there are no messages queued or keepalive pending, place 2012 + * the connection in a STANDBY state */ 2013 + if (list_empty(&con->out_queue) && 2014 + !test_bit(KEEPALIVE_PENDING, &con->state)) { 2015 + dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2016 + clear_bit(WRITE_PENDING, &con->state); 2021 2017 set_bit(STANDBY, &con->state); 2022 2018 } else { 2023 2019 /* retry after a delay. */ ··· 2027 2019 con->delay = BASE_DELAY_INTERVAL; 2028 2020 else if (con->delay < MAX_DELAY_INTERVAL) 2029 2021 con->delay *= 2; 2030 - dout("fault queueing %p delay %lu\n", con, con->delay); 2031 2022 con->ops->get(con); 2032 2023 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2033 - round_jiffies_relative(con->delay)) == 0) 2024 + round_jiffies_relative(con->delay))) { 2025 + dout("fault queued %p delay %lu\n", con, con->delay); 2026 + } else { 2034 2027 con->ops->put(con); 2028 + dout("fault failed to queue %p delay %lu, backoff\n", 2029 + con, con->delay); 2030 + /* 2031 + * In many cases we see a socket state change 2032 + * while con_work is running and end up 2033 + * queuing (non-delayed) work, such that we 2034 + * can't backoff with a delay. Set a flag so 2035 + * that when con_work restarts we schedule the 2036 + * delay then. 
2037 + */ 2038 + set_bit(BACKOFF, &con->state); 2039 + } 2035 2040 } 2036 2041 2037 2042 out_unlock: ··· 2115 2094 } 2116 2095 EXPORT_SYMBOL(ceph_messenger_destroy); 2117 2096 2097 + static void clear_standby(struct ceph_connection *con) 2098 + { 2099 + /* come back from STANDBY? */ 2100 + if (test_and_clear_bit(STANDBY, &con->state)) { 2101 + mutex_lock(&con->mutex); 2102 + dout("clear_standby %p and ++connect_seq\n", con); 2103 + con->connect_seq++; 2104 + WARN_ON(test_bit(WRITE_PENDING, &con->state)); 2105 + WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); 2106 + mutex_unlock(&con->mutex); 2107 + } 2108 + } 2109 + 2118 2110 /* 2119 2111 * Queue up an outgoing message on the given connection. 2120 2112 */ ··· 2160 2126 2161 2127 /* if there wasn't anything waiting to send before, queue 2162 2128 * new work */ 2129 + clear_standby(con); 2163 2130 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2164 2131 queue_con(con); 2165 2132 } ··· 2226 2191 */ 2227 2192 void ceph_con_keepalive(struct ceph_connection *con) 2228 2193 { 2194 + dout("con_keepalive %p\n", con); 2195 + clear_standby(con); 2229 2196 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && 2230 2197 test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2231 2198 queue_con(con);
+13 -5
net/ceph/pagevec.c
··· 16 16 int num_pages, bool write_page) 17 17 { 18 18 struct page **pages; 19 - int rc; 19 + int got = 0; 20 + int rc = 0; 20 21 21 22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); 22 23 if (!pages) 23 24 return ERR_PTR(-ENOMEM); 24 25 25 26 down_read(&current->mm->mmap_sem); 26 - rc = get_user_pages(current, current->mm, (unsigned long)data, 27 - num_pages, write_page, 0, pages, NULL); 27 + while (got < num_pages) { 28 + rc = get_user_pages(current, current->mm, 29 + (unsigned long)data + ((unsigned long)got * PAGE_SIZE), 30 + num_pages - got, write_page, 0, pages + got, NULL); 31 + if (rc < 0) 32 + break; 33 + BUG_ON(rc == 0); 34 + got += rc; 35 + } 28 36 up_read(&current->mm->mmap_sem); 29 - if (rc < num_pages) 37 + if (rc < 0) 30 38 goto fail; 31 39 return pages; 32 40 33 41 fail: 34 - ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); 42 + ceph_put_page_vector(pages, got, false); 35 43 return ERR_PTR(rc); 36 44 } 37 45 EXPORT_SYMBOL(ceph_get_direct_page_vector);
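The pagevec change treats a short return from get_user_pages() the way a careful program treats a short read: keep calling with adjusted offsets until the whole range is satisfied or a real error comes back. The analogous userspace loop over read(2) (here a zero return is EOF and ends the loop early, where the kernel code instead treats it as a bug):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Read exactly count bytes, retrying on short reads. */
static ssize_t read_full(int fd, char *buf, size_t count)
{
	size_t got = 0;

	while (got < count) {
		ssize_t rc = read(fd, buf + got, count - got);

		if (rc < 0) {
			if (errno == EINTR)
				continue;
			return -1;	/* real error: report it */
		}
		if (rc == 0)
			break;		/* EOF: caller sees a short count */
		got += rc;
	}
	return (ssize_t)got;
}

int main(void)
{
	char buf[16];
	ssize_t n = read_full(0, buf, sizeof(buf));

	printf("read %zd bytes\n", n);
	return 0;
}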
+10 -2
net/core/dev.c
··· 1114 1114 void dev_load(struct net *net, const char *name) 1115 1115 { 1116 1116 struct net_device *dev; 1117 + int no_module; 1117 1118 1118 1119 rcu_read_lock(); 1119 1120 dev = dev_get_by_name_rcu(net, name); 1120 1121 rcu_read_unlock(); 1121 1122 1122 - if (!dev && capable(CAP_NET_ADMIN)) 1123 - request_module("%s", name); 1123 + no_module = !dev; 1124 + if (no_module && capable(CAP_NET_ADMIN)) 1125 + no_module = request_module("netdev-%s", name); 1126 + if (no_module && capable(CAP_SYS_MODULE)) { 1127 + if (!request_module("%s", name)) 1128 + pr_err("Loading kernel module for a network device " 1129 + "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " 1130 + "instead\n", name); 1131 + } 1124 1132 } 1125 1133 EXPORT_SYMBOL(dev_load); 1126 1134
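The new dev_load() path first requests a "netdev-<name>" module alias, which drivers declare with the MODULE_ALIAS_NETDEV() macro added to netdevice.h earlier in this diff. The macro relies on C's adjacent-string-literal pasting, shown standalone below (NETDEV_ALIAS is a hypothetical userspace stand-in for the kernel macro):

#include <stdio.h>
#include <string.h>

/* Adjacent string literals are pasted by the compiler, as in
 * #define MODULE_ALIAS_NETDEV(device) MODULE_ALIAS("netdev-" device) */
#define NETDEV_ALIAS(device) "netdev-" device

int main(void)
{
	const char *alias = NETDEV_ALIAS("gre0");
	char request[64];

	/* dev_load() builds the same string at run time with "%s". */
	snprintf(request, sizeof(request), "netdev-%s", "gre0");
	printf("compile time: %s\nrun time:     %s\nequal: %d\n",
	       alias, request, strcmp(alias, request) == 0);
	return 0;
}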
+1 -1
net/core/dev_addr_lists.c
··· 144 144 145 145 list_for_each_entry(ha, &from_list->list, list) { 146 146 type = addr_type ? addr_type : ha->type; 147 - __hw_addr_del(to_list, ha->addr, addr_len, addr_type); 147 + __hw_addr_del(to_list, ha->addr, addr_len, type); 148 148 } 149 149 } 150 150 EXPORT_SYMBOL(__hw_addr_del_multiple);
+1 -1
net/dcb/dcbnl.c
··· 1193 1193 goto err; 1194 1194 } 1195 1195 1196 - if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { 1196 + if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { 1197 1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1198 1198 err = ops->ieee_setpfc(netdev, pfc); 1199 1199 if (err)
+3 -4
net/dccp/input.c
··· 614 614 /* Caller (dccp_v4_do_rcv) will send Reset */ 615 615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 616 616 return 1; 617 + } else if (sk->sk_state == DCCP_CLOSED) { 618 + dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 619 + return 1; 617 620 } 618 621 619 622 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { ··· 671 668 } 672 669 673 670 switch (sk->sk_state) { 674 - case DCCP_CLOSED: 675 - dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 676 - return 1; 677 - 678 671 case DCCP_REQUESTING: 679 672 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); 680 673 if (queued >= 0)
+17 -3
net/dns_resolver/dns_key.c
··· 67 67 size_t result_len = 0; 68 68 const char *data = _data, *end, *opt; 69 69 70 - kenter("%%%d,%s,'%s',%zu", 71 - key->serial, key->description, data, datalen); 70 + kenter("%%%d,%s,'%*.*s',%zu", 71 + key->serial, key->description, 72 + (int)datalen, (int)datalen, data, datalen); 72 73 73 74 if (datalen <= 1 || !data || data[datalen - 1] != '\0') 74 75 return -EINVAL; ··· 218 217 seq_printf(m, ": %u", key->datalen); 219 218 } 220 219 220 + /* 221 + * read the DNS data 222 + * - the key's semaphore is read-locked 223 + */ 224 + static long dns_resolver_read(const struct key *key, 225 + char __user *buffer, size_t buflen) 226 + { 227 + if (key->type_data.x[0]) 228 + return key->type_data.x[0]; 229 + 230 + return user_read(key, buffer, buflen); 231 + } 232 + 221 233 struct key_type key_type_dns_resolver = { 222 234 .name = "dns_resolver", 223 235 .instantiate = dns_resolver_instantiate, ··· 238 224 .revoke = user_revoke, 239 225 .destroy = user_destroy, 240 226 .describe = dns_resolver_describe, 241 - .read = user_read, 227 + .read = dns_resolver_read, 242 228 }; 243 229 244 230 static int __init init_dns_resolver(void)
+1 -1
net/ipv4/ip_gre.c
··· 1765 1765 MODULE_LICENSE("GPL"); 1766 1766 MODULE_ALIAS_RTNL_LINK("gre"); 1767 1767 MODULE_ALIAS_RTNL_LINK("gretap"); 1768 - MODULE_ALIAS("gre0"); 1768 + MODULE_ALIAS_NETDEV("gre0");
+1 -1
net/ipv4/ipip.c
··· 913 913 module_init(ipip_init); 914 914 module_exit(ipip_fini); 915 915 MODULE_LICENSE("GPL"); 916 - MODULE_ALIAS("tunl0"); 916 + MODULE_ALIAS_NETDEV("tunl0");
+10 -7
net/ipv6/route.c
··· 2557 2557 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, 2558 2558 void __user *buffer, size_t *lenp, loff_t *ppos) 2559 2559 { 2560 - struct net *net = current->nsproxy->net_ns; 2561 - int delay = net->ipv6.sysctl.flush_delay; 2562 - if (write) { 2563 - proc_dointvec(ctl, write, buffer, lenp, ppos); 2564 - fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); 2565 - return 0; 2566 - } else 2560 + struct net *net; 2561 + int delay; 2562 + if (!write) 2567 2563 return -EINVAL; 2564 + 2565 + net = (struct net *)ctl->extra1; 2566 + delay = net->ipv6.sysctl.flush_delay; 2567 + proc_dointvec(ctl, write, buffer, lenp, ppos); 2568 + fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); 2569 + return 0; 2568 2570 } 2569 2571 2570 2572 ctl_table ipv6_route_table_template[] = { ··· 2653 2651 2654 2652 if (table) { 2655 2653 table[0].data = &net->ipv6.sysctl.flush_delay; 2654 + table[0].extra1 = net; 2656 2655 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 2657 2656 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2658 2657 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
+1 -1
net/ipv6/sit.c
··· 1290 1290 module_init(sit_init); 1291 1291 module_exit(sit_cleanup); 1292 1292 MODULE_LICENSE("GPL"); 1293 - MODULE_ALIAS("sit0"); 1293 + MODULE_ALIAS_NETDEV("sit0");
+2 -2
net/netfilter/ipvs/ip_vs_ctl.c
··· 808 808 dest->u_threshold = udest->u_threshold; 809 809 dest->l_threshold = udest->l_threshold; 810 810 811 - spin_lock(&dest->dst_lock); 811 + spin_lock_bh(&dest->dst_lock); 812 812 ip_vs_dst_reset(dest); 813 - spin_unlock(&dest->dst_lock); 813 + spin_unlock_bh(&dest->dst_lock); 814 814 815 815 if (add) 816 816 ip_vs_new_estimator(&dest->stats);
+4
net/netfilter/nf_log.c
··· 85 85 86 86 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 87 87 { 88 + if (pf >= ARRAY_SIZE(nf_loggers)) 89 + return -EINVAL; 88 90 mutex_lock(&nf_log_mutex); 89 91 if (__find_logger(pf, logger->name) == NULL) { 90 92 mutex_unlock(&nf_log_mutex); ··· 100 98 101 99 void nf_log_unbind_pf(u_int8_t pf) 102 100 { 101 + if (pf >= ARRAY_SIZE(nf_loggers)) 102 + return; 103 103 mutex_lock(&nf_log_mutex); 104 104 rcu_assign_pointer(nf_loggers[pf], NULL); 105 105 mutex_unlock(&nf_log_mutex);
+14 -4
net/netlink/af_netlink.c
··· 1407 1407 int noblock = flags&MSG_DONTWAIT; 1408 1408 size_t copied; 1409 1409 struct sk_buff *skb, *data_skb; 1410 - int err; 1410 + int err, ret; 1411 1411 1412 1412 if (flags&MSG_OOB) 1413 1413 return -EOPNOTSUPP; ··· 1470 1470 1471 1471 skb_free_datagram(sk, skb); 1472 1472 1473 - if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1474 - netlink_dump(sk); 1473 + if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 1474 + ret = netlink_dump(sk); 1475 + if (ret) { 1476 + sk->sk_err = ret; 1477 + sk->sk_error_report(sk); 1478 + } 1479 + } 1475 1480 1476 1481 scm_recv(sock, msg, siocb->scm, flags); 1477 1482 out: ··· 1741 1736 struct netlink_callback *cb; 1742 1737 struct sock *sk; 1743 1738 struct netlink_sock *nlk; 1739 + int ret; 1744 1740 1745 1741 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1746 1742 if (cb == NULL) ··· 1770 1764 nlk->cb = cb; 1771 1765 mutex_unlock(nlk->cb_mutex); 1772 1766 1773 - netlink_dump(sk); 1767 + ret = netlink_dump(sk); 1768 + 1774 1769 sock_put(sk); 1770 + 1771 + if (ret) 1772 + return ret; 1775 1773 1776 1774 /* We successfully started a dump, by returning -EINTR we 1777 1775 * signal not to send ACK even if it was requested.
+1
net/rxrpc/ar-input.c
··· 423 423 goto protocol_error;
424 424 }
425 425 
426 + case RXRPC_PACKET_TYPE_ACKALL:
426 427 case RXRPC_PACKET_TYPE_ACK:
427 428 /* ACK processing is done in process context */
428 429 read_lock_bh(&call->state_lock);
+2
sound/pci/hda/patch_cirrus.c
··· 1039 1039 {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
1040 1040 {0x11, AC_VERB_SET_PROC_STATE, 0x00},
1041 1041 
1042 + #if 0 /* Don't set to D3 as we are in the power-up sequence */
1042 1043 {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
1043 1044 {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
1044 1045 /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
1046 + #endif
1045 1047 
1046 1048 {} /* terminator */
1047 1049 };
+5
sound/pci/hda/patch_hdmi.c
··· 1634 1634 { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1635 1635 { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1636 1636 { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1637 + { .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1638 + { .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1639 + /* 17 is known to be absent */
1637 1640 { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1638 1641 { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1639 1642 { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
··· 1679 1676 MODULE_ALIAS("snd-hda-codec-id:10de0012");
1680 1677 MODULE_ALIAS("snd-hda-codec-id:10de0013");
1681 1678 MODULE_ALIAS("snd-hda-codec-id:10de0014");
1679 + MODULE_ALIAS("snd-hda-codec-id:10de0015");
1680 + MODULE_ALIAS("snd-hda-codec-id:10de0016");
1682 1681 MODULE_ALIAS("snd-hda-codec-id:10de0018");
1683 1682 MODULE_ALIAS("snd-hda-codec-id:10de0019");
1684 1683 MODULE_ALIAS("snd-hda-codec-id:10de001a");
+3 -6
sound/pci/hda/patch_realtek.c
··· 1133 1133 nid = spec->autocfg.hp_pins[i];
1134 1134 if (!nid)
1135 1135 break;
1136 - if (snd_hda_jack_detect(codec, nid)) {
1137 - spec->jack_present = 1;
1138 - break;
1139 - }
1140 - alc_report_jack(codec, spec->autocfg.hp_pins[i]);
1136 + alc_report_jack(codec, nid);
1137 + spec->jack_present |= snd_hda_jack_detect(codec, nid);
1141 1138 }
1142 1139 
1143 1140 mute = spec->jack_present ? HDA_AMP_MUTE : 0;
··· 15012 15015 SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC),
15013 15016 SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC),
15014 15017 SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC),
15015 - SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC),
15018 + SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC),
15016 15019 SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC),
15017 15020 SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC),
15018 15021 SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
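
The old loop bailed out at the first headphone jack it found present, so
any remaining pins were never reported; the rewrite reports every pin and
accumulates presence with |=. The resulting shape, assuming jack_present
is cleared before the loop as in the surrounding function:

    /* Sketch: poll all pins, report each, OR the detect results. */
    spec->jack_present = 0;
    for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) {
            nid = spec->autocfg.hp_pins[i];
            if (!nid)
                    break;
            alc_report_jack(codec, nid);
            spec->jack_present |= snd_hda_jack_detect(codec, nid);
    }
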
+42 -9
sound/soc/codecs/wm8994.c
··· 110 110 
111 111 unsigned int aif1clk_enable:1;
112 112 unsigned int aif2clk_enable:1;
113 +
114 + unsigned int aif1clk_disable:1;
115 + unsigned int aif2clk_disable:1;
113 116 };
114 117 
115 118 static int wm8994_readable(unsigned int reg)
··· 1018 1015 
1019 1016 switch (event) {
1020 1017 case SND_SOC_DAPM_PRE_PMU:
1021 - if (wm8994->aif1clk_enable)
1018 + if (wm8994->aif1clk_enable) {
1022 1019 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1023 1020 WM8994_AIF1CLK_ENA_MASK,
1024 1021 WM8994_AIF1CLK_ENA);
1025 - if (wm8994->aif2clk_enable)
1022 + wm8994->aif1clk_enable = 0;
1023 + }
1024 + if (wm8994->aif2clk_enable) {
1026 1025 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1027 1026 WM8994_AIF2CLK_ENA_MASK,
1028 1027 WM8994_AIF2CLK_ENA);
1028 + wm8994->aif2clk_enable = 0;
1029 + }
1029 1030 break;
1030 1031 }
1031 1032 
··· 1044 1037 
1045 1038 switch (event) {
1046 1039 case SND_SOC_DAPM_POST_PMD:
1047 - if (wm8994->aif1clk_enable) {
1040 + if (wm8994->aif1clk_disable) {
1048 1041 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1049 1042 WM8994_AIF1CLK_ENA_MASK, 0);
1050 - wm8994->aif1clk_enable = 0;
1043 + wm8994->aif1clk_disable = 0;
1051 1044 }
1052 - if (wm8994->aif2clk_enable) {
1045 + if (wm8994->aif2clk_disable) {
1053 1046 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1054 1047 WM8994_AIF2CLK_ENA_MASK, 0);
1055 - wm8994->aif2clk_enable = 0;
1048 + wm8994->aif2clk_disable = 0;
1056 1049 }
1057 1050 break;
1058 1051 }
··· 1070 1063 case SND_SOC_DAPM_PRE_PMU:
1071 1064 wm8994->aif1clk_enable = 1;
1072 1065 break;
1066 + case SND_SOC_DAPM_POST_PMD:
1067 + wm8994->aif1clk_disable = 1;
1068 + break;
1073 1069 }
1074 1070 
1075 1071 return 0;
··· 1088 1078 case SND_SOC_DAPM_PRE_PMU:
1089 1079 wm8994->aif2clk_enable = 1;
1090 1080 break;
1081 + case SND_SOC_DAPM_POST_PMD:
1082 + wm8994->aif2clk_disable = 1;
1083 + break;
1091 1084 }
1092 1085 
1086 + return 0;
1087 + }
1088 +
1089 + static int adc_mux_ev(struct snd_soc_dapm_widget *w,
1090 + struct snd_kcontrol *kcontrol, int event)
1091 + {
1092 + late_enable_ev(w, kcontrol, event);
1093 1093 return 0;
1094 1094 }
··· 1423 1403 SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
1424 1404 };
1425 1405 
1406 + static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
1407 + SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
1408 + adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
1409 + SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
1410 + adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
1411 + };
1412 +
1413 + static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
1414 + SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
1415 + SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
1416 + };
1417 +
1426 1418 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
1427 1419 SND_SOC_DAPM_INPUT("DMIC1DAT"),
1428 1420 SND_SOC_DAPM_INPUT("DMIC2DAT"),
··· 1528 1496 */
1529 1497 SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
1530 1498 SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
1531 -
1532 - SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
1533 - SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
1534 1499 
1535 1500 SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
1536 1501 SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
··· 3309 3280 if (wm8994->revision < 4) {
3310 3281 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
3311 3282 ARRAY_SIZE(wm8994_lateclk_revd_widgets));
3283 + snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets,
3284 + ARRAY_SIZE(wm8994_adc_revd_widgets));
3312 3285 snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
3313 3286 ARRAY_SIZE(wm8994_dac_revd_widgets));
3314 3287 } else {
3315 3288 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
3316 3289 ARRAY_SIZE(wm8994_lateclk_widgets));
3290 + snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
3291 + ARRAY_SIZE(wm8994_adc_widgets));
3317 3292 snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
3318 3293 ARRAY_SIZE(wm8994_dac_widgets));
3319 3294 }
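
The wm8994 clocking fix splits each single aifNclk_enable flag into a
pending-enable and a pending-disable flag. With one flag doing double
duty, an enable request latched at PRE_PMU could be consumed by the
POST_PMD (disable) handler of the same DAPM sequence, or vice versa. The
intended state machine, reduced to one AIF as a comment-only sketch:

    /* PRE_PMU  on the AIF widget       -> aif1clk_enable  = 1
     * PRE_PMU  on the late-clk widget  -> if aif1clk_enable: gate clock
     *                                     on, clear aif1clk_enable
     * POST_PMD on the AIF widget       -> aif1clk_disable = 1
     * POST_PMD on the late-clk widget  -> if aif1clk_disable: gate clock
     *                                     off, clear aif1clk_disable */
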
+5
sound/soc/codecs/wm9081.c
··· 15 15 #include <linux/moduleparam.h>
16 16 #include <linux/init.h>
17 17 #include <linux/delay.h>
18 + #include <linux/device.h>
18 19 #include <linux/pm.h>
19 20 #include <linux/i2c.h>
20 21 #include <linux/platform_device.h>
··· 1341 1340 i2c_set_clientdata(i2c, wm9081);
1342 1341 wm9081->control_type = SND_SOC_I2C;
1343 1342 wm9081->control_data = i2c;
1343 +
1344 + if (dev_get_platdata(&i2c->dev))
1345 + memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev),
1346 + sizeof(wm9081->retune));
1344 1347 
1345 1348 ret = snd_soc_register_codec(&i2c->dev,
1346 1349 &soc_codec_dev_wm9081, &wm9081_dai, 1);
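
The wm9081 probe now copies optional platform data into the driver state;
dev_get_platdata() is simply a typed accessor for dev->platform_data and
returns NULL when the board registered none. Sketch of the idiom (the
pdata struct name below is illustrative, not taken from the patch):

    /* Sketch: consume optional platform data at probe time. */
    struct my_board_pdata *pdata = dev_get_platdata(&i2c->dev);

    if (pdata)      /* boards without pdata just skip the copy */
            memcpy(&wm9081->retune, pdata, sizeof(wm9081->retune));
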
+8 -3
tools/perf/util/header.c
··· 270 270 const char *name, bool is_kallsyms)
271 271 {
272 272 const size_t size = PATH_MAX;
273 - char *realname = realpath(name, NULL),
274 - *filename = malloc(size),
273 + char *realname, *filename = malloc(size),
275 274 *linkname = malloc(size), *targetname;
276 275 int len, err = -1;
276 +
277 + if (is_kallsyms)
278 + realname = (char *)name;
279 + else
280 + realname = realpath(name, NULL);
277 281 
278 282 if (realname == NULL || filename == NULL || linkname == NULL)
279 283 goto out_free;
··· 310 306 if (symlink(targetname, linkname) == 0)
311 307 err = 0;
312 308 out_free:
313 - free(realname);
309 + if (!is_kallsyms)
310 + free(realname);
314 311 free(filename);
315 312 free(linkname);
316 313 return err;
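
realpath(3) resolves a name that must exist on the filesystem; the
kallsyms pseudo-name is not such a path, so it is now used verbatim. That
in turn means the cleanup must only free() the realpath() result, which is
the sole heap allocation of the two cases:

    /* Sketch of the ownership rule introduced here. */
    char *realname = is_kallsyms ? (char *)name          /* borrowed */
                                 : realpath(name, NULL); /* malloc'd */
    /* ... */
    if (!is_kallsyms)
            free(realname);
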
+1 -1
tools/perf/util/symbol.c
··· 1836 1836 int err = -1, fd;
1837 1837 char symfs_vmlinux[PATH_MAX];
1838 1838 
1839 - snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s",
1839 + snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
1840 1840 symbol_conf.symfs, vmlinux);
1841 1841 fd = open(symfs_vmlinux, O_RDONLY);
1842 1842 if (fd < 0)
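
With the default --symfs of "", the old "%s/%s" format prepended a stray
'/' and silently turned a relative vmlinux path into an absolute one.
"%s%s" leaves the path alone when the prefix is empty; when --symfs is
given, the vmlinux paths combined with it are absolute anyway, so plain
concatenation still forms a valid path. Illustration:

    /* symfs = "",         vmlinux = "vmlinux":
     *   "%s/%s" -> "/vmlinux"              (wrong file)
     *   "%s%s"  -> "vmlinux"               (intended)
     * symfs = "/sysroot", vmlinux = "/boot/vmlinux":
     *   "%s%s"  -> "/sysroot/boot/vmlinux"
     */
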