Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into sh-latest
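
Most of the churn in this merge is the genirq API cleanup: the old set_irq_chip()/set_irq_handler()/set_irq_data() helpers are renamed to irq_set_chip(), irq_set_handler(), irq_set_handler_data() and friends, combined calls become irq_set_chip_and_handler(), and direct pokes at irq_desc[] are replaced by accessors such as irq_set_status_flags(). A minimal sketch of the before/after pattern (illustrative only; my_chip, my_cookie and my_board_init_irq are hypothetical placeholders, not taken from any file below):

#include <linux/irq.h>

static struct irq_chip my_chip;    /* hypothetical irq_chip */
static void *my_cookie;            /* hypothetical per-irq cookie */

static void __init my_board_init_irq(unsigned int irq)
{
    /* Old style, removed throughout this merge:
     *    set_irq_chip(irq, &my_chip);
     *    set_irq_handler(irq, handle_level_irq);
     *    set_irq_data(irq, my_cookie);
     */
    irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
    irq_set_handler_data(irq, my_cookie);
    irq_set_status_flags(irq, IRQ_LEVEL);
}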

+6275 -6295
+2 -2
Makefile
···
 VERSION = 2
 PATCHLEVEL = 6
-SUBLEVEL = 38
-EXTRAVERSION =
+SUBLEVEL = 39
+EXTRAVERSION = -rc1
 NAME = Flesh-Eating Bats with Fangs

 # *DOCUMENTATION*
+1 -1
arch/alpha/Kconfig
···
     select HAVE_GENERIC_HARDIRQS
     select GENERIC_IRQ_PROBE
     select AUTO_IRQ_AFFINITY if SMP
-    select GENERIC_HARDIRQS_NO_DEPRECATED
+    select GENERIC_IRQ_SHOW
     help
       The Alpha is a 64-bit general-purpose processor designed and
       marketed by the Digital Equipment Corporation of blessed memory,
+10 -57
arch/alpha/kernel/irq.c
···
 }
 #endif /* CONFIG_SMP */

-int
-show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
     int j;
-    int irq = *(loff_t *) v;
-    struct irqaction * action;
-    struct irq_desc *desc;
-    unsigned long flags;

 #ifdef CONFIG_SMP
-    if (irq == 0) {
-        seq_puts(p, "           ");
-        for_each_online_cpu(j)
-            seq_printf(p, "CPU%d       ", j);
-        seq_putc(p, '\n');
-    }
+    seq_puts(p, "IPI: ");
+    for_each_online_cpu(j)
+        seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
+    seq_putc(p, '\n');
 #endif
-
-    if (irq < ACTUAL_NR_IRQS) {
-        desc = irq_to_desc(irq);
-
-        if (!desc)
-            return 0;
-
-        raw_spin_lock_irqsave(&desc->lock, flags);
-        action = desc->action;
-        if (!action)
-            goto unlock;
-        seq_printf(p, "%3d: ", irq);
-#ifndef CONFIG_SMP
-        seq_printf(p, "%10u ", kstat_irqs(irq));
-#else
-        for_each_online_cpu(j)
-            seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
-#endif
-        seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
-        seq_printf(p, " %c%s",
-            (action->flags & IRQF_DISABLED)?'+':' ',
-            action->name);
-
-        for (action=action->next; action; action = action->next) {
-            seq_printf(p, ", %c%s",
-                (action->flags & IRQF_DISABLED)?'+':' ',
-                action->name);
-        }
-
-        seq_putc(p, '\n');
-unlock:
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
-    } else if (irq == ACTUAL_NR_IRQS) {
-#ifdef CONFIG_SMP
-        seq_puts(p, "IPI: ");
-        for_each_online_cpu(j)
-            seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
-        seq_putc(p, '\n');
-#endif
-        seq_puts(p, "PMI: ");
-        for_each_online_cpu(j)
-            seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
-        seq_puts(p, "          Performance Monitoring\n");
-        seq_printf(p, "ERR: %10lu\n", irq_err_count);
-    }
+    seq_puts(p, "PMI: ");
+    for_each_online_cpu(j)
+        seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
+    seq_puts(p, "          Performance Monitoring\n");
+    seq_printf(p, "ERR: %10lu\n", irq_err_count);
     return 0;
 }
+1 -1
arch/alpha/kernel/irq_alpha.c
···
 void __init
 init_rtc_irq(void)
 {
-    set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+    irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
                                   handle_simple_irq, "RTC");
     setup_irq(RTC_IRQ, &timer_irqaction);
 }
+1 -1
arch/alpha/kernel/irq_i8259.c
···
     outb(0xff, 0xA1);    /* mask all of 8259A-2 */

     for (i = 0; i < 16; i++) {
-        set_irq_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
     }

     setup_irq(2, &cascade);
+1 -1
arch/alpha/kernel/irq_pyxis.c
···
     for (i = 16; i < 48; ++i) {
         if ((ignore_mask >> i) & 1)
             continue;
-        set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+1 -1
arch/alpha/kernel/irq_srm.c
···
     for (i = 16; i < max; ++i) {
         if (i < 64 && ((ignore_mask >> i) & 1))
             continue;
-        set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &srm_irq_type, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
 }
+1 -1
arch/alpha/kernel/sys_alcor.c
···
        on while IRQ probing.  */
     if (i >= 16+20 && i <= 16+30)
         continue;
-    set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
+    irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
     irq_set_status_flags(i, IRQ_LEVEL);
     }
     i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
+2 -2
arch/alpha/kernel/sys_cabriolet.c
···
     outb(0xff, 0x806);

     for (i = 16; i < 35; ++i) {
-        set_irq_chip_and_handler(i, &cabriolet_irq_type,
-            handle_level_irq);
+        irq_set_chip_and_handler(i, &cabriolet_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
 }
+1 -1
arch/alpha/kernel/sys_dp264.c
···
 {
     long i;
     for (i = imin; i <= imax; ++i) {
-        set_irq_chip_and_handler(i, ops, handle_level_irq);
+        irq_set_chip_and_handler(i, ops, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
 }
+1 -1
arch/alpha/kernel/sys_eb64p.c
···
     init_i8259a_irqs();

     for (i = 16; i < 32; ++i) {
-        set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+1 -1
arch/alpha/kernel/sys_eiger.c
···
     init_i8259a_irqs();

     for (i = 16; i < 128; ++i) {
-        set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
 }
+5 -5
arch/alpha/kernel/sys_jensen.c
···
 {
     init_i8259a_irqs();

-    set_irq_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
-    set_irq_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
-    set_irq_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
-    set_irq_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
-    set_irq_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
+    irq_set_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
+    irq_set_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
+    irq_set_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
+    irq_set_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
+    irq_set_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);

     common_init_isa_dma();
 }
+4 -4
arch/alpha/kernel/sys_marvel.c
···

     /* Set up the lsi irqs.  */
     for (i = 0; i < 128; ++i) {
-        set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+        irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
···

     /* Set up the msi irqs.  */
     for (i = 128; i < (128 + 512); ++i) {
-        set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+        irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
···

     /* Reserve the legacy irqs.  */
     for (i = 0; i < 16; ++i) {
-        set_irq_chip_and_handler(i, &marvel_legacy_irq_type,
-            handle_level_irq);
+        irq_set_chip_and_handler(i, &marvel_legacy_irq_type,
+                                 handle_level_irq);
     }

     /* Init the io7 irqs.  */
+2 -1
arch/alpha/kernel/sys_mikasa.c
···
     mikasa_update_irq_hw(0);

     for (i = 16; i < 32; ++i) {
-        set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &mikasa_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+2 -1
arch/alpha/kernel/sys_noritake.c
···
     outw(0, 0x54c);

     for (i = 16; i < 48; ++i) {
-        set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &noritake_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+2 -1
arch/alpha/kernel/sys_rawhide.c
···
     }

     for (i = 16; i < 128; ++i) {
-        set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &rawhide_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+1 -1
arch/alpha/kernel/sys_rx164.c
···

     rx164_update_irq_hw(0);
     for (i = 16; i < 40; ++i) {
-        set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+2 -2
arch/alpha/kernel/sys_sable.c
···
     long i;

     for (i = 0; i < nr_of_irqs; ++i) {
-        set_irq_chip_and_handler(i, &sable_lynx_irq_type,
-            handle_level_irq);
+        irq_set_chip_and_handler(i, &sable_lynx_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+2 -1
arch/alpha/kernel/sys_takara.c
···
     takara_update_irq_hw(i, -1);

     for (i = 16; i < 128; ++i) {
-        set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+        irq_set_chip_and_handler(i, &takara_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
+1 -1
arch/alpha/kernel/sys_titan.c
···
 {
     long i;
     for (i = imin; i <= imax; ++i) {
-        set_irq_chip_and_handler(i, ops, handle_level_irq);
+        irq_set_chip_and_handler(i, ops, handle_level_irq);
         irq_set_status_flags(i, IRQ_LEVEL);
     }
 }
+6 -6
arch/alpha/kernel/sys_wildfire.c
···
     for (i = 0; i < 16; ++i) {
         if (i == 2)
             continue;
-        set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
-            handle_level_irq);
+        irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
     }

-    set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
-        handle_level_irq);
+    irq_set_chip_and_handler(36 + irq_bias, &wildfire_irq_type,
+                             handle_level_irq);
     irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
     for (i = 40; i < 64; ++i) {
-        set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
-            handle_level_irq);
+        irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
+                                 handle_level_irq);
         irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
     }
+2
arch/arm/Kconfig
···
     select HAVE_C_RECORDMCOUNT
     select HAVE_GENERIC_HARDIRQS
     select HAVE_SPARSE_IRQ
+    select GENERIC_IRQ_SHOW
     help
       The ARM series is a line of low-power-consumption RISC chip designs
       licensed by ARM Ltd and targeted at embedded applications and
···
     select GENERIC_CLOCKEVENTS
     select ARCH_REQUIRE_GPIOLIB
     select CLKDEV_LOOKUP
+    select HAVE_SCHED_CLOCK
     help
       Support for Freescale MXC/iMX-based family of processors
+1 -9
arch/arm/boot/compressed/head.S
···

 #if defined(CONFIG_DEBUG_ICEDCC)

-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
         .macro  loadsp, rb, tmp
         .endm
         .macro  writeb, ch, rb
-        mcr     p14, 0, \ch, c0, c5, 0
-        .endm
-#elif defined(CONFIG_CPU_V7)
-        .macro  loadsp, rb, tmp
-        .endm
-        .macro  writeb, ch, rb
-wait:   mrc     p14, 0, pc, c0, c1, 0
-        bcs     wait
         mcr     p14, 0, \ch, c0, c5, 0
         .endm
 #elif defined(CONFIG_CPU_XSCALE)
+1 -11
arch/arm/boot/compressed/misc.c
···

 #ifdef CONFIG_DEBUG_ICEDCC

-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)

 static void icedcc_putc(int ch)
 {
···
     asm("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch));
 }

-#elif defined(CONFIG_CPU_V7)
-
-static void icedcc_putc(int ch)
-{
-    asm(
-    "wait:  mrc p14, 0, pc, c0, c1, 0   \n\
-        bcs wait                \n\
-        mcr p14, 0, %0, c0, c5, 0   "
-    : : "r" (ch));
-}

 #elif defined(CONFIG_CPU_XSCALE)
+7 -8
arch/arm/common/gic.c
···

 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
-    struct gic_chip_data *chip_data = get_irq_data(irq);
-    struct irq_chip *chip = get_irq_chip(irq);
+    struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+    struct irq_chip *chip = irq_get_chip(irq);
     unsigned int cascade_irq, gic_irq;
     unsigned long status;
···
 {
     if (gic_nr >= MAX_GIC_NR)
         BUG();
-    if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
+    if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
         BUG();
-    set_irq_chained_handler(irq, gic_handle_cascade_irq);
+    irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }

 static void __init gic_dist_init(struct gic_chip_data *gic,
···
      * Setup the Linux IRQ subsystem.
      */
     for (i = irq_start; i < irq_limit; i++) {
-        set_irq_chip(i, &gic_chip);
-        set_irq_chip_data(i, gic);
-        set_irq_handler(i, handle_level_irq);
+        irq_set_chip_and_handler(i, &gic_chip, handle_level_irq);
+        irq_set_chip_data(i, gic);
         set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
     }
···
     unsigned long flags;

     local_irq_save(flags);
-    irq_to_desc(irq)->status |= IRQ_NOPROBE;
+    irq_set_status_flags(irq, IRQ_NOPROBE);
     gic_unmask_irq(irq_get_irq_data(irq));
     local_irq_restore(flags);
 }
+2 -2
arch/arm/common/it8152.c
···
     __raw_writel((0), IT8152_INTC_LDCNIRR);

     for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
-        set_irq_chip(irq, &it8152_irq_chip);
-        set_irq_handler(irq, handle_level_irq);
+        irq_set_chip_and_handler(irq, &it8152_irq_chip,
+                                 handle_level_irq);
         set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
     }
 }
+8 -9
arch/arm/common/locomo.c
···

 static void locomo_handler(unsigned int irq, struct irq_desc *desc)
 {
-    struct locomo *lchip = get_irq_chip_data(irq);
+    struct locomo *lchip = irq_get_chip_data(irq);
     int req, i;

     /* Acknowledge the parent IRQ */
···
     /*
      * Install handler for IRQ_LOCOMO_HW.
      */
-    set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
-    set_irq_chip_data(lchip->irq, lchip);
-    set_irq_chained_handler(lchip->irq, locomo_handler);
+    irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
+    irq_set_chip_data(lchip->irq, lchip);
+    irq_set_chained_handler(lchip->irq, locomo_handler);

     /* Install handlers for IRQ_LOCOMO_* */
     for ( ; irq <= lchip->irq_base + 3; irq++) {
-        set_irq_chip(irq, &locomo_chip);
-        set_irq_chip_data(irq, lchip);
-        set_irq_handler(irq, handle_level_irq);
+        irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq);
+        irq_set_chip_data(irq, lchip);
         set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
     }
 }
···
     device_for_each_child(lchip->dev, NULL, locomo_remove_child);

     if (lchip->irq != NO_IRQ) {
-        set_irq_chained_handler(lchip->irq, NULL);
-        set_irq_data(lchip->irq, NULL);
+        irq_set_chained_handler(lchip->irq, NULL);
+        irq_set_handler_data(lchip->irq, NULL);
     }

     iounmap(lchip->base);
+12 -12
arch/arm/common/sa1111.c
···
 sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
     unsigned int stat0, stat1, i;
-    struct sa1111 *sachip = get_irq_data(irq);
+    struct sa1111 *sachip = irq_get_handler_data(irq);
     void __iomem *mapbase = sachip->base + SA1111_INTC;

     stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
···
     sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);

     for (irq = IRQ_GPAIN0; irq <= SSPROR; irq++) {
-        set_irq_chip(irq, &sa1111_low_chip);
-        set_irq_chip_data(irq, sachip);
-        set_irq_handler(irq, handle_edge_irq);
+        irq_set_chip_and_handler(irq, &sa1111_low_chip,
+                                 handle_edge_irq);
+        irq_set_chip_data(irq, sachip);
         set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
     }

     for (irq = AUDXMTDMADONEA; irq <= IRQ_S1_BVD1_STSCHG; irq++) {
-        set_irq_chip(irq, &sa1111_high_chip);
-        set_irq_chip_data(irq, sachip);
-        set_irq_handler(irq, handle_edge_irq);
+        irq_set_chip_and_handler(irq, &sa1111_high_chip,
+                                 handle_edge_irq);
+        irq_set_chip_data(irq, sachip);
         set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
     }

     /*
      * Register SA1111 interrupt
      */
-    set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
-    set_irq_data(sachip->irq, sachip);
-    set_irq_chained_handler(sachip->irq, sa1111_irq_handler);
+    irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
+    irq_set_handler_data(sachip->irq, sachip);
+    irq_set_chained_handler(sachip->irq, sa1111_irq_handler);
 }

 /*
···
     clk_disable(sachip->clk);

     if (sachip->irq != NO_IRQ) {
-        set_irq_chained_handler(sachip->irq, NULL);
-        set_irq_data(sachip->irq, NULL);
+        irq_set_chained_handler(sachip->irq, NULL);
+        irq_set_handler_data(sachip->irq, NULL);

         release_mem_region(sachip->phys + SA1111_INTC, 512);
     }
+3 -3
arch/arm/common/vic.c
···
         if (vic_sources & (1 << i)) {
             unsigned int irq = irq_start + i;

-            set_irq_chip(irq, &vic_chip);
-            set_irq_chip_data(irq, base);
-            set_irq_handler(irq, handle_level_irq);
+            irq_set_chip_and_handler(irq, &vic_chip,
+                                     handle_level_irq);
+            irq_set_chip_data(irq, base);
             set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
         }
     }
-8
arch/arm/include/asm/hw_irq.h
···
     irq_err_count++;
 }

-/*
- * Obsolete inline function for calling irq descriptor handlers.
- */
-static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc)
-{
-    desc->handle_irq(irq, desc);
-}
-
 void set_irq_flags(unsigned int irq, unsigned int flags);

 #define IRQF_VALID (1 << 0)
-2
arch/arm/include/asm/mach/udc_pxa2xx.h
···
      * VBUS IRQ and omit the methods above.  Store the GPIO number
      * here.  Note that sometimes the signals go through inverters...
      */
-    bool    gpio_vbus_inverted;
-    int     gpio_vbus;          /* high == vbus present */
     bool    gpio_pullup_inverted;
     int     gpio_pullup;        /* high == pullup activated */
 };
-25
arch/arm/kernel/bios32.c
···
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);

 /*
- * Same as above. The PrPMC800 carrier board for the PrPMC1100
- * card maps the host-bridge @ 00:01:00 for some reason and it
- * ends up getting scanned. Note that we only want to do this
- * fixup when we find the IXP4xx on a PrPMC system, which is why
- * we check the machine type. We could be running on a board
- * with an IXP4xx target device and we don't want to kill the
- * resources in that case.
- */
-static void __devinit pci_fixup_prpmc1100(struct pci_dev *dev)
-{
-    int i;
-
-    if (machine_is_prpmc1100()) {
-        dev->class &= 0xff;
-        dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
-        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-            dev->resource[i].start = 0;
-            dev->resource[i].end   = 0;
-            dev->resource[i].flags = 0;
-        }
-    }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP4XX, pci_fixup_prpmc1100);
-
-/*
  * PCI IDE controllers use non-standard I/O port decoding, respect it.
  */
 static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
+1 -18
arch/arm/kernel/debug.S
···
         .macro  addruart, rp, rv
         .endm

-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)

         .macro  senduart, rd, rx
         mcr     p14, 0, \rd, c0, c5, 0
···
         tst     \rx, #0x20000000
         bne     1001b
 1002:
-        .endm
-
-#elif defined(CONFIG_CPU_V7)
-
-        .macro  senduart, rd, rx
-        mcr     p14, 0, \rd, c0, c5, 0
-        .endm
-
-        .macro  busyuart, rd, rx
-busy:   mrc     p14, 0, pc, c0, c1, 0
-        bcs     busy
-        .endm
-
-        .macro  waituart, rd, rx
-wait:   mrc     p14, 0, pc, c0, c1, 0
-        bcs     wait
-
         .endm

 #elif defined(CONFIG_CPU_XSCALE)
+3 -3
arch/arm/kernel/ecard.c
···
      */
     if (slot < 8) {
         ec->irq = 32 + slot;
-        set_irq_chip(ec->irq, &ecard_chip);
-        set_irq_handler(ec->irq, handle_level_irq);
+        irq_set_chip_and_handler(ec->irq, &ecard_chip,
+                                 handle_level_irq);
         set_irq_flags(ec->irq, IRQF_VALID);
     }
···

     irqhw = ecard_probeirqhw();

-    set_irq_chained_handler(IRQ_EXPANSIONCARD,
+    irq_set_chained_handler(IRQ_EXPANSIONCARD,
                 irqhw ? ecard_irqexp_handler : ecard_irq_handler);

     ecard_proc_init();
+2 -2
arch/arm/kernel/etm.c
···
     .fops = &etb_fops,
 };

-static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
 {
     struct tracectx *t = &tracer;
     int ret = 0;
···
 static struct kobj_attribute trace_mode_attr =
     __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);

-static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
     struct tracectx *t = &tracer;
     int ret = 0;
+11 -59
arch/arm/kernel/irq.c
···

 unsigned long irq_err_count;

-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-    int i = *(loff_t *) v, cpu;
-    struct irq_desc *desc;
-    struct irqaction * action;
-    unsigned long flags;
-    int prec, n;
-
-    for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
-        n *= 10;
-
-#ifdef CONFIG_SMP
-    if (prec < 4)
-        prec = 4;
-#endif
-
-    if (i == 0) {
-        char cpuname[12];
-
-        seq_printf(p, "%*s ", prec, "");
-        for_each_present_cpu(cpu) {
-            sprintf(cpuname, "CPU%d", cpu);
-            seq_printf(p, " %10s", cpuname);
-        }
-        seq_putc(p, '\n');
-    }
-
-    if (i < nr_irqs) {
-        desc = irq_to_desc(i);
-        raw_spin_lock_irqsave(&desc->lock, flags);
-        action = desc->action;
-        if (!action)
-            goto unlock;
-
-        seq_printf(p, "%*d: ", prec, i);
-        for_each_present_cpu(cpu)
-            seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-        seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
-        seq_printf(p, "  %s", action->name);
-        for (action = action->next; action; action = action->next)
-            seq_printf(p, ", %s", action->name);
-
-        seq_putc(p, '\n');
-unlock:
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
-    } else if (i == nr_irqs) {
 #ifdef CONFIG_FIQ
-        show_fiq_list(p, prec);
+    show_fiq_list(p, prec);
 #endif
 #ifdef CONFIG_SMP
-        show_ipi_list(p, prec);
+    show_ipi_list(p, prec);
 #endif
 #ifdef CONFIG_LOCAL_TIMERS
-        show_local_irqs(p, prec);
+    show_local_irqs(p, prec);
 #endif
-        seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
-    }
+    seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
     return 0;
 }
···

 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
-    struct irq_desc *desc;
-    unsigned long flags;
+    unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

     if (irq >= nr_irqs) {
         printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
         return;
     }

-    desc = irq_to_desc(irq);
-    raw_spin_lock_irqsave(&desc->lock, flags);
-    desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
     if (iflags & IRQF_VALID)
-        desc->status &= ~IRQ_NOREQUEST;
+        clr |= IRQ_NOREQUEST;
     if (iflags & IRQF_PROBE)
-        desc->status &= ~IRQ_NOPROBE;
+        clr |= IRQ_NOPROBE;
     if (!(iflags & IRQF_NOAUTOEN))
-        desc->status &= ~IRQ_NOAUTOEN;
-    raw_spin_unlock_irqrestore(&desc->lock, flags);
+        clr |= IRQ_NOAUTOEN;
+    /* Order is clear bits in "clr" then set bits in "set" */
+    irq_modify_status(irq, clr, set & ~clr);
 }

 void __init init_IRQ(void)
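
The set_irq_flags() rewrite above stops taking the descriptor lock by hand and instead builds clear/set masks for irq_modify_status(), which (per the diff's own comment) clears the "clr" bits before applying the "set" bits. A stand-alone model of that ordering, illustrative only and not the kernel's implementation:

/* Model of irq_modify_status(irq, clr, set) on the status word: bits in
 * "clr" are cleared first, then bits in "set" are set — which is why the
 * ARM caller above passes "set & ~clr" to keep cleared bits cleared. */
static unsigned long modify_status(unsigned long status,
                                   unsigned long clr, unsigned long set)
{
    status &= ~clr;
    status |= set;
    return status;
}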
+6 -4
arch/arm/kernel/kprobes-decode.c
···
     long cpsr = regs->ARM_cpsr;

     fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
-    regs->uregs[rn] = fnr.r0;  /* Save Rn in case of writeback. */
+    if (rn != 15)
+        regs->uregs[rn] = fnr.r0;  /* Save Rn in case of writeback. */
     rdv = fnr.r1;

     if (rd == 15) {
···
     long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
     long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
     long rmv = regs->uregs[rm];  /* rm/rmv may be invalid, don't care. */
+    long rnv_wb;

-    /* Save Rn in case of writeback. */
-    regs->uregs[rn] =
-        insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+    rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+    if (rn != 15)
+        regs->uregs[rn] = rnv_wb;  /* Save Rn in case of writeback. */
 }

 static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
+25 -8
arch/arm/kernel/perf_event.c
···
     void        (*write_counter)(int idx, u32 val);
     void        (*start)(void);
     void        (*stop)(void);
+    void        (*reset)(void *);
     const unsigned  (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
                                 [PERF_COUNT_HW_CACHE_OP_MAX]
                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
···
 static u64
 armpmu_event_update(struct perf_event *event,
             struct hw_perf_event *hwc,
-            int idx)
+            int idx, int overflow)
 {
-    int shift = 64 - 32;
-    s64 prev_raw_count, new_raw_count;
-    u64 delta;
+    u64 delta, prev_raw_count, new_raw_count;

 again:
     prev_raw_count = local64_read(&hwc->prev_count);
···
                  new_raw_count) != prev_raw_count)
         goto again;

-    delta = (new_raw_count << shift) - (prev_raw_count << shift);
-    delta >>= shift;
+    new_raw_count &= armpmu->max_period;
+    prev_raw_count &= armpmu->max_period;
+
+    if (overflow)
+        delta = armpmu->max_period - prev_raw_count + new_raw_count;
+    else
+        delta = new_raw_count - prev_raw_count;

     local64_add(delta, &event->count);
     local64_sub(delta, &hwc->period_left);
···
     if (hwc->idx < 0)
         return;

-    armpmu_event_update(event, hwc, hwc->idx);
+    armpmu_event_update(event, hwc, hwc->idx, 0);
 }

 static void
···
     if (!(hwc->state & PERF_HES_STOPPED)) {
         armpmu->disable(hwc, hwc->idx);
         barrier(); /* why? */
-        armpmu_event_update(event, hwc, hwc->idx);
+        armpmu_event_update(event, hwc, hwc->idx, 0);
         hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
     }
 }
···
 #include "perf_event_xscale.c"
 #include "perf_event_v6.c"
 #include "perf_event_v7.c"
+
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+    if (armpmu && armpmu->reset)
+        return on_each_cpu(armpmu->reset, NULL, 1);
+    return 0;
+}
+arch_initcall(armpmu_reset);

 static int __init
 init_hw_perf_events(void)
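
armpmu_event_update() now masks both raw counter values to armpmu->max_period and selects the delta formula explicitly: on the interrupt path the counter is known to have wrapped once, so the delta is the distance from the previous count to max_period plus the new count. A self-contained userspace sketch of that arithmetic (count_delta() is a hypothetical helper mirroring the lines above, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's new delta computation for a counter masked to
 * max_period, with "overflow" set when the PMU raised a wrap interrupt. */
static uint64_t count_delta(uint64_t prev, uint64_t cur,
                            uint64_t max_period, int overflow)
{
    prev &= max_period;
    cur &= max_period;
    return overflow ? max_period - prev + cur : cur - prev;
}

int main(void)
{
    uint64_t max = (1ULL << 32) - 1;    /* ARMv7 max_period above */

    /* Wrapped case: counter went 0xfffffff0 -> 0x10 with an overflow. */
    printf("%llu\n", (unsigned long long)
           count_delta(0xfffffff0ULL, 0x10ULL, max, 1));    /* prints 31 */
    return 0;
}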
+1 -1
arch/arm/kernel/perf_event_v6.c
···
             continue;

         hwc = &event->hw;
-        armpmu_event_update(event, hwc, idx);
+        armpmu_event_update(event, hwc, idx, 1);
         data.period = event->hw.last_period;
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
+19 -7
arch/arm/kernel/perf_event_v7.c
···
 static inline void armv7_pmnc_write(unsigned long val)
 {
     val &= ARMV7_PMNC_MASK;
+    isb();
     asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
···

     val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
     asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+    isb();

     return idx;
 }
···
             continue;

         hwc = &event->hw;
-        armpmu_event_update(event, hwc, idx);
+        armpmu_event_update(event, hwc, idx, 1);
         data.period = event->hw.last_period;
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
···
     }
 }

+static void armv7pmu_reset(void *info)
+{
+    u32 idx, nb_cnt = armpmu->num_events;
+
+    /* The counter and interrupt enable registers are unknown at reset. */
+    for (idx = 1; idx < nb_cnt; ++idx)
+        armv7pmu_disable_event(NULL, idx);
+
+    /* Initialize & Reset PMNC: C and P bits */
+    armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
 static struct arm_pmu armv7pmu = {
     .handle_irq     = armv7pmu_handle_irq,
     .enable         = armv7pmu_enable_event,
···
     .get_event_idx  = armv7pmu_get_event_idx,
     .start          = armv7pmu_start,
     .stop           = armv7pmu_stop,
+    .reset          = armv7pmu_reset,
     .raw_event_mask = 0xFF,
     .max_period     = (1LLU << 32) - 1,
 };

-static u32 __init armv7_reset_read_pmnc(void)
+static u32 __init armv7_read_num_pmnc_events(void)
 {
     u32 nb_cnt;
-
-    /* Initialize & Reset PMNC: C and P bits */
-    armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);

     /* Read the nb of CNTx counters supported from PMNC */
     nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
···
     armv7pmu.name       = "ARMv7 Cortex-A8";
     armv7pmu.cache_map  = &armv7_a8_perf_cache_map;
     armv7pmu.event_map  = &armv7_a8_perf_map;
-    armv7pmu.num_events = armv7_reset_read_pmnc();
+    armv7pmu.num_events = armv7_read_num_pmnc_events();
     return &armv7pmu;
 }
···
     armv7pmu.name       = "ARMv7 Cortex-A9";
     armv7pmu.cache_map  = &armv7_a9_perf_cache_map;
     armv7pmu.event_map  = &armv7_a9_perf_map;
-    armv7pmu.num_events = armv7_reset_read_pmnc();
+    armv7pmu.num_events = armv7_read_num_pmnc_events();
     return &armv7pmu;
 }
 #else
+2 -2
arch/arm/kernel/perf_event_xscale.c
···
             continue;

         hwc = &event->hw;
-        armpmu_event_update(event, hwc, idx);
+        armpmu_event_update(event, hwc, idx, 1);
         data.period = event->hw.last_period;
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
···
             continue;

         hwc = &event->hw;
-        armpmu_event_update(event, hwc, idx);
+        armpmu_event_update(event, hwc, idx, 1);
         data.period = event->hw.last_period;
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
+11 -3
arch/arm/kernel/sleep.S
···
 #else
     ldr     r0, sleep_save_sp       @ stack phys addr
 #endif
-    msr     cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+    setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
 #ifdef MULTI_CPU
-    ldmia   r0!, {r1, sp, lr, pc}   @ load v:p, stack, return fn, resume fn
+    @ load v:p, stack, return fn, resume fn
+ ARM(   ldmia   r0!, {r1, sp, lr, pc}   )
+THUMB(  ldmia   r0!, {r1, r2, r3, r4}   )
+THUMB(  mov     sp, r2                  )
+THUMB(  mov     lr, r3                  )
+THUMB(  bx      r4                      )
 #else
-    ldmia   r0!, {r1, sp, lr}       @ load v:p, stack, return fn
+    @ load v:p, stack, return fn
+ ARM(   ldmia   r0!, {r1, sp, lr}       )
+THUMB(  ldmia   r0!, {r1, r2, lr}       )
+THUMB(  mov     sp, r2                  )
     b       cpu_do_resume
 #endif
 ENDPROC(cpu_resume)
+3 -3
arch/arm/mach-at91/at91cap9_devices.c
···
         return;

     if (cpu_is_at91cap9_revB())
-        set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH);
+        irq_set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH);

     /* Enable VBus control for UHP ports */
     for (i = 0; i < data->ports; i++) {
···
 void __init at91_add_device_usba(struct usba_platform_data *data)
 {
     if (cpu_is_at91cap9_revB()) {
-        set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH);
+        irq_set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH);
         at91_sys_write(AT91_MATRIX_UDPHS, AT91_MATRIX_SELECT_UDPHS |
                        AT91_MATRIX_UDPHS_BYPASS_LOCK);
     }
···
         return;

     if (cpu_is_at91cap9_revB())
-        set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH);
+        irq_set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH);

     at91_set_A_periph(AT91_PIN_PC1, 0);    /* LCDHSYNC */
     at91_set_A_periph(AT91_PIN_PC2, 0);    /* LCDDOTCK */
+15 -28
arch/arm/mach-at91/gpio.c
···
     else
         wakeups[bank] &= ~mask;

-    set_irq_wake(gpio_chip[bank].bank->id, state);
+    irq_set_irq_wake(gpio_chip[bank].bank->id, state);

     return 0;
 }
···
 static struct irq_chip gpio_irqchip = {
     .name           = "GPIO",
+    .irq_disable    = gpio_irq_mask,
     .irq_mask       = gpio_irq_mask,
     .irq_unmask     = gpio_irq_unmask,
     .irq_set_type   = gpio_irq_type,
···
 static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
     unsigned    pin;
-    struct irq_desc *gpio;
-    struct at91_gpio_chip *at91_gpio;
-    void __iomem    *pio;
+    struct irq_data *idata = irq_desc_get_irq_data(desc);
+    struct irq_chip *chip = irq_data_get_irq_chip(idata);
+    struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
+    void __iomem    *pio = at91_gpio->regbase;
     u32     isr;

-    at91_gpio = get_irq_chip_data(irq);
-    pio = at91_gpio->regbase;
-
     /* temporarily mask (level sensitive) parent IRQ */
-    desc->irq_data.chip->irq_ack(&desc->irq_data);
+    chip->irq_ack(idata);
     for (;;) {
         /* Reading ISR acks pending (edge triggered) GPIO interrupts.
          * When there none are pending, we're finished unless we need
···
         }

         pin = at91_gpio->chip.base;
-        gpio = &irq_desc[pin];

         while (isr) {
-            if (isr & 1) {
-                if (unlikely(gpio->depth)) {
-                    /*
-                     * The core ARM interrupt handler lazily disables IRQs so
-                     * another IRQ must be generated before it actually gets
-                     * here to be disabled on the GPIO controller.
-                     */
-                    gpio_irq_mask(irq_get_irq_data(pin));
-                }
-                else
-                    generic_handle_irq(pin);
-            }
+            if (isr & 1)
+                generic_handle_irq(pin);
             pin++;
-            gpio++;
             isr >>= 1;
         }
     }
-    desc->irq_data.chip->irq_unmask(&desc->irq_data);
+    chip->irq_unmask(idata);
     /* now it may re-trigger */
 }
···
         __raw_writel(~0, this->regbase + PIO_IDR);

         for (i = 0, pin = this->chip.base; i < 32; i++, pin++) {
-            lockdep_set_class(&irq_desc[pin].lock, &gpio_lock_class);
+            irq_set_lockdep_class(pin, &gpio_lock_class);

             /*
              * Can use the "simple" and not "edge" handler since it's
              * shorter, and the AIC handles interrupts sanely.
              */
-            set_irq_chip(pin, &gpio_irqchip);
-            set_irq_handler(pin, handle_simple_irq);
+            irq_set_chip_and_handler(pin, &gpio_irqchip,
+                                     handle_simple_irq);
             set_irq_flags(pin, IRQF_VALID);
         }
···
         if (prev && prev->next == this)
             continue;

-        set_irq_chip_data(id, this);
-        set_irq_chained_handler(id, gpio_irq_handler);
+        irq_set_chip_data(id, this);
+        irq_set_chained_handler(id, gpio_irq_handler);
     }
     pr_info("AT91: %d gpio irqs in %d banks\n", pin - PIN_BASE, gpio_banks);
 }
+1 -1
arch/arm/mach-at91/include/mach/at572d940hf.h
···
 /*
  * System Peripherals (offset from AT91_BASE_SYS)
  */
-#define AT91_SDRAMC     (0xffffea00 - AT91_BASE_SYS)
+#define AT91_SDRAMC0    (0xffffea00 - AT91_BASE_SYS)
 #define AT91_SMC        (0xffffec00 - AT91_BASE_SYS)
 #define AT91_MATRIX     (0xffffee00 - AT91_BASE_SYS)
 #define AT91_AIC        (0xfffff000 - AT91_BASE_SYS)
+1 -2
arch/arm/mach-at91/irq.c
···
         /* Active Low interrupt, with the specified priority */
         at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);

-        set_irq_chip(i, &at91_aic_chip);
-        set_irq_handler(i, handle_level_irq);
+        irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq);
         set_irq_flags(i, IRQF_VALID | IRQF_PROBE);

         /* Perform 8 End Of Interrupt Command to make sure AIC will not Lock out nIRQ */
+5 -5
arch/arm/mach-bcmring/irq.c
···
     unsigned int i;
     for (i = 0; i < 32; i++) {
         unsigned int irq = irq_start + i;
-        set_irq_chip(irq, chip);
-        set_irq_chip_data(irq, base);
+        irq_set_chip(irq, chip);
+        irq_set_chip_data(irq, base);

         if (vic_sources & (1 << i)) {
-            set_irq_handler(irq, handle_level_irq);
+            irq_set_handler(irq, handle_level_irq);
             set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
         }
     }
···

     /* special cases */
     if (INTCHW_INTC1_GPIO0 & IRQ_INTC1_VALID_MASK) {
-        set_irq_handler(IRQ_GPIO0, handle_simple_irq);
+        irq_set_handler(IRQ_GPIO0, handle_simple_irq);
     }
     if (INTCHW_INTC1_GPIO1 & IRQ_INTC1_VALID_MASK) {
-        set_irq_handler(IRQ_GPIO1, handle_simple_irq);
+        irq_set_handler(IRQ_GPIO1, handle_simple_irq);
     }
 }
+4 -4
arch/arm/mach-clps711x/irq.c
···

     for (i = 0; i < NR_IRQS; i++) {
         if (INT1_IRQS & (1 << i)) {
-            set_irq_handler(i, handle_level_irq);
-            set_irq_chip(i, &int1_chip);
+            irq_set_chip_and_handler(i, &int1_chip,
+                                     handle_level_irq);
             set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
         }
         if (INT2_IRQS & (1 << i)) {
-            set_irq_handler(i, handle_level_irq);
-            set_irq_chip(i, &int2_chip);
+            irq_set_chip_and_handler(i, &int2_chip,
+                                     handle_level_irq);
             set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
         }
     }
+2 -2
arch/arm/mach-davinci/cp_intc.c
···

     /* Set up genirq dispatching for cp_intc */
     for (i = 0; i < num_irq; i++) {
-        set_irq_chip(i, &cp_intc_irq_chip);
+        irq_set_chip(i, &cp_intc_irq_chip);
         set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
-        set_irq_handler(i, handle_edge_irq);
+        irq_set_handler(i, handle_edge_irq);
     }

     /* Enable global interrupt */
+20 -29
arch/arm/mach-davinci/gpio.c
···
 {
     struct davinci_gpio_regs __iomem *g;

-    g = (__force struct davinci_gpio_regs __iomem *)get_irq_chip_data(irq);
+    g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);

     return g;
 }
···
 static void gpio_irq_disable(struct irq_data *d)
 {
     struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
-    u32 mask = (u32) irq_data_get_irq_data(d);
+    u32 mask = (u32) irq_data_get_irq_handler_data(d);

     __raw_writel(mask, &g->clr_falling);
     __raw_writel(mask, &g->clr_rising);
···
 static void gpio_irq_enable(struct irq_data *d)
 {
     struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
-    u32 mask = (u32) irq_data_get_irq_data(d);
-    unsigned status = irq_desc[d->irq].status;
+    u32 mask = (u32) irq_data_get_irq_handler_data(d);
+    unsigned status = irqd_get_trigger_type(d);

     status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
     if (!status)
···
 static int gpio_irq_type(struct irq_data *d, unsigned trigger)
 {
     struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
-    u32 mask = (u32) irq_data_get_irq_data(d);
+    u32 mask = (u32) irq_data_get_irq_handler_data(d);

     if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
         return -EINVAL;

-    irq_desc[d->irq].status &= ~IRQ_TYPE_SENSE_MASK;
-    irq_desc[d->irq].status |= trigger;
-
-    /* don't enable the IRQ if it's currently disabled */
-    if (irq_desc[d->irq].depth == 0) {
-        __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
-                 ? &g->set_falling : &g->clr_falling);
-        __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
-                 ? &g->set_rising : &g->clr_rising);
-    }
     return 0;
 }
···
     .irq_enable     = gpio_irq_enable,
     .irq_disable    = gpio_irq_disable,
     .irq_set_type   = gpio_irq_type,
+    .flags          = IRQCHIP_SET_TYPE_MASKED,
 };

 static void
···
         status >>= 16;

     /* now demux them to the right lowlevel handler */
-    n = (int)get_irq_data(irq);
+    n = (int)irq_get_handler_data(irq);
     while (status) {
         res = ffs(status);
         n += res;
···
 static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger)
 {
     struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
-    u32 mask = (u32) irq_data_get_irq_data(d);
+    u32 mask = (u32) irq_data_get_irq_handler_data(d);

     if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
         return -EINVAL;
···

         /* AINTC handles mask/unmask; GPIO handles triggering */
         irq = bank_irq;
-        gpio_irqchip_unbanked = *get_irq_desc_chip(irq_to_desc(irq));
+        gpio_irqchip_unbanked = *irq_get_chip(irq);
         gpio_irqchip_unbanked.name = "GPIO-AINTC";
         gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked;
···

         /* set the direct IRQs up to use that irqchip */
         for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
-            set_irq_chip(irq, &gpio_irqchip_unbanked);
-            set_irq_data(irq, (void *) __gpio_mask(gpio));
-            set_irq_chip_data(irq, (__force void *) g);
-            irq_desc[irq].status |= IRQ_TYPE_EDGE_BOTH;
+            irq_set_chip(irq, &gpio_irqchip_unbanked);
+            irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
+            irq_set_chip_data(irq, (__force void *)g);
+            irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
         }

         goto done;
···
         __raw_writel(~0, &g->clr_rising);

         /* set up all irqs in this bank */
-        set_irq_chained_handler(bank_irq, gpio_irq_handler);
-        set_irq_chip_data(bank_irq, (__force void *) g);
-        set_irq_data(bank_irq, (void *) irq);
+        irq_set_chained_handler(bank_irq, gpio_irq_handler);
+        irq_set_chip_data(bank_irq, (__force void *)g);
+        irq_set_handler_data(bank_irq, (void *)irq);

         for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
-            set_irq_chip(irq, &gpio_irqchip);
-            set_irq_chip_data(irq, (__force void *) g);
-            set_irq_data(irq, (void *) __gpio_mask(gpio));
-            set_irq_handler(irq, handle_simple_irq);
+            irq_set_chip(irq, &gpio_irqchip);
+            irq_set_chip_data(irq, (__force void *)g);
+            irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
+            irq_set_handler(irq, handle_simple_irq);
             set_irq_flags(irq, IRQF_VALID);
         }
+3 -3
arch/arm/mach-davinci/irq.c
···

     /* set up genirq dispatch for ARM INTC */
     for (i = 0; i < davinci_soc_info.intc_irq_num; i++) {
-        set_irq_chip(i, &davinci_irq_chip_0);
+        irq_set_chip(i, &davinci_irq_chip_0);
         set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
         if (i != IRQ_TINT1_TINT34)
-            set_irq_handler(i, handle_edge_irq);
+            irq_set_handler(i, handle_edge_irq);
         else
-            set_irq_handler(i, handle_level_irq);
+            irq_set_handler(i, handle_level_irq);
     }
 }
+1 -1
arch/arm/mach-dove/include/mach/dove.h
···
 #define DOVE_MPP_GENERAL_VIRT_BASE  (DOVE_SB_REGS_VIRT_BASE | 0xe803c)
 #define  DOVE_AU1_SPDIFO_GPIO_EN    (1 << 1)
 #define  DOVE_NAND_GPIO_EN          (1 << 0)
-#define DOVE_MPP_CTRL4_VIRT_BASE    (DOVE_GPIO_VIRT_BASE + 0x40)
+#define DOVE_MPP_CTRL4_VIRT_BASE    (DOVE_GPIO_LO_VIRT_BASE + 0x40)
 #define  DOVE_SPI_GPIO_SEL          (1 << 5)
 #define  DOVE_UART1_GPIO_SEL        (1 << 4)
 #define  DOVE_AU1_GPIO_SEL          (1 << 3)
+9 -11
arch/arm/mach-dove/irq.c
···
         if (!(cause & (1 << irq)))
             continue;
         irq = pmu_to_irq(irq);
-        desc = irq_desc + irq;
-        desc_handle_irq(irq, desc);
+        generic_handle_irq(irq);
     }
 }
···
      */
     orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0,
             IRQ_DOVE_GPIO_START);
-    set_irq_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler);
-    set_irq_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler);
-    set_irq_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler);
-    set_irq_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler);
+    irq_set_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler);
+    irq_set_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler);
+    irq_set_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler);
+    irq_set_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler);

     orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0,
             IRQ_DOVE_GPIO_START + 32);
-    set_irq_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler);
+    irq_set_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler);

     orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0,
             IRQ_DOVE_GPIO_START + 64);
···
     writel(0, PMU_INTERRUPT_CAUSE);

     for (i = IRQ_DOVE_PMU_START; i < NR_IRQS; i++) {
-        set_irq_chip(i, &pmu_irq_chip);
-        set_irq_handler(i, handle_level_irq);
-        irq_desc[i].status |= IRQ_LEVEL;
+        irq_set_chip_and_handler(i, &pmu_irq_chip, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
         set_irq_flags(i, IRQF_VALID);
     }
-    set_irq_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler);
+    irq_set_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler);
 }
-3
arch/arm/mach-dove/mpp.c
···
     u32 pmu_sig_ctrl[PMU_SIG_REGS];
     int i;

-    /* Initialize gpiolib. */
-    orion_gpio_init();
-
     for (i = 0; i < MPP_NR_REGS; i++)
         mpp_ctrl[i] = readl(MPP_CTRL(i));
+2 -2
arch/arm/mach-ebsa110/core.c
···
     local_irq_restore(flags);

     for (irq = 0; irq < NR_IRQS; irq++) {
-        set_irq_chip(irq, &ebsa110_irq_chip);
-        set_irq_handler(irq, handle_level_irq);
+        irq_set_chip_and_handler(irq, &ebsa110_irq_chip,
+                                 handle_level_irq);
         set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
     }
 }
+30 -22
arch/arm/mach-ep93xx/gpio.c
···
     int port = line >> 3;
     int port_mask = 1 << (line & 7);

-    if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+    if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
         gpio_int_type2[port] ^= port_mask; /* switch edge direction */
         ep93xx_gpio_update_int_params(port);
     }
···
     int port = line >> 3;
     int port_mask = 1 << (line & 7);

-    if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+    if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
         gpio_int_type2[port] ^= port_mask; /* switch edge direction */

     gpio_int_unmasked[port] &= ~port_mask;
···
  */
 static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
-    struct irq_desc *desc = irq_desc + d->irq;
     const int gpio = irq_to_gpio(d->irq);
     const int port = gpio >> 3;
     const int port_mask = 1 << (gpio & 7);
+    irq_flow_handler_t handler;

     gpio_direction_input(gpio);

     case IRQ_TYPE_EDGE_RISING:
         gpio_int_type1[port] |= port_mask;
         gpio_int_type2[port] |= port_mask;
-        desc->handle_irq = handle_edge_irq;
+        handler = handle_edge_irq;
         break;
     case IRQ_TYPE_EDGE_FALLING:
         gpio_int_type1[port] |= port_mask;
         gpio_int_type2[port] &= ~port_mask;
-        desc->handle_irq = handle_edge_irq;
+        handler = handle_edge_irq;
         break;
     case IRQ_TYPE_LEVEL_HIGH:
         gpio_int_type1[port] &= ~port_mask;
         gpio_int_type2[port] |= port_mask;
-        desc->handle_irq = handle_level_irq;
+        handler = handle_level_irq;
         break;
     case IRQ_TYPE_LEVEL_LOW:
         gpio_int_type1[port] &= ~port_mask;
         gpio_int_type2[port] &= ~port_mask;
-        desc->handle_irq = handle_level_irq;
+        handler = handle_level_irq;
         break;
     case IRQ_TYPE_EDGE_BOTH:
         gpio_int_type1[port] |= port_mask;
···
             gpio_int_type2[port] &= ~port_mask; /* falling */
         else
             gpio_int_type2[port] |= port_mask; /* rising */
-        desc->handle_irq = handle_edge_irq;
+        handler = handle_edge_irq;
         break;
     default:
         pr_err("failed to set irq type %d for gpio %d\n", type, gpio);
         return -EINVAL;
     }

-    gpio_int_enabled[port] |= port_mask;
+    __irq_set_handler_locked(d->irq, handler);

-    desc->status &= ~IRQ_TYPE_SENSE_MASK;
-    desc->status |= type & IRQ_TYPE_SENSE_MASK;
+    gpio_int_enabled[port] |= port_mask;

     ep93xx_gpio_update_int_params(port);
···
     for (gpio_irq = gpio_to_irq(0);
          gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
-        set_irq_chip(gpio_irq, &ep93xx_gpio_irq_chip);
-        set_irq_handler(gpio_irq, handle_level_irq);
+        irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
+                                 handle_level_irq);
         set_irq_flags(gpio_irq, IRQF_VALID);
     }

-    set_irq_chained_handler(IRQ_EP93XX_GPIO_AB, ep93xx_gpio_ab_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO0MUX, ep93xx_gpio_f_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO1MUX, ep93xx_gpio_f_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO2MUX, ep93xx_gpio_f_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO3MUX, ep93xx_gpio_f_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO4MUX, ep93xx_gpio_f_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO5MUX, ep93xx_gpio_f_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO6MUX, ep93xx_gpio_f_irq_handler);
-    set_irq_chained_handler(IRQ_EP93XX_GPIO7MUX, ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
+                            ep93xx_gpio_ab_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX,
+                            ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX,
+                            ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX,
+                            ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX,
+                            ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX,
+                            ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX,
+                            ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX,
+                            ep93xx_gpio_f_irq_handler);
+    irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX,
+                            ep93xx_gpio_f_irq_handler);
 }
+6 -7
arch/arm/mach-exynos4/irq-combiner.c
···

 static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
-    struct combiner_chip_data *chip_data = get_irq_data(irq);
-    struct irq_chip *chip = get_irq_chip(irq);
+    struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+    struct irq_chip *chip = irq_get_chip(irq);
     unsigned int cascade_irq, combiner_irq;
     unsigned long status;
···
 {
     if (combiner_nr >= MAX_COMBINER_NR)
         BUG();
-    if (set_irq_data(irq, &combiner_data[combiner_nr]) != 0)
+    if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
         BUG();
-    set_irq_chained_handler(irq, combiner_handle_cascade_irq);
+    irq_set_chained_handler(irq, combiner_handle_cascade_irq);
 }

 void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
···
     for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
                 + MAX_IRQ_IN_COMBINER; i++) {
-        set_irq_chip(i, &combiner_chip);
-        set_irq_chip_data(i, &combiner_data[combiner_nr]);
-        set_irq_handler(i, handle_level_irq);
+        irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
+        irq_set_chip_data(i, &combiner_data[combiner_nr]);
         set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
     }
 }
+8 -7
arch/arm/mach-exynos4/irq-eint.c
···

 static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
 {
-    u32 *irq_data = get_irq_data(irq);
-    struct irq_chip *chip = get_irq_chip(irq);
+    u32 *irq_data = irq_get_handler_data(irq);
+    struct irq_chip *chip = irq_get_chip(irq);

     chip->irq_mask(&desc->irq_data);
···
     int irq;

     for (irq = 0 ; irq <= 31 ; irq++) {
-        set_irq_chip(IRQ_EINT(irq), &exynos4_irq_eint);
-        set_irq_handler(IRQ_EINT(irq), handle_level_irq);
+        irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
+                                 handle_level_irq);
         set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
     }

-    set_irq_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
+    irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);

     for (irq = 0 ; irq <= 15 ; irq++) {
         eint0_15_data[irq] = IRQ_EINT(irq);

-        set_irq_data(exynos4_get_irq_nr(irq), &eint0_15_data[irq]);
-        set_irq_chained_handler(exynos4_get_irq_nr(irq),
+        irq_set_handler_data(exynos4_get_irq_nr(irq),
+                             &eint0_15_data[irq]);
+        irq_set_chained_handler(exynos4_get_irq_nr(irq),
                     exynos4_irq_eint0_15);
     }
+1 -2
arch/arm/mach-footbridge/common.c
···
     *CSR_FIQ_DISABLE = -1;

     for (irq = _DC21285_IRQ(0); irq < _DC21285_IRQ(20); irq++) {
-        set_irq_chip(irq, &fb_chip);
-        set_irq_handler(irq, handle_level_irq);
+        irq_set_chip_and_handler(irq, &fb_chip, handle_level_irq);
         set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
     }
 }
+1 -1
arch/arm/mach-footbridge/dc21285-timer.c
···
     return 0;
 }

-static int cksrc_dc21285_disable(struct clocksource *cs)
+static void cksrc_dc21285_disable(struct clocksource *cs)
 {
     *CSR_TIMER2_CNTL = 0;
 }
+5 -5
arch/arm/mach-footbridge/isa-irq.c
···

     if (host_irq != (unsigned int)-1) {
         for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) {
-            set_irq_chip(irq, &isa_lo_chip);
-            set_irq_handler(irq, handle_level_irq);
+            irq_set_chip_and_handler(irq, &isa_lo_chip,
+                                     handle_level_irq);
             set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
         }

         for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) {
-            set_irq_chip(irq, &isa_hi_chip);
-            set_irq_handler(irq, handle_level_irq);
+            irq_set_chip_and_handler(irq, &isa_hi_chip,
+                                     handle_level_irq);
             set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
         }

         request_resource(&ioport_resource, &pic2_resource);
         setup_irq(IRQ_ISA_CASCADE, &irq_cascade);

-        set_irq_chained_handler(host_irq, isa_irq_handler);
+        irq_set_chained_handler(host_irq, isa_irq_handler);

         /*
          * On the NetWinder, don't automatically
+6 -8
arch/arm/mach-gemini/gpio.c
···

 static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
+    unsigned int port = (unsigned int)irq_desc_get_handler_data(desc);
     unsigned int gpio_irq_no, irq_stat;
-    unsigned int port = (unsigned int)get_irq_data(irq);

     irq_stat = __raw_readl(GPIO_BASE(port) + GPIO_INT_STAT);
···
         if ((irq_stat & 1) == 0)
             continue;

-        BUG_ON(!(irq_desc[gpio_irq_no].handle_irq));
-        irq_desc[gpio_irq_no].handle_irq(gpio_irq_no,
-                &irq_desc[gpio_irq_no]);
+        generic_handle_irq(gpio_irq_no);
     }
 }
···
         for (j = GPIO_IRQ_BASE + i * 32;
              j < GPIO_IRQ_BASE + (i + 1) * 32; j++) {
-            set_irq_chip(j, &gpio_irq_chip);
-            set_irq_handler(j, handle_edge_irq);
+            irq_set_chip_and_handler(j, &gpio_irq_chip,
+                                     handle_edge_irq);
             set_irq_flags(j, IRQF_VALID);
         }

-        set_irq_chained_handler(IRQ_GPIO(i), gpio_irq_handler);
-        set_irq_data(IRQ_GPIO(i), (void *)i);
+        irq_set_chained_handler(IRQ_GPIO(i), gpio_irq_handler);
+        irq_set_handler_data(IRQ_GPIO(i), (void *)i);
     }

     BUG_ON(gpiochip_add(&gemini_gpio_chip));
+3 -3
arch/arm/mach-gemini/irq.c
··· 81 81 request_resource(&iomem_resource, &irq_resource); 82 82 83 83 for (i = 0; i < NR_IRQS; i++) { 84 - set_irq_chip(i, &gemini_irq_chip); 84 + irq_set_chip(i, &gemini_irq_chip); 85 85 if((i >= IRQ_TIMER1 && i <= IRQ_TIMER3) || (i >= IRQ_SERIRQ0 && i <= IRQ_SERIRQ1)) { 86 - set_irq_handler(i, handle_edge_irq); 86 + irq_set_handler(i, handle_edge_irq); 87 87 mode |= 1 << i; 88 88 level |= 1 << i; 89 89 } else { 90 - set_irq_handler(i, handle_level_irq); 90 + irq_set_handler(i, handle_level_irq); 91 91 } 92 92 set_irq_flags(i, IRQF_VALID | IRQF_PROBE); 93 93 }
+11 -11
arch/arm/mach-h720x/common.c
··· 199 199 200 200 /* Initialize global IRQ's, fast path */ 201 201 for (irq = 0; irq < NR_GLBL_IRQS; irq++) { 202 - set_irq_chip(irq, &h720x_global_chip); 203 - set_irq_handler(irq, handle_level_irq); 202 + irq_set_chip_and_handler(irq, &h720x_global_chip, 203 + handle_level_irq); 204 204 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 205 205 } 206 206 207 207 /* Initialize multiplexed IRQ's, slow path */ 208 208 for (irq = IRQ_CHAINED_GPIOA(0) ; irq <= IRQ_CHAINED_GPIOD(31); irq++) { 209 - set_irq_chip(irq, &h720x_gpio_chip); 210 - set_irq_handler(irq, handle_edge_irq); 209 + irq_set_chip_and_handler(irq, &h720x_gpio_chip, 210 + handle_edge_irq); 211 211 set_irq_flags(irq, IRQF_VALID ); 212 212 } 213 - set_irq_chained_handler(IRQ_GPIOA, h720x_gpioa_demux_handler); 214 - set_irq_chained_handler(IRQ_GPIOB, h720x_gpiob_demux_handler); 215 - set_irq_chained_handler(IRQ_GPIOC, h720x_gpioc_demux_handler); 216 - set_irq_chained_handler(IRQ_GPIOD, h720x_gpiod_demux_handler); 213 + irq_set_chained_handler(IRQ_GPIOA, h720x_gpioa_demux_handler); 214 + irq_set_chained_handler(IRQ_GPIOB, h720x_gpiob_demux_handler); 215 + irq_set_chained_handler(IRQ_GPIOC, h720x_gpioc_demux_handler); 216 + irq_set_chained_handler(IRQ_GPIOD, h720x_gpiod_demux_handler); 217 217 218 218 #ifdef CONFIG_CPU_H7202 219 219 for (irq = IRQ_CHAINED_GPIOE(0) ; irq <= IRQ_CHAINED_GPIOE(31); irq++) { 220 - set_irq_chip(irq, &h720x_gpio_chip); 221 - set_irq_handler(irq, handle_edge_irq); 220 + irq_set_chip_and_handler(irq, &h720x_gpio_chip, 221 + handle_edge_irq); 222 222 set_irq_flags(irq, IRQF_VALID ); 223 223 } 224 - set_irq_chained_handler(IRQ_GPIOE, h720x_gpioe_demux_handler); 224 + irq_set_chained_handler(IRQ_GPIOE, h720x_gpioe_demux_handler); 225 225 #endif 226 226 227 227 /* Enable multiplexed irq's */
+11 -6
arch/arm/mach-h720x/cpu-h7202.c
··· 141 141 /* 142 142 * mask multiplexed timer IRQs 143 143 */ 144 - static void inline mask_timerx_irq(struct irq_data *d) 144 + static void inline __mask_timerx_irq(unsigned int irq) 145 145 { 146 146 unsigned int bit; 147 - bit = 2 << ((d->irq == IRQ_TIMER64B) ? 4 : (d->irq - IRQ_TIMER1)); 147 + bit = 2 << ((irq == IRQ_TIMER64B) ? 4 : (irq - IRQ_TIMER1)); 148 148 CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) &= ~bit; 149 + } 150 + 151 + static void inline mask_timerx_irq(struct irq_data *d) 152 + { 153 + __mask_timerx_irq(d->irq); 149 154 } 150 155 151 156 /* ··· 201 196 202 197 for (irq = IRQ_TIMER1; 203 198 irq < IRQ_CHAINED_TIMERX(NR_TIMERX_IRQS); irq++) { 204 - mask_timerx_irq(irq); 205 - set_irq_chip(irq, &h7202_timerx_chip); 206 - set_irq_handler(irq, handle_edge_irq); 199 + __mask_timerx_irq(irq); 200 + irq_set_chip_and_handler(irq, &h7202_timerx_chip, 201 + handle_edge_irq); 207 202 set_irq_flags(irq, IRQF_VALID ); 208 203 } 209 - set_irq_chained_handler(IRQ_TIMERX, h7202_timerx_demux_handler); 204 + irq_set_chained_handler(IRQ_TIMERX, h7202_timerx_demux_handler); 210 205 211 206 h720x_init_irq(); 212 207 }
+1
arch/arm/mach-imx/Kconfig
··· 255 255 bool "Vista Silicon i.MX27 Visstrim_m10" 256 256 select SOC_IMX27 257 257 select IMX_HAVE_PLATFORM_IMX_I2C 258 + select IMX_HAVE_PLATFORM_IMX_SSI 258 259 select IMX_HAVE_PLATFORM_IMX_UART 259 260 select IMX_HAVE_PLATFORM_MXC_MMC 260 261 select IMX_HAVE_PLATFORM_MXC_EHCI
+7 -1
arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
··· 34 34 #include <mach/mx25.h> 35 35 #include <mach/imx-uart.h> 36 36 #include <mach/audmux.h> 37 + #include <mach/esdhc.h> 37 38 38 39 #include "devices-imx25.h" 39 40 ··· 243 242 .flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE, 244 243 }; 245 244 245 + static struct esdhc_platform_data sd1_pdata = { 246 + .cd_gpio = GPIO_SD1CD, 247 + .wp_gpio = -EINVAL, 248 + }; 249 + 246 250 /* 247 251 * system init for baseboard usage. Will be called by cpuimx25 init. 248 252 * ··· 281 275 imx25_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata); 282 276 283 277 imx25_add_flexcan1(NULL); 284 - imx25_add_sdhci_esdhc_imx(0, NULL); 278 + imx25_add_sdhci_esdhc_imx(0, &sd1_pdata); 285 279 286 280 gpio_request(GPIO_LED1, "LED1"); 287 281 gpio_direction_output(GPIO_LED1, 1);
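Setting wp_gpio to -EINVAL is the idiomatic way of marking a write-protect line as absent: gpio_is_valid() rejects negative numbers, so a driver consuming this platform data can guard its WP handling with a plain validity check. A sketch of the driver side, not code from this hunk:

	if (gpio_is_valid(pdata->wp_gpio))
		read_only = gpio_get_value(pdata->wp_gpio);
	else
		read_only = 0;			/* board has no WP line */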
+5 -5
arch/arm/mach-iop13xx/irq.c
··· 224 224 225 225 for(i = 0; i <= IRQ_IOP13XX_HPI; i++) { 226 226 if (i < 32) 227 - set_irq_chip(i, &iop13xx_irqchip1); 227 + irq_set_chip(i, &iop13xx_irqchip1); 228 228 else if (i < 64) 229 - set_irq_chip(i, &iop13xx_irqchip2); 229 + irq_set_chip(i, &iop13xx_irqchip2); 230 230 else if (i < 96) 231 - set_irq_chip(i, &iop13xx_irqchip3); 231 + irq_set_chip(i, &iop13xx_irqchip3); 232 232 else 233 - set_irq_chip(i, &iop13xx_irqchip4); 233 + irq_set_chip(i, &iop13xx_irqchip4); 234 234 235 - set_irq_handler(i, handle_level_irq); 235 + irq_set_handler(i, handle_level_irq); 236 236 set_irq_flags(i, IRQF_VALID | IRQF_PROBE); 237 237 } 238 238
+3 -3
arch/arm/mach-iop13xx/msi.c
··· 118 118 119 119 void __init iop13xx_msi_init(void) 120 120 { 121 - set_irq_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler); 121 + irq_set_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler); 122 122 } 123 123 124 124 /* ··· 178 178 if (irq < 0) 179 179 return irq; 180 180 181 - set_irq_msi(irq, desc); 181 + irq_set_msi_desc(irq, desc); 182 182 183 183 msg.address_hi = 0x0; 184 184 msg.address_lo = IOP13XX_MU_MIMR_PCI; ··· 187 187 msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); 188 188 189 189 write_msi_msg(irq, &msg); 190 - set_irq_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); 190 + irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); 191 191 192 192 return 0; 193 193 }
+1 -2
arch/arm/mach-iop32x/irq.c
··· 68 68 *IOP3XX_PCIIRSR = 0x0f; 69 69 70 70 for (i = 0; i < NR_IRQS; i++) { 71 - set_irq_chip(i, &ext_chip); 72 - set_irq_handler(i, handle_level_irq); 71 + irq_set_chip_and_handler(i, &ext_chip, handle_level_irq); 73 72 set_irq_flags(i, IRQF_VALID | IRQF_PROBE); 74 73 } 75 74 }
+3 -2
arch/arm/mach-iop33x/irq.c
··· 110 110 *IOP3XX_PCIIRSR = 0x0f; 111 111 112 112 for (i = 0; i < NR_IRQS; i++) { 113 - set_irq_chip(i, (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2); 114 - set_irq_handler(i, handle_level_irq); 113 + irq_set_chip_and_handler(i, 114 + (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2, 115 + handle_level_irq); 115 116 set_irq_flags(i, IRQF_VALID | IRQF_PROBE); 116 117 } 117 118 }
+10 -10
arch/arm/mach-ixp2000/core.c
··· 476 476 */ 477 477 for (irq = IRQ_IXP2000_SOFT_INT; irq <= IRQ_IXP2000_THDB3; irq++) { 478 478 if ((1 << irq) & IXP2000_VALID_IRQ_MASK) { 479 - set_irq_chip(irq, &ixp2000_irq_chip); 480 - set_irq_handler(irq, handle_level_irq); 479 + irq_set_chip_and_handler(irq, &ixp2000_irq_chip, 480 + handle_level_irq); 481 481 set_irq_flags(irq, IRQF_VALID); 482 482 } else set_irq_flags(irq, 0); 483 483 } ··· 485 485 for (irq = IRQ_IXP2000_DRAM0_MIN_ERR; irq <= IRQ_IXP2000_SP_INT; irq++) { 486 486 if((1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)) & 487 487 IXP2000_VALID_ERR_IRQ_MASK) { 488 - set_irq_chip(irq, &ixp2000_err_irq_chip); 489 - set_irq_handler(irq, handle_level_irq); 488 + irq_set_chip_and_handler(irq, &ixp2000_err_irq_chip, 489 + handle_level_irq); 490 490 set_irq_flags(irq, IRQF_VALID); 491 491 } 492 492 else 493 493 set_irq_flags(irq, 0); 494 494 } 495 - set_irq_chained_handler(IRQ_IXP2000_ERRSUM, ixp2000_err_irq_handler); 495 + irq_set_chained_handler(IRQ_IXP2000_ERRSUM, ixp2000_err_irq_handler); 496 496 497 497 for (irq = IRQ_IXP2000_GPIO0; irq <= IRQ_IXP2000_GPIO7; irq++) { 498 - set_irq_chip(irq, &ixp2000_GPIO_irq_chip); 499 - set_irq_handler(irq, handle_level_irq); 498 + irq_set_chip_and_handler(irq, &ixp2000_GPIO_irq_chip, 499 + handle_level_irq); 500 500 set_irq_flags(irq, IRQF_VALID); 501 501 } 502 - set_irq_chained_handler(IRQ_IXP2000_GPIO, ixp2000_GPIO_irq_handler); 502 + irq_set_chained_handler(IRQ_IXP2000_GPIO, ixp2000_GPIO_irq_handler); 503 503 504 504 /* 505 505 * Enable PCI irqs. The actual PCI[AB] decoding is done in ··· 508 508 */ 509 509 ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << IRQ_IXP2000_PCI)); 510 510 for (irq = IRQ_IXP2000_PCIA; irq <= IRQ_IXP2000_PCIB; irq++) { 511 - set_irq_chip(irq, &ixp2000_pci_irq_chip); 512 - set_irq_handler(irq, handle_level_irq); 511 + irq_set_chip_and_handler(irq, &ixp2000_pci_irq_chip, 512 + handle_level_irq); 513 513 set_irq_flags(irq, IRQF_VALID); 514 514 } 515 515 }
+3 -3
arch/arm/mach-ixp2000/ixdp2x00.c
··· 158 158 *board_irq_mask = 0xffffffff; 159 159 160 160 for(irq = IXP2000_BOARD_IRQ(0); irq < IXP2000_BOARD_IRQ(board_irq_count); irq++) { 161 - set_irq_chip(irq, &ixdp2x00_cpld_irq_chip); 162 - set_irq_handler(irq, handle_level_irq); 161 + irq_set_chip_and_handler(irq, &ixdp2x00_cpld_irq_chip, 162 + handle_level_irq); 163 163 set_irq_flags(irq, IRQF_VALID); 164 164 } 165 165 166 166 /* Hook into PCI interrupt */ 167 - set_irq_chained_handler(IRQ_IXP2000_PCIB, ixdp2x00_irq_handler); 167 + irq_set_chained_handler(IRQ_IXP2000_PCIB, ixdp2x00_irq_handler); 168 168 } 169 169 170 170 /*************************************************************************
+3 -3
arch/arm/mach-ixp2000/ixdp2x01.c
··· 115 115 116 116 for (irq = NR_IXP2000_IRQS; irq < NR_IXDP2X01_IRQS; irq++) { 117 117 if (irq & valid_irq_mask) { 118 - set_irq_chip(irq, &ixdp2x01_irq_chip); 119 - set_irq_handler(irq, handle_level_irq); 118 + irq_set_chip_and_handler(irq, &ixdp2x01_irq_chip, 119 + handle_level_irq); 120 120 set_irq_flags(irq, IRQF_VALID); 121 121 } else { 122 122 set_irq_flags(irq, 0); ··· 124 124 } 125 125 126 126 /* Hook into PCI interrupts */ 127 - set_irq_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler); 127 + irq_set_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler); 128 128 } 129 129 130 130
+7 -7
arch/arm/mach-ixp23xx/core.c
··· 289 289 { 290 290 switch (type) { 291 291 case IXP23XX_IRQ_LEVEL: 292 - set_irq_chip(irq, &ixp23xx_irq_level_chip); 293 - set_irq_handler(irq, handle_level_irq); 292 + irq_set_chip_and_handler(irq, &ixp23xx_irq_level_chip, 293 + handle_level_irq); 294 294 break; 295 295 case IXP23XX_IRQ_EDGE: 296 - set_irq_chip(irq, &ixp23xx_irq_edge_chip); 297 - set_irq_handler(irq, handle_edge_irq); 296 + irq_set_chip_and_handler(irq, &ixp23xx_irq_edge_chip, 297 + handle_edge_irq); 298 298 break; 299 299 } 300 300 set_irq_flags(irq, IRQF_VALID); ··· 324 324 } 325 325 326 326 for (irq = IRQ_IXP23XX_INTA; irq <= IRQ_IXP23XX_INTB; irq++) { 327 - set_irq_chip(irq, &ixp23xx_pci_irq_chip); 328 - set_irq_handler(irq, handle_level_irq); 327 + irq_set_chip_and_handler(irq, &ixp23xx_pci_irq_chip, 328 + handle_level_irq); 329 329 set_irq_flags(irq, IRQF_VALID); 330 330 } 331 331 332 - set_irq_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler); 332 + irq_set_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler); 333 333 } 334 334 335 335
+6 -6
arch/arm/mach-ixp23xx/ixdp2351.c
··· 136 136 irq++) { 137 137 if (IXDP2351_INTA_IRQ_MASK(irq) & IXDP2351_INTA_IRQ_VALID) { 138 138 set_irq_flags(irq, IRQF_VALID); 139 - set_irq_handler(irq, handle_level_irq); 140 - set_irq_chip(irq, &ixdp2351_inta_chip); 139 + irq_set_chip_and_handler(irq, &ixdp2351_inta_chip, 140 + handle_level_irq); 141 141 } 142 142 } 143 143 ··· 147 147 irq++) { 148 148 if (IXDP2351_INTB_IRQ_MASK(irq) & IXDP2351_INTB_IRQ_VALID) { 149 149 set_irq_flags(irq, IRQF_VALID); 150 - set_irq_handler(irq, handle_level_irq); 151 - set_irq_chip(irq, &ixdp2351_intb_chip); 150 + irq_set_chip_and_handler(irq, &ixdp2351_intb_chip, 151 + handle_level_irq); 152 152 } 153 153 } 154 154 155 - set_irq_chained_handler(IRQ_IXP23XX_INTA, ixdp2351_inta_handler); 156 - set_irq_chained_handler(IRQ_IXP23XX_INTB, ixdp2351_intb_handler); 155 + irq_set_chained_handler(IRQ_IXP23XX_INTA, ixdp2351_inta_handler); 156 + irq_set_chained_handler(IRQ_IXP23XX_INTB, ixdp2351_intb_handler); 157 157 } 158 158 159 159 /*
+2 -2
arch/arm/mach-ixp23xx/roadrunner.c
··· 110 110 111 111 static void __init roadrunner_pci_preinit(void) 112 112 { 113 - set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQ_TYPE_LEVEL_LOW); 114 - set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQ_TYPE_LEVEL_LOW); 113 + irq_set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQ_TYPE_LEVEL_LOW); 114 + irq_set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQ_TYPE_LEVEL_LOW); 115 115 116 116 ixp23xx_pci_preinit(); 117 117 }
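The long run of ixp4xx PCI preinit hunks that follows is the same one-for-one rename, set_irq_type() becoming irq_set_irq_type(). Each call still just fixes the trigger mode of a GPIO-routed PCI INTx line before any driver requests it, for example (the irq number is illustrative):

	/* these boards wire PCI INTx as active-low level signals */
	irq_set_irq_type(MY_PCI_INTA_IRQ, IRQ_TYPE_LEVEL_LOW);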
+4 -4
arch/arm/mach-ixp4xx/avila-pci.c
··· 39 39 40 40 void __init avila_pci_preinit(void) 41 41 { 42 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 43 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 44 - set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 45 - set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 42 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 43 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 44 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 45 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 46 46 ixp4xx_pci_preinit(); 47 47 } 48 48
+2 -2
arch/arm/mach-ixp4xx/common.c
··· 252 252 253 253 /* Default to all level triggered */ 254 254 for(i = 0; i < NR_IRQS; i++) { 255 - set_irq_chip(i, &ixp4xx_irq_chip); 256 - set_irq_handler(i, handle_level_irq); 255 + irq_set_chip_and_handler(i, &ixp4xx_irq_chip, 256 + handle_level_irq); 257 257 set_irq_flags(i, IRQF_VALID); 258 258 } 259 259 }
+2 -2
arch/arm/mach-ixp4xx/coyote-pci.c
··· 32 32 33 33 void __init coyote_pci_preinit(void) 34 34 { 35 - set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW); 36 - set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW); 35 + irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW); 36 + irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW); 37 37 ixp4xx_pci_preinit(); 38 38 } 39 39
+6 -6
arch/arm/mach-ixp4xx/dsmg600-pci.c
··· 35 35 36 36 void __init dsmg600_pci_preinit(void) 37 37 { 38 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 39 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 40 - set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 41 - set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 42 - set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); 43 - set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW); 38 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 39 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 40 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 41 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 42 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); 43 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW); 44 44 ixp4xx_pci_preinit(); 45 45 } 46 46
+3 -3
arch/arm/mach-ixp4xx/fsg-pci.c
··· 32 32 33 33 void __init fsg_pci_preinit(void) 34 34 { 35 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 36 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 37 - set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 35 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 36 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 37 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 38 38 ixp4xx_pci_preinit(); 39 39 } 40 40
+2 -2
arch/arm/mach-ixp4xx/gateway7001-pci.c
··· 29 29 30 30 void __init gateway7001_pci_preinit(void) 31 31 { 32 - set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW); 33 - set_irq_type(IRQ_IXP4XX_GPIO11, IRQ_TYPE_LEVEL_LOW); 32 + irq_set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW); 33 + irq_set_irq_type(IRQ_IXP4XX_GPIO11, IRQ_TYPE_LEVEL_LOW); 34 34 35 35 ixp4xx_pci_preinit(); 36 36 }
+6 -6
arch/arm/mach-ixp4xx/goramo_mlr.c
··· 420 420 gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT); 421 421 gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN); 422 422 gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN); 423 - set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); 424 - set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); 423 + irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); 424 + irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); 425 425 426 426 set_control(CONTROL_HSS0_DTR_N, 1); 427 427 set_control(CONTROL_HSS1_DTR_N, 1); ··· 441 441 #ifdef CONFIG_PCI 442 442 static void __init gmlr_pci_preinit(void) 443 443 { 444 - set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); 445 - set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); 446 - set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); 447 - set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); 444 + irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); 445 + irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); 446 + irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); 447 + irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); 448 448 ixp4xx_pci_preinit(); 449 449 } 450 450
+2 -2
arch/arm/mach-ixp4xx/gtwx5715-pci.c
··· 43 43 */ 44 44 void __init gtwx5715_pci_preinit(void) 45 45 { 46 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 47 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 46 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 47 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 48 48 ixp4xx_pci_preinit(); 49 49 } 50 50
+4 -4
arch/arm/mach-ixp4xx/ixdp425-pci.c
··· 36 36 37 37 void __init ixdp425_pci_preinit(void) 38 38 { 39 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 40 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 41 - set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 42 - set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 39 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 40 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 41 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 42 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 43 43 ixp4xx_pci_preinit(); 44 44 } 45 45
+2 -2
arch/arm/mach-ixp4xx/ixdpg425-pci.c
··· 25 25 26 26 void __init ixdpg425_pci_preinit(void) 27 27 { 28 - set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW); 29 - set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW); 28 + irq_set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW); 29 + irq_set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW); 30 30 31 31 ixp4xx_pci_preinit(); 32 32 }
+5 -5
arch/arm/mach-ixp4xx/nas100d-pci.c
··· 33 33 34 34 void __init nas100d_pci_preinit(void) 35 35 { 36 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 37 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 38 - set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 39 - set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 40 - set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); 36 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 37 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 38 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 39 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); 40 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); 41 41 ixp4xx_pci_preinit(); 42 42 } 43 43
+3 -3
arch/arm/mach-ixp4xx/nslu2-pci.c
··· 32 32 33 33 void __init nslu2_pci_preinit(void) 34 34 { 35 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 36 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 37 - set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 35 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 36 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 37 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); 38 38 ixp4xx_pci_preinit(); 39 39 } 40 40
+2 -2
arch/arm/mach-ixp4xx/vulcan-pci.c
··· 38 38 pr_info("Vulcan PCI: limiting CardBus memory size to %dMB\n", 39 39 (int)(pci_cardbus_mem_size >> 20)); 40 40 #endif 41 - set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 42 - set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 41 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); 42 + irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); 43 43 ixp4xx_pci_preinit(); 44 44 } 45 45
+2 -2
arch/arm/mach-ixp4xx/wg302v2-pci.c
··· 29 29 30 30 void __init wg302v2_pci_preinit(void) 31 31 { 32 - set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW); 33 - set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW); 32 + irq_set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW); 33 + irq_set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW); 34 34 35 35 ixp4xx_pci_preinit(); 36 36 }
+8 -7
arch/arm/mach-kirkwood/irq.c
··· 35 35 */ 36 36 orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0, 37 37 IRQ_KIRKWOOD_GPIO_START); 38 - set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler); 39 - set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler); 40 - set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler); 41 - set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler); 38 + irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler); 39 + irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler); 40 + irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler); 41 + irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler); 42 42 43 43 orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0, 44 44 IRQ_KIRKWOOD_GPIO_START + 32); 45 - set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler); 46 - set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler); 47 - set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23, gpio_irq_handler); 45 + irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler); 46 + irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler); 47 + irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23, 48 + gpio_irq_handler); 48 49 }
+7
arch/arm/mach-kirkwood/sheevaplug-setup.c
··· 58 58 59 59 static struct gpio_led sheevaplug_led_pins[] = { 60 60 { 61 + .name = "plug:red:misc", 62 + .default_trigger = "none", 63 + .gpio = 46, 64 + .active_low = 1, 65 + }, 66 + { 61 67 .name = "plug:green:health", 62 68 .default_trigger = "default-on", 63 69 .gpio = 49, ··· 86 80 87 81 static unsigned int sheevaplug_mpp_config[] __initdata = { 88 82 MPP29_GPIO, /* USB Power Enable */ 83 + MPP46_GPIO, /* LED Red */ 89 84 MPP49_GPIO, /* LED */ 90 85 0 91 86 };
+1 -1
arch/arm/mach-ks8695/gpio.c
··· 80 80 local_irq_restore(flags); 81 81 82 82 /* Set IRQ triggering type */ 83 - set_irq_type(gpio_irq[pin], type); 83 + irq_set_irq_type(gpio_irq[pin], type); 84 84 85 85 /* enable interrupt mode */ 86 86 ks8695_gpio_mode(pin, 0);
+10 -8
arch/arm/mach-ks8695/irq.c
··· 115 115 } 116 116 117 117 if (level_triggered) { 118 - set_irq_chip(d->irq, &ks8695_irq_level_chip); 119 - set_irq_handler(d->irq, handle_level_irq); 118 + irq_set_chip_and_handler(d->irq, &ks8695_irq_level_chip, 119 + handle_level_irq); 120 120 } 121 121 else { 122 - set_irq_chip(d->irq, &ks8695_irq_edge_chip); 123 - set_irq_handler(d->irq, handle_edge_irq); 122 + irq_set_chip_and_handler(d->irq, &ks8695_irq_edge_chip, 123 + handle_edge_irq); 124 124 } 125 125 126 126 __raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC); ··· 158 158 case KS8695_IRQ_UART_RX: 159 159 case KS8695_IRQ_COMM_TX: 160 160 case KS8695_IRQ_COMM_RX: 161 - set_irq_chip(irq, &ks8695_irq_level_chip); 162 - set_irq_handler(irq, handle_level_irq); 161 + irq_set_chip_and_handler(irq, 162 + &ks8695_irq_level_chip, 163 + handle_level_irq); 163 164 break; 164 165 165 166 /* Edge-triggered interrupts */ 166 167 default: 167 168 /* clear pending bit */ 168 169 ks8695_irq_ack(irq_get_irq_data(irq)); 169 - set_irq_chip(irq, &ks8695_irq_edge_chip); 170 - set_irq_handler(irq, handle_edge_irq); 170 + irq_set_chip_and_handler(irq, 171 + &ks8695_irq_edge_chip, 172 + handle_edge_irq); 171 173 } 172 174 173 175 set_irq_flags(irq, IRQF_VALID);
+5 -5
arch/arm/mach-lpc32xx/irq.c
··· 290 290 } 291 291 292 292 /* Ok to use the level handler for all types */ 293 - set_irq_handler(d->irq, handle_level_irq); 293 + irq_set_handler(d->irq, handle_level_irq); 294 294 295 295 return 0; 296 296 } ··· 390 390 391 391 /* Configure supported IRQ's */ 392 392 for (i = 0; i < NR_IRQS; i++) { 393 - set_irq_chip(i, &lpc32xx_irq_chip); 394 - set_irq_handler(i, handle_level_irq); 393 + irq_set_chip_and_handler(i, &lpc32xx_irq_chip, 394 + handle_level_irq); 395 395 set_irq_flags(i, IRQF_VALID); 396 396 } 397 397 ··· 406 406 __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE)); 407 407 408 408 /* MIC SUBIRQx interrupts will route handling to the chain handlers */ 409 - set_irq_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler); 410 - set_irq_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler); 409 + irq_set_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler); 410 + irq_set_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler); 411 411 412 412 /* Initially disable all wake events */ 413 413 __raw_writel(0, LPC32XX_CLKPWR_P01_ER);
+9 -9
arch/arm/mach-mmp/irq-mmp2.c
··· 110 110 if (chip->irq_ack) 111 111 chip->irq_ack(d); 112 112 113 - set_irq_chip(irq, chip); 113 + irq_set_chip(irq, chip); 114 114 set_irq_flags(irq, IRQF_VALID); 115 - set_irq_handler(irq, handle_level_irq); 115 + irq_set_handler(irq, handle_level_irq); 116 116 } 117 117 } 118 118 ··· 122 122 123 123 for (irq = 0; irq < IRQ_MMP2_MUX_BASE; irq++) { 124 124 icu_mask_irq(irq_get_irq_data(irq)); 125 - set_irq_chip(irq, &icu_irq_chip); 125 + irq_set_chip(irq, &icu_irq_chip); 126 126 set_irq_flags(irq, IRQF_VALID); 127 127 128 128 switch (irq) { ··· 133 133 case IRQ_MMP2_SSP_MUX: 134 134 break; 135 135 default: 136 - set_irq_handler(irq, handle_level_irq); 136 + irq_set_handler(irq, handle_level_irq); 137 137 break; 138 138 } 139 139 } ··· 149 149 init_mux_irq(&misc_irq_chip, IRQ_MMP2_MISC_BASE, 15); 150 150 init_mux_irq(&ssp_irq_chip, IRQ_MMP2_SSP_BASE, 2); 151 151 152 - set_irq_chained_handler(IRQ_MMP2_PMIC_MUX, pmic_irq_demux); 153 - set_irq_chained_handler(IRQ_MMP2_RTC_MUX, rtc_irq_demux); 154 - set_irq_chained_handler(IRQ_MMP2_TWSI_MUX, twsi_irq_demux); 155 - set_irq_chained_handler(IRQ_MMP2_MISC_MUX, misc_irq_demux); 156 - set_irq_chained_handler(IRQ_MMP2_SSP_MUX, ssp_irq_demux); 152 + irq_set_chained_handler(IRQ_MMP2_PMIC_MUX, pmic_irq_demux); 153 + irq_set_chained_handler(IRQ_MMP2_RTC_MUX, rtc_irq_demux); 154 + irq_set_chained_handler(IRQ_MMP2_TWSI_MUX, twsi_irq_demux); 155 + irq_set_chained_handler(IRQ_MMP2_MISC_MUX, misc_irq_demux); 156 + irq_set_chained_handler(IRQ_MMP2_SSP_MUX, ssp_irq_demux); 157 157 }
+1 -2
arch/arm/mach-mmp/irq-pxa168.c
··· 48 48 49 49 for (irq = 0; irq < 64; irq++) { 50 50 icu_mask_irq(irq_get_irq_data(irq)); 51 - set_irq_chip(irq, &icu_irq_chip); 52 - set_irq_handler(irq, handle_level_irq); 51 + irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); 53 52 set_irq_flags(irq, IRQF_VALID); 54 53 } 55 54 }
+1 -1
arch/arm/mach-msm/board-msm8960.c
··· 53 53 */ 54 54 for (i = GIC_PPI_START; i < GIC_SPI_START; i++) { 55 55 if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE) 56 - set_irq_handler(i, handle_percpu_irq); 56 + irq_set_handler(i, handle_percpu_irq); 57 57 } 58 58 } 59 59
+1 -1
arch/arm/mach-msm/board-msm8x60.c
··· 56 56 */ 57 57 for (i = GIC_PPI_START; i < GIC_SPI_START; i++) { 58 58 if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE) 59 - set_irq_handler(i, handle_percpu_irq); 59 + irq_set_handler(i, handle_percpu_irq); 60 60 } 61 61 } 62 62
+5 -5
arch/arm/mach-msm/board-trout-gpio.c
··· 214 214 { 215 215 int i; 216 216 for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) { 217 - set_irq_chip(i, &trout_gpio_irq_chip); 218 - set_irq_handler(i, handle_edge_irq); 217 + irq_set_chip_and_handler(i, &trout_gpio_irq_chip, 218 + handle_edge_irq); 219 219 set_irq_flags(i, IRQF_VALID); 220 220 } 221 221 222 222 for (i = 0; i < ARRAY_SIZE(msm_gpio_banks); i++) 223 223 gpiochip_add(&msm_gpio_banks[i].chip); 224 224 225 - set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH); 226 - set_irq_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler); 227 - set_irq_wake(MSM_GPIO_TO_INT(17), 1); 225 + irq_set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH); 226 + irq_set_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler); 227 + irq_set_irq_wake(MSM_GPIO_TO_INT(17), 1); 228 228 229 229 return 0; 230 230 }
+1 -1
arch/arm/mach-msm/board-trout-mmc.c
··· 174 174 if (IS_ERR(vreg_sdslot)) 175 175 return PTR_ERR(vreg_sdslot); 176 176 177 - set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1); 177 + irq_set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1); 178 178 179 179 if (!opt_disable_sdcard) 180 180 msm_add_sdcc(2, &trout_sdslot_data,
+25 -24
arch/arm/mach-msm/gpio-v2.c
··· 230 230 val, val2); 231 231 } 232 232 233 - static void msm_gpio_irq_ack(unsigned int irq) 233 + static void msm_gpio_irq_ack(struct irq_data *d) 234 234 { 235 - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); 235 + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); 236 236 237 237 writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio)); 238 238 if (test_bit(gpio, msm_gpio.dual_edge_irqs)) 239 239 msm_gpio_update_dual_edge_pos(gpio); 240 240 } 241 241 242 - static void msm_gpio_irq_mask(unsigned int irq) 242 + static void msm_gpio_irq_mask(struct irq_data *d) 243 243 { 244 - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); 244 + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); 245 245 unsigned long irq_flags; 246 246 247 247 spin_lock_irqsave(&tlmm_lock, irq_flags); ··· 251 251 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 252 252 } 253 253 254 - static void msm_gpio_irq_unmask(unsigned int irq) 254 + static void msm_gpio_irq_unmask(struct irq_data *d) 255 255 { 256 - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); 256 + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); 257 257 unsigned long irq_flags; 258 258 259 259 spin_lock_irqsave(&tlmm_lock, irq_flags); ··· 263 263 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 264 264 } 265 265 266 - static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type) 266 + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) 267 267 { 268 - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); 268 + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); 269 269 unsigned long irq_flags; 270 270 uint32_t bits; 271 271 ··· 275 275 276 276 if (flow_type & IRQ_TYPE_EDGE_BOTH) { 277 277 bits |= BIT(INTR_DECT_CTL); 278 - irq_desc[irq].handle_irq = handle_edge_irq; 278 + __irq_set_handler_locked(d->irq, handle_edge_irq); 279 279 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) 280 280 __set_bit(gpio, msm_gpio.dual_edge_irqs); 281 281 else 282 282 __clear_bit(gpio, msm_gpio.dual_edge_irqs); 283 283 } else { 284 284 bits &= ~BIT(INTR_DECT_CTL); 285 - irq_desc[irq].handle_irq = handle_level_irq; 285 + __irq_set_handler_locked(d->irq, handle_level_irq); 286 286 __clear_bit(gpio, msm_gpio.dual_edge_irqs); 287 287 } 288 288 ··· 309 309 */ 310 310 static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) 311 311 { 312 + struct irq_data *data = irq_desc_get_irq_data(desc); 312 313 unsigned long i; 313 314 314 315 for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS); ··· 319 318 generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip, 320 319 i)); 321 320 } 322 - desc->chip->ack(irq); 321 + data->chip->irq_ack(data); 323 322 } 324 323 325 - static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on) 324 + static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) 326 325 { 327 - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); 326 + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); 328 327 329 328 if (on) { 330 329 if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) 331 - set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1); 330 + irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1); 332 331 set_bit(gpio, msm_gpio.wake_irqs); 333 332 } else { 334 333 clear_bit(gpio, msm_gpio.wake_irqs); 335 334 if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) 336 - set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0); 335 + irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0); 337 336 } 338 337 339 338 return 0; ··· 341 340 342 341 static struct irq_chip msm_gpio_irq_chip = { 343 342 .name = "msmgpio", 344 - .mask = msm_gpio_irq_mask, 345 - .unmask = msm_gpio_irq_unmask, 346 - .ack = msm_gpio_irq_ack, 347 - .set_type = msm_gpio_irq_set_type, 348 - .set_wake = msm_gpio_irq_set_wake, 343 + .irq_mask = msm_gpio_irq_mask, 344 + .irq_unmask = msm_gpio_irq_unmask, 345 + .irq_ack = msm_gpio_irq_ack, 346 + .irq_set_type = msm_gpio_irq_set_type, 347 + .irq_set_wake = msm_gpio_irq_set_wake, 349 348 }; 350 349 351 350 static int __devinit msm_gpio_probe(struct platform_device *dev) ··· 362 361 363 362 for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) { 364 363 irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i); 365 - set_irq_chip(irq, &msm_gpio_irq_chip); 366 - set_irq_handler(irq, handle_level_irq); 364 + irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, 365 + handle_level_irq); 367 366 set_irq_flags(irq, IRQF_VALID); 368 367 } 369 368 370 - set_irq_chained_handler(TLMM_SCSS_SUMMARY_IRQ, 369 + irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ, 371 370 msm_summary_irq_handler); 372 371 return 0; 373 372 } ··· 379 378 if (ret < 0) 380 379 return ret; 381 380 382 - set_irq_handler(TLMM_SCSS_SUMMARY_IRQ, NULL); 381 + irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL); 383 382 384 383 return 0; 385 384 }
+9 -9
arch/arm/mach-msm/gpio.c
··· 293 293 val = readl(msm_chip->regs.int_edge); 294 294 if (flow_type & IRQ_TYPE_EDGE_BOTH) { 295 295 writel(val | mask, msm_chip->regs.int_edge); 296 - irq_desc[d->irq].handle_irq = handle_edge_irq; 296 + __irq_set_handler_locked(d->irq, handle_edge_irq); 297 297 } else { 298 298 writel(val & ~mask, msm_chip->regs.int_edge); 299 - irq_desc[d->irq].handle_irq = handle_level_irq; 299 + __irq_set_handler_locked(d->irq, handle_level_irq); 300 300 } 301 301 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { 302 302 msm_chip->both_edge_detect |= mask; ··· 354 354 msm_gpio_chips[j].chip.base + 355 355 msm_gpio_chips[j].chip.ngpio) 356 356 j++; 357 - set_irq_chip_data(i, &msm_gpio_chips[j]); 358 - set_irq_chip(i, &msm_gpio_irq_chip); 359 - set_irq_handler(i, handle_edge_irq); 357 + irq_set_chip_data(i, &msm_gpio_chips[j]); 358 + irq_set_chip_and_handler(i, &msm_gpio_irq_chip, 359 + handle_edge_irq); 360 360 set_irq_flags(i, IRQF_VALID); 361 361 } 362 362 ··· 366 366 gpiochip_add(&msm_gpio_chips[i].chip); 367 367 } 368 368 369 - set_irq_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler); 370 - set_irq_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler); 371 - set_irq_wake(INT_GPIO_GROUP1, 1); 372 - set_irq_wake(INT_GPIO_GROUP2, 2); 369 + irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler); 370 + irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler); 371 + irq_set_irq_wake(INT_GPIO_GROUP1, 1); 372 + irq_set_irq_wake(INT_GPIO_GROUP2, 2); 373 373 return 0; 374 374 } 375 375
+3 -4
arch/arm/mach-msm/irq-vic.c
··· 313 313 type = msm_irq_shadow_reg[index].int_type; 314 314 if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { 315 315 type |= b; 316 - irq_desc[d->irq].handle_irq = handle_edge_irq; 316 + __irq_set_handler_locked(d->irq, handle_edge_irq); 317 317 } 318 318 if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { 319 319 type &= ~b; 320 - irq_desc[d->irq].handle_irq = handle_level_irq; 320 + __irq_set_handler_locked(d->irq, handle_level_irq); 321 321 } 322 322 writel(type, treg); 323 323 msm_irq_shadow_reg[index].int_type = type; ··· 357 357 writel(3, VIC_INT_MASTEREN); 358 358 359 359 for (n = 0; n < NR_MSM_IRQS; n++) { 360 - set_irq_chip(n, &msm_irq_chip); 361 - set_irq_handler(n, handle_level_irq); 360 + irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq); 362 361 set_irq_flags(n, IRQF_VALID); 363 362 } 364 363 }
+3 -4
arch/arm/mach-msm/irq.c
··· 100 100 101 101 if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { 102 102 writel(readl(treg) | b, treg); 103 - irq_desc[d->irq].handle_irq = handle_edge_irq; 103 + __irq_set_handler_locked(d->irq, handle_edge_irq); 104 104 } 105 105 if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { 106 106 writel(readl(treg) & (~b), treg); 107 - irq_desc[d->irq].handle_irq = handle_level_irq; 107 + __irq_set_handler_locked(d->irq, handle_level_irq); 108 108 } 109 109 return 0; 110 110 } ··· 145 145 writel(1, VIC_INT_MASTEREN); 146 146 147 147 for (n = 0; n < NR_MSM_IRQS; n++) { 148 - set_irq_chip(n, &msm_irq_chip); 149 - set_irq_handler(n, handle_level_irq); 148 + irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq); 150 149 set_irq_flags(n, IRQF_VALID); 151 150 } 152 151 }
+5 -6
arch/arm/mach-msm/sirc.c
··· 105 105 val = readl(sirc_regs.int_type); 106 106 if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { 107 107 val |= mask; 108 - irq_desc[d->irq].handle_irq = handle_edge_irq; 108 + __irq_set_handler_locked(d->irq, handle_edge_irq); 109 109 } else { 110 110 val &= ~mask; 111 - irq_desc[d->irq].handle_irq = handle_level_irq; 111 + __irq_set_handler_locked(d->irq, handle_level_irq); 112 112 } 113 113 114 114 writel(val, sirc_regs.int_type); ··· 158 158 wake_enable = 0; 159 159 160 160 for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) { 161 - set_irq_chip(i, &sirc_irq_chip); 162 - set_irq_handler(i, handle_edge_irq); 161 + irq_set_chip_and_handler(i, &sirc_irq_chip, handle_edge_irq); 163 162 set_irq_flags(i, IRQF_VALID); 164 163 } 165 164 166 165 for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) { 167 - set_irq_chained_handler(sirc_reg_table[i].cascade_irq, 166 + irq_set_chained_handler(sirc_reg_table[i].cascade_irq, 168 167 sirc_irq_handler); 169 - set_irq_wake(sirc_reg_table[i].cascade_irq, 1); 168 + irq_set_irq_wake(sirc_reg_table[i].cascade_irq, 1); 170 169 } 171 170 return; 172 171 }
+4 -4
arch/arm/mach-mv78xx0/irq.c
··· 38 38 orion_gpio_init(0, 32, GPIO_VIRT_BASE, 39 39 mv78xx0_core_index() ? 0x18 : 0, 40 40 IRQ_MV78XX0_GPIO_START); 41 - set_irq_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler); 42 - set_irq_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler); 43 - set_irq_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler); 44 - set_irq_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler); 41 + irq_set_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler); 42 + irq_set_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler); 43 + irq_set_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler); 44 + irq_set_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler); 45 45 }
+11 -3
arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
··· 43 43 #include <mach/ipu.h> 44 44 #include <mach/mx3fb.h> 45 45 #include <mach/audmux.h> 46 + #include <mach/esdhc.h> 46 47 47 48 #include "devices-imx35.h" 48 49 #include "devices.h" ··· 164 163 MX35_PAD_SD1_DATA1__ESDHC1_DAT1, 165 164 MX35_PAD_SD1_DATA2__ESDHC1_DAT2, 166 165 MX35_PAD_SD1_DATA3__ESDHC1_DAT3, 166 + /* SD1 CD */ 167 + MX35_PAD_LD18__GPIO3_24, 167 168 }; 168 169 169 170 #define GPIO_LED1 IMX_GPIO_NR(3, 29) 170 171 #define GPIO_SWITCH1 IMX_GPIO_NR(3, 25) 171 - #define GPIO_LCDPWR (4) 172 + #define GPIO_LCDPWR IMX_GPIO_NR(1, 4) 173 + #define GPIO_SD1CD IMX_GPIO_NR(3, 24) 172 174 173 175 static void eukrea_mbimxsd_lcd_power_set(struct plat_lcd_data *pd, 174 176 unsigned int power) ··· 258 254 .flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE, 259 255 }; 260 256 257 + static struct esdhc_platform_data sd1_pdata = { 258 + .cd_gpio = GPIO_SD1CD, 259 + .wp_gpio = -EINVAL, 260 + }; 261 + 261 262 /* 262 263 * system init for baseboard usage. Will be called by cpuimx35 init. 263 264 * ··· 298 289 imx35_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata); 299 290 300 291 imx35_add_flexcan1(NULL); 301 - imx35_add_sdhci_esdhc_imx(0, NULL); 292 + imx35_add_sdhci_esdhc_imx(0, &sd1_pdata); 302 293 303 294 gpio_request(GPIO_LED1, "LED1"); 304 295 gpio_direction_output(GPIO_LED1, 1); ··· 310 301 311 302 gpio_request(GPIO_LCDPWR, "LCDPWR"); 312 303 gpio_direction_output(GPIO_LCDPWR, 1); 313 - gpio_free(GPIO_LCDPWR); 314 304 315 305 i2c_register_board_info(0, eukrea_mbimxsd_i2c_devices, 316 306 ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
+3 -4
arch/arm/mach-mx3/mach-mx31ads.c
··· 199 199 __raw_writew(0xFFFF, PBC_INTSTATUS_REG); 200 200 for (i = MXC_EXP_IO_BASE; i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); 201 201 i++) { 202 - set_irq_chip(i, &expio_irq_chip); 203 - set_irq_handler(i, handle_level_irq); 202 + irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq); 204 203 set_irq_flags(i, IRQF_VALID); 205 204 } 206 - set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_HIGH); 207 - set_irq_chained_handler(EXPIO_PARENT_INT, mx31ads_expio_irq_handler); 205 + irq_set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_HIGH); 206 + irq_set_chained_handler(EXPIO_PARENT_INT, mx31ads_expio_irq_handler); 208 207 } 209 208 210 209 #ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
+11 -1
arch/arm/mach-mx3/mach-pcm043.c
··· 40 40 #include <mach/mx3fb.h> 41 41 #include <mach/ulpi.h> 42 42 #include <mach/audmux.h> 43 + #include <mach/esdhc.h> 43 44 44 45 #include "devices-imx35.h" 45 46 #include "devices.h" ··· 218 217 MX35_PAD_SD1_DATA1__ESDHC1_DAT1, 219 218 MX35_PAD_SD1_DATA2__ESDHC1_DAT2, 220 219 MX35_PAD_SD1_DATA3__ESDHC1_DAT3, 220 + MX35_PAD_ATA_DATA10__GPIO2_23, /* WriteProtect */ 221 + MX35_PAD_ATA_DATA11__GPIO2_24, /* CardDetect */ 221 222 }; 222 223 223 224 #define AC97_GPIO_TXFS IMX_GPIO_NR(2, 31) 224 225 #define AC97_GPIO_TXD IMX_GPIO_NR(2, 28) 225 226 #define AC97_GPIO_RESET IMX_GPIO_NR(2, 0) 227 + #define SD1_GPIO_WP IMX_GPIO_NR(2, 23) 228 + #define SD1_GPIO_CD IMX_GPIO_NR(2, 24) 226 229 227 230 static void pcm043_ac97_warm_reset(struct snd_ac97 *ac97) 228 231 { ··· 351 346 } 352 347 __setup("otg_mode=", pcm043_otg_mode); 353 348 349 + static struct esdhc_platform_data sd1_pdata = { 350 + .wp_gpio = SD1_GPIO_WP, 351 + .cd_gpio = SD1_GPIO_CD, 352 + }; 353 + 354 354 /* 355 355 * Board specific initialization. 356 356 */ ··· 405 395 imx35_add_fsl_usb2_udc(&otg_device_pdata); 406 396 407 397 imx35_add_flexcan1(NULL); 408 - imx35_add_sdhci_esdhc_imx(0, NULL); 398 + imx35_add_sdhci_esdhc_imx(0, &sd1_pdata); 409 399 } 410 400 411 401 static void __init pcm043_timer_init(void)
+1
arch/arm/mach-mx5/Kconfig
··· 165 165 select IMX_HAVE_PLATFORM_IMX_I2C 166 166 select IMX_HAVE_PLATFORM_IMX_UART 167 167 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX 168 + select IMX_HAVE_PLATFORM_GPIO_KEYS 168 169 help 169 170 Include support for MX53 LOCO platform. This includes specific 170 171 configurations for the board and its peripherals.
+1 -1
arch/arm/mach-mx5/Makefile
··· 3 3 # 4 4 5 5 # Object file lists. 6 - obj-y := cpu.o mm.o clock-mx51-mx53.o devices.o ehci.o 6 + obj-y := cpu.o mm.o clock-mx51-mx53.o devices.o ehci.o system.o 7 7 obj-$(CONFIG_SOC_IMX50) += mm-mx50.o 8 8 9 9 obj-$(CONFIG_CPU_FREQ_IMX) += cpu_op-mx51.o
+2 -3
arch/arm/mach-mx5/board-mx51_babbage.c
··· 228 228 int ret; 229 229 230 230 /* reset FEC PHY */ 231 - ret = gpio_request(BABBAGE_FEC_PHY_RESET, "fec-phy-reset"); 231 + ret = gpio_request_one(BABBAGE_FEC_PHY_RESET, 232 + GPIOF_OUT_INIT_LOW, "fec-phy-reset"); 232 233 if (ret) { 233 234 printk(KERN_ERR"failed to get GPIO_FEC_PHY_RESET: %d\n", ret); 234 235 return; 235 236 } 236 - gpio_direction_output(BABBAGE_FEC_PHY_RESET, 0); 237 - gpio_set_value(BABBAGE_FEC_PHY_RESET, 0); 238 237 msleep(1); 239 238 gpio_set_value(BABBAGE_FEC_PHY_RESET, 1); 240 239 }
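gpio_request_one() rolls gpio_request() and the initial gpio_direction_output() into one call, which is also why the redundant gpio_set_value(..., 0) disappears. The reset-pulse pattern used here, reduced to a sketch (MY_PHY_RESET is an illustrative line number):

	#include <linux/gpio.h>
	#include <linux/delay.h>

	#define MY_PHY_RESET	87	/* illustrative gpio number */

	static void __init my_phy_reset(void)
	{
		/* claim the line and drive it low in a single step */
		if (gpio_request_one(MY_PHY_RESET, GPIOF_OUT_INIT_LOW, "phy-reset"))
			return;
		msleep(1);			/* hold reset for about 1 ms */
		gpio_set_value(MY_PHY_RESET, 1);	/* release the PHY */
	}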
+4 -5
arch/arm/mach-mx5/board-mx53_evk.c
··· 34 34 #include <mach/imx-uart.h> 35 35 #include <mach/iomux-mx53.h> 36 36 37 - #define SMD_FEC_PHY_RST IMX_GPIO_NR(7, 6) 37 + #define MX53_EVK_FEC_PHY_RST IMX_GPIO_NR(7, 6) 38 38 #define EVK_ECSPI1_CS0 IMX_GPIO_NR(2, 30) 39 39 #define EVK_ECSPI1_CS1 IMX_GPIO_NR(3, 19) 40 40 ··· 82 82 int ret; 83 83 84 84 /* reset FEC PHY */ 85 - ret = gpio_request(SMD_FEC_PHY_RST, "fec-phy-reset"); 85 + ret = gpio_request_one(MX53_EVK_FEC_PHY_RST, GPIOF_OUT_INIT_LOW, 86 + "fec-phy-reset"); 86 87 if (ret) { 87 88 printk(KERN_ERR"failed to get GPIO_FEC_PHY_RESET: %d\n", ret); 88 89 return; 89 90 } 90 - gpio_direction_output(SMD_FEC_PHY_RST, 0); 91 - gpio_set_value(SMD_FEC_PHY_RST, 0); 92 91 msleep(1); 93 - gpio_set_value(SMD_FEC_PHY_RST, 1); 92 + gpio_set_value(MX53_EVK_FEC_PHY_RST, 1); 94 93 } 95 94 96 95 static struct fec_platform_data mx53_evk_fec_pdata = {
+25
arch/arm/mach-mx5/board-mx53_loco.c
··· 36 36 #include "crm_regs.h" 37 37 #include "devices-imx53.h" 38 38 39 + #define MX53_LOCO_POWER IMX_GPIO_NR(1, 8) 40 + #define MX53_LOCO_UI1 IMX_GPIO_NR(2, 14) 41 + #define MX53_LOCO_UI2 IMX_GPIO_NR(2, 15) 39 42 #define LOCO_FEC_PHY_RST IMX_GPIO_NR(7, 6) 40 43 41 44 static iomux_v3_cfg_t mx53_loco_pads[] = { ··· 183 180 MX53_PAD_GPIO_8__GPIO1_8, 184 181 }; 185 182 183 + #define GPIO_BUTTON(gpio_num, ev_code, act_low, descr, wake) \ 184 + { \ 185 + .gpio = gpio_num, \ 186 + .type = EV_KEY, \ 187 + .code = ev_code, \ 188 + .active_low = act_low, \ 189 + .desc = "btn " descr, \ 190 + .wakeup = wake, \ 191 + } 192 + 193 + static const struct gpio_keys_button loco_buttons[] __initconst = { 194 + GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0), 195 + GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0), 196 + GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0), 197 + }; 198 + 199 + static const struct gpio_keys_platform_data loco_button_data __initconst = { 200 + .buttons = loco_buttons, 201 + .nbuttons = ARRAY_SIZE(loco_buttons), 202 + }; 203 + 186 204 static inline void mx53_loco_fec_reset(void) 187 205 { 188 206 int ret; ··· 239 215 imx53_add_imx_i2c(1, &mx53_loco_i2c_data); 240 216 imx53_add_sdhci_esdhc_imx(0, NULL); 241 217 imx53_add_sdhci_esdhc_imx(2, NULL); 218 + imx_add_gpio_keys(&loco_button_data); 242 219 } 243 220 244 221 static void __init mx53_loco_timer_init(void)
+9
arch/arm/mach-mx5/clock-mx51-mx53.c
··· 865 865 .disable = _clk_ccgr_disable_inwait, 866 866 }; 867 867 868 + static struct clk gpc_dvfs_clk = { 869 + .enable_reg = MXC_CCM_CCGR5, 870 + .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET, 871 + .enable = _clk_ccgr_enable, 872 + .disable = _clk_ccgr_disable, 873 + }; 874 + 868 875 static struct clk gpt_32k_clk = { 869 876 .id = 0, 870 877 .parent = &ckil_clk, ··· 1455 1448 _REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk) 1456 1449 _REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk) 1457 1450 _REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk) 1451 + _REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk) 1458 1452 }; 1459 1453 1460 1454 static struct clk_lookup mx53_lookups[] = { ··· 1519 1511 clk_enable(&iim_clk); 1520 1512 mx51_revision(); 1521 1513 clk_disable(&iim_clk); 1514 + mx51_display_revision(); 1522 1515 1523 1516 /* move usb_phy_clk to 24MHz */ 1524 1517 clk_set_parent(&usb_phy1_clk, &osc_clk);
+59
arch/arm/mach-mx5/cpu.c
··· 21 21 static int cpu_silicon_rev = -1; 22 22 23 23 #define IIM_SREV 0x24 24 + #define MX50_HW_ADADIG_DIGPROG 0xB0 24 25 25 26 static int get_mx51_srev(void) 26 27 { ··· 51 50 return cpu_silicon_rev; 52 51 } 53 52 EXPORT_SYMBOL(mx51_revision); 53 + 54 + void mx51_display_revision(void) 55 + { 56 + int rev; 57 + char *srev; 58 + rev = mx51_revision(); 59 + 60 + switch (rev) { 61 + case IMX_CHIP_REVISION_2_0: 62 + srev = IMX_CHIP_REVISION_2_0_STRING; 63 + break; 64 + case IMX_CHIP_REVISION_3_0: 65 + srev = IMX_CHIP_REVISION_3_0_STRING; 66 + break; 67 + default: 68 + srev = IMX_CHIP_REVISION_UNKNOWN_STRING; 69 + } 70 + printk(KERN_INFO "CPU identified as i.MX51, silicon rev %s\n", srev); 71 + } 72 + EXPORT_SYMBOL(mx51_display_revision); 54 73 55 74 #ifdef CONFIG_NEON 56 75 ··· 127 106 return cpu_silicon_rev; 128 107 } 129 108 EXPORT_SYMBOL(mx53_revision); 109 + 110 + static int get_mx50_srev(void) 111 + { 112 + void __iomem *anatop = ioremap(MX50_ANATOP_BASE_ADDR, SZ_8K); 113 + u32 rev; 114 + 115 + if (!anatop) { 116 + cpu_silicon_rev = -EINVAL; 117 + return 0; 118 + } 119 + 120 + rev = readl(anatop + MX50_HW_ADADIG_DIGPROG); 121 + rev &= 0xff; 122 + 123 + iounmap(anatop); 124 + if (rev == 0x0) 125 + return IMX_CHIP_REVISION_1_0; 126 + else if (rev == 0x1) 127 + return IMX_CHIP_REVISION_1_1; 128 + return 0; 129 + } 130 + 131 + /* 132 + * Returns: 133 + * the silicon revision of the cpu 134 + * -EINVAL - not a mx50 135 + */ 136 + int mx50_revision(void) 137 + { 138 + if (!cpu_is_mx50()) 139 + return -EINVAL; 140 + 141 + if (cpu_silicon_rev == -1) 142 + cpu_silicon_rev = get_mx50_srev(); 143 + 144 + return cpu_silicon_rev; 145 + } 146 + EXPORT_SYMBOL(mx50_revision); 130 147 131 148 static int __init post_cpu_init(void) 132 149 {
+1 -1
arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
··· 212 212 213 213 gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq"); 214 214 gpio_direction_input(MBIMX51_TSC2007_GPIO); 215 - set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING); 215 + irq_set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING); 216 216 i2c_register_board_info(1, mbimx51_i2c_devices, 217 217 ARRAY_SIZE(mbimx51_i2c_devices)); 218 218
+4
arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c
··· 67 67 MX51_PAD_SD1_DATA1__SD1_DATA1, 68 68 MX51_PAD_SD1_DATA2__SD1_DATA2, 69 69 MX51_PAD_SD1_DATA3__SD1_DATA3, 70 + /* SD1 CD */ 71 + _MX51_PAD_GPIO1_0__SD1_CD | MUX_PAD_CTRL(PAD_CTL_PUS_22K_UP | 72 + PAD_CTL_PKE | PAD_CTL_SRE_FAST | 73 + PAD_CTL_DSE_HIGH | PAD_CTL_PUE | PAD_CTL_HYS), 70 74 }; 71 75 72 76 #define GPIO_LED1 IMX_GPIO_NR(3, 30)
-1
arch/arm/mach-mx5/mx51_efika.c
··· 42 42 #include <asm/mach-types.h> 43 43 #include <asm/mach/arch.h> 44 44 #include <asm/mach/time.h> 45 - #include <asm/mach-types.h> 46 45 47 46 #include "devices-imx51.h" 48 47 #include "devices.h"
+84
arch/arm/mach-mx5/system.c
··· 1 + /* 2 + * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. 3 + */ 4 + 5 + /* 6 + * The code contained herein is licensed under the GNU General Public 7 + * License. You may obtain a copy of the GNU General Public License 8 + * Version 2 or later at the following locations: 9 + * 10 + * http://www.opensource.org/licenses/gpl-license.html 11 + * http://www.gnu.org/copyleft/gpl.html 12 + */ 13 + #include <linux/platform_device.h> 14 + #include <linux/io.h> 15 + #include <mach/hardware.h> 16 + #include "crm_regs.h" 17 + 18 + /* set cpu low power mode before WFI instruction. This function is called 19 + * mx5 because it can be used for mx50, mx51, and mx53.*/ 20 + void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode) 21 + { 22 + u32 plat_lpc, arm_srpgcr, ccm_clpcr; 23 + u32 empgc0, empgc1; 24 + int stop_mode = 0; 25 + 26 + /* always allow platform to issue a deep sleep mode request */ 27 + plat_lpc = __raw_readl(MXC_CORTEXA8_PLAT_LPC) & 28 + ~(MXC_CORTEXA8_PLAT_LPC_DSM); 29 + ccm_clpcr = __raw_readl(MXC_CCM_CLPCR) & ~(MXC_CCM_CLPCR_LPM_MASK); 30 + arm_srpgcr = __raw_readl(MXC_SRPG_ARM_SRPGCR) & ~(MXC_SRPGCR_PCR); 31 + empgc0 = __raw_readl(MXC_SRPG_EMPGC0_SRPGCR) & ~(MXC_SRPGCR_PCR); 32 + empgc1 = __raw_readl(MXC_SRPG_EMPGC1_SRPGCR) & ~(MXC_SRPGCR_PCR); 33 + 34 + switch (mode) { 35 + case WAIT_CLOCKED: 36 + break; 37 + case WAIT_UNCLOCKED: 38 + ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET; 39 + break; 40 + case WAIT_UNCLOCKED_POWER_OFF: 41 + case STOP_POWER_OFF: 42 + plat_lpc |= MXC_CORTEXA8_PLAT_LPC_DSM 43 + | MXC_CORTEXA8_PLAT_LPC_DBG_DSM; 44 + if (mode == WAIT_UNCLOCKED_POWER_OFF) { 45 + ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET; 46 + ccm_clpcr &= ~MXC_CCM_CLPCR_VSTBY; 47 + ccm_clpcr &= ~MXC_CCM_CLPCR_SBYOS; 48 + stop_mode = 0; 49 + } else { 50 + ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET; 51 + ccm_clpcr |= 0x3 << MXC_CCM_CLPCR_STBY_COUNT_OFFSET; 52 + ccm_clpcr |= MXC_CCM_CLPCR_VSTBY; 53 + ccm_clpcr |= MXC_CCM_CLPCR_SBYOS; 54 + stop_mode = 1; 55 + } 56 + arm_srpgcr |= MXC_SRPGCR_PCR; 57 + 58 + if (tzic_enable_wake(1) != 0) 59 + return; 60 + break; 61 + case STOP_POWER_ON: 62 + ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET; 63 + break; 64 + default: 65 + printk(KERN_WARNING "UNKNOWN cpu power mode: %d\n", mode); 66 + return; 67 + } 68 + 69 + __raw_writel(plat_lpc, MXC_CORTEXA8_PLAT_LPC); 70 + __raw_writel(ccm_clpcr, MXC_CCM_CLPCR); 71 + __raw_writel(arm_srpgcr, MXC_SRPG_ARM_SRPGCR); 72 + 73 + /* Enable NEON SRPG for all but MX50TO1.0. */ 74 + if (mx50_revision() != IMX_CHIP_REVISION_1_0) 75 + __raw_writel(arm_srpgcr, MXC_SRPG_NEON_SRPGCR); 76 + 77 + if (stop_mode) { 78 + empgc0 |= MXC_SRPGCR_PCR; 79 + empgc1 |= MXC_SRPGCR_PCR; 80 + 81 + __raw_writel(empgc0, MXC_SRPG_EMPGC0_SRPGCR); 82 + __raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR); 83 + } 84 + }
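The new helper only programs the clock-control and power-gating registers; the selected mode is actually entered by the WFI that follows. A hypothetical caller, which this merge does not add, would look roughly like:

	static void my_enter_low_power(void)
	{
		mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
		cpu_do_idle();		/* executes WFI; hardware applies the mode */
	}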
+2
arch/arm/mach-mxs/Kconfig
··· 22 22 select SOC_IMX23 23 23 select MXS_HAVE_AMBA_DUART 24 24 select MXS_HAVE_PLATFORM_AUART 25 + select MXS_HAVE_PLATFORM_MXS_MMC 25 26 select MXS_HAVE_PLATFORM_MXSFB 26 27 default y 27 28 help ··· 36 35 select MXS_HAVE_PLATFORM_AUART 37 36 select MXS_HAVE_PLATFORM_FEC 38 37 select MXS_HAVE_PLATFORM_FLEXCAN 38 + select MXS_HAVE_PLATFORM_MXS_MMC 39 39 select MXS_HAVE_PLATFORM_MXSFB 40 40 select MXS_OCOTP 41 41 default y
+15
arch/arm/mach-mxs/clock-mx23.c
··· 521 521 __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT, 522 522 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET); 523 523 524 + /* 525 + * 480 MHz seems too high to be the SSP clock source directly, 526 + * so set frac to get a 288 MHz ref_io. 527 + */ 528 + reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC); 529 + reg &= ~BM_CLKCTRL_FRAC_IOFRAC; 530 + reg |= 30 << BP_CLKCTRL_FRAC_IOFRAC; 531 + __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC); 532 + 524 533 return 0; 525 534 } 526 535 527 536 int __init mx23_clocks_init(void) 528 537 { 529 538 clk_misc_init(); 539 + 540 + /* 541 + * source the SSP clock from ref_io rather than ref_xtal, 542 + * as ref_xtal provides at most 24 MHz. 543 + */ 544 + clk_set_parent(&ssp_clk, &ref_io_clk); 530 545 531 546 clk_enable(&cpu_clk); 532 547 clk_enable(&hbus_clk);
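The 288 MHz figure follows from the MXS fractional divider, whose output is PLL * 18 / FRAC: with the 480 MHz PLL and IOFRAC = 30, ref_io = 480 * 18 / 30 = 288 MHz, well above the 24 MHz ceiling of ref_xtal and without feeding the SSP dividers from 480 MHz directly. The mx28 hunk below applies the same value to IO0FRAC for ref_io0.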
+18
arch/arm/mach-mxs/clock-mx28.c
··· 618 618 _REGISTER_CLOCK("pll2", NULL, pll2_clk) 619 619 _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk) 620 620 _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk) 621 + _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp0_clk) 622 + _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp1_clk) 621 623 _REGISTER_CLOCK("flexcan.0", NULL, can0_clk) 622 624 _REGISTER_CLOCK("flexcan.1", NULL, can1_clk) 623 625 _REGISTER_CLOCK(NULL, "usb0", usb0_clk) ··· 739 737 reg |= BM_CLKCTRL_ENET_CLK_OUT_EN; 740 738 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET); 741 739 740 + /* 741 + * 480 MHz seems too high to be the SSP clock source directly, 742 + * so set frac0 to get a 288 MHz ref_io0. 743 + */ 744 + reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0); 745 + reg &= ~BM_CLKCTRL_FRAC0_IO0FRAC; 746 + reg |= 30 << BP_CLKCTRL_FRAC0_IO0FRAC; 747 + __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0); 748 + 742 749 return 0; 743 750 } 744 751 745 752 int __init mx28_clocks_init(void) 746 753 { 747 754 clk_misc_init(); 755 + 756 + /* 757 + * source the SSP clocks from ref_io0 rather than ref_xtal, 758 + * as ref_xtal provides at most 24 MHz. 759 + */ 760 + clk_set_parent(&ssp0_clk, &ref_io0_clk); 761 + clk_set_parent(&ssp1_clk, &ref_io0_clk); 748 762 749 763 clk_enable(&cpu_clk); 750 764 clk_enable(&hbus_clk);
+4
arch/arm/mach-mxs/devices-mx23.h
··· 21 21 #define mx23_add_auart0() mx23_add_auart(0) 22 22 #define mx23_add_auart1() mx23_add_auart(1) 23 23 24 + extern const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst; 25 + #define mx23_add_mxs_mmc(id, pdata) \ 26 + mxs_add_mxs_mmc(&mx23_mxs_mmc_data[id], pdata) 27 + 24 28 #define mx23_add_mxs_pwm(id) mxs_add_mxs_pwm(MX23_PWM_BASE_ADDR, id) 25 29 26 30 struct platform_device *__init mx23_add_mxsfb(
+4
arch/arm/mach-mxs/devices-mx28.h
··· 37 37 extern const struct mxs_i2c_data mx28_mxs_i2c_data[] __initconst; 38 38 #define mx28_add_mxs_i2c(id) mxs_add_mxs_i2c(&mx28_mxs_i2c_data[id]) 39 39 40 + extern const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst; 41 + #define mx28_add_mxs_mmc(id, pdata) \ 42 + mxs_add_mxs_mmc(&mx28_mxs_mmc_data[id], pdata) 43 + 40 44 #define mx28_add_mxs_pwm(id) mxs_add_mxs_pwm(MX28_PWM_BASE_ADDR, id) 41 45 42 46 struct platform_device *__init mx28_add_mxsfb(
+3
arch/arm/mach-mxs/devices/Kconfig
··· 15 15 config MXS_HAVE_PLATFORM_MXS_I2C 16 16 bool 17 17 18 + config MXS_HAVE_PLATFORM_MXS_MMC 19 + bool 20 + 18 21 config MXS_HAVE_PLATFORM_MXS_PWM 19 22 bool 20 23
+1
arch/arm/mach-mxs/devices/Makefile
··· 4 4 obj-$(CONFIG_MXS_HAVE_PLATFORM_FEC) += platform-fec.o 5 5 obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o 6 6 obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o 7 + obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o 7 8 obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o 8 9 obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o
+73
arch/arm/mach-mxs/devices/platform-mxs-mmc.c
··· 1 + /* 2 + * Copyright (C) 2010 Pengutronix 3 + * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> 4 + * 5 + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it under 8 + * the terms of the GNU General Public License version 2 as published by the 9 + * Free Software Foundation. 10 + */ 11 + 12 + #include <linux/compiler.h> 13 + #include <linux/err.h> 14 + #include <linux/init.h> 15 + 16 + #include <mach/mx23.h> 17 + #include <mach/mx28.h> 18 + #include <mach/devices-common.h> 19 + 20 + #define mxs_mxs_mmc_data_entry_single(soc, _id, hwid) \ 21 + { \ 22 + .id = _id, \ 23 + .iobase = soc ## _SSP ## hwid ## _BASE_ADDR, \ 24 + .dma = soc ## _DMA_SSP ## hwid, \ 25 + .irq_err = soc ## _INT_SSP ## hwid ## _ERROR, \ 26 + .irq_dma = soc ## _INT_SSP ## hwid ## _DMA, \ 27 + } 28 + 29 + #define mxs_mxs_mmc_data_entry(soc, _id, hwid) \ 30 + [_id] = mxs_mxs_mmc_data_entry_single(soc, _id, hwid) 31 + 32 + 33 + #ifdef CONFIG_SOC_IMX23 34 + const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = { 35 + mxs_mxs_mmc_data_entry(MX23, 0, 1), 36 + mxs_mxs_mmc_data_entry(MX23, 1, 2), 37 + }; 38 + #endif 39 + 40 + #ifdef CONFIG_SOC_IMX28 41 + const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = { 42 + mxs_mxs_mmc_data_entry(MX28, 0, 0), 43 + mxs_mxs_mmc_data_entry(MX28, 1, 1), 44 + }; 45 + #endif 46 + 47 + struct platform_device *__init mxs_add_mxs_mmc( 48 + const struct mxs_mxs_mmc_data *data, 49 + const struct mxs_mmc_platform_data *pdata) 50 + { 51 + struct resource res[] = { 52 + { 53 + .start = data->iobase, 54 + .end = data->iobase + SZ_8K - 1, 55 + .flags = IORESOURCE_MEM, 56 + }, { 57 + .start = data->dma, 58 + .end = data->dma, 59 + .flags = IORESOURCE_DMA, 60 + }, { 61 + .start = data->irq_err, 62 + .end = data->irq_err, 63 + .flags = IORESOURCE_IRQ, 64 + }, { 65 + .start = data->irq_dma, 66 + .end = data->irq_dma, 67 + .flags = IORESOURCE_IRQ, 68 + }, 69 + }; 70 + 71 + return mxs_add_platform_device("mxs-mmc", data->id, 72 + res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); 73 + }
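To make the token-pasting above concrete, the first MX23 table entry expands (roughly, as an illustration rather than patch text) to the following; note that driver id 0 maps to hardware block SSP1 on mx23:

	[0] = {
		.id      = 0,
		.iobase  = MX23_SSP1_BASE_ADDR,
		.dma     = MX23_DMA_SSP1,
		.irq_err = MX23_INT_SSP1_ERROR,
		.irq_dma = MX23_INT_SSP1_DMA,
	},

The per-board mxN_add_mxs_mmc(id, pdata) macros from the headers above simply index this table and hand the entry, together with the board's platform data, to mxs_add_mxs_mmc().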
+5 -5
arch/arm/mach-mxs/gpio.c
··· 136 136 static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) 137 137 { 138 138 u32 irq_stat; 139 - struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq); 139 + struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq); 140 140 u32 gpio_irq_no_base = port->virtual_irq_start; 141 141 142 142 desc->irq_data.chip->irq_ack(&desc->irq_data); ··· 265 265 266 266 for (j = port[i].virtual_irq_start; 267 267 j < port[i].virtual_irq_start + 32; j++) { 268 - set_irq_chip(j, &gpio_irq_chip); 269 - set_irq_handler(j, handle_level_irq); 268 + irq_set_chip_and_handler(j, &gpio_irq_chip, 269 + handle_level_irq); 270 270 set_irq_flags(j, IRQF_VALID); 271 271 } 272 272 273 273 /* setup one handler for each entry */ 274 - set_irq_chained_handler(port[i].irq, mxs_gpio_irq_handler); 275 - set_irq_data(port[i].irq, &port[i]); 274 + irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler); 275 + irq_set_handler_data(port[i].irq, &port[i]); 276 276 277 277 /* register gpio chip */ 278 278 port[i].chip.direction_input = mxs_gpio_direction_input;
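The conversions here and in most of the hunks that follow are the mechanical renames from the 2.6.39 genirq namespace cleanup: the old set_irq_*()/get_irq_*() accessors become irq_set_*()/irq_get_*(), and the common two-call setup collapses into one, as the icoll.c hunk below shows directly:

	/*
	 * set_irq_chip(irq, chip);
	 * set_irq_handler(irq, handle);
	 * becomes the single call
	 * irq_set_chip_and_handler(irq, chip, handle);
	 */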
+1 -2
arch/arm/mach-mxs/icoll.c
··· 74 74 mxs_reset_block(icoll_base + HW_ICOLL_CTRL); 75 75 76 76 for (i = 0; i < MXS_INTERNAL_IRQS; i++) { 77 - set_irq_chip(i, &mxs_icoll_chip); 78 - set_irq_handler(i, handle_level_irq); 77 + irq_set_chip_and_handler(i, &mxs_icoll_chip, handle_level_irq); 79 78 set_irq_flags(i, IRQF_VALID); 80 79 } 81 80 }
+13
arch/arm/mach-mxs/include/mach/devices-common.h
··· 73 73 }; 74 74 struct platform_device * __init mxs_add_mxs_i2c(const struct mxs_i2c_data *data); 75 75 76 + /* mmc */ 77 + #include <mach/mmc.h> 78 + struct mxs_mxs_mmc_data { 79 + int id; 80 + resource_size_t iobase; 81 + resource_size_t dma; 82 + resource_size_t irq_err; 83 + resource_size_t irq_dma; 84 + }; 85 + struct platform_device *__init mxs_add_mxs_mmc( 86 + const struct mxs_mxs_mmc_data *data, 87 + const struct mxs_mmc_platform_data *pdata); 88 + 76 89 /* pwm */ 77 90 struct platform_device *__init mxs_add_mxs_pwm( 78 91 resource_size_t iobase, int id);
+44
arch/arm/mach-mxs/mach-mx23evk.c
··· 28 28 29 29 #define MX23EVK_LCD_ENABLE MXS_GPIO_NR(1, 18) 30 30 #define MX23EVK_BL_ENABLE MXS_GPIO_NR(1, 28) 31 + #define MX23EVK_MMC0_WRITE_PROTECT MXS_GPIO_NR(1, 30) 32 + #define MX23EVK_MMC0_SLOT_POWER MXS_GPIO_NR(1, 29) 31 33 32 34 static const iomux_cfg_t mx23evk_pads[] __initconst = { 33 35 /* duart */ ··· 75 73 MX23_PAD_LCD_RESET__GPIO_1_18 | MXS_PAD_CTRL, 76 74 /* backlight control */ 77 75 MX23_PAD_PWM2__GPIO_1_28 | MXS_PAD_CTRL, 76 + 77 + /* mmc */ 78 + MX23_PAD_SSP1_DATA0__SSP1_DATA0 | 79 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 80 + MX23_PAD_SSP1_DATA1__SSP1_DATA1 | 81 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 82 + MX23_PAD_SSP1_DATA2__SSP1_DATA2 | 83 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 84 + MX23_PAD_SSP1_DATA3__SSP1_DATA3 | 85 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 86 + MX23_PAD_GPMI_D08__SSP1_DATA4 | 87 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 88 + MX23_PAD_GPMI_D09__SSP1_DATA5 | 89 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 90 + MX23_PAD_GPMI_D10__SSP1_DATA6 | 91 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 92 + MX23_PAD_GPMI_D11__SSP1_DATA7 | 93 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 94 + MX23_PAD_SSP1_CMD__SSP1_CMD | 95 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 96 + MX23_PAD_SSP1_DETECT__SSP1_DETECT | 97 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 98 + MX23_PAD_SSP1_SCK__SSP1_SCK | 99 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 100 + /* write protect */ 101 + MX23_PAD_PWM4__GPIO_1_30 | 102 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 103 + /* slot power enable */ 104 + MX23_PAD_PWM3__GPIO_1_29 | 105 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 78 106 }; 79 107 80 108 /* mxsfb (lcdif) */ ··· 133 101 .ld_intf_width = STMLCDIF_24BIT, 134 102 }; 135 103 104 + static struct mxs_mmc_platform_data mx23evk_mmc_pdata __initdata = { 105 + .wp_gpio = MX23EVK_MMC0_WRITE_PROTECT, 106 + .flags = SLOTF_8_BIT_CAPABLE, 107 + }; 108 + 136 109 static void __init mx23evk_init(void) 137 110 { 138 111 int ret; ··· 146 109 147 110 mx23_add_duart(); 148 111 mx23_add_auart0(); 112 + 113 + /* power on mmc slot by writing 0 to the gpio */ 114 + ret = gpio_request_one(MX23EVK_MMC0_SLOT_POWER, GPIOF_DIR_OUT, 115 + "mmc0-slot-power"); 116 + if (ret) 117 + pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret); 118 + mx23_add_mxs_mmc(0, &mx23evk_mmc_pdata); 149 119 150 120 ret = gpio_request_one(MX23EVK_LCD_ENABLE, GPIOF_DIR_OUT, "lcd-enable"); 151 121 if (ret)
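The slot-power request above leans on the gpio_request_one() flag defaults: GPIOF_DIR_OUT without GPIOF_INIT_HIGH configures the pin as an output driven low, and since the slot-power enable is active-low that is what switches the slot on, hence the comment about writing 0. The mx28evk hunk below uses the same pattern for both of its slots.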
+89
arch/arm/mach-mxs/mach-mx28evk.c
··· 34 34 #define MX28EVK_LCD_ENABLE MXS_GPIO_NR(3, 30) 35 35 #define MX28EVK_FEC_PHY_RESET MXS_GPIO_NR(4, 13) 36 36 37 + #define MX28EVK_MMC0_WRITE_PROTECT MXS_GPIO_NR(2, 12) 38 + #define MX28EVK_MMC1_WRITE_PROTECT MXS_GPIO_NR(0, 28) 39 + #define MX28EVK_MMC0_SLOT_POWER MXS_GPIO_NR(3, 28) 40 + #define MX28EVK_MMC1_SLOT_POWER MXS_GPIO_NR(3, 29) 41 + 37 42 static const iomux_cfg_t mx28evk_pads[] __initconst = { 38 43 /* duart */ 39 44 MX28_PAD_PWM0__DUART_RX | MXS_PAD_CTRL, ··· 120 115 MX28_PAD_LCD_RESET__GPIO_3_30 | MXS_PAD_CTRL, 121 116 /* backlight control */ 122 117 MX28_PAD_PWM2__GPIO_3_18 | MXS_PAD_CTRL, 118 + /* mmc0 */ 119 + MX28_PAD_SSP0_DATA0__SSP0_D0 | 120 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 121 + MX28_PAD_SSP0_DATA1__SSP0_D1 | 122 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 123 + MX28_PAD_SSP0_DATA2__SSP0_D2 | 124 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 125 + MX28_PAD_SSP0_DATA3__SSP0_D3 | 126 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 127 + MX28_PAD_SSP0_DATA4__SSP0_D4 | 128 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 129 + MX28_PAD_SSP0_DATA5__SSP0_D5 | 130 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 131 + MX28_PAD_SSP0_DATA6__SSP0_D6 | 132 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 133 + MX28_PAD_SSP0_DATA7__SSP0_D7 | 134 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 135 + MX28_PAD_SSP0_CMD__SSP0_CMD | 136 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 137 + MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT | 138 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 139 + MX28_PAD_SSP0_SCK__SSP0_SCK | 140 + (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 141 + /* write protect */ 142 + MX28_PAD_SSP1_SCK__GPIO_2_12 | 143 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 144 + /* slot power enable */ 145 + MX28_PAD_PWM3__GPIO_3_28 | 146 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 147 + 148 + /* mmc1 */ 149 + MX28_PAD_GPMI_D00__SSP1_D0 | 150 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 151 + MX28_PAD_GPMI_D01__SSP1_D1 | 152 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 153 + MX28_PAD_GPMI_D02__SSP1_D2 | 154 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 155 + MX28_PAD_GPMI_D03__SSP1_D3 | 156 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 157 + MX28_PAD_GPMI_D04__SSP1_D4 | 158 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 159 + MX28_PAD_GPMI_D05__SSP1_D5 | 160 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 161 + MX28_PAD_GPMI_D06__SSP1_D6 | 162 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 163 + MX28_PAD_GPMI_D07__SSP1_D7 | 164 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 165 + MX28_PAD_GPMI_RDY1__SSP1_CMD | 166 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 167 + MX28_PAD_GPMI_RDY0__SSP1_CARD_DETECT | 168 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 169 + MX28_PAD_GPMI_WRN__SSP1_SCK | 170 + (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 171 + /* write protect */ 172 + MX28_PAD_GPMI_RESETN__GPIO_0_28 | 173 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 174 + /* slot power enable */ 175 + MX28_PAD_PWM4__GPIO_3_29 | 176 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 123 177 }; 124 178 125 179 /* fec */ ··· 322 258 .ld_intf_width = STMLCDIF_24BIT, 323 259 }; 324 260 261 + static struct mxs_mmc_platform_data mx28evk_mmc_pdata[] __initdata = { 262 + { 263 + /* mmc0 */ 264 + .wp_gpio = MX28EVK_MMC0_WRITE_PROTECT, 265 + .flags = SLOTF_8_BIT_CAPABLE, 266 + }, { 267 + /* mmc1 */ 268 + .wp_gpio = MX28EVK_MMC1_WRITE_PROTECT, 269 + .flags = SLOTF_8_BIT_CAPABLE, 270 + }, 271 + }; 272 + 325 273 static void __init mx28evk_init(void) 326 274 { 
326 274 {
327 275 int ret;
··· 373 297 gpio_set_value(MX28EVK_BL_ENABLE, 1);
374 298
375 299 mx28_add_mxsfb(&mx28evk_mxsfb_pdata);
300 +
301 + /* power on mmc slot by writing 0 to the gpio */
302 + ret = gpio_request_one(MX28EVK_MMC0_SLOT_POWER, GPIOF_DIR_OUT,
303 + "mmc0-slot-power");
304 + if (ret)
305 + pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret);
306 + mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]);
307 +
308 + ret = gpio_request_one(MX28EVK_MMC1_SLOT_POWER, GPIOF_DIR_OUT,
309 + "mmc1-slot-power");
310 + if (ret)
311 + pr_warn("failed to request gpio mmc1-slot-power: %d\n", ret);
312 + mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
376 313 }
377 314
378 315 static void __init mx28evk_timer_init(void)
+35 -6
arch/arm/mach-mxs/module-tx28.c
··· 45 45 }; 46 46 47 47 #define FEC_MODE (MXS_PAD_8MA | MXS_PAD_PULLUP | MXS_PAD_3V3) 48 - static const iomux_cfg_t tx28_fec_pads[] __initconst = { 48 + static const iomux_cfg_t tx28_fec0_pads[] __initconst = { 49 49 MX28_PAD_ENET0_MDC__ENET0_MDC | FEC_MODE, 50 50 MX28_PAD_ENET0_MDIO__ENET0_MDIO | FEC_MODE, 51 51 MX28_PAD_ENET0_RX_EN__ENET0_RX_EN | FEC_MODE, ··· 57 57 MX28_PAD_ENET_CLK__CLKCTRL_ENET | FEC_MODE, 58 58 }; 59 59 60 - static const struct fec_platform_data tx28_fec_data __initconst = { 60 + static const iomux_cfg_t tx28_fec1_pads[] __initconst = { 61 + MX28_PAD_ENET0_RXD2__ENET1_RXD0, 62 + MX28_PAD_ENET0_RXD3__ENET1_RXD1, 63 + MX28_PAD_ENET0_TXD2__ENET1_TXD0, 64 + MX28_PAD_ENET0_TXD3__ENET1_TXD1, 65 + MX28_PAD_ENET0_COL__ENET1_TX_EN, 66 + MX28_PAD_ENET0_CRS__ENET1_RX_EN, 67 + }; 68 + 69 + static struct fec_platform_data tx28_fec0_data = { 70 + .phy = PHY_INTERFACE_MODE_RMII, 71 + }; 72 + 73 + static struct fec_platform_data tx28_fec1_data = { 61 74 .phy = PHY_INTERFACE_MODE_RMII, 62 75 }; 63 76 ··· 121 108 pr_debug("%s: Deasserting FEC PHY RESET\n", __func__); 122 109 gpio_set_value(TX28_FEC_PHY_RESET, 1); 123 110 124 - ret = mxs_iomux_setup_multiple_pads(tx28_fec_pads, 125 - ARRAY_SIZE(tx28_fec_pads)); 111 + ret = mxs_iomux_setup_multiple_pads(tx28_fec0_pads, 112 + ARRAY_SIZE(tx28_fec0_pads)); 126 113 if (ret) { 127 114 pr_debug("%s: mxs_iomux_setup_multiple_pads() failed with rc: %d\n", 128 115 __func__, ret); 129 116 goto free_gpios; 130 117 } 131 - pr_debug("%s: Registering FEC device\n", __func__); 132 - mx28_add_fec(0, &tx28_fec_data); 118 + pr_debug("%s: Registering FEC0 device\n", __func__); 119 + mx28_add_fec(0, &tx28_fec0_data); 133 120 return 0; 134 121 135 122 free_gpios: ··· 141 128 } 142 129 143 130 return ret; 131 + } 132 + 133 + int __init tx28_add_fec1(void) 134 + { 135 + int ret; 136 + 137 + ret = mxs_iomux_setup_multiple_pads(tx28_fec1_pads, 138 + ARRAY_SIZE(tx28_fec1_pads)); 139 + if (ret) { 140 + pr_debug("%s: mxs_iomux_setup_multiple_pads() failed with rc: %d\n", 141 + __func__, ret); 142 + return ret; 143 + } 144 + pr_debug("%s: Registering FEC1 device\n", __func__); 145 + mx28_add_fec(1, &tx28_fec1_data); 146 + return 0; 144 147 }
+1
arch/arm/mach-mxs/module-tx28.h
··· 7 7 * Free Software Foundation. 8 8 */ 9 9 int __init tx28_add_fec0(void); 10 + int __init tx28_add_fec1(void);
+3 -3
arch/arm/mach-netx/generic.c
··· 171 171 vic_init(__io(io_p2v(NETX_PA_VIC)), 0, ~0, 0); 172 172 173 173 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { 174 - set_irq_chip(irq, &netx_hif_chip); 175 - set_irq_handler(irq, handle_level_irq); 174 + irq_set_chip_and_handler(irq, &netx_hif_chip, 175 + handle_level_irq); 176 176 set_irq_flags(irq, IRQF_VALID); 177 177 } 178 178 179 179 writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN); 180 - set_irq_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler); 180 + irq_set_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler); 181 181 } 182 182 183 183 static int __init netx_init(void)
+4 -4
arch/arm/mach-ns9xxx/board-a9m9750dev.c
··· 107 107 __func__); 108 108 109 109 for (i = FPGA_IRQ(0); i <= FPGA_IRQ(7); ++i) { 110 - set_irq_chip(i, &a9m9750dev_fpga_chip); 111 - set_irq_handler(i, handle_level_irq); 110 + irq_set_chip_and_handler(i, &a9m9750dev_fpga_chip, 111 + handle_level_irq); 112 112 set_irq_flags(i, IRQF_VALID); 113 113 } 114 114 ··· 118 118 REGSET(eic, SYS_EIC, LVEDG, LEVEL); 119 119 __raw_writel(eic, SYS_EIC(2)); 120 120 121 - set_irq_chained_handler(IRQ_NS9XXX_EXT2, 122 - a9m9750dev_fpga_demux_handler); 121 + irq_set_chained_handler(IRQ_NS9XXX_EXT2, 122 + a9m9750dev_fpga_demux_handler); 123 123 } 124 124 125 125 void __init board_a9m9750dev_init_machine(void)
-2
arch/arm/mach-ns9xxx/include/mach/board.h
··· 14 14 #include <asm/mach-types.h> 15 15 16 16 #define board_is_a9m9750dev() (0 \ 17 - || machine_is_cc9p9360dev() \ 18 17 || machine_is_cc9p9750dev() \ 19 18 ) 20 19 21 20 #define board_is_a9mvali() (0 \ 22 - || machine_is_cc9p9360val() \ 23 21 || machine_is_cc9p9750val() \ 24 22 ) 25 23
-5
arch/arm/mach-ns9xxx/include/mach/module.h
··· 18 18 ) 19 19 20 20 #define module_is_cc9c() (0 \ 21 - || machine_is_cc9c() \ 22 21 ) 23 22 24 23 #define module_is_cc9p9210() (0 \ ··· 31 32 ) 32 33 33 34 #define module_is_cc9p9360() (0 \ 34 - || machine_is_a9m9360() \ 35 35 || machine_is_cc9p9360dev() \ 36 36 || machine_is_cc9p9360js() \ 37 - || machine_is_cc9p9360val() \ 38 37 ) 39 38 40 39 #define module_is_cc9p9750() (0 \ 41 40 || machine_is_a9m9750() \ 42 - || machine_is_cc9p9750dev() \ 43 41 || machine_is_cc9p9750js() \ 44 42 || machine_is_cc9p9750val() \ 45 43 ) 46 44 47 45 #define module_is_ccw9c() (0 \ 48 - || machine_is_ccw9c() \ 49 46 ) 50 47 51 48 #define module_is_inc20otter() (0 \
+1 -2
arch/arm/mach-ns9xxx/irq.c
··· 67 67 __raw_writel(prio2irq(i), SYS_IVA(i)); 68 68 69 69 for (i = 0; i <= 31; ++i) { 70 - set_irq_chip(i, &ns9xxx_chip); 71 - set_irq_handler(i, handle_fasteoi_irq); 70 + irq_set_chip_and_handler(i, &ns9xxx_chip, handle_fasteoi_irq); 72 71 set_irq_flags(i, IRQF_VALID); 73 72 irq_set_status_flags(i, IRQ_LEVEL); 74 73 }
+2 -2
arch/arm/mach-nuc93x/irq.c
··· 59 59 __raw_writel(0xFFFFFFFE, REG_AIC_MDCR); 60 60 61 61 for (irqno = IRQ_WDT; irqno <= NR_IRQS; irqno++) { 62 - set_irq_chip(irqno, &nuc93x_irq_chip); 63 - set_irq_handler(irqno, handle_level_irq); 62 + irq_set_chip_and_handler(irqno, &nuc93x_irq_chip, 63 + handle_level_irq); 64 64 set_irq_flags(irqno, IRQF_VALID); 65 65 } 66 66 }
+3 -3
arch/arm/mach-omap1/board-osk.c
··· 276 276 return; 277 277 } 278 278 /* the CF I/O IRQ is really active-low */ 279 - set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_FALLING); 279 + irq_set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_FALLING); 280 280 } 281 281 282 282 static void __init osk_init_irq(void) ··· 482 482 omap_cfg_reg(P20_1610_GPIO4); /* PENIRQ */ 483 483 gpio_request(4, "ts_int"); 484 484 gpio_direction_input(4); 485 - set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING); 485 + irq_set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING); 486 486 487 487 spi_register_board_info(mistral_boardinfo, 488 488 ARRAY_SIZE(mistral_boardinfo)); ··· 500 500 int irq = gpio_to_irq(OMAP_MPUIO(2)); 501 501 502 502 gpio_direction_input(OMAP_MPUIO(2)); 503 - set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 503 + irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 504 504 #ifdef CONFIG_PM 505 505 /* share the IRQ in case someone wants to use the 506 506 * button for more than wakeup from system sleep.
+4 -4
arch/arm/mach-omap1/board-palmz71.c
··· 256 256 { 257 257 if (gpio_get_value(PALMZ71_USBDETECT_GPIO)) { 258 258 printk(KERN_INFO "PM: Power cable connected\n"); 259 - set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 260 - IRQ_TYPE_EDGE_FALLING); 259 + irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 260 + IRQ_TYPE_EDGE_FALLING); 261 261 } else { 262 262 printk(KERN_INFO "PM: Power cable disconnected\n"); 263 - set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 264 - IRQ_TYPE_EDGE_RISING); 263 + irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 264 + IRQ_TYPE_EDGE_RISING); 265 265 } 266 266 return IRQ_HANDLED; 267 267 }
+4 -4
arch/arm/mach-omap1/board-voiceblue.c
··· 279 279 gpio_request(13, "16C554 irq"); 280 280 gpio_request(14, "16C554 irq"); 281 281 gpio_request(15, "16C554 irq"); 282 - set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING); 283 - set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); 284 - set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING); 285 - set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING); 282 + irq_set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING); 283 + irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); 284 + irq_set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING); 285 + irq_set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING); 286 286 287 287 platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices)); 288 288 omap_board_config = voiceblue_config;
+5 -5
arch/arm/mach-omap1/fpga.c
··· 156 156 * The touchscreen interrupt is level-sensitive, so 157 157 * we'll use the regular mask_ack routine for it. 158 158 */ 159 - set_irq_chip(i, &omap_fpga_irq_ack); 159 + irq_set_chip(i, &omap_fpga_irq_ack); 160 160 } 161 161 else { 162 162 /* 163 163 * All FPGA interrupts except the touchscreen are 164 164 * edge-sensitive, so we won't mask them. 165 165 */ 166 - set_irq_chip(i, &omap_fpga_irq); 166 + irq_set_chip(i, &omap_fpga_irq); 167 167 } 168 168 169 - set_irq_handler(i, handle_edge_irq); 169 + irq_set_handler(i, handle_edge_irq); 170 170 set_irq_flags(i, IRQF_VALID); 171 171 } 172 172 ··· 183 183 return; 184 184 } 185 185 gpio_direction_input(13); 186 - set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); 187 - set_irq_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux); 186 + irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); 187 + irq_set_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux); 188 188 }
+2 -2
arch/arm/mach-omap1/irq.c
··· 230 230 irq_trigger = irq_banks[i].trigger_map >> IRQ_BIT(j); 231 231 omap_irq_set_cfg(j, 0, 0, irq_trigger); 232 232 233 - set_irq_chip(j, &omap_irq_chip); 234 - set_irq_handler(j, handle_level_irq); 233 + irq_set_chip_and_handler(j, &omap_irq_chip, 234 + handle_level_irq); 235 235 set_irq_flags(j, IRQF_VALID); 236 236 } 237 237 }
+1 -1
arch/arm/mach-omap2/gpmc.c
··· 743 743 /* initalize the irq_chained */ 744 744 irq = OMAP_GPMC_IRQ_BASE; 745 745 for (cs = 0; cs < GPMC_CS_NUM; cs++) { 746 - set_irq_chip_and_handler(irq, &dummy_irq_chip, 746 + irq_set_chip_and_handler(irq, &dummy_irq_chip, 747 747 handle_simple_irq); 748 748 set_irq_flags(irq, IRQF_VALID); 749 749 irq++;
+1 -2
arch/arm/mach-omap2/irq.c
··· 223 223 nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : ""); 224 224 225 225 for (i = 0; i < nr_of_irqs; i++) { 226 - set_irq_chip(i, &omap_irq_chip); 227 - set_irq_handler(i, handle_level_irq); 226 + irq_set_chip_and_handler(i, &omap_irq_chip, handle_level_irq); 228 227 set_irq_flags(i, IRQF_VALID); 229 228 } 230 229 }
+2 -2
arch/arm/mach-orion5x/db88f5281-setup.c
··· 213 213 pin = DB88F5281_PCI_SLOT0_IRQ_PIN;
214 214 if (gpio_request(pin, "PCI Int1") == 0) {
215 215 if (gpio_direction_input(pin) == 0) {
216 - set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
216 + irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
217 217 } else {
218 218 printk(KERN_ERR "db88f5281_pci_preinit failed to "
219 219 "set_irq_type pin %d\n", pin);
··· 226 226 pin = DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN;
227 227 if (gpio_request(pin, "PCI Int2") == 0) {
228 228 if (gpio_direction_input(pin) == 0) {
229 - set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
229 + irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
230 230 } else {
231 231 printk(KERN_ERR "db88f5281_pci_preinit failed "
232 232 "to set_irq_type pin %d\n", pin);
+4 -4
arch/arm/mach-orion5x/irq.c
··· 34 34 * Initialize gpiolib for GPIOs 0-31. 35 35 */ 36 36 orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START); 37 - set_irq_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler); 38 - set_irq_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler); 39 - set_irq_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler); 40 - set_irq_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler); 37 + irq_set_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler); 38 + irq_set_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler); 39 + irq_set_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler); 40 + irq_set_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler); 41 41 }
+2 -2
arch/arm/mach-orion5x/rd88f5182-setup.c
··· 148 148 pin = RD88F5182_PCI_SLOT0_IRQ_A_PIN;
149 149 if (gpio_request(pin, "PCI IntA") == 0) {
150 150 if (gpio_direction_input(pin) == 0) {
151 - set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
151 + irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
152 152 } else {
153 153 printk(KERN_ERR "rd88f5182_pci_preinit failed to "
154 154 "set_irq_type pin %d\n", pin);
··· 161 161 pin = RD88F5182_PCI_SLOT0_IRQ_B_PIN;
162 162 if (gpio_request(pin, "PCI IntB") == 0) {
163 163 if (gpio_direction_input(pin) == 0) {
164 - set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
164 + irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
165 165 } else {
166 166 printk(KERN_ERR "rd88f5182_pci_preinit failed to "
167 167 "set_irq_type pin %d\n", pin);
+1 -1
arch/arm/mach-orion5x/terastation_pro2-setup.c
··· 88 88 pin = TSP2_PCI_SLOT0_IRQ_PIN; 89 89 if (gpio_request(pin, "PCI Int1") == 0) { 90 90 if (gpio_direction_input(pin) == 0) { 91 - set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); 91 + irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); 92 92 } else { 93 93 printk(KERN_ERR "tsp2_pci_preinit failed " 94 94 "to set_irq_type pin %d\n", pin);
+2 -2
arch/arm/mach-orion5x/ts209-setup.c
··· 117 117 pin = QNAP_TS209_PCI_SLOT0_IRQ_PIN; 118 118 if (gpio_request(pin, "PCI Int1") == 0) { 119 119 if (gpio_direction_input(pin) == 0) { 120 - set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); 120 + irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); 121 121 } else { 122 122 printk(KERN_ERR "qnap_ts209_pci_preinit failed to " 123 123 "set_irq_type pin %d\n", pin); ··· 131 131 pin = QNAP_TS209_PCI_SLOT1_IRQ_PIN; 132 132 if (gpio_request(pin, "PCI Int2") == 0) { 133 133 if (gpio_direction_input(pin) == 0) { 134 - set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); 134 + irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); 135 135 } else { 136 136 printk(KERN_ERR "qnap_ts209_pci_preinit failed " 137 137 "to set_irq_type pin %d\n", pin);
+9 -9
arch/arm/mach-orion5x/ts78xx-setup.c
··· 402 402 /* enable devices if magic matches */ 403 403 switch ((ts78xx_fpga.id >> 8) & 0xffffff) { 404 404 case TS7800_FPGA_MAGIC: 405 - printk(KERN_WARNING "TS-7800 FPGA: unrecognized revision 0x%.2x\n", 405 + pr_warning("TS-7800 FPGA: unrecognized revision 0x%.2x\n", 406 406 ts78xx_fpga.id & 0xff); 407 407 ts78xx_fpga.supports.ts_rtc.present = 1; 408 408 ts78xx_fpga.supports.ts_nand.present = 1; ··· 423 423 if (ts78xx_fpga.supports.ts_rtc.present == 1) { 424 424 tmp = ts78xx_ts_rtc_load(); 425 425 if (tmp) { 426 - printk(KERN_INFO "TS-78xx: RTC not registered\n"); 426 + pr_info("TS-78xx: RTC not registered\n"); 427 427 ts78xx_fpga.supports.ts_rtc.present = 0; 428 428 } 429 429 ret |= tmp; ··· 431 431 if (ts78xx_fpga.supports.ts_nand.present == 1) { 432 432 tmp = ts78xx_ts_nand_load(); 433 433 if (tmp) { 434 - printk(KERN_INFO "TS-78xx: NAND not registered\n"); 434 + pr_info("TS-78xx: NAND not registered\n"); 435 435 ts78xx_fpga.supports.ts_nand.present = 0; 436 436 } 437 437 ret |= tmp; ··· 439 439 if (ts78xx_fpga.supports.ts_rng.present == 1) { 440 440 tmp = ts78xx_ts_rng_load(); 441 441 if (tmp) { 442 - printk(KERN_INFO "TS-78xx: RNG not registered\n"); 442 + pr_info("TS-78xx: RNG not registered\n"); 443 443 ts78xx_fpga.supports.ts_rng.present = 0; 444 444 } 445 445 ret |= tmp; ··· 466 466 { 467 467 ts78xx_fpga.id = readl(TS78XX_FPGA_REGS_VIRT_BASE); 468 468 469 - printk(KERN_INFO "TS-78xx FPGA: magic=0x%.6x, rev=0x%.2x\n", 469 + pr_info("TS-78xx FPGA: magic=0x%.6x, rev=0x%.2x\n", 470 470 (ts78xx_fpga.id >> 8) & 0xffffff, 471 471 ts78xx_fpga.id & 0xff); 472 472 ··· 494 494 * UrJTAG SVN since r1381 can be used to reprogram the FPGA 495 495 */ 496 496 if (ts78xx_fpga.id != fpga_id) { 497 - printk(KERN_ERR "TS-78xx FPGA: magic/rev mismatch\n" 497 + pr_err("TS-78xx FPGA: magic/rev mismatch\n" 498 498 "TS-78xx FPGA: was 0x%.6x/%.2x but now 0x%.6x/%.2x\n", 499 499 (ts78xx_fpga.id >> 8) & 0xffffff, ts78xx_fpga.id & 0xff, 500 500 (fpga_id >> 8) & 0xffffff, fpga_id & 0xff); ··· 525 525 int value, ret; 526 526 527 527 if (ts78xx_fpga.state < 0) { 528 - printk(KERN_ERR "TS-78xx FPGA: borked, you must powercycle asap\n"); 528 + pr_err("TS-78xx FPGA: borked, you must powercycle asap\n"); 529 529 return -EBUSY; 530 530 } 531 531 ··· 534 534 else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) 535 535 value = 0; 536 536 else { 537 - printk(KERN_ERR "ts78xx_fpga_store: Invalid value\n"); 537 + pr_err("ts78xx_fpga_store: Invalid value\n"); 538 538 return -EINVAL; 539 539 } 540 540 ··· 616 616 ret = ts78xx_fpga_load(); 617 617 ret = sysfs_create_file(power_kobj, &ts78xx_fpga_attr.attr); 618 618 if (ret) 619 - printk(KERN_ERR "sysfs_create_file failed: %d\n", ret); 619 + pr_err("sysfs_create_file failed: %d\n", ret); 620 620 } 621 621 622 622 MACHINE_START(TS78XX, "Technologic Systems TS-78xx SBC")
+5 -5
arch/arm/mach-pnx4008/irq.c
··· 58 58 case IRQ_TYPE_EDGE_RISING: 59 59 __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ 60 60 __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /*rising edge */ 61 - set_irq_handler(d->irq, handle_edge_irq); 61 + irq_set_handler(d->irq, handle_edge_irq); 62 62 break; 63 63 case IRQ_TYPE_EDGE_FALLING: 64 64 __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ 65 65 __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*falling edge */ 66 - set_irq_handler(d->irq, handle_edge_irq); 66 + irq_set_handler(d->irq, handle_edge_irq); 67 67 break; 68 68 case IRQ_TYPE_LEVEL_LOW: 69 69 __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ 70 70 __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*low level */ 71 - set_irq_handler(d->irq, handle_level_irq); 71 + irq_set_handler(d->irq, handle_level_irq); 72 72 break; 73 73 case IRQ_TYPE_LEVEL_HIGH: 74 74 __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ 75 75 __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /* high level */ 76 - set_irq_handler(d->irq, handle_level_irq); 76 + irq_set_handler(d->irq, handle_level_irq); 77 77 break; 78 78 79 79 /* IRQ_TYPE_EDGE_BOTH is not supported */ ··· 98 98 /* configure IRQ's */ 99 99 for (i = 0; i < NR_IRQS; i++) { 100 100 set_irq_flags(i, IRQF_VALID); 101 - set_irq_chip(i, &pnx4008_irq_chip); 101 + irq_set_chip(i, &pnx4008_irq_chip); 102 102 pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]); 103 103 } 104 104
+4 -4
arch/arm/mach-pxa/am200epd.c
··· 128 128 return 0; 129 129 130 130 err_req_gpio: 131 - while (i > 0) 132 - gpio_free(gpios[i--]); 131 + while (--i >= 0) 132 + gpio_free(gpios[i]); 133 133 134 134 return err; 135 135 } ··· 194 194 }; 195 195 196 196 /* this gets called as part of our init. these steps must be done now so 197 - * that we can use set_pxa_fb_info */ 197 + * that we can use pxa_set_fb_info */ 198 198 static void __init am200_presetup_fb(void) 199 199 { 200 200 int fw; ··· 249 249 /* we divide since we told the LCD controller we're 16bpp */ 250 250 am200_fb_info.modes->xres /= 2; 251 251 252 - set_pxa_fb_info(&am200_fb_info); 252 + pxa_set_fb_info(NULL, &am200_fb_info); 253 253 254 254 } 255 255
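The reworked error unwind above fixes two off-by-one bugs at once: on a failure at index i, the old loop, while (i > 0) gpio_free(gpios[i--]), freed gpios[i], which was never successfully requested, and stopped before releasing gpios[0], which was. The new while (--i >= 0) form frees exactly gpios[i-1] down to gpios[0]; failing at i == 2, for instance, used to free entries 2 and 1 but now frees 1 and 0. The am300epd.c hunk below applies the same idiom to both of its request loops.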
+7 -6
arch/arm/mach-pxa/am300epd.c
··· 125 125 if (err) { 126 126 dev_err(&am300_device->dev, "failed requesting " 127 127 "gpio %d, err=%d\n", i, err); 128 - while (i >= DB0_GPIO_PIN) 129 - gpio_free(i--); 130 - i = ARRAY_SIZE(gpios) - 1; 131 - goto err_req_gpio; 128 + goto err_req_gpio2; 132 129 } 133 130 } 134 131 ··· 156 159 157 160 return 0; 158 161 162 + err_req_gpio2: 163 + while (--i >= DB0_GPIO_PIN) 164 + gpio_free(i); 165 + i = ARRAY_SIZE(gpios); 159 166 err_req_gpio: 160 - while (i > 0) 161 - gpio_free(gpios[i--]); 167 + while (--i >= 0) 168 + gpio_free(gpios[i]); 162 169 163 170 return err; 164 171 }
+5 -5
arch/arm/mach-pxa/balloon3.c
··· 263 263 } 264 264 265 265 balloon3_lcd_screen.pxafb_backlight_power = balloon3_backlight_power; 266 - set_pxa_fb_info(&balloon3_lcd_screen); 266 + pxa_set_fb_info(NULL, &balloon3_lcd_screen); 267 267 return; 268 268 269 269 err2: ··· 527 527 pxa27x_init_irq(); 528 528 /* setup extra Balloon3 irqs */ 529 529 for (irq = BALLOON3_IRQ(0); irq <= BALLOON3_IRQ(7); irq++) { 530 - set_irq_chip(irq, &balloon3_irq_chip); 531 - set_irq_handler(irq, handle_level_irq); 530 + irq_set_chip_and_handler(irq, &balloon3_irq_chip, 531 + handle_level_irq); 532 532 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 533 533 } 534 534 535 - set_irq_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler); 536 - set_irq_type(BALLOON3_AUX_NIRQ, IRQ_TYPE_EDGE_FALLING); 535 + irq_set_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler); 536 + irq_set_irq_type(BALLOON3_AUX_NIRQ, IRQ_TYPE_EDGE_FALLING); 537 537 538 538 pr_debug("%s: chained handler installed - irq %d automatically " 539 539 "enabled\n", __func__, BALLOON3_AUX_NIRQ);
+3 -2
arch/arm/mach-pxa/cm-x2xx-pci.c
··· 70 70 71 71 cmx2xx_it8152_irq_gpio = irq_gpio; 72 72 73 - set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING); 73 + irq_set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING); 74 74 75 - set_irq_chained_handler(gpio_to_irq(irq_gpio), cmx2xx_it8152_irq_demux); 75 + irq_set_chained_handler(gpio_to_irq(irq_gpio), 76 + cmx2xx_it8152_irq_demux); 76 77 } 77 78 78 79 #ifdef CONFIG_PM
+1 -1
arch/arm/mach-pxa/cm-x2xx.c
··· 379 379 380 380 static void __init cmx2xx_init_display(void) 381 381 { 382 - set_pxa_fb_info(cmx2xx_display); 382 + pxa_set_fb_info(NULL, cmx2xx_display); 383 383 } 384 384 #else 385 385 static inline void cmx2xx_init_display(void) {}
+2 -2
arch/arm/mach-pxa/cm-x300.c
··· 296 296 297 297 static void __init cm_x300_init_lcd(void) 298 298 { 299 - set_pxa_fb_info(&cm_x300_lcd); 299 + pxa_set_fb_info(NULL, &cm_x300_lcd); 300 300 } 301 301 #else 302 302 static inline void cm_x300_init_lcd(void) {} ··· 765 765 { 766 766 pxa3xx_set_i2c_power_info(&cm_x300_pwr_i2c_info); 767 767 i2c_register_board_info(1, &cm_x300_pmic_info, 1); 768 - set_irq_wake(IRQ_WAKEUP0, 1); 768 + irq_set_irq_wake(IRQ_WAKEUP0, 1); 769 769 } 770 770 771 771 static void __init cm_x300_init_wi2wi(void)
+1 -1
arch/arm/mach-pxa/colibri-pxa270-income.c
··· 175 175 176 176 static void __init income_lcd_init(void) 177 177 { 178 - set_pxa_fb_info(&income_lcd_screen); 178 + pxa_set_fb_info(NULL, &income_lcd_screen); 179 179 } 180 180 #else 181 181 static inline void income_lcd_init(void) {}
+1 -1
arch/arm/mach-pxa/colibri-pxa3xx.c
··· 105 105 lcd_bl_pin = bl_pin; 106 106 gpio_request(bl_pin, "lcd backlight"); 107 107 gpio_direction_output(bl_pin, 0); 108 - set_pxa_fb_info(&sharp_lq43_info); 108 + pxa_set_fb_info(NULL, &sharp_lq43_info); 109 109 } 110 110 #endif 111 111
-1
arch/arm/mach-pxa/corgi.c
··· 462 462 * USB Device Controller 463 463 */ 464 464 static struct pxa2xx_udc_mach_info udc_info __initdata = { 465 - .gpio_vbus = -1, 466 465 /* no connect GPIO; corgi can't tell connection status */ 467 466 .gpio_pullup = CORGI_GPIO_USB_PULLUP, 468 467 };
+2 -7
arch/arm/mach-pxa/devices.c
··· 90 90 91 91 static struct pxa2xx_udc_mach_info pxa_udc_info = { 92 92 .gpio_pullup = -1, 93 - .gpio_vbus = -1, 94 93 }; 95 94 96 95 void __init pxa_set_udc_info(struct pxa2xx_udc_mach_info *info) ··· 187 188 .resource = pxafb_resources, 188 189 }; 189 190 190 - void __init set_pxa_fb_info(struct pxafb_mach_info *info) 191 + void __init pxa_set_fb_info(struct device *parent, struct pxafb_mach_info *info) 191 192 { 193 + pxa_device_fb.dev.parent = parent; 192 194 pxa_register_device(&pxa_device_fb, info); 193 - } 194 - 195 - void __init set_pxa_fb_parent(struct device *parent_dev) 196 - { 197 - pxa_device_fb.dev.parent = parent_dev; 198 195 } 199 196 200 197 static struct resource pxa_resource_ffuart[] = {
+1 -1
arch/arm/mach-pxa/em-x270.c
··· 689 689 690 690 static void __init em_x270_init_lcd(void) 691 691 { 692 - set_pxa_fb_info(&em_x270_lcd); 692 + pxa_set_fb_info(NULL, &em_x270_lcd); 693 693 } 694 694 #else 695 695 static inline void em_x270_init_lcd(void) {}
+27 -9
arch/arm/mach-pxa/eseries.c
··· 20 20 #include <linux/mfd/t7l66xb.h> 21 21 #include <linux/mtd/nand.h> 22 22 #include <linux/mtd/partitions.h> 23 + #include <linux/usb/gpio_vbus.h> 23 24 24 25 #include <video/w100fb.h> 25 26 ··· 52 51 mi->bank[0].size = (64*1024*1024); 53 52 } 54 53 55 - struct pxa2xx_udc_mach_info e7xx_udc_mach_info = { 54 + struct gpio_vbus_mach_info e7xx_udc_info = { 56 55 .gpio_vbus = GPIO_E7XX_USB_DISC, 57 56 .gpio_pullup = GPIO_E7XX_USB_PULLUP, 58 57 .gpio_pullup_inverted = 1 58 + }; 59 + 60 + static struct platform_device e7xx_gpio_vbus = { 61 + .name = "gpio-vbus", 62 + .id = -1, 63 + .dev = { 64 + .platform_data = &e7xx_udc_info, 65 + }, 59 66 }; 60 67 61 68 struct pxaficp_platform_data e7xx_ficp_platform_data = { ··· 174 165 175 166 static struct platform_device *e330_devices[] __initdata = { 176 167 &e330_tc6387xb_device, 168 + &e7xx_gpio_vbus, 177 169 }; 178 170 179 171 static void __init e330_init(void) ··· 185 175 eseries_register_clks(); 186 176 eseries_get_tmio_gpios(); 187 177 platform_add_devices(ARRAY_AND_SIZE(e330_devices)); 188 - pxa_set_udc_info(&e7xx_udc_mach_info); 189 178 } 190 179 191 180 MACHINE_START(E330, "Toshiba e330") ··· 223 214 224 215 static struct platform_device *e350_devices[] __initdata = { 225 216 &e350_t7l66xb_device, 217 + &e7xx_gpio_vbus, 226 218 }; 227 219 228 220 static void __init e350_init(void) ··· 234 224 eseries_register_clks(); 235 225 eseries_get_tmio_gpios(); 236 226 platform_add_devices(ARRAY_AND_SIZE(e350_devices)); 237 - pxa_set_udc_info(&e7xx_udc_mach_info); 238 227 } 239 228 240 229 MACHINE_START(E350, "Toshiba e350") ··· 342 333 343 334 static struct platform_device *e400_devices[] __initdata = { 344 335 &e400_t7l66xb_device, 336 + &e7xx_gpio_vbus, 345 337 }; 346 338 347 339 static void __init e400_init(void) ··· 354 344 /* Fixme - e400 may have a switched clock */ 355 345 eseries_register_clks(); 356 346 eseries_get_tmio_gpios(); 357 - set_pxa_fb_info(&e400_pxafb_mach_info); 347 + pxa_set_fb_info(NULL, &e400_pxafb_mach_info); 358 348 platform_add_devices(ARRAY_AND_SIZE(e400_devices)); 359 - pxa_set_udc_info(&e7xx_udc_mach_info); 360 349 } 361 350 362 351 MACHINE_START(E400, "Toshiba e400") ··· 528 519 static struct platform_device *e740_devices[] __initdata = { 529 520 &e740_fb_device, 530 521 &e740_t7l66xb_device, 522 + &e7xx_gpio_vbus, 531 523 }; 532 524 533 525 static void __init e740_init(void) ··· 542 532 "UDCCLK", &pxa25x_device_udc.dev), 543 533 eseries_get_tmio_gpios(); 544 534 platform_add_devices(ARRAY_AND_SIZE(e740_devices)); 545 - pxa_set_udc_info(&e7xx_udc_mach_info); 546 535 pxa_set_ac97_info(NULL); 547 536 pxa_set_ficp_info(&e7xx_ficp_platform_data); 548 537 } ··· 720 711 static struct platform_device *e750_devices[] __initdata = { 721 712 &e750_fb_device, 722 713 &e750_tc6393xb_device, 714 + &e7xx_gpio_vbus, 723 715 }; 724 716 725 717 static void __init e750_init(void) ··· 733 723 "GPIO11_CLK", NULL), 734 724 eseries_get_tmio_gpios(); 735 725 platform_add_devices(ARRAY_AND_SIZE(e750_devices)); 736 - pxa_set_udc_info(&e7xx_udc_mach_info); 737 726 pxa_set_ac97_info(NULL); 738 727 pxa_set_ficp_info(&e7xx_ficp_platform_data); 739 728 } ··· 882 873 883 874 /* --------------------------- UDC definitions --------------------------- */ 884 875 885 - static struct pxa2xx_udc_mach_info e800_udc_mach_info = { 876 + static struct gpio_vbus_mach_info e800_udc_info = { 886 877 .gpio_vbus = GPIO_E800_USB_DISC, 887 878 .gpio_pullup = GPIO_E800_USB_PULLUP, 888 879 .gpio_pullup_inverted = 1 889 880 }; 881 + 882 + static struct platform_device 
e800_gpio_vbus = { 883 + .name = "gpio-vbus", 884 + .id = -1, 885 + .dev = { 886 + .platform_data = &e800_udc_info, 887 + }, 888 + }; 889 + 890 890 891 891 /* ----------------- e800 tc6393xb parameters ------------------ */ 892 892 ··· 925 907 static struct platform_device *e800_devices[] __initdata = { 926 908 &e800_fb_device, 927 909 &e800_tc6393xb_device, 910 + &e800_gpio_vbus, 928 911 }; 929 912 930 913 static void __init e800_init(void) ··· 938 919 "GPIO11_CLK", NULL), 939 920 eseries_get_tmio_gpios(); 940 921 platform_add_devices(ARRAY_AND_SIZE(e800_devices)); 941 - pxa_set_udc_info(&e800_udc_mach_info); 942 922 pxa_set_ac97_info(NULL); 943 923 } 944 924
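These eseries conversions swap the UDC's built-in VBUS GPIO handling for a dedicated "gpio-vbus" transceiver device: struct gpio_vbus_mach_info carries the same gpio_vbus/gpio_pullup/gpio_pullup_inverted fields as before, and the gpio-vbus driver reports cable state to the UDC through the transceiver interface, which is why the explicit pxa_set_udc_info() calls drop out of each board's init. The gumstix and tosa hunks below make the same transition.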
+6 -6
arch/arm/mach-pxa/ezx.c
··· 783 783 784 784 pxa_set_i2c_info(NULL); 785 785 786 - set_pxa_fb_info(&ezx_fb_info_1); 786 + pxa_set_fb_info(NULL, &ezx_fb_info_1); 787 787 788 788 pxa_set_keypad_info(&a780_keypad_platform_data); 789 789 ··· 853 853 pxa_set_i2c_info(NULL); 854 854 i2c_register_board_info(0, ARRAY_AND_SIZE(e680_i2c_board_info)); 855 855 856 - set_pxa_fb_info(&ezx_fb_info_1); 856 + pxa_set_fb_info(NULL, &ezx_fb_info_1); 857 857 858 858 pxa_set_keypad_info(&e680_keypad_platform_data); 859 859 ··· 918 918 pxa_set_i2c_info(NULL); 919 919 i2c_register_board_info(0, ARRAY_AND_SIZE(a1200_i2c_board_info)); 920 920 921 - set_pxa_fb_info(&ezx_fb_info_2); 921 + pxa_set_fb_info(NULL, &ezx_fb_info_2); 922 922 923 923 pxa_set_keypad_info(&a1200_keypad_platform_data); 924 924 ··· 1103 1103 pxa_set_i2c_info(NULL); 1104 1104 i2c_register_board_info(0, ARRAY_AND_SIZE(a910_i2c_board_info)); 1105 1105 1106 - set_pxa_fb_info(&ezx_fb_info_2); 1106 + pxa_set_fb_info(NULL, &ezx_fb_info_2); 1107 1107 1108 1108 pxa_set_keypad_info(&a910_keypad_platform_data); 1109 1109 ··· 1173 1173 pxa_set_i2c_info(NULL); 1174 1174 i2c_register_board_info(0, ARRAY_AND_SIZE(e6_i2c_board_info)); 1175 1175 1176 - set_pxa_fb_info(&ezx_fb_info_2); 1176 + pxa_set_fb_info(NULL, &ezx_fb_info_2); 1177 1177 1178 1178 pxa_set_keypad_info(&e6_keypad_platform_data); 1179 1179 ··· 1212 1212 pxa_set_i2c_info(NULL); 1213 1213 i2c_register_board_info(0, ARRAY_AND_SIZE(e2_i2c_board_info)); 1214 1214 1215 - set_pxa_fb_info(&ezx_fb_info_2); 1215 + pxa_set_fb_info(NULL, &ezx_fb_info_2); 1216 1216 1217 1217 pxa_set_keypad_info(&e2_keypad_platform_data); 1218 1218
+11 -2
arch/arm/mach-pxa/gumstix.c
··· 26 26 #include <linux/gpio.h> 27 27 #include <linux/err.h> 28 28 #include <linux/clk.h> 29 + #include <linux/usb/gpio_vbus.h> 29 30 30 31 #include <asm/setup.h> 31 32 #include <asm/memory.h> ··· 107 106 #endif 108 107 109 108 #ifdef CONFIG_USB_GADGET_PXA25X 110 - static struct pxa2xx_udc_mach_info gumstix_udc_info __initdata = { 109 + static struct gpio_vbus_mach_info gumstix_udc_info = { 111 110 .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn, 112 111 .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx, 113 112 }; 114 113 114 + static struct platform_device gumstix_gpio_vbus = { 115 + .name = "gpio-vbus", 116 + .id = -1, 117 + .dev = { 118 + .platform_data = &gumstix_udc_info, 119 + }, 120 + }; 121 + 115 122 static void __init gumstix_udc_init(void) 116 123 { 117 - pxa_set_udc_info(&gumstix_udc_info); 124 + platform_device_register(&gumstix_gpio_vbus); 118 125 } 119 126 #else 120 127 static void gumstix_udc_init(void)
+1 -1
arch/arm/mach-pxa/idp.c
··· 167 167 168 168 platform_device_register(&smc91x_device); 169 169 //platform_device_register(&mst_audio_device); 170 - set_pxa_fb_info(&sharp_lm8v31); 170 + pxa_set_fb_info(NULL, &sharp_lm8v31); 171 171 pxa_set_mci_info(&idp_mci_platform_data); 172 172 } 173 173
+5
arch/arm/mach-pxa/include/mach/palmz72.h
··· 44 44 #define GPIO_NR_PALMZ72_BT_POWER 17 45 45 #define GPIO_NR_PALMZ72_BT_RESET 83 46 46 47 + /* Camera */ 48 + #define GPIO_NR_PALMZ72_CAM_PWDN 56 49 + #define GPIO_NR_PALMZ72_CAM_RESET 57 50 + #define GPIO_NR_PALMZ72_CAM_POWER 91 51 + 47 52 /** Initial values **/ 48 53 49 54 /* Battery */
+2 -2
arch/arm/mach-pxa/include/mach/pxafb.h
··· 154 154 void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *); 155 155 void (*smart_update)(struct fb_info *); 156 156 }; 157 - void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info); 158 - void set_pxa_fb_parent(struct device *parent_dev); 157 + 158 + void pxa_set_fb_info(struct device *, struct pxafb_mach_info *); 159 159 unsigned long pxafb_get_hsync_time(struct device *dev); 160 160 161 161 extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
+1 -2
arch/arm/mach-pxa/include/mach/z2.h
··· 25 25 #define GPIO98_ZIPITZ2_LID_BUTTON 98 26 26 27 27 /* Libertas GSPI8686 WiFi */ 28 - #define GPIO14_ZIPITZ2_WIFI_RESET 14 29 - #define GPIO15_ZIPITZ2_WIFI_POWER 15 28 + #define GPIO14_ZIPITZ2_WIFI_POWER 14 30 29 #define GPIO24_ZIPITZ2_WIFI_CS 24 31 30 #define GPIO36_ZIPITZ2_WIFI_IRQ 36 32 31
+6 -6
arch/arm/mach-pxa/irq.c
··· 137 137 GEDR0 = 0x3; 138 138 139 139 for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) { 140 - set_irq_chip(irq, &pxa_low_gpio_chip); 141 - set_irq_chip_data(irq, irq_base(0)); 142 - set_irq_handler(irq, handle_edge_irq); 140 + irq_set_chip_and_handler(irq, &pxa_low_gpio_chip, 141 + handle_edge_irq); 142 + irq_set_chip_data(irq, irq_base(0)); 143 143 set_irq_flags(irq, IRQF_VALID); 144 144 } 145 145 ··· 165 165 __raw_writel(i | IPR_VALID, IRQ_BASE + IPR(i)); 166 166 167 167 irq = PXA_IRQ(i); 168 - set_irq_chip(irq, &pxa_internal_irq_chip); 169 - set_irq_chip_data(irq, base); 170 - set_irq_handler(irq, handle_level_irq); 168 + irq_set_chip_and_handler(irq, &pxa_internal_irq_chip, 169 + handle_level_irq); 170 + irq_set_chip_data(irq, base); 171 171 set_irq_flags(irq, IRQF_VALID); 172 172 } 173 173 }
+1 -1
arch/arm/mach-pxa/littleton.c
··· 185 185 186 186 static void littleton_init_lcd(void) 187 187 { 188 - set_pxa_fb_info(&littleton_lcd_info); 188 + pxa_set_fb_info(NULL, &littleton_lcd_info); 189 189 } 190 190 #else 191 191 static inline void littleton_init_lcd(void) {};
+5 -5
arch/arm/mach-pxa/lpd270.c
··· 149 149 150 150 /* setup extra LogicPD PXA270 irqs */ 151 151 for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) { 152 - set_irq_chip(irq, &lpd270_irq_chip); 153 - set_irq_handler(irq, handle_level_irq); 152 + irq_set_chip_and_handler(irq, &lpd270_irq_chip, 153 + handle_level_irq); 154 154 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 155 155 } 156 - set_irq_chained_handler(IRQ_GPIO(0), lpd270_irq_handler); 157 - set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); 156 + irq_set_chained_handler(IRQ_GPIO(0), lpd270_irq_handler); 157 + irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); 158 158 } 159 159 160 160 ··· 480 480 pxa_set_ac97_info(NULL); 481 481 482 482 if (lpd270_lcd_to_use != NULL) 483 - set_pxa_fb_info(lpd270_lcd_to_use); 483 + pxa_set_fb_info(NULL, lpd270_lcd_to_use); 484 484 485 485 pxa_set_ohci_info(&lpd270_ohci_platform_data); 486 486 }
+5 -5
arch/arm/mach-pxa/lubbock.c
··· 165 165 166 166 /* setup extra lubbock irqs */ 167 167 for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) { 168 - set_irq_chip(irq, &lubbock_irq_chip); 169 - set_irq_handler(irq, handle_level_irq); 168 + irq_set_chip_and_handler(irq, &lubbock_irq_chip, 169 + handle_level_irq); 170 170 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 171 171 } 172 172 173 - set_irq_chained_handler(IRQ_GPIO(0), lubbock_irq_handler); 174 - set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); 173 + irq_set_chained_handler(IRQ_GPIO(0), lubbock_irq_handler); 174 + irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); 175 175 } 176 176 177 177 #ifdef CONFIG_PM ··· 521 521 522 522 clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL); 523 523 pxa_set_udc_info(&udc_info); 524 - set_pxa_fb_info(&sharp_lm8v31); 524 + pxa_set_fb_info(NULL, &sharp_lm8v31); 525 525 pxa_set_mci_info(&lubbock_mci_platform_data); 526 526 pxa_set_ficp_info(&lubbock_ficp_platform_data); 527 527 pxa_set_ac97_info(NULL);
+1 -1
arch/arm/mach-pxa/magician.c
··· 757 757 gpio_direction_output(GPIO104_MAGICIAN_LCD_POWER_1, 0); 758 758 gpio_direction_output(GPIO105_MAGICIAN_LCD_POWER_2, 0); 759 759 gpio_direction_output(GPIO106_MAGICIAN_LCD_POWER_3, 0); 760 - set_pxa_fb_info(lcd_select ? &samsung_info : &toppoly_info); 760 + pxa_set_fb_info(NULL, lcd_select ? &samsung_info : &toppoly_info); 761 761 } else 762 762 pr_err("LCD detection: CPLD mapping failed\n"); 763 763 }
+5 -5
arch/arm/mach-pxa/mainstone.c
··· 166 166 167 167 /* setup extra Mainstone irqs */ 168 168 for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { 169 - set_irq_chip(irq, &mainstone_irq_chip); 170 - set_irq_handler(irq, handle_level_irq); 169 + irq_set_chip_and_handler(irq, &mainstone_irq_chip, 170 + handle_level_irq); 171 171 if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) 172 172 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); 173 173 else ··· 179 179 MST_INTMSKENA = 0; 180 180 MST_INTSETCLR = 0; 181 181 182 - set_irq_chained_handler(IRQ_GPIO(0), mainstone_irq_handler); 183 - set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); 182 + irq_set_chained_handler(IRQ_GPIO(0), mainstone_irq_handler); 183 + irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); 184 184 } 185 185 186 186 #ifdef CONFIG_PM ··· 592 592 else 593 593 mainstone_pxafb_info.modes = &toshiba_ltm035a776c_mode; 594 594 595 - set_pxa_fb_info(&mainstone_pxafb_info); 595 + pxa_set_fb_info(NULL, &mainstone_pxafb_info); 596 596 mainstone_backlight_register(); 597 597 598 598 pxa_set_mci_info(&mainstone_mci_platform_data);
+1 -1
arch/arm/mach-pxa/mioa701.c
··· 795 795 pxa_set_stuart_info(NULL); 796 796 mio_gpio_request(ARRAY_AND_SIZE(global_gpios)); 797 797 bootstrap_init(); 798 - set_pxa_fb_info(&mioa701_pxafb_info); 798 + pxa_set_fb_info(NULL, &mioa701_pxafb_info); 799 799 pxa_set_mci_info(&mioa701_mci_info); 800 800 pxa_set_keypad_info(&mioa701_keypad_info); 801 801 pxa_set_udc_info(&mioa701_udc_info);
+2 -3
arch/arm/mach-pxa/palm27x.c
··· 1 1 /* 2 2 * Common code for Palm LD, T5, TX, Z72 3 3 * 4 - * Copyright (C) 2010 5 - * Marek Vasut <marek.vasut@gmail.com> 4 + * Copyright (C) 2010-2011 Marek Vasut <marek.vasut@gmail.com> 6 5 * 7 6 * This program is free software; you can redistribute it and/or modify 8 7 * it under the terms of the GNU General Public License version 2 as ··· 157 158 palm27x_lcd_screen.pxafb_lcd_power = palm27x_lcd_ctl; 158 159 } 159 160 160 - set_pxa_fb_info(&palm27x_lcd_screen); 161 + pxa_set_fb_info(NULL, &palm27x_lcd_screen); 161 162 } 162 163 #endif 163 164
+1 -1
arch/arm/mach-pxa/palmtc.c
··· 507 507 508 508 static void __init palmtc_lcd_init(void) 509 509 { 510 - set_pxa_fb_info(&palmtc_lcd_screen); 510 + pxa_set_fb_info(NULL, &palmtc_lcd_screen); 511 511 } 512 512 #else 513 513 static inline void palmtc_lcd_init(void) {}
+8 -25
arch/arm/mach-pxa/palmte2.c
··· 136 136 /****************************************************************************** 137 137 * Backlight 138 138 ******************************************************************************/ 139 + static struct gpio palmte_bl_gpios[] = { 140 + { GPIO_NR_PALMTE2_BL_POWER, GPIOF_INIT_LOW, "Backlight power" }, 141 + { GPIO_NR_PALMTE2_LCD_POWER, GPIOF_INIT_LOW, "LCD power" }, 142 + }; 143 + 139 144 static int palmte2_backlight_init(struct device *dev) 140 145 { 141 - int ret; 142 - 143 - ret = gpio_request(GPIO_NR_PALMTE2_BL_POWER, "BL POWER"); 144 - if (ret) 145 - goto err; 146 - ret = gpio_direction_output(GPIO_NR_PALMTE2_BL_POWER, 0); 147 - if (ret) 148 - goto err2; 149 - ret = gpio_request(GPIO_NR_PALMTE2_LCD_POWER, "LCD POWER"); 150 - if (ret) 151 - goto err2; 152 - ret = gpio_direction_output(GPIO_NR_PALMTE2_LCD_POWER, 0); 153 - if (ret) 154 - goto err3; 155 - 156 - return 0; 157 - err3: 158 - gpio_free(GPIO_NR_PALMTE2_LCD_POWER); 159 - err2: 160 - gpio_free(GPIO_NR_PALMTE2_BL_POWER); 161 - err: 162 - return ret; 146 + return gpio_request_array(ARRAY_AND_SIZE(palmte_bl_gpios)); 163 147 } 164 148 165 149 static int palmte2_backlight_notify(struct device *dev, int brightness) ··· 155 171 156 172 static void palmte2_backlight_exit(struct device *dev) 157 173 { 158 - gpio_free(GPIO_NR_PALMTE2_BL_POWER); 159 - gpio_free(GPIO_NR_PALMTE2_LCD_POWER); 174 + gpio_free_array(ARRAY_AND_SIZE(palmte_bl_gpios)); 160 175 } 161 176 162 177 static struct platform_pwm_backlight_data palmte2_backlight_data = { ··· 346 363 pxa_set_btuart_info(NULL); 347 364 pxa_set_stuart_info(NULL); 348 365 349 - set_pxa_fb_info(&palmte2_lcd_screen); 366 + pxa_set_fb_info(NULL, &palmte2_lcd_screen); 350 367 pxa_set_mci_info(&palmte2_mci_platform_data); 351 368 palmte2_udc_init(); 352 369 pxa_set_ac97_info(&palmte2_ac97_pdata);
+127
arch/arm/mach-pxa/palmz72.c
··· 30 30 #include <linux/wm97xx.h> 31 31 #include <linux/power_supply.h> 32 32 #include <linux/usb/gpio_vbus.h> 33 + #include <linux/i2c-gpio.h> 33 34 34 35 #include <asm/mach-types.h> 35 36 #include <asm/mach/arch.h> ··· 48 47 #include <mach/palm27x.h> 49 48 50 49 #include <mach/pm.h> 50 + #include <mach/camera.h> 51 + 52 + #include <media/soc_camera.h> 51 53 52 54 #include "generic.h" 53 55 #include "devices.h" ··· 106 102 GPIO21_GPIO, /* LCD border switch */ 107 103 GPIO22_GPIO, /* LCD border color */ 108 104 GPIO96_GPIO, /* lcd power */ 105 + 106 + /* PXA Camera */ 107 + GPIO81_CIF_DD_0, 108 + GPIO48_CIF_DD_5, 109 + GPIO50_CIF_DD_3, 110 + GPIO51_CIF_DD_2, 111 + GPIO52_CIF_DD_4, 112 + GPIO53_CIF_MCLK, 113 + GPIO54_CIF_PCLK, 114 + GPIO55_CIF_DD_1, 115 + GPIO84_CIF_FV, 116 + GPIO85_CIF_LV, 117 + GPIO93_CIF_DD_6, 118 + GPIO108_CIF_DD_7, 119 + 120 + GPIO56_GPIO, /* OV9640 Powerdown */ 121 + GPIO57_GPIO, /* OV9640 Reset */ 122 + GPIO91_GPIO, /* OV9640 Power */ 123 + 124 + /* I2C */ 125 + GPIO117_GPIO, /* I2C_SCL */ 126 + GPIO118_GPIO, /* I2C_SDA */ 109 127 110 128 /* Misc. */ 111 129 GPIO0_GPIO | WAKEUP_ON_LEVEL_HIGH, /* power detect */ ··· 280 254 #endif 281 255 282 256 /****************************************************************************** 257 + * SoC Camera 258 + ******************************************************************************/ 259 + #if defined(CONFIG_SOC_CAMERA_OV9640) || \ 260 + defined(CONFIG_SOC_CAMERA_OV9640_MODULE) 261 + static struct pxacamera_platform_data palmz72_pxacamera_platform_data = { 262 + .flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 | 263 + PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN, 264 + .mclk_10khz = 2600, 265 + }; 266 + 267 + /* Board I2C devices. */ 268 + static struct i2c_board_info palmz72_i2c_device[] = { 269 + { 270 + I2C_BOARD_INFO("ov9640", 0x30), 271 + } 272 + }; 273 + 274 + static int palmz72_camera_power(struct device *dev, int power) 275 + { 276 + gpio_set_value(GPIO_NR_PALMZ72_CAM_PWDN, !power); 277 + mdelay(50); 278 + return 0; 279 + } 280 + 281 + static int palmz72_camera_reset(struct device *dev) 282 + { 283 + gpio_set_value(GPIO_NR_PALMZ72_CAM_RESET, 1); 284 + mdelay(50); 285 + gpio_set_value(GPIO_NR_PALMZ72_CAM_RESET, 0); 286 + mdelay(50); 287 + return 0; 288 + } 289 + 290 + static struct soc_camera_link palmz72_iclink = { 291 + .bus_id = 0, /* Match id in pxa27x_device_camera in device.c */ 292 + .board_info = &palmz72_i2c_device[0], 293 + .i2c_adapter_id = 0, 294 + .module_name = "ov96xx", 295 + .power = &palmz72_camera_power, 296 + .reset = &palmz72_camera_reset, 297 + .flags = SOCAM_DATAWIDTH_8, 298 + }; 299 + 300 + static struct i2c_gpio_platform_data palmz72_i2c_bus_data = { 301 + .sda_pin = 118, 302 + .scl_pin = 117, 303 + .udelay = 10, 304 + .timeout = 100, 305 + }; 306 + 307 + static struct platform_device palmz72_i2c_bus_device = { 308 + .name = "i2c-gpio", 309 + .id = 0, /* we use this as a replacement for i2c-pxa */ 310 + .dev = { 311 + .platform_data = &palmz72_i2c_bus_data, 312 + } 313 + }; 314 + 315 + static struct platform_device palmz72_camera = { 316 + .name = "soc-camera-pdrv", 317 + .id = -1, 318 + .dev = { 319 + .platform_data = &palmz72_iclink, 320 + }, 321 + }; 322 + 323 + /* Here we request the camera GPIOs and configure them. We power up the camera 324 + * module, deassert the reset pin, but put it into powerdown (low to no power 325 + * consumption) mode. This allows us to later bring the module up fast. 
*/ 326 + static struct gpio palmz72_camera_gpios[] = { 327 + { GPIO_NR_PALMZ72_CAM_POWER, GPIOF_INIT_HIGH,"Camera DVDD" }, 328 + { GPIO_NR_PALMZ72_CAM_RESET, GPIOF_INIT_LOW, "Camera RESET" }, 329 + { GPIO_NR_PALMZ72_CAM_PWDN, GPIOF_INIT_LOW, "Camera PWDN" }, 330 + }; 331 + 332 + static inline void __init palmz72_cam_gpio_init(void) 333 + { 334 + int ret; 335 + 336 + ret = gpio_request_array(ARRAY_AND_SIZE(palmz72_camera_gpios)); 337 + if (!ret) 338 + gpio_free_array(ARRAY_AND_SIZE(palmz72_camera_gpios)); 339 + else 340 + printk(KERN_ERR "Camera GPIO init failed!\n"); 341 + 342 + return; 343 + } 344 + 345 + static void __init palmz72_camera_init(void) 346 + { 347 + palmz72_cam_gpio_init(); 348 + pxa_set_camera_info(&palmz72_pxacamera_platform_data); 349 + platform_device_register(&palmz72_i2c_bus_device); 350 + platform_device_register(&palmz72_camera); 351 + } 352 + #else 353 + static inline void palmz72_camera_init(void) {} 354 + #endif 355 + 356 + /****************************************************************************** 283 357 * Machine init 284 358 ******************************************************************************/ 285 359 static void __init palmz72_init(void) ··· 402 276 palm27x_pmic_init(); 403 277 palmz72_kpc_init(); 404 278 palmz72_leds_init(); 279 + palmz72_camera_init(); 405 280 } 406 281 407 282 MACHINE_START(PALMZ72, "Palm Zire72")
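Note the request-then-free pattern in palmz72_cam_gpio_init() above: the three camera GPIOs are claimed only long enough to latch their initial levels (module powered, reset deasserted, powerdown asserted) and are released again on success, so the soc-camera/ov9640 stack can request the same pins itself when it probes.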
+5 -5
arch/arm/mach-pxa/pcm990-baseboard.c
··· 281 281 282 282 /* setup extra PCM990 irqs */ 283 283 for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) { 284 - set_irq_chip(irq, &pcm990_irq_chip); 285 - set_irq_handler(irq, handle_level_irq); 284 + irq_set_chip_and_handler(irq, &pcm990_irq_chip, 285 + handle_level_irq); 286 286 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 287 287 } 288 288 289 289 PCM990_INTMSKENA = 0x00; /* disable all Interrupts */ 290 290 PCM990_INTSETCLR = 0xFF; 291 291 292 - set_irq_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler); 293 - set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE); 292 + irq_set_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler); 293 + irq_set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE); 294 294 } 295 295 296 296 static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, ··· 515 515 pcm990_init_irq(); 516 516 517 517 #ifndef CONFIG_PCM990_DISPLAY_NONE 518 - set_pxa_fb_info(&pcm990_fbinfo); 518 + pxa_set_fb_info(NULL, &pcm990_fbinfo); 519 519 #endif 520 520 platform_device_register(&pcm990_backlight_device); 521 521
+1 -2
arch/arm/mach-pxa/poodle.c
··· 445 445 if (ret) 446 446 pr_warning("poodle: Unable to register LoCoMo device\n"); 447 447 448 - set_pxa_fb_parent(&poodle_locomo_device.dev); 449 - set_pxa_fb_info(&poodle_fb_info); 448 + pxa_set_fb_info(&poodle_locomo_device.dev, &poodle_fb_info); 450 449 pxa_set_udc_info(&udc_info); 451 450 pxa_set_mci_info(&poodle_mci_platform_data); 452 451 pxa_set_ficp_info(&poodle_ficp_platform_data);
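set_pxa_fb_parent() and set_pxa_fb_info() merge into a single pxa_set_fb_info() whose first argument is the parent device for the framebuffer platform device; passing NULL keeps the default parent. The two call shapes used throughout this series, taken from the hunks themselves:

/* panel hangs off the LoCoMo companion chip, so name it as parent: */
pxa_set_fb_info(&poodle_locomo_device.dev, &poodle_fb_info);

/* no special parent -- the common case in the boards below: */
pxa_set_fb_info(NULL, &spitz_pxafb_info);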
+2 -2
arch/arm/mach-pxa/pxa3xx.c
··· 362 362 int irq; 363 363 364 364 for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) { 365 - set_irq_chip(irq, &pxa_ext_wakeup_chip); 366 - set_irq_handler(irq, handle_edge_irq); 365 + irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip, 366 + handle_edge_irq); 367 367 set_irq_flags(irq, IRQF_VALID); 368 368 } 369 369
+1 -1
arch/arm/mach-pxa/raumfeld.c
··· 597 597 { 598 598 int ret; 599 599 600 - set_pxa_fb_info(&raumfeld_sharp_lcd_info); 600 + pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); 601 601 602 602 /* Earlier devices had the backlight regulator controlled 603 603 * via PWM, later versions use another controller for that */
+1 -1
arch/arm/mach-pxa/saar.c
··· 473 473 474 474 static void __init saar_init_lcd(void) 475 475 { 476 - set_pxa_fb_info(&saar_lcd_info); 476 + pxa_set_fb_info(NULL, &saar_lcd_info); 477 477 } 478 478 #else 479 479 static inline void saar_init_lcd(void) {}
+1 -1
arch/arm/mach-pxa/spitz.c
··· 724 724 725 725 static void __init spitz_lcd_init(void) 726 726 { 727 - set_pxa_fb_info(&spitz_pxafb_info); 727 + pxa_set_fb_info(NULL, &spitz_pxafb_info); 728 728 } 729 729 #else 730 730 static inline void spitz_lcd_init(void) {}
+1 -1
arch/arm/mach-pxa/tavorevb.c
··· 466 466 { 467 467 platform_device_register(&tavorevb_backlight_devices[0]); 468 468 platform_device_register(&tavorevb_backlight_devices[1]); 469 - set_pxa_fb_info(&tavorevb_lcd_info); 469 + pxa_set_fb_info(NULL, &tavorevb_lcd_info); 470 470 } 471 471 #else 472 472 static inline void tavorevb_init_lcd(void) {}
+2 -3
arch/arm/mach-pxa/time.c
··· 100 100 static struct clock_event_device ckevt_pxa_osmr0 = { 101 101 .name = "osmr0", 102 102 .features = CLOCK_EVT_FEAT_ONESHOT, 103 - .shift = 32, 104 103 .rating = 200, 105 104 .set_next_event = pxa_osmr0_set_next_event, 106 105 .set_mode = pxa_osmr0_set_mode, ··· 134 135 135 136 init_sched_clock(&cd, pxa_update_sched_clock, 32, clock_tick_rate); 136 137 137 - ckevt_pxa_osmr0.mult = 138 - div_sc(clock_tick_rate, NSEC_PER_SEC, ckevt_pxa_osmr0.shift); 138 + clocksource_calc_mult_shift(&cksrc_pxa_oscr0, clock_tick_rate, 4); 139 + clockevents_calc_mult_shift(&ckevt_pxa_osmr0, clock_tick_rate, 4); 139 140 ckevt_pxa_osmr0.max_delta_ns = 140 141 clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0); 141 142 ckevt_pxa_osmr0.min_delta_ns =
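The *_calc_mult_shift() helpers replace the hand-rolled .shift = 32 / div_sc() arithmetic; the final argument is the minimum range, in seconds, over which the mult/shift pair must convert without overflow. Both are thin wrappers around clocks_calc_mult_shift(), with the from/to arguments swapped because a clocksource scales cycles to nanoseconds while a clockevent scales nanoseconds to cycles — roughly, as of this kernel:

/* <linux/clocksource.h>: cycles -> ns */
static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
	clocks_calc_mult_shift(&cs->mult, &cs->shift,
			       freq, NSEC_PER_SEC, minsec);
}

/* <linux/clockchips.h>: ns -> cycles */
static inline void
clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
{
	clocks_calc_mult_shift(&ce->mult, &ce->shift,
			       NSEC_PER_SEC, freq, minsec);
}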
+11 -2
arch/arm/mach-pxa/tosa.c
··· 35 35 #include <linux/spi/pxa2xx_spi.h> 36 36 #include <linux/input/matrix_keypad.h> 37 37 #include <linux/i2c/pxa-i2c.h> 38 + #include <linux/usb/gpio_vbus.h> 38 39 39 40 #include <asm/setup.h> 40 41 #include <asm/mach-types.h> ··· 241 240 /* 242 241 * USB Device Controller 243 242 */ 244 - static struct pxa2xx_udc_mach_info udc_info __initdata = { 243 + static struct gpio_vbus_mach_info tosa_udc_info = { 245 244 .gpio_pullup = TOSA_GPIO_USB_PULLUP, 246 245 .gpio_vbus = TOSA_GPIO_USB_IN, 247 246 .gpio_vbus_inverted = 1, 247 + }; 248 + 249 + static struct platform_device tosa_gpio_vbus = { 250 + .name = "gpio-vbus", 251 + .id = -1, 252 + .dev = { 253 + .platform_data = &tosa_udc_info, 254 + }, 248 255 }; 249 256 250 257 /* ··· 900 891 &tosa_bt_device, 901 892 &sharpsl_rom_device, 902 893 &wm9712_device, 894 + &tosa_gpio_vbus, 903 895 }; 904 896 905 897 static void tosa_poweroff(void) ··· 947 937 dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16); 948 938 949 939 pxa_set_mci_info(&tosa_mci_platform_data); 950 - pxa_set_udc_info(&udc_info); 951 940 pxa_set_ficp_info(&tosa_ficp_platform_data); 952 941 pxa_set_i2c_info(NULL); 953 942 pxa_set_ac97_info(NULL);
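tosa stops handing its VBUS and pullup GPIOs to the PXA UDC directly and instead registers the generic gpio-vbus transceiver, which the UDC then picks up through the OTG transceiver layer; .gpio_vbus_inverted marks a sense line that reads low while the cable is attached. Any PXA board can follow the same shape — the GPIO numbers below are placeholders:

#include <linux/platform_device.h>
#include <linux/usb/gpio_vbus.h>

static struct gpio_vbus_mach_info demo_vbus_info = {
	.gpio_vbus          = 41,	/* VBUS sense input (placeholder) */
	.gpio_pullup        = 45,	/* D+ pullup control (placeholder) */
	.gpio_vbus_inverted = 1,	/* sense line is active-low */
};

static struct platform_device demo_gpio_vbus = {
	.name	= "gpio-vbus",
	.id	= -1,
	.dev	= {
		.platform_data = &demo_vbus_info,
	},
};

/* board init then calls platform_device_register(&demo_gpio_vbus)
 * where pxa_set_udc_info(&udc_info) used to be. */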
+2 -2
arch/arm/mach-pxa/trizeps4.c
··· 516 516 pxa_set_stuart_info(NULL); 517 517 518 518 if (0) /* dont know how to determine LCD */ 519 - set_pxa_fb_info(&sharp_lcd); 519 + pxa_set_fb_info(NULL, &sharp_lcd); 520 520 else 521 - set_pxa_fb_info(&toshiba_lcd); 521 + pxa_set_fb_info(NULL, &toshiba_lcd); 522 522 523 523 pxa_set_mci_info(&trizeps4_mci_platform_data); 524 524 #ifndef STATUS_LEDS_ON_STUART_PINS
+5 -5
arch/arm/mach-pxa/viper.c
··· 310 310 /* setup ISA IRQs */ 311 311 for (level = 0; level < ARRAY_SIZE(viper_isa_irqs); level++) { 312 312 isa_irq = viper_bit_to_irq(level); 313 - set_irq_chip(isa_irq, &viper_irq_chip); 314 - set_irq_handler(isa_irq, handle_edge_irq); 313 + irq_set_chip_and_handler(isa_irq, &viper_irq_chip, 314 + handle_edge_irq); 315 315 set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); 316 316 } 317 317 318 - set_irq_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO), 318 + irq_set_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO), 319 319 viper_irq_handler); 320 - set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH); 320 + irq_set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH); 321 321 } 322 322 323 323 /* Flat Panel */ ··· 932 932 /* Wake-up serial console */ 933 933 viper_init_serial_gpio(); 934 934 935 - set_pxa_fb_info(&fb_info); 935 + pxa_set_fb_info(NULL, &fb_info); 936 936 937 937 /* v1 hardware cannot use the datacs line */ 938 938 version = viper_hw_version();
+1 -1
arch/arm/mach-pxa/vpac270.c
··· 572 572 } 573 573 574 574 vpac270_lcd_screen.pxafb_lcd_power = vpac270_lcd_power; 575 - set_pxa_fb_info(&vpac270_lcd_screen); 575 + pxa_set_fb_info(NULL, &vpac270_lcd_screen); 576 576 return; 577 577 578 578 err2:
+38 -39
arch/arm/mach-pxa/z2.c
··· 91 91 GPIO47_STUART_TXD, 92 92 93 93 /* Keypad */ 94 - GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, 95 - GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, 96 - GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, 97 - GPIO34_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH, 98 - GPIO38_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH, 99 - GPIO16_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH, 100 - GPIO17_KP_MKIN_6 | WAKEUP_ON_LEVEL_HIGH, 94 + GPIO100_KP_MKIN_0, 95 + GPIO101_KP_MKIN_1, 96 + GPIO102_KP_MKIN_2, 97 + GPIO34_KP_MKIN_3, 98 + GPIO38_KP_MKIN_4, 99 + GPIO16_KP_MKIN_5, 100 + GPIO17_KP_MKIN_6, 101 101 GPIO103_KP_MKOUT_0, 102 102 GPIO104_KP_MKOUT_1, 103 103 GPIO105_KP_MKOUT_2, ··· 138 138 GPIO1_GPIO, /* Power button */ 139 139 GPIO37_GPIO, /* Headphone detect */ 140 140 GPIO98_GPIO, /* Lid switch */ 141 - GPIO14_GPIO, /* WiFi Reset */ 142 - GPIO15_GPIO, /* WiFi Power */ 141 + GPIO14_GPIO, /* WiFi Power */ 143 142 GPIO24_GPIO, /* WiFi CS */ 144 143 GPIO36_GPIO, /* WiFi IRQ */ 145 144 GPIO88_GPIO, /* LCD CS */ ··· 203 204 /* Keypad Backlight */ 204 205 .pwm_id = 1, 205 206 .max_brightness = 1023, 206 - .dft_brightness = 512, 207 + .dft_brightness = 0, 207 208 .pwm_period_ns = 1260320, 208 209 }, 209 210 [1] = { ··· 270 271 271 272 static void __init z2_lcd_init(void) 272 273 { 273 - set_pxa_fb_info(&z2_lcd_screen); 274 + pxa_set_fb_info(NULL, &z2_lcd_screen); 274 275 } 275 276 #else 276 277 static inline void z2_lcd_init(void) {} ··· 308 309 .active_low = 1, 309 310 }, { 310 311 .name = "z2:green:charged", 311 - .default_trigger = "none", 312 + .default_trigger = "mmc0", 312 313 .gpio = GPIO85_ZIPITZ2_LED_CHARGED, 313 314 .active_low = 1, 314 315 }, { 315 316 .name = "z2:amber:charging", 316 - .default_trigger = "none", 317 + .default_trigger = "Z2-charging-or-full", 317 318 .gpio = GPIO83_ZIPITZ2_LED_CHARGING, 318 319 .active_low = 1, 319 320 }, ··· 426 427 ******************************************************************************/ 427 428 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 428 429 static struct gpio_keys_button z2_pxa_buttons[] = { 429 - {KEY_POWER, GPIO1_ZIPITZ2_POWER_BUTTON, 0, "Power Button" }, 430 - {KEY_CLOSE, GPIO98_ZIPITZ2_LID_BUTTON, 0, "Lid Button" }, 430 + { 431 + .code = KEY_POWER, 432 + .gpio = GPIO1_ZIPITZ2_POWER_BUTTON, 433 + .active_low = 0, 434 + .desc = "Power Button", 435 + .wakeup = 1, 436 + .type = EV_KEY, 437 + }, 438 + { 439 + .code = SW_LID, 440 + .gpio = GPIO98_ZIPITZ2_LID_BUTTON, 441 + .active_low = 1, 442 + .desc = "Lid Switch", 443 + .wakeup = 0, 444 + .type = EV_SW, 445 + }, 431 446 }; 432 447 433 448 static struct gpio_keys_platform_data z2_pxa_keys_data = { ··· 474 461 .batt_I2C_addr = 0x55, 475 462 .batt_I2C_reg = 2, 476 463 .charge_gpio = GPIO0_ZIPITZ2_AC_DETECT, 477 - .min_voltage = 2400000, 478 - .max_voltage = 3700000, 479 - .batt_div = 69, 464 + .min_voltage = 3475000, 465 + .max_voltage = 4190000, 466 + .batt_div = 59, 480 467 .batt_mult = 1000000, 481 468 .batt_tech = POWER_SUPPLY_TECHNOLOGY_LION, 482 469 .batt_name = "Z2", ··· 510 497 { 511 498 int ret = 0; 512 499 513 - ret = gpio_request(GPIO15_ZIPITZ2_WIFI_POWER, "WiFi Power"); 500 + ret = gpio_request(GPIO14_ZIPITZ2_WIFI_POWER, "WiFi Power"); 514 501 if (ret) 515 502 goto err; 516 503 517 - ret = gpio_direction_output(GPIO15_ZIPITZ2_WIFI_POWER, 1); 504 + ret = gpio_direction_output(GPIO14_ZIPITZ2_WIFI_POWER, 1); 518 505 if (ret) 519 506 goto err2; 520 507 521 - ret = gpio_request(GPIO14_ZIPITZ2_WIFI_RESET, "WiFi Reset"); 522 - if (ret) 523 - goto err2; 524 - 525 - ret = 
gpio_direction_output(GPIO14_ZIPITZ2_WIFI_RESET, 0); 526 - if (ret) 527 - goto err3; 528 - 529 - /* Reset the card */ 508 + /* Wait until card is powered on */ 530 509 mdelay(180); 531 - gpio_set_value(GPIO14_ZIPITZ2_WIFI_RESET, 1); 532 - mdelay(20); 533 510 534 511 spi->bits_per_word = 16; 535 512 spi->mode = SPI_MODE_2, ··· 528 525 529 526 return 0; 530 527 531 - err3: 532 - gpio_free(GPIO14_ZIPITZ2_WIFI_RESET); 533 528 err2: 534 - gpio_free(GPIO15_ZIPITZ2_WIFI_POWER); 529 + gpio_free(GPIO14_ZIPITZ2_WIFI_POWER); 535 530 err: 536 531 return ret; 537 532 }; 538 533 539 534 static int z2_lbs_spi_teardown(struct spi_device *spi) 540 535 { 541 - gpio_set_value(GPIO14_ZIPITZ2_WIFI_RESET, 0); 542 - gpio_set_value(GPIO15_ZIPITZ2_WIFI_POWER, 0); 543 - gpio_free(GPIO14_ZIPITZ2_WIFI_RESET); 544 - gpio_free(GPIO15_ZIPITZ2_WIFI_POWER); 545 - return 0; 536 + gpio_set_value(GPIO14_ZIPITZ2_WIFI_POWER, 0); 537 + gpio_free(GPIO14_ZIPITZ2_WIFI_POWER); 546 538 539 + return 0; 547 540 }; 548 541 549 542 static struct pxa2xx_spi_chip z2_lbs_chip_info = {

+11 -10
arch/arm/mach-pxa/zeus.c
··· 136 136 137 137 /* Peripheral IRQs. It would be nice to move those inside driver 138 138 configuration, but it is not supported at the moment. */ 139 - set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING); 140 - set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING); 141 - set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING); 142 - set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), IRQ_TYPE_EDGE_FALLING); 143 - set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING); 139 + irq_set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING); 140 + irq_set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING); 141 + irq_set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING); 142 + irq_set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), 143 + IRQ_TYPE_EDGE_FALLING); 144 + irq_set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING); 144 145 145 146 /* Setup ISA IRQs */ 146 147 for (level = 0; level < ARRAY_SIZE(zeus_isa_irqs); level++) { 147 148 isa_irq = zeus_bit_to_irq(level); 148 - set_irq_chip(isa_irq, &zeus_irq_chip); 149 - set_irq_handler(isa_irq, handle_edge_irq); 149 + irq_set_chip_and_handler(isa_irq, &zeus_irq_chip, 150 + handle_edge_irq); 150 151 set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); 151 152 } 152 153 153 - set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING); 154 - set_irq_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler); 154 + irq_set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING); 155 + irq_set_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler); 155 156 } 156 157 157 158 ··· 847 846 if (zeus_setup_fb_gpios()) 848 847 pr_err("Failed to setup fb gpios\n"); 849 848 else 850 - set_pxa_fb_info(&zeus_fb_info); 849 + pxa_set_fb_info(NULL, &zeus_fb_info); 851 850 852 851 pxa_set_mci_info(&zeus_mci_platform_data); 853 852 pxa_set_udc_info(&zeus_udc_info);
+2 -2
arch/arm/mach-pxa/zylonite.c
··· 208 208 platform_device_register(&zylonite_backlight_device); 209 209 210 210 if (lcd_id & 0x20) { 211 - set_pxa_fb_info(&zylonite_sharp_lcd_info); 211 + pxa_set_fb_info(NULL, &zylonite_sharp_lcd_info); 212 212 return; 213 213 } 214 214 ··· 220 220 else 221 221 zylonite_toshiba_lcd_info.modes = &toshiba_ltm04c380k_mode; 222 222 223 - set_pxa_fb_info(&zylonite_toshiba_lcd_info); 223 + pxa_set_fb_info(NULL, &zylonite_toshiba_lcd_info); 224 224 } 225 225 #else 226 226 static inline void zylonite_init_lcd(void) {}
+1 -1
arch/arm/mach-realview/realview_eb.c
··· 348 348 349 349 #ifndef CONFIG_REALVIEW_EB_ARM11MP_REVB 350 350 /* board GIC, secondary */ 351 - gic_init(1, 64, __io_address(REALVIEW_EB_GIC_DIST_BASE), 351 + gic_init(1, 96, __io_address(REALVIEW_EB_GIC_DIST_BASE), 352 352 __io_address(REALVIEW_EB_GIC_CPU_BASE)); 353 353 gic_cascade_irq(1, IRQ_EB11MP_EB_IRQ1); 354 354 #endif
+7 -7
arch/arm/mach-rpc/irq.c
··· 133 133 134 134 switch (irq) { 135 135 case 0 ... 7: 136 - set_irq_chip(irq, &iomd_a_chip); 137 - set_irq_handler(irq, handle_level_irq); 136 + irq_set_chip_and_handler(irq, &iomd_a_chip, 137 + handle_level_irq); 138 138 set_irq_flags(irq, flags); 139 139 break; 140 140 141 141 case 8 ... 15: 142 - set_irq_chip(irq, &iomd_b_chip); 143 - set_irq_handler(irq, handle_level_irq); 142 + irq_set_chip_and_handler(irq, &iomd_b_chip, 143 + handle_level_irq); 144 144 set_irq_flags(irq, flags); 145 145 break; 146 146 147 147 case 16 ... 21: 148 - set_irq_chip(irq, &iomd_dma_chip); 149 - set_irq_handler(irq, handle_level_irq); 148 + irq_set_chip_and_handler(irq, &iomd_dma_chip, 149 + handle_level_irq); 150 150 set_irq_flags(irq, flags); 151 151 break; 152 152 153 153 case 64 ... 71: 154 - set_irq_chip(irq, &iomd_fiq_chip); 154 + irq_set_chip(irq, &iomd_fiq_chip); 155 155 set_irq_flags(irq, IRQF_VALID); 156 156 break; 157 157 }
+3 -3
arch/arm/mach-s3c2410/bast-irq.c
··· 147 147 148 148 __raw_writeb(0x0, BAST_VA_PC104_IRQMASK); 149 149 150 - set_irq_chained_handler(IRQ_ISA, bast_irq_pc104_demux); 150 + irq_set_chained_handler(IRQ_ISA, bast_irq_pc104_demux); 151 151 152 152 /* register our IRQs */ 153 153 154 154 for (i = 0; i < 4; i++) { 155 155 unsigned int irqno = bast_pc104_irqs[i]; 156 156 157 - set_irq_chip(irqno, &bast_pc104_chip); 158 - set_irq_handler(irqno, handle_level_irq); 157 + irq_set_chip_and_handler(irqno, &bast_pc104_chip, 158 + handle_level_irq); 159 159 set_irq_flags(irqno, IRQF_VALID); 160 160 } 161 161 }
+6 -6
arch/arm/mach-s3c2412/irq.c
··· 175 175 unsigned int irqno; 176 176 177 177 for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) { 178 - set_irq_chip(irqno, &s3c2412_irq_eint0t4); 179 - set_irq_handler(irqno, handle_edge_irq); 178 + irq_set_chip_and_handler(irqno, &s3c2412_irq_eint0t4, 179 + handle_edge_irq); 180 180 set_irq_flags(irqno, IRQF_VALID); 181 181 } 182 182 183 183 /* add demux support for CF/SDI */ 184 184 185 - set_irq_chained_handler(IRQ_S3C2412_CFSDI, s3c2412_irq_demux_cfsdi); 185 + irq_set_chained_handler(IRQ_S3C2412_CFSDI, s3c2412_irq_demux_cfsdi); 186 186 187 187 for (irqno = IRQ_S3C2412_SDI; irqno <= IRQ_S3C2412_CF; irqno++) { 188 - set_irq_chip(irqno, &s3c2412_irq_cfsdi); 189 - set_irq_handler(irqno, handle_level_irq); 188 + irq_set_chip_and_handler(irqno, &s3c2412_irq_cfsdi, 189 + handle_level_irq); 190 190 set_irq_flags(irqno, IRQF_VALID); 191 191 } 192 192 ··· 195 195 s3c2412_irq_rtc_chip = s3c_irq_chip; 196 196 s3c2412_irq_rtc_chip.irq_set_wake = s3c2412_irq_rtc_wake; 197 197 198 - set_irq_chip(IRQ_RTC, &s3c2412_irq_rtc_chip); 198 + irq_set_chip(IRQ_RTC, &s3c2412_irq_rtc_chip); 199 199 200 200 return 0; 201 201 }
+3 -5
arch/arm/mach-s3c2416/irq.c
··· 202 202 { 203 203 unsigned int irqno; 204 204 205 - set_irq_chip(base, &s3c_irq_level_chip); 206 - set_irq_handler(base, handle_level_irq); 207 - set_irq_chained_handler(base, demux); 205 + irq_set_chip_and_handler(base, &s3c_irq_level_chip, handle_level_irq); 206 + irq_set_chained_handler(base, demux); 208 207 209 208 for (irqno = start; irqno <= end; irqno++) { 210 - set_irq_chip(irqno, chip); 211 - set_irq_handler(irqno, handle_level_irq); 209 + irq_set_chip_and_handler(irqno, chip, handle_level_irq); 212 210 set_irq_flags(irqno, IRQF_VALID); 213 211 } 214 212
+5 -5
arch/arm/mach-s3c2440/irq.c
··· 100 100 101 101 /* add new chained handler for wdt, ac7 */ 102 102 103 - set_irq_chip(IRQ_WDT, &s3c_irq_level_chip); 104 - set_irq_handler(IRQ_WDT, handle_level_irq); 105 - set_irq_chained_handler(IRQ_WDT, s3c_irq_demux_wdtac97); 103 + irq_set_chip_and_handler(IRQ_WDT, &s3c_irq_level_chip, 104 + handle_level_irq); 105 + irq_set_chained_handler(IRQ_WDT, s3c_irq_demux_wdtac97); 106 106 107 107 for (irqno = IRQ_S3C2440_WDT; irqno <= IRQ_S3C2440_AC97; irqno++) { 108 - set_irq_chip(irqno, &s3c_irq_wdtac97); 109 - set_irq_handler(irqno, handle_level_irq); 108 + irq_set_chip_and_handler(irqno, &s3c_irq_wdtac97, 109 + handle_level_irq); 110 110 set_irq_flags(irqno, IRQF_VALID); 111 111 } 112 112
+7 -7
arch/arm/mach-s3c2440/s3c244x-irq.c
··· 95 95 { 96 96 unsigned int irqno; 97 97 98 - set_irq_chip(IRQ_NFCON, &s3c_irq_level_chip); 99 - set_irq_handler(IRQ_NFCON, handle_level_irq); 98 + irq_set_chip_and_handler(IRQ_NFCON, &s3c_irq_level_chip, 99 + handle_level_irq); 100 100 set_irq_flags(IRQ_NFCON, IRQF_VALID); 101 101 102 102 /* add chained handler for camera */ 103 103 104 - set_irq_chip(IRQ_CAM, &s3c_irq_level_chip); 105 - set_irq_handler(IRQ_CAM, handle_level_irq); 106 - set_irq_chained_handler(IRQ_CAM, s3c_irq_demux_cam); 104 + irq_set_chip_and_handler(IRQ_CAM, &s3c_irq_level_chip, 105 + handle_level_irq); 106 + irq_set_chained_handler(IRQ_CAM, s3c_irq_demux_cam); 107 107 108 108 for (irqno = IRQ_S3C2440_CAM_C; irqno <= IRQ_S3C2440_CAM_P; irqno++) { 109 - set_irq_chip(irqno, &s3c_irq_cam); 110 - set_irq_handler(irqno, handle_level_irq); 109 + irq_set_chip_and_handler(irqno, &s3c_irq_cam, 110 + handle_level_irq); 111 111 set_irq_flags(irqno, IRQF_VALID); 112 112 } 113 113
+3 -5
arch/arm/mach-s3c2443/irq.c
··· 230 230 { 231 231 unsigned int irqno; 232 232 233 - set_irq_chip(base, &s3c_irq_level_chip); 234 - set_irq_handler(base, handle_level_irq); 235 - set_irq_chained_handler(base, demux); 233 + irq_set_chip_and_handler(base, &s3c_irq_level_chip, handle_level_irq); 234 + irq_set_chained_handler(base, demux); 236 235 237 236 for (irqno = start; irqno <= end; irqno++) { 238 - set_irq_chip(irqno, chip); 239 - set_irq_handler(irqno, handle_level_irq); 237 + irq_set_chip_and_handler(irqno, chip, handle_level_irq); 240 238 set_irq_flags(irqno, IRQF_VALID); 241 239 } 242 240
+6 -7
arch/arm/mach-s3c64xx/irq-eint.c
··· 197 197 int irq; 198 198 199 199 for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) { 200 - set_irq_chip(irq, &s3c_irq_eint); 201 - set_irq_chip_data(irq, (void *)eint_irq_to_bit(irq)); 202 - set_irq_handler(irq, handle_level_irq); 200 + irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq); 201 + irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq)); 203 202 set_irq_flags(irq, IRQF_VALID); 204 203 } 205 204 206 - set_irq_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3); 207 - set_irq_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11); 208 - set_irq_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19); 209 - set_irq_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27); 205 + irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3); 206 + irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11); 207 + irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19); 208 + irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27); 210 209 211 210 return 0; 212 211 }
+1 -1
arch/arm/mach-sa1100/cerf.c
··· 96 96 static void __init cerf_init_irq(void) 97 97 { 98 98 sa1100_init_irq(); 99 - set_irq_type(CERF_ETH_IRQ, IRQ_TYPE_EDGE_RISING); 99 + irq_set_irq_type(CERF_ETH_IRQ, IRQ_TYPE_EDGE_RISING); 100 100 } 101 101 102 102 static struct map_desc cerf_io_desc[] __initdata = {
+8 -8
arch/arm/mach-sa1100/irq.c
··· 323 323 ICCR = 1; 324 324 325 325 for (irq = 0; irq <= 10; irq++) { 326 - set_irq_chip(irq, &sa1100_low_gpio_chip); 327 - set_irq_handler(irq, handle_edge_irq); 326 + irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip, 327 + handle_edge_irq); 328 328 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 329 329 } 330 330 331 331 for (irq = 12; irq <= 31; irq++) { 332 - set_irq_chip(irq, &sa1100_normal_chip); 333 - set_irq_handler(irq, handle_level_irq); 332 + irq_set_chip_and_handler(irq, &sa1100_normal_chip, 333 + handle_level_irq); 334 334 set_irq_flags(irq, IRQF_VALID); 335 335 } 336 336 337 337 for (irq = 32; irq <= 48; irq++) { 338 - set_irq_chip(irq, &sa1100_high_gpio_chip); 339 - set_irq_handler(irq, handle_edge_irq); 338 + irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip, 339 + handle_edge_irq); 340 340 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 341 341 } 342 342 343 343 /* 344 344 * Install handler for GPIO 11-27 edge detect interrupts 345 345 */ 346 - set_irq_chip(IRQ_GPIO11_27, &sa1100_normal_chip); 347 - set_irq_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler); 346 + irq_set_chip(IRQ_GPIO11_27, &sa1100_normal_chip); 347 + irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler); 348 348 349 349 sa1100_init_gpio(); 350 350 }
+4 -4
arch/arm/mach-sa1100/neponset.c
··· 145 145 /* 146 146 * Install handler for GPIO25. 147 147 */ 148 - set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING); 149 - set_irq_chained_handler(IRQ_GPIO25, neponset_irq_handler); 148 + irq_set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING); 149 + irq_set_chained_handler(IRQ_GPIO25, neponset_irq_handler); 150 150 151 151 /* 152 152 * We would set IRQ_GPIO25 to be a wake-up IRQ, but ··· 161 161 * Setup other Neponset IRQs. SA1111 will be done by the 162 162 * generic SA1111 code. 163 163 */ 164 - set_irq_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq); 164 + irq_set_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq); 165 165 set_irq_flags(IRQ_NEPONSET_SMC9196, IRQF_VALID | IRQF_PROBE); 166 - set_irq_handler(IRQ_NEPONSET_USAR, handle_simple_irq); 166 + irq_set_handler(IRQ_NEPONSET_USAR, handle_simple_irq); 167 167 set_irq_flags(IRQ_NEPONSET_USAR, IRQF_VALID | IRQF_PROBE); 168 168 169 169 /*
+1 -1
arch/arm/mach-sa1100/pleb.c
··· 142 142 143 143 GPDR &= ~GPIO_ETH0_IRQ; 144 144 145 - set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING); 145 + irq_set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING); 146 146 } 147 147 148 148 MACHINE_START(PLEB, "PLEB")
+1 -2
arch/arm/mach-shark/irq.c
··· 80 80 int irq; 81 81 82 82 for (irq = 0; irq < NR_IRQS; irq++) { 83 - set_irq_chip(irq, &fb_chip); 84 - set_irq_handler(irq, handle_edge_irq); 83 + irq_set_chip_and_handler(irq, &fb_chip, handle_edge_irq); 85 84 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 86 85 } 87 86
+5 -5
arch/arm/mach-shmobile/board-ap4evb.c
··· 24 24 #include <linux/irq.h> 25 25 #include <linux/platform_device.h> 26 26 #include <linux/delay.h> 27 - #include <linux/mfd/sh_mobile_sdhi.h> 28 27 #include <linux/mfd/tmio.h> 29 28 #include <linux/mmc/host.h> 29 + #include <linux/mmc/sh_mobile_sdhi.h> 30 30 #include <linux/mtd/mtd.h> 31 31 #include <linux/mtd/partitions.h> 32 32 #include <linux/mtd/physmap.h> ··· 312 312 [0] = { 313 313 .name = "SDHI0", 314 314 .start = 0xe6850000, 315 - .end = 0xe68501ff, 315 + .end = 0xe68500ff, 316 316 .flags = IORESOURCE_MEM, 317 317 }, 318 318 [1] = { ··· 345 345 [0] = { 346 346 .name = "SDHI1", 347 347 .start = 0xe6860000, 348 - .end = 0xe68601ff, 348 + .end = 0xe68600ff, 349 349 .flags = IORESOURCE_MEM, 350 350 }, 351 351 [1] = { ··· 1255 1255 gpio_request(GPIO_FN_KEYIN4, NULL); 1256 1256 1257 1257 /* enable TouchScreen */ 1258 - set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW); 1258 + irq_set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW); 1259 1259 1260 1260 tsc_device.irq = IRQ28; 1261 1261 i2c_register_board_info(1, &tsc_device, 1); ··· 1311 1311 lcdc_info.ch[0].lcd_size_cfg.height = 91; 1312 1312 1313 1313 /* enable TouchScreen */ 1314 - set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); 1314 + irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); 1315 1315 1316 1316 tsc_device.irq = IRQ7; 1317 1317 i2c_register_board_info(0, &tsc_device, 1);
+3 -3
arch/arm/mach-shmobile/board-g4evm.c
··· 31 31 #include <linux/input.h> 32 32 #include <linux/input/sh_keysc.h> 33 33 #include <linux/mmc/host.h> 34 - #include <linux/mfd/sh_mobile_sdhi.h> 34 + #include <linux/mmc/sh_mobile_sdhi.h> 35 35 #include <linux/gpio.h> 36 36 #include <mach/sh7377.h> 37 37 #include <mach/common.h> ··· 205 205 [0] = { 206 206 .name = "SDHI0", 207 207 .start = 0xe6d50000, 208 - .end = 0xe6d501ff, 208 + .end = 0xe6d500ff, 209 209 .flags = IORESOURCE_MEM, 210 210 }, 211 211 [1] = { ··· 232 232 [0] = { 233 233 .name = "SDHI1", 234 234 .start = 0xe6d60000, 235 - .end = 0xe6d601ff, 235 + .end = 0xe6d600ff, 236 236 .flags = IORESOURCE_MEM, 237 237 }, 238 238 [1] = {
+7 -7
arch/arm/mach-shmobile/board-mackerel.c
··· 32 32 #include <linux/io.h> 33 33 #include <linux/i2c.h> 34 34 #include <linux/leds.h> 35 - #include <linux/mfd/sh_mobile_sdhi.h> 36 35 #include <linux/mfd/tmio.h> 37 36 #include <linux/mmc/host.h> 38 37 #include <linux/mmc/sh_mmcif.h> 38 + #include <linux/mmc/sh_mobile_sdhi.h> 39 39 #include <linux/mtd/mtd.h> 40 40 #include <linux/mtd/partitions.h> 41 41 #include <linux/mtd/physmap.h> ··· 690 690 [0] = { 691 691 .name = "SDHI0", 692 692 .start = 0xe6850000, 693 - .end = 0xe68501ff, 693 + .end = 0xe68500ff, 694 694 .flags = IORESOURCE_MEM, 695 695 }, 696 696 [1] = { ··· 725 725 [0] = { 726 726 .name = "SDHI1", 727 727 .start = 0xe6860000, 728 - .end = 0xe68601ff, 728 + .end = 0xe68600ff, 729 729 .flags = IORESOURCE_MEM, 730 730 }, 731 731 [1] = { ··· 768 768 [0] = { 769 769 .name = "SDHI2", 770 770 .start = 0xe6870000, 771 - .end = 0xe68701ff, 771 + .end = 0xe68700ff, 772 772 .flags = IORESOURCE_MEM, 773 773 }, 774 774 [1] = { ··· 1124 1124 1125 1125 /* enable Keypad */ 1126 1126 gpio_request(GPIO_FN_IRQ9_42, NULL); 1127 - set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH); 1127 + irq_set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH); 1128 1128 1129 1129 /* enable Touchscreen */ 1130 1130 gpio_request(GPIO_FN_IRQ7_40, NULL); 1131 - set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); 1131 + irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); 1132 1132 1133 1133 /* enable Accelerometer */ 1134 1134 gpio_request(GPIO_FN_IRQ21, NULL); 1135 - set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); 1135 + irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); 1136 1136 1137 1137 /* enable SDHI0 */ 1138 1138 gpio_request(GPIO_FN_SDHICD0, NULL);
+3 -3
arch/arm/mach-shmobile/intc-sh7367.c
··· 421 421 422 422 static void intcs_demux(unsigned int irq, struct irq_desc *desc) 423 423 { 424 - void __iomem *reg = (void *)get_irq_data(irq); 424 + void __iomem *reg = (void *)irq_get_handler_data(irq); 425 425 unsigned int evtcodeas = ioread32(reg); 426 426 427 427 generic_handle_irq(intcs_evt2irq(evtcodeas)); ··· 435 435 register_intc_controller(&intcs_desc); 436 436 437 437 /* demux using INTEVTSA */ 438 - set_irq_data(evt2irq(0xf80), (void *)intevtsa); 439 - set_irq_chained_handler(evt2irq(0xf80), intcs_demux); 438 + irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa); 439 + irq_set_chained_handler(evt2irq(0xf80), intcs_demux); 440 440 }
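The three intc-sh73xx conversions (this one and the two below) show the chained-demux idiom under the new names: the parent IRQ carries a driver cookie attached with irq_set_handler_data(), and the flow handler retrieves it with irq_get_handler_data() before handing the decoded interrupt to genirq. Reduced to its bones, with placeholder names (demo_evt2irq(), DEMO_PARENT_IRQ and demo_intevt_reg are assumptions for illustration):

#include <linux/irq.h>
#include <linux/io.h>

static void demo_demux(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *reg = irq_get_handler_data(irq);
	unsigned int evt = ioread32(reg);	/* hardware event code */

	generic_handle_irq(demo_evt2irq(evt));	/* decode and dispatch */
}

static void __init demo_demux_init(void)
{
	irq_set_handler_data(DEMO_PARENT_IRQ, (void *)demo_intevt_reg);
	irq_set_chained_handler(DEMO_PARENT_IRQ, demo_demux);
}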
+3 -3
arch/arm/mach-shmobile/intc-sh7372.c
··· 601 601 602 602 static void intcs_demux(unsigned int irq, struct irq_desc *desc) 603 603 { 604 - void __iomem *reg = (void *)get_irq_data(irq); 604 + void __iomem *reg = (void *)irq_get_handler_data(irq); 605 605 unsigned int evtcodeas = ioread32(reg); 606 606 607 607 generic_handle_irq(intcs_evt2irq(evtcodeas)); ··· 615 615 register_intc_controller(&intcs_desc); 616 616 617 617 /* demux using INTEVTSA */ 618 - set_irq_data(evt2irq(0xf80), (void *)intevtsa); 619 - set_irq_chained_handler(evt2irq(0xf80), intcs_demux); 618 + irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa); 619 + irq_set_chained_handler(evt2irq(0xf80), intcs_demux); 620 620 }
+3 -3
arch/arm/mach-shmobile/intc-sh7377.c
··· 626 626 627 627 static void intcs_demux(unsigned int irq, struct irq_desc *desc) 628 628 { 629 - void __iomem *reg = (void *)get_irq_data(irq); 629 + void __iomem *reg = (void *)irq_get_handler_data(irq); 630 630 unsigned int evtcodeas = ioread32(reg); 631 631 632 632 generic_handle_irq(intcs_evt2irq(evtcodeas)); ··· 641 641 register_intc_controller(&intcs_desc); 642 642 643 643 /* demux using INTEVTSA */ 644 - set_irq_data(evt2irq(INTCS_INTVECT), (void *)intevtsa); 645 - set_irq_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux); 644 + irq_set_handler_data(evt2irq(INTCS_INTVECT), (void *)intevtsa); 645 + irq_set_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux); 646 646 }
+3 -3
arch/arm/mach-tcc8k/irq.c
··· 102 102 103 103 for (irqno = 0; irqno < NR_IRQS; irqno++) { 104 104 if (irqno < 32) 105 - set_irq_chip(irqno, &tcc8000_irq_chip0); 105 + irq_set_chip(irqno, &tcc8000_irq_chip0); 106 106 else 107 - set_irq_chip(irqno, &tcc8000_irq_chip1); 108 - set_irq_handler(irqno, handle_level_irq); 107 + irq_set_chip(irqno, &tcc8000_irq_chip1); 108 + irq_set_handler(irqno, handle_level_irq); 109 109 set_irq_flags(irqno, IRQF_VALID); 110 110 } 111 111 }
+10 -29
arch/arm/mach-tegra/gpio.c
··· 208 208 spin_unlock_irqrestore(&bank->lvl_lock[port], flags); 209 209 210 210 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 211 - __set_irq_handler_unlocked(d->irq, handle_level_irq); 211 + __irq_set_handler_locked(d->irq, handle_level_irq); 212 212 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 213 - __set_irq_handler_unlocked(d->irq, handle_edge_irq); 213 + __irq_set_handler_locked(d->irq, handle_edge_irq); 214 214 215 215 return 0; 216 216 } ··· 224 224 225 225 desc->irq_data.chip->irq_ack(&desc->irq_data); 226 226 227 - bank = get_irq_data(irq); 227 + bank = irq_get_handler_data(irq); 228 228 229 229 for (port = 0; port < 4; port++) { 230 230 int gpio = tegra_gpio_compose(bank->bank, port, 0); ··· 275 275 } 276 276 277 277 local_irq_restore(flags); 278 - 279 - for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { 280 - struct irq_desc *desc = irq_to_desc(i); 281 - if (!desc || (desc->status & IRQ_WAKEUP)) 282 - continue; 283 - enable_irq(i); 284 - } 285 278 } 286 279 287 280 void tegra_gpio_suspend(void) 288 281 { 289 282 unsigned long flags; 290 283 int b, p, i; 291 - 292 - for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { 293 - struct irq_desc *desc = irq_to_desc(i); 294 - if (!desc) 295 - continue; 296 - if (desc->status & IRQ_WAKEUP) { 297 - int gpio = i - INT_GPIO_BASE; 298 - pr_debug("gpio %d.%d is wakeup\n", gpio/8, gpio&7); 299 - continue; 300 - } 301 - disable_irq(i); 302 - } 303 284 304 285 local_irq_save(flags); 305 286 for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { ··· 301 320 static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable) 302 321 { 303 322 struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d); 304 - return set_irq_wake(bank->irq, enable); 323 + return irq_set_irq_wake(bank->irq, enable); 305 324 } 306 325 #endif 307 326 ··· 340 359 for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { 341 360 bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))]; 342 361 343 - lockdep_set_class(&irq_desc[i].lock, &gpio_lock_class); 344 - set_irq_chip_data(i, bank); 345 - set_irq_chip(i, &tegra_gpio_irq_chip); 346 - set_irq_handler(i, handle_simple_irq); 362 + irq_set_lockdep_class(i, &gpio_lock_class); 363 + irq_set_chip_data(i, bank); 364 + irq_set_chip_and_handler(i, &tegra_gpio_irq_chip, 365 + handle_simple_irq); 347 366 set_irq_flags(i, IRQF_VALID); 348 367 } 349 368 350 369 for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) { 351 370 bank = &tegra_gpio_banks[i]; 352 371 353 - set_irq_chained_handler(bank->irq, tegra_gpio_irq_handler); 354 - set_irq_data(bank->irq, bank); 372 + irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler); 373 + irq_set_handler_data(bank->irq, bank); 355 374 356 375 for (j = 0; j < 4; j++) 357 376 spin_lock_init(&bank->lvl_lock[j]);
+2 -3
arch/arm/mach-tegra/irq.c
··· 144 144 gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), 145 145 IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100)); 146 146 147 - gic = get_irq_chip(29); 147 + gic = irq_get_chip(29); 148 148 tegra_gic_unmask_irq = gic->irq_unmask; 149 149 tegra_gic_mask_irq = gic->irq_mask; 150 150 tegra_gic_ack_irq = gic->irq_ack; ··· 154 154 155 155 for (i = 0; i < INT_MAIN_NR; i++) { 156 156 irq = INT_PRI_BASE + i; 157 - set_irq_chip(irq, &tegra_irq); 158 - set_irq_handler(irq, handle_level_irq); 157 + irq_set_chip_and_handler(irq, &tegra_irq, handle_level_irq); 159 158 set_irq_flags(irq, IRQF_VALID); 160 159 } 161 160 }
+1 -2
arch/arm/mach-ux500/modem-irq-db5500.c
··· 90 90 91 91 static void create_virtual_irq(int irq, struct irq_chip *modem_irq_chip) 92 92 { 93 - set_irq_chip(irq, modem_irq_chip); 94 - set_irq_handler(irq, handle_simple_irq); 93 + irq_set_chip_and_handler(irq, modem_irq_chip, handle_simple_irq); 95 94 set_irq_flags(irq, IRQF_VALID); 96 95 97 96 pr_debug("modem_irq: Created virtual IRQ %d\n", irq);
+7 -7
arch/arm/mach-vt8500/irq.c
··· 97 97 return -EINVAL; 98 98 case IRQF_TRIGGER_HIGH: 99 99 dctr |= VT8500_TRIGGER_HIGH; 100 - irq_desc[orig_irq].handle_irq = handle_level_irq; 100 + __irq_set_handler_locked(orig_irq, handle_level_irq); 101 101 break; 102 102 case IRQF_TRIGGER_FALLING: 103 103 dctr |= VT8500_TRIGGER_FALLING; 104 - irq_desc[orig_irq].handle_irq = handle_edge_irq; 104 + __irq_set_handler_locked(orig_irq, handle_edge_irq); 105 105 break; 106 106 case IRQF_TRIGGER_RISING: 107 107 dctr |= VT8500_TRIGGER_RISING; 108 - irq_desc[orig_irq].handle_irq = handle_edge_irq; 108 + __irq_set_handler_locked(orig_irq, handle_edge_irq); 109 109 break; 110 110 } 111 111 writeb(dctr, base + VT8500_IC_DCTR + irq); ··· 136 136 /* Disable all interrupts and route them to IRQ */ 137 137 writeb(0x00, ic_regbase + VT8500_IC_DCTR + i); 138 138 139 - set_irq_chip(i, &vt8500_irq_chip); 140 - set_irq_handler(i, handle_level_irq); 139 + irq_set_chip_and_handler(i, &vt8500_irq_chip, 140 + handle_level_irq); 141 141 set_irq_flags(i, IRQF_VALID); 142 142 } 143 143 } else { ··· 167 167 writeb(0x00, sic_regbase + VT8500_IC_DCTR 168 168 + i - 64); 169 169 170 - set_irq_chip(i, &vt8500_irq_chip); 171 - set_irq_handler(i, handle_level_irq); 170 + irq_set_chip_and_handler(i, &vt8500_irq_chip, 171 + handle_level_irq); 172 172 set_irq_flags(i, IRQF_VALID); 173 173 } 174 174 } else {
+2 -2
arch/arm/mach-w90x900/irq.c
··· 207 207 __raw_writel(0xFFFFFFFE, REG_AIC_MDCR); 208 208 209 209 for (irqno = IRQ_WDT; irqno <= IRQ_ADC; irqno++) { 210 - set_irq_chip(irqno, &nuc900_irq_chip); 211 - set_irq_handler(irqno, handle_level_irq); 210 + irq_set_chip_and_handler(irqno, &nuc900_irq_chip, 211 + handle_level_irq); 212 212 set_irq_flags(irqno, IRQF_VALID); 213 213 } 214 214 }
+4 -10
arch/arm/plat-mxc/3ds_debugboard.c
··· 100 100 101 101 expio_irq = MXC_BOARD_IRQ_START; 102 102 for (; int_valid != 0; int_valid >>= 1, expio_irq++) { 103 - struct irq_desc *d; 104 103 if ((int_valid & 1) == 0) 105 104 continue; 106 - d = irq_desc + expio_irq; 107 - if (unlikely(!(d->handle_irq))) 108 - pr_err("\nEXPIO irq: %d unhandled\n", expio_irq); 109 - else 110 - d->handle_irq(expio_irq, d); 105 + generic_handle_irq(expio_irq); 111 106 } 112 107 113 108 desc->irq_data.chip->irq_ack(&desc->irq_data); ··· 181 186 __raw_writew(0x1F, brd_io + INTR_MASK_REG); 182 187 for (i = MXC_EXP_IO_BASE; 183 188 i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); i++) { 184 - set_irq_chip(i, &expio_irq_chip); 185 - set_irq_handler(i, handle_level_irq); 189 + irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq); 186 190 set_irq_flags(i, IRQF_VALID); 187 191 } 188 - set_irq_type(p_irq, IRQF_TRIGGER_LOW); 189 - set_irq_chained_handler(p_irq, mxc_expio_irq_handler); 192 + irq_set_irq_type(p_irq, IRQF_TRIGGER_LOW); 193 + irq_set_chained_handler(p_irq, mxc_expio_irq_handler); 190 194 191 195 /* Register Lan device on the debugboard */ 192 196 smsc911x_resources[0].start = LAN9217_BASE_ADDR(base);
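Besides the setter renames, the debugboard demux drops its hand-rolled dispatch — indexing irq_desc[] and calling d->handle_irq directly is exactly the core-internals access this genirq series is removing — in favour of generic_handle_irq(), which performs the same lookup and also copes with unpopulated descriptors:

/* formerly (removed above):
 *	d = irq_desc + expio_irq;
 *	if (unlikely(!(d->handle_irq)))
 *		pr_err("\nEXPIO irq: %d unhandled\n", expio_irq);
 *	else
 *		d->handle_irq(expio_irq, d);
 */
generic_handle_irq(expio_irq);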
+2 -2
arch/arm/plat-mxc/avic.c
··· 139 139 __raw_writel(0, avic_base + AVIC_INTTYPEH); 140 140 __raw_writel(0, avic_base + AVIC_INTTYPEL); 141 141 for (i = 0; i < MXC_INTERNAL_IRQS; i++) { 142 - set_irq_chip(i, &mxc_avic_chip.base); 143 - set_irq_handler(i, handle_level_irq); 142 + irq_set_chip_and_handler(i, &mxc_avic_chip.base, 143 + handle_level_irq); 144 144 set_irq_flags(i, IRQF_VALID); 145 145 } 146 146
+1 -1
arch/arm/plat-mxc/devices/platform-fec.c
··· 53 53 struct resource res[] = { 54 54 { 55 55 .start = data->iobase, 56 - .end = data->iobase + SZ_4K, 56 + .end = data->iobase + SZ_4K - 1, 57 57 .flags = IORESOURCE_MEM, 58 58 }, { 59 59 .start = data->irq,
+1 -1
arch/arm/plat-mxc/devices/platform-imxdi_rtc.c
··· 27 27 struct resource res[] = { 28 28 { 29 29 .start = data->iobase, 30 - .end = data->iobase + SZ_16K, 30 + .end = data->iobase + SZ_16K - 1, 31 31 .flags = IORESOURCE_MEM, 32 32 }, { 33 33 .start = data->irq,
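These two plat-mxc hunks fix the classic resource off-by-one: .end names the last byte of the window, inclusive, so an SZ_4K region starting at iobase must end at iobase + SZ_4K - 1 — without the - 1 the region claims one byte of its neighbour and resource_size() reports 4097. (The SDHI hunks earlier shrink their windows the same way: a 0x100-byte register block ends at base + 0xff.) For illustration, with a made-up base address:

#include <linux/ioport.h>
#include <asm/sizes.h>

static struct resource demo_res = {
	.start	= 0x10000000,			/* placeholder base */
	.end	= 0x10000000 + SZ_4K - 1,	/* last byte, not one past */
	.flags	= IORESOURCE_MEM,
};
/* resource_size(&demo_res) == SZ_4K */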
+13 -11
arch/arm/plat-mxc/gpio.c
··· 175 175 static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) 176 176 { 177 177 u32 irq_stat; 178 - struct mxc_gpio_port *port = get_irq_data(irq); 178 + struct mxc_gpio_port *port = irq_get_handler_data(irq); 179 179 180 180 irq_stat = __raw_readl(port->base + GPIO_ISR) & 181 181 __raw_readl(port->base + GPIO_IMR); ··· 188 188 { 189 189 int i; 190 190 u32 irq_msk, irq_stat; 191 - struct mxc_gpio_port *port = get_irq_data(irq); 191 + struct mxc_gpio_port *port = irq_get_handler_data(irq); 192 192 193 193 /* walk through all interrupt status registers */ 194 194 for (i = 0; i < gpio_table_size; i++) { ··· 311 311 __raw_writel(~0, port[i].base + GPIO_ISR); 312 312 for (j = port[i].virtual_irq_start; 313 313 j < port[i].virtual_irq_start + 32; j++) { 314 - set_irq_chip(j, &gpio_irq_chip); 315 - set_irq_handler(j, handle_level_irq); 314 + irq_set_chip_and_handler(j, &gpio_irq_chip, 315 + handle_level_irq); 316 316 set_irq_flags(j, IRQF_VALID); 317 317 } 318 318 ··· 331 331 332 332 if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) { 333 333 /* setup one handler for each entry */ 334 - set_irq_chained_handler(port[i].irq, mx3_gpio_irq_handler); 335 - set_irq_data(port[i].irq, &port[i]); 334 + irq_set_chained_handler(port[i].irq, 335 + mx3_gpio_irq_handler); 336 + irq_set_handler_data(port[i].irq, &port[i]); 336 337 if (port[i].irq_high) { 337 338 /* setup handler for GPIO 16 to 31 */ 338 - set_irq_chained_handler(port[i].irq_high, 339 - mx3_gpio_irq_handler); 340 - set_irq_data(port[i].irq_high, &port[i]); 339 + irq_set_chained_handler(port[i].irq_high, 340 + mx3_gpio_irq_handler); 341 + irq_set_handler_data(port[i].irq_high, 342 + &port[i]); 341 343 } 342 344 } 343 345 } 344 346 345 347 if (cpu_is_mx2()) { 346 348 /* setup one handler for all GPIO interrupts */ 347 - set_irq_chained_handler(port[0].irq, mx2_gpio_irq_handler); 348 - set_irq_data(port[0].irq, port); 349 + irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler); 350 + irq_set_handler_data(port[0].irq, port); 349 351 } 350 352 351 353 return 0;
+9 -1
arch/arm/plat-mxc/include/mach/audmux.h
··· 15 15 #define MX31_AUDMUX_PORT5_SSI_PINS_5 4 16 16 #define MX31_AUDMUX_PORT6_SSI_PINS_6 5 17 17 18 + #define MX51_AUDMUX_PORT1_SSI0 0 19 + #define MX51_AUDMUX_PORT2_SSI1 1 20 + #define MX51_AUDMUX_PORT3 2 21 + #define MX51_AUDMUX_PORT4 3 22 + #define MX51_AUDMUX_PORT5 4 23 + #define MX51_AUDMUX_PORT6 5 24 + #define MX51_AUDMUX_PORT7 6 25 + 18 26 /* Register definitions for the i.MX21/27 Digital Audio Multiplexer */ 19 27 #define MXC_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff) 20 28 #define MXC_AUDMUX_V1_PCR_INMEN (1 << 8) ··· 36 28 #define MXC_AUDMUX_V1_PCR_TCLKDIR (1 << 30) 37 29 #define MXC_AUDMUX_V1_PCR_TFSDIR (1 << 31) 38 30 39 - /* Register definitions for the i.MX25/31/35 Digital Audio Multiplexer */ 31 + /* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */ 40 32 #define MXC_AUDMUX_V2_PTCR_TFSDIR (1 << 31) 41 33 #define MXC_AUDMUX_V2_PTCR_TFSEL(x) (((x) & 0xf) << 27) 42 34 #define MXC_AUDMUX_V2_PTCR_TCLKDIR (1 << 26)
+6 -6
arch/arm/plat-mxc/include/mach/iomux-mx2x.h
··· 90 90 #define PC31_PF_SSI3_CLK (GPIO_PORTC | GPIO_PF | GPIO_IN | 31) 91 91 #define PD17_PF_I2C_DATA (GPIO_PORTD | GPIO_PF | GPIO_OUT | 17) 92 92 #define PD18_PF_I2C_CLK (GPIO_PORTD | GPIO_PF | GPIO_OUT | 18) 93 - #define PD19_PF_CSPI2_SS2 (GPIO_PORTD | GPIO_PF | 19) 94 - #define PD20_PF_CSPI2_SS1 (GPIO_PORTD | GPIO_PF | 20) 95 - #define PD21_PF_CSPI2_SS0 (GPIO_PORTD | GPIO_PF | 21) 96 - #define PD22_PF_CSPI2_SCLK (GPIO_PORTD | GPIO_PF | 22) 97 - #define PD23_PF_CSPI2_MISO (GPIO_PORTD | GPIO_PF | 23) 98 - #define PD24_PF_CSPI2_MOSI (GPIO_PORTD | GPIO_PF | 24) 93 + #define PD19_PF_CSPI2_SS2 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 19) 94 + #define PD20_PF_CSPI2_SS1 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 20) 95 + #define PD21_PF_CSPI2_SS0 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 21) 96 + #define PD22_PF_CSPI2_SCLK (GPIO_PORTD | GPIO_PF | GPIO_OUT | 22) 97 + #define PD23_PF_CSPI2_MISO (GPIO_PORTD | GPIO_PF | GPIO_IN | 23) 98 + #define PD24_PF_CSPI2_MOSI (GPIO_PORTD | GPIO_PF | GPIO_OUT | 24) 99 99 #define PD25_PF_CSPI1_RDY (GPIO_PORTD | GPIO_PF | GPIO_OUT | 25) 100 100 #define PD26_PF_CSPI1_SS2 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 26) 101 101 #define PD27_PF_CSPI1_SS1 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 27)
+4
arch/arm/plat-mxc/include/mach/mx50.h
··· 282 282 #define MX50_INT_APBHDMA_CHAN6 116 283 283 #define MX50_INT_APBHDMA_CHAN7 117 284 284 285 + #if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS) 286 + extern int mx50_revision(void); 287 + #endif 288 + 285 289 #endif /* ifndef __MACH_MX50_H__ */
+1
arch/arm/plat-mxc/include/mach/mx51.h
··· 347 347 348 348 #if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS) 349 349 extern int mx51_revision(void); 350 + extern void mx51_display_revision(void); 350 351 #endif 351 352 352 353 /* tape-out 1 defines */
+23
arch/arm/plat-mxc/include/mach/mxc.h
··· 51 51 #define IMX_CHIP_REVISION_3_3 0x33 52 52 #define IMX_CHIP_REVISION_UNKNOWN 0xff 53 53 54 + #define IMX_CHIP_REVISION_1_0_STRING "1.0" 55 + #define IMX_CHIP_REVISION_1_1_STRING "1.1" 56 + #define IMX_CHIP_REVISION_1_2_STRING "1.2" 57 + #define IMX_CHIP_REVISION_1_3_STRING "1.3" 58 + #define IMX_CHIP_REVISION_2_0_STRING "2.0" 59 + #define IMX_CHIP_REVISION_2_1_STRING "2.1" 60 + #define IMX_CHIP_REVISION_2_2_STRING "2.2" 61 + #define IMX_CHIP_REVISION_2_3_STRING "2.3" 62 + #define IMX_CHIP_REVISION_3_0_STRING "3.0" 63 + #define IMX_CHIP_REVISION_3_1_STRING "3.1" 64 + #define IMX_CHIP_REVISION_3_2_STRING "3.2" 65 + #define IMX_CHIP_REVISION_3_3_STRING "3.3" 66 + #define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown" 67 + 54 68 #ifndef __ASSEMBLY__ 55 69 extern unsigned int __mxc_cpu_type; 56 70 #endif ··· 193 179 194 180 struct cpu_op { 195 181 u32 cpu_rate; 182 + }; 183 + 184 + int tzic_enable_wake(int is_idle); 185 + enum mxc_cpu_pwr_mode { 186 + WAIT_CLOCKED, /* wfi only */ 187 + WAIT_UNCLOCKED, /* WAIT */ 188 + WAIT_UNCLOCKED_POWER_OFF, /* WAIT + SRPG */ 189 + STOP_POWER_ON, /* just STOP */ 190 + STOP_POWER_OFF, /* STOP + SRPG */ 196 191 }; 197 192 198 193 extern struct cpu_op *(*get_cpu_op)(int *op);
+5 -1
arch/arm/plat-mxc/include/mach/system.h
··· 20 20 #include <mach/hardware.h> 21 21 #include <mach/common.h> 22 22 23 + extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode); 24 + 23 25 static inline void arch_idle(void) 24 26 { 25 27 #ifdef CONFIG_ARCH_MXC91231 ··· 56 54 "orr %0, %0, #0x00000004\n" 57 55 "mcr p15, 0, %0, c1, c0, 0\n" 58 56 : "=r" (reg)); 59 - } else 57 + } else if (cpu_is_mx51()) 58 + mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); 59 + else 60 60 cpu_do_idle(); 61 61 } 62 62
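With the enum and prototype added to mach/mxc.h, arch_idle() can now drop i.MX51 into the WAIT + SRPG state instead of a plain WFI. mx5_cpu_lp_set() programs which state the core enters on its next wait-for-interrupt; the enum runs from shallowest to deepest. A hedged sketch of a caller — pairing the call with cpu_do_idle() is an assumption about the deeper suspend path, not something this diff shows:

/* states from mach/mxc.h, shallowest first:
 *	WAIT_CLOCKED			wfi only
 *	WAIT_UNCLOCKED			WAIT
 *	WAIT_UNCLOCKED_POWER_OFF	WAIT + SRPG  (what arch_idle picks)
 *	STOP_POWER_ON			just STOP
 *	STOP_POWER_OFF			STOP + SRPG
 */
mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);	/* arm the low-power state */
cpu_do_idle();					/* then enter it via WFI */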
+2 -2
arch/arm/plat-mxc/irq-common.c
··· 29 29 30 30 ret = -ENOSYS; 31 31 32 - base = get_irq_chip(irq); 32 + base = irq_get_chip(irq); 33 33 if (base) { 34 34 chip = container_of(base, struct mxc_irq_chip, base); 35 35 if (chip->set_priority) ··· 48 48 49 49 ret = -ENOSYS; 50 50 51 - base = get_irq_chip(irq); 51 + base = irq_get_chip(irq); 52 52 if (base) { 53 53 chip = container_of(base, struct mxc_irq_chip, base); 54 54 if (chip->set_irq_fiq)
+24 -1
arch/arm/plat-mxc/time.c
··· 27 27 #include <linux/clk.h> 28 28 29 29 #include <mach/hardware.h> 30 + #include <asm/sched_clock.h> 30 31 #include <asm/mach/time.h> 31 32 #include <mach/common.h> 32 33 ··· 106 105 __raw_writel(V2_TSTAT_OF1, timer_base + V2_TSTAT); 107 106 } 108 107 108 + static cycle_t dummy_get_cycles(struct clocksource *cs) 109 + { 110 + return 0; 111 + } 112 + 109 113 static cycle_t mx1_2_get_cycles(struct clocksource *cs) 110 114 { 111 115 return __raw_readl(timer_base + MX1_2_TCN); ··· 124 118 static struct clocksource clocksource_mxc = { 125 119 .name = "mxc_timer1", 126 120 .rating = 200, 127 - .read = mx1_2_get_cycles, 121 + .read = dummy_get_cycles, 128 122 .mask = CLOCKSOURCE_MASK(32), 129 123 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 130 124 }; 125 + 126 + static DEFINE_CLOCK_DATA(cd); 127 + unsigned long long notrace sched_clock(void) 128 + { 129 + cycle_t cyc = clocksource_mxc.read(&clocksource_mxc); 130 + 131 + return cyc_to_sched_clock(&cd, cyc, (u32)~0); 132 + } 133 + 134 + static void notrace mxc_update_sched_clock(void) 135 + { 136 + cycle_t cyc = clocksource_mxc.read(&clocksource_mxc); 137 + update_sched_clock(&cd, cyc, (u32)~0); 138 + } 131 139 132 140 static int __init mxc_clocksource_init(struct clk *timer_clk) 133 141 { ··· 149 129 150 130 if (timer_is_v2()) 151 131 clocksource_mxc.read = v2_get_cycles; 132 + else 133 + clocksource_mxc.read = mx1_2_get_cycles; 152 134 135 + init_sched_clock(&cd, mxc_update_sched_clock, 32, c); 153 136 clocksource_register_hz(&clocksource_mxc, c); 154 137 155 138 return 0;
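plat-mxc adopts the same DEFINE_CLOCK_DATA()/init_sched_clock() scaffolding the PXA timer above uses: the clocksource's read hook doubles as the sched_clock() cycle source, and the update callback refreshes the epoch before the 32-bit counter wraps. dummy_get_cycles() exists only so sched_clock() is safe to call before mxc_clocksource_init() installs the real read function. The general shape for any 32-bit free-running counter (demo_read_counter() is a placeholder):

#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);

unsigned long long notrace sched_clock(void)
{
	u32 cyc = demo_read_counter();	/* raw 32-bit counter value */

	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}

static void notrace demo_update_sched_clock(void)
{
	update_sched_clock(&cd, demo_read_counter(), (u32)~0);
}

static void __init demo_sched_clock_init(unsigned long rate)
{
	/* rate is the counter frequency in Hz */
	init_sched_clock(&cd, demo_update_sched_clock, 32, rate);
}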
+2 -2
arch/arm/plat-mxc/tzic.c
··· 167 167 /* all IRQ no FIQ Warning :: No selection */ 168 168 169 169 for (i = 0; i < MXC_INTERNAL_IRQS; i++) { 170 - set_irq_chip(i, &mxc_tzic_chip.base); 171 - set_irq_handler(i, handle_level_irq); 170 + irq_set_chip_and_handler(i, &mxc_tzic_chip.base, 171 + handle_level_irq); 172 172 set_irq_flags(i, IRQF_VALID); 173 173 } 174 174
+27 -30
arch/arm/plat-nomadik/gpio.c
··· 54 54 u32 rwimsc; 55 55 u32 fwimsc; 56 56 u32 slpm; 57 + u32 enabled; 57 58 }; 58 59 59 60 static struct nmk_gpio_chip * ··· 319 318 struct nmk_gpio_chip *nmk_chip; 320 319 int pin = PIN_NUM(cfgs[i]); 321 320 322 - nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(pin)); 321 + nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(pin)); 323 322 if (!nmk_chip) { 324 323 ret = -EINVAL; 325 324 break; ··· 398 397 struct nmk_gpio_chip *nmk_chip; 399 398 unsigned long flags; 400 399 401 - nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 400 + nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 402 401 if (!nmk_chip) 403 402 return -EINVAL; 404 403 ··· 431 430 struct nmk_gpio_chip *nmk_chip; 432 431 unsigned long flags; 433 432 434 - nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 433 + nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 435 434 if (!nmk_chip) 436 435 return -EINVAL; 437 436 ··· 457 456 struct nmk_gpio_chip *nmk_chip; 458 457 unsigned long flags; 459 458 460 - nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 459 + nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 461 460 if (!nmk_chip) 462 461 return -EINVAL; 463 462 ··· 474 473 struct nmk_gpio_chip *nmk_chip; 475 474 u32 afunc, bfunc, bit; 476 475 477 - nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 476 + nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); 478 477 if (!nmk_chip) 479 478 return -EINVAL; 480 479 ··· 542 541 static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, 543 542 int gpio, bool on) 544 543 { 545 - #ifdef CONFIG_ARCH_U8500 546 - if (cpu_is_u8500v2()) { 547 - __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base, 548 - on ? NMK_GPIO_SLPM_WAKEUP_ENABLE 549 - : NMK_GPIO_SLPM_WAKEUP_DISABLE); 550 - } 551 - #endif 552 544 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on); 553 545 } 554 546 ··· 557 563 bitmask = nmk_gpio_get_bitmask(gpio); 558 564 if (!nmk_chip) 559 565 return -EINVAL; 566 + 567 + if (enable) 568 + nmk_chip->enabled |= bitmask; 569 + else 570 + nmk_chip->enabled &= ~bitmask; 560 571 561 572 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); 562 573 spin_lock(&nmk_chip->lock); ··· 589 590 590 591 static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on) 591 592 { 592 - struct irq_desc *desc = irq_to_desc(d->irq); 593 - bool enabled = !(desc->status & IRQ_DISABLED); 594 593 struct nmk_gpio_chip *nmk_chip; 595 594 unsigned long flags; 596 595 u32 bitmask; ··· 603 606 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); 604 607 spin_lock(&nmk_chip->lock); 605 608 606 - if (!enabled) 609 + if (!(nmk_chip->enabled & bitmask)) 607 610 __nmk_gpio_set_wake(nmk_chip, gpio, on); 608 611 609 612 if (on) ··· 619 622 620 623 static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type) 621 624 { 622 - struct irq_desc *desc = irq_to_desc(d->irq); 623 - bool enabled = !(desc->status & IRQ_DISABLED); 624 - bool wake = desc->wake_depth; 625 + bool enabled, wake = irqd_is_wakeup_set(d); 625 626 int gpio; 626 627 struct nmk_gpio_chip *nmk_chip; 627 628 unsigned long flags; ··· 635 640 return -EINVAL; 636 641 if (type & IRQ_TYPE_LEVEL_LOW) 637 642 return -EINVAL; 643 + 644 + enabled = nmk_chip->enabled & bitmask; 638 645 639 646 spin_lock_irqsave(&nmk_chip->lock, flags); 640 647 ··· 678 681 u32 status) 679 682 { 680 683 struct nmk_gpio_chip *nmk_chip; 681 - struct irq_chip *host_chip = get_irq_chip(irq); 684 + struct irq_chip *host_chip = irq_get_chip(irq); 682 685 unsigned int first_irq; 683 686 684 687 if (host_chip->irq_mask_ack) ··· 689 
692 host_chip->irq_ack(&desc->irq_data); 690 693 } 691 694 692 - nmk_chip = get_irq_data(irq); 695 + nmk_chip = irq_get_handler_data(irq); 693 696 first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); 694 697 while (status) { 695 698 int bit = __ffs(status); ··· 703 706 704 707 static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 705 708 { 706 - struct nmk_gpio_chip *nmk_chip = get_irq_data(irq); 709 + struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq); 707 710 u32 status = readl(nmk_chip->addr + NMK_GPIO_IS); 708 711 709 712 __nmk_gpio_irq_handler(irq, desc, status); ··· 712 715 static void nmk_gpio_secondary_irq_handler(unsigned int irq, 713 716 struct irq_desc *desc) 714 717 { 715 - struct nmk_gpio_chip *nmk_chip = get_irq_data(irq); 718 + struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq); 716 719 u32 status = nmk_chip->get_secondary_status(nmk_chip->bank); 717 720 718 721 __nmk_gpio_irq_handler(irq, desc, status); ··· 725 728 726 729 first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); 727 730 for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) { 728 - set_irq_chip(i, &nmk_gpio_irq_chip); 729 - set_irq_handler(i, handle_edge_irq); 731 + irq_set_chip_and_handler(i, &nmk_gpio_irq_chip, 732 + handle_edge_irq); 730 733 set_irq_flags(i, IRQF_VALID); 731 - set_irq_chip_data(i, nmk_chip); 732 - set_irq_type(i, IRQ_TYPE_EDGE_FALLING); 734 + irq_set_chip_data(i, nmk_chip); 735 + irq_set_irq_type(i, IRQ_TYPE_EDGE_FALLING); 733 736 } 734 737 735 - set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler); 736 - set_irq_data(nmk_chip->parent_irq, nmk_chip); 738 + irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler); 739 + irq_set_handler_data(nmk_chip->parent_irq, nmk_chip); 737 740 738 741 if (nmk_chip->secondary_parent_irq >= 0) { 739 - set_irq_chained_handler(nmk_chip->secondary_parent_irq, 742 + irq_set_chained_handler(nmk_chip->secondary_parent_irq, 740 743 nmk_gpio_secondary_irq_handler); 741 - set_irq_data(nmk_chip->secondary_parent_irq, nmk_chip); 744 + irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip); 742 745 } 743 746 744 747 return 0;
+11 -20
arch/arm/plat-omap/gpio.c
··· 755 755 bank = irq_data_get_irq_chip_data(d); 756 756 spin_lock_irqsave(&bank->lock, flags); 757 757 retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type); 758 - if (retval == 0) { 759 - struct irq_desc *desc = irq_to_desc(d->irq); 760 - 761 - desc->status &= ~IRQ_TYPE_SENSE_MASK; 762 - desc->status |= type; 763 - } 764 758 spin_unlock_irqrestore(&bank->lock, flags); 765 759 766 760 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 767 - __set_irq_handler_unlocked(d->irq, handle_level_irq); 761 + __irq_set_handler_locked(d->irq, handle_level_irq); 768 762 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 769 - __set_irq_handler_unlocked(d->irq, handle_edge_irq); 763 + __irq_set_handler_locked(d->irq, handle_edge_irq); 770 764 771 765 return retval; 772 766 } ··· 1140 1146 1141 1147 desc->irq_data.chip->irq_ack(&desc->irq_data); 1142 1148 1143 - bank = get_irq_data(irq); 1149 + bank = irq_get_handler_data(irq); 1144 1150 #ifdef CONFIG_ARCH_OMAP1 1145 1151 if (bank->method == METHOD_MPUIO) 1146 1152 isr_reg = bank->base + ··· 1264 1270 unsigned int gpio = d->irq - IH_GPIO_BASE; 1265 1271 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 1266 1272 unsigned int irq_mask = 1 << get_gpio_index(gpio); 1267 - struct irq_desc *desc = irq_to_desc(d->irq); 1268 - u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK; 1273 + u32 trigger = irqd_get_trigger_type(d); 1269 1274 1270 1275 if (trigger) 1271 1276 _set_gpio_triggering(bank, get_gpio_index(gpio), trigger); ··· 1665 1672 1666 1673 for (j = bank->virtual_irq_start; 1667 1674 j < bank->virtual_irq_start + bank_width; j++) { 1668 - struct irq_desc *d = irq_to_desc(j); 1669 - 1670 - lockdep_set_class(&d->lock, &gpio_lock_class); 1671 - set_irq_chip_data(j, bank); 1675 + irq_set_lockdep_class(j, &gpio_lock_class); 1676 + irq_set_chip_data(j, bank); 1672 1677 if (bank_is_mpuio(bank)) 1673 - set_irq_chip(j, &mpuio_irq_chip); 1678 + irq_set_chip(j, &mpuio_irq_chip); 1674 1679 else 1675 - set_irq_chip(j, &gpio_irq_chip); 1676 - set_irq_handler(j, handle_simple_irq); 1680 + irq_set_chip(j, &gpio_irq_chip); 1681 + irq_set_handler(j, handle_simple_irq); 1677 1682 set_irq_flags(j, IRQF_VALID); 1678 1683 } 1679 - set_irq_chained_handler(bank->irq, gpio_irq_handler); 1680 - set_irq_data(bank->irq, bank); 1684 + irq_set_chained_handler(bank->irq, gpio_irq_handler); 1685 + irq_set_handler_data(bank->irq, bank); 1681 1686 } 1682 1687 1683 1688 static int __devinit omap_gpio_probe(struct platform_device *pdev)
+13 -18
arch/arm/plat-orion/gpio.c
··· 324 324 static void gpio_irq_ack(struct irq_data *d) 325 325 { 326 326 struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); 327 - int type; 327 + int type = irqd_get_trigger_type(d); 328 328 329 - type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK; 330 329 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { 331 330 int pin = d->irq - ochip->secondary_irq_base; 332 331 ··· 336 337 static void gpio_irq_mask(struct irq_data *d) 337 338 { 338 339 struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); 339 - int type; 340 + int type = irqd_get_trigger_type(d); 340 341 void __iomem *reg; 341 342 int pin; 342 343 343 - type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK; 344 344 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 345 345 reg = GPIO_EDGE_MASK(ochip); 346 346 else ··· 353 355 static void gpio_irq_unmask(struct irq_data *d) 354 356 { 355 357 struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); 356 - int type; 358 + int type = irqd_get_trigger_type(d); 357 359 void __iomem *reg; 358 360 int pin; 359 361 360 - type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK; 361 362 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 362 363 reg = GPIO_EDGE_MASK(ochip); 363 364 else ··· 386 389 * Set edge/level type. 387 390 */ 388 391 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { 389 - set_irq_handler(d->irq, handle_edge_irq); 392 + __irq_set_handler_locked(d->irq, handle_edge_irq); 390 393 } else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { 391 - set_irq_handler(d->irq, handle_level_irq); 394 + __irq_set_handler_locked(d->irq, handle_level_irq); 392 395 } else { 393 396 printk(KERN_ERR "failed to set irq=%d (type=%d)\n", 394 397 d->irq, type); ··· 474 477 for (i = 0; i < ngpio; i++) { 475 478 unsigned int irq = secondary_irq_base + i; 476 479 477 - set_irq_chip(irq, &orion_gpio_irq_chip); 478 - set_irq_handler(irq, handle_level_irq); 479 - set_irq_chip_data(irq, ochip); 480 - irq_desc[irq].status |= IRQ_LEVEL; 480 + irq_set_chip_and_handler(irq, &orion_gpio_irq_chip, 481 + handle_level_irq); 482 + irq_set_chip_data(irq, ochip); 483 + irq_set_status_flags(irq, IRQ_LEVEL); 481 484 set_irq_flags(irq, IRQF_VALID); 482 485 } 483 486 } ··· 485 488 void orion_gpio_irq_handler(int pinoff) 486 489 { 487 490 struct orion_gpio_chip *ochip; 488 - u32 cause; 491 + u32 cause, type; 489 492 int i; 490 493 491 494 ochip = orion_gpio_chip_find(pinoff); ··· 497 500 498 501 for (i = 0; i < ochip->chip.ngpio; i++) { 499 502 int irq; 500 - struct irq_desc *desc; 501 503 502 504 irq = ochip->secondary_irq_base + i; 503 505 504 506 if (!(cause & (1 << i))) 505 507 continue; 506 508 507 - desc = irq_desc + irq; 508 - if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { 509 + type = irqd_get_trigger_type(irq_get_irq_data(irq)); 510 + if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { 509 511 /* Swap polarity (race with GPIO line) */ 510 512 u32 polarity; 511 513 ··· 512 516 polarity ^= 1 << i; 513 517 writel(polarity, GPIO_IN_POL(ochip)); 514 518 } 515 - 516 - desc_handle_irq(irq, desc); 519 + generic_handle_irq(irq); 517 520 } 518 521 }
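plat-orion makes the same retreat from irq_desc[] internals seen above: the trigger type now comes from the irq_data accessors, IRQ_LEVEL is set through irq_set_status_flags(), and the demux ends in generic_handle_irq() rather than desc_handle_irq(). The accessor pattern, sketched with placeholder register names:

#include <linux/irq.h>
#include <linux/io.h>

static void demo_irq_mask(struct irq_data *d)
{
	u32 type = irqd_get_trigger_type(d);	/* no irq_desc[] peeking */
	void __iomem *reg;

	/* edge and level interrupts are masked in different registers */
	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		reg = DEMO_EDGE_MASK_REG;	/* placeholder */
	else
		reg = DEMO_LEVEL_MASK_REG;	/* placeholder */

	writel(readl(reg) & ~(1 << (d->irq - DEMO_IRQ_BASE)), reg);
}

/* outside irq_chip callbacks, reach the irq_data via the irq number:
 *	irqd_get_trigger_type(irq_get_irq_data(irq));
 */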
+4 -4
arch/arm/plat-orion/irq.c
··· 56 56 for (i = 0; i < 32; i++) { 57 57 unsigned int irq = irq_start + i; 58 58 59 - set_irq_chip(irq, &orion_irq_chip); 60 - set_irq_chip_data(irq, maskaddr); 61 - set_irq_handler(irq, handle_level_irq); 62 - irq_desc[irq].status |= IRQ_LEVEL; 59 + irq_set_chip_and_handler(irq, &orion_irq_chip, 60 + handle_level_irq); 61 + irq_set_chip_data(irq, maskaddr); 62 + irq_set_status_flags(irq, IRQ_LEVEL); 63 63 set_irq_flags(irq, IRQF_VALID); 64 64 } 65 65 }
+3 -3
arch/arm/plat-pxa/gpio.c
··· 284 284 } 285 285 286 286 for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) { 287 - set_irq_chip(irq, &pxa_muxed_gpio_chip); 288 - set_irq_handler(irq, handle_edge_irq); 287 + irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip, 288 + handle_edge_irq); 289 289 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 290 290 } 291 291 292 292 /* Install handler for GPIO>=2 edge detect interrupts */ 293 - set_irq_chained_handler(mux_irq, pxa_gpio_demux_handler); 293 + irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler); 294 294 pxa_muxed_gpio_chip.irq_set_wake = fn; 295 295 } 296 296
+21 -22
arch/arm/plat-s3c24xx/irq.c
··· 592 592 case IRQ_UART1: 593 593 case IRQ_UART2: 594 594 case IRQ_ADCPARENT: 595 - set_irq_chip(irqno, &s3c_irq_level_chip); 596 - set_irq_handler(irqno, handle_level_irq); 595 + irq_set_chip_and_handler(irqno, &s3c_irq_level_chip, 596 + handle_level_irq); 597 597 break; 598 598 599 599 case IRQ_RESERVED6: ··· 603 603 604 604 default: 605 605 //irqdbf("registering irq %d (s3c irq)\n", irqno); 606 - set_irq_chip(irqno, &s3c_irq_chip); 607 - set_irq_handler(irqno, handle_edge_irq); 606 + irq_set_chip_and_handler(irqno, &s3c_irq_chip, 607 + handle_edge_irq); 608 608 set_irq_flags(irqno, IRQF_VALID); 609 609 } 610 610 } 611 611 612 612 /* setup the cascade irq handlers */ 613 613 614 - set_irq_chained_handler(IRQ_EINT4t7, s3c_irq_demux_extint4t7); 615 - set_irq_chained_handler(IRQ_EINT8t23, s3c_irq_demux_extint8); 614 + irq_set_chained_handler(IRQ_EINT4t7, s3c_irq_demux_extint4t7); 615 + irq_set_chained_handler(IRQ_EINT8t23, s3c_irq_demux_extint8); 616 616 617 - set_irq_chained_handler(IRQ_UART0, s3c_irq_demux_uart0); 618 - set_irq_chained_handler(IRQ_UART1, s3c_irq_demux_uart1); 619 - set_irq_chained_handler(IRQ_UART2, s3c_irq_demux_uart2); 620 - set_irq_chained_handler(IRQ_ADCPARENT, s3c_irq_demux_adc); 617 + irq_set_chained_handler(IRQ_UART0, s3c_irq_demux_uart0); 618 + irq_set_chained_handler(IRQ_UART1, s3c_irq_demux_uart1); 619 + irq_set_chained_handler(IRQ_UART2, s3c_irq_demux_uart2); 620 + irq_set_chained_handler(IRQ_ADCPARENT, s3c_irq_demux_adc); 621 621 622 622 /* external interrupts */ 623 623 624 624 for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) { 625 625 irqdbf("registering irq %d (ext int)\n", irqno); 626 - set_irq_chip(irqno, &s3c_irq_eint0t4); 627 - set_irq_handler(irqno, handle_edge_irq); 626 + irq_set_chip_and_handler(irqno, &s3c_irq_eint0t4, 627 + handle_edge_irq); 628 628 set_irq_flags(irqno, IRQF_VALID); 629 629 } 630 630 631 631 for (irqno = IRQ_EINT4; irqno <= IRQ_EINT23; irqno++) { 632 632 irqdbf("registering irq %d (extended s3c irq)\n", irqno); 633 - set_irq_chip(irqno, &s3c_irqext_chip); 634 - set_irq_handler(irqno, handle_edge_irq); 633 + irq_set_chip_and_handler(irqno, &s3c_irqext_chip, 634 + handle_edge_irq); 635 635 set_irq_flags(irqno, IRQF_VALID); 636 636 } 637 637 ··· 641 641 642 642 for (irqno = IRQ_S3CUART_RX0; irqno <= IRQ_S3CUART_ERR0; irqno++) { 643 643 irqdbf("registering irq %d (s3c uart0 irq)\n", irqno); 644 - set_irq_chip(irqno, &s3c_irq_uart0); 645 - set_irq_handler(irqno, handle_level_irq); 644 + irq_set_chip_and_handler(irqno, &s3c_irq_uart0, 645 + handle_level_irq); 646 646 set_irq_flags(irqno, IRQF_VALID); 647 647 } 648 648 649 649 for (irqno = IRQ_S3CUART_RX1; irqno <= IRQ_S3CUART_ERR1; irqno++) { 650 650 irqdbf("registering irq %d (s3c uart1 irq)\n", irqno); 651 - set_irq_chip(irqno, &s3c_irq_uart1); 652 - set_irq_handler(irqno, handle_level_irq); 651 + irq_set_chip_and_handler(irqno, &s3c_irq_uart1, 652 + handle_level_irq); 653 653 set_irq_flags(irqno, IRQF_VALID); 654 654 } 655 655 656 656 for (irqno = IRQ_S3CUART_RX2; irqno <= IRQ_S3CUART_ERR2; irqno++) { 657 657 irqdbf("registering irq %d (s3c uart2 irq)\n", irqno); 658 - set_irq_chip(irqno, &s3c_irq_uart2); 659 - set_irq_handler(irqno, handle_level_irq); 658 + irq_set_chip_and_handler(irqno, &s3c_irq_uart2, 659 + handle_level_irq); 660 660 set_irq_flags(irqno, IRQF_VALID); 661 661 } 662 662 663 663 for (irqno = IRQ_TC; irqno <= IRQ_ADC; irqno++) { 664 664 irqdbf("registering irq %d (s3c adc irq)\n", irqno); 665 - set_irq_chip(irqno, &s3c_irq_adc); 666 - set_irq_handler(irqno, handle_edge_irq); 665 + irq_set_chip_and_handler(irqno, &s3c_irq_adc, handle_edge_irq); 667 666 set_irq_flags(irqno, IRQF_VALID); 668 667 } 669 668
+3 -4
arch/arm/plat-s5p/irq-eint.c
··· 205 205 int irq; 206 206 207 207 for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++) 208 - set_irq_chip(irq, &s5p_irq_vic_eint); 208 + irq_set_chip(irq, &s5p_irq_vic_eint); 209 209 210 210 for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) { 211 - set_irq_chip(irq, &s5p_irq_eint); 212 - set_irq_handler(irq, handle_level_irq); 211 + irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq); 213 212 set_irq_flags(irq, IRQF_VALID); 214 213 } 215 214 216 - set_irq_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31); 215 + irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31); 217 216 return 0; 218 217 } 219 218
+11 -11
arch/arm/plat-s5p/irq-gpioint.c
··· 43 43 44 44 static int s5p_gpioint_get_offset(struct irq_data *data) 45 45 { 46 - struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); 46 + struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); 47 47 return data->irq - chip->irq_base; 48 48 } 49 49 50 50 static void s5p_gpioint_ack(struct irq_data *data) 51 51 { 52 - struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); 52 + struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); 53 53 int group, offset, pend_offset; 54 54 unsigned int value; 55 55 ··· 64 64 65 65 static void s5p_gpioint_mask(struct irq_data *data) 66 66 { 67 - struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); 67 + struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); 68 68 int group, offset, mask_offset; 69 69 unsigned int value; 70 70 ··· 79 79 80 80 static void s5p_gpioint_unmask(struct irq_data *data) 81 81 { 82 - struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); 82 + struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); 83 83 int group, offset, mask_offset; 84 84 unsigned int value; 85 85 ··· 100 100 101 101 static int s5p_gpioint_set_type(struct irq_data *data, unsigned int type) 102 102 { 103 - struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); 103 + struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); 104 104 int group, offset, con_offset; 105 105 unsigned int value; 106 106 ··· 149 149 150 150 static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc) 151 151 { 152 - struct s5p_gpioint_bank *bank = get_irq_data(irq); 152 + struct s5p_gpioint_bank *bank = irq_get_handler_data(irq); 153 153 int group, pend_offset, mask_offset; 154 154 unsigned int pend, mask; 155 155 ··· 200 200 if (!bank->chips) 201 201 return -ENOMEM; 202 202 203 - set_irq_chained_handler(bank->irq, s5p_gpioint_handler); 204 - set_irq_data(bank->irq, bank); 203 + irq_set_chained_handler(bank->irq, s5p_gpioint_handler); 204 + irq_set_handler_data(bank->irq, bank); 205 205 bank->handler = s5p_gpioint_handler; 206 206 printk(KERN_INFO "Registered chained gpio int handler for interrupt %d.\n", 207 207 bank->irq); ··· 219 219 bank->chips[group - bank->start] = chip; 220 220 for (i = 0; i < chip->chip.ngpio; i++) { 221 221 irq = chip->irq_base + i; 222 - set_irq_chip(irq, &s5p_gpioint); 223 - set_irq_data(irq, chip); 224 - set_irq_handler(irq, handle_level_irq); 222 + irq_set_chip(irq, &s5p_gpioint); 223 + irq_set_handler_data(irq, chip); 224 + irq_set_handler(irq, handle_level_irq); 225 225 set_irq_flags(irq, IRQF_VALID); 226 226 } 227 227 return 0;
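The s5p-gpioint conversion also shows that genirq keeps two distinct per-IRQ cookies, which the old set_irq_data()/get_irq_data() names blurred: chip_data is for the irq_chip's own callbacks, handler_data for flow handlers such as a chained demux. A sketch of where each one is read back, with hypothetical types and a hypothetical my_pending_bit():

	struct my_bank { unsigned int child_irq_base; };
	struct my_priv { unsigned int irq_base; void __iomem *mask_reg; };

	static void my_demux(unsigned int parent_irq, struct irq_desc *desc)
	{
		/* attached with irq_set_handler_data(parent_irq, bank) */
		struct my_bank *bank = irq_get_handler_data(parent_irq);

		generic_handle_irq(bank->child_irq_base + my_pending_bit(bank));
	}

	static void my_mask(struct irq_data *d)
	{
		/* attached with irq_set_chip_data(d->irq, priv) */
		struct my_priv *priv = irq_data_get_irq_chip_data(d);

		writel(1 << (d->irq - priv->irq_base), priv->mask_reg);
	}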
+4 -6
arch/arm/plat-samsung/irq-uart.c
··· 107 107 108 108 static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) 109 109 { 110 - struct irq_desc *desc = irq_to_desc(uirq->parent_irq); 111 110 void __iomem *reg_base = uirq->regs; 112 111 unsigned int irq; 113 112 int offs; ··· 117 118 for (offs = 0; offs < 3; offs++) { 118 119 irq = uirq->base_irq + offs; 119 120 120 - set_irq_chip(irq, &s3c_irq_uart); 121 - set_irq_chip_data(irq, uirq); 122 - set_irq_handler(irq, handle_level_irq); 121 + irq_set_chip_and_handler(irq, &s3c_irq_uart, handle_level_irq); 122 + irq_set_chip_data(irq, uirq); 123 123 set_irq_flags(irq, IRQF_VALID); 124 124 } 125 125 126 - desc->irq_data.handler_data = uirq; 127 - set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart); 126 + irq_set_handler_data(uirq->parent_irq, uirq); 127 + irq_set_chained_handler(uirq->parent_irq, s3c_irq_demux_uart); 128 128 } 129 129 130 130 /**
+4 -7
arch/arm/plat-samsung/irq-vic-timer.c
··· 77 77 void __init s3c_init_vic_timer_irq(unsigned int parent_irq, 78 78 unsigned int timer_irq) 79 79 { 80 - struct irq_desc *desc = irq_to_desc(parent_irq); 81 80 82 - set_irq_chained_handler(parent_irq, s3c_irq_demux_vic_timer); 81 + irq_set_chained_handler(parent_irq, s3c_irq_demux_vic_timer); 82 + irq_set_handler_data(parent_irq, (void *)timer_irq); 83 83 84 - set_irq_chip(timer_irq, &s3c_irq_timer); 85 - set_irq_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0))); 86 - set_irq_handler(timer_irq, handle_level_irq); 84 + irq_set_chip_and_handler(timer_irq, &s3c_irq_timer, handle_level_irq); 85 + irq_set_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0))); 87 86 set_irq_flags(timer_irq, IRQF_VALID); 88 - 89 - desc->irq_data.handler_data = (void *)timer_irq; 90 87 }
+4 -4
arch/arm/plat-samsung/wakeup-mask.c
··· 22 22 void samsung_sync_wakemask(void __iomem *reg, 23 23 struct samsung_wakeup_mask *mask, int nr_mask) 24 24 { 25 - struct irq_desc *desc; 25 + struct irq_data *data; 26 26 u32 val; 27 27 28 28 val = __raw_readl(reg); ··· 33 33 continue; 34 34 } 35 35 36 - desc = irq_to_desc(mask->irq); 36 + data = irq_get_irq_data(mask->irq); 37 37 38 - /* bit of a liberty to read this directly from irq_desc. */ 39 - if (desc->wake_depth > 0) 38 + /* bit of a liberty to read this directly from irq_data. */ 39 + if (irqd_is_wakeup_set(data)) 40 40 val &= ~mask->bit; 41 41 else 42 42 val |= mask->bit;
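The wakeup-mask change replaces a read of the private wake_depth counter with irqd_is_wakeup_set(), the supported query for state the core maintains when a driver calls enable_irq_wake()/irq_set_irq_wake() on the line. The shape of the check in isolation, where val and mask_bit mirror the locals of the function above:

	bool wake = irqd_is_wakeup_set(irq_get_irq_data(irq));

	if (wake)
		val &= ~mask_bit;	/* keep this source able to wake us */
	else
		val |= mask_bit;	/* safe to gate in the PM wakeup register */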
+6 -6
arch/arm/plat-spear/shirq.c
··· 68 68 static void shirq_handler(unsigned irq, struct irq_desc *desc) 69 69 { 70 70 u32 i, val, mask; 71 - struct spear_shirq *shirq = get_irq_data(irq); 71 + struct spear_shirq *shirq = irq_get_handler_data(irq); 72 72 73 73 desc->irq_data.chip->irq_ack(&desc->irq_data); 74 74 while ((val = readl(shirq->regs.base + shirq->regs.status_reg) & ··· 105 105 if (!shirq->dev_count) 106 106 return -EINVAL; 107 107 108 - set_irq_chained_handler(shirq->irq, shirq_handler); 108 + irq_set_chained_handler(shirq->irq, shirq_handler); 109 109 for (i = 0; i < shirq->dev_count; i++) { 110 - set_irq_chip(shirq->dev_config[i].virq, &shirq_chip); 111 - set_irq_handler(shirq->dev_config[i].virq, handle_simple_irq); 110 + irq_set_chip_and_handler(shirq->dev_config[i].virq, 111 + &shirq_chip, handle_simple_irq); 112 112 set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID); 113 - set_irq_chip_data(shirq->dev_config[i].virq, shirq); 113 + irq_set_chip_data(shirq->dev_config[i].virq, shirq); 114 114 } 115 115 116 - set_irq_data(shirq->irq, shirq); 116 + irq_set_handler_data(shirq->irq, shirq); 117 117 return 0; 118 118 }
+1 -2
arch/arm/plat-stmp3xxx/irq.c
··· 35 35 /* Disable all interrupts initially */ 36 36 for (i = 0; i < NR_REAL_IRQS; i++) { 37 37 chip->irq_mask(irq_get_irq_data(i)); 38 - set_irq_chip(i, chip); 39 - set_irq_handler(i, handle_level_irq); 38 + irq_set_chip_and_handler(i, chip, handle_level_irq); 40 39 set_irq_flags(i, IRQF_VALID | IRQF_PROBE); 41 40 } 42 41
+6 -7
arch/arm/plat-stmp3xxx/pinmux.c
··· 489 489 490 490 static void stmp3xxx_gpio_irq(u32 irq, struct irq_desc *desc) 491 491 { 492 - struct stmp3xxx_pinmux_bank *pm = get_irq_data(irq); 492 + struct stmp3xxx_pinmux_bank *pm = irq_get_handler_data(irq); 493 493 int gpio_irq = pm->virq; 494 494 u32 stat = __raw_readl(pm->irqstat); 495 495 496 496 while (stat) { 497 497 if (stat & 1) 498 - irq_desc[gpio_irq].handle_irq(gpio_irq, 499 - &irq_desc[gpio_irq]); 498 + generic_handle_irq(gpio_irq); 500 499 gpio_irq++; 501 500 stat >>= 1; 502 501 } ··· 533 534 534 535 for (virq = pm->virq; virq < pm->virq; virq++) { 535 536 gpio_irq_chip.irq_mask(irq_get_irq_data(virq)); 536 - set_irq_chip(virq, &gpio_irq_chip); 537 - set_irq_handler(virq, handle_level_irq); 537 + irq_set_chip_and_handler(virq, &gpio_irq_chip, 538 + handle_level_irq); 538 539 set_irq_flags(virq, IRQF_VALID); 539 540 } 540 541 r = gpiochip_add(&pm->chip); 541 542 if (r < 0) 542 543 break; 543 - set_irq_chained_handler(pm->irq, stmp3xxx_gpio_irq); 544 - set_irq_data(pm->irq, pm); 544 + irq_set_chained_handler(pm->irq, stmp3xxx_gpio_irq); 545 + irq_set_handler_data(pm->irq, pm); 545 546 } 546 547 return r; 547 548 }
+6 -6
arch/arm/plat-versatile/fpga-irq.c
··· 30 30 31 31 static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc) 32 32 { 33 - struct fpga_irq_data *f = get_irq_desc_data(desc); 33 + struct fpga_irq_data *f = irq_desc_get_handler_data(desc); 34 34 u32 status = readl(f->base + IRQ_STATUS); 35 35 36 36 if (status == 0) { ··· 55 55 f->chip.irq_unmask = fpga_irq_unmask; 56 56 57 57 if (parent_irq != -1) { 58 - set_irq_data(parent_irq, f); 59 - set_irq_chained_handler(parent_irq, fpga_irq_handle); 58 + irq_set_handler_data(parent_irq, f); 59 + irq_set_chained_handler(parent_irq, fpga_irq_handle); 60 60 } 61 61 62 62 for (i = 0; i < 32; i++) { 63 63 if (valid & (1 << i)) { 64 64 unsigned int irq = f->irq_start + i; 65 65 66 - set_irq_chip_data(irq, f); 67 - set_irq_chip(irq, &f->chip); 68 - set_irq_handler(irq, handle_level_irq); 66 + irq_set_chip_data(irq, f); 67 + irq_set_chip_and_handler(irq, &f->chip, 68 + handle_level_irq); 69 69 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 70 70 } 71 71 }
-1
arch/avr32/Kconfig
··· 10 10 select GENERIC_IRQ_PROBE 11 11 select HARDIRQS_SW_RESEND 12 12 select GENERIC_IRQ_SHOW 13 - select GENERIC_HARDIRQS_NO_DEPRECATED 14 13 help 15 14 AVR32 is a high-performance 32-bit RISC microprocessor core, 16 15 designed for cost-sensitive embedded applications, with particular
+1 -1
arch/avr32/mach-at32ap/pio.c
··· 282 282 283 283 static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) 284 284 { 285 - struct pio_device *pio = get_irq_desc_chip_data(desc); 285 + struct pio_device *pio = irq_desc_get_chip_data(desc); 286 286 unsigned gpio_irq; 287 287 288 288 gpio_irq = (unsigned) irq_get_handler_data(irq);
-1
arch/blackfin/Kconfig
··· 34 34 select GENERIC_ATOMIC64 35 35 select GENERIC_IRQ_PROBE 36 36 select IRQ_PER_CPU if SMP 37 - select GENERIC_HARDIRQS_NO_DEPRECATED 38 37 39 38 config GENERIC_CSUM 40 39 def_bool y
-1
arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
··· 46 46 # CONFIG_WIRELESS is not set 47 47 CONFIG_BLK_DEV_LOOP=y 48 48 CONFIG_BLK_DEV_RAM=y 49 - # CONFIG_MISC_DEVICES is not set 50 49 # CONFIG_INPUT_MOUSEDEV is not set 51 50 CONFIG_INPUT_EVDEV=y 52 51 # CONFIG_INPUT_KEYBOARD is not set
-1
arch/blackfin/configs/BF538-EZKIT_defconfig
··· 70 70 CONFIG_MTD_PHYSMAP=m 71 71 CONFIG_MTD_NAND=m 72 72 CONFIG_BLK_DEV_RAM=y 73 - # CONFIG_MISC_DEVICES is not set 74 73 CONFIG_NETDEVICES=y 75 74 CONFIG_PHYLIB=y 76 75 CONFIG_SMSC_PHY=y
-1
arch/blackfin/configs/BF561-ACVILON_defconfig
··· 63 63 CONFIG_BLK_DEV_RAM=y 64 64 CONFIG_BLK_DEV_RAM_COUNT=2 65 65 CONFIG_BLK_DEV_RAM_SIZE=16384 66 - # CONFIG_MISC_DEVICES is not set 67 66 CONFIG_SCSI=y 68 67 # CONFIG_SCSI_PROC_FS is not set 69 68 CONFIG_BLK_DEV_SD=y
+1
arch/blackfin/configs/BlackStamp_defconfig
··· 58 58 CONFIG_BLK_DEV_LOOP=y 59 59 CONFIG_BLK_DEV_NBD=y 60 60 CONFIG_BLK_DEV_RAM=y 61 + CONFIG_MISC_DEVICES=y 61 62 CONFIG_EEPROM_AT25=y 62 63 CONFIG_NETDEVICES=y 63 64 CONFIG_NET_ETHERNET=y
-1
arch/blackfin/configs/CM-BF527_defconfig
··· 64 64 CONFIG_MTD_COMPLEX_MAPPINGS=y 65 65 CONFIG_MTD_GPIO_ADDR=y 66 66 CONFIG_BLK_DEV_RAM=y 67 - # CONFIG_MISC_DEVICES is not set 68 67 CONFIG_SCSI=y 69 68 CONFIG_BLK_DEV_SD=y 70 69 # CONFIG_SCSI_LOWLEVEL is not set
-1
arch/blackfin/configs/CM-BF533_defconfig
··· 44 44 CONFIG_MTD_CFI_INTELEXT=y 45 45 CONFIG_MTD_RAM=y 46 46 CONFIG_MTD_PHYSMAP=y 47 - # CONFIG_MISC_DEVICES is not set 48 47 CONFIG_NETDEVICES=y 49 48 # CONFIG_NETDEV_1000 is not set 50 49 # CONFIG_NETDEV_10000 is not set
-1
arch/blackfin/configs/CM-BF548_defconfig
··· 63 63 CONFIG_MTD_COMPLEX_MAPPINGS=y 64 64 CONFIG_MTD_PHYSMAP=y 65 65 CONFIG_BLK_DEV_RAM=y 66 - # CONFIG_MISC_DEVICES is not set 67 66 CONFIG_SCSI=m 68 67 CONFIG_BLK_DEV_SD=m 69 68 # CONFIG_SCSI_LOWLEVEL is not set
-1
arch/blackfin/configs/DNP5370_defconfig
··· 55 55 CONFIG_MTD_NAND_PLATFORM=y 56 56 CONFIG_BLK_DEV_LOOP=y 57 57 CONFIG_BLK_DEV_RAM=y 58 - # CONFIG_MISC_DEVICES is not set 59 58 CONFIG_NETDEVICES=y 60 59 CONFIG_DAVICOM_PHY=y 61 60 CONFIG_NET_ETHERNET=y
+1
arch/blackfin/configs/H8606_defconfig
··· 45 45 CONFIG_MTD_M25P80=y 46 46 # CONFIG_M25PXX_USE_FAST_READ is not set 47 47 CONFIG_BLK_DEV_RAM=y 48 + CONFIG_MISC_DEVICES=y 48 49 CONFIG_EEPROM_AT25=y 49 50 CONFIG_NETDEVICES=y 50 51 CONFIG_NET_ETHERNET=y
+1
arch/blackfin/configs/SRV1_defconfig
··· 48 48 CONFIG_MTD_UCLINUX=y 49 49 CONFIG_MTD_NAND=m 50 50 CONFIG_BLK_DEV_RAM=y 51 + CONFIG_MISC_DEVICES=y 51 52 CONFIG_EEPROM_AT25=m 52 53 CONFIG_NETDEVICES=y 53 54 # CONFIG_NETDEV_1000 is not set
+3 -1
arch/blackfin/include/asm/bitops.h
··· 25 25 #include <asm-generic/bitops/const_hweight.h> 26 26 #include <asm-generic/bitops/lock.h> 27 27 28 - #include <asm-generic/bitops/le.h> 29 28 #include <asm-generic/bitops/ext2-atomic.h> 30 29 31 30 #ifndef CONFIG_SMP ··· 111 112 #undef test_bit 112 113 113 114 #endif /* CONFIG_SMP */ 115 + 116 + /* Needs to be after test_bit and friends */ 117 + #include <asm-generic/bitops/le.h> 114 118 115 119 /* 116 120 * hweightN: returns the hamming weight (i.e. the number
+1 -1
arch/blackfin/kernel/irqchip.c
··· 48 48 seq_printf(p, "%3d: ", i); 49 49 for_each_online_cpu(j) 50 50 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 51 - seq_printf(p, " %8s", get_irq_desc_chip(desc)->name); 51 + seq_printf(p, " %8s", irq_desc_get_chip(desc)->name); 52 52 seq_printf(p, " %s", action->name); 53 53 for (action = action->next; action; action = action->next) 54 54 seq_printf(p, " %s", action->name);
+18 -27
arch/blackfin/kernel/module.c
··· 4 4 * Licensed under the GPL-2 or later 5 5 */ 6 6 7 - #define pr_fmt(fmt) "module %s: " fmt 7 + #define pr_fmt(fmt) "module %s: " fmt, mod->name 8 8 9 9 #include <linux/moduleloader.h> 10 10 #include <linux/elf.h> ··· 57 57 dest = l1_inst_sram_alloc(s->sh_size); 58 58 mod->arch.text_l1 = dest; 59 59 if (dest == NULL) { 60 - pr_err("L1 inst memory allocation failed\n", 61 - mod->name); 60 + pr_err("L1 inst memory allocation failed\n"); 62 61 return -1; 63 62 } 64 63 dma_memcpy(dest, (void *)s->sh_addr, s->sh_size); ··· 69 70 dest = l1_data_sram_alloc(s->sh_size); 70 71 mod->arch.data_a_l1 = dest; 71 72 if (dest == NULL) { 72 - pr_err("L1 data memory allocation failed\n", 73 - mod->name); 73 + pr_err("L1 data memory allocation failed\n"); 74 74 return -1; 75 75 } 76 76 memcpy(dest, (void *)s->sh_addr, s->sh_size); ··· 81 83 dest = l1_data_sram_zalloc(s->sh_size); 82 84 mod->arch.bss_a_l1 = dest; 83 85 if (dest == NULL) { 84 - pr_err("L1 data memory allocation failed\n", 85 - mod->name); 86 + pr_err("L1 data memory allocation failed\n"); 86 87 return -1; 87 88 } 88 89 ··· 90 93 dest = l1_data_B_sram_alloc(s->sh_size); 91 94 mod->arch.data_b_l1 = dest; 92 95 if (dest == NULL) { 93 - pr_err("L1 data memory allocation failed\n", 94 - mod->name); 96 + pr_err("L1 data memory allocation failed\n"); 95 97 return -1; 96 98 } 97 99 memcpy(dest, (void *)s->sh_addr, s->sh_size); ··· 100 104 dest = l1_data_B_sram_alloc(s->sh_size); 101 105 mod->arch.bss_b_l1 = dest; 102 106 if (dest == NULL) { 103 - pr_err("L1 data memory allocation failed\n", 104 - mod->name); 107 + pr_err("L1 data memory allocation failed\n"); 105 108 return -1; 106 109 } 107 110 memset(dest, 0, s->sh_size); ··· 112 117 dest = l2_sram_alloc(s->sh_size); 113 118 mod->arch.text_l2 = dest; 114 119 if (dest == NULL) { 115 - pr_err("L2 SRAM allocation failed\n", 116 - mod->name); 120 + pr_err("L2 SRAM allocation failed\n"); 117 121 return -1; 118 122 } 119 123 memcpy(dest, (void *)s->sh_addr, s->sh_size); ··· 124 130 dest = l2_sram_alloc(s->sh_size); 125 131 mod->arch.data_l2 = dest; 126 132 if (dest == NULL) { 127 - pr_err("L2 SRAM allocation failed\n", 128 - mod->name); 133 + pr_err("L2 SRAM allocation failed\n"); 129 134 return -1; 130 135 } 131 136 memcpy(dest, (void *)s->sh_addr, s->sh_size); ··· 136 143 dest = l2_sram_zalloc(s->sh_size); 137 144 mod->arch.bss_l2 = dest; 138 145 if (dest == NULL) { 139 - pr_err("L2 SRAM allocation failed\n", 140 - mod->name); 146 + pr_err("L2 SRAM allocation failed\n"); 141 147 return -1; 142 148 } 143 149 ··· 152 160 153 161 int 154 162 apply_relocate(Elf_Shdr * sechdrs, const char *strtab, 155 - unsigned int symindex, unsigned int relsec, struct module *me) 163 + unsigned int symindex, unsigned int relsec, struct module *mod) 156 164 { 157 - pr_err(".rel unsupported\n", me->name); 165 + pr_err(".rel unsupported\n"); 158 166 return -ENOEXEC; 159 167 } 160 168 ··· 178 186 Elf32_Sym *sym; 179 187 unsigned long location, value, size; 180 188 181 - pr_debug("applying relocate section %u to %u\n", mod->name, 189 + pr_debug("applying relocate section %u to %u\n", 182 190 relsec, sechdrs[relsec].sh_info); 183 191 184 192 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { ··· 195 203 196 204 #ifdef CONFIG_SMP 197 205 if (location >= COREB_L1_DATA_A_START) { 198 - pr_err("cannot relocate in L1: %u (SMP kernel)", 199 - mod->name, ELF32_R_TYPE(rel[i].r_info)); 206 + pr_err("cannot relocate in L1: %u (SMP kernel)\n", 207 + ELF32_R_TYPE(rel[i].r_info)); 200 208 return -ENOEXEC; 201 209 } 202 210 #endif 203 211 204 212 pr_debug("location is %lx, value is %lx type is %d\n", 205 - mod->name, location, value, ELF32_R_TYPE(rel[i].r_info)); 213 + location, value, ELF32_R_TYPE(rel[i].r_info)); 206 214 207 215 switch (ELF32_R_TYPE(rel[i].r_info)) { 208 216 ··· 222 230 case R_BFIN_PCREL12_JUMP_S: 223 231 case R_BFIN_PCREL10: 224 232 pr_err("unsupported relocation: %u (no -mlong-calls?)\n", 225 - mod->name, ELF32_R_TYPE(rel[i].r_info)); 233 + ELF32_R_TYPE(rel[i].r_info)); 226 234 return -ENOEXEC; 227 235 228 236 default: 229 - pr_err("unknown relocation: %u\n", mod->name, 237 + pr_err("unknown relocation: %u\n", 230 238 ELF32_R_TYPE(rel[i].r_info)); 231 239 return -ENOEXEC; 232 240 } ··· 243 251 isram_memcpy((void *)location, &value, size); 244 252 break; 245 253 default: 246 - pr_err("invalid relocation for %#lx\n", 247 - mod->name, location); 254 + pr_err("invalid relocation for %#lx\n", location); 248 255 return -ENOEXEC; 249 256 } 250 257 }
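The pr_fmt change in this file is worth unpacking: pr_fmt() is pasted into every pr_*() call site, so defining it with a trailing ", mod->name" smuggles an extra printk argument into each message and lets every call drop its explicit mod->name. It is also why apply_relocate()'s parameter is renamed from "me" to "mod": the identifier must be in scope in every function that prints. Roughly how it expands (simplified, not the kernel macro definitions verbatim):

	#define pr_fmt(fmt) "module %s: " fmt, mod->name
	#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

	pr_err("L1 inst memory allocation failed\n");
	/* expands to:
	 * printk(KERN_ERR "module %s: " "L1 inst memory allocation failed\n",
	 *        mod->name);
	 */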
+4 -3
arch/blackfin/kernel/trace.c
··· 912 912 /* if no interrupts are going off, don't print this out */ 913 913 if (fp->ipend & ~0x3F) { 914 914 for (i = 0; i < (NR_IRQS - 1); i++) { 915 + struct irq_desc *desc = irq_to_desc(i); 915 916 if (!in_atomic) 916 - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 917 + raw_spin_lock_irqsave(&desc->lock, flags); 917 918 918 - action = irq_desc[i].action; 919 + action = desc->action; 919 920 if (!action) 920 921 goto unlock; 921 922 ··· 929 928 pr_cont("\n"); 930 929 unlock: 931 930 if (!in_atomic) 932 - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 931 + raw_spin_unlock_irqrestore(&desc->lock, flags); 933 932 } 934 933 } 935 934
+3 -3
arch/blackfin/mach-bf561/smp.c
··· 154 154 void __cpuinit bfin_local_timer_setup(void) 155 155 { 156 156 #if defined(CONFIG_TICKSOURCE_CORETMR) 157 - struct irq_chip *chip = get_irq_chip(IRQ_CORETMR); 158 - struct irq_desc *desc = irq_to_desc(IRQ_CORETMR); 157 + struct irq_data *data = irq_get_irq_data(IRQ_CORETMR); 158 + struct irq_chip *chip = irq_data_get_irq_chip(data); 159 159 160 160 bfin_coretmr_init(); 161 161 bfin_coretmr_clockevent_init(); 162 162 163 - chip->irq_unmask(&desc->irq_data); 163 + chip->irq_unmask(data); 164 164 #else 165 165 /* Power down the core timer, just to play safe. */ 166 166 bfin_write_TCNTL(0);
+20 -23
arch/blackfin/mach-common/ints-priority.c
··· 559 559 #ifdef CONFIG_IPIPE 560 560 handle = handle_level_irq; 561 561 #endif 562 - __set_irq_handler_unlocked(irq, handle); 562 + __irq_set_handler_locked(irq, handle); 563 563 } 564 564 565 565 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS); ··· 578 578 static void bfin_gpio_mask_ack_irq(struct irq_data *d) 579 579 { 580 580 unsigned int irq = d->irq; 581 - struct irq_desc *desc = irq_to_desc(irq); 582 581 u32 gpionr = irq_to_gpio(irq); 583 582 584 - if (desc->handle_irq == handle_edge_irq) 583 + if (!irqd_is_level_type(d)) 585 584 set_gpio_data(gpionr, 0); 586 585 587 586 set_gpio_maska(gpionr, 0); ··· 836 837 837 838 static void bfin_gpio_ack_irq(struct irq_data *d) 838 839 { 839 - struct irq_desc *desc = irq_to_desc(d->irq); 840 840 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; 841 841 u32 pintbit = PINT_BIT(pint_val); 842 842 u32 bank = PINT_2_BANK(pint_val); 843 843 844 - if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { 844 + if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { 845 845 if (pint[bank]->invert_set & pintbit) 846 846 pint[bank]->invert_clear = pintbit; 847 847 else ··· 852 854 853 855 static void bfin_gpio_mask_ack_irq(struct irq_data *d) 854 856 { 855 - struct irq_desc *desc = irq_to_desc(d->irq); 856 857 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; 857 858 u32 pintbit = PINT_BIT(pint_val); 858 859 u32 bank = PINT_2_BANK(pint_val); 859 860 860 - if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { 861 + if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { 861 862 if (pint[bank]->invert_set & pintbit) 862 863 pint[bank]->invert_clear = pintbit; 863 864 else ··· 1163 1166 1164 1167 for (irq = 0; irq <= SYS_IRQS; irq++) { 1165 1168 if (irq <= IRQ_CORETMR) 1166 - set_irq_chip(irq, &bfin_core_irqchip); 1169 + irq_set_chip(irq, &bfin_core_irqchip); 1167 1170 else 1168 - set_irq_chip(irq, &bfin_internal_irqchip); 1171 + irq_set_chip(irq, &bfin_internal_irqchip); 1169 1172 1170 1173 switch (irq) { 1171 1174 #if defined(CONFIG_BF53x) ··· 1189 1192 #elif defined(CONFIG_BF538) || defined(CONFIG_BF539) 1190 1193 case IRQ_PORTF_INTA: 1191 1194 #endif 1192 - set_irq_chained_handler(irq, 1193 - bfin_demux_gpio_irq); 1195 + irq_set_chained_handler(irq, bfin_demux_gpio_irq); 1194 1196 break; 1195 1197 #ifdef BF537_GENERIC_ERROR_INT_DEMUX 1196 1198 case IRQ_GENERIC_ERROR: 1197 - set_irq_chained_handler(irq, bfin_demux_error_irq); 1199 + irq_set_chained_handler(irq, bfin_demux_error_irq); 1198 1200 break; 1199 1201 #endif 1200 1202 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1201 1203 case IRQ_MAC_ERROR: 1202 - set_irq_chained_handler(irq, bfin_demux_mac_status_irq); 1204 + irq_set_chained_handler(irq, 1205 + bfin_demux_mac_status_irq); 1203 1206 break; 1204 1207 #endif 1205 1208 #ifdef CONFIG_SMP 1206 1209 case IRQ_SUPPLE_0: 1207 1210 case IRQ_SUPPLE_1: 1208 - set_irq_handler(irq, handle_percpu_irq); 1211 + irq_set_handler(irq, handle_percpu_irq); 1209 1212 break; 1210 1213 #endif 1211 1214 1212 1215 #ifdef CONFIG_TICKSOURCE_CORETMR 1213 1216 case IRQ_CORETMR: 1214 1217 # ifdef CONFIG_SMP 1215 - set_irq_handler(irq, handle_percpu_irq); 1218 + irq_set_handler(irq, handle_percpu_irq); 1216 1219 break; 1217 1220 # else 1218 - set_irq_handler(irq, handle_simple_irq); 1221 + irq_set_handler(irq, handle_simple_irq); 1219 1222 break; 1220 1223 # endif 1221 1224 #endif 1222 1225 1223 1226 #ifdef CONFIG_TICKSOURCE_GPTMR0 1224 1227 case IRQ_TIMER0: 1225 - set_irq_handler(irq, handle_simple_irq); 1228 + irq_set_handler(irq, handle_simple_irq); 1226 1229 break; 1227 1230 #endif 1228 1231 1229 1232 #ifdef CONFIG_IPIPE 1230 1233 default: 1231 - set_irq_handler(irq, handle_level_irq); 1234 + irq_set_handler(irq, handle_level_irq); 1232 1235 break; 1233 1236 #else /* !CONFIG_IPIPE */ 1234 1237 default: 1235 - set_irq_handler(irq, handle_simple_irq); 1238 + irq_set_handler(irq, handle_simple_irq); 1236 1239 break; 1237 1240 #endif /* !CONFIG_IPIPE */ 1238 1241 } ··· 1240 1243 1241 1244 #ifdef BF537_GENERIC_ERROR_INT_DEMUX 1242 1245 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) 1243 - set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip, 1246 + irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip, 1244 1247 handle_level_irq); 1245 1248 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1246 - set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); 1249 + irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); 1247 1250 #endif 1248 1251 #endif 1249 1252 1250 1253 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1251 1254 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) 1252 - set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip, 1255 + irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip, 1253 1256 handle_level_irq); 1254 1257 #endif 1255 1258 /* if configured as edge, then will be changed to do_edge_IRQ */ 1256 1259 for (irq = GPIO_IRQ_BASE; 1257 1260 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) 1258 - set_irq_chip_and_handler(irq, &bfin_gpio_irqchip, 1261 + irq_set_chip_and_handler(irq, &bfin_gpio_irqchip, 1259 1262 handle_level_irq); 1260 1263 1261 1264 bfin_write_IMASK(0);
-1
arch/cris/Kconfig
··· 55 55 default y 56 56 select HAVE_IDE 57 57 select HAVE_GENERIC_HARDIRQS 58 - select GENERIC_HARDIRQS_NO_DEPRECATED 59 58 select GENERIC_IRQ_SHOW 60 59 61 60 config HZ
+1 -1
arch/frv/Kconfig
··· 6 6 select HAVE_IRQ_WORK 7 7 select HAVE_PERF_EVENTS 8 8 select HAVE_GENERIC_HARDIRQS 9 + select GENERIC_IRQ_SHOW 9 10 10 11 config ZONE_DMA 11 12 bool ··· 362 361 363 362 config ARCH_SUSPEND_POSSIBLE 364 363 def_bool y 365 - depends on !SMP 366 364 367 365 source kernel/power/Kconfig 368 366 endmenu
-9
arch/frv/include/asm/system.h
··· 45 45 #define wmb() asm volatile ("membar" : : :"memory") 46 46 #define read_barrier_depends() do { } while (0) 47 47 48 - #ifdef CONFIG_SMP 49 - #define smp_mb() mb() 50 - #define smp_rmb() rmb() 51 - #define smp_wmb() wmb() 52 - #define smp_read_barrier_depends() read_barrier_depends() 53 - #define set_mb(var, value) \ 54 - do { xchg(&var, (value)); } while (0) 55 - #else 56 48 #define smp_mb() barrier() 57 49 #define smp_rmb() barrier() 58 50 #define smp_wmb() barrier() 59 51 #define smp_read_barrier_depends() do {} while(0) 60 52 #define set_mb(var, value) \ 61 53 do { var = (value); barrier(); } while (0) 62 - #endif 63 54 64 55 extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2))); 65 56 extern void free_initmem(void);
+3 -1
arch/frv/include/asm/thread_info.h
··· 21 21 22 22 #define THREAD_SIZE 8192 23 23 24 + #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 25 + 24 26 /* 25 27 * low level task data that entry.S needs immediate access to 26 28 * - this struct should fit entirely inside of one cache line ··· 89 87 #define alloc_thread_info_node(tsk, node) \ 90 88 kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 91 89 #else 92 - #define alloc_thread_info_node(tsk) \ 90 + #define alloc_thread_info_node(tsk, node) \ 93 91 kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 94 92 #endif 95 93
+15 -15
arch/frv/kernel/irq-mb93091.c
··· 36 36 /* 37 37 * on-motherboard FPGA PIC operations 38 38 */ 39 - static void frv_fpga_mask(unsigned int irq) 39 + static void frv_fpga_mask(struct irq_data *d) 40 40 { 41 41 uint16_t imr = __get_IMR(); 42 42 43 - imr |= 1 << (irq - IRQ_BASE_FPGA); 43 + imr |= 1 << (d->irq - IRQ_BASE_FPGA); 44 44 45 45 __set_IMR(imr); 46 46 } 47 47 48 - static void frv_fpga_ack(unsigned int irq) 48 + static void frv_fpga_ack(struct irq_data *d) 49 49 { 50 - __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); 50 + __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); 51 51 } 52 52 53 - static void frv_fpga_mask_ack(unsigned int irq) 53 + static void frv_fpga_mask_ack(struct irq_data *d) 54 54 { 55 55 uint16_t imr = __get_IMR(); 56 56 57 - imr |= 1 << (irq - IRQ_BASE_FPGA); 57 + imr |= 1 << (d->irq - IRQ_BASE_FPGA); 58 58 __set_IMR(imr); 59 59 60 - __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); 60 + __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); 61 61 } 62 62 63 - static void frv_fpga_unmask(unsigned int irq) 63 + static void frv_fpga_unmask(struct irq_data *d) 64 64 { 65 65 uint16_t imr = __get_IMR(); 66 66 67 - imr &= ~(1 << (irq - IRQ_BASE_FPGA)); 67 + imr &= ~(1 << (d->irq - IRQ_BASE_FPGA)); 68 68 69 69 __set_IMR(imr); 70 70 } 71 71 72 72 static struct irq_chip frv_fpga_pic = { 73 73 .name = "mb93091", 74 - .ack = frv_fpga_ack, 75 - .mask = frv_fpga_mask, 76 - .mask_ack = frv_fpga_mask_ack, 77 - .unmask = frv_fpga_unmask, 74 + .irq_ack = frv_fpga_ack, 75 + .irq_mask = frv_fpga_mask, 76 + .irq_mask_ack = frv_fpga_mask_ack, 77 + .irq_unmask = frv_fpga_unmask, 78 78 }; 79 79 80 80 /* ··· 146 146 __clr_IFR(0x0000); 147 147 148 148 for (irq = IRQ_BASE_FPGA + 1; irq <= IRQ_BASE_FPGA + 14; irq++) 149 - set_irq_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq); 149 + irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq); 150 150 151 - set_irq_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq); 151 + irq_set_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq); 152 152 153 153 /* the FPGA drives the first four external IRQ inputs on the CPU PIC */ 154 154 setup_irq(IRQ_CPU_EXTERNAL0, &fpga_irq[0]);
+15 -16
arch/frv/kernel/irq-mb93093.c
··· 35 35 /* 36 36 * off-CPU FPGA PIC operations 37 37 */ 38 - static void frv_fpga_mask(unsigned int irq) 38 + static void frv_fpga_mask(struct irq_data *d) 39 39 { 40 40 uint16_t imr = __get_IMR(); 41 41 42 - imr |= 1 << (irq - IRQ_BASE_FPGA); 42 + imr |= 1 << (d->irq - IRQ_BASE_FPGA); 43 43 __set_IMR(imr); 44 44 } 45 45 46 - static void frv_fpga_ack(unsigned int irq) 46 + static void frv_fpga_ack(struct irq_data *d) 47 47 { 48 - __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); 48 + __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); 49 49 } 50 50 51 - static void frv_fpga_mask_ack(unsigned int irq) 51 + static void frv_fpga_mask_ack(struct irq_data *d) 52 52 { 53 53 uint16_t imr = __get_IMR(); 54 54 55 - imr |= 1 << (irq - IRQ_BASE_FPGA); 55 + imr |= 1 << (d->irq - IRQ_BASE_FPGA); 56 56 __set_IMR(imr); 57 57 58 - __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); 58 + __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); 59 59 } 60 60 61 - static void frv_fpga_unmask(unsigned int irq) 61 + static void frv_fpga_unmask(struct irq_data *d) 62 62 { 63 63 uint16_t imr = __get_IMR(); 64 64 65 - imr &= ~(1 << (irq - IRQ_BASE_FPGA)); 65 + imr &= ~(1 << (d->irq - IRQ_BASE_FPGA)); 66 66 67 67 __set_IMR(imr); 68 68 } 69 69 70 70 static struct irq_chip frv_fpga_pic = { 71 71 .name = "mb93093", 72 - .ack = frv_fpga_ack, 73 - .mask = frv_fpga_mask, 74 - .mask_ack = frv_fpga_mask_ack, 75 - .unmask = frv_fpga_unmask, 76 - .end = frv_fpga_end, 72 + .irq_ack = frv_fpga_ack, 73 + .irq_mask = frv_fpga_mask, 74 + .irq_mask_ack = frv_fpga_mask_ack, 75 + .irq_unmask = frv_fpga_unmask, 77 76 }; 78 77 79 78 /* ··· 93 94 irq = 31 - irq; 94 95 mask &= ~(1 << irq); 95 96 96 - generic_irq_handle(IRQ_BASE_FPGA + irq); 97 + generic_handle_irq(IRQ_BASE_FPGA + irq); 97 98 } 98 99 99 100 return IRQ_HANDLED; ··· 124 125 __clr_IFR(0x0000); 125 126 126 127 for (irq = IRQ_BASE_FPGA + 8; irq <= IRQ_BASE_FPGA + 10; irq++) 127 - set_irq_chip_and_handler(irq, &frv_fpga_pic, handle_edge_irq); 128 + irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_edge_irq); 128 129 129 130 /* the FPGA drives external IRQ input #2 on the CPU PIC */ 130 131 setup_irq(IRQ_CPU_EXTERNAL2, &fpga_irq[0]);
+13 -12
arch/frv/kernel/irq-mb93493.c
··· 45 45 * daughter board PIC operations 46 46 * - there is no way to ACK interrupts in the MB93493 chip 47 47 */ 48 - static void frv_mb93493_mask(unsigned int irq) 48 + static void frv_mb93493_mask(struct irq_data *d) 49 49 { 50 50 uint32_t iqsr; 51 51 volatile void *piqsr; 52 52 53 - if (IRQ_ROUTING & (1 << (irq - IRQ_BASE_MB93493))) 53 + if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493))) 54 54 piqsr = __addr_MB93493_IQSR(1); 55 55 else 56 56 piqsr = __addr_MB93493_IQSR(0); 57 57 58 58 iqsr = readl(piqsr); 59 - iqsr &= ~(1 << (irq - IRQ_BASE_MB93493 + 16)); 59 + iqsr &= ~(1 << (d->irq - IRQ_BASE_MB93493 + 16)); 60 60 writel(iqsr, piqsr); 61 61 } 62 62 63 - static void frv_mb93493_ack(unsigned int irq) 63 + static void frv_mb93493_ack(struct irq_data *d) 64 64 { 65 65 } 66 66 67 - static void frv_mb93493_unmask(unsigned int irq) 67 + static void frv_mb93493_unmask(struct irq_data *d) 68 68 { 69 69 uint32_t iqsr; 70 70 volatile void *piqsr; 71 71 72 - if (IRQ_ROUTING & (1 << (irq - IRQ_BASE_MB93493))) 72 + if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493))) 73 73 piqsr = __addr_MB93493_IQSR(1); 74 74 else 75 75 piqsr = __addr_MB93493_IQSR(0); 76 76 77 77 iqsr = readl(piqsr); 78 - iqsr |= 1 << (irq - IRQ_BASE_MB93493 + 16); 78 + iqsr |= 1 << (d->irq - IRQ_BASE_MB93493 + 16); 79 79 writel(iqsr, piqsr); 80 80 } 81 81 82 82 static struct irq_chip frv_mb93493_pic = { 83 83 .name = "mb93093", 84 - .ack = frv_mb93493_ack, 85 - .mask = frv_mb93493_mask, 86 - .mask_ack = frv_mb93493_mask, 87 - .unmask = frv_mb93493_unmask, 84 + .irq_ack = frv_mb93493_ack, 85 + .irq_mask = frv_mb93493_mask, 86 + .irq_mask_ack = frv_mb93493_mask, 87 + .irq_unmask = frv_mb93493_unmask, 88 88 }; 89 89 90 90 /* ··· 139 139 int irq; 140 140 141 141 for (irq = IRQ_BASE_MB93493 + 0; irq <= IRQ_BASE_MB93493 + 10; irq++) 142 - set_irq_chip_and_handler(irq, &frv_mb93493_pic, handle_edge_irq); 142 + irq_set_chip_and_handler(irq, &frv_mb93493_pic, 143 + handle_edge_irq); 143 144 144 145 /* the MB93493 drives external IRQ inputs on the CPU PIC */ 145 146 setup_irq(IRQ_CPU_MB93493_0, &mb93493_irq[0]);
+18 -62
arch/frv/kernel/irq.c
··· 47 47 48 48 atomic_t irq_err_count; 49 49 50 - /* 51 - * Generic, controller-independent functions: 52 - */ 53 - int show_interrupts(struct seq_file *p, void *v) 50 + int arch_show_interrupts(struct seq_file *p, int prec) 54 51 { 55 - int i = *(loff_t *) v, cpu; 56 - struct irqaction * action; 57 - unsigned long flags; 58 - 59 - if (i == 0) { 60 - char cpuname[12]; 61 - 62 - seq_printf(p, " "); 63 - for_each_present_cpu(cpu) { 64 - sprintf(cpuname, "CPU%d", cpu); 65 - seq_printf(p, " %10s", cpuname); 66 - } 67 - seq_putc(p, '\n'); 68 - } 69 - 70 - if (i < NR_IRQS) { 71 - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 72 - action = irq_desc[i].action; 73 - if (action) { 74 - seq_printf(p, "%3d: ", i); 75 - for_each_present_cpu(cpu) 76 - seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); 77 - seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); 78 - seq_printf(p, " %s", action->name); 79 - for (action = action->next; 80 - action; 81 - action = action->next) 82 - seq_printf(p, ", %s", action->name); 83 - 84 - seq_putc(p, '\n'); 85 - } 86 - 87 - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 88 - } else if (i == NR_IRQS) { 89 - seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); 90 - } 91 - 52 + seq_printf(p, "%*s: ", prec, "ERR"); 53 + seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); 92 54 return 0; 93 55 } 94 56 95 57 /* 96 58 * on-CPU PIC operations 97 59 */ 98 - static void frv_cpupic_ack(unsigned int irqlevel) 60 + static void frv_cpupic_ack(struct irq_data *d) 99 61 { 100 - __clr_RC(irqlevel); 62 + __clr_RC(d->irq); 101 63 __clr_IRL(); 102 64 } 103 65 104 - static void frv_cpupic_mask(unsigned int irqlevel) 66 + static void frv_cpupic_mask(struct irq_data *d) 105 67 { 106 - __set_MASK(irqlevel); 68 + __set_MASK(d->irq); 107 69 } 108 70 109 - static void frv_cpupic_mask_ack(unsigned int irqlevel) 71 + static void frv_cpupic_mask_ack(struct irq_data *d) 110 72 { 111 - __set_MASK(irqlevel); 112 - __clr_RC(irqlevel); 73 + __set_MASK(d->irq); 74 + __clr_RC(d->irq); 113 75 __clr_IRL(); 114 76 } 115 77 116 - static void frv_cpupic_unmask(unsigned int irqlevel) 78 + static void frv_cpupic_unmask(struct irq_data *d) 117 79 { 118 - __clr_MASK(irqlevel); 119 - } 120 - 121 - static void frv_cpupic_end(unsigned int irqlevel) 122 - { 123 - __clr_MASK(irqlevel); 80 + __clr_MASK(d->irq); 124 81 } 125 82 126 83 static struct irq_chip frv_cpu_pic = { 127 84 .name = "cpu", 128 - .ack = frv_cpupic_ack, 129 - .mask = frv_cpupic_mask, 130 - .mask_ack = frv_cpupic_mask_ack, 131 - .unmask = frv_cpupic_unmask, 132 - .end = frv_cpupic_end, 85 + .irq_ack = frv_cpupic_ack, 86 + .irq_mask = frv_cpupic_mask, 87 + .irq_mask_ack = frv_cpupic_mask_ack, 88 + .irq_unmask = frv_cpupic_unmask, 133 89 }; 134 90 135 91 /* ··· 117 161 int level; 118 162 119 163 for (level = 1; level <= 14; level++) 120 - set_irq_chip_and_handler(level, &frv_cpu_pic, 164 + irq_set_chip_and_handler(level, &frv_cpu_pic, 121 165 handle_level_irq); 122 166 123 - set_irq_handler(IRQ_CPU_TIMER0, handle_edge_irq); 167 + irq_set_handler(IRQ_CPU_TIMER0, handle_edge_irq); 124 168 125 169 /* set the trigger levels for internal interrupt sources 126 170 * - timers all falling-edge
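frv makes the same move here as the other conversions in this merge that select GENERIC_IRQ_SHOW: the hand-rolled show_interrupts() loop goes away, the genirq core renders the per-IRQ table in /proc/interrupts, and the architecture hook only appends its extra summary rows, with prec giving the label width to pad to. A condensed restatement of the hook above:

	int arch_show_interrupts(struct seq_file *p, int prec)
	{
		seq_printf(p, "%*s: %10u\n", prec, "ERR",
			   atomic_read(&irq_err_count));
		return 0;
	}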
-1
arch/h8300/Kconfig
··· 3 3 default y 4 4 select HAVE_IDE 5 5 select HAVE_GENERIC_HARDIRQS 6 - select GENERIC_HARDIRQS_NO_DEPRECATED 7 6 select GENERIC_IRQ_SHOW 8 7 9 8 config SYMBOL_PREFIX
+1
arch/ia64/Kconfig
··· 26 26 select GENERIC_IRQ_PROBE 27 27 select GENERIC_PENDING_IRQ if SMP 28 28 select IRQ_PER_CPU 29 + select GENERIC_IRQ_SHOW 29 30 default y 30 31 help 31 32 The Itanium Processor Family is Intel's 64-bit successor to
+15 -16
arch/ia64/hp/sim/hpsim_irq.c
··· 11 11 #include <linux/irq.h> 12 12 13 13 static unsigned int 14 - hpsim_irq_startup (unsigned int irq) 14 + hpsim_irq_startup(struct irq_data *data) 15 15 { 16 16 return 0; 17 17 } 18 18 19 19 static void 20 - hpsim_irq_noop (unsigned int irq) 20 + hpsim_irq_noop(struct irq_data *data) 21 21 { 22 22 } 23 23 24 24 static int 25 - hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b) 25 + hpsim_set_affinity_noop(struct irq_data *d, const struct cpumask *b, bool f) 26 26 { 27 27 return 0; 28 28 } 29 29 30 30 static struct irq_chip irq_type_hp_sim = { 31 - .name = "hpsim", 32 - .startup = hpsim_irq_startup, 33 - .shutdown = hpsim_irq_noop, 34 - .enable = hpsim_irq_noop, 35 - .disable = hpsim_irq_noop, 36 - .ack = hpsim_irq_noop, 37 - .end = hpsim_irq_noop, 38 - .set_affinity = hpsim_set_affinity_noop, 31 + .name = "hpsim", 32 + .irq_startup = hpsim_irq_startup, 33 + .irq_shutdown = hpsim_irq_noop, 34 + .irq_enable = hpsim_irq_noop, 35 + .irq_disable = hpsim_irq_noop, 36 + .irq_ack = hpsim_irq_noop, 37 + .irq_set_affinity = hpsim_set_affinity_noop, 39 38 }; 40 39 41 40 void __init 42 41 hpsim_irq_init (void) 43 42 { 44 - struct irq_desc *idesc; 45 43 int i; 46 44 47 - for (i = 0; i < NR_IRQS; ++i) { 48 - idesc = irq_desc + i; 49 - if (idesc->chip == &no_irq_chip) 50 - idesc->chip = &irq_type_hp_sim; 45 + for_each_active_irq(i) { 46 + struct irq_chip *chip = irq_get_chip(i); 47 + 48 + if (chip == &no_irq_chip) 49 + irq_set_chip(i, &irq_type_hp_sim); 51 50 } 52 51 }
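The hpsim conversion shows the other half of the API break: irq_chip methods now take a struct irq_data * instead of a bare IRQ number, so a callback reaches its chip data, affinity mask and flags without indexing irq_desc[]. A sketch of a mask callback in the new style, assuming a hypothetical controller with a set-mask register (struct my_ctrl and MY_MASK_SET are placeholders):

	struct my_ctrl { unsigned int irq_base; void __iomem *base; };

	#define MY_MASK_SET 0x10	/* hypothetical register offset */

	static void my_mask(struct irq_data *d)
	{
		struct my_ctrl *ctrl = irq_data_get_irq_chip_data(d);

		writel(1 << (d->irq - ctrl->irq_base), ctrl->base + MY_MASK_SET);
	}

	static struct irq_chip my_chip = {
		.name = "my-ctrl",
		.irq_mask = my_mask,	/* was .mask, taking unsigned int irq */
	};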
-3
arch/ia64/include/asm/hw_irq.h
··· 151 151 /* 152 152 * Default implementations for the irq-descriptor API: 153 153 */ 154 - 155 - extern struct irq_desc irq_desc[NR_IRQS]; 156 - 157 154 #ifndef CONFIG_IA64_GENERIC 158 155 static inline ia64_vector __ia64_irq_to_vector(int irq) 159 156 {
+55 -64
arch/ia64/kernel/iosapic.c
··· 257 257 } 258 258 259 259 static void 260 - nop (unsigned int irq) 260 + nop (struct irq_data *data) 261 261 { 262 262 /* do nothing... */ 263 263 } ··· 287 287 #endif 288 288 289 289 static void 290 - mask_irq (unsigned int irq) 290 + mask_irq (struct irq_data *data) 291 291 { 292 + unsigned int irq = data->irq; 292 293 u32 low32; 293 294 int rte_index; 294 295 struct iosapic_rte_info *rte; ··· 306 305 } 307 306 308 307 static void 309 - unmask_irq (unsigned int irq) 308 + unmask_irq (struct irq_data *data) 310 309 { 310 + unsigned int irq = data->irq; 311 311 u32 low32; 312 312 int rte_index; 313 313 struct iosapic_rte_info *rte; ··· 325 323 326 324 327 325 static int 328 - iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) 326 + iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 327 + bool force) 329 328 { 330 329 #ifdef CONFIG_SMP 330 + unsigned int irq = data->irq; 331 331 u32 high32, low32; 332 332 int cpu, dest, rte_index; 333 333 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; ··· 383 379 */ 384 380 385 381 static unsigned int 386 - iosapic_startup_level_irq (unsigned int irq) 382 + iosapic_startup_level_irq (struct irq_data *data) 387 383 { 388 - unmask_irq(irq); 384 + unmask_irq(data); 389 385 return 0; 390 386 } 391 387 392 388 static void 393 - iosapic_unmask_level_irq (unsigned int irq) 389 + iosapic_unmask_level_irq (struct irq_data *data) 394 390 { 391 + unsigned int irq = data->irq; 395 392 ia64_vector vec = irq_to_vector(irq); 396 393 struct iosapic_rte_info *rte; 397 394 int do_unmask_irq = 0; 398 395 399 396 irq_complete_move(irq); 400 - if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { 397 + if (unlikely(irqd_is_setaffinity_pending(data))) { 401 398 do_unmask_irq = 1; 402 - mask_irq(irq); 399 + mask_irq(data); 403 400 } else 404 - unmask_irq(irq); 401 + unmask_irq(data); 405 402 406 403 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) 407 404 iosapic_eoi(rte->iosapic->addr, vec); 408 405 409 406 if (unlikely(do_unmask_irq)) { 410 - move_masked_irq(irq); 407 + irq_move_masked_irq(data); 411 - unmask_irq(irq); 408 + unmask_irq(data); 412 409 } 413 410 } ··· 419 414 #define iosapic_ack_level_irq nop 420 415 421 416 static struct irq_chip irq_type_iosapic_level = { 422 - .name = "IO-SAPIC-level", 423 - .startup = iosapic_startup_level_irq, 424 - .shutdown = iosapic_shutdown_level_irq, 425 - .enable = iosapic_enable_level_irq, 426 - .disable = iosapic_disable_level_irq, 427 - .ack = iosapic_ack_level_irq, 428 - .mask = mask_irq, 429 - .unmask = iosapic_unmask_level_irq, 430 - .set_affinity = iosapic_set_affinity 417 + .name = "IO-SAPIC-level", 418 + .irq_startup = iosapic_startup_level_irq, 419 + .irq_shutdown = iosapic_shutdown_level_irq, 420 + .irq_enable = iosapic_enable_level_irq, 421 + .irq_disable = iosapic_disable_level_irq, 422 + .irq_ack = iosapic_ack_level_irq, 423 + .irq_mask = mask_irq, 424 + .irq_unmask = iosapic_unmask_level_irq, 425 + .irq_set_affinity = iosapic_set_affinity 431 426 }; 432 427 433 428 /* ··· 435 430 */ 436 431 437 432 static unsigned int 438 - iosapic_startup_edge_irq (unsigned int irq) 433 + iosapic_startup_edge_irq (struct irq_data *data) 439 434 { 440 - unmask_irq(irq); 435 + unmask_irq(data); 441 436 /* 442 437 * IOSAPIC simply drops interrupts pended while the 443 438 * corresponding pin was masked, so we can't know if an ··· 447 442 } 448 443 449 444 static void 450 - iosapic_ack_edge_irq (unsigned int irq) 445 + iosapic_ack_edge_irq (struct irq_data *data) 451 446 { 452 - struct irq_desc *idesc = irq_desc + irq; 453 - 454 - irq_complete_move(irq); 455 - move_native_irq(irq); 456 - /* 457 - * Once we have recorded IRQ_PENDING already, we can mask the 458 - * interrupt for real. This prevents IRQ storms from unhandled 459 - * devices. 460 - */ 461 - if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == 462 - (IRQ_PENDING|IRQ_DISABLED)) 463 - mask_irq(irq); 447 + irq_complete_move(data->irq); 448 + irq_move_irq(data); 464 449 } 465 450 466 451 #define iosapic_enable_edge_irq unmask_irq 467 452 #define iosapic_disable_edge_irq nop 468 - #define iosapic_end_edge_irq nop 469 453 470 454 static struct irq_chip irq_type_iosapic_edge = { 471 - .name = "IO-SAPIC-edge", 472 - .startup = iosapic_startup_edge_irq, 473 - .shutdown = iosapic_disable_edge_irq, 474 - .enable = iosapic_enable_edge_irq, 475 - .disable = iosapic_disable_edge_irq, 476 - .ack = iosapic_ack_edge_irq, 477 - .end = iosapic_end_edge_irq, 478 - .mask = mask_irq, 479 - .unmask = unmask_irq, 480 - .set_affinity = iosapic_set_affinity 455 + .name = "IO-SAPIC-edge", 456 + .irq_startup = iosapic_startup_edge_irq, 457 + .irq_shutdown = iosapic_disable_edge_irq, 458 + .irq_enable = iosapic_enable_edge_irq, 459 + .irq_disable = iosapic_disable_edge_irq, 460 + .irq_ack = iosapic_ack_edge_irq, 461 + .irq_mask = mask_irq, 462 + .irq_unmask = unmask_irq, 463 + .irq_set_affinity = iosapic_set_affinity 481 464 }; 482 465 483 466 static unsigned int ··· 555 562 register_intr (unsigned int gsi, int irq, unsigned char delivery, 556 563 unsigned long polarity, unsigned long trigger) 557 564 { 558 - struct irq_desc *idesc; 559 - struct irq_chip *irq_type; 565 + struct irq_chip *chip, *irq_type; 560 566 int index; 561 567 struct iosapic_rte_info *rte; ··· 602 610 603 611 irq_type = iosapic_get_irq_chip(trigger); 604 612 605 - idesc = irq_desc + irq; 606 - if (irq_type != NULL && idesc->chip != irq_type) { 607 - if (idesc->chip != &no_irq_chip) 613 + chip = irq_get_chip(irq); 614 + if (irq_type != NULL && chip != irq_type) { 615 + if (chip != &no_irq_chip) 608 616 printk(KERN_WARNING 609 617 "%s: changing vector %d from %s to %s\n", 610 618 __func__, irq_to_vector(irq), 611 - idesc->chip->name, irq_type->name); 612 - idesc->chip = irq_type; 619 + chip->name, irq_type->name); 620 + chip = irq_type; 613 621 } 614 - if (trigger == IOSAPIC_EDGE) 615 - __set_irq_handler_unlocked(irq, handle_edge_irq); 616 - else 617 - __set_irq_handler_unlocked(irq, handle_level_irq); 622 + __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ? 623 + handle_edge_irq : handle_level_irq, 624 + NULL); 618 625 return 0; 619 626 } ··· 723 732 struct iosapic_rte_info *rte; 724 733 u32 low32; 725 734 unsigned char dmode; 735 + struct irq_desc *desc; 726 736 727 737 /* 728 738 * If this GSI has already been registered (i.e., it's a ··· 751 759 goto unlock_iosapic_lock; 752 760 } 753 761 754 - raw_spin_lock(&irq_desc[irq].lock); 762 + desc = irq_to_desc(irq); 763 + raw_spin_lock(&desc->lock); 755 764 dest = get_target_cpu(gsi, irq); 756 765 dmode = choose_dmode(); 757 766 err = register_intr(gsi, irq, dmode, polarity, trigger); 758 767 if (err < 0) { 759 - raw_spin_unlock(&irq_desc[irq].lock); 768 + raw_spin_unlock(&desc->lock); 760 769 irq = err; 761 770 goto unlock_iosapic_lock; 762 771 } ··· 776 783 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 777 784 cpu_logical_id(dest), dest, irq_to_vector(irq)); 778 785 779 - raw_spin_unlock(&irq_desc[irq].lock); 786 + raw_spin_unlock(&desc->lock); 780 787 unlock_iosapic_lock: 781 788 spin_unlock_irqrestore(&iosapic_lock, flags); 782 789 return irq; ··· 787 794 { 788 795 unsigned long flags; 789 796 int irq, index; 790 - struct irq_desc *idesc; 791 797 u32 low32; 792 798 unsigned long trigger, polarity; 793 799 unsigned int dest; ··· 816 824 if (--rte->refcnt > 0) 817 825 goto out; 818 826 819 - idesc = irq_desc + irq; 820 827 rte->refcnt = NO_REF_RTE; 821 828 822 829 /* Mask the interrupt */ ··· 839 848 if (iosapic_intr_info[irq].count == 0) { 840 849 #ifdef CONFIG_SMP 841 850 /* Clear affinity */ 842 - cpumask_setall(idesc->affinity); 851 + cpumask_setall(irq_get_irq_data(irq)->affinity); 843 852 #endif 844 853 /* Clear the interrupt information */ 845 854 iosapic_intr_info[irq].dest = 0;
+18 -55
arch/ia64/kernel/irq.c
··· 53 53 /* 54 54 * /proc/interrupts printing: 55 55 */ 56 - 57 - int show_interrupts(struct seq_file *p, void *v) 56 + int arch_show_interrupts(struct seq_file *p, int prec) 58 57 { 59 - int i = *(loff_t *) v, j; 60 - struct irqaction * action; 61 - unsigned long flags; 62 - 63 - if (i == 0) { 64 - char cpuname[16]; 65 - seq_printf(p, " "); 66 - for_each_online_cpu(j) { 67 - snprintf(cpuname, 10, "CPU%d", j); 68 - seq_printf(p, "%10s ", cpuname); 69 - } 70 - seq_putc(p, '\n'); 71 - } 72 - 73 - if (i < NR_IRQS) { 74 - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 75 - action = irq_desc[i].action; 76 - if (!action) 77 - goto skip; 78 - seq_printf(p, "%3d: ",i); 79 - #ifndef CONFIG_SMP 80 - seq_printf(p, "%10u ", kstat_irqs(i)); 81 - #else 82 - for_each_online_cpu(j) { 83 - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 84 - } 85 - #endif 86 - seq_printf(p, " %14s", irq_desc[i].chip->name); 87 - seq_printf(p, " %s", action->name); 88 - 89 - for (action=action->next; action; action = action->next) 90 - seq_printf(p, ", %s", action->name); 91 - 92 - seq_putc(p, '\n'); 93 - skip: 94 - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 95 - } else if (i == NR_IRQS) 96 - seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 58 + seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 97 59 return 0; 98 60 } 99 61 ··· 65 103 void set_irq_affinity_info (unsigned int irq, int hwid, int redir) 66 104 { 67 105 if (irq < NR_IRQS) { 68 - cpumask_copy(irq_desc[irq].affinity, 106 + cpumask_copy(irq_get_irq_data(irq)->affinity, 69 107 cpumask_of(cpu_logical_id(hwid))); 70 108 irq_redir[irq] = (char) (redir & 0xff); 71 109 } ··· 92 130 */ 93 131 static void migrate_irqs(void) 94 132 { 95 - struct irq_desc *desc; 96 133 int irq, new_cpu; 97 134 98 135 for (irq=0; irq < NR_IRQS; irq++) { 99 - desc = irq_desc + irq; 136 + struct irq_desc *desc = irq_to_desc(irq); 137 + struct irq_data *data = irq_desc_get_irq_data(desc); 138 + struct irq_chip *chip = irq_data_get_irq_chip(data); 100 139 101 - if (desc->status == IRQ_DISABLED) 140 + if (irqd_irq_disabled(data)) 102 141 continue; 103 142 104 143 /* ··· 108 145 * tell CPU not to respond to these local intr sources. 109 146 * such as ITV,CPEI,MCA etc. 110 147 */ 111 - if (desc->status == IRQ_PER_CPU) 148 + if (irqd_is_per_cpu(data)) 112 149 continue; 113 150 114 - if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask) 151 + if (cpumask_any_and(data->affinity, cpu_online_mask) 115 152 >= nr_cpu_ids) { 116 153 /* 117 154 * Save it for phase 2 processing ··· 123 160 /* 124 161 * Al three are essential, currently WARN_ON.. maybe panic? 125 162 */ 126 - if (desc->chip && desc->chip->disable && 127 - desc->chip->enable && desc->chip->set_affinity) { 128 - desc->chip->disable(irq); 129 - desc->chip->set_affinity(irq, 130 - cpumask_of(new_cpu)); 131 - desc->chip->enable(irq); 163 + if (chip && chip->irq_disable && 164 + chip->irq_enable && chip->irq_set_affinity) { 165 + chip->irq_disable(data); 166 + chip->irq_set_affinity(data, 167 + cpumask_of(new_cpu), false); 168 + chip->irq_enable(data); 132 169 } else { 133 - WARN_ON((!(desc->chip) || !(desc->chip->disable) || 134 - !(desc->chip->enable) || 135 - !(desc->chip->set_affinity))); 170 + WARN_ON((!chip || !chip->irq_disable || 171 + !chip->irq_enable || 172 + !chip->irq_set_affinity)); 136 173 } 137 174 } 138 175 }
+4 -6
arch/ia64/kernel/irq_ia64.c
··· 343 343 if (irq < 0) 344 344 continue; 345 345 346 - desc = irq_desc + irq; 346 + desc = irq_to_desc(irq); 347 347 cfg = irq_cfg + irq; 348 348 raw_spin_lock(&desc->lock); 349 349 if (!cfg->move_cleanup_count) ··· 626 626 void 627 627 ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) 628 628 { 629 - struct irq_desc *desc; 630 629 unsigned int irq; 631 630 632 631 irq = vec; 633 632 BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); 634 - desc = irq_desc + irq; 635 - desc->status |= IRQ_PER_CPU; 636 - set_irq_chip(irq, &irq_type_ia64_lsapic); 633 + irq_set_status_flags(irq, IRQ_PER_CPU); 634 + irq_set_chip(irq, &irq_type_ia64_lsapic); 637 635 if (action) 638 636 setup_irq(irq, action); 639 - set_irq_handler(irq, handle_percpu_irq); 637 + irq_set_handler(irq, handle_percpu_irq); 640 638 } 641 639 642 640 void __init
+11 -12
arch/ia64/kernel/irq_lsapic.c
··· 15 15 #include <linux/irq.h> 16 16 17 17 static unsigned int 18 - lsapic_noop_startup (unsigned int irq) 18 + lsapic_noop_startup (struct irq_data *data) 19 19 { 20 20 return 0; 21 21 } 22 22 23 23 static void 24 - lsapic_noop (unsigned int irq) 24 + lsapic_noop (struct irq_data *data) 25 25 { 26 26 /* nothing to do... */ 27 27 } 28 28 29 - static int lsapic_retrigger(unsigned int irq) 29 + static int lsapic_retrigger(struct irq_data *data) 30 30 { 31 - ia64_resend_irq(irq); 31 + ia64_resend_irq(data->irq); 32 32 33 33 return 1; 34 34 } 35 35 36 36 struct irq_chip irq_type_ia64_lsapic = { 37 - .name = "LSAPIC", 38 - .startup = lsapic_noop_startup, 39 - .shutdown = lsapic_noop, 40 - .enable = lsapic_noop, 41 - .disable = lsapic_noop, 42 - .ack = lsapic_noop, 43 - .end = lsapic_noop, 44 - .retrigger = lsapic_retrigger, 37 + .name = "LSAPIC", 38 + .irq_startup = lsapic_noop_startup, 39 + .irq_shutdown = lsapic_noop, 40 + .irq_enable = lsapic_noop, 41 + .irq_disable = lsapic_noop, 42 + .irq_ack = lsapic_noop, 43 + .irq_retrigger = lsapic_retrigger, 45 44 };
+1 -3
arch/ia64/kernel/mca.c
··· 2125 2125 cpe_poll_timer.function = ia64_mca_cpe_poll; 2126 2126 2127 2127 { 2128 - struct irq_desc *desc; 2129 2128 unsigned int irq; 2130 2129 2131 2130 if (cpe_vector >= 0) { ··· 2132 2133 irq = local_vector_to_irq(cpe_vector); 2133 2134 if (irq > 0) { 2134 2135 cpe_poll_enabled = 0; 2135 - desc = irq_desc + irq; 2136 - desc->status |= IRQ_PER_CPU; 2136 + irq_set_status_flags(irq, IRQ_PER_CPU); 2137 2137 setup_irq(irq, &mca_cpe_irqaction); 2138 2138 ia64_cpe_irq = irq; 2139 2139 ia64_mca_register_cpev(cpe_vector);
+26 -23
arch/ia64/kernel/msi_ia64.c
··· 12 12 static struct irq_chip ia64_msi_chip; 13 13 14 14 #ifdef CONFIG_SMP 15 - static int ia64_set_msi_irq_affinity(unsigned int irq, 16 - const cpumask_t *cpu_mask) 15 + static int ia64_set_msi_irq_affinity(struct irq_data *idata, 16 + const cpumask_t *cpu_mask, bool force) 17 17 { 18 18 struct msi_msg msg; 19 19 u32 addr, data; 20 20 int cpu = first_cpu(*cpu_mask); 21 + unsigned int irq = idata->irq; 21 22 22 23 if (!cpu_online(cpu)) 23 24 return -1; ··· 39 38 msg.data = data; 40 39 41 40 write_msi_msg(irq, &msg); 42 - cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); 41 + cpumask_copy(idata->affinity, cpumask_of(cpu)); 43 42 44 43 return 0; 45 44 } ··· 56 55 if (irq < 0) 57 56 return irq; 58 57 59 - set_irq_msi(irq, desc); 58 + irq_set_msi_desc(irq, desc); 60 59 cpus_and(mask, irq_to_domain(irq), cpu_online_map); 61 60 dest_phys_id = cpu_physical_id(first_cpu(mask)); 62 61 vector = irq_to_vector(irq); ··· 75 74 MSI_DATA_VECTOR(vector); 76 75 77 76 write_msi_msg(irq, &msg); 78 - set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); 77 + irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); 79 78 80 79 return 0; 81 80 } ··· 85 84 destroy_irq(irq); 86 85 } 87 86 88 - static void ia64_ack_msi_irq(unsigned int irq) 87 + static void ia64_ack_msi_irq(struct irq_data *data) 89 88 { 90 - irq_complete_move(irq); 91 - move_native_irq(irq); 89 + irq_complete_move(data->irq); 90 + irq_move_irq(data); 92 91 ia64_eoi(); 93 92 } 94 93 95 - static int ia64_msi_retrigger_irq(unsigned int irq) 94 + static int ia64_msi_retrigger_irq(struct irq_data *data) 96 95 { 97 - unsigned int vector = irq_to_vector(irq); 96 + unsigned int vector = irq_to_vector(data->irq); 98 97 ia64_resend_irq(vector); 99 98 100 99 return 1; ··· 104 103 * Generic ops used on most IA64 platforms. 105 104 */ 106 105 static struct irq_chip ia64_msi_chip = { 107 - .name = "PCI-MSI", 108 - .irq_mask = mask_msi_irq, 109 - .irq_unmask = unmask_msi_irq, 110 - .ack = ia64_ack_msi_irq, 106 + .name = "PCI-MSI", 107 + .irq_mask = mask_msi_irq, 108 + .irq_unmask = unmask_msi_irq, 109 + .irq_ack = ia64_ack_msi_irq, 111 110 #ifdef CONFIG_SMP 112 - .set_affinity = ia64_set_msi_irq_affinity, 111 + .irq_set_affinity = ia64_set_msi_irq_affinity, 113 112 #endif 114 - .retrigger = ia64_msi_retrigger_irq, 113 + .irq_retrigger = ia64_msi_retrigger_irq, 115 114 }; 116 115 117 116 ··· 133 132 134 133 #ifdef CONFIG_DMAR 135 134 #ifdef CONFIG_SMP 136 - static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 135 + static int dmar_msi_set_affinity(struct irq_data *data, 136 + const struct cpumask *mask, bool force) 137 137 { 138 + unsigned int irq = data->irq; 138 139 struct irq_cfg *cfg = irq_cfg + irq; 139 140 struct msi_msg msg; 140 141 int cpu = cpumask_first(mask); ··· 155 152 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); 156 153 157 154 dmar_msi_write(irq, &msg); 158 - cpumask_copy(irq_desc[irq].affinity, mask); 155 + cpumask_copy(data->affinity, mask); 159 156 160 157 return 0; 161 158 } ··· 165 162 .name = "DMAR_MSI", 166 163 .irq_unmask = dmar_msi_unmask, 167 164 .irq_mask = dmar_msi_mask, 168 - .ack = ia64_ack_msi_irq, 165 + .irq_ack = ia64_ack_msi_irq, 169 166 #ifdef CONFIG_SMP 170 - .set_affinity = dmar_msi_set_affinity, 167 + .irq_set_affinity = dmar_msi_set_affinity, 171 168 #endif 172 - .retrigger = ia64_msi_retrigger_irq, 169 + .irq_retrigger = ia64_msi_retrigger_irq, 173 170 }; 174 171 175 172 static int ··· 206 203 if (ret < 0) 207 204 return ret; 208 205 dmar_msi_write(irq, &msg); 209 - set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 210 - "edge"); 206 + irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 207 + "edge"); 211 208 return 0; 212 209 } 213 210 #endif /* CONFIG_DMAR */
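Note the new affinity signature in the MSI conversions: the callback receives the irq_data plus a `force` flag, and it updates data->affinity itself instead of writing irq_desc[irq].affinity. A sketch of the new shape, with the hardware routing left as a stubbed helper:

    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static void example_route_to_cpu(unsigned int irq, int cpu)
    {
            /* stub: rewrite the MSI address/data pair to target 'cpu' */
    }

    static int example_set_affinity(struct irq_data *data,
                                    const struct cpumask *cpu_mask, bool force)
    {
            int cpu = cpumask_first(cpu_mask);

            if (!cpu_online(cpu))
                    return -1;

            example_route_to_cpu(data->irq, cpu);
            /* The callback keeps irq_data's view of the affinity in sync. */
            cpumask_copy(data->affinity, cpumask_of(cpu));
            return 0;
    }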
+7 -7
arch/ia64/kernel/smpboot.c
··· 677 677 int migrate_platform_irqs(unsigned int cpu) 678 678 { 679 679 int new_cpei_cpu; 680 - struct irq_desc *desc = NULL; 680 + struct irq_data *data = NULL; 681 681 const struct cpumask *mask; 682 682 int retval = 0; 683 683 ··· 693 693 new_cpei_cpu = any_online_cpu(cpu_online_map); 694 694 mask = cpumask_of(new_cpei_cpu); 695 695 set_cpei_target_cpu(new_cpei_cpu); 696 - desc = irq_desc + ia64_cpe_irq; 696 + data = irq_get_irq_data(ia64_cpe_irq); 697 697 /* 698 698 * Switch for now, immediately, we need to do fake intr 699 699 * as other interrupts, but need to study CPEI behaviour with 700 700 * polling before making changes. 701 701 */ 702 - if (desc) { 703 - desc->chip->disable(ia64_cpe_irq); 704 - desc->chip->set_affinity(ia64_cpe_irq, mask); 705 - desc->chip->enable(ia64_cpe_irq); 702 + if (data && data->chip) { 703 + data->chip->irq_disable(data); 704 + data->chip->irq_set_affinity(data, mask, false); 705 + data->chip->irq_enable(data); 706 706 printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); 707 707 } 708 708 } 709 - if (!desc) { 709 + if (!data) { 710 710 printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); 711 711 retval = -EBUSY; 712 712 }
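Hand-retargeting an interrupt, as migrate_platform_irqs() does above, now goes through irq_get_irq_data() and the chip's irq_* methods rather than indexing the irq_desc array. A condensed sketch of that sequence, error handling mostly elided:

    #include <linux/errno.h>
    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static int example_retarget_irq(unsigned int irq, const struct cpumask *mask)
    {
            struct irq_data *data = irq_get_irq_data(irq);

            if (!data || !data->chip)
                    return -EBUSY;

            /* Same disable/set_affinity/enable dance, driven via irq_data. */
            data->chip->irq_disable(data);
            data->chip->irq_set_affinity(data, mask, false);
            data->chip->irq_enable(data);
            return 0;
    }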
+28 -69
arch/ia64/sn/kernel/irq.c
··· 23 23 #include <asm/sn/sn_sal.h> 24 24 #include <asm/sn/sn_feature_sets.h> 25 25 26 - static void force_interrupt(int irq); 27 26 static void register_intr_pda(struct sn_irq_info *sn_irq_info); 28 27 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); 29 28 30 - int sn_force_interrupt_flag = 1; 31 29 extern int sn_ioif_inited; 32 30 struct list_head **sn_irq_lh; 33 31 static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ ··· 76 78 return ret_stuff.status; 77 79 } 78 80 79 - static unsigned int sn_startup_irq(unsigned int irq) 81 + static unsigned int sn_startup_irq(struct irq_data *data) 80 82 { 81 83 return 0; 82 84 } 83 85 84 - static void sn_shutdown_irq(unsigned int irq) 86 + static void sn_shutdown_irq(struct irq_data *data) 85 87 { 86 88 } 87 89 88 90 extern void ia64_mca_register_cpev(int); 89 91 90 - static void sn_disable_irq(unsigned int irq) 92 + static void sn_disable_irq(struct irq_data *data) 91 93 { 92 - if (irq == local_vector_to_irq(IA64_CPE_VECTOR)) 94 + if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR)) 93 95 ia64_mca_register_cpev(0); 94 96 } 95 97 96 - static void sn_enable_irq(unsigned int irq) 98 + static void sn_enable_irq(struct irq_data *data) 97 99 { 98 - if (irq == local_vector_to_irq(IA64_CPE_VECTOR)) 99 - ia64_mca_register_cpev(irq); 100 + if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR)) 101 + ia64_mca_register_cpev(data->irq); 100 102 } 101 103 102 - static void sn_ack_irq(unsigned int irq) 104 + static void sn_ack_irq(struct irq_data *data) 103 105 { 104 106 u64 event_occurred, mask; 107 + unsigned int irq = data->irq & 0xff; 105 108 106 - irq = irq & 0xff; 107 109 event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); 108 110 mask = event_occurred & SH_ALL_INT_MASK; 109 111 HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask); 110 112 __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); 111 113 112 - move_native_irq(irq); 113 - } 114 - 115 - static void sn_end_irq(unsigned int irq) 116 - { 117 - int ivec; 118 - u64 event_occurred; 119 - 120 - ivec = irq & 0xff; 121 - if (ivec == SGI_UART_VECTOR) { 122 - event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED)); 123 - /* If the UART bit is set here, we may have received an 124 - * interrupt from the UART that the driver missed. To 125 - * make sure, we IPI ourselves to force us to look again. 
126 - */ 127 - if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { 128 - platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, 129 - IA64_IPI_DM_INT, 0); 130 - } 131 - } 132 - __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs); 133 - if (sn_force_interrupt_flag) 134 - force_interrupt(irq); 114 + irq_move_irq(data); 135 115 } 136 116 137 117 static void sn_irq_info_free(struct rcu_head *head); ··· 204 228 return new_irq_info; 205 229 } 206 230 207 - static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) 231 + static int sn_set_affinity_irq(struct irq_data *data, 232 + const struct cpumask *mask, bool force) 208 233 { 209 234 struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; 235 + unsigned int irq = data->irq; 210 236 nasid_t nasid; 211 237 int slice; 212 238 ··· 237 259 #endif 238 260 239 261 static void 240 - sn_mask_irq(unsigned int irq) 262 + sn_mask_irq(struct irq_data *data) 241 263 { 242 264 } 243 265 244 266 static void 245 - sn_unmask_irq(unsigned int irq) 267 + sn_unmask_irq(struct irq_data *data) 246 268 { 247 269 } 248 270 249 271 struct irq_chip irq_type_sn = { 250 - .name = "SN hub", 251 - .startup = sn_startup_irq, 252 - .shutdown = sn_shutdown_irq, 253 - .enable = sn_enable_irq, 254 - .disable = sn_disable_irq, 255 - .ack = sn_ack_irq, 256 - .end = sn_end_irq, 257 - .mask = sn_mask_irq, 258 - .unmask = sn_unmask_irq, 259 - .set_affinity = sn_set_affinity_irq 272 + .name = "SN hub", 273 + .irq_startup = sn_startup_irq, 274 + .irq_shutdown = sn_shutdown_irq, 275 + .irq_enable = sn_enable_irq, 276 + .irq_disable = sn_disable_irq, 277 + .irq_ack = sn_ack_irq, 278 + .irq_mask = sn_mask_irq, 279 + .irq_unmask = sn_unmask_irq, 280 + .irq_set_affinity = sn_set_affinity_irq 260 281 }; 261 282 262 283 ia64_vector sn_irq_to_vector(int irq) ··· 273 296 void sn_irq_init(void) 274 297 { 275 298 int i; 276 - struct irq_desc *base_desc = irq_desc; 277 299 278 300 ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; 279 301 ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; 280 302 281 303 for (i = 0; i < NR_IRQS; i++) { 282 - if (base_desc[i].chip == &no_irq_chip) { 283 - base_desc[i].chip = &irq_type_sn; 284 - } 304 + if (irq_get_chip(i) == &no_irq_chip) 305 + irq_set_chip(i, &irq_type_sn); 285 306 } 286 307 } 287 308 ··· 353 378 int cpu = nasid_slice_to_cpuid(nasid, slice); 354 379 #ifdef CONFIG_SMP 355 380 int cpuphys; 356 - struct irq_desc *desc; 357 381 #endif 358 382 359 383 pci_dev_get(pci_dev); ··· 369 395 #ifdef CONFIG_SMP 370 396 cpuphys = cpu_physical_id(cpu); 371 397 set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0); 372 - desc = irq_to_desc(sn_irq_info->irq_irq); 373 398 /* 374 399 * Affinity was set by the PROM, prevent it from 375 400 * being reset by the request_irq() path. 
376 401 */ 377 - desc->status |= IRQ_AFFINITY_SET; 402 + irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq)); 378 403 #endif 379 404 } 380 405 ··· 412 439 pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; 413 440 414 441 /* Don't force an interrupt if the irq has been disabled */ 415 - if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) && 442 + if (!irqd_irq_disabled(sn_irq_info->irq_irq) && 416 443 pci_provider && pci_provider->force_interrupt) 417 444 (*pci_provider->force_interrupt)(sn_irq_info); 418 - } 419 - 420 - static void force_interrupt(int irq) 421 - { 422 - struct sn_irq_info *sn_irq_info; 423 - 424 - if (!sn_ioif_inited) 425 - return; 426 - 427 - rcu_read_lock(); 428 - list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) 429 - sn_call_force_intr_provider(sn_irq_info); 430 - 431 - rcu_read_unlock(); 432 445 } 433 446 434 447 /*
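With the .end callback gone, the SN hub code folds its completion work into the flow-handler model, and pending affinity changes are applied from the ack path via irq_move_irq(), which replaces the old move_native_irq(irq). An illustrative ack in the new style, with the register write stubbed:

    #include <linux/irq.h>

    static void example_clear_pending(unsigned int vec)
    {
            /* stub: write the controller's event-occurred alias register */
    }

    static void example_ack(struct irq_data *data)
    {
            example_clear_pending(data->irq & 0xff);
            /* Deferred affinity changes run from the ack path. */
            irq_move_irq(data);
    }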
+16 -16
arch/ia64/sn/kernel/msi_sn.c
··· 144 144 */ 145 145 msg.data = 0x100 + irq; 146 146 147 - set_irq_msi(irq, entry); 147 + irq_set_msi_desc(irq, entry); 148 148 write_msi_msg(irq, &msg); 149 - set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 149 + irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 150 150 151 151 return 0; 152 152 } 153 153 154 154 #ifdef CONFIG_SMP 155 - static int sn_set_msi_irq_affinity(unsigned int irq, 156 - const struct cpumask *cpu_mask) 155 + static int sn_set_msi_irq_affinity(struct irq_data *data, 156 + const struct cpumask *cpu_mask, bool force) 157 157 { 158 158 struct msi_msg msg; 159 159 int slice; ··· 164 164 struct sn_irq_info *sn_irq_info; 165 165 struct sn_irq_info *new_irq_info; 166 166 struct sn_pcibus_provider *provider; 167 - unsigned int cpu; 167 + unsigned int cpu, irq = data->irq; 168 168 169 169 cpu = cpumask_first(cpu_mask); 170 170 sn_irq_info = sn_msi_info[irq].sn_irq_info; ··· 206 206 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); 207 207 208 208 write_msi_msg(irq, &msg); 209 - cpumask_copy(irq_desc[irq].affinity, cpu_mask); 209 + cpumask_copy(data->affinity, cpu_mask); 210 210 211 211 return 0; 212 212 } 213 213 #endif /* CONFIG_SMP */ 214 214 215 - static void sn_ack_msi_irq(unsigned int irq) 215 + static void sn_ack_msi_irq(struct irq_data *data) 216 216 { 217 - move_native_irq(irq); 217 + irq_move_irq(data); 218 218 ia64_eoi(); 219 219 } 220 220 221 - static int sn_msi_retrigger_irq(unsigned int irq) 221 + static int sn_msi_retrigger_irq(struct irq_data *data) 222 222 { 223 - unsigned int vector = irq; 223 + unsigned int vector = data->irq; 224 224 ia64_resend_irq(vector); 225 225 226 226 return 1; 227 227 } 228 228 229 229 static struct irq_chip sn_msi_chip = { 230 - .name = "PCI-MSI", 231 - .irq_mask = mask_msi_irq, 232 - .irq_unmask = unmask_msi_irq, 233 - .ack = sn_ack_msi_irq, 230 + .name = "PCI-MSI", 231 + .irq_mask = mask_msi_irq, 232 + .irq_unmask = unmask_msi_irq, 233 + .irq_ack = sn_ack_msi_irq, 234 234 #ifdef CONFIG_SMP 235 - .set_affinity = sn_set_msi_irq_affinity, 235 + .irq_set_affinity = sn_set_msi_irq_affinity, 236 236 #endif 237 - .retrigger = sn_msi_retrigger_irq, 237 + .irq_retrigger = sn_msi_retrigger_irq, 238 238 };
+1 -3
arch/ia64/xen/irq_xen.c
··· 138 138 __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, 139 139 struct irqaction *action, int save) 140 140 { 141 - struct irq_desc *desc; 142 141 int irq = 0; 143 142 144 143 if (xen_slab_ready) { ··· 222 223 * mark the interrupt for migrations and trigger it 223 224 * on cpu hotplug. 224 225 */ 225 - desc = irq_desc + irq; 226 - desc->status |= IRQ_PER_CPU; 226 + irq_set_status_flags(irq, IRQ_PER_CPU); 227 227 } 228 228 } 229 229
-1
arch/m32r/Kconfig
··· 8 8 select HAVE_KERNEL_BZIP2 9 9 select HAVE_KERNEL_LZMA 10 10 select HAVE_GENERIC_HARDIRQS 11 - select GENERIC_HARDIRQS_NO_DEPRECATED 12 11 select GENERIC_IRQ_PROBE 13 12 select GENERIC_IRQ_SHOW 14 13
-1
arch/m68k/Kconfig
··· 5 5 select HAVE_AOUT if MMU 6 6 select GENERIC_ATOMIC64 if MMU 7 7 select HAVE_GENERIC_HARDIRQS if !MMU 8 - select GENERIC_HARDIRQS_NO_DEPRECATED if !MMU 9 8 10 9 config RWSEM_GENERIC_SPINLOCK 11 10 bool
+1 -1
arch/m68k/kernel/irq.c
··· 44 44 if (ap) { 45 45 seq_printf(p, "%3d: ", irq); 46 46 seq_printf(p, "%10u ", kstat_irqs(irq)); 47 - seq_printf(p, "%14s ", get_irq_desc_chip(desc)->name); 47 + seq_printf(p, "%14s ", irq_desc_get_chip(desc)->name); 48 48 49 49 seq_printf(p, "%s", ap->name); 50 50 for (ap = ap->next; ap; ap = ap->next)
+2 -2
arch/m68k/platform/5249/intc2.c
··· 51 51 52 52 /* GPIO interrupt sources */ 53 53 for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { 54 - set_irq_chip(irq, &intc2_irq_gpio_chip); 55 - set_irq_handler(irq, handle_edge_irq); 54 + irq_set_chip(irq, &intc2_irq_gpio_chip); 55 + irq_set_handler(irq, handle_edge_irq); 56 56 } 57 57 58 58 return 0;
+6 -6
arch/m68k/platform/5272/intc.c
··· 145 145 */ 146 146 static void intc_external_irq(unsigned int irq, struct irq_desc *desc) 147 147 { 148 - get_irq_desc_chip(desc)->irq_ack(&desc->irq_data); 148 + irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); 149 149 handle_simple_irq(irq, desc); 150 150 } 151 151 ··· 171 171 writel(0x88888888, MCF_MBAR + MCFSIM_ICR4); 172 172 173 173 for (irq = 0; (irq < NR_IRQS); irq++) { 174 - set_irq_chip(irq, &intc_irq_chip); 174 + irq_set_chip(irq, &intc_irq_chip); 175 175 edge = 0; 176 176 if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) 177 177 edge = intc_irqmap[irq - MCFINT_VECBASE].ack; 178 178 if (edge) { 179 - set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 180 - set_irq_handler(irq, intc_external_irq); 179 + irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 180 + irq_set_handler(irq, intc_external_irq); 181 181 } else { 182 - set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 183 - set_irq_handler(irq, handle_level_irq); 182 + irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 183 + irq_set_handler(irq, handle_level_irq); 184 184 } 185 185 } 186 186 }
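The m68k platform files in this stretch all follow one per-IRQ setup pattern: install the chip, program the trigger, then pick the flow handler that matches it. A condensed sketch of the loop shape, with the board-specific edge test stubbed out:

    #include <linux/irq.h>

    static struct irq_chip example_intc_chip;   /* callbacks filled in elsewhere */

    static bool example_is_edge(unsigned int irq)
    {
            return false;   /* stub: consult the board's irq map */
    }

    static void __init example_init_irqs(unsigned int first, unsigned int last)
    {
            unsigned int irq;

            for (irq = first; irq <= last; irq++) {
                    irq_set_chip(irq, &example_intc_chip);
                    if (example_is_edge(irq)) {
                            irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
                            irq_set_handler(irq, handle_edge_irq);
                    } else {
                            irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
                            irq_set_handler(irq, handle_level_irq);
                    }
            }
    }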
+2 -2
arch/m68k/platform/68328/ints.c
··· 179 179 IMR = ~0; 180 180 181 181 for (i = 0; (i < NR_IRQS); i++) { 182 - set_irq_chip(i, &intc_irq_chip); 183 - set_irq_handler(i, handle_level_irq); 182 + irq_set_chip(i, &intc_irq_chip); 183 + irq_set_handler(i, handle_level_irq); 184 184 } 185 185 } 186 186
+2 -2
arch/m68k/platform/68360/ints.c
··· 132 132 pquicc->intr_cimr = 0x00000000; 133 133 134 134 for (i = 0; (i < NR_IRQS); i++) { 135 - set_irq_chip(i, &intc_irq_chip); 136 - set_irq_handler(i, handle_level_irq); 135 + irq_set_chip(i, &intc_irq_chip); 136 + irq_set_handler(i, handle_level_irq); 137 137 } 138 138 } 139 139
+5 -5
arch/m68k/platform/coldfire/intc-2.c
··· 164 164 } 165 165 166 166 if (tb) 167 - set_irq_handler(irq, handle_edge_irq); 167 + irq_set_handler(irq, handle_edge_irq); 168 168 169 169 irq -= EINT0; 170 170 pa = __raw_readw(MCFEPORT_EPPAR); ··· 204 204 205 205 for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) { 206 206 if ((irq >= EINT1) && (irq <=EINT7)) 207 - set_irq_chip(irq, &intc_irq_chip_edge_port); 207 + irq_set_chip(irq, &intc_irq_chip_edge_port); 208 208 else 209 - set_irq_chip(irq, &intc_irq_chip); 210 - set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 211 - set_irq_handler(irq, handle_level_irq); 209 + irq_set_chip(irq, &intc_irq_chip); 210 + irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 211 + irq_set_handler(irq, handle_level_irq); 212 212 } 213 213 } 214 214
+5 -5
arch/m68k/platform/coldfire/intc-simr.c
··· 141 141 } 142 142 143 143 if (tb) 144 - set_irq_handler(irq, handle_edge_irq); 144 + irq_set_handler(irq, handle_edge_irq); 145 145 146 146 ebit = irq2ebit(irq) * 2; 147 147 pa = __raw_readw(MCFEPORT_EPPAR); ··· 181 181 eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0); 182 182 for (irq = MCFINT_VECBASE; (irq < eirq); irq++) { 183 183 if ((irq >= EINT1) && (irq <= EINT7)) 184 - set_irq_chip(irq, &intc_irq_chip_edge_port); 184 + irq_set_chip(irq, &intc_irq_chip_edge_port); 185 185 else 186 - set_irq_chip(irq, &intc_irq_chip); 187 - set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 188 - set_irq_handler(irq, handle_level_irq); 186 + irq_set_chip(irq, &intc_irq_chip); 187 + irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 188 + irq_set_handler(irq, handle_level_irq); 189 189 } 190 190 } 191 191
+3 -3
arch/m68k/platform/coldfire/intc.c
··· 143 143 mcf_maskimr(0xffffffff); 144 144 145 145 for (irq = 0; (irq < NR_IRQS); irq++) { 146 - set_irq_chip(irq, &intc_irq_chip); 147 - set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 148 - set_irq_handler(irq, handle_level_irq); 146 + irq_set_chip(irq, &intc_irq_chip); 147 + irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); 148 + irq_set_handler(irq, handle_level_irq); 149 149 } 150 150 } 151 151
+1 -1
arch/microblaze/Kconfig
··· 17 17 select OF_EARLY_FLATTREE 18 18 select HAVE_GENERIC_HARDIRQS 19 19 select GENERIC_IRQ_PROBE 20 - select GENERIC_HARDIRQS_NO_DEPRECATED 20 + select GENERIC_IRQ_SHOW 21 21 22 22 config SWAP 23 23 def_bool n
+3 -3
arch/microblaze/kernel/intc.c
··· 50 50 * ack function since the handle_level_irq function 51 51 * acks the irq before calling the interrupt handler 52 52 */ 53 - if (irq_to_desc(d->irq)->status & IRQ_LEVEL) 53 + if (irqd_is_level_type(d)) 54 54 out_be32(INTC_BASE + IAR, mask); 55 55 } 56 56 ··· 157 157 158 158 for (i = 0; i < nr_irq; ++i) { 159 159 if (intr_type & (0x00000001 << i)) { 160 - set_irq_chip_and_handler_name(i, &intc_dev, 160 + irq_set_chip_and_handler_name(i, &intc_dev, 161 161 handle_edge_irq, intc_dev.name); 162 162 irq_clear_status_flags(i, IRQ_LEVEL); 163 163 } else { 164 - set_irq_chip_and_handler_name(i, &intc_dev, 164 + irq_set_chip_and_handler_name(i, &intc_dev, 165 165 handle_level_irq, intc_dev.name); 166 166 irq_set_status_flags(i, IRQ_LEVEL); 167 167 }
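irqd_is_level_type() reads the trigger class back from the irq_data flags that irq_set_status_flags(i, IRQ_LEVEL) established at init, replacing the direct desc->status test. An illustrative mask-and-ack built on stubbed register writes:

    #include <linux/irq.h>

    static void example_hw_mask(unsigned int irq) { /* stub: set mask bit */ }
    static void example_hw_ack(unsigned int irq)  { /* stub: IAR-style write */ }

    static void example_mask_ack(struct irq_data *d)
    {
            example_hw_mask(d->irq);
            /*
             * handle_level_irq() acks before running the action, so the
             * ack is only issued here for level-typed lines.
             */
            if (irqd_is_level_type(d))
                    example_hw_ack(d->irq);
    }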
-42
arch/microblaze/kernel/irq.c
··· 47 47 trace_hardirqs_on(); 48 48 } 49 49 50 - int show_interrupts(struct seq_file *p, void *v) 51 - { 52 - int i = *(loff_t *) v, j; 53 - struct irq_desc *desc; 54 - struct irqaction *action; 55 - unsigned long flags; 56 - 57 - if (i == 0) { 58 - seq_printf(p, " "); 59 - for_each_online_cpu(j) 60 - seq_printf(p, "CPU%-8d", j); 61 - seq_putc(p, '\n'); 62 - } 63 - 64 - if (i < nr_irq) { 65 - desc = irq_to_desc(i); 66 - raw_spin_lock_irqsave(&desc->lock, flags); 67 - action = desc->action; 68 - if (!action) 69 - goto skip; 70 - seq_printf(p, "%3d: ", i); 71 - #ifndef CONFIG_SMP 72 - seq_printf(p, "%10u ", kstat_irqs(i)); 73 - #else 74 - for_each_online_cpu(j) 75 - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 76 - #endif 77 - seq_printf(p, " %8s", desc->status & 78 - IRQ_LEVEL ? "level" : "edge"); 79 - seq_printf(p, " %8s", desc->irq_data.chip->name); 80 - seq_printf(p, " %s", action->name); 81 - 82 - for (action = action->next; action; action = action->next) 83 - seq_printf(p, ", %s", action->name); 84 - 85 - seq_putc(p, '\n'); 86 - skip: 87 - raw_spin_unlock_irqrestore(&desc->lock, flags); 88 - } 89 - return 0; 90 - } 91 - 92 50 /* MS: There is no any advance mapping mechanism. We are using simple 32bit 93 51 intc without any cascades or any connection that's why mapping is 1:1 */ 94 52 unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
+1 -1
arch/microblaze/pci/pci-common.c
··· 237 237 238 238 virq = irq_create_mapping(NULL, line); 239 239 if (virq != NO_IRQ) 240 - set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 240 + irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 241 241 } else { 242 242 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 243 243 oirq.size, oirq.specifier[0], oirq.specifier[1],
+3 -3
arch/mips/alchemy/devboards/bcsr.c
··· 142 142 bcsr_csc_base = csc_start; 143 143 144 144 for (irq = csc_start; irq <= csc_end; irq++) 145 - set_irq_chip_and_handler_name(irq, &bcsr_irq_type, 146 - handle_level_irq, "level"); 145 + irq_set_chip_and_handler_name(irq, &bcsr_irq_type, 146 + handle_level_irq, "level"); 147 147 148 - set_irq_chained_handler(hook_irq, bcsr_csc_handler); 148 + irq_set_chained_handler(hook_irq, bcsr_csc_handler); 149 149 }
+7 -8
arch/mips/alchemy/devboards/db1200/setup.c
··· 63 63 static int __init db1200_arch_init(void) 64 64 { 65 65 /* GPIO7 is low-level triggered CPLD cascade */ 66 - set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); 66 + irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); 67 67 bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT); 68 68 69 69 /* insert/eject pairs: one of both is always screaming. To avoid 70 70 * issues they must not be automatically enabled when initially 71 71 * requested. 72 72 */ 73 - irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; 74 - irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; 75 - irq_to_desc(DB1200_PC0_INSERT_INT)->status |= IRQ_NOAUTOEN; 76 - irq_to_desc(DB1200_PC0_EJECT_INT)->status |= IRQ_NOAUTOEN; 77 - irq_to_desc(DB1200_PC1_INSERT_INT)->status |= IRQ_NOAUTOEN; 78 - irq_to_desc(DB1200_PC1_EJECT_INT)->status |= IRQ_NOAUTOEN; 79 - 73 + irq_set_status_flags(DB1200_SD0_INSERT_INT, IRQ_NOAUTOEN); 74 + irq_set_status_flags(DB1200_SD0_EJECT_INT, IRQ_NOAUTOEN); 75 + irq_set_status_flags(DB1200_PC0_INSERT_INT, IRQ_NOAUTOEN); 76 + irq_set_status_flags(DB1200_PC0_EJECT_INT, IRQ_NOAUTOEN); 77 + irq_set_status_flags(DB1200_PC1_INSERT_INT, IRQ_NOAUTOEN); 78 + irq_set_status_flags(DB1200_PC1_EJECT_INT, IRQ_NOAUTOEN); 80 79 return 0; 81 80 } 82 81 arch_initcall(db1200_arch_init);
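irq_set_status_flags(irq, IRQ_NOAUTOEN) replaces the open-coded irq_to_desc(...)->status writes for lines that must stay masked across request_irq(); the driver then unmasks explicitly once it is ready. A hedged usage sketch with a placeholder handler and name:

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static irqreturn_t example_detect_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;     /* placeholder handler */
    }

    static int __init example_request_quiet_irq(unsigned int irq)
    {
            int ret;

            /* Keep the line masked across request_irq()... */
            irq_set_status_flags(irq, IRQ_NOAUTOEN);
            ret = request_irq(irq, example_detect_isr, 0, "example-detect", NULL);
            if (ret)
                    return ret;
            /* ...and unmask only once the hardware is quiesced. */
            enable_irq(irq);
            return 0;
    }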
+25 -25
arch/mips/alchemy/devboards/db1x00/board_setup.c
··· 215 215 static int __init db1x00_init_irq(void) 216 216 { 217 217 #if defined(CONFIG_MIPS_MIRAGE) 218 - set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */ 218 + irq_set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */ 219 219 #elif defined(CONFIG_MIPS_DB1550) 220 - set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 221 - set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */ 222 - set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 223 - set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 224 - set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 225 - set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 220 + irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 221 + irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */ 222 + irq_set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 223 + irq_set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 224 + irq_set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 225 + irq_set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 226 226 #elif defined(CONFIG_MIPS_DB1500) 227 - set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 228 - set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 229 - set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 230 - set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 231 - set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 232 - set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 227 + irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 228 + irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 229 + irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 230 + irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 231 + irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 232 + irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 233 233 #elif defined(CONFIG_MIPS_DB1100) 234 - set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 235 - set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 236 - set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 237 - set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 238 - set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 239 - set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 234 + irq_set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 235 + irq_set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 236 + irq_set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 237 + irq_set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 238 + irq_set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 239 + irq_set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 240 240 #elif defined(CONFIG_MIPS_DB1000) 241 - set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 242 - set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 243 - set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 244 - set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 245 - set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 246 - set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 241 + irq_set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 242 + irq_set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 243 + irq_set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 244 + irq_set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 245 + irq_set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 246 + irq_set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 247 247 #endif 248 248 return 0; 249 249 }
+1 -1
arch/mips/alchemy/devboards/pb1000/board_setup.c
··· 197 197 198 198 static int __init pb1000_init_irq(void) 199 199 { 200 - set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW); 200 + irq_set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW); 201 201 return 0; 202 202 } 203 203 arch_initcall(pb1000_init_irq);
+4 -4
arch/mips/alchemy/devboards/pb1100/board_setup.c
··· 117 117 118 118 static int __init pb1100_init_irq(void) 119 119 { 120 - set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */ 121 - set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */ 122 - set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */ 123 - set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */ 120 + irq_set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */ 121 + irq_set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */ 122 + irq_set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */ 123 + irq_set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */ 124 124 125 125 return 0; 126 126 }
+1 -1
arch/mips/alchemy/devboards/pb1200/board_setup.c
··· 142 142 panic("Game over. Your score is 0."); 143 143 } 144 144 145 - set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); 145 + irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); 146 146 bcsr_init_irq(PB1200_INT_BEGIN, PB1200_INT_END, AU1200_GPIO7_INT); 147 147 148 148 return 0;
+8 -8
arch/mips/alchemy/devboards/pb1500/board_setup.c
··· 134 134 135 135 static int __init pb1500_init_irq(void) 136 136 { 137 - set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */ 138 - set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */ 139 - set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 140 - set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 141 - set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 142 - set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 143 - set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 144 - set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 137 + irq_set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */ 138 + irq_set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */ 139 + irq_set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 140 + irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 141 + irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 142 + irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 143 + irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 144 + irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 145 145 146 146 return 0; 147 147 }
+3 -3
arch/mips/alchemy/devboards/pb1550/board_setup.c
··· 73 73 74 74 static int __init pb1550_init_irq(void) 75 75 { 76 - set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); 77 - set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); 78 - set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH); 76 + irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); 77 + irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); 78 + irq_set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH); 79 79 80 80 /* enable both PCMCIA card irqs in the shared line */ 81 81 alchemy_gpio2_enable_int(201);
+5 -5
arch/mips/alchemy/mtx-1/board_setup.c
··· 123 123 124 124 static int __init mtx1_init_irq(void) 125 125 { 126 - set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 127 - set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 128 - set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 129 - set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 130 - set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 126 + irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 127 + irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 128 + irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 129 + irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 130 + irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 131 131 132 132 return 0; 133 133 }
+12 -12
arch/mips/alchemy/xxs1500/board_setup.c
··· 85 85 86 86 static int __init xxs1500_init_irq(void) 87 87 { 88 - set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 89 - set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 90 - set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 91 - set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 92 - set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 93 - set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW); 88 + irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 89 + irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 90 + irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 91 + irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 92 + irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 93 + irq_set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW); 94 94 95 - set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); 96 - set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); 97 - set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); 98 - set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); 99 - set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */ 100 - set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); 95 + irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); 96 + irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); 97 + irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); 98 + irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); 99 + irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */ 100 + irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); 101 101 102 102 return 0; 103 103 }
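The Alchemy board conversions above are mechanical renames of set_irq_type() to irq_set_irq_type() with unchanged semantics: program the trigger before any driver requests the line. For reference, a minimal sketch with a placeholder IRQ number:

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    #define EXAMPLE_GPIO_INT 7      /* placeholder IRQ number */

    static int __init example_board_init_irq(void)
    {
            /* Program active-low triggering before drivers request it. */
            irq_set_irq_type(EXAMPLE_GPIO_INT, IRQF_TRIGGER_LOW);
            return 0;
    }
    arch_initcall(example_board_init_irq);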
+2 -2
arch/mips/ar7/irq.c
··· 119 119 for (i = 0; i < 40; i++) { 120 120 writel(i, REG(CHNL_OFFSET(i))); 121 121 /* Primary IRQ's */ 122 - set_irq_chip_and_handler(base + i, &ar7_irq_type, 122 + irq_set_chip_and_handler(base + i, &ar7_irq_type, 123 123 handle_level_irq); 124 124 /* Secondary IRQ's */ 125 125 if (i < 32) 126 - set_irq_chip_and_handler(base + i + 40, 126 + irq_set_chip_and_handler(base + i + 40, 127 127 &ar7_sec_irq_type, 128 128 handle_level_irq); 129 129 }
+2 -2
arch/mips/ath79/irq.c
··· 124 124 125 125 for (i = ATH79_MISC_IRQ_BASE; 126 126 i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) { 127 - set_irq_chip_and_handler(i, &ath79_misc_irq_chip, 127 + irq_set_chip_and_handler(i, &ath79_misc_irq_chip, 128 128 handle_level_irq); 129 129 } 130 130 131 - set_irq_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler); 131 + irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler); 132 132 } 133 133 134 134 asmlinkage void plat_irq_dispatch(void)
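irq_set_chained_handler() attaches a demultiplexing flow handler to the parent line; the handler decodes the controller's status register and forwards each set bit with generic_handle_irq(). A minimal demux sketch under assumed placeholder IRQ numbers, with the status read stubbed:

    #include <linux/init.h>
    #include <linux/bitops.h>
    #include <linux/irq.h>

    #define EXAMPLE_DEMUX_BASE      16      /* placeholder sub-IRQ range */
    #define EXAMPLE_DEMUX_COUNT     32

    static u32 example_read_pending(void)
    {
            return 0;       /* stub: status register AND'ed with enable mask */
    }

    static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
    {
            u32 pending = example_read_pending();

            while (pending) {
                    int bit = __ffs(pending);

                    generic_handle_irq(EXAMPLE_DEMUX_BASE + bit);
                    pending &= ~BIT(bit);
            }
    }

    static void __init example_demux_init(unsigned int parent_irq)
    {
            int i;

            for (i = 0; i < EXAMPLE_DEMUX_COUNT; i++)
                    irq_set_chip_and_handler(EXAMPLE_DEMUX_BASE + i,
                                             &dummy_irq_chip, handle_level_irq);
            irq_set_chained_handler(parent_irq, example_demux_handler);
    }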
+2 -2
arch/mips/bcm63xx/irq.c
··· 230 230 231 231 mips_cpu_irq_init(); 232 232 for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i) 233 - set_irq_chip_and_handler(i, &bcm63xx_internal_irq_chip, 233 + irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip, 234 234 handle_level_irq); 235 235 236 236 for (i = IRQ_EXT_BASE; i < IRQ_EXT_BASE + 4; ++i) 237 - set_irq_chip_and_handler(i, &bcm63xx_external_irq_chip, 237 + irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip, 238 238 handle_edge_irq); 239 239 240 240 setup_irq(IRQ_MIPS_BASE + 2, &cpu_ip2_cascade_action);
+852 -681
arch/mips/cavium-octeon/octeon-irq.c
··· 3 3 * License. See the file "COPYING" in the main directory of this archive 4 4 * for more details. 5 5 * 6 - * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks 6 + * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks 7 7 */ 8 - #include <linux/irq.h> 8 + 9 9 #include <linux/interrupt.h> 10 + #include <linux/bitops.h> 11 + #include <linux/percpu.h> 12 + #include <linux/irq.h> 10 13 #include <linux/smp.h> 11 14 12 15 #include <asm/octeon/octeon.h> 13 16 14 17 static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); 15 18 static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); 19 + 20 + static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); 21 + static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); 22 + 23 + static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; 24 + 25 + union octeon_ciu_chip_data { 26 + void *p; 27 + unsigned long l; 28 + struct { 29 + unsigned int line:6; 30 + unsigned int bit:6; 31 + } s; 32 + }; 33 + 34 + struct octeon_core_chip_data { 35 + struct mutex core_irq_mutex; 36 + bool current_en; 37 + bool desired_en; 38 + u8 bit; 39 + }; 40 + 41 + #define MIPS_CORE_IRQ_LINES 8 42 + 43 + static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; 44 + 45 + static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit, 46 + struct irq_chip *chip, 47 + irq_flow_handler_t handler) 48 + { 49 + union octeon_ciu_chip_data cd; 50 + 51 + irq_set_chip_and_handler(irq, chip, handler); 52 + 53 + cd.l = 0; 54 + cd.s.line = line; 55 + cd.s.bit = bit; 56 + 57 + irq_set_chip_data(irq, cd.p); 58 + octeon_irq_ciu_to_irq[line][bit] = irq; 59 + } 16 60 17 61 static int octeon_coreid_for_cpu(int cpu) 18 62 { ··· 67 23 #endif 68 24 } 69 25 70 - static void octeon_irq_core_ack(unsigned int irq) 26 + static int octeon_cpu_for_coreid(int coreid) 71 27 { 72 - unsigned int bit = irq - OCTEON_IRQ_SW0; 28 + #ifdef CONFIG_SMP 29 + return cpu_number_map(coreid); 30 + #else 31 + return smp_processor_id(); 32 + #endif 33 + } 34 + 35 + static void octeon_irq_core_ack(struct irq_data *data) 36 + { 37 + struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); 38 + unsigned int bit = cd->bit; 39 + 73 40 /* 74 41 * We don't need to disable IRQs to make these atomic since 75 42 * they are already disabled earlier in the low level ··· 92 37 clear_c0_cause(0x100 << bit); 93 38 } 94 39 95 - static void octeon_irq_core_eoi(unsigned int irq) 40 + static void octeon_irq_core_eoi(struct irq_data *data) 96 41 { 97 - struct irq_desc *desc = irq_to_desc(irq); 98 - unsigned int bit = irq - OCTEON_IRQ_SW0; 99 - /* 100 - * If an IRQ is being processed while we are disabling it the 101 - * handler will attempt to unmask the interrupt after it has 102 - * been disabled. 103 - */ 104 - if ((unlikely(desc->status & IRQ_DISABLED))) 105 - return; 42 + struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); 43 + 106 44 /* 107 45 * We don't need to disable IRQs to make these atomic since 108 46 * they are already disabled earlier in the low level 109 47 * interrupt code. 
110 48 */ 111 - set_c0_status(0x100 << bit); 49 + set_c0_status(0x100 << cd->bit); 112 50 } 113 51 114 - static void octeon_irq_core_enable(unsigned int irq) 52 + static void octeon_irq_core_set_enable_local(void *arg) 115 53 { 116 - unsigned long flags; 117 - unsigned int bit = irq - OCTEON_IRQ_SW0; 54 + struct irq_data *data = arg; 55 + struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); 56 + unsigned int mask = 0x100 << cd->bit; 118 57 119 58 /* 120 - * We need to disable interrupts to make sure our updates are 121 - * atomic. 59 + * Interrupts are already disabled, so these are atomic. 122 60 */ 123 - local_irq_save(flags); 124 - set_c0_status(0x100 << bit); 125 - local_irq_restore(flags); 61 + if (cd->desired_en) 62 + set_c0_status(mask); 63 + else 64 + clear_c0_status(mask); 65 + 126 66 } 127 67 128 - static void octeon_irq_core_disable_local(unsigned int irq) 68 + static void octeon_irq_core_disable(struct irq_data *data) 129 69 { 130 - unsigned long flags; 131 - unsigned int bit = irq - OCTEON_IRQ_SW0; 132 - /* 133 - * We need to disable interrupts to make sure our updates are 134 - * atomic. 135 - */ 136 - local_irq_save(flags); 137 - clear_c0_status(0x100 << bit); 138 - local_irq_restore(flags); 70 + struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); 71 + cd->desired_en = false; 139 72 } 140 73 141 - static void octeon_irq_core_disable(unsigned int irq) 74 + static void octeon_irq_core_enable(struct irq_data *data) 142 75 { 143 - #ifdef CONFIG_SMP 144 - on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local, 145 - (void *) (long) irq, 1); 146 - #else 147 - octeon_irq_core_disable_local(irq); 148 - #endif 76 + struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); 77 + cd->desired_en = true; 78 + } 79 + 80 + static void octeon_irq_core_bus_lock(struct irq_data *data) 81 + { 82 + struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); 83 + 84 + mutex_lock(&cd->core_irq_mutex); 85 + } 86 + 87 + static void octeon_irq_core_bus_sync_unlock(struct irq_data *data) 88 + { 89 + struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); 90 + 91 + if (cd->desired_en != cd->current_en) { 92 + on_each_cpu(octeon_irq_core_set_enable_local, data, 1); 93 + 94 + cd->current_en = cd->desired_en; 95 + } 96 + 97 + mutex_unlock(&cd->core_irq_mutex); 149 98 } 150 99 151 100 static struct irq_chip octeon_irq_chip_core = { 152 101 .name = "Core", 153 - .enable = octeon_irq_core_enable, 154 - .disable = octeon_irq_core_disable, 155 - .ack = octeon_irq_core_ack, 156 - .eoi = octeon_irq_core_eoi, 102 + .irq_enable = octeon_irq_core_enable, 103 + .irq_disable = octeon_irq_core_disable, 104 + .irq_ack = octeon_irq_core_ack, 105 + .irq_eoi = octeon_irq_core_eoi, 106 + .irq_bus_lock = octeon_irq_core_bus_lock, 107 + .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock, 108 + 109 + .irq_cpu_online = octeon_irq_core_eoi, 110 + .irq_cpu_offline = octeon_irq_core_ack, 111 + .flags = IRQCHIP_ONOFFLINE_ENABLED, 157 112 }; 158 113 159 - 160 - static void octeon_irq_ciu0_ack(unsigned int irq) 114 + static void __init octeon_irq_init_core(void) 161 115 { 162 - switch (irq) { 163 - case OCTEON_IRQ_GMX_DRP0: 164 - case OCTEON_IRQ_GMX_DRP1: 165 - case OCTEON_IRQ_IPD_DRP: 166 - case OCTEON_IRQ_KEY_ZERO: 167 - case OCTEON_IRQ_TIMER0: 168 - case OCTEON_IRQ_TIMER1: 169 - case OCTEON_IRQ_TIMER2: 170 - case OCTEON_IRQ_TIMER3: 171 - { 172 - int index = cvmx_get_core_num() * 2; 173 - u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 174 - /* 175 - * CIU 
timer type interrupts must be acknoleged by 176 - * writing a '1' bit to their sum0 bit. 177 - */ 178 - cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); 179 - break; 180 - } 181 - default: 182 - break; 183 - } 116 + int i; 117 + int irq; 118 + struct octeon_core_chip_data *cd; 184 119 185 - /* 186 - * In order to avoid any locking accessing the CIU, we 187 - * acknowledge CIU interrupts by disabling all of them. This 188 - * way we can use a per core register and avoid any out of 189 - * core locking requirements. This has the side affect that 190 - * CIU interrupts can't be processed recursively. 191 - * 192 - * We don't need to disable IRQs to make these atomic since 193 - * they are already disabled earlier in the low level 194 - * interrupt code. 195 - */ 196 - clear_c0_status(0x100 << 2); 120 + for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) { 121 + cd = &octeon_irq_core_chip_data[i]; 122 + cd->current_en = false; 123 + cd->desired_en = false; 124 + cd->bit = i; 125 + mutex_init(&cd->core_irq_mutex); 126 + 127 + irq = OCTEON_IRQ_SW0 + i; 128 + switch (irq) { 129 + case OCTEON_IRQ_TIMER: 130 + case OCTEON_IRQ_SW0: 131 + case OCTEON_IRQ_SW1: 132 + case OCTEON_IRQ_5: 133 + case OCTEON_IRQ_PERF: 134 + irq_set_chip_data(irq, cd); 135 + irq_set_chip_and_handler(irq, &octeon_irq_chip_core, 136 + handle_percpu_irq); 137 + break; 138 + default: 139 + break; 140 + } 141 + } 197 142 } 198 143 199 - static void octeon_irq_ciu0_eoi(unsigned int irq) 200 - { 201 - /* 202 - * Enable all CIU interrupts again. We don't need to disable 203 - * IRQs to make these atomic since they are already disabled 204 - * earlier in the low level interrupt code. 205 - */ 206 - set_c0_status(0x100 << 2); 207 - } 208 - 209 - static int next_coreid_for_irq(struct irq_desc *desc) 144 + static int next_cpu_for_irq(struct irq_data *data) 210 145 { 211 146 212 147 #ifdef CONFIG_SMP 213 - int coreid; 214 - int weight = cpumask_weight(desc->affinity); 148 + int cpu; 149 + int weight = cpumask_weight(data->affinity); 215 150 216 151 if (weight > 1) { 217 - int cpu = smp_processor_id(); 152 + cpu = smp_processor_id(); 218 153 for (;;) { 219 - cpu = cpumask_next(cpu, desc->affinity); 154 + cpu = cpumask_next(cpu, data->affinity); 220 155 if (cpu >= nr_cpu_ids) { 221 156 cpu = -1; 222 157 continue; ··· 214 169 break; 215 170 } 216 171 } 217 - coreid = octeon_coreid_for_cpu(cpu); 218 172 } else if (weight == 1) { 219 - coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity)); 173 + cpu = cpumask_first(data->affinity); 220 174 } else { 221 - coreid = cvmx_get_core_num(); 175 + cpu = smp_processor_id(); 222 176 } 223 - return coreid; 177 + return cpu; 224 178 #else 225 - return cvmx_get_core_num(); 179 + return smp_processor_id(); 226 180 #endif 227 181 } 228 182 229 - static void octeon_irq_ciu0_enable(unsigned int irq) 183 + static void octeon_irq_ciu_enable(struct irq_data *data) 230 184 { 231 - struct irq_desc *desc = irq_to_desc(irq); 232 - int coreid = next_coreid_for_irq(desc); 185 + int cpu = next_cpu_for_irq(data); 186 + int coreid = octeon_coreid_for_cpu(cpu); 187 + unsigned long *pen; 233 188 unsigned long flags; 234 - uint64_t en0; 235 - int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ 189 + union octeon_ciu_chip_data cd; 236 190 237 - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 238 - en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 239 - en0 |= 1ull << bit; 240 - cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 241 - cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 242 - 
raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 243 - } 191 + cd.p = irq_data_get_irq_chip_data(data); 244 192 245 - static void octeon_irq_ciu0_enable_mbox(unsigned int irq) 246 - { 247 - int coreid = cvmx_get_core_num(); 248 - unsigned long flags; 249 - uint64_t en0; 250 - int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ 251 - 252 - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 253 - en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 254 - en0 |= 1ull << bit; 255 - cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 256 - cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 257 - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 258 - } 259 - 260 - static void octeon_irq_ciu0_disable(unsigned int irq) 261 - { 262 - int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ 263 - unsigned long flags; 264 - uint64_t en0; 265 - int cpu; 266 - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 267 - for_each_online_cpu(cpu) { 268 - int coreid = octeon_coreid_for_cpu(cpu); 269 - en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 270 - en0 &= ~(1ull << bit); 271 - cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 193 + if (cd.s.line == 0) { 194 + raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 195 + pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 196 + set_bit(cd.s.bit, pen); 197 + cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 198 + raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 199 + } else { 200 + raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 201 + pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 202 + set_bit(cd.s.bit, pen); 203 + cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); 204 + raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 272 205 } 273 - /* 274 - * We need to do a read after the last update to make sure all 275 - * of them are done. 
276 - */ 277 - cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); 278 - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 206 + } 207 + 208 + static void octeon_irq_ciu_enable_local(struct irq_data *data) 209 + { 210 + unsigned long *pen; 211 + unsigned long flags; 212 + union octeon_ciu_chip_data cd; 213 + 214 + cd.p = irq_data_get_irq_chip_data(data); 215 + 216 + if (cd.s.line == 0) { 217 + raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 218 + pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); 219 + set_bit(cd.s.bit, pen); 220 + cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 221 + raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 222 + } else { 223 + raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 224 + pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); 225 + set_bit(cd.s.bit, pen); 226 + cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); 227 + raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 228 + } 229 + } 230 + 231 + static void octeon_irq_ciu_disable_local(struct irq_data *data) 232 + { 233 + unsigned long *pen; 234 + unsigned long flags; 235 + union octeon_ciu_chip_data cd; 236 + 237 + cd.p = irq_data_get_irq_chip_data(data); 238 + 239 + if (cd.s.line == 0) { 240 + raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 241 + pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); 242 + clear_bit(cd.s.bit, pen); 243 + cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 244 + raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 245 + } else { 246 + raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 247 + pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); 248 + clear_bit(cd.s.bit, pen); 249 + cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); 250 + raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 251 + } 252 + } 253 + 254 + static void octeon_irq_ciu_disable_all(struct irq_data *data) 255 + { 256 + unsigned long flags; 257 + unsigned long *pen; 258 + int cpu; 259 + union octeon_ciu_chip_data cd; 260 + 261 + wmb(); /* Make sure flag changes arrive before register updates. 
*/ 262 + 263 + cd.p = irq_data_get_irq_chip_data(data); 264 + 265 + if (cd.s.line == 0) { 266 + raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 267 + for_each_online_cpu(cpu) { 268 + int coreid = octeon_coreid_for_cpu(cpu); 269 + pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 270 + clear_bit(cd.s.bit, pen); 271 + cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 272 + } 273 + raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 274 + } else { 275 + raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 276 + for_each_online_cpu(cpu) { 277 + int coreid = octeon_coreid_for_cpu(cpu); 278 + pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 279 + clear_bit(cd.s.bit, pen); 280 + cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); 281 + } 282 + raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 283 + } 284 + } 285 + 286 + static void octeon_irq_ciu_enable_all(struct irq_data *data) 287 + { 288 + unsigned long flags; 289 + unsigned long *pen; 290 + int cpu; 291 + union octeon_ciu_chip_data cd; 292 + 293 + cd.p = irq_data_get_irq_chip_data(data); 294 + 295 + if (cd.s.line == 0) { 296 + raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 297 + for_each_online_cpu(cpu) { 298 + int coreid = octeon_coreid_for_cpu(cpu); 299 + pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 300 + set_bit(cd.s.bit, pen); 301 + cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 302 + } 303 + raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 304 + } else { 305 + raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 306 + for_each_online_cpu(cpu) { 307 + int coreid = octeon_coreid_for_cpu(cpu); 308 + pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 309 + set_bit(cd.s.bit, pen); 310 + cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); 311 + } 312 + raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 313 + } 279 314 } 280 315 281 316 /* 282 317 * Enable the irq on the next core in the affinity set for chips that 283 318 * have the EN*_W1{S,C} registers. 284 319 */ 285 - static void octeon_irq_ciu0_enable_v2(unsigned int irq) 320 + static void octeon_irq_ciu_enable_v2(struct irq_data *data) 286 321 { 287 - int index; 288 - u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 289 - struct irq_desc *desc = irq_to_desc(irq); 322 + u64 mask; 323 + int cpu = next_cpu_for_irq(data); 324 + union octeon_ciu_chip_data cd; 290 325 291 - if ((desc->status & IRQ_DISABLED) == 0) { 292 - index = next_coreid_for_irq(desc) * 2; 326 + cd.p = irq_data_get_irq_chip_data(data); 327 + mask = 1ull << (cd.s.bit); 328 + 329 + /* 330 + * Called under the desc lock, so these should never get out 331 + * of sync. 332 + */ 333 + if (cd.s.line == 0) { 334 + int index = octeon_coreid_for_cpu(cpu) * 2; 335 + set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 293 336 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 337 + } else { 338 + int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 339 + set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 340 + cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 294 341 } 295 342 } 296 343 ··· 390 253 * Enable the irq on the current CPU for chips that 391 254 * have the EN*_W1{S,C} registers. 
392 255 */ 393 - static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq) 256 + static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) 394 257 { 395 - int index; 396 - u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 258 + u64 mask; 259 + union octeon_ciu_chip_data cd; 397 260 398 - index = cvmx_get_core_num() * 2; 399 - cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 400 - } 261 + cd.p = irq_data_get_irq_chip_data(data); 262 + mask = 1ull << (cd.s.bit); 401 263 402 - /* 403 - * Disable the irq on the current core for chips that have the EN*_W1{S,C} 404 - * registers. 405 - */ 406 - static void octeon_irq_ciu0_ack_v2(unsigned int irq) 407 - { 408 - int index = cvmx_get_core_num() * 2; 409 - u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 410 - 411 - switch (irq) { 412 - case OCTEON_IRQ_GMX_DRP0: 413 - case OCTEON_IRQ_GMX_DRP1: 414 - case OCTEON_IRQ_IPD_DRP: 415 - case OCTEON_IRQ_KEY_ZERO: 416 - case OCTEON_IRQ_TIMER0: 417 - case OCTEON_IRQ_TIMER1: 418 - case OCTEON_IRQ_TIMER2: 419 - case OCTEON_IRQ_TIMER3: 420 - /* 421 - * CIU timer type interrupts must be acknoleged by 422 - * writing a '1' bit to their sum0 bit. 423 - */ 424 - cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); 425 - break; 426 - default: 427 - break; 428 - } 429 - 430 - cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 431 - } 432 - 433 - /* 434 - * Enable the irq on the current core for chips that have the EN*_W1{S,C} 435 - * registers. 436 - */ 437 - static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq) 438 - { 439 - struct irq_desc *desc = irq_to_desc(irq); 440 - int index = cvmx_get_core_num() * 2; 441 - u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 442 - 443 - if (likely((desc->status & IRQ_DISABLED) == 0)) 264 + if (cd.s.line == 0) { 265 + int index = cvmx_get_core_num() * 2; 266 + set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); 444 267 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 268 + } else { 269 + int index = cvmx_get_core_num() * 2 + 1; 270 + set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); 271 + cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 272 + } 445 273 } 446 274 447 - /* 448 - * Disable the irq on the all cores for chips that have the EN*_W1{S,C} 449 - * registers. 450 - */ 451 - static void octeon_irq_ciu0_disable_all_v2(unsigned int irq) 275 + static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) 452 276 { 453 - u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 454 - int index; 455 - int cpu; 456 - for_each_online_cpu(cpu) { 457 - index = octeon_coreid_for_cpu(cpu) * 2; 277 + u64 mask; 278 + union octeon_ciu_chip_data cd; 279 + 280 + cd.p = irq_data_get_irq_chip_data(data); 281 + mask = 1ull << (cd.s.bit); 282 + 283 + if (cd.s.line == 0) { 284 + int index = cvmx_get_core_num() * 2; 285 + clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); 458 286 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 459 - } 460 - } 461 - 462 - #ifdef CONFIG_SMP 463 - static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) 464 - { 465 - int cpu; 466 - struct irq_desc *desc = irq_to_desc(irq); 467 - int enable_one = (desc->status & IRQ_DISABLED) == 0; 468 - unsigned long flags; 469 - int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ 470 - 471 - /* 472 - * For non-v2 CIU, we will allow only single CPU affinity. 473 - * This removes the need to do locking in the .ack/.eoi 474 - * functions. 
475 - */ 476 - if (cpumask_weight(dest) != 1) 477 - return -EINVAL; 478 - 479 - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 480 - for_each_online_cpu(cpu) { 481 - int coreid = octeon_coreid_for_cpu(cpu); 482 - uint64_t en0 = 483 - cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 484 - if (cpumask_test_cpu(cpu, dest) && enable_one) { 485 - enable_one = 0; 486 - en0 |= 1ull << bit; 487 - } else { 488 - en0 &= ~(1ull << bit); 489 - } 490 - cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 491 - } 492 - /* 493 - * We need to do a read after the last update to make sure all 494 - * of them are done. 495 - */ 496 - cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); 497 - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 498 - 499 - return 0; 500 - } 501 - 502 - /* 503 - * Set affinity for the irq for chips that have the EN*_W1{S,C} 504 - * registers. 505 - */ 506 - static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq, 507 - const struct cpumask *dest) 508 - { 509 - int cpu; 510 - int index; 511 - struct irq_desc *desc = irq_to_desc(irq); 512 - int enable_one = (desc->status & IRQ_DISABLED) == 0; 513 - u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 514 - 515 - for_each_online_cpu(cpu) { 516 - index = octeon_coreid_for_cpu(cpu) * 2; 517 - if (cpumask_test_cpu(cpu, dest) && enable_one) { 518 - enable_one = 0; 519 - cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 520 - } else { 521 - cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 522 - } 523 - } 524 - return 0; 525 - } 526 - #endif 527 - 528 - /* 529 - * Newer octeon chips have support for lockless CIU operation. 530 - */ 531 - static struct irq_chip octeon_irq_chip_ciu0_v2 = { 532 - .name = "CIU0", 533 - .enable = octeon_irq_ciu0_enable_v2, 534 - .disable = octeon_irq_ciu0_disable_all_v2, 535 - .eoi = octeon_irq_ciu0_enable_v2, 536 - #ifdef CONFIG_SMP 537 - .set_affinity = octeon_irq_ciu0_set_affinity_v2, 538 - #endif 539 - }; 540 - 541 - static struct irq_chip octeon_irq_chip_ciu0 = { 542 - .name = "CIU0", 543 - .enable = octeon_irq_ciu0_enable, 544 - .disable = octeon_irq_ciu0_disable, 545 - .eoi = octeon_irq_ciu0_eoi, 546 - #ifdef CONFIG_SMP 547 - .set_affinity = octeon_irq_ciu0_set_affinity, 548 - #endif 549 - }; 550 - 551 - /* The mbox versions don't do any affinity or round-robin. */ 552 - static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = { 553 - .name = "CIU0-M", 554 - .enable = octeon_irq_ciu0_enable_mbox_v2, 555 - .disable = octeon_irq_ciu0_disable, 556 - .eoi = octeon_irq_ciu0_eoi_mbox_v2, 557 - }; 558 - 559 - static struct irq_chip octeon_irq_chip_ciu0_mbox = { 560 - .name = "CIU0-M", 561 - .enable = octeon_irq_ciu0_enable_mbox, 562 - .disable = octeon_irq_ciu0_disable, 563 - .eoi = octeon_irq_ciu0_eoi, 564 - }; 565 - 566 - static void octeon_irq_ciu1_ack(unsigned int irq) 567 - { 568 - /* 569 - * In order to avoid any locking accessing the CIU, we 570 - * acknowledge CIU interrupts by disabling all of them. This 571 - * way we can use a per core register and avoid any out of 572 - * core locking requirements. This has the side affect that 573 - * CIU interrupts can't be processed recursively. We don't 574 - * need to disable IRQs to make these atomic since they are 575 - * already disabled earlier in the low level interrupt code. 576 - */ 577 - clear_c0_status(0x100 << 3); 578 - } 579 - 580 - static void octeon_irq_ciu1_eoi(unsigned int irq) 581 - { 582 - /* 583 - * Enable all CIU interrupts again. 
We don't need to disable 584 - * IRQs to make these atomic since they are already disabled 585 - * earlier in the low level interrupt code. 586 - */ 587 - set_c0_status(0x100 << 3); 588 - } 589 - 590 - static void octeon_irq_ciu1_enable(unsigned int irq) 591 - { 592 - struct irq_desc *desc = irq_to_desc(irq); 593 - int coreid = next_coreid_for_irq(desc); 594 - unsigned long flags; 595 - uint64_t en1; 596 - int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ 597 - 598 - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 599 - en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 600 - en1 |= 1ull << bit; 601 - cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); 602 - cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 603 - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 604 - } 605 - 606 - /* 607 - * Watchdog interrupts are special. They are associated with a single 608 - * core, so we hardwire the affinity to that core. 609 - */ 610 - static void octeon_irq_ciu1_wd_enable(unsigned int irq) 611 - { 612 - unsigned long flags; 613 - uint64_t en1; 614 - int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ 615 - int coreid = bit; 616 - 617 - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 618 - en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 619 - en1 |= 1ull << bit; 620 - cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); 621 - cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 622 - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 623 - } 624 - 625 - static void octeon_irq_ciu1_disable(unsigned int irq) 626 - { 627 - int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ 628 - unsigned long flags; 629 - uint64_t en1; 630 - int cpu; 631 - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 632 - for_each_online_cpu(cpu) { 633 - int coreid = octeon_coreid_for_cpu(cpu); 634 - en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 635 - en1 &= ~(1ull << bit); 636 - cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); 637 - } 638 - /* 639 - * We need to do a read after the last update to make sure all 640 - * of them are done. 641 - */ 642 - cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); 643 - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 644 - } 645 - 646 - /* 647 - * Enable the irq on the current core for chips that have the EN*_W1{S,C} 648 - * registers. 649 - */ 650 - static void octeon_irq_ciu1_enable_v2(unsigned int irq) 651 - { 652 - int index; 653 - u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 654 - struct irq_desc *desc = irq_to_desc(irq); 655 - 656 - if ((desc->status & IRQ_DISABLED) == 0) { 657 - index = next_coreid_for_irq(desc) * 2 + 1; 658 - cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 659 - } 660 - } 661 - 662 - /* 663 - * Watchdog interrupts are special. They are associated with a single 664 - * core, so we hardwire the affinity to that core. 665 - */ 666 - static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq) 667 - { 668 - int index; 669 - int coreid = irq - OCTEON_IRQ_WDOG0; 670 - u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 671 - struct irq_desc *desc = irq_to_desc(irq); 672 - 673 - if ((desc->status & IRQ_DISABLED) == 0) { 674 - index = coreid * 2 + 1; 675 - cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 676 - } 677 - } 678 - 679 - /* 680 - * Disable the irq on the current core for chips that have the EN*_W1{S,C} 681 - * registers. 
682 - */ 683 - static void octeon_irq_ciu1_ack_v2(unsigned int irq) 684 - { 685 - int index = cvmx_get_core_num() * 2 + 1; 686 - u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 687 - 688 - cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 689 - } 690 - 691 - /* 692 - * Disable the irq on all cores for chips that have the EN*_W1{S,C} 693 - * registers. 694 - */ 695 - static void octeon_irq_ciu1_disable_all_v2(unsigned int irq) 696 - { 697 - u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 698 - int index; 699 - int cpu; 700 - for_each_online_cpu(cpu) { 701 - index = octeon_coreid_for_cpu(cpu) * 2 + 1; 287 + } else { 288 + int index = cvmx_get_core_num() * 2 + 1; 289 + clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); 702 290 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 703 291 } 704 292 } 705 293 706 - #ifdef CONFIG_SMP 707 - static int octeon_irq_ciu1_set_affinity(unsigned int irq, 708 - const struct cpumask *dest) 294 + /* 295 + * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq. 296 + */ 297 + static void octeon_irq_ciu_ack(struct irq_data *data) 298 + { 299 + u64 mask; 300 + union octeon_ciu_chip_data cd; 301 + 302 + cd.p = data->chip_data; 303 + mask = 1ull << (cd.s.bit); 304 + 305 + if (cd.s.line == 0) { 306 + int index = cvmx_get_core_num() * 2; 307 + cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); 308 + } else { 309 + cvmx_write_csr(CVMX_CIU_INT_SUM1, mask); 310 + } 311 + } 312 + 313 + /* 314 + * Disable the irq on all cores for chips that have the EN*_W1{S,C} 315 + * registers. 316 + */ 317 + static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) 709 318 { 710 319 int cpu; 711 - struct irq_desc *desc = irq_to_desc(irq); 712 - int enable_one = (desc->status & IRQ_DISABLED) == 0; 320 + u64 mask; 321 + union octeon_ciu_chip_data cd; 322 + 323 + wmb(); /* Make sure flag changes arrive before register updates. */ 324 + 325 + cd.p = data->chip_data; 326 + mask = 1ull << (cd.s.bit); 327 + 328 + if (cd.s.line == 0) { 329 + for_each_online_cpu(cpu) { 330 + int index = octeon_coreid_for_cpu(cpu) * 2; 331 + clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 332 + cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 333 + } 334 + } else { 335 + for_each_online_cpu(cpu) { 336 + int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 337 + clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 338 + cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 339 + } 340 + } 341 + } 342 + 343 + /* 344 + * Enable the irq on all cores for chips that have the EN*_W1{S,C} 345 + * registers. 
346 + */ 347 + static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) 348 + { 349 + int cpu; 350 + u64 mask; 351 + union octeon_ciu_chip_data cd; 352 + 353 + cd.p = data->chip_data; 354 + mask = 1ull << (cd.s.bit); 355 + 356 + if (cd.s.line == 0) { 357 + for_each_online_cpu(cpu) { 358 + int index = octeon_coreid_for_cpu(cpu) * 2; 359 + set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 360 + cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 361 + } 362 + } else { 363 + for_each_online_cpu(cpu) { 364 + int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 365 + set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 366 + cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 367 + } 368 + } 369 + } 370 + 371 + #ifdef CONFIG_SMP 372 + 373 + static void octeon_irq_cpu_offline_ciu(struct irq_data *data) 374 + { 375 + int cpu = smp_processor_id(); 376 + cpumask_t new_affinity; 377 + 378 + if (!cpumask_test_cpu(cpu, data->affinity)) 379 + return; 380 + 381 + if (cpumask_weight(data->affinity) > 1) { 382 + /* 383 + * It has multi CPU affinity, just remove this CPU 384 + * from the affinity set. 385 + */ 386 + cpumask_copy(&new_affinity, data->affinity); 387 + cpumask_clear_cpu(cpu, &new_affinity); 388 + } else { 389 + /* Otherwise, put it on lowest numbered online CPU. */ 390 + cpumask_clear(&new_affinity); 391 + cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); 392 + } 393 + __irq_set_affinity_locked(data, &new_affinity); 394 + } 395 + 396 + static int octeon_irq_ciu_set_affinity(struct irq_data *data, 397 + const struct cpumask *dest, bool force) 398 + { 399 + int cpu; 400 + bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 713 401 unsigned long flags; 714 - int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ 402 + union octeon_ciu_chip_data cd; 403 + 404 + cd.p = data->chip_data; 715 405 716 406 /* 717 407 * For non-v2 CIU, we will allow only single CPU affinity. ··· 548 584 if (cpumask_weight(dest) != 1) 549 585 return -EINVAL; 550 586 551 - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 552 - for_each_online_cpu(cpu) { 553 - int coreid = octeon_coreid_for_cpu(cpu); 554 - uint64_t en1 = 555 - cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 556 - if (cpumask_test_cpu(cpu, dest) && enable_one) { 557 - enable_one = 0; 558 - en1 |= 1ull << bit; 559 - } else { 560 - en1 &= ~(1ull << bit); 561 - } 562 - cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); 563 - } 564 - /* 565 - * We need to do a read after the last update to make sure all 566 - * of them are done. 
567 - */ 568 - cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); 569 - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 587 + if (!enable_one) 588 + return 0; 570 589 590 + if (cd.s.line == 0) { 591 + raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 592 + for_each_online_cpu(cpu) { 593 + int coreid = octeon_coreid_for_cpu(cpu); 594 + unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 595 + 596 + if (cpumask_test_cpu(cpu, dest) && enable_one) { 597 + enable_one = false; 598 + set_bit(cd.s.bit, pen); 599 + } else { 600 + clear_bit(cd.s.bit, pen); 601 + } 602 + cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 603 + } 604 + raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 605 + } else { 606 + raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 607 + for_each_online_cpu(cpu) { 608 + int coreid = octeon_coreid_for_cpu(cpu); 609 + unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 610 + 611 + if (cpumask_test_cpu(cpu, dest) && enable_one) { 612 + enable_one = false; 613 + set_bit(cd.s.bit, pen); 614 + } else { 615 + clear_bit(cd.s.bit, pen); 616 + } 617 + cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); 618 + } 619 + raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 620 + } 571 621 return 0; 572 622 } 573 623 ··· 589 611 * Set affinity for the irq for chips that have the EN*_W1{S,C} 590 612 * registers. 591 613 */ 592 - static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq, 593 - const struct cpumask *dest) 614 + static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, 615 + const struct cpumask *dest, 616 + bool force) 594 617 { 595 618 int cpu; 596 - int index; 597 - struct irq_desc *desc = irq_to_desc(irq); 598 - int enable_one = (desc->status & IRQ_DISABLED) == 0; 599 - u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 600 - for_each_online_cpu(cpu) { 601 - index = octeon_coreid_for_cpu(cpu) * 2 + 1; 602 - if (cpumask_test_cpu(cpu, dest) && enable_one) { 603 - enable_one = 0; 604 - cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 605 - } else { 606 - cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 619 + bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 620 + u64 mask; 621 + union octeon_ciu_chip_data cd; 622 + 623 + if (!enable_one) 624 + return 0; 625 + 626 + cd.p = data->chip_data; 627 + mask = 1ull << cd.s.bit; 628 + 629 + if (cd.s.line == 0) { 630 + for_each_online_cpu(cpu) { 631 + unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 632 + int index = octeon_coreid_for_cpu(cpu) * 2; 633 + if (cpumask_test_cpu(cpu, dest) && enable_one) { 634 + enable_one = false; 635 + set_bit(cd.s.bit, pen); 636 + cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 637 + } else { 638 + clear_bit(cd.s.bit, pen); 639 + cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 640 + } 641 + } 642 + } else { 643 + for_each_online_cpu(cpu) { 644 + unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 645 + int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 646 + if (cpumask_test_cpu(cpu, dest) && enable_one) { 647 + enable_one = false; 648 + set_bit(cd.s.bit, pen); 649 + cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 650 + } else { 651 + clear_bit(cd.s.bit, pen); 652 + cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 653 + } 607 654 } 608 655 } 609 656 return 0; ··· 636 633 #endif 637 634 638 635 /* 636 + * The v1 CIU code already masks things, so supply a dummy version to 637 + * the core chip code. 
638 + */ 639 + static void octeon_irq_dummy_mask(struct irq_data *data) 640 + { 641 + } 642 + 643 + /* 639 644 * Newer octeon chips have support for lockless CIU operation. 640 645 */ 641 - static struct irq_chip octeon_irq_chip_ciu1_v2 = { 642 - .name = "CIU1", 643 - .enable = octeon_irq_ciu1_enable_v2, 644 - .disable = octeon_irq_ciu1_disable_all_v2, 645 - .eoi = octeon_irq_ciu1_enable_v2, 646 + static struct irq_chip octeon_irq_chip_ciu_v2 = { 647 + .name = "CIU", 648 + .irq_enable = octeon_irq_ciu_enable_v2, 649 + .irq_disable = octeon_irq_ciu_disable_all_v2, 650 + .irq_mask = octeon_irq_ciu_disable_local_v2, 651 + .irq_unmask = octeon_irq_ciu_enable_v2, 646 652 #ifdef CONFIG_SMP 647 - .set_affinity = octeon_irq_ciu1_set_affinity_v2, 653 + .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, 654 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 648 655 #endif 649 656 }; 650 657 651 - static struct irq_chip octeon_irq_chip_ciu1 = { 652 - .name = "CIU1", 653 - .enable = octeon_irq_ciu1_enable, 654 - .disable = octeon_irq_ciu1_disable, 655 - .eoi = octeon_irq_ciu1_eoi, 658 + static struct irq_chip octeon_irq_chip_ciu_edge_v2 = { 659 + .name = "CIU-E", 660 + .irq_enable = octeon_irq_ciu_enable_v2, 661 + .irq_disable = octeon_irq_ciu_disable_all_v2, 662 + .irq_ack = octeon_irq_ciu_ack, 663 + .irq_mask = octeon_irq_ciu_disable_local_v2, 664 + .irq_unmask = octeon_irq_ciu_enable_v2, 656 665 #ifdef CONFIG_SMP 657 - .set_affinity = octeon_irq_ciu1_set_affinity, 666 + .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, 667 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 658 668 #endif 659 669 }; 660 670 661 - static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = { 662 - .name = "CIU1-W", 663 - .enable = octeon_irq_ciu1_wd_enable_v2, 664 - .disable = octeon_irq_ciu1_disable_all_v2, 665 - .eoi = octeon_irq_ciu1_wd_enable_v2, 671 + static struct irq_chip octeon_irq_chip_ciu = { 672 + .name = "CIU", 673 + .irq_enable = octeon_irq_ciu_enable, 674 + .irq_disable = octeon_irq_ciu_disable_all, 675 + .irq_mask = octeon_irq_dummy_mask, 676 + #ifdef CONFIG_SMP 677 + .irq_set_affinity = octeon_irq_ciu_set_affinity, 678 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 679 + #endif 666 680 }; 667 681 668 - static struct irq_chip octeon_irq_chip_ciu1_wd = { 669 - .name = "CIU1-W", 670 - .enable = octeon_irq_ciu1_wd_enable, 671 - .disable = octeon_irq_ciu1_disable, 672 - .eoi = octeon_irq_ciu1_eoi, 682 + static struct irq_chip octeon_irq_chip_ciu_edge = { 683 + .name = "CIU-E", 684 + .irq_enable = octeon_irq_ciu_enable, 685 + .irq_disable = octeon_irq_ciu_disable_all, 686 + .irq_mask = octeon_irq_dummy_mask, 687 + .irq_ack = octeon_irq_ciu_ack, 688 + #ifdef CONFIG_SMP 689 + .irq_set_affinity = octeon_irq_ciu_set_affinity, 690 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 691 + #endif 673 692 }; 674 693 675 - static void (*octeon_ciu0_ack)(unsigned int); 676 - static void (*octeon_ciu1_ack)(unsigned int); 694 + /* The mbox versions don't do any affinity or round-robin. 
*/ 695 + static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = { 696 + .name = "CIU-M", 697 + .irq_enable = octeon_irq_ciu_enable_all_v2, 698 + .irq_disable = octeon_irq_ciu_disable_all_v2, 699 + .irq_ack = octeon_irq_ciu_disable_local_v2, 700 + .irq_eoi = octeon_irq_ciu_enable_local_v2, 701 + 702 + .irq_cpu_online = octeon_irq_ciu_enable_local_v2, 703 + .irq_cpu_offline = octeon_irq_ciu_disable_local_v2, 704 + .flags = IRQCHIP_ONOFFLINE_ENABLED, 705 + }; 706 + 707 + static struct irq_chip octeon_irq_chip_ciu_mbox = { 708 + .name = "CIU-M", 709 + .irq_enable = octeon_irq_ciu_enable_all, 710 + .irq_disable = octeon_irq_ciu_disable_all, 711 + 712 + .irq_cpu_online = octeon_irq_ciu_enable_local, 713 + .irq_cpu_offline = octeon_irq_ciu_disable_local, 714 + .flags = IRQCHIP_ONOFFLINE_ENABLED, 715 + }; 716 + 717 + /* 718 + * Watchdog interrupts are special. They are associated with a single 719 + * core, so we hardwire the affinity to that core. 720 + */ 721 + static void octeon_irq_ciu_wd_enable(struct irq_data *data) 722 + { 723 + unsigned long flags; 724 + unsigned long *pen; 725 + int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ 726 + int cpu = octeon_cpu_for_coreid(coreid); 727 + 728 + raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 729 + pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 730 + set_bit(coreid, pen); 731 + cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); 732 + raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 733 + } 734 + 735 + /* 736 + * Watchdog interrupts are special. They are associated with a single 737 + * core, so we hardwire the affinity to that core. 738 + */ 739 + static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) 740 + { 741 + int coreid = data->irq - OCTEON_IRQ_WDOG0; 742 + int cpu = octeon_cpu_for_coreid(coreid); 743 + 744 + set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 745 + cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); 746 + } 747 + 748 + 749 + static struct irq_chip octeon_irq_chip_ciu_wd_v2 = { 750 + .name = "CIU-W", 751 + .irq_enable = octeon_irq_ciu1_wd_enable_v2, 752 + .irq_disable = octeon_irq_ciu_disable_all_v2, 753 + .irq_mask = octeon_irq_ciu_disable_local_v2, 754 + .irq_unmask = octeon_irq_ciu_enable_local_v2, 755 + }; 756 + 757 + static struct irq_chip octeon_irq_chip_ciu_wd = { 758 + .name = "CIU-W", 759 + .irq_enable = octeon_irq_ciu_wd_enable, 760 + .irq_disable = octeon_irq_ciu_disable_all, 761 + .irq_mask = octeon_irq_dummy_mask, 762 + }; 763 + 764 + static void octeon_irq_ip2_v1(void) 765 + { 766 + const unsigned long core_id = cvmx_get_core_num(); 767 + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); 768 + 769 + ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); 770 + clear_c0_status(STATUSF_IP2); 771 + if (likely(ciu_sum)) { 772 + int bit = fls64(ciu_sum) - 1; 773 + int irq = octeon_irq_ciu_to_irq[0][bit]; 774 + if (likely(irq)) 775 + do_IRQ(irq); 776 + else 777 + spurious_interrupt(); 778 + } else { 779 + spurious_interrupt(); 780 + } 781 + set_c0_status(STATUSF_IP2); 782 + } 783 + 784 + static void octeon_irq_ip2_v2(void) 785 + { 786 + const unsigned long core_id = cvmx_get_core_num(); 787 + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); 788 + 789 + ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); 790 + if (likely(ciu_sum)) { 791 + int bit = fls64(ciu_sum) - 1; 792 + int irq = octeon_irq_ciu_to_irq[0][bit]; 793 + if (likely(irq)) 794 + do_IRQ(irq); 795 + else 796 + spurious_interrupt(); 797 + } else { 798 + 
spurious_interrupt(); 799 + } 800 + } 801 + static void octeon_irq_ip3_v1(void) 802 + { 803 + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); 804 + 805 + ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); 806 + clear_c0_status(STATUSF_IP3); 807 + if (likely(ciu_sum)) { 808 + int bit = fls64(ciu_sum) - 1; 809 + int irq = octeon_irq_ciu_to_irq[1][bit]; 810 + if (likely(irq)) 811 + do_IRQ(irq); 812 + else 813 + spurious_interrupt(); 814 + } else { 815 + spurious_interrupt(); 816 + } 817 + set_c0_status(STATUSF_IP3); 818 + } 819 + 820 + static void octeon_irq_ip3_v2(void) 821 + { 822 + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); 823 + 824 + ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); 825 + if (likely(ciu_sum)) { 826 + int bit = fls64(ciu_sum) - 1; 827 + int irq = octeon_irq_ciu_to_irq[1][bit]; 828 + if (likely(irq)) 829 + do_IRQ(irq); 830 + else 831 + spurious_interrupt(); 832 + } else { 833 + spurious_interrupt(); 834 + } 835 + } 836 + 837 + static void octeon_irq_ip4_mask(void) 838 + { 839 + clear_c0_status(STATUSF_IP4); 840 + spurious_interrupt(); 841 + } 842 + 843 + static void (*octeon_irq_ip2)(void); 844 + static void (*octeon_irq_ip3)(void); 845 + static void (*octeon_irq_ip4)(void); 846 + 847 + void __cpuinitdata (*octeon_irq_setup_secondary)(void); 848 + 849 + static void __cpuinit octeon_irq_percpu_enable(void) 850 + { 851 + irq_cpu_online(); 852 + } 853 + 854 + static void __cpuinit octeon_irq_init_ciu_percpu(void) 855 + { 856 + int coreid = cvmx_get_core_num(); 857 + /* 858 + * Disable All CIU Interrupts. The ones we need will be 859 + * enabled later. Read the SUM register so we know the write 860 + * completed. 861 + */ 862 + cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); 863 + cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); 864 + cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); 865 + cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); 866 + cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); 867 + } 868 + 869 + static void __cpuinit octeon_irq_setup_secondary_ciu(void) 870 + { 871 + 872 + __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; 873 + __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; 874 + 875 + octeon_irq_init_ciu_percpu(); 876 + octeon_irq_percpu_enable(); 877 + 878 + /* Enable the CIU lines */ 879 + set_c0_status(STATUSF_IP3 | STATUSF_IP2); 880 + clear_c0_status(STATUSF_IP4); 881 + } 882 + 883 + static void __init octeon_irq_init_ciu(void) 884 + { 885 + unsigned int i; 886 + struct irq_chip *chip; 887 + struct irq_chip *chip_edge; 888 + struct irq_chip *chip_mbox; 889 + struct irq_chip *chip_wd; 890 + 891 + octeon_irq_init_ciu_percpu(); 892 + octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; 893 + 894 + if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || 895 + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || 896 + OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || 897 + OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 898 + octeon_irq_ip2 = octeon_irq_ip2_v2; 899 + octeon_irq_ip3 = octeon_irq_ip3_v2; 900 + chip = &octeon_irq_chip_ciu_v2; 901 + chip_edge = &octeon_irq_chip_ciu_edge_v2; 902 + chip_mbox = &octeon_irq_chip_ciu_mbox_v2; 903 + chip_wd = &octeon_irq_chip_ciu_wd_v2; 904 + } else { 905 + octeon_irq_ip2 = octeon_irq_ip2_v1; 906 + octeon_irq_ip3 = octeon_irq_ip3_v1; 907 + chip = &octeon_irq_chip_ciu; 908 + chip_edge = &octeon_irq_chip_ciu_edge; 909 + chip_mbox = &octeon_irq_chip_ciu_mbox; 910 + chip_wd = &octeon_irq_chip_ciu_wd; 911 + } 912 + octeon_irq_ip4 = octeon_irq_ip4_mask; 913 + 914 + /* Mips internal */ 915 + octeon_irq_init_core(); 916 + 917 + /* CIU_0 
*/ 918 + for (i = 0; i < 16; i++) 919 + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq); 920 + for (i = 0; i < 16; i++) 921 + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq); 922 + 923 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); 924 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); 925 + 926 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq); 927 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq); 928 + 929 + for (i = 0; i < 4; i++) 930 + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq); 931 + for (i = 0; i < 4; i++) 932 + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq); 933 + 934 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq); 935 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq); 936 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq); 937 + 938 + for (i = 0; i < 2; i++) 939 + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq); 940 + 941 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq); 942 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq); 943 + 944 + for (i = 0; i < 4; i++) 945 + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq); 946 + 947 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); 948 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq); 949 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq); 950 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq); 951 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq); 952 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq); 953 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq); 954 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); 955 + 956 + /* CIU_1 */ 957 + for (i = 0; i < 16; i++) 958 + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); 959 + 960 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq); 961 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); 962 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq); 963 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq); 964 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq); 965 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq); 966 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq); 967 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq); 968 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq); 969 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq); 970 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq); 971 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq); 972 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq); 973 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq); 974 + 
octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq); 975 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq); 976 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq); 977 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq); 978 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq); 979 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq); 980 + 981 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq); 982 + 983 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq); 984 + 985 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq); 986 + 987 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq); 988 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq); 989 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq); 990 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq); 991 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq); 992 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq); 993 + octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq); 994 + 995 + /* Enable the CIU lines */ 996 + set_c0_status(STATUSF_IP3 | STATUSF_IP2); 997 + clear_c0_status(STATUSF_IP4); 998 + } 677 999 678 1000 void __init arch_init_irq(void) 679 1001 { 680 - unsigned int irq; 681 - struct irq_chip *chip0; 682 - struct irq_chip *chip0_mbox; 683 - struct irq_chip *chip1; 684 - struct irq_chip *chip1_wd; 685 - 686 1002 #ifdef CONFIG_SMP 687 1003 /* Set the default affinity to the boot cpu. */ 688 1004 cpumask_clear(irq_default_affinity); 689 1005 cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 690 1006 #endif 691 - 692 - if (NR_IRQS < OCTEON_IRQ_LAST) 693 - pr_err("octeon_irq_init: NR_IRQS is set too low\n"); 694 - 695 - if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || 696 - OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || 697 - OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) { 698 - octeon_ciu0_ack = octeon_irq_ciu0_ack_v2; 699 - octeon_ciu1_ack = octeon_irq_ciu1_ack_v2; 700 - chip0 = &octeon_irq_chip_ciu0_v2; 701 - chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2; 702 - chip1 = &octeon_irq_chip_ciu1_v2; 703 - chip1_wd = &octeon_irq_chip_ciu1_wd_v2; 704 - } else { 705 - octeon_ciu0_ack = octeon_irq_ciu0_ack; 706 - octeon_ciu1_ack = octeon_irq_ciu1_ack; 707 - chip0 = &octeon_irq_chip_ciu0; 708 - chip0_mbox = &octeon_irq_chip_ciu0_mbox; 709 - chip1 = &octeon_irq_chip_ciu1; 710 - chip1_wd = &octeon_irq_chip_ciu1_wd; 711 - } 712 - 713 - /* 0 - 15 reserved for i8259 master and slave controller. 
*/ 714 - 715 - /* 17 - 23 Mips internal */ 716 - for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) { 717 - set_irq_chip_and_handler(irq, &octeon_irq_chip_core, 718 - handle_percpu_irq); 719 - } 720 - 721 - /* 24 - 87 CIU_INT_SUM0 */ 722 - for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) { 723 - switch (irq) { 724 - case OCTEON_IRQ_MBOX0: 725 - case OCTEON_IRQ_MBOX1: 726 - set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq); 727 - break; 728 - default: 729 - set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq); 730 - break; 731 - } 732 - } 733 - 734 - /* 88 - 151 CIU_INT_SUM1 */ 735 - for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++) 736 - set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq); 737 - 738 - for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++) 739 - set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq); 740 - 741 - set_c0_status(0x300 << 2); 1007 + octeon_irq_init_ciu(); 742 1008 } 743 1009 744 1010 asmlinkage void plat_irq_dispatch(void) 745 1011 { 746 - const unsigned long core_id = cvmx_get_core_num(); 747 - const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2); 748 - const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2); 749 - const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1; 750 - const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1); 751 1012 unsigned long cop0_cause; 752 1013 unsigned long cop0_status; 753 - uint64_t ciu_en; 754 - uint64_t ciu_sum; 755 - unsigned int irq; 756 1014 757 1015 while (1) { 758 1016 cop0_cause = read_c0_cause(); ··· 1021 757 cop0_cause &= cop0_status; 1022 758 cop0_cause &= ST0_IM; 1023 759 1024 - if (unlikely(cop0_cause & STATUSF_IP2)) { 1025 - ciu_sum = cvmx_read_csr(ciu_sum0_address); 1026 - ciu_en = cvmx_read_csr(ciu_en0_address); 1027 - ciu_sum &= ciu_en; 1028 - if (likely(ciu_sum)) { 1029 - irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1; 1030 - octeon_ciu0_ack(irq); 1031 - do_IRQ(irq); 1032 - } else { 1033 - spurious_interrupt(); 1034 - } 1035 - } else if (unlikely(cop0_cause & STATUSF_IP3)) { 1036 - ciu_sum = cvmx_read_csr(ciu_sum1_address); 1037 - ciu_en = cvmx_read_csr(ciu_en1_address); 1038 - ciu_sum &= ciu_en; 1039 - if (likely(ciu_sum)) { 1040 - irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1; 1041 - octeon_ciu1_ack(irq); 1042 - do_IRQ(irq); 1043 - } else { 1044 - spurious_interrupt(); 1045 - } 1046 - } else if (likely(cop0_cause)) { 760 + if (unlikely(cop0_cause & STATUSF_IP2)) 761 + octeon_irq_ip2(); 762 + else if (unlikely(cop0_cause & STATUSF_IP3)) 763 + octeon_irq_ip3(); 764 + else if (unlikely(cop0_cause & STATUSF_IP4)) 765 + octeon_irq_ip4(); 766 + else if (likely(cop0_cause)) 1047 767 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); 1048 - } else { 768 + else 1049 769 break; 1050 - } 1051 770 } 1052 771 } 1053 772 ··· 1038 791 1039 792 void fixup_irqs(void) 1040 793 { 1041 - int irq; 1042 - struct irq_desc *desc; 1043 - cpumask_t new_affinity; 1044 - unsigned long flags; 1045 - int do_set_affinity; 1046 - int cpu; 1047 - 1048 - cpu = smp_processor_id(); 1049 - 1050 - for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) 1051 - octeon_irq_core_disable_local(irq); 1052 - 1053 - for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) { 1054 - desc = irq_to_desc(irq); 1055 - switch (irq) { 1056 - case OCTEON_IRQ_MBOX0: 1057 - case OCTEON_IRQ_MBOX1: 1058 - /* The eoi function will disable them on this CPU. 
*/ 1059 - desc->chip->eoi(irq); 1060 - break; 1061 - case OCTEON_IRQ_WDOG0: 1062 - case OCTEON_IRQ_WDOG1: 1063 - case OCTEON_IRQ_WDOG2: 1064 - case OCTEON_IRQ_WDOG3: 1065 - case OCTEON_IRQ_WDOG4: 1066 - case OCTEON_IRQ_WDOG5: 1067 - case OCTEON_IRQ_WDOG6: 1068 - case OCTEON_IRQ_WDOG7: 1069 - case OCTEON_IRQ_WDOG8: 1070 - case OCTEON_IRQ_WDOG9: 1071 - case OCTEON_IRQ_WDOG10: 1072 - case OCTEON_IRQ_WDOG11: 1073 - case OCTEON_IRQ_WDOG12: 1074 - case OCTEON_IRQ_WDOG13: 1075 - case OCTEON_IRQ_WDOG14: 1076 - case OCTEON_IRQ_WDOG15: 1077 - /* 1078 - * These have special per CPU semantics and 1079 - * are handled in the watchdog driver. 1080 - */ 1081 - break; 1082 - default: 1083 - raw_spin_lock_irqsave(&desc->lock, flags); 1084 - /* 1085 - * If this irq has an action, it is in use and 1086 - * must be migrated if it has affinity to this 1087 - * cpu. 1088 - */ 1089 - if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) { 1090 - if (cpumask_weight(desc->affinity) > 1) { 1091 - /* 1092 - * It has multi CPU affinity, 1093 - * just remove this CPU from 1094 - * the affinity set. 1095 - */ 1096 - cpumask_copy(&new_affinity, desc->affinity); 1097 - cpumask_clear_cpu(cpu, &new_affinity); 1098 - } else { 1099 - /* 1100 - * Otherwise, put it on lowest 1101 - * numbered online CPU. 1102 - */ 1103 - cpumask_clear(&new_affinity); 1104 - cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); 1105 - } 1106 - do_set_affinity = 1; 1107 - } else { 1108 - do_set_affinity = 0; 1109 - } 1110 - raw_spin_unlock_irqrestore(&desc->lock, flags); 1111 - 1112 - if (do_set_affinity) 1113 - irq_set_affinity(irq, &new_affinity); 1114 - 1115 - break; 1116 - } 1117 - } 794 + irq_cpu_offline(); 1118 795 } 1119 796 1120 797 #endif /* CONFIG_HOTPLUG_CPU */
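The bulk of this file's churn is the genirq conversion: every irq_chip callback now takes a struct irq_data * instead of a raw Linux irq number, the per-interrupt state (CIU line and bit) rides along in chip_data, and a per-CPU software mirror of the enable bits lets the W1S/W1C chips run without a spinlock. A minimal sketch of that pattern, assuming hypothetical write-1-to-set/clear register helpers (example_write_w1s/example_write_w1c are placeholders, not real cvmx calls):

#include <linux/irq.h>

/* Placeholder MMIO helpers, assumed for the sketch. */
void example_write_w1s(int line, u64 mask);
void example_write_w1c(int line, u64 mask);

struct example_irq_state {
        int line;       /* which enable-register pair */
        int bit;        /* bit within that register */
};

static void example_irq_unmask(struct irq_data *data)
{
        struct example_irq_state *st = irq_data_get_irq_chip_data(data);

        /* Single write-1-to-set access: no read-modify-write, no lock. */
        example_write_w1s(st->line, 1ull << st->bit);
}

static void example_irq_mask(struct irq_data *data)
{
        struct example_irq_state *st = irq_data_get_irq_chip_data(data);

        example_write_w1c(st->line, 1ull << st->bit);
}

static struct irq_chip example_chip = {
        .name           = "EXAMPLE",
        .irq_mask       = example_irq_mask,
        .irq_unmask     = example_irq_unmask,
};

At setup time the state would be attached with irq_set_chip_data(irq, st) before irq_set_chip_and_handler(irq, &example_chip, handle_level_irq), which mirrors the role octeon_irq_set_ciu_mapping() appears to play in the diff above.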
-12
arch/mips/cavium-octeon/setup.c
··· 420 420 void __init prom_init(void) 421 421 { 422 422 struct cvmx_sysinfo *sysinfo; 423 - const int coreid = cvmx_get_core_num(); 424 423 int i; 425 424 int argc; 426 425 #ifdef CONFIG_CAVIUM_RESERVE32 ··· 535 536 octeon_check_cpu_bist(); 536 537 537 538 octeon_uart = octeon_get_boot_uart(); 538 - 539 - /* 540 - * Disable All CIU Interrupts. The ones we need will be 541 - * enabled later. Read the SUM register so we know the write 542 - * completed. 543 - */ 544 - cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); 545 - cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); 546 - cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); 547 - cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); 548 - cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); 549 539 550 540 #ifdef CONFIG_SMP 551 541 octeon_write_lcd("LinuxSMP");
+18 -31
arch/mips/cavium-octeon/smp.c
··· 171 171 * After we've done initial boot, this function is called to allow the 172 172 * board code to clean up state, if needed 173 173 */ 174 - static void octeon_init_secondary(void) 174 + static void __cpuinit octeon_init_secondary(void) 175 175 { 176 - const int coreid = cvmx_get_core_num(); 177 - union cvmx_ciu_intx_sum0 interrupt_enable; 178 176 unsigned int sr; 179 177 178 + sr = set_c0_status(ST0_BEV); 179 + write_c0_ebase((u32)ebase); 180 + write_c0_status(sr); 181 + 182 + octeon_check_cpu_bist(); 183 + octeon_init_cvmcount(); 184 + 185 + octeon_irq_setup_secondary(); 186 + raw_local_irq_enable(); 187 + } 188 + 189 + /** 190 + * Callout to firmware before smp_init 191 + * 192 + */ 193 + void octeon_prepare_cpus(unsigned int max_cpus) 194 + { 180 195 #ifdef CONFIG_HOTPLUG_CPU 181 196 struct linux_app_boot_info *labi; 182 197 ··· 201 186 panic("The bootloader version on this board is incorrect."); 202 187 #endif 203 188 204 - sr = set_c0_status(ST0_BEV); 205 - write_c0_ebase((u32)ebase); 206 - write_c0_status(sr); 207 - 208 - octeon_check_cpu_bist(); 209 - octeon_init_cvmcount(); 210 - /* 211 - pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid); 212 - */ 213 - /* Enable Mailbox interrupts to this core. These are the only 214 - interrupts allowed on line 3 */ 215 - cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff); 216 - interrupt_enable.u64 = 0; 217 - interrupt_enable.s.mbox = 0x3; 218 - cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64); 219 - cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); 220 - cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); 221 - cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); 222 - /* Enable core interrupt processing for 2,3 and 7 */ 223 - set_c0_status(0x8c01); 224 - } 225 - 226 - /** 227 - * Callout to firmware before smp_init 228 - * 229 - */ 230 - void octeon_prepare_cpus(unsigned int max_cpus) 231 - { 232 189 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); 233 190 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, 234 191 "mailbox0", mailbox_interrupt)) {
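The two hunks above split hardware bring-up between generic and board code: octeon_init_secondary() keeps only the CP0/ebase setup and defers interrupt configuration to the octeon_irq_setup_secondary hook that octeon-irq.c now installs, while the bootloader-version check moves to octeon_prepare_cpus(). A sketch of the callout shape (the NULL check is added defensively for illustration; the patch itself assigns the hook unconditionally in octeon_irq_init_ciu()):

/* Published by the IRQ controller code, invoked per secondary CPU. */
void (*octeon_irq_setup_secondary)(void);

static void example_init_secondary(void)
{
        /* ... per-CPU CP0 state ... */
        if (octeon_irq_setup_secondary)
                octeon_irq_setup_secondary();   /* board-specific IRQ init */
}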
+2 -2
arch/mips/dec/ioasic-irq.c
··· 68 68 fast_iob(); 69 69 70 70 for (i = base; i < base + IO_INR_DMA; i++) 71 - set_irq_chip_and_handler(i, &ioasic_irq_type, 71 + irq_set_chip_and_handler(i, &ioasic_irq_type, 72 72 handle_level_irq); 73 73 for (; i < base + IO_IRQ_LINES; i++) 74 - set_irq_chip(i, &ioasic_dma_irq_type); 74 + irq_set_chip(i, &ioasic_dma_irq_type); 75 75 76 76 ioasic_irq_base = base; 77 77 }
+1 -1
arch/mips/dec/kn02-irq.c
··· 73 73 iob(); 74 74 75 75 for (i = base; i < base + KN02_IRQ_LINES; i++) 76 - set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq); 76 + irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq); 77 77 78 78 kn02_irq_base = base; 79 79 }
+3 -3
arch/mips/emma/markeins/irq.c
··· 69 69 u32 i; 70 70 71 71 for (i = 0; i < NUM_EMMA2RH_IRQ; i++) 72 - set_irq_chip_and_handler_name(EMMA2RH_IRQ_BASE + i, 72 + irq_set_chip_and_handler_name(EMMA2RH_IRQ_BASE + i, 73 73 &emma2rh_irq_controller, 74 74 handle_level_irq, "level"); 75 75 } ··· 105 105 u32 i; 106 106 107 107 for (i = 0; i < NUM_EMMA2RH_IRQ_SW; i++) 108 - set_irq_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i, 108 + irq_set_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i, 109 109 &emma2rh_sw_irq_controller, 110 110 handle_level_irq, "level"); 111 111 } ··· 162 162 u32 i; 163 163 164 164 for (i = 0; i < NUM_EMMA2RH_IRQ_GPIO; i++) 165 - set_irq_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i, 165 + irq_set_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i, 166 166 &emma2rh_gpio_irq_controller, 167 167 handle_edge_irq, "edge"); 168 168 }
+81 -162
arch/mips/include/asm/mach-cavium-octeon/irq.h
··· 11 11 #define NR_IRQS OCTEON_IRQ_LAST 12 12 #define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0 13 13 14 - /* 0 - 7 represent the i8259 master */ 15 - #define OCTEON_IRQ_I8259M0 0 16 - #define OCTEON_IRQ_I8259M1 1 17 - #define OCTEON_IRQ_I8259M2 2 18 - #define OCTEON_IRQ_I8259M3 3 19 - #define OCTEON_IRQ_I8259M4 4 20 - #define OCTEON_IRQ_I8259M5 5 21 - #define OCTEON_IRQ_I8259M6 6 22 - #define OCTEON_IRQ_I8259M7 7 23 - /* 8 - 15 represent the i8259 slave */ 24 - #define OCTEON_IRQ_I8259S0 8 25 - #define OCTEON_IRQ_I8259S1 9 26 - #define OCTEON_IRQ_I8259S2 10 27 - #define OCTEON_IRQ_I8259S3 11 28 - #define OCTEON_IRQ_I8259S4 12 29 - #define OCTEON_IRQ_I8259S5 13 30 - #define OCTEON_IRQ_I8259S6 14 31 - #define OCTEON_IRQ_I8259S7 15 32 - /* 16 - 23 represent the 8 MIPS standard interrupt sources */ 33 - #define OCTEON_IRQ_SW0 16 34 - #define OCTEON_IRQ_SW1 17 35 - #define OCTEON_IRQ_CIU0 18 36 - #define OCTEON_IRQ_CIU1 19 37 - #define OCTEON_IRQ_CIU4 20 38 - #define OCTEON_IRQ_5 21 39 - #define OCTEON_IRQ_PERF 22 40 - #define OCTEON_IRQ_TIMER 23 41 - /* 24 - 87 represent the sources in CIU_INTX_EN0 */ 42 - #define OCTEON_IRQ_WORKQ0 24 43 - #define OCTEON_IRQ_WORKQ1 25 44 - #define OCTEON_IRQ_WORKQ2 26 45 - #define OCTEON_IRQ_WORKQ3 27 46 - #define OCTEON_IRQ_WORKQ4 28 47 - #define OCTEON_IRQ_WORKQ5 29 48 - #define OCTEON_IRQ_WORKQ6 30 49 - #define OCTEON_IRQ_WORKQ7 31 50 - #define OCTEON_IRQ_WORKQ8 32 51 - #define OCTEON_IRQ_WORKQ9 33 52 - #define OCTEON_IRQ_WORKQ10 34 53 - #define OCTEON_IRQ_WORKQ11 35 54 - #define OCTEON_IRQ_WORKQ12 36 55 - #define OCTEON_IRQ_WORKQ13 37 56 - #define OCTEON_IRQ_WORKQ14 38 57 - #define OCTEON_IRQ_WORKQ15 39 58 - #define OCTEON_IRQ_GPIO0 40 59 - #define OCTEON_IRQ_GPIO1 41 60 - #define OCTEON_IRQ_GPIO2 42 61 - #define OCTEON_IRQ_GPIO3 43 62 - #define OCTEON_IRQ_GPIO4 44 63 - #define OCTEON_IRQ_GPIO5 45 64 - #define OCTEON_IRQ_GPIO6 46 65 - #define OCTEON_IRQ_GPIO7 47 66 - #define OCTEON_IRQ_GPIO8 48 67 - #define OCTEON_IRQ_GPIO9 49 68 - #define OCTEON_IRQ_GPIO10 50 69 - #define OCTEON_IRQ_GPIO11 51 70 - #define OCTEON_IRQ_GPIO12 52 71 - #define OCTEON_IRQ_GPIO13 53 72 - #define OCTEON_IRQ_GPIO14 54 73 - #define OCTEON_IRQ_GPIO15 55 74 - #define OCTEON_IRQ_MBOX0 56 75 - #define OCTEON_IRQ_MBOX1 57 76 - #define OCTEON_IRQ_UART0 58 77 - #define OCTEON_IRQ_UART1 59 78 - #define OCTEON_IRQ_PCI_INT0 60 79 - #define OCTEON_IRQ_PCI_INT1 61 80 - #define OCTEON_IRQ_PCI_INT2 62 81 - #define OCTEON_IRQ_PCI_INT3 63 82 - #define OCTEON_IRQ_PCI_MSI0 64 83 - #define OCTEON_IRQ_PCI_MSI1 65 84 - #define OCTEON_IRQ_PCI_MSI2 66 85 - #define OCTEON_IRQ_PCI_MSI3 67 86 - #define OCTEON_IRQ_RESERVED68 68 /* Summary of CIU_INT_SUM1 */ 87 - #define OCTEON_IRQ_TWSI 69 88 - #define OCTEON_IRQ_RML 70 89 - #define OCTEON_IRQ_TRACE 71 90 - #define OCTEON_IRQ_GMX_DRP0 72 91 - #define OCTEON_IRQ_GMX_DRP1 73 92 - #define OCTEON_IRQ_IPD_DRP 74 93 - #define OCTEON_IRQ_KEY_ZERO 75 94 - #define OCTEON_IRQ_TIMER0 76 95 - #define OCTEON_IRQ_TIMER1 77 96 - #define OCTEON_IRQ_TIMER2 78 97 - #define OCTEON_IRQ_TIMER3 79 98 - #define OCTEON_IRQ_USB0 80 99 - #define OCTEON_IRQ_PCM 81 100 - #define OCTEON_IRQ_MPI 82 101 - #define OCTEON_IRQ_TWSI2 83 102 - #define OCTEON_IRQ_POWIQ 84 103 - #define OCTEON_IRQ_IPDPPTHR 85 104 - #define OCTEON_IRQ_MII0 86 105 - #define OCTEON_IRQ_BOOTDMA 87 106 - /* 88 - 151 represent the sources in CIU_INTX_EN1 */ 107 - #define OCTEON_IRQ_WDOG0 88 108 - #define OCTEON_IRQ_WDOG1 89 109 - #define OCTEON_IRQ_WDOG2 90 110 - #define OCTEON_IRQ_WDOG3 91 111 - #define OCTEON_IRQ_WDOG4 92 
112 - #define OCTEON_IRQ_WDOG5 93 113 - #define OCTEON_IRQ_WDOG6 94 114 - #define OCTEON_IRQ_WDOG7 95 115 - #define OCTEON_IRQ_WDOG8 96 116 - #define OCTEON_IRQ_WDOG9 97 117 - #define OCTEON_IRQ_WDOG10 98 118 - #define OCTEON_IRQ_WDOG11 99 119 - #define OCTEON_IRQ_WDOG12 100 120 - #define OCTEON_IRQ_WDOG13 101 121 - #define OCTEON_IRQ_WDOG14 102 122 - #define OCTEON_IRQ_WDOG15 103 123 - #define OCTEON_IRQ_UART2 104 124 - #define OCTEON_IRQ_USB1 105 125 - #define OCTEON_IRQ_MII1 106 126 - #define OCTEON_IRQ_RESERVED107 107 127 - #define OCTEON_IRQ_RESERVED108 108 128 - #define OCTEON_IRQ_RESERVED109 109 129 - #define OCTEON_IRQ_RESERVED110 110 130 - #define OCTEON_IRQ_RESERVED111 111 131 - #define OCTEON_IRQ_RESERVED112 112 132 - #define OCTEON_IRQ_RESERVED113 113 133 - #define OCTEON_IRQ_RESERVED114 114 134 - #define OCTEON_IRQ_RESERVED115 115 135 - #define OCTEON_IRQ_RESERVED116 116 136 - #define OCTEON_IRQ_RESERVED117 117 137 - #define OCTEON_IRQ_RESERVED118 118 138 - #define OCTEON_IRQ_RESERVED119 119 139 - #define OCTEON_IRQ_RESERVED120 120 140 - #define OCTEON_IRQ_RESERVED121 121 141 - #define OCTEON_IRQ_RESERVED122 122 142 - #define OCTEON_IRQ_RESERVED123 123 143 - #define OCTEON_IRQ_RESERVED124 124 144 - #define OCTEON_IRQ_RESERVED125 125 145 - #define OCTEON_IRQ_RESERVED126 126 146 - #define OCTEON_IRQ_RESERVED127 127 147 - #define OCTEON_IRQ_RESERVED128 128 148 - #define OCTEON_IRQ_RESERVED129 129 149 - #define OCTEON_IRQ_RESERVED130 130 150 - #define OCTEON_IRQ_RESERVED131 131 151 - #define OCTEON_IRQ_RESERVED132 132 152 - #define OCTEON_IRQ_RESERVED133 133 153 - #define OCTEON_IRQ_RESERVED134 134 154 - #define OCTEON_IRQ_RESERVED135 135 155 - #define OCTEON_IRQ_RESERVED136 136 156 - #define OCTEON_IRQ_RESERVED137 137 157 - #define OCTEON_IRQ_RESERVED138 138 158 - #define OCTEON_IRQ_RESERVED139 139 159 - #define OCTEON_IRQ_RESERVED140 140 160 - #define OCTEON_IRQ_RESERVED141 141 161 - #define OCTEON_IRQ_RESERVED142 142 162 - #define OCTEON_IRQ_RESERVED143 143 163 - #define OCTEON_IRQ_RESERVED144 144 164 - #define OCTEON_IRQ_RESERVED145 145 165 - #define OCTEON_IRQ_RESERVED146 146 166 - #define OCTEON_IRQ_RESERVED147 147 167 - #define OCTEON_IRQ_RESERVED148 148 168 - #define OCTEON_IRQ_RESERVED149 149 169 - #define OCTEON_IRQ_RESERVED150 150 170 - #define OCTEON_IRQ_RESERVED151 151 14 + enum octeon_irq { 15 + /* 1 - 8 represent the 8 MIPS standard interrupt sources */ 16 + OCTEON_IRQ_SW0 = 1, 17 + OCTEON_IRQ_SW1, 18 + /* CIU0, CIU2, CIU4 are 3, 4, 5 */ 19 + OCTEON_IRQ_5 = 6, 20 + OCTEON_IRQ_PERF, 21 + OCTEON_IRQ_TIMER, 22 + /* sources in CIU_INTX_EN0 */ 23 + OCTEON_IRQ_WORKQ0, 24 + OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16, 25 + OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16, 26 + OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15, 27 + OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16, 28 + OCTEON_IRQ_MBOX1, 29 + OCTEON_IRQ_UART0, 30 + OCTEON_IRQ_UART1, 31 + OCTEON_IRQ_UART2, 32 + OCTEON_IRQ_PCI_INT0, 33 + OCTEON_IRQ_PCI_INT1, 34 + OCTEON_IRQ_PCI_INT2, 35 + OCTEON_IRQ_PCI_INT3, 36 + OCTEON_IRQ_PCI_MSI0, 37 + OCTEON_IRQ_PCI_MSI1, 38 + OCTEON_IRQ_PCI_MSI2, 39 + OCTEON_IRQ_PCI_MSI3, 40 + 41 + OCTEON_IRQ_TWSI, 42 + OCTEON_IRQ_TWSI2, 43 + OCTEON_IRQ_RML, 44 + OCTEON_IRQ_TRACE0, 45 + OCTEON_IRQ_GMX_DRP0 = OCTEON_IRQ_TRACE0 + 4, 46 + OCTEON_IRQ_IPD_DRP = OCTEON_IRQ_GMX_DRP0 + 5, 47 + OCTEON_IRQ_KEY_ZERO, 48 + OCTEON_IRQ_TIMER0, 49 + OCTEON_IRQ_TIMER1, 50 + OCTEON_IRQ_TIMER2, 51 + OCTEON_IRQ_TIMER3, 52 + OCTEON_IRQ_USB0, 53 + OCTEON_IRQ_USB1, 54 + OCTEON_IRQ_PCM, 55 + OCTEON_IRQ_MPI, 56 + OCTEON_IRQ_POWIQ, 57 + 
OCTEON_IRQ_IPDPPTHR, 58 + OCTEON_IRQ_MII0, 59 + OCTEON_IRQ_MII1, 60 + OCTEON_IRQ_BOOTDMA, 61 + 62 + OCTEON_IRQ_NAND, 63 + OCTEON_IRQ_MIO, /* Summary of MIO_BOOT_ERR */ 64 + OCTEON_IRQ_IOB, /* Summary of IOB_INT_SUM */ 65 + OCTEON_IRQ_FPA, /* Summary of FPA_INT_SUM */ 66 + OCTEON_IRQ_POW, /* Summary of POW_ECC_ERR */ 67 + OCTEON_IRQ_L2C, /* Summary of L2C_INT_STAT */ 68 + OCTEON_IRQ_IPD, /* Summary of IPD_INT_SUM */ 69 + OCTEON_IRQ_PIP, /* Summary of PIP_INT_REG */ 70 + OCTEON_IRQ_PKO, /* Summary of PKO_REG_ERROR */ 71 + OCTEON_IRQ_ZIP, /* Summary of ZIP_ERROR */ 72 + OCTEON_IRQ_TIM, /* Summary of TIM_REG_ERROR */ 73 + OCTEON_IRQ_RAD, /* Summary of RAD_REG_ERROR */ 74 + OCTEON_IRQ_KEY, /* Summary of KEY_INT_SUM */ 75 + OCTEON_IRQ_DFA, /* Summary of DFA */ 76 + OCTEON_IRQ_USBCTL, /* Summary of USBN0_INT_SUM */ 77 + OCTEON_IRQ_SLI, /* Summary of SLI_INT_SUM */ 78 + OCTEON_IRQ_DPI, /* Summary of DPI_INT_SUM */ 79 + OCTEON_IRQ_AGX0, /* Summary of GMX0*+PCS0_INT*_REG */ 80 + OCTEON_IRQ_AGL = OCTEON_IRQ_AGX0 + 5, 81 + OCTEON_IRQ_PTP, 82 + OCTEON_IRQ_PEM0, 83 + OCTEON_IRQ_PEM1, 84 + OCTEON_IRQ_SRIO0, 85 + OCTEON_IRQ_SRIO1, 86 + OCTEON_IRQ_LMC0, 87 + OCTEON_IRQ_DFM = OCTEON_IRQ_LMC0 + 4, /* Summary of DFM */ 88 + OCTEON_IRQ_RST, 89 + }; 171 90 172 91 #ifdef CONFIG_PCI_MSI 173 - /* 152 - 215 represent the MSI interrupts 0-63 */ 174 - #define OCTEON_IRQ_MSI_BIT0 152 175 - #define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255) 92 + /* 152 - 407 represent the MSI interrupts 0-255 */ 93 + #define OCTEON_IRQ_MSI_BIT0 (OCTEON_IRQ_RST + 1) 176 94 177 - #define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1) 95 + #define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255) 96 + #define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1) 178 97 #else 179 - #define OCTEON_IRQ_LAST 152 98 + #define OCTEON_IRQ_LAST (OCTEON_IRQ_RST + 1) 180 99 #endif 181 100 182 101 #endif
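The fixed #define table is replaced here by an enum in which related sources are offsets from a base value, so per-instance interrupt numbers can be computed instead of enumerated one by one. For example, the per-core watchdog line that octeon_irq_ciu1_wd_enable() derives above could be wrapped in a small helper (hypothetical, not part of the patch):

/* Hypothetical convenience wrapper over the new numbering. */
static inline int example_wdog_irq(int coreid)
{
        return OCTEON_IRQ_WDOG0 + coreid;       /* valid for coreid 0..15 */
}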
+2
arch/mips/include/asm/octeon/octeon.h
··· 257 257 258 258 extern uint64_t octeon_bootloader_entry_addr; 259 259 260 + extern void (*octeon_irq_setup_secondary)(void); 261 + 260 262 #endif /* __ASM_OCTEON_OCTEON_H */
+1 -1
arch/mips/include/asm/unistd.h
··· 1005 1005 #define __NR_name_to_handle_at (__NR_Linux + 303) 1006 1006 #define __NR_open_by_handle_at (__NR_Linux + 304) 1007 1007 #define __NR_clock_adjtime (__NR_Linux + 305) 1008 - #define __NR_clock_adjtime (__NR_Linux + 306) 1008 + #define __NR_syncfs (__NR_Linux + 306) 1009 1009 1010 1010 /* 1011 1011 * Offset of the last N32 flavoured syscall
+1 -1
arch/mips/jazz/irq.c
··· 56 56 int i; 57 57 58 58 for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++) 59 - set_irq_chip_and_handler(i, &r4030_irq_type, handle_level_irq); 59 + irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq); 60 60 61 61 r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0); 62 62 r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
+7 -7
arch/mips/jz4740/gpio.c
··· 306 306 uint32_t flag; 307 307 unsigned int gpio_irq; 308 308 unsigned int gpio_bank; 309 - struct jz_gpio_chip *chip = get_irq_desc_data(desc); 309 + struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc); 310 310 311 311 gpio_bank = JZ4740_IRQ_GPIO0 - irq; 312 312 ··· 416 416 chip->wakeup &= ~IRQ_TO_BIT(data->irq); 417 417 spin_unlock(&chip->lock); 418 418 419 - set_irq_wake(chip->irq, on); 419 + irq_set_irq_wake(chip->irq, on); 420 420 return 0; 421 421 } 422 422 ··· 510 510 gpiochip_add(&chip->gpio_chip); 511 511 512 512 chip->irq = JZ4740_IRQ_INTC_GPIO(id); 513 - set_irq_data(chip->irq, chip); 514 - set_irq_chained_handler(chip->irq, jz_gpio_irq_demux_handler); 513 + irq_set_handler_data(chip->irq, chip); 514 + irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler); 515 515 516 516 for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) { 517 517 irq_set_lockdep_class(irq, &gpio_lock_class); 518 - set_irq_chip_data(irq, chip); 519 - set_irq_chip_and_handler(irq, &jz_gpio_irq_chip, 520 - handle_level_irq); 518 + irq_set_chip_data(irq, chip); 519 + irq_set_chip_and_handler(irq, &jz_gpio_irq_chip, 520 + handle_level_irq); 521 521 } 522 522 523 523 return 0;
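Beyond the one-for-one renames (set_irq_data -> irq_set_handler_data and so on), this hunk shows the standard chained-demux pattern: the parent GPIO-bank interrupt carries a pointer to its bank, and the flow handler fans the pending bits out to the per-pin Linux interrupts. A self-contained sketch under the 2.6.39-era flow-handler signature; the bank struct and pending-register layout are invented for illustration:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>

struct example_bank {
        void __iomem *pending_reg;      /* 32 pin-status bits */
        int irq_base;                   /* first per-pin Linux irq */
};

static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct example_bank *bank = irq_desc_get_handler_data(desc);
        unsigned long pending = readl(bank->pending_reg);
        int bit;

        /* Hand each pending pin to its own per-pin interrupt. */
        for_each_set_bit(bit, &pending, 32)
                generic_handle_irq(bank->irq_base + bit);
}

static void example_bank_setup(struct example_bank *bank, int parent_irq)
{
        irq_set_handler_data(parent_irq, bank);
        irq_set_chained_handler(parent_irq, example_demux_handler);
}

The handler-data pointer travels with the parent interrupt, which is why the demux function can stay free of globals, exactly the property the jz4740 code above relies on.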
+2 -2
arch/mips/jz4740/irq.c
··· 104 104 writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK); 105 105 106 106 for (i = JZ4740_IRQ_BASE; i < JZ4740_IRQ_BASE + 32; i++) { 107 - set_irq_chip_data(i, (void *)IRQ_BIT(i)); 108 - set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq); 107 + irq_set_chip_data(i, (void *)IRQ_BIT(i)); 108 + irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq); 109 109 } 110 110 111 111 setup_irq(2, &jz4740_cascade_action);
+3 -3
arch/mips/kernel/i8259.c
··· 110 110 void make_8259A_irq(unsigned int irq) 111 111 { 112 112 disable_irq_nosync(irq); 113 - set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); 113 + irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); 114 114 enable_irq(irq); 115 115 } 116 116 ··· 336 336 init_8259A(0); 337 337 338 338 for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { 339 - set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); 340 - set_irq_probe(i); 339 + irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq); 340 + irq_set_probe(i); 341 341 } 342 342 343 343 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+1 -1
arch/mips/kernel/irq-gic.c
··· 229 229 vpe_local_setup(numvpes); 230 230 231 231 for (i = _irqbase; i < (_irqbase + numintrs); i++) 232 - set_irq_chip(i, &gic_irq_controller); 232 + irq_set_chip(i, &gic_irq_controller); 233 233 } 234 234 235 235 void __init gic_init(unsigned long gic_base_addr,
+2 -2
arch/mips/kernel/irq-gt641xx.c
··· 126 126 * bit31: logical or of bits[25:1]. 127 127 */ 128 128 for (i = 1; i < 30; i++) 129 - set_irq_chip_and_handler(GT641XX_IRQ_BASE + i, 130 - &gt641xx_irq_chip, handle_level_irq); 129 + irq_set_chip_and_handler(GT641XX_IRQ_BASE + i, 130 + &gt641xx_irq_chip, handle_level_irq); 131 131 }
+8 -4
arch/mips/kernel/irq-msc01.c
··· 137 137 138 138 switch (imp->im_type) { 139 139 case MSC01_IRQ_EDGE: 140 - set_irq_chip_and_handler_name(irqbase + n, 141 - &msc_edgeirq_type, handle_edge_irq, "edge"); 140 + irq_set_chip_and_handler_name(irqbase + n, 141 + &msc_edgeirq_type, 142 + handle_edge_irq, 143 + "edge"); 142 144 if (cpu_has_veic) 143 145 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); 144 146 else 145 147 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); 146 148 break; 147 149 case MSC01_IRQ_LEVEL: 148 - set_irq_chip_and_handler_name(irqbase+n, 149 - &msc_levelirq_type, handle_level_irq, "level"); 150 + irq_set_chip_and_handler_name(irqbase + n, 151 + &msc_levelirq_type, 152 + handle_level_irq, 153 + "level"); 150 154 if (cpu_has_veic) 151 155 MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); 152 156 else
+1 -1
arch/mips/kernel/irq-rm7000.c
··· 45 45 clear_c0_intcontrol(0x00000f00); /* Mask all */ 46 46 47 47 for (i = base; i < base + 4; i++) 48 - set_irq_chip_and_handler(i, &rm7k_irq_controller, 48 + irq_set_chip_and_handler(i, &rm7k_irq_controller, 49 49 handle_percpu_irq); 50 50 }
+2 -2
arch/mips/kernel/irq-rm9000.c
··· 98 98 clear_c0_intcontrol(0x0000f000); /* Mask all */ 99 99 100 100 for (i = base; i < base + 4; i++) 101 - set_irq_chip_and_handler(i, &rm9k_irq_controller, 101 + irq_set_chip_and_handler(i, &rm9k_irq_controller, 102 102 handle_level_irq); 103 103 104 104 rm9000_perfcount_irq = base + 1; 105 - set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 105 + irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 106 106 handle_percpu_irq); 107 107 }
+1 -1
arch/mips/kernel/irq.c
··· 102 102 #endif 103 103 104 104 for (i = 0; i < NR_IRQS; i++) 105 - set_irq_noprobe(i); 105 + irq_set_noprobe(i); 106 106 107 107 arch_init_irq(); 108 108
+2 -2
arch/mips/kernel/irq_cpu.c
··· 109 109 */ 110 110 if (cpu_has_mipsmt) 111 111 for (i = irq_base; i < irq_base + 2; i++) 112 - set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller, 112 + irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller, 113 113 handle_percpu_irq); 114 114 115 115 for (i = irq_base + 2; i < irq_base + 8; i++) 116 - set_irq_chip_and_handler(i, &mips_cpu_irq_controller, 116 + irq_set_chip_and_handler(i, &mips_cpu_irq_controller, 117 117 handle_percpu_irq); 118 118 }
+2 -2
arch/mips/kernel/irq_txx9.c
··· 154 154 for (i = 0; i < TXx9_MAX_IR; i++) { 155 155 txx9irq[i].level = 4; /* middle level */ 156 156 txx9irq[i].mode = TXx9_IRCR_LOW; 157 - set_irq_chip_and_handler(TXX9_IRQ_BASE + i, 158 - &txx9_irq_chip, handle_level_irq); 157 + irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip, 158 + handle_level_irq); 159 159 } 160 160 161 161 /* mask all IRC interrupts */
+1 -1
arch/mips/kernel/smtc.c
··· 1146 1146 1147 1147 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); 1148 1148 1149 - set_irq_handler(cpu_ipi_irq, handle_percpu_irq); 1149 + irq_set_handler(cpu_ipi_irq, handle_percpu_irq); 1150 1150 } 1151 1151 1152 1152 /*
+1 -1
arch/mips/lasat/interrupt.c
··· 128 128 mips_cpu_irq_init(); 129 129 130 130 for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++) 131 - set_irq_chip_and_handler(i, &lasat_irq_type, handle_level_irq); 131 + irq_set_chip_and_handler(i, &lasat_irq_type, handle_level_irq); 132 132 133 133 setup_irq(LASAT_CASCADE_IRQ, &cascade); 134 134 }
+2 -1
arch/mips/loongson/common/bonito-irq.c
··· 44 44 u32 i; 45 45 46 46 for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++) 47 - set_irq_chip_and_handler(i, &bonito_irq_type, handle_level_irq); 47 + irq_set_chip_and_handler(i, &bonito_irq_type, 48 + handle_level_irq); 48 49 49 50 #ifdef CONFIG_CPU_LOONGSON2E 50 51 setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction);
+1 -1
arch/mips/mti-malta/malta-int.c
··· 472 472 void __init arch_init_ipiirq(int irq, struct irqaction *action) 473 473 { 474 474 setup_irq(irq, action); 475 - set_irq_handler(irq, handle_percpu_irq); 475 + irq_set_handler(irq, handle_percpu_irq); 476 476 } 477 477 478 478 void __init arch_init_irq(void)
+1 -1
arch/mips/mti-malta/malta-time.c
··· 119 119 set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); 120 120 mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 121 121 #ifdef CONFIG_SMP 122 - set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq); 122 + irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq); 123 123 #endif 124 124 } 125 125 }
+12 -12
arch/mips/pci/msi-octeon.c
··· 172 172 pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, 173 173 control); 174 174 175 - set_irq_msi(irq, desc); 175 + irq_set_msi_desc(irq, desc); 176 176 write_msi_msg(irq, &msg); 177 177 return 0; 178 178 } ··· 259 259 static u64 msi_rcv_reg[4]; 260 260 static u64 mis_ena_reg[4]; 261 261 262 - static void octeon_irq_msi_enable_pcie(unsigned int irq) 262 + static void octeon_irq_msi_enable_pcie(struct irq_data *data) 263 263 { 264 264 u64 en; 265 265 unsigned long flags; 266 - int msi_number = irq - OCTEON_IRQ_MSI_BIT0; 266 + int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; 267 267 int irq_index = msi_number >> 6; 268 268 int irq_bit = msi_number & 0x3f; 269 269 ··· 275 275 raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); 276 276 } 277 277 278 - static void octeon_irq_msi_disable_pcie(unsigned int irq) 278 + static void octeon_irq_msi_disable_pcie(struct irq_data *data) 279 279 { 280 280 u64 en; 281 281 unsigned long flags; 282 - int msi_number = irq - OCTEON_IRQ_MSI_BIT0; 282 + int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; 283 283 int irq_index = msi_number >> 6; 284 284 int irq_bit = msi_number & 0x3f; 285 285 ··· 293 293 294 294 static struct irq_chip octeon_irq_chip_msi_pcie = { 295 295 .name = "MSI", 296 - .enable = octeon_irq_msi_enable_pcie, 297 - .disable = octeon_irq_msi_disable_pcie, 296 + .irq_enable = octeon_irq_msi_enable_pcie, 297 + .irq_disable = octeon_irq_msi_disable_pcie, 298 298 }; 299 299 300 - static void octeon_irq_msi_enable_pci(unsigned int irq) 300 + static void octeon_irq_msi_enable_pci(struct irq_data *data) 301 301 { 302 302 /* 303 303 * Octeon PCI doesn't have the ability to mask/unmask MSI ··· 308 308 */ 309 309 } 310 310 311 - static void octeon_irq_msi_disable_pci(unsigned int irq) 311 + static void octeon_irq_msi_disable_pci(struct irq_data *data) 312 312 { 313 313 /* See comment in enable */ 314 314 } 315 315 316 316 static struct irq_chip octeon_irq_chip_msi_pci = { 317 317 .name = "MSI", 318 - .enable = octeon_irq_msi_enable_pci, 319 - .disable = octeon_irq_msi_disable_pci, 318 + .irq_enable = octeon_irq_msi_enable_pci, 319 + .irq_disable = octeon_irq_msi_disable_pci, 320 320 }; 321 321 322 322 /* ··· 388 388 } 389 389 390 390 for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++) 391 - set_irq_chip_and_handler(irq, msi, handle_simple_irq); 391 + irq_set_chip_and_handler(irq, msi, handle_simple_irq); 392 392 393 393 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { 394 394 if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
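The MSI chips get the same conversion: each callback receives a struct irq_data and recovers the MSI number from data->irq relative to the MSI base, then performs a locked read-modify-write of the enable register. A condensed sketch of the enable path, with placeholder accessors (example_msi_read_enable/example_msi_write_enable stand in for the mis_ena_reg accesses in the real driver):

#include <linux/irq.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_msi_lock);

/* Placeholder accessors for the per-block MSI enable registers. */
u64 example_msi_read_enable(int block);
void example_msi_write_enable(int block, u64 en);

static void example_msi_enable(struct irq_data *data)
{
        int msi = data->irq - OCTEON_IRQ_MSI_BIT0;      /* which MSI source */
        unsigned long flags;
        u64 en;

        raw_spin_lock_irqsave(&example_msi_lock, flags);
        en = example_msi_read_enable(msi >> 6);         /* 64 MSIs per block */
        en |= 1ull << (msi & 0x3f);
        example_msi_write_enable(msi >> 6, en);
        raw_spin_unlock_irqrestore(&example_msi_lock, flags);
}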
+1 -1
arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
··· 182 182 183 183 /* initialize all the IRQ descriptors */ 184 184 for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { 185 - set_irq_chip_and_handler(i, &msp_cic_irq_controller, 185 + irq_set_chip_and_handler(i, &msp_cic_irq_controller, 186 186 handle_level_irq); 187 187 #ifdef CONFIG_MIPS_MT_SMTC 188 188 /* Mask of CIC interrupt */
+1 -1
arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
··· 77 77 78 78 /* initialize all the IRQ descriptors */ 79 79 for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++) 80 - set_irq_chip_and_handler(i, &msp_slp_irq_controller, 80 + irq_set_chip_and_handler(i, &msp_slp_irq_controller, 81 81 handle_level_irq); 82 82 } 83 83
+1 -1
arch/mips/pmc-sierra/msp71xx/msp_smp.c
··· 64 64 void __init arch_init_ipiirq(int irq, struct irqaction *action) 65 65 { 66 66 setup_irq(irq, action); 67 - set_irq_handler(irq, handle_percpu_irq); 67 + irq_set_handler(irq, handle_percpu_irq); 68 68 } 69 69 70 70 void __init msp_vsmp_int_init(void)
+4 -2
arch/mips/pnx833x/common/interrupts.c
··· 259 259 /* Set IRQ information in irq_desc */ 260 260 for (irq = PNX833X_PIC_IRQ_BASE; irq < (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ); irq++) { 261 261 pnx833x_hard_disable_pic_irq(irq); 262 - set_irq_chip_and_handler(irq, &pnx833x_pic_irq_type, handle_simple_irq); 262 + irq_set_chip_and_handler(irq, &pnx833x_pic_irq_type, 263 + handle_simple_irq); 263 264 } 264 265 265 266 for (irq = PNX833X_GPIO_IRQ_BASE; irq < (PNX833X_GPIO_IRQ_BASE + PNX833X_GPIO_NUM_IRQ); irq++) 266 - set_irq_chip_and_handler(irq, &pnx833x_gpio_irq_type, handle_simple_irq); 267 + irq_set_chip_and_handler(irq, &pnx833x_gpio_irq_type, 268 + handle_simple_irq); 267 269 268 270 /* Set PIC priority limiter register to 0 */ 269 271 PNX833X_PIC_INT_PRIORITY = 0;
+5 -5
arch/mips/pnx8550/common/int.c
··· 183 183 int configPR; 184 184 185 185 for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) 186 - set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); 186 + irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq); 187 187 188 188 /* init of GIC/IPC interrupts */ 189 189 /* should be done before cp0 since cp0 init enables the GIC int */ ··· 206 206 /* mask/priority is still 0 so we will not get any 207 207 * interrupts until it is unmasked */ 208 208 209 - set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); 209 + irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq); 210 210 } 211 211 212 212 /* Priority level 0 */ ··· 215 215 /* Set int vector table address */ 216 216 PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0; 217 217 218 - set_irq_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type, 218 + irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type, 219 219 handle_level_irq); 220 220 setup_irq(MIPS_CPU_GIC_IRQ, &gic_action); 221 221 222 222 /* init of Timer interrupts */ 223 223 for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) 224 - set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); 224 + irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq); 225 225 226 226 /* Stop Timer 1-3 */ 227 227 configPR = read_c0_config7(); 228 228 configPR |= 0x00000038; 229 229 write_c0_config7(configPR); 230 230 231 - set_irq_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type, 231 + irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type, 232 232 handle_level_irq); 233 233 setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action); 234 234 }
+1 -1
arch/mips/powertv/asic/irq_asic.c
··· 112 112 * Initialize interrupt handlers. 113 113 */ 114 114 for (i = 0; i < NR_IRQS; i++) 115 - set_irq_chip_and_handler(i, &asic_irq_chip, handle_level_irq); 115 + irq_set_chip_and_handler(i, &asic_irq_chip, handle_level_irq); 116 116 }
+2 -2
arch/mips/rb532/irq.c
··· 207 207 pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS); 208 208 209 209 for (i = 0; i < RC32434_NR_IRQS; i++) 210 - set_irq_chip_and_handler(i, &rc32434_irq_type, 211 - handle_level_irq); 210 + irq_set_chip_and_handler(i, &rc32434_irq_type, 211 + handle_level_irq); 212 212 } 213 213 214 214 /* Main Interrupt dispatcher */
+1 -1
arch/mips/sgi-ip22/ip22-int.c
··· 312 312 else 313 313 handler = &ip22_local3_irq_type; 314 314 315 - set_irq_chip_and_handler(i, handler, handle_level_irq); 315 + irq_set_chip_and_handler(i, handler, handle_level_irq); 316 316 } 317 317 318 318 /* vector handler. this register the IRQ as non-sharable */
+1 -1
arch/mips/sgi-ip27/ip27-irq.c
··· 337 337 338 338 void __devinit register_bridge_irq(unsigned int irq) 339 339 { 340 - set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); 340 + irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); 341 341 } 342 342 343 343 int __devinit request_bridge_irq(struct bridge_controller *bc)
+1 -1
arch/mips/sgi-ip27/ip27-timer.c
··· 153 153 panic("Allocation of irq number for timer failed"); 154 154 } while (xchg(&rt_timer_irq, irq)); 155 155 156 - set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); 156 + irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); 157 157 setup_irq(irq, &hub_rt_irqaction); 158 158 } 159 159
+24 -16
arch/mips/sgi-ip32/ip32-irq.c
··· 451 451 for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) { 452 452 switch (irq) { 453 453 case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ: 454 - set_irq_chip_and_handler_name(irq,&ip32_mace_interrupt, 455 - handle_level_irq, "level"); 454 + irq_set_chip_and_handler_name(irq, 455 + &ip32_mace_interrupt, 456 + handle_level_irq, 457 + "level"); 456 458 break; 457 459 458 460 case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ: 459 - set_irq_chip_and_handler_name(irq, 460 - &ip32_macepci_interrupt, handle_level_irq, 461 - "level"); 461 + irq_set_chip_and_handler_name(irq, 462 + &ip32_macepci_interrupt, 463 + handle_level_irq, 464 + "level"); 462 465 break; 463 466 464 467 case CRIME_CPUERR_IRQ: 465 468 case CRIME_MEMERR_IRQ: 466 - set_irq_chip_and_handler_name(irq, 467 - &crime_level_interrupt, handle_level_irq, 468 - "level"); 469 + irq_set_chip_and_handler_name(irq, 470 + &crime_level_interrupt, 471 + handle_level_irq, 472 + "level"); 469 473 break; 470 474 471 475 case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ: 472 476 case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ: 473 477 case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ: 474 478 case CRIME_VICE_IRQ: 475 - set_irq_chip_and_handler_name(irq, 476 - &crime_edge_interrupt, handle_edge_irq, "edge"); 479 + irq_set_chip_and_handler_name(irq, 480 + &crime_edge_interrupt, 481 + handle_edge_irq, 482 + "edge"); 477 483 break; 478 484 479 485 case MACEISA_PARALLEL_IRQ: 480 486 case MACEISA_SERIAL1_TDMAPR_IRQ: 481 487 case MACEISA_SERIAL2_TDMAPR_IRQ: 482 - set_irq_chip_and_handler_name(irq, 483 - &ip32_maceisa_edge_interrupt, handle_edge_irq, 484 - "edge"); 488 + irq_set_chip_and_handler_name(irq, 489 + &ip32_maceisa_edge_interrupt, 490 + handle_edge_irq, 491 + "edge"); 485 492 break; 486 493 487 494 default: 488 - set_irq_chip_and_handler_name(irq, 489 - &ip32_maceisa_level_interrupt, handle_level_irq, 490 - "level"); 495 + irq_set_chip_and_handler_name(irq, 496 + &ip32_maceisa_level_interrupt, 497 + handle_level_irq, 498 + "level"); 491 499 break; 492 500 } 493 501 }
+2 -1
arch/mips/sibyte/bcm1480/irq.c
··· 216 216 int i; 217 217 218 218 for (i = 0; i < BCM1480_NR_IRQS; i++) { 219 - set_irq_chip_and_handler(i, &bcm1480_irq_type, handle_level_irq); 219 + irq_set_chip_and_handler(i, &bcm1480_irq_type, 220 + handle_level_irq); 220 221 bcm1480_irq_owner[i] = 0; 221 222 } 222 223 }
+2 -1
arch/mips/sibyte/sb1250/irq.c
··· 190 190 int i; 191 191 192 192 for (i = 0; i < SB1250_NR_IRQS; i++) { 193 - set_irq_chip_and_handler(i, &sb1250_irq_type, handle_level_irq); 193 + irq_set_chip_and_handler(i, &sb1250_irq_type, 194 + handle_level_irq); 194 195 sb1250_irq_owner[i] = 0; 195 196 } 196 197 }
+1 -1
arch/mips/sni/a20r.c
··· 209 209 int i; 210 210 211 211 for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++) 212 - set_irq_chip_and_handler(i, &a20r_irq_type, handle_level_irq); 212 + irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq); 213 213 sni_hwint = a20r_hwint; 214 214 change_c0_status(ST0_IM, IE_IRQ0); 215 215 setup_irq(SNI_A20R_IRQ_BASE + 3, &sni_isa_irq);
+1 -1
arch/mips/sni/pcimt.c
··· 296 296 mips_cpu_irq_init(); 297 297 /* Actually we've got more interrupts to handle ... */ 298 298 for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_SCSI; i++) 299 - set_irq_chip_and_handler(i, &pcimt_irq_type, handle_level_irq); 299 + irq_set_chip_and_handler(i, &pcimt_irq_type, handle_level_irq); 300 300 sni_hwint = sni_pcimt_hwint; 301 301 change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ3); 302 302 }
+2 -2
arch/mips/sni/pcit.c
··· 238 238 239 239 mips_cpu_irq_init(); 240 240 for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) 241 - set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq); 241 + irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq); 242 242 *(volatile u32 *)SNI_PCIT_INT_REG = 0; 243 243 sni_hwint = sni_pcit_hwint; 244 244 change_c0_status(ST0_IM, IE_IRQ1); ··· 251 251 252 252 mips_cpu_irq_init(); 253 253 for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) 254 - set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq); 254 + irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq); 255 255 *(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000; 256 256 sni_hwint = sni_pcit_hwint_cplus; 257 257 change_c0_status(ST0_IM, IE_IRQ0);
+2 -2
arch/mips/sni/rm200.c
··· 413 413 sni_rm200_init_8259A(); 414 414 415 415 for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++) 416 - set_irq_chip_and_handler(i, &sni_rm200_i8259A_chip, 416 + irq_set_chip_and_handler(i, &sni_rm200_i8259A_chip, 417 417 handle_level_irq); 418 418 419 419 setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2); ··· 477 477 mips_cpu_irq_init(); 478 478 /* Actually we've got more interrupts to handle ... */ 479 479 for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++) 480 - set_irq_chip_and_handler(i, &rm200_irq_type, handle_level_irq); 480 + irq_set_chip_and_handler(i, &rm200_irq_type, handle_level_irq); 481 481 sni_hwint = sni_rm200_hwint; 482 482 change_c0_status(ST0_IM, IE_IRQ0); 483 483 setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq);
+1 -1
arch/mips/txx9/generic/irq_tx4927.c
··· 35 35 36 36 mips_cpu_irq_init(); 37 37 txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL); 38 - set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT, 38 + irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT, 39 39 handle_simple_irq); 40 40 /* raise priority for errors, timers, SIO */ 41 41 txx9_irq_set_pri(TX4927_IR_ECCERR, 7);
+1 -1
arch/mips/txx9/generic/irq_tx4938.c
··· 23 23 24 24 mips_cpu_irq_init(); 25 25 txx9_irq_init(TX4938_IRC_REG & 0xfffffffffULL); 26 - set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT, 26 + irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT, 27 27 handle_simple_irq); 28 28 /* raise priority for errors, timers, SIO */ 29 29 txx9_irq_set_pri(TX4938_IR_ECCERR, 7);
+3 -3
arch/mips/txx9/generic/irq_tx4939.c
··· 176 176 for (i = 1; i < TX4939_NUM_IR; i++) { 177 177 tx4939irq[i].level = 4; /* middle level */ 178 178 tx4939irq[i].mode = TXx9_IRCR_LOW; 179 - set_irq_chip_and_handler(TXX9_IRQ_BASE + i, 180 - &tx4939_irq_chip, handle_level_irq); 179 + irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &tx4939_irq_chip, 180 + handle_level_irq); 181 181 } 182 182 183 183 /* mask all IRC interrupts */ ··· 193 193 __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r); 194 194 __raw_writel(irc_elevel, &tx4939_ircptr->msk.r); 195 195 196 - set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT, 196 + irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT, 197 197 handle_simple_irq); 198 198 199 199 /* raise priority for errors, timers, sio */
+3 -2
arch/mips/txx9/jmr3927/irq.c
··· 120 120 121 121 tx3927_irq_init(); 122 122 for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++) 123 - set_irq_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq); 123 + irq_set_chip_and_handler(i, &jmr3927_irq_ioc, 124 + handle_level_irq); 124 125 125 126 /* setup IOC interrupt 1 (PCI, MODEM) */ 126 - set_irq_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq); 127 + irq_set_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq); 127 128 }
+3 -3
arch/mips/txx9/rbtx4927/irq.c
··· 164 164 165 165 for (i = RBTX4927_IRQ_IOC; 166 166 i < RBTX4927_IRQ_IOC + RBTX4927_NR_IRQ_IOC; i++) 167 - set_irq_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type, 167 + irq_set_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type, 168 168 handle_level_irq); 169 - set_irq_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq); 169 + irq_set_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq); 170 170 } 171 171 172 172 static int rbtx4927_irq_dispatch(int pending) ··· 194 194 tx4927_irq_init(); 195 195 toshiba_rbtx4927_irq_ioc_init(); 196 196 /* Onboard 10M Ether: High Active */ 197 - set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH); 197 + irq_set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH); 198 198 }
+3 -3
arch/mips/txx9/rbtx4938/irq.c
··· 132 132 133 133 for (i = RBTX4938_IRQ_IOC; 134 134 i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++) 135 - set_irq_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type, 135 + irq_set_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type, 136 136 handle_level_irq); 137 137 138 - set_irq_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq); 138 + irq_set_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq); 139 139 } 140 140 141 141 void __init rbtx4938_irq_setup(void) ··· 153 153 tx4938_irq_init(); 154 154 toshiba_rbtx4938_irq_ioc_init(); 155 155 /* Onboard 10M Ether: High Active */ 156 - set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH); 156 + irq_set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH); 157 157 }
+2 -2
arch/mips/txx9/rbtx4939/irq.c
··· 88 88 tx4939_irq_init(); 89 89 for (i = RBTX4939_IRQ_IOC; 90 90 i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++) 91 - set_irq_chip_and_handler(i, &rbtx4939_ioc_irq_chip, 91 + irq_set_chip_and_handler(i, &rbtx4939_ioc_irq_chip, 92 92 handle_level_irq); 93 93 94 - set_irq_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq); 94 + irq_set_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq); 95 95 }
+2 -2
arch/mips/vr41xx/common/icu.c
··· 710 710 icu2_write(MGIUINTHREG, 0xffff); 711 711 712 712 for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++) 713 - set_irq_chip_and_handler(i, &sysint1_irq_type, 713 + irq_set_chip_and_handler(i, &sysint1_irq_type, 714 714 handle_level_irq); 715 715 716 716 for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++) 717 - set_irq_chip_and_handler(i, &sysint2_irq_type, 717 + irq_set_chip_and_handler(i, &sysint2_irq_type, 718 718 handle_level_irq); 719 719 720 720 cascade_irq(INT0_IRQ, icu_get_irq);
+1 -1
arch/mips/vr41xx/common/irq.c
··· 87 87 atomic_inc(&irq_err_count); 88 88 else 89 89 irq_dispatch(irq); 90 - if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask) 90 + if (!irqd_irq_disabled(idata) && chip->irq_unmask) 91 91 chip->irq_unmask(idata); 92 92 } else 93 93 do_IRQ(irq);
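The vr41xx hunk above is the flag side of the same transition: code no longer peeks at desc->status, it queries the irqd_*() accessors on struct irq_data instead. A minimal sketch, assuming a hypothetical helper wrapped around an existing chip:

#include <linux/irq.h>

/* Hypothetical: unmask an irq again unless it was disabled meanwhile,
 * leaving per-CPU interrupts alone. */
static void my_maybe_unmask(unsigned int irq)
{
	struct irq_data *idata = irq_get_irq_data(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(idata);

	if (irqd_is_per_cpu(idata))		/* was: status & IRQ_PER_CPU */
		return;
	if (!irqd_irq_disabled(idata) &&	/* was: !(status & IRQ_DISABLED) */
	    chip->irq_unmask)
		chip->irq_unmask(idata);
}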
+1 -1
arch/mn10300/Kconfig
··· 2 2 def_bool y 3 3 select HAVE_OPROFILE 4 4 select HAVE_GENERIC_HARDIRQS 5 - select GENERIC_HARDIRQS_NO_DEPRECATED 5 + select GENERIC_IRQ_SHOW 6 6 select HAVE_ARCH_TRACEHOOK 7 7 select HAVE_ARCH_KGDB 8 8
+18 -67
arch/mn10300/kernel/irq.c
··· 263 263 */ 264 264 void mn10300_set_lateack_irq_type(int irq) 265 265 { 266 - set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level, 266 + irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level, 267 267 handle_level_irq); 268 268 } 269 269 ··· 275 275 int irq; 276 276 277 277 for (irq = 0; irq < NR_IRQS; irq++) 278 - if (get_irq_chip(irq) == &no_irq_chip) 278 + if (irq_get_chip(irq) == &no_irq_chip) 279 279 /* due to the PIC latching interrupt requests, even 280 280 * when the IRQ is disabled, IRQ_PENDING is superfluous 281 281 * and we can use handle_level_irq() for edge-triggered 282 282 * interrupts */ 283 - set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge, 283 + irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge, 284 284 handle_level_irq); 285 285 286 286 unit_init_IRQ(); ··· 335 335 /* 336 336 * Display interrupt management information through /proc/interrupts 337 337 */ 338 - int show_interrupts(struct seq_file *p, void *v) 338 + int arch_show_interrupts(struct seq_file *p, int prec) 339 339 { 340 - int i = *(loff_t *) v, j, cpu; 341 - struct irqaction *action; 342 - unsigned long flags; 343 - 344 - switch (i) { 345 - /* display column title bar naming CPUs */ 346 - case 0: 347 - seq_printf(p, " "); 348 - for (j = 0; j < NR_CPUS; j++) 349 - if (cpu_online(j)) 350 - seq_printf(p, "CPU%d ", j); 351 - seq_putc(p, '\n'); 352 - break; 353 - 354 - /* display information rows, one per active CPU */ 355 - case 1 ... NR_IRQS - 1: 356 - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 357 - 358 - action = irq_desc[i].action; 359 - if (action) { 360 - seq_printf(p, "%3d: ", i); 361 - for_each_present_cpu(cpu) 362 - seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); 363 - 364 - if (i < NR_CPU_IRQS) 365 - seq_printf(p, " %14s.%u", 366 - irq_desc[i].irq_data.chip->name, 367 - (GxICR(i) & GxICR_LEVEL) >> 368 - GxICR_LEVEL_SHIFT); 369 - else 370 - seq_printf(p, " %14s", 371 - irq_desc[i].irq_data.chip->name); 372 - 373 - seq_printf(p, " %s", action->name); 374 - 375 - for (action = action->next; 376 - action; 377 - action = action->next) 378 - seq_printf(p, ", %s", action->name); 379 - 380 - seq_putc(p, '\n'); 381 - } 382 - 383 - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 384 - break; 385 - 386 - /* polish off with NMI and error counters */ 387 - case NR_IRQS: 388 340 #ifdef CONFIG_MN10300_WD_TIMER 389 - seq_printf(p, "NMI: "); 390 - for (j = 0; j < NR_CPUS; j++) 391 - if (cpu_online(j)) 392 - seq_printf(p, "%10u ", nmi_count(j)); 393 - seq_putc(p, '\n'); 341 + int j; 342 + 343 + seq_printf(p, "%*s: ", prec, "NMI"); 344 + for (j = 0; j < NR_CPUS; j++) 345 + if (cpu_online(j)) 346 + seq_printf(p, "%10u ", nmi_count(j)); 347 + seq_putc(p, '\n'); 394 348 #endif 395 349 396 - seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 397 - break; 398 - } 399 - 350 + seq_printf(p, "%*s: ", prec, "ERR"); 351 + seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); 400 352 return 0; 401 353 } 402 354 403 355 #ifdef CONFIG_HOTPLUG_CPU 404 356 void migrate_irqs(void) 405 357 { 406 - irq_desc_t *desc; 407 358 int irq; 408 359 unsigned int self, new; 409 360 unsigned long flags; 410 361 411 362 self = smp_processor_id(); 412 363 for (irq = 0; irq < NR_IRQS; irq++) { 413 - desc = irq_desc + irq; 364 + struct irq_data *data = irq_get_irq_data(irq); 414 365 415 - if (desc->status == IRQ_PER_CPU) 366 + if (irqd_is_per_cpu(data)) 416 367 continue; 417 368 418 - if (cpu_isset(self, irq_desc[irq].affinity) && 369 + if (cpu_isset(self, data->affinity) && 419 370 !cpus_intersects(irq_affinity[irq], cpu_online_map)) { 420 371 int cpu_id; 421 372 cpu_id = first_cpu(cpu_online_map); 422 - cpu_set(cpu_id, irq_desc[irq].affinity); 373 + cpu_set(cpu_id, data->affinity); 423 374 } 424 375 /* We need to operate irq_affinity_online atomically. */ 425 376 arch_local_cli_save(flags); ··· 381 430 GxICR(irq) = x & GxICR_LEVEL; 382 431 tmp = GxICR(irq); 383 432 384 - new = any_online_cpu(irq_desc[irq].affinity); 433 + new = any_online_cpu(data->affinity); 385 434 irq_affinity_online[irq] = new; 386 435 387 436 CROSS_GxICR(irq, new) =
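The mn10300 hunk above shows what selecting GENERIC_IRQ_SHOW buys: the core now renders the per-IRQ table in /proc/interrupts, and the architecture only contributes summary rows through arch_show_interrupts(), right-aligned via the prec argument. A hedged skeleton of such a hook; my_nmi_count() is a hypothetical per-CPU counter, and irq_err_count is assumed to be an atomic_t as it is here:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>

extern unsigned int my_nmi_count(int cpu);	/* hypothetical */
extern atomic_t irq_err_count;			/* assumed atomic_t */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");	/* prec lines up with the irq numbers */
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", my_nmi_count(j));
	seq_putc(p, '\n');

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}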
+1 -1
arch/mn10300/kernel/mn10300-serial.c
··· 933 933 NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); 934 934 set_intr_level(port->tx_irq, 935 935 NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); 936 - set_irq_chip(port->tm_irq, &mn10300_serial_pic); 936 + irq_set_chip(port->tm_irq, &mn10300_serial_pic); 937 937 938 938 if (request_irq(port->rx_irq, mn10300_serial_interrupt, 939 939 IRQF_DISABLED, port->rx_name, port) < 0)
+6 -6
arch/mn10300/kernel/smp.c
··· 156 156 u16 tmp16; 157 157 158 158 /* set up the reschedule IPI */ 159 - set_irq_chip_and_handler(RESCHEDULE_IPI, 160 - &mn10300_ipi_type, handle_percpu_irq); 159 + irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type, 160 + handle_percpu_irq); 161 161 setup_irq(RESCHEDULE_IPI, &reschedule_ipi); 162 162 set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV); 163 163 mn10300_ipi_enable(RESCHEDULE_IPI); 164 164 165 165 /* set up the call function IPI */ 166 - set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI, 167 - &mn10300_ipi_type, handle_percpu_irq); 166 + irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type, 167 + handle_percpu_irq); 168 168 setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi); 169 169 set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV); 170 170 mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); ··· 172 172 /* set up the local timer IPI */ 173 173 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \ 174 174 defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) 175 - set_irq_chip_and_handler(LOCAL_TIMER_IPI, 176 - &mn10300_ipi_type, handle_percpu_irq); 175 + irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type, 176 + handle_percpu_irq); 177 177 setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi); 178 178 set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV); 179 179 mn10300_ipi_enable(LOCAL_TIMER_IPI);
+2 -1
arch/mn10300/unit-asb2364/irq-fpga.c
··· 100 100 SyncExBus(); 101 101 102 102 for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++) 103 - set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq); 103 + irq_set_chip_and_handler(irq, &asb2364_fpga_pic, 104 + handle_level_irq); 104 105 105 106 /* the FPGA drives the XIRQ1 input on the CPU PIC */ 106 107 setup_irq(XIRQ1, &fpga_irq[0]);
-1
arch/parisc/Kconfig
··· 15 15 select HAVE_GENERIC_HARDIRQS 16 16 select GENERIC_IRQ_PROBE 17 17 select IRQ_PER_CPU 18 - select GENERIC_HARDIRQS_NO_DEPRECATED 19 18 20 19 help 21 20 The PA-RISC microprocessor is designed by Hewlett-Packard and used
+14 -18
arch/parisc/kernel/irq.c
··· 113 113 int cpu_dest; 114 114 115 115 /* timer and ipi have to always be received on all CPUs */ 116 - if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) { 117 - /* Bad linux design decision. The mask has already 118 - * been set; we must reset it. Will fix - tglx 119 - */ 120 - cpumask_setall(d->affinity); 116 + if (irqd_is_per_cpu(d)) 121 117 return -EINVAL; 122 - } 123 118 124 119 /* whatever mask they set, we just allow one CPU */ 125 120 cpu_dest = first_cpu(*dest); ··· 169 174 } 170 175 171 176 if (i < NR_IRQS) { 177 + struct irq_desc *desc = irq_to_desc(i); 172 178 struct irqaction *action; 173 179 174 - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 175 - action = irq_desc[i].action; 180 + raw_spin_lock_irqsave(&desc->lock, flags); 181 + action = desc->action; 176 182 if (!action) 177 183 goto skip; 178 184 seq_printf(p, "%3d: ", i); ··· 184 188 seq_printf(p, "%10u ", kstat_irqs(i)); 185 189 #endif 186 190 187 - seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name); 191 + seq_printf(p, " %14s", irq_desc_get_chip(desc)->name); 188 192 #ifndef PARISC_IRQ_CR16_COUNTS 189 193 seq_printf(p, " %s", action->name); 190 194 ··· 216 220 217 221 seq_putc(p, '\n'); 218 222 skip: 219 - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 223 + raw_spin_unlock_irqrestore(&desc->lock, flags); 220 224 } 221 225 222 226 return 0; ··· 234 238 235 239 int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data) 236 240 { 237 - if (irq_desc[irq].action) 241 + if (irq_has_action(irq)) 238 242 return -EBUSY; 239 - if (get_irq_chip(irq) != &cpu_interrupt_type) 243 + if (irq_get_chip(irq) != &cpu_interrupt_type) 240 244 return -EBUSY; 241 245 242 246 /* for iosapic interrupts */ 243 247 if (type) { 244 - set_irq_chip_and_handler(irq, type, handle_percpu_irq); 245 - set_irq_chip_data(irq, data); 248 + irq_set_chip_and_handler(irq, type, handle_percpu_irq); 249 + irq_set_chip_data(irq, data); 246 250 __cpu_unmask_irq(irq); 247 251 } 248 252 return 0; ··· 353 357 #ifdef CONFIG_SMP 354 358 desc = irq_to_desc(irq); 355 359 cpumask_copy(&dest, desc->irq_data.affinity); 356 - if (CHECK_IRQ_PER_CPU(desc->status) && 360 + if (irqd_is_per_cpu(&desc->irq_data) && 357 361 !cpu_isset(smp_processor_id(), dest)) { 358 362 int cpu = first_cpu(dest); 359 363 ··· 394 398 { 395 399 int i; 396 400 for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) { 397 - set_irq_chip_and_handler(i, &cpu_interrupt_type, 401 + irq_set_chip_and_handler(i, &cpu_interrupt_type, 398 402 handle_percpu_irq); 399 403 } 400 404 401 - set_irq_handler(TIMER_IRQ, handle_percpu_irq); 405 + irq_set_handler(TIMER_IRQ, handle_percpu_irq); 402 406 setup_irq(TIMER_IRQ, &timer_action); 403 407 #ifdef CONFIG_SMP 404 - set_irq_handler(IPI_IRQ, handle_percpu_irq); 408 + irq_set_handler(IPI_IRQ, handle_percpu_irq); 405 409 setup_irq(IPI_IRQ, &ipi_action); 406 410 #endif 407 411 }
+2 -1
arch/powerpc/Kconfig
··· 138 138 select HAVE_GENERIC_HARDIRQS 139 139 select HAVE_SPARSE_IRQ 140 140 select IRQ_PER_CPU 141 - select GENERIC_HARDIRQS_NO_DEPRECATED 141 + select GENERIC_IRQ_SHOW 142 + select GENERIC_IRQ_SHOW_LEVEL 142 143 143 144 config EARLY_PRINTK 144 145 bool
+1
arch/powerpc/configs/44x/warp_defconfig
··· 47 47 CONFIG_MTD_UBI=y 48 48 CONFIG_PROC_DEVICETREE=y 49 49 CONFIG_BLK_DEV_RAM=y 50 + CONFIG_MISC_DEVICES=y 50 51 CONFIG_EEPROM_AT24=y 51 52 CONFIG_SCSI=y 52 53 CONFIG_BLK_DEV_SD=y
+1
arch/powerpc/configs/52xx/motionpro_defconfig
··· 43 43 CONFIG_BLK_DEV_LOOP=y 44 44 CONFIG_BLK_DEV_RAM=y 45 45 CONFIG_BLK_DEV_RAM_SIZE=32768 46 + CONFIG_MISC_DEVICES=y 46 47 CONFIG_EEPROM_LEGACY=y 47 48 CONFIG_SCSI_TGT=y 48 49 CONFIG_BLK_DEV_SD=y
+1
arch/powerpc/configs/86xx/gef_ppc9a_defconfig
··· 85 85 CONFIG_BLK_DEV_NBD=m 86 86 CONFIG_BLK_DEV_RAM=y 87 87 CONFIG_BLK_DEV_RAM_SIZE=131072 88 + CONFIG_MISC_DEVICES=y 88 89 CONFIG_DS1682=y 89 90 CONFIG_IDE=y 90 91 CONFIG_BLK_DEV_IDECS=y
+1
arch/powerpc/configs/86xx/gef_sbc310_defconfig
··· 85 85 CONFIG_BLK_DEV_NBD=m 86 86 CONFIG_BLK_DEV_RAM=y 87 87 CONFIG_BLK_DEV_RAM_SIZE=131072 88 + CONFIG_MISC_DEVICES=y 88 89 CONFIG_DS1682=y 89 90 CONFIG_IDE=y 90 91 CONFIG_BLK_DEV_IDECS=y
+1
arch/powerpc/configs/86xx/gef_sbc610_defconfig
··· 138 138 CONFIG_BLK_DEV_NBD=m 139 139 CONFIG_BLK_DEV_RAM=y 140 140 CONFIG_BLK_DEV_RAM_SIZE=131072 141 + CONFIG_MISC_DEVICES=y 141 142 CONFIG_DS1682=y 142 143 CONFIG_BLK_DEV_SD=y 143 144 CONFIG_CHR_DEV_ST=y
+1
arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
··· 63 63 CONFIG_BLK_DEV_NBD=y 64 64 CONFIG_BLK_DEV_RAM=y 65 65 CONFIG_BLK_DEV_RAM_SIZE=131072 66 + CONFIG_MISC_DEVICES=y 66 67 CONFIG_EEPROM_LEGACY=y 67 68 CONFIG_BLK_DEV_SD=y 68 69 CONFIG_CHR_DEV_ST=y
+1
arch/powerpc/configs/e55xx_smp_defconfig
··· 32 32 CONFIG_BLK_DEV_LOOP=y 33 33 CONFIG_BLK_DEV_RAM=y 34 34 CONFIG_BLK_DEV_RAM_SIZE=131072 35 + CONFIG_MISC_DEVICES=y 35 36 CONFIG_EEPROM_LEGACY=y 36 37 CONFIG_INPUT_FF_MEMLESS=m 37 38 # CONFIG_INPUT_MOUSEDEV is not set
+1
arch/powerpc/configs/linkstation_defconfig
··· 78 78 CONFIG_BLK_DEV_RAM=y 79 79 CONFIG_BLK_DEV_RAM_COUNT=2 80 80 CONFIG_BLK_DEV_RAM_SIZE=8192 81 + CONFIG_MISC_DEVICES=y 81 82 CONFIG_EEPROM_LEGACY=m 82 83 CONFIG_BLK_DEV_SD=y 83 84 CONFIG_CHR_DEV_SG=y
+1
arch/powerpc/configs/mpc512x_defconfig
··· 61 61 CONFIG_BLK_DEV_RAM_COUNT=1 62 62 CONFIG_BLK_DEV_RAM_SIZE=8192 63 63 CONFIG_BLK_DEV_XIP=y 64 + CONFIG_MISC_DEVICES=y 64 65 CONFIG_EEPROM_AT24=y 65 66 CONFIG_SCSI=y 66 67 # CONFIG_SCSI_PROC_FS is not set
+1
arch/powerpc/configs/mpc5200_defconfig
··· 52 52 CONFIG_BLK_DEV_LOOP=y 53 53 CONFIG_BLK_DEV_RAM=y 54 54 CONFIG_BLK_DEV_RAM_SIZE=32768 55 + CONFIG_MISC_DEVICES=y 55 56 CONFIG_EEPROM_AT24=y 56 57 CONFIG_SCSI_TGT=y 57 58 CONFIG_BLK_DEV_SD=y
+1
arch/powerpc/configs/mpc85xx_defconfig
··· 82 82 CONFIG_BLK_DEV_NBD=y 83 83 CONFIG_BLK_DEV_RAM=y 84 84 CONFIG_BLK_DEV_RAM_SIZE=131072 85 + CONFIG_MISC_DEVICES=y 85 86 CONFIG_EEPROM_LEGACY=y 86 87 CONFIG_BLK_DEV_SD=y 87 88 CONFIG_CHR_DEV_ST=y
+1
arch/powerpc/configs/mpc85xx_smp_defconfig
··· 84 84 CONFIG_BLK_DEV_NBD=y 85 85 CONFIG_BLK_DEV_RAM=y 86 86 CONFIG_BLK_DEV_RAM_SIZE=131072 87 + CONFIG_MISC_DEVICES=y 87 88 CONFIG_EEPROM_LEGACY=y 88 89 CONFIG_BLK_DEV_SD=y 89 90 CONFIG_CHR_DEV_ST=y
+1
arch/powerpc/configs/mpc86xx_defconfig
··· 66 66 CONFIG_BLK_DEV_NBD=y 67 67 CONFIG_BLK_DEV_RAM=y 68 68 CONFIG_BLK_DEV_RAM_SIZE=131072 69 + CONFIG_MISC_DEVICES=y 69 70 CONFIG_EEPROM_LEGACY=y 70 71 CONFIG_BLK_DEV_SD=y 71 72 CONFIG_CHR_DEV_ST=y
+1
arch/powerpc/configs/pasemi_defconfig
··· 59 59 CONFIG_BLK_DEV_LOOP=y 60 60 CONFIG_BLK_DEV_RAM=y 61 61 CONFIG_BLK_DEV_RAM_SIZE=16384 62 + CONFIG_MISC_DEVICES=y 62 63 CONFIG_EEPROM_LEGACY=y 63 64 CONFIG_IDE=y 64 65 CONFIG_BLK_DEV_IDECD=y
+1
arch/powerpc/configs/ppc6xx_defconfig
··· 398 398 CONFIG_CDROM_PKTCDVD=m 399 399 CONFIG_VIRTIO_BLK=m 400 400 CONFIG_BLK_DEV_HD=y 401 + CONFIG_MISC_DEVICES=y 401 402 CONFIG_ENCLOSURE_SERVICES=m 402 403 CONFIG_SENSORS_TSL2550=m 403 404 CONFIG_EEPROM_AT24=m
+3
arch/powerpc/configs/pseries_defconfig
··· 189 189 CONFIG_BNX2=m 190 190 CONFIG_CHELSIO_T1=m 191 191 CONFIG_CHELSIO_T3=m 192 + CONFIG_CHELSIO_T4=m 192 193 CONFIG_EHEA=y 193 194 CONFIG_IXGBE=m 194 195 CONFIG_IXGB=m ··· 256 255 CONFIG_INFINIBAND_USER_ACCESS=m 257 256 CONFIG_INFINIBAND_MTHCA=m 258 257 CONFIG_INFINIBAND_EHCA=m 258 + CONFIG_INFINIBAND_CXGB3=m 259 + CONFIG_INFINIBAND_CXGB4=m 259 260 CONFIG_MLX4_INFINIBAND=m 260 261 CONFIG_INFINIBAND_IPOIB=m 261 262 CONFIG_INFINIBAND_IPOIB_CM=y
+6
arch/powerpc/include/asm/dma-mapping.h
··· 42 42 extern void __dma_sync(void *vaddr, size_t size, int direction); 43 43 extern void __dma_sync_page(struct page *page, unsigned long offset, 44 44 size_t size, int direction); 45 + extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr); 45 46 46 47 #else /* ! CONFIG_NOT_COHERENT_CACHE */ 47 48 /* ··· 198 197 199 198 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 200 199 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 200 + 201 + extern int dma_mmap_coherent(struct device *, struct vm_area_struct *, 202 + void *, dma_addr_t, size_t); 203 + #define ARCH_HAS_DMA_MMAP_COHERENT 204 + 201 205 202 206 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 203 207 enum dma_data_direction direction)
+1 -1
arch/powerpc/include/asm/mmu-hash64.h
··· 27 27 #define STE_VSID_SHIFT 12 28 28 29 29 /* Location of cpu0's segment table */ 30 - #define STAB0_PAGE 0x6 30 + #define STAB0_PAGE 0x8 31 31 #define STAB0_OFFSET (STAB0_PAGE << 12) 32 32 #define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) 33 33
+1 -1
arch/powerpc/include/asm/page.h
··· 100 100 #endif 101 101 102 102 #ifdef CONFIG_FLATMEM 103 - #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) 103 + #define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT)) 104 104 #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) 105 105 #endif 106 106
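The cast above plausibly matters on 32-bit platforms where MEMORY_START is a 64-bit phys_addr_t: without it, ARCH_PFN_OFFSET, and every pfn comparison built on it, is silently evaluated at 64-bit width instead of the native unsigned long used for pfns. A hedged userspace illustration of the promotion; the constants are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t memory_start = 0x100000000ULL;	/* hypothetical phys_addr_t */
	unsigned long pfn = 0x10000;

	/* Uncast, the shifted offset keeps the 64-bit type... */
	printf("uncast: %zu bytes\n", sizeof(memory_start >> 12));
	/* ...cast, it has the same width as every other pfn. */
	printf("cast:   %zu bytes, pfn_valid-ish = %d\n",
	       sizeof((unsigned long)(memory_start >> 12)),
	       pfn >= (unsigned long)(memory_start >> 12));
	return 0;
}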
+8 -8
arch/powerpc/include/asm/qe_ic.h
··· 81 81 static inline void qe_ic_cascade_low_ipic(unsigned int irq, 82 82 struct irq_desc *desc) 83 83 { 84 - struct qe_ic *qe_ic = get_irq_desc_data(desc); 84 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 85 85 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); 86 86 87 87 if (cascade_irq != NO_IRQ) ··· 91 91 static inline void qe_ic_cascade_high_ipic(unsigned int irq, 92 92 struct irq_desc *desc) 93 93 { 94 - struct qe_ic *qe_ic = get_irq_desc_data(desc); 94 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 95 95 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); 96 96 97 97 if (cascade_irq != NO_IRQ) ··· 101 101 static inline void qe_ic_cascade_low_mpic(unsigned int irq, 102 102 struct irq_desc *desc) 103 103 { 104 - struct qe_ic *qe_ic = get_irq_desc_data(desc); 104 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 105 105 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); 106 - struct irq_chip *chip = get_irq_desc_chip(desc); 106 + struct irq_chip *chip = irq_desc_get_chip(desc); 107 107 108 108 if (cascade_irq != NO_IRQ) 109 109 generic_handle_irq(cascade_irq); ··· 114 114 static inline void qe_ic_cascade_high_mpic(unsigned int irq, 115 115 struct irq_desc *desc) 116 116 { 117 - struct qe_ic *qe_ic = get_irq_desc_data(desc); 117 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 118 118 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); 119 - struct irq_chip *chip = get_irq_desc_chip(desc); 119 + struct irq_chip *chip = irq_desc_get_chip(desc); 120 120 121 121 if (cascade_irq != NO_IRQ) 122 122 generic_handle_irq(cascade_irq); ··· 127 127 static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, 128 128 struct irq_desc *desc) 129 129 { 130 - struct qe_ic *qe_ic = get_irq_desc_data(desc); 130 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 131 131 unsigned int cascade_irq; 132 - struct irq_chip *chip = get_irq_desc_chip(desc); 132 + struct irq_chip *chip = irq_desc_get_chip(desc); 133 133 134 134 cascade_irq = qe_ic_get_high_irq(qe_ic); 135 135 if (cascade_irq == NO_IRQ)
+1 -1
arch/powerpc/include/asm/reg_booke.h
··· 110 110 #define SPRN_MAS2 0x272 /* MMU Assist Register 2 */ 111 111 #define SPRN_MAS3 0x273 /* MMU Assist Register 3 */ 112 112 #define SPRN_MAS4 0x274 /* MMU Assist Register 4 */ 113 - #define SPRN_MAS5 0x275 /* MMU Assist Register 5 */ 113 + #define SPRN_MAS5 0x153 /* MMU Assist Register 5 */ 114 114 #define SPRN_MAS6 0x276 /* MMU Assist Register 6 */ 115 115 #define SPRN_PID1 0x279 /* Process ID Register 1 */ 116 116 #define SPRN_PID2 0x27A /* Process ID Register 2 */
+4
arch/powerpc/include/asm/systbl.h
··· 348 348 COMPAT_SYS_SPU(recvmsg) 349 349 COMPAT_SYS_SPU(recvmmsg) 350 350 SYSCALL_SPU(accept4) 351 + SYSCALL_SPU(name_to_handle_at) 352 + COMPAT_SYS_SPU(open_by_handle_at) 353 + COMPAT_SYS_SPU(clock_adjtime) 354 + SYSCALL_SPU(syncfs)
+5 -1
arch/powerpc/include/asm/unistd.h
··· 367 367 #define __NR_recvmsg 342 368 368 #define __NR_recvmmsg 343 369 369 #define __NR_accept4 344 370 + #define __NR_name_to_handle_at 345 371 + #define __NR_open_by_handle_at 346 372 + #define __NR_clock_adjtime 347 373 + #define __NR_syncfs 348 370 374 371 375 #ifdef __KERNEL__ 372 376 373 - #define __NR_syscalls 345 377 + #define __NR_syscalls 349 374 378 375 379 #define __NR__exit __NR_exit 376 380 #define NR_syscalls __NR_syscalls
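These four numbers hook the new VFS and clock syscalls up on powerpc; the systbl.h hunk above adds the matching table entries, and __NR_syscalls grows from 345 to 349. A hedged userspace sketch exercising one of them (on older kernels the call simply fails with ENOSYS):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_syncfs
#define __NR_syncfs 348		/* powerpc number from the hunk above */
#endif

int main(void)
{
	int fd = open("/", O_RDONLY);

	/* syncfs() flushes only the filesystem backing fd. */
	if (fd < 0 || syscall(__NR_syncfs, fd) != 0)
		perror("syncfs");
	if (fd >= 0)
		close(fd);
	return 0;
}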
+18
arch/powerpc/kernel/dma.c
··· 179 179 return 0; 180 180 } 181 181 fs_initcall(dma_init); 182 + 183 + int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 184 + void *cpu_addr, dma_addr_t handle, size_t size) 185 + { 186 + unsigned long pfn; 187 + 188 + #ifdef CONFIG_NOT_COHERENT_CACHE 189 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 190 + pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr); 191 + #else 192 + pfn = page_to_pfn(virt_to_page(cpu_addr)); 193 + #endif 194 + return remap_pfn_range(vma, vma->vm_start, 195 + pfn + vma->vm_pgoff, 196 + vma->vm_end - vma->vm_start, 197 + vma->vm_page_prot); 198 + } 199 + EXPORT_SYMBOL_GPL(dma_mmap_coherent);
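dma_mmap_coherent() gives powerpc drivers a portable way to hand a coherent DMA buffer to userspace; on non-coherent platforms it remaps through the uncached pfn recovered by __dma_get_coherent_pfn() (added in the mm/dma-noncoherent.c hunk below). A hedged sketch of a driver ->mmap() built on it; my_dev, my_buf and my_handle are hypothetical and assumed to come from dma_alloc_coherent():

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

static struct device *my_dev;	/* hypothetical */
static void *my_buf;		/* from dma_alloc_coherent() */
static dma_addr_t my_handle;

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* The helper picks cached or uncached protections per platform. */
	return dma_mmap_coherent(my_dev, vma, my_buf, my_handle, PAGE_SIZE);
}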
+14 -14
arch/powerpc/kernel/exceptions-64s.S
··· 977 977 rfid 978 978 b . /* prevent speculative execution */ 979 979 980 - /* 981 - * Space for CPU0's segment table. 982 - * 983 - * On iSeries, the hypervisor must fill in at least one entry before 984 - * we get control (with relocate on). The address is given to the hv 985 - * as a page number (see xLparMap below), so this must be at a 986 - * fixed address (the linker can't compute (u64)&initial_stab >> 987 - * PAGE_SHIFT). 988 - */ 989 - . = STAB0_OFFSET /* 0x6000 */ 990 - .globl initial_stab 991 - initial_stab: 992 - .space 4096 993 - 994 980 #ifdef CONFIG_PPC_PSERIES 995 981 /* 996 982 * Data area reserved for FWNMI option. ··· 1013 1027 #ifdef CONFIG_PPC_PSERIES 1014 1028 . = 0x8000 1015 1029 #endif /* CONFIG_PPC_PSERIES */ 1030 + 1031 + /* 1032 + * Space for CPU0's segment table. 1033 + * 1034 + * On iSeries, the hypervisor must fill in at least one entry before 1035 + * we get control (with relocate on). The address is given to the hv 1036 + * as a page number (see xLparMap above), so this must be at a 1037 + * fixed address (the linker can't compute (u64)&initial_stab >> 1038 + * PAGE_SHIFT). 1039 + */ 1040 + . = STAB0_OFFSET /* 0x8000 */ 1041 + .globl initial_stab 1042 + initial_stab: 1043 + .space 4096
+12 -69
arch/powerpc/kernel/irq.c
··· 195 195 EXPORT_SYMBOL(arch_local_irq_restore); 196 196 #endif /* CONFIG_PPC64 */ 197 197 198 - static int show_other_interrupts(struct seq_file *p, int prec) 198 + int arch_show_interrupts(struct seq_file *p, int prec) 199 199 { 200 200 int j; 201 201 ··· 231 231 return 0; 232 232 } 233 233 234 - int show_interrupts(struct seq_file *p, void *v) 235 - { 236 - unsigned long flags, any_count = 0; 237 - int i = *(loff_t *) v, j, prec; 238 - struct irqaction *action; 239 - struct irq_desc *desc; 240 - struct irq_chip *chip; 241 - 242 - if (i > nr_irqs) 243 - return 0; 244 - 245 - for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) 246 - j *= 10; 247 - 248 - if (i == nr_irqs) 249 - return show_other_interrupts(p, prec); 250 - 251 - /* print header */ 252 - if (i == 0) { 253 - seq_printf(p, "%*s", prec + 8, ""); 254 - for_each_online_cpu(j) 255 - seq_printf(p, "CPU%-8d", j); 256 - seq_putc(p, '\n'); 257 - } 258 - 259 - desc = irq_to_desc(i); 260 - if (!desc) 261 - return 0; 262 - 263 - raw_spin_lock_irqsave(&desc->lock, flags); 264 - for_each_online_cpu(j) 265 - any_count |= kstat_irqs_cpu(i, j); 266 - action = desc->action; 267 - if (!action && !any_count) 268 - goto out; 269 - 270 - seq_printf(p, "%*d: ", prec, i); 271 - for_each_online_cpu(j) 272 - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 273 - 274 - chip = get_irq_desc_chip(desc); 275 - if (chip) 276 - seq_printf(p, " %-16s", chip->name); 277 - else 278 - seq_printf(p, " %-16s", "None"); 279 - seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); 280 - 281 - if (action) { 282 - seq_printf(p, " %s", action->name); 283 - while ((action = action->next) != NULL) 284 - seq_printf(p, ", %s", action->name); 285 - } 286 - 287 - seq_putc(p, '\n'); 288 - out: 289 - raw_spin_unlock_irqrestore(&desc->lock, flags); 290 - return 0; 291 - } 292 - 293 234 /* 294 235 * /proc/stat helpers 295 236 */ ··· 256 315 alloc_cpumask_var(&mask, GFP_KERNEL); 257 316 258 317 for_each_irq(irq) { 318 + struct irq_data *data; 259 319 struct irq_chip *chip; 260 320 261 321 desc = irq_to_desc(irq); 262 322 if (!desc) 263 323 continue; 264 324 265 - if (desc->status & IRQ_PER_CPU) 325 + data = irq_desc_get_irq_data(desc); 326 + if (irqd_is_per_cpu(data)) 266 327 continue; 267 328 268 - chip = get_irq_desc_chip(desc); 329 + chip = irq_data_get_irq_chip(data); 269 330 270 - cpumask_and(mask, desc->irq_data.affinity, map); 331 + cpumask_and(mask, data->affinity, map); 271 332 if (cpumask_any(mask) >= nr_cpu_ids) { 272 333 printk("Breaking affinity for irq %i\n", irq); 273 334 cpumask_copy(mask, map); 274 335 } 275 336 if (chip->irq_set_affinity) 276 - chip->irq_set_affinity(&desc->irq_data, mask, true); 337 + chip->irq_set_affinity(data, mask, true); 277 338 else if (desc->action && !(warned++)) 278 339 printk("Cannot set affinity for irq %i\n", irq); 279 340 } ··· 561 618 smp_wmb(); 562 619 563 620 /* Clear norequest flags */ 564 - irq_to_desc(i)->status &= ~IRQ_NOREQUEST; 621 + irq_clear_status_flags(i, IRQ_NOREQUEST); 565 622 566 623 /* Legacy flags are left to default at this point, 567 624 * one can then use irq_create_mapping() to ··· 770 827 771 828 /* Set type if specified and different than the current one */ 772 829 if (type != IRQ_TYPE_NONE && 773 - type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) 830 + type != (irqd_get_trigger_type(irq_get_irq_data(virq))) 774 - set_irq_type(virq, type); 831 + irq_set_irq_type(virq, type); 775 832 return virq; 776 833 } 777 834 EXPORT_SYMBOL_GPL(irq_create_of_mapping); ··· 794 851 return; 795 852 796 853 /* remove chip and handler */ 797 - set_irq_chip_and_handler(virq, NULL, NULL); 854 + irq_set_chip_and_handler(virq, NULL, NULL); 798 855 799 856 /* Make sure it's completed */ 800 857 synchronize_irq(virq); ··· 1099 1156 seq_printf(m, "%5d ", i); 1100 1157 seq_printf(m, "0x%05lx ", virq_to_hw(i)); 1101 1158 1102 - chip = get_irq_desc_chip(desc); 1159 + chip = irq_desc_get_chip(desc); 1103 1160 if (chip && chip->name) 1104 1161 p = chip->name; 1105 1162 else
+3 -3
arch/powerpc/kernel/machine_kexec.c
··· 31 31 if (!desc) 32 32 continue; 33 33 34 - chip = get_irq_desc_chip(desc); 34 + chip = irq_desc_get_chip(desc); 35 35 if (!chip) 36 36 continue; 37 37 38 - if (chip->irq_eoi && desc->status & IRQ_INPROGRESS) 38 + if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) 39 39 chip->irq_eoi(&desc->irq_data); 40 40 41 41 if (chip->irq_mask) 42 42 chip->irq_mask(&desc->irq_data); 43 43 44 - if (chip->irq_disable && !(desc->status & IRQ_DISABLED)) 44 + if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) 45 45 chip->irq_disable(&desc->irq_data); 46 46 } 47 47 }
+1 -1
arch/powerpc/kernel/pci-common.c
··· 261 261 262 262 virq = irq_create_mapping(NULL, line); 263 263 if (virq != NO_IRQ) 264 - set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 264 + irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 265 265 } else { 266 266 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 267 267 oirq.size, oirq.specifier[0], oirq.specifier[1],
+1 -1
arch/powerpc/kernel/time.c
··· 356 356 } 357 357 get_paca()->user_time_scaled += user_scaled; 358 358 359 - if (in_irq() || idle_task(smp_processor_id()) != tsk) { 359 + if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { 360 360 account_system_time(tsk, 0, delta, sys_scaled); 361 361 if (stolen) 362 362 account_steal_time(stolen);
+20
arch/powerpc/mm/dma-noncoherent.c
··· 399 399 #endif 400 400 } 401 401 EXPORT_SYMBOL(__dma_sync_page); 402 + 403 + /* 404 + * Return the PFN for a given cpu virtual address returned by 405 + * __dma_alloc_coherent. This is used by dma_mmap_coherent() 406 + */ 407 + unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr) 408 + { 409 + /* This should always be populated, so we don't test every 410 + * level. If that fails, we'll have a nice crash which 411 + * will be as good as a BUG_ON() 412 + */ 413 + pgd_t *pgd = pgd_offset_k(cpu_addr); 414 + pud_t *pud = pud_offset(pgd, cpu_addr); 415 + pmd_t *pmd = pmd_offset(pud, cpu_addr); 416 + pte_t *ptep = pte_offset_kernel(pmd, cpu_addr); 417 + 418 + if (pte_none(*ptep) || !pte_present(*ptep)) 419 + return 0; 420 + return pte_pfn(*ptep); 421 + }
+3 -3
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
··· 132 132 cpld_pic_host_map(struct irq_host *h, unsigned int virq, 133 133 irq_hw_number_t hw) 134 134 { 135 - irq_to_desc(virq)->status |= IRQ_LEVEL; 136 - set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq); 135 + irq_set_status_flags(virq, IRQ_LEVEL); 136 + irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq); 137 137 return 0; 138 138 } 139 139 ··· 198 198 goto end; 199 199 } 200 200 201 - set_irq_chained_handler(cascade_irq, cpld_pic_cascade); 201 + irq_set_chained_handler(cascade_irq, cpld_pic_cascade); 202 202 end: 203 203 of_node_put(np); 204 204 }
+7 -12
arch/powerpc/platforms/52xx/media5200.c
··· 82 82 83 83 void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) 84 84 { 85 - struct irq_chip *chip = get_irq_desc_chip(desc); 85 + struct irq_chip *chip = irq_desc_get_chip(desc); 86 86 int sub_virq, val; 87 87 u32 status, enable; 88 88 ··· 107 107 /* Processing done; can reenable the cascade now */ 108 108 raw_spin_lock(&desc->lock); 109 109 chip->irq_ack(&desc->irq_data); 110 - if (!(desc->status & IRQ_DISABLED)) 110 + if (!irqd_irq_disabled(&desc->irq_data)) 111 111 chip->irq_unmask(&desc->irq_data); 112 112 raw_spin_unlock(&desc->lock); 113 113 } ··· 115 115 static int media5200_irq_map(struct irq_host *h, unsigned int virq, 116 116 irq_hw_number_t hw) 117 117 { 118 - struct irq_desc *desc = irq_to_desc(virq); 119 - 120 118 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); 121 - set_irq_chip_data(virq, &media5200_irq); 122 - set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); 123 - set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 124 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 125 - desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL; 126 - 119 + irq_set_chip_data(virq, &media5200_irq); 120 + irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); 121 + irq_set_status_flags(virq, IRQ_LEVEL); 127 122 return 0; 128 123 } 129 124 ··· 182 187 183 188 media5200_irq.irqhost->host_data = &media5200_irq; 184 189 185 - set_irq_data(cascade_virq, &media5200_irq); 186 - set_irq_chained_handler(cascade_virq, media5200_irq_cascade); 190 + irq_set_handler_data(cascade_virq, &media5200_irq); 191 + irq_set_chained_handler(cascade_virq, media5200_irq_cascade); 187 192 188 193 return; 189 194
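The cascade conversions in this and the following platform hunks all pair the same two calls: stash a controller cookie with irq_set_handler_data() and install the demux with irq_set_chained_handler(); the flow handler then fetches both back through the irq_desc accessors. A hedged sketch with hypothetical my_pic state and a my_pending() helper:

#include <linux/irq.h>

struct my_pic;					/* hypothetical controller state */
extern unsigned int my_pending(struct my_pic *pic);	/* NO_IRQ when idle */

static void my_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct my_pic *pic = irq_desc_get_handler_data(desc);
	unsigned int cascade_irq = my_pending(pic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
	chip->irq_eoi(&desc->irq_data);		/* ack the parent controller */
}

static void __init my_cascade_setup(unsigned int virq, struct my_pic *pic)
{
	irq_set_handler_data(virq, pic);
	irq_set_chained_handler(virq, my_cascade);
}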
+5 -5
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
··· 192 192 193 193 void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) 194 194 { 195 - struct mpc52xx_gpt_priv *gpt = get_irq_data(virq); 195 + struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq); 196 196 int sub_virq; 197 197 u32 status; 198 198 ··· 209 209 struct mpc52xx_gpt_priv *gpt = h->host_data; 210 210 211 211 dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); 212 - set_irq_chip_data(virq, gpt); 213 - set_irq_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); 212 + irq_set_chip_data(virq, gpt); 213 + irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); 214 214 215 215 return 0; 216 216 } ··· 259 259 } 260 260 261 261 gpt->irqhost->host_data = gpt; 262 - set_irq_data(cascade_virq, gpt); 263 - set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); 262 + irq_set_handler_data(cascade_virq, gpt); 263 + irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); 264 264 265 265 /* If the GPT is currently disabled, then change it to be in Input 266 266 * Capture mode. If the mode is non-zero, then the pin could be
+3 -3
arch/powerpc/platforms/52xx/mpc52xx_pic.c
··· 214 214 ctrl_reg |= (type << (22 - (l2irq * 2))); 215 215 out_be32(&intr->ctrl, ctrl_reg); 216 216 217 - __set_irq_handler_unlocked(d->irq, handler); 217 + __irq_set_handler_locked(d->irq, handler); 218 218 219 219 return 0; 220 220 } ··· 414 414 else 415 415 hndlr = handle_level_irq; 416 416 417 - set_irq_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); 417 + irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); 418 418 pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", 419 419 __func__, l2irq, virq, (int)irq, type); 420 420 return 0; ··· 431 431 return -EINVAL; 432 432 } 433 433 434 - set_irq_chip_and_handler(virq, irqchip, handle_level_irq); 434 + irq_set_chip_and_handler(virq, irqchip, handle_level_irq); 435 435 pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); 436 436 437 437 return 0;
+8 -8
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
··· 81 81 82 82 static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) 83 83 { 84 - struct pq2ads_pci_pic *priv = get_irq_desc_data(desc); 84 + struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); 85 85 u32 stat, mask, pend; 86 86 int bit; 87 87 ··· 106 106 static int pci_pic_host_map(struct irq_host *h, unsigned int virq, 107 107 irq_hw_number_t hw) 108 108 { 109 - irq_to_desc(virq)->status |= IRQ_LEVEL; 110 - set_irq_chip_data(virq, h->host_data); 111 - set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); 109 + irq_set_status_flags(virq, IRQ_LEVEL); 110 + irq_set_chip_data(virq, h->host_data); 111 + irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); 112 112 return 0; 113 113 } 114 114 115 115 static void pci_host_unmap(struct irq_host *h, unsigned int virq) 116 116 { 117 117 /* remove chip and handler */ 118 - set_irq_chip_data(virq, NULL); 119 - set_irq_chip(virq, NULL); 118 + irq_set_chip_data(virq, NULL); 119 + irq_set_chip(virq, NULL); 120 120 } 121 121 122 122 static struct irq_host_ops pci_pic_host_ops = { ··· 175 175 176 176 priv->host = host; 177 177 host->host_data = priv; 178 - set_irq_data(irq, priv); 179 - set_irq_chained_handler(irq, pq2ads_pci_irq_demux); 178 + irq_set_handler_data(irq, priv); 179 + irq_set_chained_handler(irq, pq2ads_pci_irq_demux); 180 180 181 181 of_node_put(np); 182 182 return 0;
+2 -2
arch/powerpc/platforms/85xx/ksi8560.c
··· 56 56 57 57 static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) 58 58 { 59 - struct irq_chip *chip = get_irq_desc_chip(desc); 59 + struct irq_chip *chip = irq_desc_get_chip(desc); 60 60 int cascade_irq; 61 61 62 62 while ((cascade_irq = cpm2_get_irq()) >= 0) ··· 106 106 107 107 cpm2_pic_init(np); 108 108 of_node_put(np); 109 - set_irq_chained_handler(irq, cpm2_cascade); 109 + irq_set_chained_handler(irq, cpm2_cascade); 110 110 #endif 111 111 } 112 112
+2 -2
arch/powerpc/platforms/85xx/mpc85xx_ads.c
··· 50 50 51 51 static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) 52 52 { 53 - struct irq_chip *chip = get_irq_desc_chip(desc); 53 + struct irq_chip *chip = irq_desc_get_chip(desc); 54 54 int cascade_irq; 55 55 56 56 while ((cascade_irq = cpm2_get_irq()) >= 0) ··· 101 101 102 102 cpm2_pic_init(np); 103 103 of_node_put(np); 104 - set_irq_chained_handler(irq, cpm2_cascade); 104 + irq_set_chained_handler(irq, cpm2_cascade); 105 105 #endif 106 106 } 107 107
+1 -1
arch/powerpc/platforms/85xx/mpc85xx_cds.c
··· 255 255 } 256 256 257 257 /* Success. Connect our low-level cascade handler. */ 258 - set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler); 258 + irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler); 259 259 260 260 return 0; 261 261 }
+2 -2
arch/powerpc/platforms/85xx/mpc85xx_ds.c
··· 47 47 #ifdef CONFIG_PPC_I8259 48 48 static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) 49 49 { 50 - struct irq_chip *chip = get_irq_desc_chip(desc); 50 + struct irq_chip *chip = irq_desc_get_chip(desc); 51 51 unsigned int cascade_irq = i8259_irq(); 52 52 53 53 if (cascade_irq != NO_IRQ) { ··· 122 122 i8259_init(cascade_node, 0); 123 123 of_node_put(cascade_node); 124 124 125 - set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade); 125 + irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); 126 126 #endif /* CONFIG_PPC_I8259 */ 127 127 } 128 128
+2 -2
arch/powerpc/platforms/85xx/sbc8560.c
··· 41 41 42 42 static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) 43 43 { 44 - struct irq_chip *chip = get_irq_desc_chip(desc); 44 + struct irq_chip *chip = irq_desc_get_chip(desc); 45 45 int cascade_irq; 46 46 47 47 while ((cascade_irq = cpm2_get_irq()) >= 0) ··· 92 92 93 93 cpm2_pic_init(np); 94 94 of_node_put(np); 95 - set_irq_chained_handler(irq, cpm2_cascade); 95 + irq_set_chained_handler(irq, cpm2_cascade); 96 96 #endif 97 97 } 98 98
+6 -6
arch/powerpc/platforms/85xx/socrates_fpga_pic.c
··· 93 93 94 94 void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) 95 95 { 96 - struct irq_chip *chip = get_irq_desc_chip(desc); 96 + struct irq_chip *chip = irq_desc_get_chip(desc); 97 97 unsigned int cascade_irq; 98 98 99 99 /* ··· 245 245 irq_hw_number_t hwirq) 246 246 { 247 247 /* All interrupts are LEVEL sensitive */ 248 - irq_to_desc(virq)->status |= IRQ_LEVEL; 249 - set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip, 250 - handle_fasteoi_irq); 248 + irq_set_status_flags(virq, IRQ_LEVEL); 249 + irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip, 250 + handle_fasteoi_irq); 251 251 252 252 return 0; 253 253 } ··· 308 308 pr_warning("FPGA PIC: can't get irq%d.\n", i); 309 309 continue; 310 310 } 311 - set_irq_chained_handler(socrates_fpga_irqs[i], 312 - socrates_fpga_pic_cascade); 311 + irq_set_chained_handler(socrates_fpga_irqs[i], 312 + socrates_fpga_pic_cascade); 313 313 } 314 314 315 315 socrates_fpga_pic_iobase = of_iomap(pic, 0);
+2 -2
arch/powerpc/platforms/85xx/stx_gp3.c
··· 46 46 47 47 static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) 48 48 { 49 - struct irq_chip *chip = get_irq_desc_chip(desc); 49 + struct irq_chip *chip = irq_desc_get_chip(desc); 50 50 int cascade_irq; 51 51 52 52 while ((cascade_irq = cpm2_get_irq()) >= 0) ··· 102 102 103 103 cpm2_pic_init(np); 104 104 of_node_put(np); 105 - set_irq_chained_handler(irq, cpm2_cascade); 105 + irq_set_chained_handler(irq, cpm2_cascade); 106 106 #endif 107 107 } 108 108
+2 -2
arch/powerpc/platforms/85xx/tqm85xx.c
··· 44 44 45 45 static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) 46 46 { 47 - struct irq_chip *chip = get_irq_desc_chip(desc); 47 + struct irq_chip *chip = irq_desc_get_chip(desc); 48 48 int cascade_irq; 49 49 50 50 while ((cascade_irq = cpm2_get_irq()) >= 0) ··· 100 100 101 101 cpm2_pic_init(np); 102 102 of_node_put(np); 103 - set_irq_chained_handler(irq, cpm2_cascade); 103 + irq_set_chained_handler(irq, cpm2_cascade); 104 104 #endif 105 105 } 106 106
+4 -4
arch/powerpc/platforms/86xx/gef_pic.c
··· 95 95 96 96 void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) 97 97 { 98 - struct irq_chip *chip = get_irq_desc_chip(desc); 98 + struct irq_chip *chip = irq_desc_get_chip(desc); 99 99 unsigned int cascade_irq; 100 100 101 101 /* ··· 163 163 irq_hw_number_t hwirq) 164 164 { 165 165 /* All interrupts are LEVEL sensitive */ 166 - irq_to_desc(virq)->status |= IRQ_LEVEL; 167 - set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); 166 + irq_set_status_flags(virq, IRQ_LEVEL); 167 + irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); 168 168 169 169 return 0; 170 170 } ··· 225 225 return; 226 226 227 227 /* Chain with parent controller */ 228 - set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); 228 + irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); 229 229 } 230 230 231 231 /*
+2 -2
arch/powerpc/platforms/86xx/pic.c
··· 19 19 #ifdef CONFIG_PPC_I8259 20 20 static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) 21 21 { 22 - struct irq_chip *chip = get_irq_desc_chip(desc); 22 + struct irq_chip *chip = irq_desc_get_chip(desc); 23 23 unsigned int cascade_irq = i8259_irq(); 24 24 25 25 if (cascade_irq != NO_IRQ) ··· 77 77 i8259_init(cascade_node, 0); 78 78 of_node_put(cascade_node); 79 79 80 - set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade); 80 + irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade); 81 81 #endif 82 82 }
+3 -3
arch/powerpc/platforms/8xx/m8xx_setup.c
··· 226 226 227 227 generic_handle_irq(cascade_irq); 228 228 229 - chip = get_irq_desc_chip(cdesc); 229 + chip = irq_desc_get_chip(cdesc); 230 230 chip->irq_eoi(&cdesc->irq_data); 231 231 } 232 232 233 - chip = get_irq_desc_chip(desc); 233 + chip = irq_desc_get_chip(desc); 234 234 chip->irq_eoi(&desc->irq_data); 235 235 } 236 236 ··· 251 251 252 252 irq = cpm_pic_init(); 253 253 if (irq != NO_IRQ) 254 - set_irq_chained_handler(irq, cpm_cascade); 254 + irq_set_chained_handler(irq, cpm_cascade); 255 255 }
+7 -7
arch/powerpc/platforms/cell/axon_msi.c
··· 93 93 94 94 static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) 95 95 { 96 - struct irq_chip *chip = get_irq_desc_chip(desc); 97 - struct axon_msic *msic = get_irq_data(irq); 96 + struct irq_chip *chip = irq_desc_get_chip(desc); 97 + struct axon_msic *msic = irq_get_handler_data(irq); 98 98 u32 write_offset, msi; 99 99 int idx; 100 100 int retry = 0; ··· 287 287 } 288 288 dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); 289 289 290 - set_irq_msi(virq, entry); 290 + irq_set_msi_desc(virq, entry); 291 291 msg.data = virq; 292 292 write_msi_msg(virq, &msg); 293 293 } ··· 305 305 if (entry->irq == NO_IRQ) 306 306 continue; 307 307 308 - set_irq_msi(entry->irq, NULL); 308 + irq_set_msi_desc(entry->irq, NULL); 309 309 irq_dispose_mapping(entry->irq); 310 310 } 311 311 } ··· 320 320 static int msic_host_map(struct irq_host *h, unsigned int virq, 321 321 irq_hw_number_t hw) 322 322 { 323 - set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); 323 + irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); 324 324 325 325 return 0; 326 326 } ··· 400 400 401 401 msic->irq_host->host_data = msic; 402 402 403 - set_irq_data(virq, msic); 404 - set_irq_chained_handler(virq, axon_msi_cascade); 403 + irq_set_handler_data(virq, msic); 404 + irq_set_chained_handler(virq, axon_msi_cascade); 405 405 pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); 406 406 407 407 /* Enable the MSIC hardware */
+2 -3
arch/powerpc/platforms/cell/beat_interrupt.c
··· 136 136 static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, 137 137 irq_hw_number_t hw) 138 138 { 139 - struct irq_desc *desc = irq_to_desc(virq); 140 139 int64_t err; 141 140 142 141 err = beat_construct_and_connect_irq_plug(virq, hw); 143 142 if (err < 0) 144 143 return -EIO; 145 144 146 - desc->status |= IRQ_LEVEL; 147 - set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); 145 + irq_set_status_flags(virq, IRQ_LEVEL); 146 + irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); 148 147 return 0; 149 148 } 150 149
+8 -8
arch/powerpc/platforms/cell/interrupt.c
··· 101 101 102 102 static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) 103 103 { 104 - struct irq_chip *chip = get_irq_desc_chip(desc); 104 + struct irq_chip *chip = irq_desc_get_chip(desc); 105 105 struct cbe_iic_regs __iomem *node_iic = 106 - (void __iomem *)get_irq_desc_data(desc); 106 + (void __iomem *)irq_desc_get_handler_data(desc); 107 107 unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; 108 108 unsigned long bits, ack; 109 109 int cascade; ··· 240 240 { 241 241 switch (hw & IIC_IRQ_TYPE_MASK) { 242 242 case IIC_IRQ_TYPE_IPI: 243 - set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); 243 + irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); 244 244 break; 245 245 case IIC_IRQ_TYPE_IOEXC: 246 - set_irq_chip_and_handler(virq, &iic_ioexc_chip, 247 - handle_iic_irq); 246 + irq_set_chip_and_handler(virq, &iic_ioexc_chip, 247 + handle_edge_eoi_irq); 248 248 break; 249 249 default: 250 - set_irq_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); 250 + irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); 251 251 } 252 252 return 0; 253 253 } ··· 364 364 * irq_data is a generic pointer that gets passed back 365 365 * to us later, so the forced cast is fine. 366 366 */ 367 - set_irq_data(cascade, (void __force *)node_iic); 368 - set_irq_chained_handler(cascade , iic_ioexc_cascade); 367 + irq_set_handler_data(cascade, (void __force *)node_iic); 368 + irq_set_chained_handler(cascade, iic_ioexc_cascade); 369 369 out_be64(&node_iic->iic_ir, 370 370 (1 << 12) /* priority */ | 371 371 (node << 4) /* dest node */ |
+4 -4
arch/powerpc/platforms/cell/setup.c
··· 187 187 188 188 static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) 189 189 { 190 - struct irq_chip *chip = get_irq_desc_chip(desc); 191 - struct mpic *mpic = get_irq_desc_data(desc); 190 + struct irq_chip *chip = irq_desc_get_chip(desc); 191 + struct mpic *mpic = irq_desc_get_handler_data(desc); 192 192 unsigned int virq; 193 193 194 194 virq = mpic_get_one_irq(mpic); ··· 223 223 224 224 printk(KERN_INFO "%s : hooking up to IRQ %d\n", 225 225 dn->full_name, virq); 226 - set_irq_data(virq, mpic); 227 - set_irq_chained_handler(virq, cell_mpic_cascade); 226 + irq_set_handler_data(virq, mpic); 227 + irq_set_chained_handler(virq, cell_mpic_cascade); 228 228 } 229 229 } 230 230
+7 -14
arch/powerpc/platforms/cell/spider-pic.c
··· 102 102 103 103 /* Reset edge detection logic if necessary 104 104 */ 105 - if (irq_to_desc(d->irq)->status & IRQ_LEVEL) 105 + if (irqd_is_level_type(d)) 106 106 return; 107 107 108 108 /* Only interrupts 47 to 50 can be set to edge */ ··· 119 119 struct spider_pic *pic = spider_virq_to_pic(d->irq); 120 120 unsigned int hw = irq_map[d->irq].hwirq; 121 121 void __iomem *cfg = spider_get_irq_config(pic, hw); 122 - struct irq_desc *desc = irq_to_desc(d->irq); 123 122 u32 old_mask; 124 123 u32 ic; 125 124 ··· 146 147 return -EINVAL; 147 148 } 148 149 149 - /* Update irq_desc */ 150 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 151 - desc->status |= type & IRQ_TYPE_SENSE_MASK; 152 - if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) 153 - desc->status |= IRQ_LEVEL; 154 - 155 150 /* Configure the source. One gross hack that was there before and 156 151 * that I've kept around is the priority to the BE which I set to 157 152 * be the same as the interrupt source number. I don't know wether ··· 171 178 static int spider_host_map(struct irq_host *h, unsigned int virq, 172 179 irq_hw_number_t hw) 173 180 { 174 - set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); 181 + irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); 175 182 176 183 /* Set default irq type */ 177 - set_irq_type(virq, IRQ_TYPE_NONE); 184 + irq_set_irq_type(virq, IRQ_TYPE_NONE); 178 185 179 186 return 0; 180 187 } ··· 200 207 201 208 static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) 202 209 { 203 - struct irq_chip *chip = get_irq_desc_chip(desc); 204 - struct spider_pic *pic = get_irq_desc_data(desc); 210 + struct irq_chip *chip = irq_desc_get_chip(desc); 211 + struct spider_pic *pic = irq_desc_get_handler_data(desc); 205 212 unsigned int cs, virq; 206 213 207 214 cs = in_be32(pic->regs + TIR_CS) >> 24; ··· 321 328 virq = spider_find_cascade_and_node(pic); 322 329 if (virq == NO_IRQ) 323 330 return; 324 - set_irq_data(virq, pic); 325 - set_irq_chained_handler(virq, spider_irq_cascade); 331 + irq_set_handler_data(virq, pic); 332 + irq_set_chained_handler(virq, spider_irq_cascade); 326 333 327 334 printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", 328 335 pic->node_id, addr, of_node->full_name);
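spider-pic also drops its hand-rolled trigger bookkeeping: the genirq core now records the flow type around the chip's irq_set_type() callback, so the driver queries it through the irq_data helpers rather than reading desc->status. Sketch of the query side (hypothetical callback, not from this commit):

    static void example_ack(struct irq_data *d)
    {
            /* was: if (irq_to_desc(d->irq)->status & IRQ_LEVEL) */
            if (irqd_is_level_type(d))
                    return;         /* level sources need no edge-latch reset */
            /* ... clear the controller's edge-detect latch for d->irq ... */
    }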
+2 -2
arch/powerpc/platforms/chrp/setup.c
··· 365 365 366 366 static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) 367 367 { 368 - struct irq_chip *chip = get_irq_desc_chip(desc); 368 + struct irq_chip *chip = irq_desc_get_chip(desc); 369 369 unsigned int cascade_irq = i8259_irq(); 370 370 371 371 if (cascade_irq != NO_IRQ) ··· 517 517 if (cascade_irq == NO_IRQ) 518 518 printk(KERN_ERR "i8259: failed to map cascade irq\n"); 519 519 else 520 - set_irq_chained_handler(cascade_irq, 520 + irq_set_chained_handler(cascade_irq, 521 521 chrp_8259_cascade); 522 522 } 523 523 }
+5 -5
arch/powerpc/platforms/embedded6xx/flipper-pic.c
··· 101 101 static int flipper_pic_map(struct irq_host *h, unsigned int virq, 102 102 irq_hw_number_t hwirq) 103 103 { 104 - set_irq_chip_data(virq, h->host_data); 105 - irq_to_desc(virq)->status |= IRQ_LEVEL; 106 - set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); 104 + irq_set_chip_data(virq, h->host_data); 105 + irq_set_status_flags(virq, IRQ_LEVEL); 106 + irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq); 107 107 return 0; 108 108 } 109 109 110 110 static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) 111 111 { 112 - set_irq_chip_data(irq, NULL); 113 - set_irq_chip(irq, NULL); 112 + irq_set_chip_data(irq, NULL); 113 + irq_set_chip(irq, NULL); 114 114 } 115 115 116 116 static int flipper_pic_match(struct irq_host *h, struct device_node *np)
+10 -10
arch/powerpc/platforms/embedded6xx/hlwd-pic.c
··· 94 94 static int hlwd_pic_map(struct irq_host *h, unsigned int virq, 95 95 irq_hw_number_t hwirq) 96 96 { 97 - set_irq_chip_data(virq, h->host_data); 98 - irq_to_desc(virq)->status |= IRQ_LEVEL; 99 - set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq); 97 + irq_set_chip_data(virq, h->host_data); 98 + irq_set_status_flags(virq, IRQ_LEVEL); 99 + irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq); 100 100 return 0; 101 101 } 102 102 103 103 static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq) 104 104 { 105 - set_irq_chip_data(irq, NULL); 106 - set_irq_chip(irq, NULL); 105 + irq_set_chip_data(irq, NULL); 106 + irq_set_chip(irq, NULL); 107 107 } 108 108 109 109 static struct irq_host_ops hlwd_irq_host_ops = { ··· 129 129 static void hlwd_pic_irq_cascade(unsigned int cascade_virq, 130 130 struct irq_desc *desc) 131 131 { 132 - struct irq_chip *chip = get_irq_desc_chip(desc); 133 - struct irq_host *irq_host = get_irq_data(cascade_virq); 132 + struct irq_chip *chip = irq_desc_get_chip(desc); 133 + struct irq_host *irq_host = irq_get_handler_data(cascade_virq); 134 134 unsigned int virq; 135 135 136 136 raw_spin_lock(&desc->lock); ··· 145 145 146 146 raw_spin_lock(&desc->lock); 147 147 chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */ 148 - if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask) 148 + if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask) 149 149 chip->irq_unmask(&desc->irq_data); 150 150 raw_spin_unlock(&desc->lock); 151 151 } ··· 218 218 host = hlwd_pic_init(np); 219 219 BUG_ON(!host); 220 220 cascade_virq = irq_of_parse_and_map(np, 0); 221 - set_irq_data(cascade_virq, host); 222 - set_irq_chained_handler(cascade_virq, 221 + irq_set_handler_data(cascade_virq, host); 222 + irq_set_chained_handler(cascade_virq, 223 223 hlwd_pic_irq_cascade); 224 224 hlwd_irq_host = host; 225 225 break;
+2 -2
arch/powerpc/platforms/embedded6xx/holly.c
··· 198 198 cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); 199 199 pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); 200 200 tsi108_pci_int_init(cascade_node); 201 - set_irq_data(cascade_pci_irq, mpic); 202 - set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); 201 + irq_set_handler_data(cascade_pci_irq, mpic); 202 + irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); 203 203 #endif 204 204 /* Configure MPIC outputs to CPU0 */ 205 205 tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
+2 -2
arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
··· 153 153 DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, 154 154 (u32) cascade_pci_irq); 155 155 tsi108_pci_int_init(cascade_node); 156 - set_irq_data(cascade_pci_irq, mpic); 157 - set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); 156 + irq_set_handler_data(cascade_pci_irq, mpic); 157 + irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); 158 158 #endif 159 159 /* Configure MPIC outputs to CPU0 */ 160 160 tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
+2 -2
arch/powerpc/platforms/iseries/irq.c
··· 220 220 if (!desc) 221 221 continue; 222 222 223 - chip = get_irq_desc_chip(desc); 223 + chip = irq_desc_get_chip(desc); 224 224 if (chip && chip->irq_startup) { 225 225 raw_spin_lock_irqsave(&desc->lock, flags); 226 226 chip->irq_startup(&desc->irq_data); ··· 346 346 static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, 347 347 irq_hw_number_t hw) 348 348 { 349 - set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); 349 + irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); 350 350 351 351 return 0; 352 352 }
+1 -1
arch/powerpc/platforms/maple/pci.c
··· 498 498 printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); 499 499 dev->irq = irq_create_mapping(NULL, 1); 500 500 if (dev->irq != NO_IRQ) 501 - set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); 501 + irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); 502 502 } 503 503 504 504 /* Hide AMD8111 IDE interrupt when in legacy mode so
+1 -1
arch/powerpc/platforms/pasemi/setup.c
··· 239 239 if (nmiprop) { 240 240 nmi_virq = irq_create_mapping(NULL, *nmiprop); 241 241 mpic_irq_set_priority(nmi_virq, 15); 242 - set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); 242 + irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); 243 243 mpic_unmask_irq(irq_get_irq_data(nmi_virq)); 244 244 } 245 245
+1 -1
arch/powerpc/platforms/powermac/pci.c
··· 988 988 dev->vendor == PCI_VENDOR_ID_DEC && 989 989 dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { 990 990 dev->irq = irq_create_mapping(NULL, 60); 991 - set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); 991 + irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); 992 992 } 993 993 #endif /* CONFIG_PPC32 */ 994 994 }
+8 -9
arch/powerpc/platforms/powermac/pic.c
··· 157 157 int i = src >> 5; 158 158 159 159 raw_spin_lock_irqsave(&pmac_pic_lock, flags); 160 - if ((irq_to_desc(d->irq)->status & IRQ_LEVEL) == 0) 160 + if (!irqd_is_level_type(d)) 161 161 out_le32(&pmac_irq_hw[i]->ack, bit); 162 162 __set_bit(src, ppc_cached_irq_mask); 163 163 __pmac_set_irq_mask(src, 0); ··· 289 289 static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, 290 290 irq_hw_number_t hw) 291 291 { 292 - struct irq_desc *desc = irq_to_desc(virq); 293 292 int level; 294 293 295 294 if (hw >= max_irqs) ··· 299 300 */ 300 301 level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); 301 302 if (level) 302 - desc->status |= IRQ_LEVEL; 303 - set_irq_chip_and_handler(virq, &pmac_pic, level ? 304 - handle_level_irq : handle_edge_irq); 303 + irq_set_status_flags(virq, IRQ_LEVEL); 304 + irq_set_chip_and_handler(virq, &pmac_pic, 305 + level ? handle_level_irq : handle_edge_irq); 305 306 return 0; 306 307 } 307 308 ··· 471 472 472 473 static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) 473 474 { 474 - struct irq_chip *chip = get_irq_desc_chip(desc); 475 - struct mpic *mpic = get_irq_desc_data(desc); 475 + struct irq_chip *chip = irq_desc_get_chip(desc); 476 + struct mpic *mpic = irq_desc_get_handler_data(desc); 476 477 unsigned int cascade_irq = mpic_get_one_irq(mpic); 477 478 478 479 if (cascade_irq != NO_IRQ) ··· 590 591 of_node_put(slave); 591 592 return 0; 592 593 } 593 - set_irq_data(cascade, mpic2); 594 - set_irq_chained_handler(cascade, pmac_u3_cascade); 594 + irq_set_handler_data(cascade, mpic2); 595 + irq_set_chained_handler(cascade, pmac_u3_cascade); 595 596 596 597 of_node_put(slave); 597 598 return 0;
+7 -7
arch/powerpc/platforms/ps3/interrupt.c
··· 194 194 pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, 195 195 outlet, cpu, *virq); 196 196 197 - result = set_irq_chip_data(*virq, pd); 197 + result = irq_set_chip_data(*virq, pd); 198 198 199 199 if (result) { 200 200 pr_debug("%s:%d: set_irq_chip_data failed\n", ··· 221 221 222 222 static int ps3_virq_destroy(unsigned int virq) 223 223 { 224 - const struct ps3_private *pd = get_irq_chip_data(virq); 224 + const struct ps3_private *pd = irq_get_chip_data(virq); 225 225 226 226 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, 227 227 __LINE__, pd->ppe_id, pd->thread_id, virq); 228 228 229 - set_irq_chip_data(virq, NULL); 229 + irq_set_chip_data(virq, NULL); 230 230 irq_dispose_mapping(virq); 231 231 232 232 pr_debug("%s:%d <-\n", __func__, __LINE__); ··· 256 256 goto fail_setup; 257 257 } 258 258 259 - pd = get_irq_chip_data(*virq); 259 + pd = irq_get_chip_data(*virq); 260 260 261 261 /* Binds outlet to cpu + virq. */ 262 262 ··· 291 291 int ps3_irq_plug_destroy(unsigned int virq) 292 292 { 293 293 int result; 294 - const struct ps3_private *pd = get_irq_chip_data(virq); 294 + const struct ps3_private *pd = irq_get_chip_data(virq); 295 295 296 296 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, 297 297 __LINE__, pd->ppe_id, pd->thread_id, virq); ··· 661 661 662 662 static void ps3_host_unmap(struct irq_host *h, unsigned int virq) 663 663 { 664 - set_irq_chip_data(virq, NULL); 664 + irq_set_chip_data(virq, NULL); 665 665 } 666 666 667 667 static int ps3_host_map(struct irq_host *h, unsigned int virq, ··· 670 670 pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, 671 671 virq); 672 672 673 - set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); 673 + irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); 674 674 675 675 return 0; 676 676 }
+2 -2
arch/powerpc/platforms/pseries/msi.c
··· 137 137 if (entry->irq == NO_IRQ) 138 138 continue; 139 139 140 - set_irq_msi(entry->irq, NULL); 140 + irq_set_msi_desc(entry->irq, NULL); 141 141 irq_dispose_mapping(entry->irq); 142 142 } 143 143 ··· 437 437 } 438 438 439 439 dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); 440 - set_irq_msi(virq, entry); 440 + irq_set_msi_desc(virq, entry); 441 441 442 442 /* Read config space back so we can restore after reset */ 443 443 read_msi_msg(virq, &msg);
+24
arch/powerpc/platforms/pseries/nvram.c
··· 480 480 const char *new_msgs, unsigned long new_len) 481 481 { 482 482 static unsigned int oops_count = 0; 483 + static bool panicking = false; 483 484 size_t text_len; 485 + 486 + switch (reason) { 487 + case KMSG_DUMP_RESTART: 488 + case KMSG_DUMP_HALT: 489 + case KMSG_DUMP_POWEROFF: 490 + /* These are almost always orderly shutdowns. */ 491 + return; 492 + case KMSG_DUMP_OOPS: 493 + case KMSG_DUMP_KEXEC: 494 + break; 495 + case KMSG_DUMP_PANIC: 496 + panicking = true; 497 + break; 498 + case KMSG_DUMP_EMERG: 499 + if (panicking) 500 + /* Panic report already captured. */ 501 + return; 502 + break; 503 + default: 504 + pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", 505 + __FUNCTION__, (int) reason); 506 + return; 507 + } 484 508 485 509 if (clobbering_unread_rtas_event()) 486 510 return;
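The new switch in nvram.c is a de-duplication filter: orderly restart/halt/poweroff dumps are skipped, oops and kexec reports fall through to be captured, and a KMSG_DUMP_PANIC sets a flag so the KMSG_DUMP_EMERG pass that the core can emit for the same panic is not written to NVRAM a second time. A hedged sketch of the same filter in a standalone dumper, assuming the kmsg_dump callback signature of this kernel generation (example_* names are illustrative):

    #include <linux/kmsg_dump.h>

    static void example_dump(struct kmsg_dumper *dumper,
                             enum kmsg_dump_reason reason,
                             const char *s1, unsigned long l1,
                             const char *s2, unsigned long l2)
    {
            static bool panicking;

            if (reason == KMSG_DUMP_PANIC)
                    panicking = true;       /* capture this pass */
            else if (reason == KMSG_DUMP_EMERG && panicking)
                    return;                 /* panic pass already captured */
            /* ... persist s1[0..l1) and s2[0..l2) ... */
    }

    static struct kmsg_dumper example_dumper = { .dump = example_dump };
    /* registered once with kmsg_dump_register(&example_dumper) */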
+2 -2
arch/powerpc/platforms/pseries/setup.c
··· 114 114 115 115 static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) 116 116 { 117 - struct irq_chip *chip = get_irq_desc_chip(desc); 117 + struct irq_chip *chip = irq_desc_get_chip(desc); 118 118 unsigned int cascade_irq = i8259_irq(); 119 119 120 120 if (cascade_irq != NO_IRQ) ··· 169 169 printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); 170 170 i8259_init(found, intack); 171 171 of_node_put(found); 172 - set_irq_chained_handler(cascade, pseries_8259_cascade); 172 + irq_set_chained_handler(cascade, pseries_8259_cascade); 173 173 } 174 174 175 175 static void __init pseries_mpic_init_IRQ(void)
+2 -2
arch/powerpc/platforms/pseries/smp.c
··· 64 64 int qcss_tok = rtas_token("query-cpu-stopped-state"); 65 65 66 66 if (qcss_tok == RTAS_UNKNOWN_SERVICE) { 67 - printk(KERN_INFO "Firmware doesn't support " 68 - "query-cpu-stopped-state\n"); 67 + printk_once(KERN_INFO 68 + "Firmware doesn't support query-cpu-stopped-state\n"); 69 69 return QCSS_HARDWARE_ERROR; 70 70 } 71 71
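printk_once() keeps the missing-firmware warning from repeating for every secondary CPU that is queried; it is essentially a static guard around printk(), roughly like this simplified sketch (not the exact kernel macro):

    #define example_printk_once(fmt, ...)           \
    ({                                              \
            static bool __print_once;               \
                                                    \
            if (!__print_once) {                    \
                    __print_once = true;            \
                    printk(fmt, ##__VA_ARGS__);     \
            }                                       \
    })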
+40 -39
arch/powerpc/platforms/pseries/xics.c
··· 204 204
205 205 static void xics_unmask_irq(struct irq_data *d)
206 206 {
207 - unsigned int irq;
207 + unsigned int hwirq;
208 208 int call_status;
209 209 int server;
210 210
211 211 pr_devel("xics: unmask virq %d\n", d->irq);
212 212
213 - irq = (unsigned int)irq_map[d->irq].hwirq;
214 - pr_devel(" -> map to hwirq 0x%x\n", irq);
215 - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
213 + hwirq = (unsigned int)irq_map[d->irq].hwirq;
214 + pr_devel(" -> map to hwirq 0x%x\n", hwirq);
215 + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
216 216 return;
217 217
218 218 server = get_irq_server(d->irq, d->affinity, 0);
219 219
220 - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
220 + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server,
221 221 DEFAULT_PRIORITY);
222 222 if (call_status != 0) {
223 223 printk(KERN_ERR
224 224 "%s: ibm_set_xive irq %u server %x returned %d\n",
225 - __func__, irq, server, call_status);
225 + __func__, hwirq, server, call_status);
226 226 return;
227 227 }
228 228
229 229 /* Now unmask the interrupt (often a no-op) */
230 - call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
230 + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq);
231 231 if (call_status != 0) {
232 232 printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
233 - __func__, irq, call_status);
233 + __func__, hwirq, call_status);
234 234 return;
235 235 }
236 236 }
··· 250 250 return 0;
251 251 }
252 252
253 - static void xics_mask_real_irq(struct irq_data *d)
253 + static void xics_mask_real_irq(unsigned int hwirq)
254 254 {
255 255 int call_status;
256 256
257 - if (d->irq == XICS_IPI)
257 + if (hwirq == XICS_IPI)
258 258 return;
259 259
260 - call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq);
260 + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq);
261 261 if (call_status != 0) {
262 262 printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
263 - __func__, d->irq, call_status);
263 + __func__, hwirq, call_status);
264 264 return;
265 265 }
266 266
267 267 /* Have to set XIVE to 0xff to be able to remove a slot */
268 - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq,
268 + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq,
269 269 default_server, 0xff);
270 270 if (call_status != 0) {
271 271 printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
272 - __func__, d->irq, call_status);
272 + __func__, hwirq, call_status);
273 273 return;
274 274 }
275 275 }
276 276
277 277 static void xics_mask_irq(struct irq_data *d)
278 278 {
279 - unsigned int irq;
279 + unsigned int hwirq;
280 280
281 281 pr_devel("xics: mask virq %d\n", d->irq);
282 282
283 - irq = (unsigned int)irq_map[d->irq].hwirq;
284 - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
283 + hwirq = (unsigned int)irq_map[d->irq].hwirq;
284 + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
285 285 return;
286 - xics_mask_real_irq(d);
286 + xics_mask_real_irq(hwirq);
287 287 }
288 288
289 289 static void xics_mask_unknown_vec(unsigned int vec)
290 290 {
291 291 printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
292 - xics_mask_real_irq(irq_get_irq_data(vec));
292 + xics_mask_real_irq(vec);
293 293 }
294 294
295 295 static inline unsigned int xics_xirr_vector(unsigned int xirr)
··· 373 373
374 374 static void xics_eoi_direct(struct irq_data *d)
375 375 {
376 - unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
376 + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
377 377
378 378 iosync();
379 - direct_xirr_info_set((pop_cppr() << 24) | irq);
379 + direct_xirr_info_set((pop_cppr() << 24) | hwirq);
380 380 }
381 381
382 382 static void xics_eoi_lpar(struct irq_data *d)
383 383 {
384 - unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
384 + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
385 385
386 386 iosync();
387 - lpar_xirr_info_set((pop_cppr() << 24) | irq);
387 + lpar_xirr_info_set((pop_cppr() << 24) | hwirq);
388 388 }
389 389
390 390 static int
391 391 xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
392 392 {
393 - unsigned int irq;
393 + unsigned int hwirq;
394 394 int status;
395 395 int xics_status[2];
396 396 int irq_server;
397 397
398 - irq = (unsigned int)irq_map[d->irq].hwirq;
399 - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
398 + hwirq = (unsigned int)irq_map[d->irq].hwirq;
399 + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
400 400 return -1;
401 401
402 - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
402 + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
403 403
404 404 if (status) {
405 405 printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
406 - __func__, irq, status);
406 + __func__, hwirq, status);
407 407 return -1;
408 408 }
409 409
··· 418 418 }
419 419
420 420 status = rtas_call(ibm_set_xive, 3, 1, NULL,
421 - irq, irq_server, xics_status[1]);
421 + hwirq, irq_server, xics_status[1]);
422 422
423 423 if (status) {
424 424 printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
425 - __func__, irq, status);
425 + __func__, hwirq, status);
426 426 return -1;
427 427 }
428 428
··· 470 470 /* Insert the interrupt mapping into the radix tree for fast lookup */
471 471 irq_radix_revmap_insert(xics_host, virq, hw);
472 472
473 - irq_to_desc(virq)->status |= IRQ_LEVEL;
474 - set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
473 + irq_set_status_flags(virq, IRQ_LEVEL);
474 + irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
475 475 return 0;
476 476 }
··· 600 600 * IPIs are marked IRQF_DISABLED as they must run with irqs
601 601 * disabled
602 602 */
603 - set_irq_handler(ipi, handle_percpu_irq);
603 + irq_set_handler(ipi, handle_percpu_irq);
604 604 if (firmware_has_feature(FW_FEATURE_LPAR))
605 605 rc = request_irq(ipi, xics_ipi_action_lpar,
606 606 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
··· 874 874 void xics_migrate_irqs_away(void)
875 875 {
876 876 int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
877 - unsigned int irq, virq;
877 + int virq;
878 878
879 879 /* If we used to be the default server, move to the new "boot_cpuid" */
880 880 if (hw_cpu == default_server)
··· 892 892 for_each_irq(virq) {
893 893 struct irq_desc *desc;
894 894 struct irq_chip *chip;
895 + unsigned int hwirq;
895 896 int xics_status[2];
896 897 int status;
897 898 unsigned long flags;
··· 902 901 continue;
903 902 if (irq_map[virq].host != xics_host)
904 903 continue;
905 - irq = (unsigned int)irq_map[virq].hwirq;
904 + hwirq = (unsigned int)irq_map[virq].hwirq;
906 905 /* We need to get IPIs still. */
907 - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
906 + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
908 907 continue;
909 908
910 909 desc = irq_to_desc(virq);
··· 913 912 if (desc == NULL || desc->action == NULL)
914 913 continue;
915 914
916 - chip = get_irq_desc_chip(desc);
915 + chip = irq_desc_get_chip(desc);
917 916 if (chip == NULL || chip->irq_set_affinity == NULL)
918 917 continue;
919 918
920 919 raw_spin_lock_irqsave(&desc->lock, flags);
921 920
922 - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
921 + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
923 922 if (status) {
924 - printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
925 - __func__, irq, status);
923 + printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
924 + __func__, hwirq, status);
926 925 goto unlock;
927 926 }
928 927
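The bulk rename in xics.c documents the two interrupt number spaces: the RTAS services (ibm,get-xive, ibm,set-xive, ibm,int-on, ibm,int-off) operate on hardware source numbers, while the chip callbacks receive Linux virq numbers through irq_data; irq_map[virq].hwirq is the translation point. In sketch form, mirroring the converted code above:

    static void example_mask(struct irq_data *d)
    {
            unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;

            if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
                    return;                 /* no firmware state to change */
            rtas_call(ibm_int_off, 1, 1, NULL, hwirq);  /* firmware takes hwirq */
    }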
+2 -2
arch/powerpc/sysdev/cpm1.c
··· 103 103 { 104 104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); 105 105 106 - irq_to_desc(virq)->status |= IRQ_LEVEL; 107 - set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); 106 + irq_set_status_flags(virq, IRQ_LEVEL); 107 + irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); 108 108 return 0; 109 109 } 110 110
+20 -27
arch/powerpc/sysdev/cpm2_pic.c
··· 115 115 116 116 static void cpm2_end_irq(struct irq_data *d) 117 117 { 118 - struct irq_desc *desc; 119 118 int bit, word; 120 119 unsigned int irq_nr = virq_to_hw(d->irq); 121 120 122 - desc = irq_to_desc(irq_nr); 123 - if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)) 124 - && desc->action) { 121 + bit = irq_to_siubit[irq_nr]; 122 + word = irq_to_siureg[irq_nr]; 125 123 126 - bit = irq_to_siubit[irq_nr]; 127 - word = irq_to_siureg[irq_nr]; 124 + ppc_cached_irq_mask[word] |= 1 << bit; 125 + out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); 128 126 129 - ppc_cached_irq_mask[word] |= 1 << bit; 130 - out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); 131 - 132 - /* 133 - * Work around large numbers of spurious IRQs on PowerPC 82xx 134 - * systems. 135 - */ 136 - mb(); 137 - } 127 + /* 128 + * Work around large numbers of spurious IRQs on PowerPC 82xx 129 + * systems. 130 + */ 131 + mb(); 138 132 } 139 133 140 134 static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) 141 135 { 142 136 unsigned int src = virq_to_hw(d->irq); 143 - struct irq_desc *desc = irq_to_desc(d->irq); 144 137 unsigned int vold, vnew, edibit; 145 138 146 139 /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or ··· 155 162 goto err_sense; 156 163 } 157 164 158 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 159 - desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 160 - if (flow_type & IRQ_TYPE_LEVEL_LOW) { 161 - desc->status |= IRQ_LEVEL; 162 - desc->handle_irq = handle_level_irq; 163 - } else 164 - desc->handle_irq = handle_edge_irq; 165 + irqd_set_trigger_type(d, flow_type); 166 + if (flow_type & IRQ_TYPE_LEVEL_LOW) 167 + __irq_set_handler_locked(d->irq, handle_level_irq); 168 + else 169 + __irq_set_handler_locked(d->irq, handle_edge_irq); 165 170 166 171 /* internal IRQ senses are LEVEL_LOW 167 172 * EXT IRQ and Port C IRQ senses are programmable ··· 170 179 if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) 171 180 edibit = (31 - (CPM2_IRQ_PORTC0 - src)); 172 181 else 173 - return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL; 182 + return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 183 + IRQ_SET_MASK_OK_NOCOPY : -EINVAL; 174 184 175 185 vold = in_be32(&cpm2_intctl->ic_siexr); 176 186 ··· 182 190 183 191 if (vold != vnew) 184 192 out_be32(&cpm2_intctl->ic_siexr, vnew); 185 - return 0; 193 + return IRQ_SET_MASK_OK_NOCOPY; 186 194 187 195 err_sense: 188 196 pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); ··· 196 204 .irq_ack = cpm2_ack, 197 205 .irq_eoi = cpm2_end_irq, 198 206 .irq_set_type = cpm2_set_irq_type, 207 + .flags = IRQCHIP_EOI_IF_HANDLED, 199 208 }; 200 209 201 210 unsigned int cpm2_get_irq(void) ··· 219 226 { 220 227 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); 221 228 222 - irq_to_desc(virq)->status |= IRQ_LEVEL; 223 - set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq); 229 + irq_set_status_flags(virq, IRQ_LEVEL); 230 + irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq); 224 231 return 0; 225 232 } 226 233
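Returning IRQ_SET_MASK_OK_NOCOPY from irq_set_type tells the genirq core that the chip has already recorded the (possibly adjusted) trigger via irqd_set_trigger_type(), so the core must not copy the caller's flow_type over it. A generic skeleton of the converted contract (hedged sketch, names illustrative):

    static int example_set_irq_type(struct irq_data *d, unsigned int flow_type)
    {
            if (flow_type == IRQ_TYPE_NONE)
                    flow_type = IRQ_TYPE_LEVEL_LOW;         /* chip default */

            irqd_set_trigger_type(d, flow_type);            /* record it ourselves */
            if (flow_type & IRQ_TYPE_LEVEL_LOW)
                    __irq_set_handler_locked(d->irq, handle_level_irq);
            else
                    __irq_set_handler_locked(d->irq, handle_edge_irq);

            /* ... program the controller's sense/polarity registers ... */
            return IRQ_SET_MASK_OK_NOCOPY;  /* core: do not copy flow_type */
    }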
+22 -21
arch/powerpc/sysdev/fsl_msi.c
··· 64 64 struct fsl_msi *msi_data = h->host_data;
65 65 struct irq_chip *chip = &fsl_msi_chip;
66 66
67 - irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
67 + irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
68 68
69 - set_irq_chip_data(virq, msi_data);
70 - set_irq_chip_and_handler(virq, chip, handle_edge_irq);
69 + irq_set_chip_data(virq, msi_data);
70 + irq_set_chip_and_handler(virq, chip, handle_edge_irq);
71 71
72 72 return 0;
73 73 }
··· 110 110 list_for_each_entry(entry, &pdev->msi_list, list) {
111 111 if (entry->irq == NO_IRQ)
112 112 continue;
113 - msi_data = get_irq_data(entry->irq);
114 - set_irq_msi(entry->irq, NULL);
113 + msi_data = irq_get_handler_data(entry->irq);
114 + irq_set_msi_desc(entry->irq, NULL);
115 115 msi_bitmap_free_hwirqs(&msi_data->bitmap,
116 116 virq_to_hw(entry->irq), 1);
117 117 irq_dispose_mapping(entry->irq);
··· 168 168 rc = -ENOSPC;
169 169 goto out_free;
170 170 }
171 - set_irq_data(virq, msi_data);
172 - set_irq_msi(virq, entry);
171 + irq_set_handler_data(virq, msi_data);
172 + irq_set_msi_desc(virq, entry);
173 173
174 174 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
175 175 write_msi_msg(virq, &msg);
··· 183 183
184 184 static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
185 185 {
186 - struct irq_chip *chip = get_irq_desc_chip(desc);
186 + struct irq_chip *chip = irq_desc_get_chip(desc);
187 + struct irq_data *idata = irq_desc_get_irq_data(desc);
187 188 unsigned int cascade_irq;
188 189 struct fsl_msi *msi_data;
189 190 int msir_index = -1;
··· 193 192 u32 have_shift = 0;
194 193 struct fsl_msi_cascade_data *cascade_data;
195 194
196 - cascade_data = (struct fsl_msi_cascade_data *)get_irq_data(irq);
195 + cascade_data = (struct fsl_msi_cascade_data *)irq_get_handler_data(irq);
197 196 msi_data = cascade_data->msi_data;
198 197
199 198 raw_spin_lock(&desc->lock);
200 199 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
201 200 if (chip->irq_mask_ack)
202 - chip->irq_mask_ack(&desc->irq_data);
201 + chip->irq_mask_ack(idata);
203 202 else {
204 - chip->irq_mask(&desc->irq_data);
205 - chip->irq_ack(&desc->irq_data);
203 + chip->irq_mask(idata);
204 + chip->irq_ack(idata);
206 205 }
207 206 }
208 207
209 - if (unlikely(desc->status & IRQ_INPROGRESS))
208 + if (unlikely(irqd_irq_inprogress(idata)))
210 209 goto unlock;
211 210
212 211 msir_index = cascade_data->index;
··· 214 213 if (msir_index >= NR_MSI_REG)
215 214 cascade_irq = NO_IRQ;
216 215
217 - desc->status |= IRQ_INPROGRESS;
216 + irqd_set_chained_irq_inprogress(idata);
218 217 switch (msi_data->feature & FSL_PIC_IP_MASK) {
219 218 case FSL_PIC_IP_MPIC:
220 219 msir_value = fsl_msi_read(msi_data->msi_regs,
··· 236 235 have_shift += intr_index + 1;
237 236 msir_value = msir_value >> (intr_index + 1);
238 237 }
239 - desc->status &= ~IRQ_INPROGRESS;
238 + irqd_clr_chained_irq_inprogress(idata);
240 239
241 240 switch (msi_data->feature & FSL_PIC_IP_MASK) {
242 241 case FSL_PIC_IP_MPIC:
243 - chip->irq_eoi(&desc->irq_data);
242 + chip->irq_eoi(idata);
244 243 break;
245 244 case FSL_PIC_IP_IPIC:
246 - if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
247 - chip->irq_unmask(&desc->irq_data);
245 + if (!irqd_irq_disabled(idata) && chip->irq_unmask)
246 + chip->irq_unmask(idata);
248 247 break;
249 248 }
250 249 unlock:
··· 262 261 for (i = 0; i < NR_MSI_REG; i++) {
263 262 virq = msi->msi_virqs[i];
264 263 if (virq != NO_IRQ) {
265 - cascade_data = get_irq_data(virq);
264 + cascade_data = irq_get_handler_data(virq);
266 265 kfree(cascade_data);
267 266 irq_dispose_mapping(virq);
268 267 }
··· 298 297 msi->msi_virqs[irq_index] = virt_msir;
299 298 cascade_data->index = offset + irq_index;
300 299 cascade_data->msi_data = msi;
301 - set_irq_data(virt_msir, cascade_data);
302 - set_irq_chained_handler(virt_msir, fsl_msi_cascade);
300 + irq_set_handler_data(virt_msir, cascade_data);
301 + irq_set_chained_handler(virt_msir, fsl_msi_cascade);
303 302
304 303 return 0;
305 304 }
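The per-vector MSI flow shared by the converted drivers: allocate a hardware vector, map it to a virq, attach the driver context and the msi_desc (irq_set_msi_desc(), formerly set_irq_msi()), then program the message. A hedged sketch following the fsl_msi names above; msi_bitmap_alloc_hwirqs() and the irqhost field are assumed from the surrounding driver, not shown in this hunk, and error handling is elided:

    static int example_setup_one_msi(struct pci_dev *pdev, struct msi_desc *entry,
                                     struct fsl_msi *msi_data)
    {
            struct msi_msg msg;
            int hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
            unsigned int virq = irq_create_mapping(msi_data->irqhost, hwirq);

            irq_set_handler_data(virq, msi_data);   /* was set_irq_data() */
            irq_set_msi_desc(virq, entry);          /* was set_irq_msi()  */
            fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
            write_msi_msg(virq, &msg);
            return 0;
    }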
+4 -4
arch/powerpc/sysdev/i8259.c
··· 175 175 176 176 /* We block the internal cascade */ 177 177 if (hw == 2) 178 - irq_to_desc(virq)->status |= IRQ_NOREQUEST; 178 + irq_set_status_flags(virq, IRQ_NOREQUEST); 179 179 180 180 /* We use the level handler only for now, we might want to 181 181 * be more cautious here but that works for now 182 182 */ 183 - irq_to_desc(virq)->status |= IRQ_LEVEL; 184 - set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq); 183 + irq_set_status_flags(virq, IRQ_LEVEL); 184 + irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq); 185 185 return 0; 186 186 } 187 187 ··· 191 191 i8259_mask_irq(irq_get_irq_data(virq)); 192 192 193 193 /* remove chip and handler */ 194 - set_irq_chip_and_handler(virq, NULL, NULL); 194 + irq_set_chip_and_handler(virq, NULL, NULL); 195 195 196 196 /* Make sure it's completed */ 197 197 synchronize_irq(virq);
+10 -12
arch/powerpc/sysdev/ipic.c
··· 605 605 { 606 606 struct ipic *ipic = ipic_from_irq(d->irq); 607 607 unsigned int src = ipic_irq_to_hw(d->irq); 608 - struct irq_desc *desc = irq_to_desc(d->irq); 609 608 unsigned int vold, vnew, edibit; 610 609 611 610 if (flow_type == IRQ_TYPE_NONE) ··· 622 623 printk(KERN_ERR "ipic: edge sense not supported on internal " 623 624 "interrupts\n"); 624 625 return -EINVAL; 626 + 625 627 } 626 628 627 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 628 - desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 629 + irqd_set_trigger_type(d, flow_type); 629 630 if (flow_type & IRQ_TYPE_LEVEL_LOW) { 630 - desc->status |= IRQ_LEVEL; 631 - desc->handle_irq = handle_level_irq; 632 - desc->irq_data.chip = &ipic_level_irq_chip; 631 + __irq_set_handler_locked(d->irq, handle_level_irq); 632 + d->chip = &ipic_level_irq_chip; 633 633 } else { 634 - desc->handle_irq = handle_edge_irq; 635 - desc->irq_data.chip = &ipic_edge_irq_chip; 634 + __irq_set_handler_locked(d->irq, handle_edge_irq); 635 + d->chip = &ipic_edge_irq_chip; 636 636 } 637 637 638 638 /* only EXT IRQ senses are programmable on ipic ··· 653 655 } 654 656 if (vold != vnew) 655 657 ipic_write(ipic->regs, IPIC_SECNR, vnew); 656 - return 0; 658 + return IRQ_SET_MASK_OK_NOCOPY; 657 659 } 658 660 659 661 /* level interrupts and edge interrupts have different ack operations */ ··· 685 687 { 686 688 struct ipic *ipic = h->host_data; 687 689 688 - set_irq_chip_data(virq, ipic); 689 - set_irq_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); 690 + irq_set_chip_data(virq, ipic); 691 + irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); 690 692 691 693 /* Set default irq type */ 692 - set_irq_type(virq, IRQ_TYPE_NONE); 694 + irq_set_irq_type(virq, IRQ_TYPE_NONE); 693 695 694 696 return 0; 695 697 }
+2 -9
arch/powerpc/sysdev/mpc8xx_pic.c
··· 72 72 73 73 static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) 74 74 { 75 - struct irq_desc *desc = irq_to_desc(d->irq); 76 - 77 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 78 - desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 79 - if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) 80 - desc->status |= IRQ_LEVEL; 81 - 82 75 if (flow_type & IRQ_TYPE_EDGE_FALLING) { 83 76 irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq; 84 77 unsigned int siel = in_be32(&siu_reg->sc_siel); ··· 80 87 if ((hw & 1) == 0) { 81 88 siel |= (0x80000000 >> hw); 82 89 out_be32(&siu_reg->sc_siel, siel); 83 - desc->handle_irq = handle_edge_irq; 90 + __irq_set_handler_locked(d->irq, handle_edge_irq); 84 91 } 85 92 } 86 93 return 0; ··· 117 124 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); 118 125 119 126 /* Set default irq handle */ 120 - set_irq_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); 127 + irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); 121 128 return 0; 122 129 } 123 130
+6 -6
arch/powerpc/sysdev/mpc8xxx_gpio.c
··· 145 145 146 146 static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) 147 147 { 148 - struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc); 148 + struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); 149 149 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 150 150 unsigned int mask; 151 151 ··· 278 278 if (mpc8xxx_gc->of_dev_id_data) 279 279 mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; 280 280 281 - set_irq_chip_data(virq, h->host_data); 282 - set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); 283 - set_irq_type(virq, IRQ_TYPE_NONE); 281 + irq_set_chip_data(virq, h->host_data); 282 + irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); 283 + irq_set_irq_type(virq, IRQ_TYPE_NONE); 284 284 285 285 return 0; 286 286 } ··· 369 369 out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); 370 370 out_be32(mm_gc->regs + GPIO_IMR, 0); 371 371 372 - set_irq_data(hwirq, mpc8xxx_gc); 373 - set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); 372 + irq_set_handler_data(hwirq, mpc8xxx_gc); 373 + irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); 374 374 375 375 skip_irq: 376 376 return;
+19 -24
arch/powerpc/sysdev/mpic.c
··· 361 361 }
362 362
363 363 static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
364 - unsigned int irqflags)
364 + bool level)
365 365 {
366 366 struct mpic_irq_fixup *fixup = &mpic->fixups[source];
367 367 unsigned long flags;
··· 370 370 if (fixup->base == NULL)
371 371 return;
372 372
373 - DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n",
374 - source, irqflags, fixup->index);
373 + DBG("startup_ht_interrupt(0x%x) index: %d\n",
374 + source, fixup->index);
375 375 raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
376 376 /* Enable and configure */
377 377 writeb(0x10 + 2 * fixup->index, fixup->base + 2);
378 378 tmp = readl(fixup->base + 4);
379 379 tmp &= ~(0x23U);
380 - if (irqflags & IRQ_LEVEL)
380 + if (level)
381 381 tmp |= 0x22;
382 382 writel(tmp, fixup->base + 4);
383 383 raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
··· 389 389 #endif
390 390 }
391 391
392 - static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
393 - unsigned int irqflags)
392 + static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
394 393 {
395 394 struct mpic_irq_fixup *fixup = &mpic->fixups[source];
396 395 unsigned long flags;
··· 398 399 if (fixup->base == NULL)
399 400 return;
400 401
401 - DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags);
402 + DBG("shutdown_ht_interrupt(0x%x)\n", source);
402 403
403 404 /* Disable */
404 405 raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
··· 615 616 if (irq < NUM_ISA_INTERRUPTS)
616 617 return NULL;
617 618
618 - return get_irq_chip_data(irq);
619 + return irq_get_chip_data(irq);
619 620 }
620 621
621 622 /* Determine if the linux irq is an IPI */
··· 649 650 /* Get the mpic structure from the irq number */
650 651 static inline struct mpic * mpic_from_irq(unsigned int irq)
651 652 {
652 - return get_irq_chip_data(irq);
653 + return irq_get_chip_data(irq);
653 654 }
654 655
655 656 /* Get the mpic structure from the irq data */
··· 737 738
738 739 mpic_unmask_irq(d);
739 740
740 - if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
741 + if (irqd_is_level_type(d))
741 742 mpic_ht_end_irq(mpic, src);
742 743 }
743 744
··· 747 748 unsigned int src = mpic_irq_to_hw(d->irq);
748 749
749 750 mpic_unmask_irq(d);
750 - mpic_startup_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
751 + mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
751 752
752 753 return 0;
753 754 }
··· 757 758 struct mpic *mpic = mpic_from_irq_data(d);
758 759 unsigned int src = mpic_irq_to_hw(d->irq);
759 760
760 - mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
761 + mpic_shutdown_ht_interrupt(mpic, src);
761 762 mpic_mask_irq(d);
762 763 }
763 764
··· 774 775 * latched another edge interrupt coming in anyway
775 776 */
776 777
777 - if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
778 + if (irqd_is_level_type(d))
778 779 mpic_ht_end_irq(mpic, src);
779 780 mpic_eoi(mpic);
780 781 }
··· 863 864 {
864 865 struct mpic *mpic = mpic_from_irq_data(d);
865 866 unsigned int src = mpic_irq_to_hw(d->irq);
866 - struct irq_desc *desc = irq_to_desc(d->irq);
867 867 unsigned int vecpri, vold, vnew;
868 868
869 869 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
··· 877 879 if (flow_type == IRQ_TYPE_NONE)
878 880 flow_type = IRQ_TYPE_LEVEL_LOW;
879 881
880 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
881 - desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
882 - if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
883 - desc->status |= IRQ_LEVEL;
882 + irqd_set_trigger_type(d, flow_type);
884 883
885 884 if (mpic_is_ht_interrupt(mpic, src))
886 885 vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
··· 892 897 if (vold != vnew)
893 898 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
894 899
895 - return 0;
900 + return IRQ_SET_MASK_OK_NOCOPY;
896 901 }
897 902
898 903 void mpic_set_vector(unsigned int virq, unsigned int vector)
··· 978 983 WARN_ON(!(mpic->flags & MPIC_PRIMARY));
979 984
980 985 DBG("mpic: mapping as IPI\n");
981 - set_irq_chip_data(virq, mpic);
982 - set_irq_chip_and_handler(virq, &mpic->hc_ipi,
986 + irq_set_chip_data(virq, mpic);
987 + irq_set_chip_and_handler(virq, &mpic->hc_ipi,
983 988 handle_percpu_irq);
984 989 return 0;
985 990 }
··· 1001 1006
1002 1007 DBG("mpic: mapping to irq chip @%p\n", chip);
1003 1008
1004 - set_irq_chip_data(virq, mpic);
1005 - set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
1009 + irq_set_chip_data(virq, mpic);
1010 + irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
1006 1011
1007 1012 /* Set default irq type */
1008 - set_irq_type(virq, IRQ_TYPE_NONE);
1013 + irq_set_irq_type(virq, IRQ_TYPE_NONE);
1009 1014
1010 1015 /* If the MPIC was reset, then all vectors have already been
1011 1016 * initialized. Otherwise, a per source lazy initialization
+4 -4
arch/powerpc/sysdev/mpic_pasemi_msi.c
··· 81 81 if (entry->irq == NO_IRQ) 82 82 continue; 83 83 84 - set_irq_msi(entry->irq, NULL); 84 + irq_set_msi_desc(entry->irq, NULL); 85 85 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, 86 86 virq_to_hw(entry->irq), ALLOC_CHUNK); 87 87 irq_dispose_mapping(entry->irq); ··· 131 131 */ 132 132 mpic_set_vector(virq, 0); 133 133 134 - set_irq_msi(virq, entry); 135 - set_irq_chip(virq, &mpic_pasemi_msi_chip); 136 - set_irq_type(virq, IRQ_TYPE_EDGE_RISING); 134 + irq_set_msi_desc(virq, entry); 135 + irq_set_chip(virq, &mpic_pasemi_msi_chip); 136 + irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); 137 137 138 138 pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ 139 139 "addr 0x%x\n", virq, hwirq, msg.address_lo);
+4 -4
arch/powerpc/sysdev/mpic_u3msi.c
··· 129 129 if (entry->irq == NO_IRQ) 130 130 continue; 131 131 132 - set_irq_msi(entry->irq, NULL); 132 + irq_set_msi_desc(entry->irq, NULL); 133 133 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, 134 134 virq_to_hw(entry->irq), 1); 135 135 irq_dispose_mapping(entry->irq); ··· 166 166 return -ENOSPC; 167 167 } 168 168 169 - set_irq_msi(virq, entry); 170 - set_irq_chip(virq, &mpic_u3msi_chip); 171 - set_irq_type(virq, IRQ_TYPE_EDGE_RISING); 169 + irq_set_msi_desc(virq, entry); 170 + irq_set_chip(virq, &mpic_u3msi_chip); 171 + irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); 172 172 173 173 pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", 174 174 virq, hwirq, (unsigned long)addr);
+3 -2
arch/powerpc/sysdev/mv64x60_pic.c
··· 213 213 { 214 214 int level1; 215 215 216 - irq_to_desc(virq)->status |= IRQ_LEVEL; 216 + irq_set_status_flags(virq, IRQ_LEVEL); 217 217 218 218 level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; 219 219 BUG_ON(level1 > MV64x60_LEVEL1_GPP); 220 - set_irq_chip_and_handler(virq, mv64x60_chips[level1], handle_level_irq); 220 + irq_set_chip_and_handler(virq, mv64x60_chips[level1], 221 + handle_level_irq); 221 222 222 223 return 0; 223 224 }
+8 -8
arch/powerpc/sysdev/qe_lib/qe_ic.c
··· 189 189 190 190 static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) 191 191 { 192 - return get_irq_chip_data(virq); 192 + return irq_get_chip_data(virq); 193 193 } 194 194 195 195 static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) ··· 267 267 /* Default chip */ 268 268 chip = &qe_ic->hc_irq; 269 269 270 - set_irq_chip_data(virq, qe_ic); 271 - irq_to_desc(virq)->status |= IRQ_LEVEL; 270 + irq_set_chip_data(virq, qe_ic); 271 + irq_set_status_flags(virq, IRQ_LEVEL); 272 272 273 - set_irq_chip_and_handler(virq, chip, handle_level_irq); 273 + irq_set_chip_and_handler(virq, chip, handle_level_irq); 274 274 275 275 return 0; 276 276 } ··· 386 386 387 387 qe_ic_write(qe_ic->regs, QEIC_CICR, temp); 388 388 389 - set_irq_data(qe_ic->virq_low, qe_ic); 390 - set_irq_chained_handler(qe_ic->virq_low, low_handler); 389 + irq_set_handler_data(qe_ic->virq_low, qe_ic); 390 + irq_set_chained_handler(qe_ic->virq_low, low_handler); 391 391 392 392 if (qe_ic->virq_high != NO_IRQ && 393 393 qe_ic->virq_high != qe_ic->virq_low) { 394 - set_irq_data(qe_ic->virq_high, qe_ic); 395 - set_irq_chained_handler(qe_ic->virq_high, high_handler); 394 + irq_set_handler_data(qe_ic->virq_high, qe_ic); 395 + irq_set_chained_handler(qe_ic->virq_high, high_handler); 396 396 } 397 397 } 398 398
+3 -3
arch/powerpc/sysdev/tsi108_pci.c
··· 391 391 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); 392 392 if ((virq >= 1) && (virq <= 4)){ 393 393 irq = virq + IRQ_PCI_INTAD_BASE - 1; 394 - irq_to_desc(irq)->status |= IRQ_LEVEL; 395 - set_irq_chip(irq, &tsi108_pci_irq); 394 + irq_set_status_flags(irq, IRQ_LEVEL); 395 + irq_set_chip(irq, &tsi108_pci_irq); 396 396 } 397 397 return 0; 398 398 } ··· 431 431 432 432 void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) 433 433 { 434 - struct irq_chip *chip = get_irq_desc_chip(desc); 434 + struct irq_chip *chip = irq_desc_get_chip(desc); 435 435 unsigned int cascade_irq = get_pci_source(); 436 436 437 437 if (cascade_irq != NO_IRQ)
+17 -24
arch/powerpc/sysdev/uic.c
··· 57 57
58 58 static void uic_unmask_irq(struct irq_data *d)
59 59 {
60 - struct irq_desc *desc = irq_to_desc(d->irq);
61 60 struct uic *uic = irq_data_get_irq_chip_data(d);
62 61 unsigned int src = uic_irq_to_hw(d->irq);
63 62 unsigned long flags;
··· 65 66 sr = 1 << (31-src);
66 67 spin_lock_irqsave(&uic->lock, flags);
67 68 /* ack level-triggered interrupts here */
68 - if (desc->status & IRQ_LEVEL)
69 + if (irqd_is_level_type(d))
69 70 mtdcr(uic->dcrbase + UIC_SR, sr);
70 71 er = mfdcr(uic->dcrbase + UIC_ER);
71 72 er |= sr;
··· 100 101
101 102 static void uic_mask_ack_irq(struct irq_data *d)
102 103 {
103 - struct irq_desc *desc = irq_to_desc(d->irq);
104 104 struct uic *uic = irq_data_get_irq_chip_data(d);
105 105 unsigned int src = uic_irq_to_hw(d->irq);
106 106 unsigned long flags;
··· 118 120 * level interrupts are ack'ed after the actual
119 121 * isr call in the uic_unmask_irq()
120 122 */
121 - if (!(desc->status & IRQ_LEVEL))
123 + if (!irqd_is_level_type(d))
122 124 mtdcr(uic->dcrbase + UIC_SR, sr);
123 125 spin_unlock_irqrestore(&uic->lock, flags);
124 126 }
··· 127 129 {
128 130 struct uic *uic = irq_data_get_irq_chip_data(d);
129 131 unsigned int src = uic_irq_to_hw(d->irq);
130 - struct irq_desc *desc = irq_to_desc(d->irq);
131 132 unsigned long flags;
132 133 int trigger, polarity;
133 134 u32 tr, pr, mask;
··· 163 166 mtdcr(uic->dcrbase + UIC_PR, pr);
164 167 mtdcr(uic->dcrbase + UIC_TR, tr);
165 168
166 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
167 - desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
168 - if (!trigger)
169 - desc->status |= IRQ_LEVEL;
170 -
171 169 spin_unlock_irqrestore(&uic->lock, flags);
172 170
173 171 return 0;
··· 182 190 {
183 191 struct uic *uic = h->host_data;
184 192
185 - set_irq_chip_data(virq, uic);
193 + irq_set_chip_data(virq, uic);
186 194 /* Despite the name, handle_level_irq() works for both level
187 195 * and edge irqs on UIC. FIXME: check this is correct */
188 - set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
196 + irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
189 197
190 198 /* Set default irq type */
191 - set_irq_type(virq, IRQ_TYPE_NONE);
199 + irq_set_irq_type(virq, IRQ_TYPE_NONE);
192 200
193 201 return 0;
194 202 }
··· 212 220
213 221 void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
214 222 {
215 - struct irq_chip *chip = get_irq_desc_chip(desc);
216 - struct uic *uic = get_irq_data(virq);
223 + struct irq_chip *chip = irq_desc_get_chip(desc);
224 + struct irq_data *idata = irq_desc_get_irq_data(desc);
225 + struct uic *uic = irq_get_handler_data(virq);
217 226 u32 msr;
218 227 int src;
219 228 int subvirq;
220 229
221 230 raw_spin_lock(&desc->lock);
222 - if (desc->status & IRQ_LEVEL)
223 - chip->irq_mask(&desc->irq_data);
231 + if (irqd_is_level_type(idata))
232 + chip->irq_mask(idata);
224 233 else
225 - chip->irq_mask_ack(&desc->irq_data);
234 + chip->irq_mask_ack(idata);
226 235 raw_spin_unlock(&desc->lock);
227 236
228 237 msr = mfdcr(uic->dcrbase + UIC_MSR);
··· 237 244
238 245 uic_irq_ret:
239 246 raw_spin_lock(&desc->lock);
240 - if (desc->status & IRQ_LEVEL)
241 - chip->irq_ack(&desc->irq_data);
242 - if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
243 - chip->irq_unmask(&desc->irq_data);
247 + if (irqd_is_level_type(idata))
248 + chip->irq_ack(idata);
249 + if (!irqd_irq_disabled(idata) && chip->irq_unmask)
250 + chip->irq_unmask(idata);
244 251 raw_spin_unlock(&desc->lock);
245 252 }
··· 329 336
330 337 cascade_virq = irq_of_parse_and_map(np, 0);
331 338
332 - set_irq_data(cascade_virq, uic);
333 - set_irq_chained_handler(cascade_virq, uic_irq_cascade);
339 + irq_set_handler_data(cascade_virq, uic);
340 + irq_set_chained_handler(cascade_virq, uic_irq_cascade);
334 341
335 342 /* FIXME: setup critical cascade?? */
336 343 }
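uic_irq_cascade above is the canonical converted cascade: state tests go through the irq_data helpers (irqd_is_level_type(), irqd_irq_disabled()), chip callbacks take the irq_data fetched once via irq_desc_get_irq_data(), and desc->lock is still held around the mask/ack and ack/unmask sequences. A hedged skeleton of that flow (names illustrative):

    static void example_uic_like_cascade(unsigned int virq, struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            struct irq_data *idata = irq_desc_get_irq_data(desc);

            raw_spin_lock(&desc->lock);
            if (irqd_is_level_type(idata))
                    chip->irq_mask(idata);          /* ack after the child isr */
            else
                    chip->irq_mask_ack(idata);
            raw_spin_unlock(&desc->lock);

            /* ... generic_handle_irq() each pending child source ... */

            raw_spin_lock(&desc->lock);
            if (irqd_is_level_type(idata))
                    chip->irq_ack(idata);
            if (!irqd_irq_disabled(idata) && chip->irq_unmask)
                    chip->irq_unmask(idata);
            raw_spin_unlock(&desc->lock);
    }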
+7 -13
arch/powerpc/sysdev/xilinx_intc.c
··· 79 79 80 80 static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type) 81 81 { 82 - struct irq_desc *desc = irq_to_desc(d->irq); 83 - 84 - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 85 - desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 86 - if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) 87 - desc->status |= IRQ_LEVEL; 88 82 return 0; 89 83 } 90 84 ··· 164 170 static int xilinx_intc_map(struct irq_host *h, unsigned int virq, 165 171 irq_hw_number_t irq) 166 172 { 167 - set_irq_chip_data(virq, h->host_data); 173 + irq_set_chip_data(virq, h->host_data); 168 174 169 175 if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH || 170 176 xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) { 171 - set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip, 172 - handle_level_irq); 177 + irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip, 178 + handle_level_irq); 173 179 } else { 174 - set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip, 175 - handle_edge_irq); 180 + irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip, 181 + handle_edge_irq); 176 182 } 177 183 return 0; 178 184 } ··· 223 229 */ 224 230 static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) 225 231 { 226 - struct irq_chip *chip = get_irq_desc_chip(desc); 232 + struct irq_chip *chip = irq_desc_get_chip(desc); 227 233 unsigned int cascade_irq = i8259_irq(); 228 234 229 235 if (cascade_irq) ··· 250 256 } 251 257 252 258 i8259_init(cascade_node, 0); 253 - set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade); 259 + irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade); 254 260 255 261 /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */ 256 262 /* This looks like a dirty hack to me --gcl */
-1
arch/score/Kconfig
··· 3 3 config SCORE 4 4 def_bool y 5 5 select HAVE_GENERIC_HARDIRQS 6 - select GENERIC_HARDIRQS_NO_DEPRECATED 7 6 select GENERIC_IRQ_SHOW 8 7 9 8 choice
-1
arch/sh/Kconfig
··· 23 23 select HAVE_SPARSE_IRQ 24 24 select RTC_LIB 25 25 select GENERIC_ATOMIC64 26 - select GENERIC_HARDIRQS_NO_DEPRECATED 27 26 select GENERIC_IRQ_SHOW 28 27 help 29 28 The SuperH is a RISC processor targeted for use in embedded systems
+6 -6
arch/sh/boards/board-magicpanelr2.c
··· 388 388 { 389 389 plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */ 390 390 391 - set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */ 392 - set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */ 393 - set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */ 394 - set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */ 395 - set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */ 396 - set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */ 391 + irq_set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */ 392 + irq_set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */ 393 + irq_set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */ 394 + irq_set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */ 395 + irq_set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */ 396 + irq_set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */ 397 397 398 398 intc_set_priority(32, 13); /* IRQ0 CAN1 */ 399 399 intc_set_priority(33, 13); /* IRQ0 CAN2 */
+3 -3
arch/sh/boards/mach-ap325rxa/setup.c
··· 14 14 #include <linux/device.h> 15 15 #include <linux/interrupt.h> 16 16 #include <linux/platform_device.h> 17 - #include <linux/mfd/sh_mobile_sdhi.h> 18 17 #include <linux/mmc/host.h> 18 + #include <linux/mmc/sh_mobile_sdhi.h> 19 19 #include <linux/mtd/physmap.h> 20 20 #include <linux/mtd/sh_flctl.h> 21 21 #include <linux/delay.h> ··· 423 423 [0] = { 424 424 .name = "SDHI0", 425 425 .start = 0x04ce0000, 426 - .end = 0x04ce01ff, 426 + .end = 0x04ce00ff, 427 427 .flags = IORESOURCE_MEM, 428 428 }, 429 429 [1] = { ··· 453 453 [0] = { 454 454 .name = "SDHI1", 455 455 .start = 0x04cf0000, 456 - .end = 0x04cf01ff, 456 + .end = 0x04cf00ff, 457 457 .flags = IORESOURCE_MEM, 458 458 }, 459 459 [1] = {
+2 -2
arch/sh/boards/mach-cayman/irq.c
··· 149 149 } 150 150 151 151 for (i = 0; i < NR_EXT_IRQS; i++) { 152 - set_irq_chip_and_handler(START_EXT_IRQS + i, &cayman_irq_type, 153 - handle_level_irq); 152 + irq_set_chip_and_handler(START_EXT_IRQS + i, 153 + &cayman_irq_type, handle_level_irq); 154 154 } 155 155 156 156 /* Setup the SMSC interrupt */
+1 -2
arch/sh/boards/mach-dreamcast/irq.c
··· 161 161 return; 162 162 } 163 163 164 - set_irq_chip_and_handler(i, &systemasic_int, 165 - handle_level_irq); 164 + irq_set_chip_and_handler(i, &systemasic_int, handle_level_irq); 166 165 } 167 166 }
+7 -7
arch/sh/boards/mach-ecovec24/setup.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/device.h> 13 13 #include <linux/platform_device.h> 14 - #include <linux/mfd/sh_mobile_sdhi.h> 15 14 #include <linux/mmc/host.h> 16 15 #include <linux/mmc/sh_mmcif.h> 16 + #include <linux/mmc/sh_mobile_sdhi.h> 17 17 #include <linux/mtd/physmap.h> 18 18 #include <linux/gpio.h> 19 19 #include <linux/interrupt.h> ··· 464 464 .irq = IRQ0, 465 465 }; 466 466 467 - #ifdef CONFIG_MFD_SH_MOBILE_SDHI 467 + #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) 468 468 /* SDHI0 */ 469 469 static void sdhi0_set_pwr(struct platform_device *pdev, int state) 470 470 { ··· 482 482 [0] = { 483 483 .name = "SDHI0", 484 484 .start = 0x04ce0000, 485 - .end = 0x04ce01ff, 485 + .end = 0x04ce00ff, 486 486 .flags = IORESOURCE_MEM, 487 487 }, 488 488 [1] = { ··· 522 522 [0] = { 523 523 .name = "SDHI1", 524 524 .start = 0x04cf0000, 525 - .end = 0x04cf01ff, 525 + .end = 0x04cf00ff, 526 526 .flags = IORESOURCE_MEM, 527 527 }, 528 528 [1] = { ··· 880 880 &ceu0_device, 881 881 &ceu1_device, 882 882 &keysc_device, 883 - #ifdef CONFIG_MFD_SH_MOBILE_SDHI 883 + #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) 884 884 &sdhi0_device, 885 885 #if !defined(CONFIG_MMC_SH_MMCIF) 886 886 &sdhi1_device, ··· 1102 1102 1103 1103 /* enable TouchScreen */ 1104 1104 i2c_register_board_info(0, &ts_i2c_clients, 1); 1105 - set_irq_type(IRQ0, IRQ_TYPE_LEVEL_LOW); 1105 + irq_set_irq_type(IRQ0, IRQ_TYPE_LEVEL_LOW); 1106 1106 } 1107 1107 1108 1108 /* enable CEU0 */ ··· 1162 1162 gpio_direction_input(GPIO_PTR5); 1163 1163 gpio_direction_input(GPIO_PTR6); 1164 1164 1165 - #ifdef CONFIG_MFD_SH_MOBILE_SDHI 1165 + #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) 1166 1166 /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */ 1167 1167 gpio_request(GPIO_FN_SDHI0CD, NULL); 1168 1168 gpio_request(GPIO_FN_SDHI0WP, NULL);
+2 -2
arch/sh/boards/mach-kfr2r09/setup.c
··· 10 10 #include <linux/init.h> 11 11 #include <linux/platform_device.h> 12 12 #include <linux/interrupt.h> 13 - #include <linux/mfd/sh_mobile_sdhi.h> 14 13 #include <linux/mmc/host.h> 14 + #include <linux/mmc/sh_mobile_sdhi.h> 15 15 #include <linux/mfd/tmio.h> 16 16 #include <linux/mtd/physmap.h> 17 17 #include <linux/mtd/onenand.h> ··· 354 354 [0] = { 355 355 .name = "SDHI0", 356 356 .start = 0x04ce0000, 357 - .end = 0x04ce01ff, 357 + .end = 0x04ce00ff, 358 358 .flags = IORESOURCE_MEM, 359 359 }, 360 360 [1] = {
+1 -1
arch/sh/boards/mach-microdev/irq.c
··· 117 117 static void __init make_microdev_irq(unsigned int irq) 118 118 { 119 119 disable_irq_nosync(irq); 120 - set_irq_chip_and_handler(irq, &microdev_irq_type, handle_level_irq); 120 + irq_set_chip_and_handler(irq, &microdev_irq_type, handle_level_irq); 121 121 disable_microdev_irq(irq_get_irq_data(irq)); 122 122 } 123 123
+2 -2
arch/sh/boards/mach-migor/setup.c
··· 12 12 #include <linux/interrupt.h> 13 13 #include <linux/input.h> 14 14 #include <linux/input/sh_keysc.h> 15 - #include <linux/mfd/sh_mobile_sdhi.h> 16 15 #include <linux/mmc/host.h> 16 + #include <linux/mmc/sh_mobile_sdhi.h> 17 17 #include <linux/mtd/physmap.h> 18 18 #include <linux/mtd/nand.h> 19 19 #include <linux/i2c.h> ··· 399 399 [0] = { 400 400 .name = "SDHI", 401 401 .start = 0x04ce0000, 402 - .end = 0x04ce01ff, 402 + .end = 0x04ce00ff, 403 403 .flags = IORESOURCE_MEM, 404 404 }, 405 405 [1] = {
+2 -3
arch/sh/boards/mach-se/7206/irq.c
··· 92 92 { 93 93 unsigned short sts0,sts1; 94 94 unsigned int irq = data->irq; 95 - struct irq_desc *desc = irq_to_desc(irq); 96 95 97 - if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) 96 + if (!irqd_irq_disabled(data) && !irqd_irq_inprogress(data)) 98 97 enable_se7206_irq(data); 99 98 /* FPGA isr clear */ 100 99 sts0 = __raw_readw(INTSTS0); ··· 125 126 static void make_se7206_irq(unsigned int irq) 126 127 { 127 128 disable_irq_nosync(irq); 128 - set_irq_chip_and_handler_name(irq, &se7206_irq_chip, 129 + irq_set_chip_and_handler_name(irq, &se7206_irq_chip, 129 130 handle_level_irq, "level"); 130 131 disable_se7206_irq(irq_get_irq_data(irq)); 131 132 }
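The se7206 hunk also shows the second half of the cleanup: code no longer reads irq_desc->status flags such as IRQ_DISABLED or IRQ_INPROGRESS directly, but asks through the irqd_* accessors on struct irq_data. A hedged sketch of the idiom; enable_my_irq stands in for the board-specific unmask helper:

#include <linux/irq.h>

static void enable_my_irq(struct irq_data *data);	/* assumed board helper */

static void my_fpga_ack(struct irq_data *data)
{
	/* Re-enable only if the line is neither masked nor mid-handling. */
	if (!irqd_irq_disabled(data) && !irqd_irq_inprogress(data))
		enable_my_irq(data);
}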
+12 -11
arch/sh/boards/mach-se/7343/irq.c
··· 67 67 return; 68 68 se7343_fpga_irq[i] = irq; 69 69 70 - set_irq_chip_and_handler_name(se7343_fpga_irq[i], 70 + irq_set_chip_and_handler_name(se7343_fpga_irq[i], 71 71 &se7343_irq_chip, 72 - handle_level_irq, "level"); 72 + handle_level_irq, 73 + "level"); 73 74 74 - set_irq_chip_data(se7343_fpga_irq[i], (void *)i); 75 + irq_set_chip_data(se7343_fpga_irq[i], (void *)i); 75 76 } 76 77 77 - set_irq_chained_handler(IRQ0_IRQ, se7343_irq_demux); 78 - set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); 79 - set_irq_chained_handler(IRQ1_IRQ, se7343_irq_demux); 80 - set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); 81 - set_irq_chained_handler(IRQ4_IRQ, se7343_irq_demux); 82 - set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW); 83 - set_irq_chained_handler(IRQ5_IRQ, se7343_irq_demux); 84 - set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW); 78 + irq_set_chained_handler(IRQ0_IRQ, se7343_irq_demux); 79 + irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); 80 + irq_set_chained_handler(IRQ1_IRQ, se7343_irq_demux); 81 + irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); 82 + irq_set_chained_handler(IRQ4_IRQ, se7343_irq_demux); 83 + irq_set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW); 84 + irq_set_chained_handler(IRQ5_IRQ, se7343_irq_demux); 85 + irq_set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW); 85 86 }
+8 -7
arch/sh/boards/mach-se/7722/irq.c
··· 67 67 return; 68 68 se7722_fpga_irq[i] = irq; 69 69 70 - set_irq_chip_and_handler_name(se7722_fpga_irq[i], 70 + irq_set_chip_and_handler_name(se7722_fpga_irq[i], 71 71 &se7722_irq_chip, 72 - handle_level_irq, "level"); 72 + handle_level_irq, 73 + "level"); 73 74 74 - set_irq_chip_data(se7722_fpga_irq[i], (void *)i); 75 + irq_set_chip_data(se7722_fpga_irq[i], (void *)i); 75 76 } 76 77 77 - set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux); 78 - set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); 78 + irq_set_chained_handler(IRQ0_IRQ, se7722_irq_demux); 79 + irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); 79 80 80 - set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux); 81 - set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); 81 + irq_set_chained_handler(IRQ1_IRQ, se7722_irq_demux); 82 + irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); 82 83 }
+7 -8
arch/sh/boards/mach-se/7724/irq.c
··· 140 140 return; 141 141 } 142 142 143 - set_irq_chip_and_handler_name(irq, 144 - &se7724_irq_chip, 143 + irq_set_chip_and_handler_name(irq, &se7724_irq_chip, 145 144 handle_level_irq, "level"); 146 145 } 147 146 148 - set_irq_chained_handler(IRQ0_IRQ, se7724_irq_demux); 149 - set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); 147 + irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux); 148 + irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); 150 149 151 - set_irq_chained_handler(IRQ1_IRQ, se7724_irq_demux); 152 - set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); 150 + irq_set_chained_handler(IRQ1_IRQ, se7724_irq_demux); 151 + irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); 153 152 154 - set_irq_chained_handler(IRQ2_IRQ, se7724_irq_demux); 155 - set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW); 153 + irq_set_chained_handler(IRQ2_IRQ, se7724_irq_demux); 154 + irq_set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW); 156 155 }
+3 -3
arch/sh/boards/mach-se/7724/setup.c
··· 14 14 #include <linux/device.h> 15 15 #include <linux/interrupt.h> 16 16 #include <linux/platform_device.h> 17 - #include <linux/mfd/sh_mobile_sdhi.h> 18 17 #include <linux/mmc/host.h> 18 + #include <linux/mmc/sh_mobile_sdhi.h> 19 19 #include <linux/mtd/physmap.h> 20 20 #include <linux/delay.h> 21 21 #include <linux/smc91x.h> ··· 456 456 [0] = { 457 457 .name = "SDHI0", 458 458 .start = 0x04ce0000, 459 - .end = 0x04ce01ff, 459 + .end = 0x04ce00ff, 460 460 .flags = IORESOURCE_MEM, 461 461 }, 462 462 [1] = { ··· 488 488 [0] = { 489 489 .name = "SDHI1", 490 490 .start = 0x04cf0000, 491 - .end = 0x04cf01ff, 491 + .end = 0x04cf00ff, 492 492 .flags = IORESOURCE_MEM, 493 493 }, 494 494 [1] = {
+4 -4
arch/sh/boards/mach-x3proto/gpio.c
··· 102 102 103 103 spin_lock_irqsave(&x3proto_gpio_lock, flags); 104 104 x3proto_gpio_irq_map[i] = irq; 105 - set_irq_chip_and_handler_name(irq, &dummy_irq_chip, 106 - handle_simple_irq, "gpio"); 105 + irq_set_chip_and_handler_name(irq, &dummy_irq_chip, 106 + handle_simple_irq, "gpio"); 107 107 spin_unlock_irqrestore(&x3proto_gpio_lock, flags); 108 108 } 109 109 ··· 113 113 x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio, 114 114 ilsel); 115 115 116 - set_irq_chained_handler(ilsel, x3proto_gpio_irq_handler); 117 - set_irq_wake(ilsel, 1); 116 + irq_set_chained_handler(ilsel, x3proto_gpio_irq_handler); 117 + irq_set_irq_wake(ilsel, 1); 118 118 119 119 return 0; 120 120
+3 -3
arch/sh/cchips/hd6446x/hd64461.c
··· 107 107 return -EINVAL; 108 108 } 109 109 110 - set_irq_chip_and_handler(i, &hd64461_irq_chip, 110 + irq_set_chip_and_handler(i, &hd64461_irq_chip, 111 111 handle_level_irq); 112 112 } 113 113 114 - set_irq_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux); 115 - set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW); 114 + irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux); 115 + irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW); 116 116 117 117 #ifdef CONFIG_HD64461_ENABLER 118 118 printk(KERN_INFO "HD64461: enabling PCMCIA devices\n");
+2 -2
arch/sh/kernel/cpu/irq/imask.c
··· 80 80 81 81 void make_imask_irq(unsigned int irq) 82 82 { 83 - set_irq_chip_and_handler_name(irq, &imask_irq_chip, 84 - handle_level_irq, "level"); 83 + irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq, 84 + "level"); 85 85 }
+1 -1
arch/sh/kernel/cpu/irq/intc-sh5.c
··· 135 135 136 136 /* Set default: per-line enable/disable, priority driven ack/eoi */ 137 137 for (i = 0; i < NR_INTC_IRQS; i++) 138 - set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq); 138 + irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq); 139 139 140 140 141 141 /* Disable all interrupts and set all priorities to 0 to avoid trouble */
+3 -3
arch/sh/kernel/cpu/irq/ipr.c
··· 74 74 } 75 75 76 76 disable_irq_nosync(p->irq); 77 - set_irq_chip_and_handler_name(p->irq, &desc->chip, 78 - handle_level_irq, "level"); 79 - set_irq_chip_data(p->irq, p); 77 + irq_set_chip_and_handler_name(p->irq, &desc->chip, 78 + handle_level_irq, "level"); 79 + irq_set_chip_data(p->irq, p); 80 80 disable_ipr_irq(irq_get_irq_data(p->irq)); 81 81 } 82 82 }
+2 -1
arch/sparc/Kconfig
··· 51 51 select HAVE_PERF_EVENTS 52 52 select PERF_USE_VMALLOC 53 53 select HAVE_GENERIC_HARDIRQS 54 - select GENERIC_HARDIRQS_NO_DEPRECATED 54 + select GENERIC_IRQ_SHOW 55 + select IRQ_PREFLOW_FASTEOI 55 56 56 57 config ARCH_DEFCONFIG 57 58 string
+33 -90
arch/sparc/kernel/irq_64.c
··· 162 162 /* 163 163 * /proc/interrupts printing: 164 164 */ 165 - 166 - int show_interrupts(struct seq_file *p, void *v) 165 + int arch_show_interrupts(struct seq_file *p, int prec) 167 166 { 168 - int i = *(loff_t *) v, j; 169 - struct irqaction * action; 170 - unsigned long flags; 167 + int j; 171 168 172 - if (i == 0) { 173 - seq_printf(p, " "); 174 - for_each_online_cpu(j) 175 - seq_printf(p, "CPU%d ",j); 176 - seq_putc(p, '\n'); 177 - } 178 - 179 - if (i < NR_IRQS) { 180 - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 181 - action = irq_desc[i].action; 182 - if (!action) 183 - goto skip; 184 - seq_printf(p, "%3d: ",i); 185 - #ifndef CONFIG_SMP 186 - seq_printf(p, "%10u ", kstat_irqs(i)); 187 - #else 188 - for_each_online_cpu(j) 189 - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 190 - #endif 191 - seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name); 192 - seq_printf(p, " %s", action->name); 193 - 194 - for (action=action->next; action; action = action->next) 195 - seq_printf(p, ", %s", action->name); 196 - 197 - seq_putc(p, '\n'); 198 - skip: 199 - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 200 - } else if (i == NR_IRQS) { 201 - seq_printf(p, "NMI: "); 202 - for_each_online_cpu(j) 203 - seq_printf(p, "%10u ", cpu_data(j).__nmi_count); 204 - seq_printf(p, " Non-maskable interrupts\n"); 205 - } 169 + seq_printf(p, "NMI: "); 170 + for_each_online_cpu(j) 171 + seq_printf(p, "%10u ", cpu_data(j).__nmi_count); 172 + seq_printf(p, " Non-maskable interrupts\n"); 206 173 return 0; 207 174 } 208 175 ··· 311 344 static void sun4u_irq_eoi(struct irq_data *data) 312 345 { 313 346 struct irq_handler_data *handler_data = data->handler_data; 314 - struct irq_desc *desc = irq_desc + data->irq; 315 - 316 - if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) 317 - return; 318 347 319 348 if (likely(handler_data)) 320 349 upa_writeq(ICLR_IDLE, handler_data->iclr); ··· 365 402 static void sun4v_irq_eoi(struct irq_data *data) 366 403 { 367 404 unsigned int ino = irq_table[data->irq].dev_ino; 368 - struct irq_desc *desc = irq_desc + data->irq; 369 405 int err; 370 - 371 - if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) 372 - return; 373 406 374 407 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); 375 408 if (err != HV_EOK) ··· 440 481 441 482 static void sun4v_virq_eoi(struct irq_data *data) 442 483 { 443 - struct irq_desc *desc = irq_desc + data->irq; 444 484 unsigned long dev_handle, dev_ino; 445 485 int err; 446 - 447 - if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) 448 - return; 449 486 450 487 dev_handle = irq_table[data->irq].dev_handle; 451 488 dev_ino = irq_table[data->irq].dev_ino; ··· 460 505 .irq_disable = sun4u_irq_disable, 461 506 .irq_eoi = sun4u_irq_eoi, 462 507 .irq_set_affinity = sun4u_set_affinity, 508 + .flags = IRQCHIP_EOI_IF_HANDLED, 463 509 }; 464 510 465 511 static struct irq_chip sun4v_irq = { ··· 469 513 .irq_disable = sun4v_irq_disable, 470 514 .irq_eoi = sun4v_irq_eoi, 471 515 .irq_set_affinity = sun4v_set_affinity, 516 + .flags = IRQCHIP_EOI_IF_HANDLED, 472 517 }; 473 518 474 519 static struct irq_chip sun4v_virq = { ··· 478 521 .irq_disable = sun4v_virq_disable, 479 522 .irq_eoi = sun4v_virq_eoi, 480 523 .irq_set_affinity = sun4v_virt_set_affinity, 524 + .flags = IRQCHIP_EOI_IF_HANDLED, 481 525 }; 482 526 483 - static void pre_flow_handler(unsigned int irq, struct irq_desc *desc) 527 + static void pre_flow_handler(struct irq_data *d) 484 528 { 485 - struct irq_handler_data *handler_data = get_irq_data(irq); 486 - unsigned int ino = irq_table[irq].dev_ino; 529 + struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d); 530 + unsigned int ino = irq_table[d->irq].dev_ino; 487 531 488 532 handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2); 489 - 490 - handle_fasteoi_irq(irq, desc); 491 533 } 492 534 493 535 void irq_install_pre_handler(int irq, 494 536 void (*func)(unsigned int, void *, void *), 495 537 void *arg1, void *arg2) 496 538 { 497 - struct irq_handler_data *handler_data = get_irq_data(irq); 498 - struct irq_desc *desc = irq_desc + irq; 539 + struct irq_handler_data *handler_data = irq_get_handler_data(irq); 499 540 500 541 handler_data->pre_handler = func; 501 542 handler_data->arg1 = arg1; 502 543 handler_data->arg2 = arg2; 503 544 504 - desc->handle_irq = pre_flow_handler; 545 + __irq_set_preflow_handler(irq, pre_flow_handler); 505 546 } 506 547 507 548 unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) ··· 517 562 if (!irq) { 518 563 irq = irq_alloc(0, ino); 519 564 bucket_set_irq(__pa(bucket), irq); 520 - set_irq_chip_and_handler_name(irq, 521 - &sun4u_irq, 522 - handle_fasteoi_irq, 523 - "IVEC"); 565 + irq_set_chip_and_handler_name(irq, &sun4u_irq, 566 + handle_fasteoi_irq, "IVEC"); 524 567 } 525 568 526 - handler_data = get_irq_data(irq); 569 + handler_data = irq_get_handler_data(irq); 527 570 if (unlikely(handler_data)) 528 571 goto out; 529 572 ··· 530 577 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); 531 578 prom_halt(); 532 579 } 533 - set_irq_data(irq, handler_data); 580 + irq_set_handler_data(irq, handler_data); 534 581 535 582 handler_data->imap = imap; 536 583 handler_data->iclr = iclr; ··· 553 600 if (!irq) { 554 601 irq = irq_alloc(0, sysino); 555 602 bucket_set_irq(__pa(bucket), irq); 556 - set_irq_chip_and_handler_name(irq, chip, 557 - handle_fasteoi_irq, 603 + irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, 558 604 "IVEC"); 559 605 } 560 606 561 - handler_data = get_irq_data(irq); 607 + handler_data = irq_get_handler_data(irq); 562 608 if (unlikely(handler_data)) 563 609 goto out; 564 610 ··· 566 614 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); 567 615 prom_halt(); 568 616 } 569 - set_irq_data(irq, handler_data); 617 + irq_set_handler_data(irq, handler_data); 570 618 571 619 /* Catch accidental accesses to these things. IMAP/ICLR handling 572 620 * is done by hypervisor calls on sun4v platforms, not by direct ··· 591 639 struct irq_handler_data *handler_data; 592 640 unsigned long hv_err, cookie; 593 641 struct ino_bucket *bucket; 594 - struct irq_desc *desc; 595 642 unsigned int irq; 596 643 597 644 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); ··· 611 660 irq = irq_alloc(devhandle, devino); 612 661 bucket_set_irq(__pa(bucket), irq); 613 662 614 - set_irq_chip_and_handler_name(irq, &sun4v_virq, 615 - handle_fasteoi_irq, 663 + irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq, 616 664 "IVEC"); 617 665 618 666 handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); ··· 622 672 * especially wrt. locking, we do not let request_irq() enable 623 673 * the interrupt. 624 674 */ 625 - desc = irq_desc + irq; 626 - desc->status |= IRQ_NOAUTOEN; 627 - 628 - set_irq_data(irq, handler_data); 675 + irq_set_status_flags(irq, IRQ_NOAUTOEN); 676 + irq_set_handler_data(irq, handler_data); 629 677 630 678 /* Catch accidental accesses to these things. IMAP/ICLR handling 631 679 * is done by hypervisor calls on sun4v platforms, not by direct ··· 682 734 orig_sp = set_hardirq_stack(); 683 735 684 736 while (bucket_pa) { 685 - struct irq_desc *desc; 686 737 unsigned long next_pa; 687 738 unsigned int irq; 688 739 ··· 689 742 irq = bucket_get_irq(bucket_pa); 690 743 bucket_clear_chain_pa(bucket_pa); 691 744 692 - desc = irq_desc + irq; 693 - 694 - if (!(desc->status & IRQ_DISABLED)) 695 - desc->handle_irq(irq, desc); 745 + generic_handle_irq(irq); 696 746 697 747 bucket_pa = next_pa; 698 748 } ··· 732 788 unsigned int irq; 733 789 734 790 for (irq = 0; irq < NR_IRQS; irq++) { 791 + struct irq_desc *desc = irq_to_desc(irq); 792 + struct irq_data *data = irq_desc_get_irq_data(desc); 735 793 unsigned long flags; 736 794 737 - raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); 738 - if (irq_desc[irq].action && 739 - !(irq_desc[irq].status & IRQ_PER_CPU)) { 740 - struct irq_data *data = irq_get_irq_data(irq); 741 - 795 + raw_spin_lock_irqsave(&desc->lock, flags); 796 + if (desc->action && !irqd_is_per_cpu(data)) { 742 797 if (data->chip->irq_set_affinity) 743 798 data->chip->irq_set_affinity(data, 744 - data->affinity, 745 - false); 799 + data->affinity, 800 + false); 746 801 } 747 - raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 802 + raw_spin_unlock_irqrestore(&desc->lock, flags); 748 803 } 749 804 750 805 tick_ops->disable_irq(); ··· 981 1038 : "i" (PSTATE_IE) 982 1039 : "g1"); 983 1040 984 - irq_desc[0].action = &timer_irq_action; 1041 + irq_to_desc(0)->action = &timer_irq_action; 985 1042 }
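sparc64's conversion is the most involved of the batch: instead of overriding desc->handle_irq with a private wrapper around handle_fasteoi_irq(), it selects IRQ_PREFLOW_FASTEOI (see the Kconfig hunk above) and registers a preflow handler that the generic fasteoi flow invokes first. A sketch of that registration, assuming CONFIG_IRQ_PREFLOW_FASTEOI and a driver-private my_handler_data:

#include <linux/irq.h>

struct my_handler_data {
	void (*pre_handler)(void *arg);	/* placeholder callback */
	void *arg;
};

static void my_preflow(struct irq_data *d)
{
	struct my_handler_data *hd = irq_data_get_irq_handler_data(d);

	hd->pre_handler(hd->arg);	/* runs before the fasteoi flow proper */
}

static void my_install_pre_handler(unsigned int irq, struct my_handler_data *hd)
{
	irq_set_handler_data(irq, hd);
	__irq_set_preflow_handler(irq, my_preflow);
}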
+1 -1
arch/sparc/kernel/pci.c
··· 1012 1012 1013 1013 void arch_teardown_msi_irq(unsigned int irq) 1014 1014 { 1015 - struct msi_desc *entry = get_irq_msi(irq); 1015 + struct msi_desc *entry = irq_get_msi_desc(irq); 1016 1016 struct pci_dev *pdev = entry->dev; 1017 1017 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; 1018 1018
+6 -9
arch/sparc/kernel/pci_msi.c
··· 30 30 31 31 err = ops->dequeue_msi(pbm, msiqid, &head, &msi); 32 32 if (likely(err > 0)) { 33 - struct irq_desc *desc; 34 33 unsigned int irq; 35 34 36 35 irq = pbm->msi_irq_table[msi - pbm->msi_first]; 37 - desc = irq_desc + irq; 38 - 39 - desc->handle_irq(irq, desc); 36 + generic_handle_irq(irq); 40 37 } 41 38 42 39 if (unlikely(err < 0)) ··· 133 136 if (!*irq_p) 134 137 goto out_err; 135 138 136 - set_irq_chip_and_handler_name(*irq_p, &msi_irq, 137 - handle_simple_irq, "MSI"); 139 + irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq, 140 + "MSI"); 138 141 139 142 err = alloc_msi(pbm); 140 143 if (unlikely(err < 0)) ··· 160 163 } 161 164 msg.data = msi; 162 165 163 - set_irq_msi(*irq_p, entry); 166 + irq_set_msi_desc(*irq_p, entry); 164 167 write_msi_msg(*irq_p, &msg); 165 168 166 169 return 0; ··· 169 172 free_msi(pbm, msi); 170 173 171 174 out_irq_free: 172 - set_irq_chip(*irq_p, NULL); 175 + irq_set_chip(*irq_p, NULL); 173 176 irq_free(*irq_p); 174 177 *irq_p = 0; 175 178 ··· 208 211 209 212 free_msi(pbm, msi_num); 210 213 211 - set_irq_chip(irq, NULL); 214 + irq_set_chip(irq, NULL); 212 215 irq_free(irq); 213 216 } 214 217
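Both the MSI queue drain above and the sparc vector loop earlier make the same substitution: rather than fetching irq_desc and calling desc->handle_irq by hand, a demultiplexer now passes the Linux IRQ number to generic_handle_irq(), which picks the installed flow handler and performs the state checks itself. A minimal sketch with a hypothetical pending bitmap:

#include <linux/irq.h>
#include <linux/bitops.h>

static void my_demux(unsigned long pending, unsigned int irq_base)
{
	int bit;

	/* Dispatch every pending source through the generic entry point. */
	for_each_set_bit(bit, &pending, BITS_PER_LONG)
		generic_handle_irq(irq_base + bit);
}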
-1
arch/tile/Kconfig
··· 11 11 select HAVE_GENERIC_HARDIRQS 12 12 select GENERIC_IRQ_PROBE 13 13 select GENERIC_PENDING_IRQ if SMP 14 - select GENERIC_HARDIRQS_NO_DEPRECATED 15 14 select GENERIC_IRQ_SHOW 16 15 17 16 # FIXME: investigate whether we need/want these options.
-1
arch/um/Kconfig.common
··· 7 7 bool 8 8 default y 9 9 select HAVE_GENERIC_HARDIRQS 10 - select GENERIC_HARDIRQS_NO_DEPRECATED 11 10 select GENERIC_IRQ_SHOW 12 11 13 12 config MMU
+1 -1
arch/unicore32/Kconfig
··· 10 10 select HAVE_KERNEL_LZMA 11 11 select GENERIC_FIND_FIRST_BIT 12 12 select GENERIC_IRQ_PROBE 13 - select GENERIC_HARDIRQS_NO_DEPRECATED 13 + select GENERIC_IRQ_SHOW 14 14 select ARCH_WANT_FRAME_POINTERS 15 15 help 16 16 UniCore-32 is 32-bit Instruction Set Architecture,
+8 -50
arch/unicore32/kernel/irq.c
··· 321 321 writel(1, INTC_ICCR); 322 322 323 323 for (irq = 0; irq < IRQ_GPIOHIGH; irq++) { 324 - set_irq_chip(irq, &puv3_low_gpio_chip); 325 - set_irq_handler(irq, handle_edge_irq); 324 + irq_set_chip(irq, &puv3_low_gpio_chip); 325 + irq_set_handler(irq, handle_edge_irq); 326 326 irq_modify_status(irq, 327 327 IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 328 328 0); 329 329 } 330 330 331 331 for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) { 332 - set_irq_chip(irq, &puv3_normal_chip); 333 - set_irq_handler(irq, handle_level_irq); 332 + irq_set_chip(irq, &puv3_normal_chip); 333 + irq_set_handler(irq, handle_level_irq); 334 334 irq_modify_status(irq, 335 335 IRQ_NOREQUEST | IRQ_NOAUTOEN, 336 336 IRQ_NOPROBE); 337 337 } 338 338 339 339 for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) { 340 - set_irq_chip(irq, &puv3_high_gpio_chip); 341 - set_irq_handler(irq, handle_edge_irq); 340 + irq_set_chip(irq, &puv3_high_gpio_chip); 341 + irq_set_handler(irq, handle_edge_irq); 342 342 irq_modify_status(irq, 343 343 IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 344 344 0); ··· 347 347 /* 348 348 * Install handler for GPIO 0-27 edge detect interrupts 349 349 */ 350 - set_irq_chip(IRQ_GPIOHIGH, &puv3_normal_chip); 351 - set_irq_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler); 350 + irq_set_chip(IRQ_GPIOHIGH, &puv3_normal_chip); 351 + irq_set_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler); 352 352 353 353 #ifdef CONFIG_PUV3_GPIO 354 354 puv3_init_gpio(); 355 355 #endif 356 - } 357 - 358 - int show_interrupts(struct seq_file *p, void *v) 359 - { 360 - int i = *(loff_t *) v, cpu; 361 - struct irq_desc *desc; 362 - struct irqaction *action; 363 - unsigned long flags; 364 - 365 - if (i == 0) { 366 - char cpuname[12]; 367 - 368 - seq_printf(p, " "); 369 - for_each_present_cpu(cpu) { 370 - sprintf(cpuname, "CPU%d", cpu); 371 - seq_printf(p, " %10s", cpuname); 372 - } 373 - seq_putc(p, '\n'); 374 - } 375 - 376 - if (i < nr_irqs) { 377 - desc = irq_to_desc(i); 378 - raw_spin_lock_irqsave(&desc->lock, flags); 379 - action = desc->action; 380 - if (!action) 381 - goto unlock; 382 - 383 - seq_printf(p, "%3d: ", i); 384 - for_each_present_cpu(cpu) 385 - seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); 386 - seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-"); 387 - seq_printf(p, " %s", action->name); 388 - for (action = action->next; action; action = action->next) 389 - seq_printf(p, ", %s", action->name); 390 - 391 - seq_putc(p, '\n'); 392 - unlock: 393 - raw_spin_unlock_irqrestore(&desc->lock, flags); 394 - } else if (i == nr_irqs) { 395 - seq_printf(p, "Error in interrupt!\n"); 396 - } 397 - return 0; 398 356 } 399 357 400 358 /*
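The wholesale deletion of show_interrupts() above is what selecting GENERIC_IRQ_SHOW buys: the genirq core now renders the per-IRQ table in /proc/interrupts itself, and an architecture only supplies arch_show_interrupts() for its extra rows (IPI, NMI, error counters). A hedged sketch; my_irq_err_count is a placeholder counter:

#include <linux/seq_file.h>
#include <linux/interrupt.h>

static unsigned long my_irq_err_count;	/* placeholder error counter */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	/* The core prints the per-IRQ rows; only the extras go here. */
	seq_printf(p, "%*s: %10lu\n", prec, "ERR", my_irq_err_count);
	return 0;
}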
+1 -1
arch/x86/kernel/apb_timer.c
··· 316 316 irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); 317 317 irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); 318 318 /* APB timer irqs are set up as mp_irqs, timer is edge type */ 319 - __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge"); 319 + __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge"); 320 320 321 321 if (system_state == SYSTEM_BOOTING) { 322 322 if (request_irq(adev->irq, apbt_interrupt_handler,
+6 -4
arch/x86/xen/p2m.c
··· 497 497 return true; 498 498 } 499 499 500 - bool __early_alloc_p2m(unsigned long pfn) 500 + static bool __init __early_alloc_p2m(unsigned long pfn) 501 501 { 502 502 unsigned topidx, mididx, idx; 503 503 ··· 530 530 } 531 531 return idx != 0; 532 532 } 533 - unsigned long set_phys_range_identity(unsigned long pfn_s, 533 + unsigned long __init set_phys_range_identity(unsigned long pfn_s, 534 534 unsigned long pfn_e) 535 535 { 536 536 unsigned long pfn; ··· 671 671 page->private = mfn; 672 672 page->index = pfn_to_mfn(pfn); 673 673 674 - __set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); 674 + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) 675 + return -ENOMEM; 676 + 675 677 if (!PageHighMem(page)) 676 678 /* Just zap old mapping for now */ 677 679 pte_clear(&init_mm, address, ptep); ··· 711 709 spin_lock_irqsave(&m2p_override_lock, flags); 712 710 list_del(&page->lru); 713 711 spin_unlock_irqrestore(&m2p_override_lock, flags); 714 - __set_phys_to_machine(pfn, page->index); 712 + set_phys_to_machine(pfn, page->index); 715 713 716 714 if (!PageHighMem(page)) 717 715 set_pte_at(&init_mm, address, ptep,
-1
arch/xtensa/Kconfig
··· 9 9 select HAVE_IDE 10 10 select HAVE_GENERIC_HARDIRQS 11 11 select GENERIC_IRQ_SHOW 12 - select GENERIC_HARDIRQS_NO_DEPRECATED 13 12 help 14 13 Xtensa processors are 32-bit RISC machines designed by Tensilica 15 14 primarily for embedded systems. These processors are both
+1 -1
drivers/ata/pata_ixp4xx_cf.c
··· 167 167 168 168 irq = platform_get_irq(pdev, 0); 169 169 if (irq) 170 - set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 170 + irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 171 171 172 172 /* Setup expansion bus chip selects */ 173 173 *data->cs0_cfg = data->cs0_bits;
+22 -21
drivers/ata/pata_palmld.c
··· 33 33 34 34 #define DRV_NAME "pata_palmld" 35 35 36 + static struct gpio palmld_hdd_gpios[] = { 37 + { GPIO_NR_PALMLD_IDE_PWEN, GPIOF_INIT_HIGH, "HDD Power" }, 38 + { GPIO_NR_PALMLD_IDE_RESET, GPIOF_INIT_LOW, "HDD Reset" }, 39 + }; 40 + 36 41 static struct scsi_host_template palmld_sht = { 37 42 ATA_PIO_SHT(DRV_NAME), 38 43 }; ··· 57 52 58 53 /* allocate host */ 59 54 host = ata_host_alloc(&pdev->dev, 1); 60 - if (!host) 61 - return -ENOMEM; 55 + if (!host) { 56 + ret = -ENOMEM; 57 + goto err1; 58 + } 62 59 63 60 /* remap drive's physical memory address */ 64 61 mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000); 65 - if (!mem) 66 - return -ENOMEM; 62 + if (!mem) { 63 + ret = -ENOMEM; 64 + goto err1; 65 + } 67 66 68 67 /* request and activate power GPIO, IRQ GPIO */ 69 - ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR"); 68 + ret = gpio_request_array(palmld_hdd_gpios, 69 + ARRAY_SIZE(palmld_hdd_gpios)); 70 70 if (ret) 71 71 goto err1; 72 - ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1); 73 - if (ret) 74 - goto err2; 75 - 76 - ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST"); 77 - if (ret) 78 - goto err2; 79 - ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0); 80 - if (ret) 81 - goto err3; 82 72 83 73 /* reset the drive */ 84 74 gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0); ··· 96 96 ata_sff_std_ports(&ap->ioaddr); 97 97 98 98 /* activate host */ 99 - return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, 99 + ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, 100 100 &palmld_sht); 101 + if (ret) 102 + goto err2; 101 103 102 - err3: 103 - gpio_free(GPIO_NR_PALMLD_IDE_RESET); 104 + return ret; 105 + 104 106 err2: 105 - gpio_free(GPIO_NR_PALMLD_IDE_PWEN); 107 + gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios)); 106 108 err1: 107 109 return ret; 108 110 } ··· 118 116 /* power down the HDD */ 119 117 gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0); 120 118 121 - gpio_free(GPIO_NR_PALMLD_IDE_RESET); 122 - gpio_free(GPIO_NR_PALMLD_IDE_PWEN); 119 + gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios)); 123 120 124 121 return 0; 125 122 }
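pata_palmld's rework collapses its request/direction/unwind ladder into the table-driven gpiolib helpers: gpio_request_array() claims each entry, applying direction and initial level from the flags, and frees everything already claimed if one request fails. A sketch with placeholder GPIO numbers and labels:

#include <linux/kernel.h>
#include <linux/gpio.h>

static struct gpio my_gpios[] = {
	{ 10, GPIOF_OUT_INIT_HIGH, "Device Power" },	/* placeholders */
	{ 11, GPIOF_OUT_INIT_LOW,  "Device Reset" },
};

static int my_claim_gpios(void)
{
	/* All-or-nothing: a failure rolls back earlier requests. */
	return gpio_request_array(my_gpios, ARRAY_SIZE(my_gpios));
}

static void my_release_gpios(void)
{
	gpio_free_array(my_gpios, ARRAY_SIZE(my_gpios));
}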
+2 -2
drivers/ata/pata_rb532_cf.c
··· 60 60 struct rb532_cf_info *info = ah->private_data; 61 61 62 62 if (gpio_get_value(info->gpio_line)) { 63 - set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); 63 + irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); 64 64 ata_sff_interrupt(info->irq, dev_instance); 65 65 } else { 66 - set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); 66 + irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); 67 67 } 68 68 69 69 return IRQ_HANDLED;
+1 -1
drivers/char/tpm/tpm.c
··· 980 980 return -EBUSY; 981 981 } 982 982 983 - chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); 983 + chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); 984 984 if (chip->data_buffer == NULL) { 985 985 clear_bit(0, &chip->is_open); 986 986 put_device(chip->dev);
+1 -1
drivers/edac/amd64_edac.c
··· 2679 2679 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); 2680 2680 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); 2681 2681 if (!(mcis && ecc_stngs)) 2682 - goto err_ret; 2682 + goto err_free; 2683 2683 2684 2684 msrs = msrs_alloc(); 2685 2685 if (!msrs)
+1 -1
drivers/gpio/Kconfig
··· 416 416 417 417 config AB8500_GPIO 418 418 bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions" 419 - depends on AB8500_CORE 419 + depends on AB8500_CORE && BROKEN 420 420 help 421 421 Select this to enable the AB8500 IC GPIO driver 422 422 endif
+1 -1
drivers/hwmon/gpio-fan.c
··· 116 116 return 0; 117 117 118 118 INIT_WORK(&fan_data->alarm_work, fan_alarm_notify); 119 - set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH); 119 + irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH); 120 120 err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED, 121 121 "GPIO fan alarm", fan_data); 122 122 if (err)
+2 -2
drivers/input/keyboard/lm8323.c
··· 809 809 struct lm8323_chip *lm = i2c_get_clientdata(client); 810 810 int i; 811 811 812 - set_irq_wake(client->irq, 0); 812 + irq_set_irq_wake(client->irq, 0); 813 813 disable_irq(client->irq); 814 814 815 815 mutex_lock(&lm->lock); ··· 838 838 led_classdev_resume(&lm->pwm[i].cdev); 839 839 840 840 enable_irq(client->irq); 841 - set_irq_wake(client->irq, 1); 841 + irq_set_irq_wake(client->irq, 1); 842 842 843 843 return 0; 844 844 }
+1 -1
drivers/input/serio/ams_delta_serio.c
··· 149 149 * at FIQ level, switch back from edge to simple interrupt handler 150 150 * to avoid bad interaction. 151 151 */ 152 - set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 152 + irq_set_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 153 153 handle_simple_irq); 154 154 155 155 serio_register_port(ams_delta_serio);
+1 -1
drivers/input/touchscreen/mainstone-wm97xx.c
··· 219 219 } 220 220 221 221 wm->pen_irq = gpio_to_irq(irq); 222 - set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH); 222 + irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH); 223 223 } else /* pen irq not supported */ 224 224 pen_int = 0; 225 225
+1 -1
drivers/input/touchscreen/zylonite-wm97xx.c
··· 193 193 gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26); 194 194 195 195 wm->pen_irq = IRQ_GPIO(gpio_touch_irq); 196 - set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); 196 + irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); 197 197 198 198 wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, 199 199 WM97XX_GPIO_POL_HIGH,
-14
drivers/mfd/Kconfig
··· 60 60 This driver supports the ASIC3 multifunction chip found on many 61 61 PDAs (mainly iPAQ and HTC based ones) 62 62 63 - config MFD_SH_MOBILE_SDHI 64 - bool "Support for SuperH Mobile SDHI" 65 - depends on SUPERH || ARCH_SHMOBILE 66 - select MFD_CORE 67 - select TMIO_MMC_DMA 68 - ---help--- 69 - This driver supports the SDHI hardware block found in many 70 - SuperH Mobile SoCs. 71 - 72 63 config MFD_DAVINCI_VOICECODEC 73 64 tristate 74 65 select MFD_CORE ··· 256 265 config MFD_TMIO 257 266 bool 258 267 default n 259 - 260 - config TMIO_MMC_DMA 261 - bool 262 - select DMA_ENGINE 263 - select DMADEVICES 264 268 265 269 config MFD_T7L66XB 266 270 bool "Support Toshiba T7L66XB"
-1
drivers/mfd/Makefile
··· 6 6 obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o 7 7 obj-$(CONFIG_MFD_SM501) += sm501.o 8 8 obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o 9 - obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o 10 9 11 10 obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o 12 11 obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
+33 -62
drivers/mfd/sh_mobile_sdhi.c → drivers/mmc/host/sh_mobile_sdhi.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/platform_device.h> 25 25 #include <linux/mmc/host.h> 26 - #include <linux/mfd/core.h> 26 + #include <linux/mmc/sh_mobile_sdhi.h> 27 27 #include <linux/mfd/tmio.h> 28 - #include <linux/mfd/sh_mobile_sdhi.h> 29 28 #include <linux/sh_dma.h> 29 + 30 + #include "tmio_mmc.h" 30 31 31 32 struct sh_mobile_sdhi { 32 33 struct clk *clk; 33 34 struct tmio_mmc_data mmc_data; 34 - struct mfd_cell cell_mmc; 35 35 struct sh_dmae_slave param_tx; 36 36 struct sh_dmae_slave param_rx; 37 37 struct tmio_mmc_dma dma_priv; 38 38 }; 39 39 40 - static struct resource sh_mobile_sdhi_resources[] = { 41 - { 42 - .start = 0x000, 43 - .end = 0x1ff, 44 - .flags = IORESOURCE_MEM, 45 - }, 46 - { 47 - .start = 0, 48 - .end = 0, 49 - .flags = IORESOURCE_IRQ, 50 - }, 51 - }; 52 - 53 - static struct mfd_cell sh_mobile_sdhi_cell = { 54 - .name = "tmio-mmc", 55 - .num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources), 56 - .resources = sh_mobile_sdhi_resources, 57 - }; 58 - 59 - static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state) 40 + static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) 60 41 { 61 - struct platform_device *pdev = to_platform_device(tmio->dev.parent); 62 42 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 63 43 64 44 if (p && p->set_pwr) 65 45 p->set_pwr(pdev, state); 66 46 } 67 47 68 - static int sh_mobile_sdhi_get_cd(struct platform_device *tmio) 48 + static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) 69 49 { 70 - struct platform_device *pdev = to_platform_device(tmio->dev.parent); 71 50 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 72 51 73 52 if (p && p->get_cd) ··· 60 81 struct sh_mobile_sdhi *priv; 61 82 struct tmio_mmc_data *mmc_data; 62 83 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 63 - struct resource *mem; 84 + struct tmio_mmc_host *host; 64 85 char clk_name[8]; 65 - int ret, irq; 66 - 67 - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 68 - if (!mem) 69 - dev_err(&pdev->dev, "missing MEM resource\n"); 70 - 71 - irq = platform_get_irq(pdev, 0); 72 - if (irq < 0) 73 - dev_err(&pdev->dev, "missing IRQ resource\n"); 74 - 75 - if (!mem || (irq < 0)) 76 - return -EINVAL; 86 + int ret; 77 87 78 88 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); 79 89 if (priv == NULL) { ··· 77 109 if (IS_ERR(priv->clk)) { 78 110 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); 79 111 ret = PTR_ERR(priv->clk); 80 - kfree(priv); 81 - return ret; 112 + goto eclkget; 82 113 } 83 114 84 115 clk_enable(priv->clk); ··· 90 123 mmc_data->flags = p->tmio_flags; 91 124 mmc_data->ocr_mask = p->tmio_ocr_mask; 92 125 mmc_data->capabilities |= p->tmio_caps; 126 + 127 + if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { 128 + priv->param_tx.slave_id = p->dma_slave_tx; 129 + priv->param_rx.slave_id = p->dma_slave_rx; 130 + priv->dma_priv.chan_priv_tx = &priv->param_tx; 131 + priv->dma_priv.chan_priv_rx = &priv->param_rx; 132 + priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ 133 + mmc_data->dma = &priv->dma_priv; 134 + } 93 135 } 94 136 95 137 /* ··· 112 136 */ 113 137 mmc_data->flags |= TMIO_MMC_SDIO_IRQ; 114 138 115 - if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { 116 - priv->param_tx.slave_id = p->dma_slave_tx; 117 - priv->param_rx.slave_id = p->dma_slave_rx; 118 - priv->dma_priv.chan_priv_tx = &priv->param_tx; 119 - priv->dma_priv.chan_priv_rx = &priv->param_rx; 120 - priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ 121 - mmc_data->dma = &priv->dma_priv; 122 - } 139 + ret = tmio_mmc_host_probe(&host, pdev, mmc_data); 140 + if (ret < 0) 141 + goto eprobe; 123 142 124 - memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); 125 - priv->cell_mmc.mfd_data = mmc_data; 143 + pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 144 + (unsigned long)host->ctl, host->irq); 126 145 127 - platform_set_drvdata(pdev, priv); 146 + return ret; 128 147 129 - ret = mfd_add_devices(&pdev->dev, pdev->id, 130 - &priv->cell_mmc, 1, mem, irq); 131 - if (ret) { 132 - clk_disable(priv->clk); 133 - clk_put(priv->clk); 134 - kfree(priv); 135 - } 136 - 148 + eprobe: 149 + clk_disable(priv->clk); 150 + clk_put(priv->clk); 151 + eclkget: 152 + kfree(priv); 137 153 return ret; 138 154 } 139 155 140 156 static int sh_mobile_sdhi_remove(struct platform_device *pdev) 141 157 { 142 - struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev); 158 + struct mmc_host *mmc = platform_get_drvdata(pdev); 159 + struct tmio_mmc_host *host = mmc_priv(mmc); 160 + struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); 143 161 144 - mfd_remove_devices(&pdev->dev); 162 + tmio_mmc_host_remove(host); 145 163 clk_disable(priv->clk); 146 164 clk_put(priv->clk); 147 165 kfree(priv); ··· 168 198 MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); 169 199 MODULE_AUTHOR("Magnus Damm"); 170 200 MODULE_LICENSE("GPL v2"); 201 + MODULE_ALIAS("platform:sh_mobile_sdhi");
+1 -1
drivers/misc/sgi-gru/grufile.c
··· 373 373 374 374 if (gru_irq_count[chiplet] == 0) { 375 375 gru_chip[chiplet].name = irq_name; 376 - ret = set_irq_chip(irq, &gru_chip[chiplet]); 376 + ret = irq_set_chip(irq, &gru_chip[chiplet]); 377 377 if (ret) { 378 378 printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", 379 379 GRU_DRIVER_ID_STR, -ret);
+1 -2
drivers/mmc/card/mmc_test.c
··· 1875 1875 unsigned int tot_sz, int max_scatter) 1876 1876 { 1877 1877 unsigned int dev_addr, i, cnt, sz, ssz; 1878 - struct timespec ts1, ts2, ts; 1878 + struct timespec ts1, ts2; 1879 1879 int ret; 1880 1880 1881 1881 sz = test->area.max_tfr; ··· 1912 1912 } 1913 1913 getnstimeofday(&ts2); 1914 1914 1915 - ts = timespec_sub(ts2, ts1); 1916 1915 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1917 1916 1918 1917 return 0;
+13 -1
drivers/mmc/core/sd_ops.c
··· 9 9 * your option) any later version. 10 10 */ 11 11 12 + #include <linux/slab.h> 12 13 #include <linux/types.h> 13 14 #include <linux/scatterlist.h> 14 15 ··· 253 252 struct mmc_command cmd; 254 253 struct mmc_data data; 255 254 struct scatterlist sg; 255 + void *data_buf; 256 256 257 257 BUG_ON(!card); 258 258 BUG_ON(!card->host); ··· 264 262 err = mmc_app_cmd(card->host, card); 265 263 if (err) 266 264 return err; 265 + 266 + /* dma onto stack is unsafe/nonportable, but callers to this 267 + * routine normally provide temporary on-stack buffers ... 268 + */ 269 + data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL); 270 + if (data_buf == NULL) 271 + return -ENOMEM; 267 272 268 273 memset(&mrq, 0, sizeof(struct mmc_request)); 269 274 memset(&cmd, 0, sizeof(struct mmc_command)); ··· 289 280 data.sg = &sg; 290 281 data.sg_len = 1; 291 282 292 - sg_init_one(&sg, scr, 8); 283 + sg_init_one(&sg, data_buf, 8); 293 284 294 285 mmc_set_data_timeout(&data, card); 295 286 296 287 mmc_wait_for_req(card->host, &mrq); 288 + 289 + memcpy(scr, data_buf, sizeof(card->raw_scr)); 290 + kfree(data_buf); 297 291 298 292 if (cmd.error) 299 293 return cmd.error;
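The sd_ops.c change is the classic bounce-buffer fix called out in its new comment: callers hand in on-stack scr buffers, and stack memory is not a safe or portable DMA target, so the transfer now lands in kmalloc'd memory and is copied out afterwards. A condensed sketch of the idiom; my_issue_request stands in for the actual mmc_wait_for_req() plumbing:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

static int my_issue_request(struct scatterlist *sg);	/* assumed helper */

static int my_read_dma_safe(void *dest, size_t len)
{
	struct scatterlist sg;
	void *bounce = kmalloc(len, GFP_KERNEL);
	int err;

	if (!bounce)
		return -ENOMEM;

	sg_init_one(&sg, bounce, len);	/* hardware DMAs into heap memory */
	err = my_issue_request(&sg);
	if (!err)
		memcpy(dest, bounce, len);	/* copy out to the caller's buffer */
	kfree(bounce);
	return err;
}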
+13 -1
drivers/mmc/host/Kconfig
··· 439 439 To compile this driver as a module, choose M here: the 440 440 module will be called sdricoh_cs. 441 441 442 + config MMC_TMIO_CORE 443 + tristate 444 + 442 445 config MMC_TMIO 443 446 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" 444 - depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI 447 + depends on MFD_TMIO || MFD_ASIC3 448 + select MMC_TMIO_CORE 445 449 help 446 450 This provides support for the SD/MMC cell found in TC6393XB, 447 451 T7L66XB and also HTC ASIC3 452 + 453 + config MMC_SDHI 454 + tristate "SH-Mobile SDHI SD/SDIO controller support" 455 + depends on SUPERH || ARCH_SHMOBILE 456 + select MMC_TMIO_CORE 457 + help 458 + This provides support for the SDHI SD/SDIO controller found in 459 + SuperH and ARM SH-Mobile SoCs 448 460 449 461 config MMC_CB710 450 462 tristate "ENE CB710 MMC/SD Interface support"
+7 -1
drivers/mmc/host/Makefile
··· 29 29 obj-$(CONFIG_MMC_S3C) += s3cmci.o 30 30 obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 31 31 obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 32 - obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 32 + obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o 33 + tmio_mmc_core-y := tmio_mmc_pio.o 34 + ifneq ($(CONFIG_MMC_SDHI),n) 35 + tmio_mmc_core-y += tmio_mmc_dma.o 36 + endif 37 + obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o 38 + obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 33 39 obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 34 40 obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 35 41 obj-$(CONFIG_MMC_DW) += dw_mmc.o
+2 -2
drivers/mmc/host/dw_mmc.c
··· 316 316 317 317 /* Stop the IDMAC running */ 318 318 temp = mci_readl(host, BMOD); 319 - temp &= ~SDMMC_IDMAC_ENABLE; 319 + temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 320 320 mci_writel(host, BMOD, temp); 321 321 } 322 322 ··· 385 385 386 386 /* Enable the IDMAC */ 387 387 temp = mci_readl(host, BMOD); 388 - temp |= SDMMC_IDMAC_ENABLE; 388 + temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 389 389 mci_writel(host, BMOD, temp); 390 390 391 391 /* Start it running */
+12 -1
drivers/mmc/host/mmci.c
··· 68 68 .datalength_bits = 16, 69 69 }; 70 70 71 + static struct variant_data variant_arm_extended_fifo = { 72 + .fifosize = 128 * 4, 73 + .fifohalfsize = 64 * 4, 74 + .datalength_bits = 16, 75 + }; 76 + 71 77 static struct variant_data variant_u300 = { 72 78 .fifosize = 16 * 4, 73 79 .fifohalfsize = 8 * 4, ··· 1283 1277 static struct amba_id mmci_ids[] = { 1284 1278 { 1285 1279 .id = 0x00041180, 1286 - .mask = 0x000fffff, 1280 + .mask = 0xff0fffff, 1287 1281 .data = &variant_arm, 1282 + }, 1283 + { 1284 + .id = 0x01041180, 1285 + .mask = 0xff0fffff, 1286 + .data = &variant_arm_extended_fifo, 1288 1287 }, 1289 1288 { 1290 1289 .id = 0x00041181,
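Context for the mmci ID-table change: the AMBA bus binds a primecell by masking its peripheral ID and comparing against .id, so widening the mask from 0x000fffff to 0xff0fffff makes the revision field significant, which is what lets rev-1 parts (ID 0x01041180) select the new extended-FIFO variant. Roughly the comparison the core applies, restated as a sketch:

#include <linux/amba/bus.h>

/* Illustrative restatement of the per-entry match rule. */
static int my_amba_id_match(u32 periphid, const struct amba_id *entry)
{
	return (periphid & entry->mask) == entry->id;
}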
+2
drivers/mmc/host/of_mmc_spi.c
··· 15 15 #include <linux/module.h> 16 16 #include <linux/device.h> 17 17 #include <linux/slab.h> 18 + #include <linux/irq.h> 18 19 #include <linux/gpio.h> 19 20 #include <linux/of.h> 20 21 #include <linux/of_gpio.h> 22 + #include <linux/of_irq.h> 21 23 #include <linux/spi/spi.h> 22 24 #include <linux/spi/mmc_spi.h> 23 25 #include <linux/mmc/core.h>
+79 -7
drivers/mmc/host/sdhci-esdhc-imx.c
··· 16 16 #include <linux/err.h> 17 17 #include <linux/clk.h> 18 18 #include <linux/gpio.h> 19 + #include <linux/slab.h> 19 20 #include <linux/mmc/host.h> 20 21 #include <linux/mmc/sdhci-pltfm.h> 22 + #include <linux/mmc/mmc.h> 23 + #include <linux/mmc/sdio.h> 21 24 #include <mach/hardware.h> 22 25 #include <mach/esdhc.h> 23 26 #include "sdhci.h" 24 27 #include "sdhci-pltfm.h" 25 28 #include "sdhci-esdhc.h" 29 + 30 + /* VENDOR SPEC register */ 31 + #define SDHCI_VENDOR_SPEC 0xC0 32 + #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 33 + 34 + #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) 35 + /* 36 + * The CMDTYPE of the CMD register (offset 0xE) should be set to 37 + * "11" when the STOP CMD12 is issued on imx53 to abort one 38 + * open ended multi-blk IO. Otherwise the TC INT wouldn't 39 + * be generated. 40 + * In exact block transfer, the controller doesn't complete the 41 + * operations automatically as required at the end of the 42 + * transfer and remains on hold if the abort command is not sent. 43 + * As a result, the TC flag is not asserted and SW receives a timeout 44 + * exception. Bit1 of Vendor Spec register is used to fix it. 45 + */ 46 + #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) 47 + 48 + struct pltfm_imx_data { 49 + int flags; 50 + u32 scratchpad; 51 + }; 26 52 27 53 static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) 28 54 { ··· 60 34 61 35 static u32 esdhc_readl_le(struct sdhci_host *host, int reg) 62 36 { 37 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 38 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 39 + 63 40 /* fake CARD_PRESENT flag on mx25/35 */ 64 41 u32 val = readl(host->ioaddr + reg); 65 42 66 - if (unlikely(reg == SDHCI_PRESENT_STATE)) { 43 + if (unlikely((reg == SDHCI_PRESENT_STATE) 44 + && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { 67 45 struct esdhc_platform_data *boarddata = 68 46 host->mmc->parent->platform_data; 69 47 ··· 85 55 86 56 static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) 87 57 { 88 - if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) 58 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 59 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 60 + 61 + if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) 62 + && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) 89 63 /* 90 64 * these interrupts won't work with a custom card_detect gpio 91 65 * (only applied to mx25/35) 92 66 */ 93 67 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 68 + 69 + if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 70 + && (reg == SDHCI_INT_STATUS) 71 + && (val & SDHCI_INT_DATA_END))) { 72 + u32 v; 73 + v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); 74 + v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; 75 + writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); 76 + } 94 77 95 78 writel(val, host->ioaddr + reg); 96 79 } ··· 119 76 static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) 120 77 { 121 78 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 79 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 122 80 123 81 switch (reg) { 124 82 case SDHCI_TRANSFER_MODE: ··· 127 83 * Postpone this write, we must do it together with a 128 84 * command write that is down below. 129 85 */ 130 - pltfm_host->scratchpad = val; 86 + if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 87 + && (host->cmd->opcode == SD_IO_RW_EXTENDED) 88 + && (host->cmd->data->blocks > 1) 89 + && (host->cmd->data->flags & MMC_DATA_READ)) { 90 + u32 v; 91 + v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); 92 + v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; 93 + writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); 94 + } 95 + imx_data->scratchpad = val; 131 96 return; 132 97 case SDHCI_COMMAND: 133 - writel(val << 16 | pltfm_host->scratchpad, 98 + if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) 99 + && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) 100 + val |= SDHCI_CMD_ABORTCMD; 101 + writel(val << 16 | imx_data->scratchpad, 134 102 host->ioaddr + SDHCI_TRANSFER_MODE); 135 103 return; 136 104 case SDHCI_BLOCK_SIZE: ··· 202 146 } 203 147 204 148 static struct sdhci_ops sdhci_esdhc_ops = { 149 + .read_l = esdhc_readl_le, 205 150 .read_w = esdhc_readw_le, 151 + .write_l = esdhc_writel_le, 206 152 .write_w = esdhc_writew_le, 207 153 .write_b = esdhc_writeb_le, 208 154 .set_clock = esdhc_set_clock, ··· 226 168 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; 227 169 struct clk *clk; 228 170 int err; 171 + struct pltfm_imx_data *imx_data; 229 172 230 173 clk = clk_get(mmc_dev(host->mmc), NULL); 231 174 if (IS_ERR(clk)) { ··· 236 177 clk_enable(clk); 237 178 pltfm_host->clk = clk; 238 179 239 - if (cpu_is_mx35() || cpu_is_mx51()) 180 + imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); 181 + if (!imx_data) { 182 + clk_disable(pltfm_host->clk); 183 + clk_put(pltfm_host->clk); 184 + return -ENOMEM; 185 + } 186 + pltfm_host->priv = imx_data; 187 + 188 + if (!cpu_is_mx25()) 240 189 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 241 190 242 191 if (cpu_is_mx25() || cpu_is_mx35()) { ··· 253 186 /* write_protect can't be routed to controller, use gpio */ 254 187 sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; 255 188 } 189 + 190 + if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) 191 + imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; 256 192 257 193 if (boarddata) { 258 194 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); ··· 284 214 goto no_card_detect_irq; 285 215 } 286 216 287 - sdhci_esdhc_ops.write_l = esdhc_writel_le; 288 - sdhci_esdhc_ops.read_l = esdhc_readl_le; 217 + imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; 289 218 /* Now we have a working card_detect again */ 290 219 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 291 220 } ··· 296 227 no_card_detect_pin: 297 228 boarddata->cd_gpio = err; 298 229 not_supported: 230 + kfree(imx_data); 299 231 return 0; 300 232 } ··· 304 234 { 305 235 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 306 236 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; 237 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 307 238 308 239 if (boarddata && gpio_is_valid(boarddata->wp_gpio)) 309 240 gpio_free(boarddata->wp_gpio); ··· 318 247 319 248 clk_disable(pltfm_host->clk); 320 249 clk_put(pltfm_host->clk); 250 + kfree(imx_data); 321 251 } 322 252 323 253 struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
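The enabler for the esdhc rework above appears in the sdhci-pltfm.h hunk below: the single u32 scratchpad in struct sdhci_pltfm_host becomes a void *priv, so each platform glue can hang a real private structure off the host instead of sharing one word. A sketch of the allocation side; struct my_imx_priv is a placeholder and the local driver headers are assumptions:

#include <linux/slab.h>
#include "sdhci.h"		/* assumed local driver headers */
#include "sdhci-pltfm.h"

struct my_imx_priv {		/* placeholder private state */
	int flags;
	u32 scratchpad;
};

static int my_pltfm_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct my_imx_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	pltfm_host->priv = priv;	/* read back later as pltfm_host->priv */
	return 0;
}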
+1 -2
drivers/mmc/host/sdhci-esdhc.h
··· 23 23 SDHCI_QUIRK_NONSTANDARD_CLOCK | \ 24 24 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ 25 25 SDHCI_QUIRK_PIO_NEEDS_DELAY | \ 26 - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ 27 - SDHCI_QUIRK_NO_CARD_NO_RESET) 26 + SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 28 27 29 28 #define ESDHC_SYSTEM_CONTROL 0x2c 30 29 #define ESDHC_CLOCK_MASK 0x0000fff0
+2 -1
drivers/mmc/host/sdhci-of-esdhc.c
··· 74 74 75 75 struct sdhci_of_data sdhci_esdhc = { 76 76 /* card detection could be handled via GPIO */ 77 - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION, 77 + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION 78 + | SDHCI_QUIRK_NO_CARD_NO_RESET, 78 79 .ops = { 79 80 .read_l = sdhci_be32bs_readl, 80 81 .read_w = esdhc_readw,
+2 -4
drivers/mmc/host/sdhci-pci.c
··· 1016 1016 struct sdhci_pci_chip *chip; 1017 1017 struct sdhci_pci_slot *slot; 1018 1018 1019 - u8 slots, rev, first_bar; 1019 + u8 slots, first_bar; 1020 1020 int ret, i; 1021 1021 1022 1022 BUG_ON(pdev == NULL); 1023 1023 BUG_ON(ent == NULL); 1024 1024 1025 - pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); 1026 - 1027 1025 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", 1028 - (int)pdev->vendor, (int)pdev->device, (int)rev); 1026 + (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); 1029 1027 1030 1028 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); 1031 1029 if (ret)
+1 -1
drivers/mmc/host/sdhci-pltfm.h
··· 17 17 18 18 struct sdhci_pltfm_host { 19 19 struct clk *clk; 20 - u32 scratchpad; /* to handle quirks across io-accessor calls */ 20 + void *priv; /* to handle quirks across io-accessor calls */ 21 21 }; 22 22 23 23 extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
+1 -1
drivers/mmc/host/sdhci-spear.c
··· 50 50 /* val == 1 -> card removed, val == 0 -> card inserted */ 51 51 /* if card removed - set irq for low level, else vice versa */ 52 52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; 53 - set_irq_type(irq, gpio_irq_type); 53 + irq_set_irq_type(irq, gpio_irq_type); 54 54 55 55 if (sdhci->data->card_power_gpio >= 0) { 56 56 if (!sdhci->data->power_always_enb) {
+1
drivers/mmc/host/sdhci.h
··· 45 45 #define SDHCI_CMD_CRC 0x08 46 46 #define SDHCI_CMD_INDEX 0x10 47 47 #define SDHCI_CMD_DATA 0x20 48 + #define SDHCI_CMD_ABORTCMD 0xC0 48 49 49 50 #define SDHCI_CMD_RESP_NONE 0x00 50 51 #define SDHCI_CMD_RESP_LONG 0x01
+18 -1267
drivers/mmc/host/tmio_mmc.c
··· 1 1 /* 2 - * linux/drivers/mmc/tmio_mmc.c 2 + * linux/drivers/mmc/host/tmio_mmc.c 3 3 * 4 - * Copyright (C) 2004 Ian Molton 5 - * Copyright (C) 2007 Ian Molton 4 + * Copyright (C) 2007 Ian Molton 5 + * Copyright (C) 2004 Ian Molton 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify 8 8 * it under the terms of the GNU General Public License version 2 as ··· 11 11 * Driver for the MMC / SD / SDIO cell found in: 12 12 * 13 13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 14 - * 15 - * This driver draws mainly on scattered spec sheets, Reverse engineering 16 - * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit 17 - * support). (Further 4 bit support from a later datasheet). 18 - * 19 - * TODO: 20 - * Investigate using a workqueue for PIO transfers 21 - * Eliminate FIXMEs 22 - * SDIO support 23 - * Better Power management 24 - * Handle MMC errors better 25 - * double buffer support 26 - * 27 14 */ 28 15 29 - #include <linux/delay.h> 30 16 #include <linux/device.h> 31 - #include <linux/dmaengine.h> 32 - #include <linux/highmem.h> 33 - #include <linux/interrupt.h> 34 - #include <linux/io.h> 35 - #include <linux/irq.h> 36 17 #include <linux/mfd/core.h> 37 18 #include <linux/mfd/tmio.h> 38 19 #include <linux/mmc/host.h> 39 20 #include <linux/module.h> 40 21 #include <linux/pagemap.h> 41 22 #include <linux/scatterlist.h> 42 - #include <linux/workqueue.h> 43 - #include <linux/spinlock.h> 44 23 45 - #define CTL_SD_CMD 0x00 46 - #define CTL_ARG_REG 0x04 47 - #define CTL_STOP_INTERNAL_ACTION 0x08 48 - #define CTL_XFER_BLK_COUNT 0xa 49 - #define CTL_RESPONSE 0x0c 50 - #define CTL_STATUS 0x1c 51 - #define CTL_IRQ_MASK 0x20 52 - #define CTL_SD_CARD_CLK_CTL 0x24 53 - #define CTL_SD_XFER_LEN 0x26 54 - #define CTL_SD_MEM_CARD_OPT 0x28 55 - #define CTL_SD_ERROR_DETAIL_STATUS 0x2c 56 - #define CTL_SD_DATA_PORT 0x30 57 - #define CTL_TRANSACTION_CTL 0x34 58 - #define CTL_SDIO_STATUS 0x36 59 - #define CTL_SDIO_IRQ_MASK 0x38 60 - #define CTL_RESET_SD 0xe0 61 - #define CTL_SDIO_REGS 0x100 62 - #define CTL_CLK_AND_WAIT_CTL 0x138 63 - #define CTL_RESET_SDIO 0x1e0 64 - 65 - /* Definitions for values the CTRL_STATUS register can take. */ 66 - #define TMIO_STAT_CMDRESPEND 0x00000001 67 - #define TMIO_STAT_DATAEND 0x00000004 68 - #define TMIO_STAT_CARD_REMOVE 0x00000008 69 - #define TMIO_STAT_CARD_INSERT 0x00000010 70 - #define TMIO_STAT_SIGSTATE 0x00000020 71 - #define TMIO_STAT_WRPROTECT 0x00000080 72 - #define TMIO_STAT_CARD_REMOVE_A 0x00000100 73 - #define TMIO_STAT_CARD_INSERT_A 0x00000200 74 - #define TMIO_STAT_SIGSTATE_A 0x00000400 75 - #define TMIO_STAT_CMD_IDX_ERR 0x00010000 76 - #define TMIO_STAT_CRCFAIL 0x00020000 77 - #define TMIO_STAT_STOPBIT_ERR 0x00040000 78 - #define TMIO_STAT_DATATIMEOUT 0x00080000 79 - #define TMIO_STAT_RXOVERFLOW 0x00100000 80 - #define TMIO_STAT_TXUNDERRUN 0x00200000 81 - #define TMIO_STAT_CMDTIMEOUT 0x00400000 82 - #define TMIO_STAT_RXRDY 0x01000000 83 - #define TMIO_STAT_TXRQ 0x02000000 84 - #define TMIO_STAT_ILL_FUNC 0x20000000 85 - #define TMIO_STAT_CMD_BUSY 0x40000000 86 - #define TMIO_STAT_ILL_ACCESS 0x80000000 87 - 88 - /* Definitions for values the CTRL_SDIO_STATUS register can take. */ 89 - #define TMIO_SDIO_STAT_IOIRQ 0x0001 90 - #define TMIO_SDIO_STAT_EXPUB52 0x4000 91 - #define TMIO_SDIO_STAT_EXWT 0x8000 92 - #define TMIO_SDIO_MASK_ALL 0xc007 93 - 94 - /* Define some IRQ masks */ 95 - /* This is the mask used at reset by the chip */ 96 - #define TMIO_MASK_ALL 0x837f031d 97 - #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) 98 - #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) 99 - #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ 100 - TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 101 - #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 102 - 103 - #define enable_mmc_irqs(host, i) \ 104 - do { \ 105 - u32 mask;\ 106 - mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ 107 - mask &= ~((i) & TMIO_MASK_IRQ); \ 108 - sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ 109 - } while (0) 110 - 111 - #define disable_mmc_irqs(host, i) \ 112 - do { \ 113 - u32 mask;\ 114 - mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ 115 - mask |= ((i) & TMIO_MASK_IRQ); \ 116 - sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ 117 - } while (0) 118 - 119 - #define ack_mmc_irqs(host, i) \ 120 - do { \ 121 - sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ 122 - } while (0) 123 - 124 - /* This is arbitrary, just noone needed any higher alignment yet */ 125 - #define MAX_ALIGN 4 126 - 127 - struct tmio_mmc_host { 128 - void __iomem *ctl; 129 - unsigned long bus_shift; 130 - struct mmc_command *cmd; 131 - struct mmc_request *mrq; 132 - struct mmc_data *data; 133 - struct mmc_host *mmc; 134 - int irq; 135 - unsigned int sdio_irq_enabled; 136 - 137 - /* Callbacks for clock / power control */ 138 - void (*set_pwr)(struct platform_device *host, int state); 139 - void (*set_clk_div)(struct platform_device *host, int state); 140 - 141 - /* pio related stuff */ 142 - struct scatterlist *sg_ptr; 143 - struct scatterlist *sg_orig; 144 - unsigned int sg_len; 145 - unsigned int sg_off; 146 - 147 - struct platform_device *pdev; 148 - 149 - /* DMA support */ 150 - struct dma_chan *chan_rx; 151 - struct dma_chan *chan_tx; 152 - struct tasklet_struct dma_complete; 153 - struct tasklet_struct dma_issue; 154 - #ifdef CONFIG_TMIO_MMC_DMA 155 - u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); 156 - struct scatterlist bounce_sg; 157 - #endif 158 - 159 - /* Track lost interrupts */ 160 - struct delayed_work delayed_reset_work; 161 - spinlock_t lock; 162 - unsigned long last_req_ts; 163 - }; 164 - 165 - static void tmio_check_bounce_buffer(struct tmio_mmc_host *host); 166 - 167 - static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) 168 - { 169 - return readw(host->ctl + (addr << host->bus_shift)); 170 - } 171 - 172 - static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, 173 - u16 *buf, int count) 174 - { 175 - readsw(host->ctl + (addr << host->bus_shift), buf, count); 176 - } 177 - 178 - static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) 179 - { 180 - return readw(host->ctl + (addr << host->bus_shift)) | 181 - readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; 182 - } 183 - 184 - static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) 185 - { 186 - writew(val, host->ctl + (addr << host->bus_shift)); 187 - } 188 - 189 - static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, 190 - u16 *buf, int count) 191 - { 192 - writesw(host->ctl + (addr << host->bus_shift), buf, count); 193 - } 194 - 195 - static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) 196 - { 197 - writew(val, host->ctl + (addr << host->bus_shift)); 198 - writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 199 - } 200 - 201 - static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) 202 - { 203 - host->sg_len = data->sg_len; 204 - host->sg_ptr = data->sg; 205 - host->sg_orig = data->sg; 206 - host->sg_off = 0; 207 - } 208 - 209 - static int tmio_mmc_next_sg(struct tmio_mmc_host *host) 210 - { 211 - host->sg_ptr = sg_next(host->sg_ptr); 212 - host->sg_off = 0; 213 - return --host->sg_len; 214 - } 215 - 216 - static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 217 - { 218 - local_irq_save(*flags); 219 - return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 220 - } 221 - 222 - static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt) 223 - { 224 - kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); 225 - local_irq_restore(*flags); 226 - } 227 - 228 - #ifdef CONFIG_MMC_DEBUG 229 - 230 - #define STATUS_TO_TEXT(a, status, i) \ 231 - do { \ 232 - if (status & TMIO_STAT_##a) { \ 233 - if (i++) \ 234 - printk(" | "); \ 235 - printk(#a); \ 236 - } \ 237 - } while (0) 238 - 239 - void pr_debug_status(u32 status) 240 - { 241 - int i = 0; 242 - printk(KERN_DEBUG "status: %08x = ", status); 243 - STATUS_TO_TEXT(CARD_REMOVE, status, i); 244 - STATUS_TO_TEXT(CARD_INSERT, status, i); 245 - STATUS_TO_TEXT(SIGSTATE, status, i); 246 - STATUS_TO_TEXT(WRPROTECT, status, i); 247 - STATUS_TO_TEXT(CARD_REMOVE_A, status, i); 248 - STATUS_TO_TEXT(CARD_INSERT_A, status, i); 249 - STATUS_TO_TEXT(SIGSTATE_A, status, i); 250 - STATUS_TO_TEXT(CMD_IDX_ERR, status, i); 251 - STATUS_TO_TEXT(STOPBIT_ERR, status, i); 252 - STATUS_TO_TEXT(ILL_FUNC, status, i); 253 - STATUS_TO_TEXT(CMD_BUSY, status, i); 254 - STATUS_TO_TEXT(CMDRESPEND, status, i); 255 - STATUS_TO_TEXT(DATAEND, status, i); 256 - STATUS_TO_TEXT(CRCFAIL, status, i); 257 - STATUS_TO_TEXT(DATATIMEOUT, status, i); 258 - STATUS_TO_TEXT(CMDTIMEOUT, status, i); 259 - STATUS_TO_TEXT(RXOVERFLOW, status, i); 260 - STATUS_TO_TEXT(TXUNDERRUN, status, i); 261 - STATUS_TO_TEXT(RXRDY, status, i); 262 - STATUS_TO_TEXT(TXRQ, status, i); 263 - STATUS_TO_TEXT(ILL_ACCESS, status, i); 264 - printk("\n"); 265 - } 266 - 267 - #else 268 - #define pr_debug_status(s) do { } while (0) 269 - #endif 270 - 271 - static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 272 - { 273 - struct tmio_mmc_host *host = mmc_priv(mmc); 274 - 275 - if (enable) { 276 - host->sdio_irq_enabled = 1; 277 - sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); 278 - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, 279 - (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); 280 - } else { 281 - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); 282 - sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 283 - host->sdio_irq_enabled = 0; 284 - } 285 - } 286 - 287 - static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 288 - { 289 - u32 clk = 0, clock; 290 - 291 - if (new_clock) { 292 - for (clock = host->mmc->f_min, clk = 0x80000080; 293 - new_clock >= (clock<<1); clk >>= 1) 294 - clock <<= 1; 295 - clk |= 0x100; 296 - } 297 - 298 - if (host->set_clk_div) 299 - host->set_clk_div(host->pdev, (clk>>22) & 1); 300 - 301 - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); 302 - } 303 - 304 - static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 305 - { 306 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 307 - 308 - /* 309 - * Testing on sh-mobile showed that SDIO IRQs are unmasked when 310 - * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the 311 - * device IRQ here and restore the SDIO IRQ mask before 312 - * re-enabling the device IRQ. 313 - */ 314 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) 315 - disable_irq(host->irq); 316 - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); 317 - msleep(10); 318 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) { 319 - tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); 320 - enable_irq(host->irq); 321 - } 322 - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & 323 - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 324 - msleep(10); 325 - } 326 - 327 - static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 328 - { 329 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 330 - 331 - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | 332 - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 333 - msleep(10); 334 - /* see comment in tmio_mmc_clk_stop above */ 335 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) 336 - disable_irq(host->irq); 337 - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); 338 - msleep(10); 339 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) { 340 - tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); 341 - enable_irq(host->irq); 342 - } 343 - } 344 - 345 - static void reset(struct tmio_mmc_host *host) 346 - { 347 - /* FIXME - should we set stop clock reg here */ 348 - sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); 349 - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); 350 - msleep(10); 351 - sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); 352 - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); 353 - msleep(10); 354 - } 355 - 356 - static void tmio_mmc_reset_work(struct work_struct *work) 357 - { 358 - struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, 359 - delayed_reset_work.work); 360 - struct mmc_request *mrq; 361 - unsigned long flags; 362 - 363 - spin_lock_irqsave(&host->lock, flags); 364 - mrq = host->mrq; 365 - 366 - /* request already finished */ 367 - if (!mrq 368 - || time_is_after_jiffies(host->last_req_ts + 369 - msecs_to_jiffies(2000))) { 370 - spin_unlock_irqrestore(&host->lock, flags); 371 - return; 372 - } 373 - 374 - dev_warn(&host->pdev->dev, 375 - "timeout waiting for hardware interrupt (CMD%u)\n", 376 - mrq->cmd->opcode); 377 - 378 - if (host->data) 379 - host->data->error = -ETIMEDOUT; 380 - else if (host->cmd) 381 - host->cmd->error = -ETIMEDOUT; 382 - else 383 - mrq->cmd->error = -ETIMEDOUT; 384 - 385 - host->cmd = NULL; 386 - host->data = NULL; 387 - host->mrq = NULL; 388 - 389 - spin_unlock_irqrestore(&host->lock, flags); 390 - 391 - reset(host); 392 - 393 - mmc_request_done(host->mmc, mrq); 394 - } 395 - 396 - static void 397 - tmio_mmc_finish_request(struct tmio_mmc_host *host) 398 - { 399 - struct mmc_request *mrq = host->mrq; 400 - 401 - if (!mrq) 402 - return; 403 - 404 - host->mrq = NULL; 405 - host->cmd = NULL; 406 - host->data = NULL; 407 - 408 - cancel_delayed_work(&host->delayed_reset_work); 409 - 410 - mmc_request_done(host->mmc, mrq); 411 - } 412 - 413 - /* These are the bitmasks the tmio chip requires to implement the MMC response 414 - * types. Note that R1 and R6 are the same in this scheme. */ 415 - #define APP_CMD 0x0040 416 - #define RESP_NONE 0x0300 417 - #define RESP_R1 0x0400 418 - #define RESP_R1B 0x0500 419 - #define RESP_R2 0x0600 420 - #define RESP_R3 0x0700 421 - #define DATA_PRESENT 0x0800 422 - #define TRANSFER_READ 0x1000 423 - #define TRANSFER_MULTI 0x2000 424 - #define SECURITY_CMD 0x4000 425 - 426 - static int 427 - tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) 428 - { 429 - struct mmc_data *data = host->data; 430 - int c = cmd->opcode; 431 - 432 - /* Command 12 is handled by hardware */ 433 - if (cmd->opcode == 12 && !cmd->arg) { 434 - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); 435 - return 0; 436 - } 437 - 438 - switch (mmc_resp_type(cmd)) { 439 - case MMC_RSP_NONE: c |= RESP_NONE; break; 440 - case MMC_RSP_R1: c |= RESP_R1; break; 441 - case MMC_RSP_R1B: c |= RESP_R1B; break; 442 - case MMC_RSP_R2: c |= RESP_R2; break; 443 - case MMC_RSP_R3: c |= RESP_R3; break; 444 - default: 445 - pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); 446 - return -EINVAL; 447 - } 448 - 449 - host->cmd = cmd; 450 - 451 - /* FIXME - this seems to be ok commented out but the spec suggest this bit 452 - * should be set when issuing app commands. 453 - * if(cmd->flags & MMC_FLAG_ACMD) 454 - * c |= APP_CMD; 455 - */ 456 - if (data) { 457 - c |= DATA_PRESENT; 458 - if (data->blocks > 1) { 459 - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); 460 - c |= TRANSFER_MULTI; 461 - } 462 - if (data->flags & MMC_DATA_READ) 463 - c |= TRANSFER_READ; 464 - } 465 - 466 - enable_mmc_irqs(host, TMIO_MASK_CMD); 467 - 468 - /* Fire off the command */ 469 - sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); 470 - sd_ctrl_write16(host, CTL_SD_CMD, c); 471 - 472 - return 0; 473 - } 474 - 475 - /* 476 - * This chip always returns (at least?) as much data as you ask for. 477 - * I'm unsure what happens if you ask for less than a block. This should be 478 - * looked into to ensure that a funny length read doesnt hose the controller.
479 - */ 480 - static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 481 - { 482 - struct mmc_data *data = host->data; 483 - void *sg_virt; 484 - unsigned short *buf; 485 - unsigned int count; 486 - unsigned long flags; 487 - 488 - if (!data) { 489 - pr_debug("Spurious PIO IRQ\n"); 490 - return; 491 - } 492 - 493 - sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); 494 - buf = (unsigned short *)(sg_virt + host->sg_off); 495 - 496 - count = host->sg_ptr->length - host->sg_off; 497 - if (count > data->blksz) 498 - count = data->blksz; 499 - 500 - pr_debug("count: %08x offset: %08x flags %08x\n", 501 - count, host->sg_off, data->flags); 502 - 503 - /* Transfer the data */ 504 - if (data->flags & MMC_DATA_READ) 505 - sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 506 - else 507 - sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 508 - 509 - host->sg_off += count; 510 - 511 - tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); 512 - 513 - if (host->sg_off == host->sg_ptr->length) 514 - tmio_mmc_next_sg(host); 515 - 516 - return; 517 - } 518 - 519 - /* needs to be called with host->lock held */ 520 - static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) 521 - { 522 - struct mmc_data *data = host->data; 523 - struct mmc_command *stop; 524 - 525 - host->data = NULL; 526 - 527 - if (!data) { 528 - dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); 529 - return; 530 - } 531 - stop = data->stop; 532 - 533 - /* FIXME - return correct transfer count on errors */ 534 - if (!data->error) 535 - data->bytes_xfered = data->blocks * data->blksz; 536 - else 537 - data->bytes_xfered = 0; 538 - 539 - pr_debug("Completed data request\n"); 540 - 541 - /* 542 - * FIXME: other drivers allow an optional stop command of any given type 543 - * which we dont do, as the chip can auto generate them. 544 - * Perhaps we can be smarter about when to use auto CMD12 and 545 - * only issue the auto request when we know this is the desired 546 - * stop command, allowing fallback to the stop command the 547 - * upper layers expect. For now, we do what works. 548 - */ 549 - 550 - if (data->flags & MMC_DATA_READ) { 551 - if (!host->chan_rx) 552 - disable_mmc_irqs(host, TMIO_MASK_READOP); 553 - else 554 - tmio_check_bounce_buffer(host); 555 - dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", 556 - host->mrq); 557 - } else { 558 - if (!host->chan_tx) 559 - disable_mmc_irqs(host, TMIO_MASK_WRITEOP); 560 - dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", 561 - host->mrq); 562 - } 563 - 564 - if (stop) { 565 - if (stop->opcode == 12 && !stop->arg) 566 - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); 567 - else 568 - BUG(); 569 - } 570 - 571 - tmio_mmc_finish_request(host); 572 - } 573 - 574 - static void tmio_mmc_data_irq(struct tmio_mmc_host *host) 575 - { 576 - struct mmc_data *data; 577 - spin_lock(&host->lock); 578 - data = host->data; 579 - 580 - if (!data) 581 - goto out; 582 - 583 - if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { 584 - /* 585 - * Has all data been written out yet? Testing on SuperH showed, 586 - * that in most cases the first interrupt comes already with the 587 - * BUSY status bit clear, but on some operations, like mount or 588 - * in the beginning of a write / sync / umount, there is one 589 - * DATAEND interrupt with the BUSY bit set, in this cases 590 - * waiting for one more interrupt fixes the problem. 
591 - */ 592 - if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { 593 - disable_mmc_irqs(host, TMIO_STAT_DATAEND); 594 - tasklet_schedule(&host->dma_complete); 595 - } 596 - } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { 597 - disable_mmc_irqs(host, TMIO_STAT_DATAEND); 598 - tasklet_schedule(&host->dma_complete); 599 - } else { 600 - tmio_mmc_do_data_irq(host); 601 - } 602 - out: 603 - spin_unlock(&host->lock); 604 - } 605 - 606 - static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 607 - unsigned int stat) 608 - { 609 - struct mmc_command *cmd = host->cmd; 610 - int i, addr; 611 - 612 - spin_lock(&host->lock); 613 - 614 - if (!host->cmd) { 615 - pr_debug("Spurious CMD irq\n"); 616 - goto out; 617 - } 618 - 619 - host->cmd = NULL; 620 - 621 - /* This controller is sicker than the PXA one. Not only do we need to 622 - * drop the top 8 bits of the first response word, we also need to 623 - * modify the order of the response for short response command types. 624 - */ 625 - 626 - for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) 627 - cmd->resp[i] = sd_ctrl_read32(host, addr); 628 - 629 - if (cmd->flags & MMC_RSP_136) { 630 - cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); 631 - cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); 632 - cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); 633 - cmd->resp[3] <<= 8; 634 - } else if (cmd->flags & MMC_RSP_R3) { 635 - cmd->resp[0] = cmd->resp[3]; 636 - } 637 - 638 - if (stat & TMIO_STAT_CMDTIMEOUT) 639 - cmd->error = -ETIMEDOUT; 640 - else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) 641 - cmd->error = -EILSEQ; 642 - 643 - /* If there is data to handle we enable data IRQs here, and 644 - * we will ultimatley finish the request in the data_end handler. 645 - * If theres no data or we encountered an error, finish now. 646 - */ 647 - if (host->data && !cmd->error) { 648 - if (host->data->flags & MMC_DATA_READ) { 649 - if (!host->chan_rx) 650 - enable_mmc_irqs(host, TMIO_MASK_READOP); 651 - } else { 652 - if (!host->chan_tx) 653 - enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 654 - else 655 - tasklet_schedule(&host->dma_issue); 656 - } 657 - } else { 658 - tmio_mmc_finish_request(host); 659 - } 660 - 661 - out: 662 - spin_unlock(&host->lock); 663 - 664 - return; 665 - } 666 - 667 - static irqreturn_t tmio_mmc_irq(int irq, void *devid) 668 - { 669 - struct tmio_mmc_host *host = devid; 670 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 671 - unsigned int ireg, irq_mask, status; 672 - unsigned int sdio_ireg, sdio_irq_mask, sdio_status; 673 - 674 - pr_debug("MMC IRQ begin\n"); 675 - 676 - status = sd_ctrl_read32(host, CTL_STATUS); 677 - irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 678 - ireg = status & TMIO_MASK_IRQ & ~irq_mask; 679 - 680 - sdio_ireg = 0; 681 - if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { 682 - sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); 683 - sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); 684 - sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; 685 - 686 - sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); 687 - 688 - if (sdio_ireg && !host->sdio_irq_enabled) { 689 - pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 
0x%04x 0x%04x 0x%04x\n", 690 - sdio_status, sdio_irq_mask, sdio_ireg); 691 - tmio_mmc_enable_sdio_irq(host->mmc, 0); 692 - goto out; 693 - } 694 - 695 - if (host->mmc->caps & MMC_CAP_SDIO_IRQ && 696 - sdio_ireg & TMIO_SDIO_STAT_IOIRQ) 697 - mmc_signal_sdio_irq(host->mmc); 698 - 699 - if (sdio_ireg) 700 - goto out; 701 - } 702 - 703 - pr_debug_status(status); 704 - pr_debug_status(ireg); 705 - 706 - if (!ireg) { 707 - disable_mmc_irqs(host, status & ~irq_mask); 708 - 709 - pr_warning("tmio_mmc: Spurious irq, disabling! " 710 - "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 711 - pr_debug_status(status); 712 - 713 - goto out; 714 - } 715 - 716 - while (ireg) { 717 - /* Card insert / remove attempts */ 718 - if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { 719 - ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | 720 - TMIO_STAT_CARD_REMOVE); 721 - mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 722 - } 723 - 724 - /* CRC and other errors */ 725 - /* if (ireg & TMIO_STAT_ERR_IRQ) 726 - * handled |= tmio_error_irq(host, irq, stat); 727 - */ 728 - 729 - /* Command completion */ 730 - if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { 731 - ack_mmc_irqs(host, 732 - TMIO_STAT_CMDRESPEND | 733 - TMIO_STAT_CMDTIMEOUT); 734 - tmio_mmc_cmd_irq(host, status); 735 - } 736 - 737 - /* Data transfer */ 738 - if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { 739 - ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); 740 - tmio_mmc_pio_irq(host); 741 - } 742 - 743 - /* Data transfer completion */ 744 - if (ireg & TMIO_STAT_DATAEND) { 745 - ack_mmc_irqs(host, TMIO_STAT_DATAEND); 746 - tmio_mmc_data_irq(host); 747 - } 748 - 749 - /* Check status - keep going until we've handled it all */ 750 - status = sd_ctrl_read32(host, CTL_STATUS); 751 - irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 752 - ireg = status & TMIO_MASK_IRQ & ~irq_mask; 753 - 754 - pr_debug("Status at end of loop: %08x\n", status); 755 - pr_debug_status(status); 756 - } 757 - pr_debug("MMC IRQ end\n"); 758 - 759 - out: 760 - return IRQ_HANDLED; 761 - } 762 - 763 - #ifdef CONFIG_TMIO_MMC_DMA 764 - static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) 765 - { 766 - if (host->sg_ptr == &host->bounce_sg) { 767 - unsigned long flags; 768 - void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); 769 - memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); 770 - tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); 771 - } 772 - } 773 - 774 - static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) 775 - { 776 - #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) 777 - /* Switch DMA mode on or off - SuperH specific? */ 778 - sd_ctrl_write16(host, 0xd8, enable ? 
2 : 0); 779 - #endif 780 - } 781 - 782 - static void tmio_dma_complete(void *arg) 783 - { 784 - struct tmio_mmc_host *host = arg; 785 - 786 - dev_dbg(&host->pdev->dev, "Command completed\n"); 787 - 788 - if (!host->data) 789 - dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); 790 - else 791 - enable_mmc_irqs(host, TMIO_STAT_DATAEND); 792 - } 793 - 794 - static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) 795 - { 796 - struct scatterlist *sg = host->sg_ptr, *sg_tmp; 797 - struct dma_async_tx_descriptor *desc = NULL; 798 - struct dma_chan *chan = host->chan_rx; 799 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 800 - dma_cookie_t cookie; 801 - int ret, i; 802 - bool aligned = true, multiple = true; 803 - unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 804 - 805 - for_each_sg(sg, sg_tmp, host->sg_len, i) { 806 - if (sg_tmp->offset & align) 807 - aligned = false; 808 - if (sg_tmp->length & align) { 809 - multiple = false; 810 - break; 811 - } 812 - } 813 - 814 - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 815 - align >= MAX_ALIGN)) || !multiple) { 816 - ret = -EINVAL; 817 - goto pio; 818 - } 819 - 820 - /* The only sg element can be unaligned, use our bounce buffer then */ 821 - if (!aligned) { 822 - sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 823 - host->sg_ptr = &host->bounce_sg; 824 - sg = host->sg_ptr; 825 - } 826 - 827 - ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 828 - if (ret > 0) 829 - desc = chan->device->device_prep_slave_sg(chan, sg, ret, 830 - DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 831 - 832 - if (desc) { 833 - desc->callback = tmio_dma_complete; 834 - desc->callback_param = host; 835 - cookie = dmaengine_submit(desc); 836 - dma_async_issue_pending(chan); 837 - } 838 - dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 839 - __func__, host->sg_len, ret, cookie, host->mrq); 840 - 841 - pio: 842 - if (!desc) { 843 - /* DMA failed, fall back to PIO */ 844 - if (ret >= 0) 845 - ret = -EIO; 846 - host->chan_rx = NULL; 847 - dma_release_channel(chan); 848 - /* Free the Tx channel too */ 849 - chan = host->chan_tx; 850 - if (chan) { 851 - host->chan_tx = NULL; 852 - dma_release_channel(chan); 853 - } 854 - dev_warn(&host->pdev->dev, 855 - "DMA failed: %d, falling back to PIO\n", ret); 856 - tmio_mmc_enable_dma(host, false); 857 - } 858 - 859 - dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, 860 - desc, cookie, host->sg_len); 861 - } 862 - 863 - static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) 864 - { 865 - struct scatterlist *sg = host->sg_ptr, *sg_tmp; 866 - struct dma_async_tx_descriptor *desc = NULL; 867 - struct dma_chan *chan = host->chan_tx; 868 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 869 - dma_cookie_t cookie; 870 - int ret, i; 871 - bool aligned = true, multiple = true; 872 - unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 873 - 874 - for_each_sg(sg, sg_tmp, host->sg_len, i) { 875 - if (sg_tmp->offset & align) 876 - aligned = false; 877 - if (sg_tmp->length & align) { 878 - multiple = false; 879 - break; 880 - } 881 - } 882 - 883 - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 884 - align >= MAX_ALIGN)) || !multiple) { 885 - ret = -EINVAL; 886 - goto pio; 887 - } 888 - 889 - /* The only sg element can be unaligned, use our bounce buffer then */ 890 - if (!aligned) { 891 - unsigned long flags; 892 - void *sg_vaddr = tmio_mmc_kmap_atomic(sg, 
&flags); 893 - sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 894 - memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); 895 - tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); 896 - host->sg_ptr = &host->bounce_sg; 897 - sg = host->sg_ptr; 898 - } 899 - 900 - ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 901 - if (ret > 0) 902 - desc = chan->device->device_prep_slave_sg(chan, sg, ret, 903 - DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 904 - 905 - if (desc) { 906 - desc->callback = tmio_dma_complete; 907 - desc->callback_param = host; 908 - cookie = dmaengine_submit(desc); 909 - } 910 - dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 911 - __func__, host->sg_len, ret, cookie, host->mrq); 912 - 913 - pio: 914 - if (!desc) { 915 - /* DMA failed, fall back to PIO */ 916 - if (ret >= 0) 917 - ret = -EIO; 918 - host->chan_tx = NULL; 919 - dma_release_channel(chan); 920 - /* Free the Rx channel too */ 921 - chan = host->chan_rx; 922 - if (chan) { 923 - host->chan_rx = NULL; 924 - dma_release_channel(chan); 925 - } 926 - dev_warn(&host->pdev->dev, 927 - "DMA failed: %d, falling back to PIO\n", ret); 928 - tmio_mmc_enable_dma(host, false); 929 - } 930 - 931 - dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, 932 - desc, cookie); 933 - } 934 - 935 - static void tmio_mmc_start_dma(struct tmio_mmc_host *host, 936 - struct mmc_data *data) 937 - { 938 - if (data->flags & MMC_DATA_READ) { 939 - if (host->chan_rx) 940 - tmio_mmc_start_dma_rx(host); 941 - } else { 942 - if (host->chan_tx) 943 - tmio_mmc_start_dma_tx(host); 944 - } 945 - } 946 - 947 - static void tmio_issue_tasklet_fn(unsigned long priv) 948 - { 949 - struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; 950 - struct dma_chan *chan = host->chan_tx; 951 - 952 - dma_async_issue_pending(chan); 953 - } 954 - 955 - static void tmio_tasklet_fn(unsigned long arg) 956 - { 957 - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; 958 - unsigned long flags; 959 - 960 - spin_lock_irqsave(&host->lock, flags); 961 - 962 - if (!host->data) 963 - goto out; 964 - 965 - if (host->data->flags & MMC_DATA_READ) 966 - dma_unmap_sg(host->chan_rx->device->dev, 967 - host->sg_ptr, host->sg_len, 968 - DMA_FROM_DEVICE); 969 - else 970 - dma_unmap_sg(host->chan_tx->device->dev, 971 - host->sg_ptr, host->sg_len, 972 - DMA_TO_DEVICE); 973 - 974 - tmio_mmc_do_data_irq(host); 975 - out: 976 - spin_unlock_irqrestore(&host->lock, flags); 977 - } 978 - 979 - /* It might be necessary to make filter MFD specific */ 980 - static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) 981 - { 982 - dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); 983 - chan->private = arg; 984 - return true; 985 - } 986 - 987 - static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 988 - struct tmio_mmc_data *pdata) 989 - { 990 - /* We can only either use DMA for both Tx and Rx or not use it at all */ 991 - if (pdata->dma) { 992 - dma_cap_mask_t mask; 993 - 994 - dma_cap_zero(mask); 995 - dma_cap_set(DMA_SLAVE, mask); 996 - 997 - host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, 998 - pdata->dma->chan_priv_tx); 999 - dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, 1000 - host->chan_tx); 1001 - 1002 - if (!host->chan_tx) 1003 - return; 1004 - 1005 - host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, 1006 - pdata->dma->chan_priv_rx); 1007 - dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, 1008 - host->chan_rx); 1009 - 1010 - if 
(!host->chan_rx) { 1011 - dma_release_channel(host->chan_tx); 1012 - host->chan_tx = NULL; 1013 - return; 1014 - } 1015 - 1016 - tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); 1017 - tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); 1018 - 1019 - tmio_mmc_enable_dma(host, true); 1020 - } 1021 - } 1022 - 1023 - static void tmio_mmc_release_dma(struct tmio_mmc_host *host) 1024 - { 1025 - if (host->chan_tx) { 1026 - struct dma_chan *chan = host->chan_tx; 1027 - host->chan_tx = NULL; 1028 - dma_release_channel(chan); 1029 - } 1030 - if (host->chan_rx) { 1031 - struct dma_chan *chan = host->chan_rx; 1032 - host->chan_rx = NULL; 1033 - dma_release_channel(chan); 1034 - } 1035 - } 1036 - #else 1037 - static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) 1038 - { 1039 - } 1040 - 1041 - static void tmio_mmc_start_dma(struct tmio_mmc_host *host, 1042 - struct mmc_data *data) 1043 - { 1044 - } 1045 - 1046 - static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 1047 - struct tmio_mmc_data *pdata) 1048 - { 1049 - host->chan_tx = NULL; 1050 - host->chan_rx = NULL; 1051 - } 1052 - 1053 - static void tmio_mmc_release_dma(struct tmio_mmc_host *host) 1054 - { 1055 - } 1056 - #endif 1057 - 1058 - static int tmio_mmc_start_data(struct tmio_mmc_host *host, 1059 - struct mmc_data *data) 1060 - { 1061 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 1062 - 1063 - pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 1064 - data->blksz, data->blocks); 1065 - 1066 - /* Some hardware cannot perform 2 byte requests in 4 bit mode */ 1067 - if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { 1068 - int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; 1069 - 1070 - if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { 1071 - pr_err("%s: %d byte block unsupported in 4 bit mode\n", 1072 - mmc_hostname(host->mmc), data->blksz); 1073 - return -EINVAL; 1074 - } 1075 - } 1076 - 1077 - tmio_mmc_init_sg(host, data); 1078 - host->data = data; 1079 - 1080 - /* Set transfer length / blocksize */ 1081 - sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 1082 - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 1083 - 1084 - tmio_mmc_start_dma(host, data); 1085 - 1086 - return 0; 1087 - } 1088 - 1089 - /* Process requests from the MMC layer */ 1090 - static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 1091 - { 1092 - struct tmio_mmc_host *host = mmc_priv(mmc); 1093 - int ret; 1094 - 1095 - if (host->mrq) 1096 - pr_debug("request not null\n"); 1097 - 1098 - host->last_req_ts = jiffies; 1099 - wmb(); 1100 - host->mrq = mrq; 1101 - 1102 - if (mrq->data) { 1103 - ret = tmio_mmc_start_data(host, mrq->data); 1104 - if (ret) 1105 - goto fail; 1106 - } 1107 - 1108 - ret = tmio_mmc_start_command(host, mrq->cmd); 1109 - if (!ret) { 1110 - schedule_delayed_work(&host->delayed_reset_work, 1111 - msecs_to_jiffies(2000)); 1112 - return; 1113 - } 1114 - 1115 - fail: 1116 - host->mrq = NULL; 1117 - mrq->cmd->error = ret; 1118 - mmc_request_done(mmc, mrq); 1119 - } 1120 - 1121 - /* Set MMC clock / power. 1122 - * Note: This controller uses a simple divider scheme therefore it cannot 1123 - * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as 1124 - * MMC wont run that fast, it has to be clocked at 12MHz which is the next 1125 - * slowest setting. 
1126 - */ 1127 - static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1128 - { 1129 - struct tmio_mmc_host *host = mmc_priv(mmc); 1130 - 1131 - if (ios->clock) 1132 - tmio_mmc_set_clock(host, ios->clock); 1133 - 1134 - /* Power sequence - OFF -> ON -> UP */ 1135 - switch (ios->power_mode) { 1136 - case MMC_POWER_OFF: /* power down SD bus */ 1137 - if (host->set_pwr) 1138 - host->set_pwr(host->pdev, 0); 1139 - tmio_mmc_clk_stop(host); 1140 - break; 1141 - case MMC_POWER_ON: /* power up SD bus */ 1142 - if (host->set_pwr) 1143 - host->set_pwr(host->pdev, 1); 1144 - break; 1145 - case MMC_POWER_UP: /* start bus clock */ 1146 - tmio_mmc_clk_start(host); 1147 - break; 1148 - } 1149 - 1150 - switch (ios->bus_width) { 1151 - case MMC_BUS_WIDTH_1: 1152 - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); 1153 - break; 1154 - case MMC_BUS_WIDTH_4: 1155 - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); 1156 - break; 1157 - } 1158 - 1159 - /* Let things settle. delay taken from winCE driver */ 1160 - udelay(140); 1161 - } 1162 - 1163 - static int tmio_mmc_get_ro(struct mmc_host *mmc) 1164 - { 1165 - struct tmio_mmc_host *host = mmc_priv(mmc); 1166 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 1167 - 1168 - return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || 1169 - (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1; 1170 - } 1171 - 1172 - static int tmio_mmc_get_cd(struct mmc_host *mmc) 1173 - { 1174 - struct tmio_mmc_host *host = mmc_priv(mmc); 1175 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 1176 - 1177 - if (!pdata->get_cd) 1178 - return -ENOSYS; 1179 - else 1180 - return pdata->get_cd(host->pdev); 1181 - } 1182 - 1183 - static const struct mmc_host_ops tmio_mmc_ops = { 1184 - .request = tmio_mmc_request, 1185 - .set_ios = tmio_mmc_set_ios, 1186 - .get_ro = tmio_mmc_get_ro, 1187 - .get_cd = tmio_mmc_get_cd, 1188 - .enable_sdio_irq = tmio_mmc_enable_sdio_irq, 1189 - }; 24 + #include "tmio_mmc.h" 1190 25 1191 26 #ifdef CONFIG_PM 1192 27 static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) ··· 62 1227 #define tmio_mmc_resume NULL 63 1228 #endif 64 1229 65 - static int __devinit tmio_mmc_probe(struct platform_device *dev) 1230 + static int __devinit tmio_mmc_probe(struct platform_device *pdev) 66 1231 { 67 - const struct mfd_cell *cell = mfd_get_cell(dev); 1232 + const struct mfd_cell *cell = mfd_get_cell(pdev); 68 1233 struct tmio_mmc_data *pdata; 69 - struct resource *res_ctl; 70 1234 struct tmio_mmc_host *host; 71 - struct mmc_host *mmc; 72 1235 int ret = -EINVAL; 73 - u32 irq_mask = TMIO_MASK_CMD; 74 1236 75 - if (dev->num_resources != 2) 1237 + if (pdev->num_resources != 2) 76 1238 goto out; 77 1239 78 - res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); 79 - if (!res_ctl) 80 - goto out; 81 - 82 - pdata = mfd_get_data(dev); 1240 + pdata = mfd_get_data(pdev); 83 1241 if (!pdata || !pdata->hclk) 84 1242 goto out; 85 1243 86 - ret = -ENOMEM; 87 - 88 - mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); 89 - if (!mmc) 90 - goto out; 91 - 92 - host = mmc_priv(mmc); 93 - host->mmc = mmc; 94 - host->pdev = dev; 95 - platform_set_drvdata(dev, mmc); 96 - 97 - host->set_pwr = pdata->set_pwr; 98 - host->set_clk_div = pdata->set_clk_div; 99 - 100 - /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ 101 - host->bus_shift = resource_size(res_ctl) >> 10; 102 - 103 - host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); 104 - if (!host->ctl) 105 - goto host_free; 106 - 107 - 
mmc->ops = &tmio_mmc_ops; 108 - mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; 109 - mmc->f_max = pdata->hclk; 110 - mmc->f_min = mmc->f_max / 512; 111 - mmc->max_segs = 32; 112 - mmc->max_blk_size = 512; 113 - mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * 114 - mmc->max_segs; 115 - mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 116 - mmc->max_seg_size = mmc->max_req_size; 117 - if (pdata->ocr_mask) 118 - mmc->ocr_avail = pdata->ocr_mask; 119 - else 120 - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 121 - 122 1244 /* Tell the MFD core we are ready to be enabled */ 123 1245 if (cell->enable) { 124 - ret = cell->enable(dev); 1246 + ret = cell->enable(pdev); 125 1247 if (ret) 126 - goto unmap_ctl; 1248 + goto out; 127 1249 } 128 1250 129 - tmio_mmc_clk_stop(host); 130 - reset(host); 131 - 132 - ret = platform_get_irq(dev, 0); 133 - if (ret >= 0) 134 - host->irq = ret; 135 - else 136 - goto cell_disable; 137 - 138 - disable_mmc_irqs(host, TMIO_MASK_ALL); 139 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) 140 - tmio_mmc_enable_sdio_irq(mmc, 0); 141 - 142 - ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | 143 - IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); 1251 + ret = tmio_mmc_host_probe(&host, pdev, pdata); 144 1252 if (ret) 145 1253 goto cell_disable; 146 1254 147 - spin_lock_init(&host->lock); 148 - 149 - /* Init delayed work for request timeouts */ 150 - INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work); 151 - 152 - /* See if we also get DMA */ 153 - tmio_mmc_request_dma(host, pdata); 154 - 155 - mmc_add_host(mmc); 156 - 157 1255 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 158 1256 (unsigned long)host->ctl, host->irq); 159 - 160 - /* Unmask the IRQs we want to know about */ 161 - if (!host->chan_rx) 162 - irq_mask |= TMIO_MASK_READOP; 163 - if (!host->chan_tx) 164 - irq_mask |= TMIO_MASK_WRITEOP; 165 - enable_mmc_irqs(host, irq_mask); 166 1257 167 1258 return 0; 168 1259 169 1260 cell_disable: 170 1261 if (cell->disable) 171 - cell->disable(dev); 172 - unmap_ctl: 173 - iounmap(host->ctl); 174 - host_free: 175 - mmc_free_host(mmc); 1262 + cell->disable(pdev); 176 1263 out: 177 1264 return ret; 178 1265 } 179 1266 180 - static int __devexit tmio_mmc_remove(struct platform_device *dev) 1267 + static int __devexit tmio_mmc_remove(struct platform_device *pdev) 181 1268 { 182 - const struct mfd_cell *cell = mfd_get_cell(dev); 183 - struct mmc_host *mmc = platform_get_drvdata(dev); 1269 + const struct mfd_cell *cell = mfd_get_cell(pdev); 1270 + struct mmc_host *mmc = platform_get_drvdata(pdev); 184 1271 185 - platform_set_drvdata(dev, NULL); 1272 + platform_set_drvdata(pdev, NULL); 186 1273 187 1274 if (mmc) { 188 - struct tmio_mmc_host *host = mmc_priv(mmc); 189 - mmc_remove_host(mmc); 190 - cancel_delayed_work_sync(&host->delayed_reset_work); 191 - tmio_mmc_release_dma(host); 192 - free_irq(host->irq, host); 1275 + tmio_mmc_host_remove(mmc_priv(mmc)); 193 1276 if (cell->disable) 194 - cell->disable(dev); 195 - iounmap(host->ctl); 196 - mmc_free_host(mmc); 1277 + cell->disable(pdev); 197 1278 } 198 1279 199 1280 return 0;
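
[Editor's note: after this patch, drivers/mmc/host/tmio_mmc.c is reduced to MFD glue — it validates the cell's resources, enables the cell, and delegates everything else to the shared core via tmio_mmc_host_probe() and tmio_mmc_host_remove() (declared in tmio_mmc.h below). A minimal sketch of how another platform driver could sit on the same core follows; sdhi_probe()/sdhi_remove() and the platform_data wiring are hypothetical, only the two tmio_mmc_host_* calls and struct tmio_mmc_data come from this patch, and it assumes the core sets the platform drvdata to the mmc_host, as the remove path of the glue above relies on.]

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/mfd/tmio.h>
    #include <linux/mmc/host.h>
    #include <linux/platform_device.h>

    #include "tmio_mmc.h"

    static int __devinit sdhi_probe(struct platform_device *pdev)
    {
    	struct tmio_mmc_data *pdata = pdev->dev.platform_data;	/* hypothetical wiring */
    	struct tmio_mmc_host *host;

    	if (!pdata || !pdata->hclk)
    		return -EINVAL;

    	/* ioremap, IRQ request and mmc_add_host() all happen in the core */
    	return tmio_mmc_host_probe(&host, pdev, pdata);
    }

    static int __devexit sdhi_remove(struct platform_device *pdev)
    {
    	/* assumes the core set drvdata to the mmc_host, as the MFD
    	 * glue's remove path above relies on */
    	struct mmc_host *mmc = platform_get_drvdata(pdev);

    	if (mmc)
    		tmio_mmc_host_remove(mmc_priv(mmc));
    	platform_set_drvdata(pdev, NULL);
    	return 0;
    }
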
+123
drivers/mmc/host/tmio_mmc.h
··· 1 + /* 2 + * linux/drivers/mmc/host/tmio_mmc.h 3 + * 4 + * Copyright (C) 2007 Ian Molton 5 + * Copyright (C) 2004 Ian Molton 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * Driver for the MMC / SD / SDIO cell found in: 12 + * 13 + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 14 + */ 15 + 16 + #ifndef TMIO_MMC_H 17 + #define TMIO_MMC_H 18 + 19 + #include <linux/highmem.h> 20 + #include <linux/mmc/tmio.h> 21 + #include <linux/pagemap.h> 22 + 23 + /* Definitions for values the CTRL_SDIO_STATUS register can take. */ 24 + #define TMIO_SDIO_STAT_IOIRQ 0x0001 25 + #define TMIO_SDIO_STAT_EXPUB52 0x4000 26 + #define TMIO_SDIO_STAT_EXWT 0x8000 27 + #define TMIO_SDIO_MASK_ALL 0xc007 28 + 29 + /* Define some IRQ masks */ 30 + /* This is the mask used at reset by the chip */ 31 + #define TMIO_MASK_ALL 0x837f031d 32 + #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) 33 + #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) 34 + #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ 35 + TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 36 + #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 37 + 38 + struct tmio_mmc_data; 39 + 40 + struct tmio_mmc_host { 41 + void __iomem *ctl; 42 + unsigned long bus_shift; 43 + struct mmc_command *cmd; 44 + struct mmc_request *mrq; 45 + struct mmc_data *data; 46 + struct mmc_host *mmc; 47 + int irq; 48 + unsigned int sdio_irq_enabled; 49 + 50 + /* Callbacks for clock / power control */ 51 + void (*set_pwr)(struct platform_device *host, int state); 52 + void (*set_clk_div)(struct platform_device *host, int state); 53 + 54 + /* pio related stuff */ 55 + struct scatterlist *sg_ptr; 56 + struct scatterlist *sg_orig; 57 + unsigned int sg_len; 58 + unsigned int sg_off; 59 + 60 + struct platform_device *pdev; 61 + struct tmio_mmc_data *pdata; 62 + 63 + /* DMA support */ 64 + bool force_pio; 65 + struct dma_chan *chan_rx; 66 + struct dma_chan *chan_tx; 67 + struct tasklet_struct dma_complete; 68 + struct tasklet_struct dma_issue; 69 + struct scatterlist bounce_sg; 70 + u8 *bounce_buf; 71 + 72 + /* Track lost interrupts */ 73 + struct delayed_work delayed_reset_work; 74 + spinlock_t lock; 75 + unsigned long last_req_ts; 76 + }; 77 + 78 + int tmio_mmc_host_probe(struct tmio_mmc_host **host, 79 + struct platform_device *pdev, 80 + struct tmio_mmc_data *pdata); 81 + void tmio_mmc_host_remove(struct tmio_mmc_host *host); 82 + void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); 83 + 84 + void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 85 + void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 86 + 87 + static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, 88 + unsigned long *flags) 89 + { 90 + local_irq_save(*flags); 91 + return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 92 + } 93 + 94 + static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, 95 + unsigned long *flags, void *virt) 96 + { 97 + kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); 98 + local_irq_restore(*flags); 99 + } 100 + 101 + #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) 102 + void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); 103 + void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); 104 + void tmio_mmc_release_dma(struct tmio_mmc_host *host); 105 + 
#else 106 + static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, 107 + struct mmc_data *data) 108 + { 109 + } 110 + 111 + static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, 112 + struct tmio_mmc_data *pdata) 113 + { 114 + host->chan_tx = NULL; 115 + host->chan_rx = NULL; 116 + } 117 + 118 + static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) 119 + { 120 + } 121 + #endif 122 + 123 + #endif
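
[Editor's note: the IRQ helpers exported by this header operate on CTL_IRQ_MASK, where a set bit suppresses the corresponding interrupt — tmio_mmc_enable_mmc_irqs() clears the requested bits and tmio_mmc_disable_mmc_irqs() sets them, each clamped with "& TMIO_MASK_IRQ" so bits outside the known interrupt sources are never touched. A stand-alone model of that arithmetic; the two TMIO_STAT_* values are taken from <linux/mmc/tmio.h> in this tree, everything else is illustration only.]

    #include <stdint.h>
    #include <stdio.h>

    #define TMIO_MASK_ALL		0x837f031du	/* mask register reset value */
    #define TMIO_STAT_DATAEND	0x00000004u	/* per <linux/mmc/tmio.h> */
    #define TMIO_STAT_RXRDY		0x01000000u	/* per <linux/mmc/tmio.h> */
    #define TMIO_MASK_READOP	(TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)

    int main(void)
    {
    	uint32_t mask = TMIO_MASK_ALL;	/* everything masked, as after reset */

    	mask &= ~TMIO_MASK_READOP;	/* "enable": unmask RXRDY + DATAEND */
    	printf("enabled READOP:  %08x\n", mask);	/* 827f0319 */

    	mask |= TMIO_MASK_READOP;	/* "disable": mask them again */
    	printf("disabled READOP: %08x\n", mask);	/* 837f031d */
    	return 0;
    }
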
+317
drivers/mmc/host/tmio_mmc_dma.c
··· 1 + /* 2 + * linux/drivers/mmc/tmio_mmc_dma.c 3 + * 4 + * Copyright (C) 2010-2011 Guennadi Liakhovetski 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * DMA function for TMIO MMC implementations 11 + */ 12 + 13 + #include <linux/device.h> 14 + #include <linux/dmaengine.h> 15 + #include <linux/mfd/tmio.h> 16 + #include <linux/mmc/host.h> 17 + #include <linux/mmc/tmio.h> 18 + #include <linux/pagemap.h> 19 + #include <linux/scatterlist.h> 20 + 21 + #include "tmio_mmc.h" 22 + 23 + #define TMIO_MMC_MIN_DMA_LEN 8 24 + 25 + static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) 26 + { 27 + #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) 28 + /* Switch DMA mode on or off - SuperH specific? */ 29 + writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); 30 + #endif 31 + } 32 + 33 + static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) 34 + { 35 + struct scatterlist *sg = host->sg_ptr, *sg_tmp; 36 + struct dma_async_tx_descriptor *desc = NULL; 37 + struct dma_chan *chan = host->chan_rx; 38 + struct tmio_mmc_data *pdata = host->pdata; 39 + dma_cookie_t cookie; 40 + int ret, i; 41 + bool aligned = true, multiple = true; 42 + unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 43 + 44 + for_each_sg(sg, sg_tmp, host->sg_len, i) { 45 + if (sg_tmp->offset & align) 46 + aligned = false; 47 + if (sg_tmp->length & align) { 48 + multiple = false; 49 + break; 50 + } 51 + } 52 + 53 + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 54 + (align & PAGE_MASK))) || !multiple) { 55 + ret = -EINVAL; 56 + goto pio; 57 + } 58 + 59 + if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 60 + host->force_pio = true; 61 + return; 62 + } 63 + 64 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); 65 + 66 + /* The only sg element can be unaligned, use our bounce buffer then */ 67 + if (!aligned) { 68 + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 69 + host->sg_ptr = &host->bounce_sg; 70 + sg = host->sg_ptr; 71 + } 72 + 73 + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 74 + if (ret > 0) 75 + desc = chan->device->device_prep_slave_sg(chan, sg, ret, 76 + DMA_FROM_DEVICE, DMA_CTRL_ACK); 77 + 78 + if (desc) { 79 + cookie = dmaengine_submit(desc); 80 + if (cookie < 0) { 81 + desc = NULL; 82 + ret = cookie; 83 + } 84 + } 85 + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 86 + __func__, host->sg_len, ret, cookie, host->mrq); 87 + 88 + pio: 89 + if (!desc) { 90 + /* DMA failed, fall back to PIO */ 91 + if (ret >= 0) 92 + ret = -EIO; 93 + host->chan_rx = NULL; 94 + dma_release_channel(chan); 95 + /* Free the Tx channel too */ 96 + chan = host->chan_tx; 97 + if (chan) { 98 + host->chan_tx = NULL; 99 + dma_release_channel(chan); 100 + } 101 + dev_warn(&host->pdev->dev, 102 + "DMA failed: %d, falling back to PIO\n", ret); 103 + tmio_mmc_enable_dma(host, false); 104 + } 105 + 106 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, 107 + desc, cookie, host->sg_len); 108 + } 109 + 110 + static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) 111 + { 112 + struct scatterlist *sg = host->sg_ptr, *sg_tmp; 113 + struct dma_async_tx_descriptor *desc = NULL; 114 + struct dma_chan *chan = host->chan_tx; 115 + struct tmio_mmc_data *pdata = host->pdata; 116 + dma_cookie_t cookie; 117 + int ret, i; 118 + bool aligned = true, 
multiple = true; 119 + unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 120 + 121 + for_each_sg(sg, sg_tmp, host->sg_len, i) { 122 + if (sg_tmp->offset & align) 123 + aligned = false; 124 + if (sg_tmp->length & align) { 125 + multiple = false; 126 + break; 127 + } 128 + } 129 + 130 + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 131 + (align & PAGE_MASK))) || !multiple) { 132 + ret = -EINVAL; 133 + goto pio; 134 + } 135 + 136 + if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 137 + host->force_pio = true; 138 + return; 139 + } 140 + 141 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); 142 + 143 + /* The only sg element can be unaligned, use our bounce buffer then */ 144 + if (!aligned) { 145 + unsigned long flags; 146 + void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); 147 + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 148 + memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); 149 + tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); 150 + host->sg_ptr = &host->bounce_sg; 151 + sg = host->sg_ptr; 152 + } 153 + 154 + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 155 + if (ret > 0) 156 + desc = chan->device->device_prep_slave_sg(chan, sg, ret, 157 + DMA_TO_DEVICE, DMA_CTRL_ACK); 158 + 159 + if (desc) { 160 + cookie = dmaengine_submit(desc); 161 + if (cookie < 0) { 162 + desc = NULL; 163 + ret = cookie; 164 + } 165 + } 166 + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 167 + __func__, host->sg_len, ret, cookie, host->mrq); 168 + 169 + pio: 170 + if (!desc) { 171 + /* DMA failed, fall back to PIO */ 172 + if (ret >= 0) 173 + ret = -EIO; 174 + host->chan_tx = NULL; 175 + dma_release_channel(chan); 176 + /* Free the Rx channel too */ 177 + chan = host->chan_rx; 178 + if (chan) { 179 + host->chan_rx = NULL; 180 + dma_release_channel(chan); 181 + } 182 + dev_warn(&host->pdev->dev, 183 + "DMA failed: %d, falling back to PIO\n", ret); 184 + tmio_mmc_enable_dma(host, false); 185 + } 186 + 187 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, 188 + desc, cookie); 189 + } 190 + 191 + void tmio_mmc_start_dma(struct tmio_mmc_host *host, 192 + struct mmc_data *data) 193 + { 194 + if (data->flags & MMC_DATA_READ) { 195 + if (host->chan_rx) 196 + tmio_mmc_start_dma_rx(host); 197 + } else { 198 + if (host->chan_tx) 199 + tmio_mmc_start_dma_tx(host); 200 + } 201 + } 202 + 203 + static void tmio_mmc_issue_tasklet_fn(unsigned long priv) 204 + { 205 + struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; 206 + struct dma_chan *chan = NULL; 207 + 208 + spin_lock_irq(&host->lock); 209 + 210 + if (host && host->data) { 211 + if (host->data->flags & MMC_DATA_READ) 212 + chan = host->chan_rx; 213 + else 214 + chan = host->chan_tx; 215 + } 216 + 217 + spin_unlock_irq(&host->lock); 218 + 219 + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); 220 + 221 + if (chan) 222 + dma_async_issue_pending(chan); 223 + } 224 + 225 + static void tmio_mmc_tasklet_fn(unsigned long arg) 226 + { 227 + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; 228 + 229 + spin_lock_irq(&host->lock); 230 + 231 + if (!host->data) 232 + goto out; 233 + 234 + if (host->data->flags & MMC_DATA_READ) 235 + dma_unmap_sg(host->chan_rx->device->dev, 236 + host->sg_ptr, host->sg_len, 237 + DMA_FROM_DEVICE); 238 + else 239 + dma_unmap_sg(host->chan_tx->device->dev, 240 + host->sg_ptr, host->sg_len, 241 + DMA_TO_DEVICE); 242 + 243 + tmio_mmc_do_data_irq(host); 244 + out: 245 + spin_unlock_irq(&host->lock); 246 + } 247 + 248 + /* It 
might be necessary to make filter MFD specific */ 249 + static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) 250 + { 251 + dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); 252 + chan->private = arg; 253 + return true; 254 + } 255 + 256 + void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) 257 + { 258 + /* We can only either use DMA for both Tx and Rx or not use it at all */ 259 + if (pdata->dma) { 260 + dma_cap_mask_t mask; 261 + 262 + dma_cap_zero(mask); 263 + dma_cap_set(DMA_SLAVE, mask); 264 + 265 + host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, 266 + pdata->dma->chan_priv_tx); 267 + dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, 268 + host->chan_tx); 269 + 270 + if (!host->chan_tx) 271 + return; 272 + 273 + host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, 274 + pdata->dma->chan_priv_rx); 275 + dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, 276 + host->chan_rx); 277 + 278 + if (!host->chan_rx) 279 + goto ereqrx; 280 + 281 + host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); 282 + if (!host->bounce_buf) 283 + goto ebouncebuf; 284 + 285 + tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); 286 + tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); 287 + 288 + tmio_mmc_enable_dma(host, true); 289 + 290 + return; 291 + ebouncebuf: 292 + dma_release_channel(host->chan_rx); 293 + host->chan_rx = NULL; 294 + ereqrx: 295 + dma_release_channel(host->chan_tx); 296 + host->chan_tx = NULL; 297 + return; 298 + } 299 + } 300 + 301 + void tmio_mmc_release_dma(struct tmio_mmc_host *host) 302 + { 303 + if (host->chan_tx) { 304 + struct dma_chan *chan = host->chan_tx; 305 + host->chan_tx = NULL; 306 + dma_release_channel(chan); 307 + } 308 + if (host->chan_rx) { 309 + struct dma_chan *chan = host->chan_rx; 310 + host->chan_rx = NULL; 311 + dma_release_channel(chan); 312 + } 313 + if (host->bounce_buf) { 314 + free_pages((unsigned long)host->bounce_buf, 0); 315 + host->bounce_buf = NULL; 316 + } 317 + }
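
[Editor's note: tmio_mmc_start_dma_rx()/_tx() above accept a request for DMA only when every scatterlist element's offset and length honour the channel's (1 << alignment_shift) granularity. A single misaligned element that fits in one page is staged through the bounce buffer allocated in tmio_mmc_request_dma(); a transfer shorter than TMIO_MMC_MIN_DMA_LEN sets force_pio instead. A stand-alone sketch of that decision — struct seg and classify() are illustrative stand-ins for the scatterlist walk, not driver API, and alignment_shift plays the role of pdata->dma->alignment_shift.]

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE		4096UL	/* stands in for PAGE_CACHE_SIZE */
    #define TMIO_MMC_MIN_DMA_LEN	8

    enum xfer_path { XFER_DMA, XFER_DMA_BOUNCE, XFER_PIO };

    struct seg { size_t offset, length; };

    static enum xfer_path classify(const struct seg *sg, int nents,
    			       unsigned int alignment_shift)
    {
    	unsigned long align = (1UL << alignment_shift) - 1;
    	bool aligned = true, multiple = true;
    	int i;

    	for (i = 0; i < nents; i++) {
    		if (sg[i].offset & align)
    			aligned = false;
    		if (sg[i].length & align) {
    			multiple = false;	/* partial words can never be DMAed */
    			break;
    		}
    	}

    	/* A misaligned list is only rescuable when it is a single
    	 * element, fits in a page and needs sub-page alignment. */
    	if (!multiple || (!aligned && (nents > 1 ||
    				       sg[0].length > PAGE_SIZE ||
    				       (align & ~(PAGE_SIZE - 1)))))
    		return XFER_PIO;

    	if (sg[0].length < TMIO_MMC_MIN_DMA_LEN)
    		return XFER_PIO;	/* host->force_pio in the driver */

    	return aligned ? XFER_DMA : XFER_DMA_BOUNCE;
    }

In the bounce case the driver substitutes host->bounce_sg for the caller's list and, on reads, copies the page back in tmio_mmc_check_bounce_buffer().
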
+897
drivers/mmc/host/tmio_mmc_pio.c
··· 1 + /* 2 + * linux/drivers/mmc/host/tmio_mmc_pio.c 3 + * 4 + * Copyright (C) 2011 Guennadi Liakhovetski 5 + * Copyright (C) 2007 Ian Molton 6 + * Copyright (C) 2004 Ian Molton 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * Driver for the MMC / SD / SDIO IP found in: 13 + * 14 + * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs 15 + * 16 + * This driver draws mainly on scattered spec sheets, Reverse engineering 17 + * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit 18 + * support). (Further 4 bit support from a later datasheet). 19 + * 20 + * TODO: 21 + * Investigate using a workqueue for PIO transfers 22 + * Eliminate FIXMEs 23 + * SDIO support 24 + * Better Power management 25 + * Handle MMC errors better 26 + * double buffer support 27 + * 28 + */ 29 + 30 + #include <linux/delay.h> 31 + #include <linux/device.h> 32 + #include <linux/highmem.h> 33 + #include <linux/interrupt.h> 34 + #include <linux/io.h> 35 + #include <linux/irq.h> 36 + #include <linux/mfd/tmio.h> 37 + #include <linux/mmc/host.h> 38 + #include <linux/mmc/tmio.h> 39 + #include <linux/module.h> 40 + #include <linux/pagemap.h> 41 + #include <linux/platform_device.h> 42 + #include <linux/scatterlist.h> 43 + #include <linux/workqueue.h> 44 + #include <linux/spinlock.h> 45 + 46 + #include "tmio_mmc.h" 47 + 48 + static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) 49 + { 50 + return readw(host->ctl + (addr << host->bus_shift)); 51 + } 52 + 53 + static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, 54 + u16 *buf, int count) 55 + { 56 + readsw(host->ctl + (addr << host->bus_shift), buf, count); 57 + } 58 + 59 + static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) 60 + { 61 + return readw(host->ctl + (addr << host->bus_shift)) | 62 + readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; 63 + } 64 + 65 + static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) 66 + { 67 + writew(val, host->ctl + (addr << host->bus_shift)); 68 + } 69 + 70 + static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, 71 + u16 *buf, int count) 72 + { 73 + writesw(host->ctl + (addr << host->bus_shift), buf, count); 74 + } 75 + 76 + static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) 77 + { 78 + writew(val, host->ctl + (addr << host->bus_shift)); 79 + writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 80 + } 81 + 82 + void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 83 + { 84 + u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); 85 + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 86 + } 87 + 88 + void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 89 + { 90 + u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); 91 + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 92 + } 93 + 94 + static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) 95 + { 96 + sd_ctrl_write32(host, CTL_STATUS, ~i); 97 + } 98 + 99 + static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) 100 + { 101 + host->sg_len = data->sg_len; 102 + host->sg_ptr = data->sg; 103 + host->sg_orig = data->sg; 104 + host->sg_off = 0; 105 + } 106 + 107 + static int tmio_mmc_next_sg(struct tmio_mmc_host *host) 108 + { 109 + host->sg_ptr = sg_next(host->sg_ptr); 110 + host->sg_off = 0; 111 + return 
--host->sg_len; 112 + } 113 + 114 + #ifdef CONFIG_MMC_DEBUG 115 + 116 + #define STATUS_TO_TEXT(a, status, i) \ 117 + do { \ 118 + if (status & TMIO_STAT_##a) { \ 119 + if (i++) \ 120 + printk(" | "); \ 121 + printk(#a); \ 122 + } \ 123 + } while (0) 124 + 125 + static void pr_debug_status(u32 status) 126 + { 127 + int i = 0; 128 + printk(KERN_DEBUG "status: %08x = ", status); 129 + STATUS_TO_TEXT(CARD_REMOVE, status, i); 130 + STATUS_TO_TEXT(CARD_INSERT, status, i); 131 + STATUS_TO_TEXT(SIGSTATE, status, i); 132 + STATUS_TO_TEXT(WRPROTECT, status, i); 133 + STATUS_TO_TEXT(CARD_REMOVE_A, status, i); 134 + STATUS_TO_TEXT(CARD_INSERT_A, status, i); 135 + STATUS_TO_TEXT(SIGSTATE_A, status, i); 136 + STATUS_TO_TEXT(CMD_IDX_ERR, status, i); 137 + STATUS_TO_TEXT(STOPBIT_ERR, status, i); 138 + STATUS_TO_TEXT(ILL_FUNC, status, i); 139 + STATUS_TO_TEXT(CMD_BUSY, status, i); 140 + STATUS_TO_TEXT(CMDRESPEND, status, i); 141 + STATUS_TO_TEXT(DATAEND, status, i); 142 + STATUS_TO_TEXT(CRCFAIL, status, i); 143 + STATUS_TO_TEXT(DATATIMEOUT, status, i); 144 + STATUS_TO_TEXT(CMDTIMEOUT, status, i); 145 + STATUS_TO_TEXT(RXOVERFLOW, status, i); 146 + STATUS_TO_TEXT(TXUNDERRUN, status, i); 147 + STATUS_TO_TEXT(RXRDY, status, i); 148 + STATUS_TO_TEXT(TXRQ, status, i); 149 + STATUS_TO_TEXT(ILL_ACCESS, status, i); 150 + printk("\n"); 151 + } 152 + 153 + #else 154 + #define pr_debug_status(s) do { } while (0) 155 + #endif 156 + 157 + static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 158 + { 159 + struct tmio_mmc_host *host = mmc_priv(mmc); 160 + 161 + if (enable) { 162 + host->sdio_irq_enabled = 1; 163 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); 164 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, 165 + (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); 166 + } else { 167 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); 168 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 169 + host->sdio_irq_enabled = 0; 170 + } 171 + } 172 + 173 + static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 174 + { 175 + u32 clk = 0, clock; 176 + 177 + if (new_clock) { 178 + for (clock = host->mmc->f_min, clk = 0x80000080; 179 + new_clock >= (clock<<1); clk >>= 1) 180 + clock <<= 1; 181 + clk |= 0x100; 182 + } 183 + 184 + if (host->set_clk_div) 185 + host->set_clk_div(host->pdev, (clk>>22) & 1); 186 + 187 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); 188 + } 189 + 190 + static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 191 + { 192 + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); 193 + 194 + /* implicit BUG_ON(!res) */ 195 + if (resource_size(res) > 0x100) { 196 + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); 197 + msleep(10); 198 + } 199 + 200 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & 201 + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 202 + msleep(10); 203 + } 204 + 205 + static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 206 + { 207 + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); 208 + 209 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | 210 + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 211 + msleep(10); 212 + 213 + /* implicit BUG_ON(!res) */ 214 + if (resource_size(res) > 0x100) { 215 + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); 216 + msleep(10); 217 + } 218 + } 219 + 220 + static void tmio_mmc_reset(struct tmio_mmc_host *host) 221 + { 222 + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); 223 + 224 + /* FIXME - should we 
set stop clock reg here */ 225 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); 226 + /* implicit BUG_ON(!res) */ 227 + if (resource_size(res) > 0x100) 228 + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); 229 + msleep(10); 230 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); 231 + if (resource_size(res) > 0x100) 232 + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); 233 + msleep(10); 234 + } 235 + 236 + static void tmio_mmc_reset_work(struct work_struct *work) 237 + { 238 + struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, 239 + delayed_reset_work.work); 240 + struct mmc_request *mrq; 241 + unsigned long flags; 242 + 243 + spin_lock_irqsave(&host->lock, flags); 244 + mrq = host->mrq; 245 + 246 + /* request already finished */ 247 + if (!mrq 248 + || time_is_after_jiffies(host->last_req_ts + 249 + msecs_to_jiffies(2000))) { 250 + spin_unlock_irqrestore(&host->lock, flags); 251 + return; 252 + } 253 + 254 + dev_warn(&host->pdev->dev, 255 + "timeout waiting for hardware interrupt (CMD%u)\n", 256 + mrq->cmd->opcode); 257 + 258 + if (host->data) 259 + host->data->error = -ETIMEDOUT; 260 + else if (host->cmd) 261 + host->cmd->error = -ETIMEDOUT; 262 + else 263 + mrq->cmd->error = -ETIMEDOUT; 264 + 265 + host->cmd = NULL; 266 + host->data = NULL; 267 + host->mrq = NULL; 268 + host->force_pio = false; 269 + 270 + spin_unlock_irqrestore(&host->lock, flags); 271 + 272 + tmio_mmc_reset(host); 273 + 274 + mmc_request_done(host->mmc, mrq); 275 + } 276 + 277 + static void tmio_mmc_finish_request(struct tmio_mmc_host *host) 278 + { 279 + struct mmc_request *mrq = host->mrq; 280 + 281 + if (!mrq) 282 + return; 283 + 284 + host->mrq = NULL; 285 + host->cmd = NULL; 286 + host->data = NULL; 287 + host->force_pio = false; 288 + 289 + cancel_delayed_work(&host->delayed_reset_work); 290 + 291 + mmc_request_done(host->mmc, mrq); 292 + } 293 + 294 + /* These are the bitmasks the tmio chip requires to implement the MMC response 295 + * types. Note that R1 and R6 are the same in this scheme. */ 296 + #define APP_CMD 0x0040 297 + #define RESP_NONE 0x0300 298 + #define RESP_R1 0x0400 299 + #define RESP_R1B 0x0500 300 + #define RESP_R2 0x0600 301 + #define RESP_R3 0x0700 302 + #define DATA_PRESENT 0x0800 303 + #define TRANSFER_READ 0x1000 304 + #define TRANSFER_MULTI 0x2000 305 + #define SECURITY_CMD 0x4000 306 + 307 + static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) 308 + { 309 + struct mmc_data *data = host->data; 310 + int c = cmd->opcode; 311 + 312 + /* Command 12 is handled by hardware */ 313 + if (cmd->opcode == 12 && !cmd->arg) { 314 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); 315 + return 0; 316 + } 317 + 318 + switch (mmc_resp_type(cmd)) { 319 + case MMC_RSP_NONE: c |= RESP_NONE; break; 320 + case MMC_RSP_R1: c |= RESP_R1; break; 321 + case MMC_RSP_R1B: c |= RESP_R1B; break; 322 + case MMC_RSP_R2: c |= RESP_R2; break; 323 + case MMC_RSP_R3: c |= RESP_R3; break; 324 + default: 325 + pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); 326 + return -EINVAL; 327 + } 328 + 329 + host->cmd = cmd; 330 + 331 + /* FIXME - this seems to be ok commented out but the spec suggest this bit 332 + * should be set when issuing app commands. 
333 + * if(cmd->flags & MMC_FLAG_ACMD) 334 + * c |= APP_CMD; 335 + */ 336 + if (data) { 337 + c |= DATA_PRESENT; 338 + if (data->blocks > 1) { 339 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); 340 + c |= TRANSFER_MULTI; 341 + } 342 + if (data->flags & MMC_DATA_READ) 343 + c |= TRANSFER_READ; 344 + } 345 + 346 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); 347 + 348 + /* Fire off the command */ 349 + sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); 350 + sd_ctrl_write16(host, CTL_SD_CMD, c); 351 + 352 + return 0; 353 + } 354 + 355 + /* 356 + * This chip always returns (at least?) as much data as you ask for. 357 + * I'm unsure what happens if you ask for less than a block. This should be 358 + * looked into to ensure that a funny length read doesnt hose the controller. 359 + */ 360 + static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 361 + { 362 + struct mmc_data *data = host->data; 363 + void *sg_virt; 364 + unsigned short *buf; 365 + unsigned int count; 366 + unsigned long flags; 367 + 368 + if ((host->chan_tx || host->chan_rx) && !host->force_pio) { 369 + pr_err("PIO IRQ in DMA mode!\n"); 370 + return; 371 + } else if (!data) { 372 + pr_debug("Spurious PIO IRQ\n"); 373 + return; 374 + } 375 + 376 + sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); 377 + buf = (unsigned short *)(sg_virt + host->sg_off); 378 + 379 + count = host->sg_ptr->length - host->sg_off; 380 + if (count > data->blksz) 381 + count = data->blksz; 382 + 383 + pr_debug("count: %08x offset: %08x flags %08x\n", 384 + count, host->sg_off, data->flags); 385 + 386 + /* Transfer the data */ 387 + if (data->flags & MMC_DATA_READ) 388 + sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 389 + else 390 + sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 391 + 392 + host->sg_off += count; 393 + 394 + tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); 395 + 396 + if (host->sg_off == host->sg_ptr->length) 397 + tmio_mmc_next_sg(host); 398 + 399 + return; 400 + } 401 + 402 + static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) 403 + { 404 + if (host->sg_ptr == &host->bounce_sg) { 405 + unsigned long flags; 406 + void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); 407 + memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); 408 + tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); 409 + } 410 + } 411 + 412 + /* needs to be called with host->lock held */ 413 + void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) 414 + { 415 + struct mmc_data *data = host->data; 416 + struct mmc_command *stop; 417 + 418 + host->data = NULL; 419 + 420 + if (!data) { 421 + dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); 422 + return; 423 + } 424 + stop = data->stop; 425 + 426 + /* FIXME - return correct transfer count on errors */ 427 + if (!data->error) 428 + data->bytes_xfered = data->blocks * data->blksz; 429 + else 430 + data->bytes_xfered = 0; 431 + 432 + pr_debug("Completed data request\n"); 433 + 434 + /* 435 + * FIXME: other drivers allow an optional stop command of any given type 436 + * which we dont do, as the chip can auto generate them. 437 + * Perhaps we can be smarter about when to use auto CMD12 and 438 + * only issue the auto request when we know this is the desired 439 + * stop command, allowing fallback to the stop command the 440 + * upper layers expect. For now, we do what works. 
441 + */ 442 + 443 + if (data->flags & MMC_DATA_READ) { 444 + if (host->chan_rx && !host->force_pio) 445 + tmio_mmc_check_bounce_buffer(host); 446 + dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", 447 + host->mrq); 448 + } else { 449 + dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", 450 + host->mrq); 451 + } 452 + 453 + if (stop) { 454 + if (stop->opcode == 12 && !stop->arg) 455 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); 456 + else 457 + BUG(); 458 + } 459 + 460 + tmio_mmc_finish_request(host); 461 + } 462 + 463 + static void tmio_mmc_data_irq(struct tmio_mmc_host *host) 464 + { 465 + struct mmc_data *data; 466 + spin_lock(&host->lock); 467 + data = host->data; 468 + 469 + if (!data) 470 + goto out; 471 + 472 + if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { 473 + /* 474 + * Has all data been written out yet? Testing on SuperH showed 475 + * that in most cases the first interrupt already comes with the 476 + * BUSY status bit clear, but on some operations, like mount or 477 + * in the beginning of a write / sync / umount, there is one 478 + * DATAEND interrupt with the BUSY bit set; in these cases 479 + * waiting for one more interrupt fixes the problem. 480 + */ 481 + if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { 482 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); 483 + tasklet_schedule(&host->dma_complete); 484 + } 485 + } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { 486 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); 487 + tasklet_schedule(&host->dma_complete); 488 + } else { 489 + tmio_mmc_do_data_irq(host); 490 + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); 491 + } 492 + out: 493 + spin_unlock(&host->lock); 494 + } 495 + 496 + static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 497 + unsigned int stat) 498 + { 499 + struct mmc_command *cmd = host->cmd; 500 + int i, addr; 501 + 502 + spin_lock(&host->lock); 503 + 504 + if (!host->cmd) { 505 + pr_debug("Spurious CMD irq\n"); 506 + goto out; 507 + } 508 + 509 + host->cmd = NULL; 510 + 511 + /* This controller is sicker than the PXA one. Not only do we need to 512 + * drop the top 8 bits of the first response word, we also need to 513 + * modify the order of the response for short response command types. 514 + */ 515 + 516 + for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) 517 + cmd->resp[i] = sd_ctrl_read32(host, addr); 518 + 519 + if (cmd->flags & MMC_RSP_136) { 520 + cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); 521 + cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); 522 + cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); 523 + cmd->resp[3] <<= 8; 524 + } else if (cmd->flags & MMC_RSP_R3) { 525 + cmd->resp[0] = cmd->resp[3]; 526 + } 527 + 528 + if (stat & TMIO_STAT_CMDTIMEOUT) 529 + cmd->error = -ETIMEDOUT; 530 + else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) 531 + cmd->error = -EILSEQ; 532 + 533 + /* If there is data to handle we enable data IRQs here, and 534 + * we will ultimately finish the request in the data_end handler. 535 + * If there's no data or we encountered an error, finish now. 
536 + */ 537 + if (host->data && !cmd->error) { 538 + if (host->data->flags & MMC_DATA_READ) { 539 + if (host->force_pio || !host->chan_rx) 540 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); 541 + else 542 + tasklet_schedule(&host->dma_issue); 543 + } else { 544 + if (host->force_pio || !host->chan_tx) 545 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 546 + else 547 + tasklet_schedule(&host->dma_issue); 548 + } 549 + } else { 550 + tmio_mmc_finish_request(host); 551 + } 552 + 553 + out: 554 + spin_unlock(&host->lock); 555 + } 556 + 557 + static irqreturn_t tmio_mmc_irq(int irq, void *devid) 558 + { 559 + struct tmio_mmc_host *host = devid; 560 + struct tmio_mmc_data *pdata = host->pdata; 561 + unsigned int ireg, irq_mask, status; 562 + unsigned int sdio_ireg, sdio_irq_mask, sdio_status; 563 + 564 + pr_debug("MMC IRQ begin\n"); 565 + 566 + status = sd_ctrl_read32(host, CTL_STATUS); 567 + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 568 + ireg = status & TMIO_MASK_IRQ & ~irq_mask; 569 + 570 + sdio_ireg = 0; 571 + if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { 572 + sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); 573 + sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); 574 + sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; 575 + 576 + sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); 577 + 578 + if (sdio_ireg && !host->sdio_irq_enabled) { 579 + pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", 580 + sdio_status, sdio_irq_mask, sdio_ireg); 581 + tmio_mmc_enable_sdio_irq(host->mmc, 0); 582 + goto out; 583 + } 584 + 585 + if (host->mmc->caps & MMC_CAP_SDIO_IRQ && 586 + sdio_ireg & TMIO_SDIO_STAT_IOIRQ) 587 + mmc_signal_sdio_irq(host->mmc); 588 + 589 + if (sdio_ireg) 590 + goto out; 591 + } 592 + 593 + pr_debug_status(status); 594 + pr_debug_status(ireg); 595 + 596 + if (!ireg) { 597 + tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); 598 + 599 + pr_warning("tmio_mmc: Spurious irq, disabling! 
" 600 + "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 601 + pr_debug_status(status); 602 + 603 + goto out; 604 + } 605 + 606 + while (ireg) { 607 + /* Card insert / remove attempts */ 608 + if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { 609 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | 610 + TMIO_STAT_CARD_REMOVE); 611 + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 612 + } 613 + 614 + /* CRC and other errors */ 615 + /* if (ireg & TMIO_STAT_ERR_IRQ) 616 + * handled |= tmio_error_irq(host, irq, stat); 617 + */ 618 + 619 + /* Command completion */ 620 + if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { 621 + tmio_mmc_ack_mmc_irqs(host, 622 + TMIO_STAT_CMDRESPEND | 623 + TMIO_STAT_CMDTIMEOUT); 624 + tmio_mmc_cmd_irq(host, status); 625 + } 626 + 627 + /* Data transfer */ 628 + if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { 629 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); 630 + tmio_mmc_pio_irq(host); 631 + } 632 + 633 + /* Data transfer completion */ 634 + if (ireg & TMIO_STAT_DATAEND) { 635 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); 636 + tmio_mmc_data_irq(host); 637 + } 638 + 639 + /* Check status - keep going until we've handled it all */ 640 + status = sd_ctrl_read32(host, CTL_STATUS); 641 + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 642 + ireg = status & TMIO_MASK_IRQ & ~irq_mask; 643 + 644 + pr_debug("Status at end of loop: %08x\n", status); 645 + pr_debug_status(status); 646 + } 647 + pr_debug("MMC IRQ end\n"); 648 + 649 + out: 650 + return IRQ_HANDLED; 651 + } 652 + 653 + static int tmio_mmc_start_data(struct tmio_mmc_host *host, 654 + struct mmc_data *data) 655 + { 656 + struct tmio_mmc_data *pdata = host->pdata; 657 + 658 + pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 659 + data->blksz, data->blocks); 660 + 661 + /* Some hardware cannot perform 2 byte requests in 4 bit mode */ 662 + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { 663 + int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; 664 + 665 + if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { 666 + pr_err("%s: %d byte block unsupported in 4 bit mode\n", 667 + mmc_hostname(host->mmc), data->blksz); 668 + return -EINVAL; 669 + } 670 + } 671 + 672 + tmio_mmc_init_sg(host, data); 673 + host->data = data; 674 + 675 + /* Set transfer length / blocksize */ 676 + sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 677 + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 678 + 679 + tmio_mmc_start_dma(host, data); 680 + 681 + return 0; 682 + } 683 + 684 + /* Process requests from the MMC layer */ 685 + static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 686 + { 687 + struct tmio_mmc_host *host = mmc_priv(mmc); 688 + int ret; 689 + 690 + if (host->mrq) 691 + pr_debug("request not null\n"); 692 + 693 + host->last_req_ts = jiffies; 694 + wmb(); 695 + host->mrq = mrq; 696 + 697 + if (mrq->data) { 698 + ret = tmio_mmc_start_data(host, mrq->data); 699 + if (ret) 700 + goto fail; 701 + } 702 + 703 + ret = tmio_mmc_start_command(host, mrq->cmd); 704 + if (!ret) { 705 + schedule_delayed_work(&host->delayed_reset_work, 706 + msecs_to_jiffies(2000)); 707 + return; 708 + } 709 + 710 + fail: 711 + host->mrq = NULL; 712 + host->force_pio = false; 713 + mrq->cmd->error = ret; 714 + mmc_request_done(mmc, mrq); 715 + } 716 + 717 + /* Set MMC clock / power. 718 + * Note: This controller uses a simple divider scheme therefore it cannot 719 + * run a MMC card at full speed (20MHz). 
The max clock is 24MHz on SD, but as 720 + * MMC won't run that fast, it has to be clocked at 12MHz which is the next 721 + * slowest setting. 722 + */ 723 + static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 724 + { 725 + struct tmio_mmc_host *host = mmc_priv(mmc); 726 + 727 + if (ios->clock) 728 + tmio_mmc_set_clock(host, ios->clock); 729 + 730 + /* Power sequence - OFF -> UP -> ON */ 731 + if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 732 + /* power down SD bus */ 733 + if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) 734 + host->set_pwr(host->pdev, 0); 735 + tmio_mmc_clk_stop(host); 736 + } else if (ios->power_mode == MMC_POWER_UP) { 737 + /* power up SD bus */ 738 + if (host->set_pwr) 739 + host->set_pwr(host->pdev, 1); 740 + } else { 741 + /* start bus clock */ 742 + tmio_mmc_clk_start(host); 743 + } 744 + 745 + switch (ios->bus_width) { 746 + case MMC_BUS_WIDTH_1: 747 + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); 748 + break; 749 + case MMC_BUS_WIDTH_4: 750 + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); 751 + break; 752 + } 753 + 754 + /* Let things settle. Delay taken from winCE driver */ 755 + udelay(140); 756 + } 757 + 758 + static int tmio_mmc_get_ro(struct mmc_host *mmc) 759 + { 760 + struct tmio_mmc_host *host = mmc_priv(mmc); 761 + struct tmio_mmc_data *pdata = host->pdata; 762 + 763 + return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || 764 + !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); 765 + } 766 + 767 + static int tmio_mmc_get_cd(struct mmc_host *mmc) 768 + { 769 + struct tmio_mmc_host *host = mmc_priv(mmc); 770 + struct tmio_mmc_data *pdata = host->pdata; 771 + 772 + if (!pdata->get_cd) 773 + return -ENOSYS; 774 + else 775 + return pdata->get_cd(host->pdev); 776 + } 777 + 778 + static const struct mmc_host_ops tmio_mmc_ops = { 779 + .request = tmio_mmc_request, 780 + .set_ios = tmio_mmc_set_ios, 781 + .get_ro = tmio_mmc_get_ro, 782 + .get_cd = tmio_mmc_get_cd, 783 + .enable_sdio_irq = tmio_mmc_enable_sdio_irq, 784 + }; 785 + 786 + int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, 787 + struct platform_device *pdev, 788 + struct tmio_mmc_data *pdata) 789 + { 790 + struct tmio_mmc_host *_host; 791 + struct mmc_host *mmc; 792 + struct resource *res_ctl; 793 + int ret; 794 + u32 irq_mask = TMIO_MASK_CMD; 795 + 796 + res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); 797 + if (!res_ctl) 798 + return -EINVAL; 799 + 800 + mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); 801 + if (!mmc) 802 + return -ENOMEM; 803 + 804 + _host = mmc_priv(mmc); 805 + _host->pdata = pdata; 806 + _host->mmc = mmc; 807 + _host->pdev = pdev; 808 + platform_set_drvdata(pdev, mmc); 809 + 810 + _host->set_pwr = pdata->set_pwr; 811 + _host->set_clk_div = pdata->set_clk_div; 812 + 813 + /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ 814 + _host->bus_shift = resource_size(res_ctl) >> 10; 815 + 816 + _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); 817 + if (!_host->ctl) { 818 + ret = -ENOMEM; 819 + goto host_free; 820 + } 821 + 822 + mmc->ops = &tmio_mmc_ops; 823 + mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; 824 + mmc->f_max = pdata->hclk; 825 + mmc->f_min = mmc->f_max / 512; 826 + mmc->max_segs = 32; 827 + mmc->max_blk_size = 512; 828 + mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * 829 + mmc->max_segs; 830 + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 831 + mmc->max_seg_size = mmc->max_req_size; 832 + if (pdata->ocr_mask) 833 + 
mmc->ocr_avail = pdata->ocr_mask; 834 + else 835 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 836 + 837 + tmio_mmc_clk_stop(_host); 838 + tmio_mmc_reset(_host); 839 + 840 + ret = platform_get_irq(pdev, 0); 841 + if (ret < 0) 842 + goto unmap_ctl; 843 + 844 + _host->irq = ret; 845 + 846 + tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); 847 + if (pdata->flags & TMIO_MMC_SDIO_IRQ) 848 + tmio_mmc_enable_sdio_irq(mmc, 0); 849 + 850 + ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | 851 + IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); 852 + if (ret) 853 + goto unmap_ctl; 854 + 855 + spin_lock_init(&_host->lock); 856 + 857 + /* Init delayed work for request timeouts */ 858 + INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); 859 + 860 + /* See if we also get DMA */ 861 + tmio_mmc_request_dma(_host, pdata); 862 + 863 + mmc_add_host(mmc); 864 + 865 + /* Unmask the IRQs we want to know about */ 866 + if (!_host->chan_rx) 867 + irq_mask |= TMIO_MASK_READOP; 868 + if (!_host->chan_tx) 869 + irq_mask |= TMIO_MASK_WRITEOP; 870 + 871 + tmio_mmc_enable_mmc_irqs(_host, irq_mask); 872 + 873 + *host = _host; 874 + 875 + return 0; 876 + 877 + unmap_ctl: 878 + iounmap(_host->ctl); 879 + host_free: 880 + mmc_free_host(mmc); 881 + 882 + return ret; 883 + } 884 + EXPORT_SYMBOL(tmio_mmc_host_probe); 885 + 886 + void tmio_mmc_host_remove(struct tmio_mmc_host *host) 887 + { 888 + mmc_remove_host(host->mmc); 889 + cancel_delayed_work_sync(&host->delayed_reset_work); 890 + tmio_mmc_release_dma(host); 891 + free_irq(host->irq, host); 892 + iounmap(host->ctl); 893 + mmc_free_host(host->mmc); 894 + } 895 + EXPORT_SYMBOL(tmio_mmc_host_remove); 896 + 897 + MODULE_LICENSE("GPL v2");
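
The tmio_mmc_pio core above pairs every request with a software watchdog: tmio_mmc_request() arms delayed_reset_work for two seconds, tmio_mmc_finish_request() cancels it, and tmio_mmc_reset_work() only acts when no hardware interrupt completed the request in time. A minimal sketch of that pattern, with illustrative my_* names rather than the driver's own symbols; the timestamp re-check under the lock is what makes a late-firing work item harmless:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_host {
	spinlock_t lock;
	unsigned long last_req_ts;	/* jiffies at request start */
	struct delayed_work reset_work;
	void *mrq;			/* in-flight request, NULL when idle */
};

static void my_issue(struct my_host *host, void *mrq)
{
	host->last_req_ts = jiffies;
	host->mrq = mrq;
	/* arm the watchdog; it only matters if no IRQ finishes the request */
	schedule_delayed_work(&host->reset_work, msecs_to_jiffies(2000));
}

static void my_finish(struct my_host *host)
{
	host->mrq = NULL;
	cancel_delayed_work(&host->reset_work);	/* normal completion: disarm */
}

static void my_reset_work(struct work_struct *work)
{
	struct my_host *host = container_of(work, struct my_host,
					    reset_work.work);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	/* bail out if the request finished, or a newer one re-armed us */
	if (!host->mrq || time_is_after_jiffies(host->last_req_ts +
						msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);
	/* ... reset the controller and fail the request here ... */
}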
+2 -3
drivers/mmc/host/via-sdmmc.c
··· 1087 1087 struct mmc_host *mmc; 1088 1088 struct via_crdr_mmc_host *sdhost; 1089 1089 u32 base, len; 1090 - u8 rev, gatt; 1090 + u8 gatt; 1091 1091 int ret; 1092 1092 1093 - pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev); 1094 1093 pr_info(DRV_NAME 1095 1094 ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", 1096 1095 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, 1097 - (int)rev); 1096 + (int)pcidev->revision); 1098 1097 1099 1098 ret = pci_enable_device(pcidev); 1100 1099 if (ret)
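
The via-sdmmc hunk relies on the PCI core caching the revision ID in struct pci_dev at enumeration time, so the explicit config-space read can go. A hedged sketch of the idiom (my_probe is an illustrative name, not from the driver):

#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* pdev->revision is pre-read from PCI_CLASS_REVISION by the core,
	 * so no pci_read_config_byte() is needed here */
	dev_info(&pdev->dev, "device revision %02x\n", pdev->revision);
	return pci_enable_device(pdev);
}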
+11 -2
drivers/net/bfin_mac.c
··· 1237 1237 1238 1238 if (phydev->interface == PHY_INTERFACE_MODE_RMII) { 1239 1239 opmode |= RMII; /* For Now only 100MBit are supported */ 1240 - #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 1241 - opmode |= TE; 1240 + #if defined(CONFIG_BF537) || defined(CONFIG_BF536) 1241 + if (__SILICON_REVISION__ < 3) { 1242 + /* 1243 + * This isn't publicly documented (fun times!), but in 1244 + * silicon <=0.2, the RX and TX pins are clocked together. 1245 + * So in order to recv, we must enable the transmit side 1246 + * as well. This will cause a spurious TX interrupt too, 1247 + * but we can easily consume that. 1248 + */ 1249 + opmode |= TE; 1250 + } 1242 1251 #endif 1243 1252 } 1244 1253
+1 -1
drivers/net/bnx2.c
··· 8317 8317 #endif 8318 8318 }; 8319 8319 8320 - static void inline vlan_features_add(struct net_device *dev, u32 flags) 8320 + static inline void vlan_features_add(struct net_device *dev, u32 flags) 8321 8321 { 8322 8322 dev->vlan_features |= flags; 8323 8323 }
+5 -11
drivers/net/can/c_can/c_can.c
··· 588 588 { 589 589 struct c_can_priv *priv = netdev_priv(dev); 590 590 591 - if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 592 - /* disable automatic retransmission */ 593 - priv->write_reg(priv, &priv->regs->control, 594 - CONTROL_DISABLE_AR); 595 - else 596 - /* enable automatic retransmission */ 597 - priv->write_reg(priv, &priv->regs->control, 598 - CONTROL_ENABLE_AR); 591 + /* enable automatic retransmission */ 592 + priv->write_reg(priv, &priv->regs->control, 593 + CONTROL_ENABLE_AR); 599 594 600 595 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & 601 596 CAN_CTRLMODE_LOOPBACK)) { ··· 699 704 700 705 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 701 706 msg_obj_no = get_tx_echo_msg_obj(priv); 702 - c_can_inval_msg_object(dev, 0, msg_obj_no); 703 707 val = c_can_read_reg32(priv, &priv->regs->txrqst1); 704 708 if (!(val & (1 << msg_obj_no))) { 705 709 can_get_echo_skb(dev, ··· 707 713 &priv->regs->ifregs[0].msg_cntrl) 708 714 & IF_MCONT_DLC_MASK; 709 715 stats->tx_packets++; 716 + c_can_inval_msg_object(dev, 0, msg_obj_no); 710 717 } 711 718 } 712 719 ··· 1107 1112 priv->can.bittiming_const = &c_can_bittiming_const; 1108 1113 priv->can.do_set_mode = c_can_set_mode; 1109 1114 priv->can.do_get_berr_counter = c_can_get_berr_counter; 1110 - priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT | 1111 - CAN_CTRLMODE_LOOPBACK | 1115 + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 1112 1116 CAN_CTRLMODE_LISTENONLY | 1113 1117 CAN_CTRLMODE_BERR_REPORTING; 1114 1118
+5 -4
drivers/net/can/c_can/c_can_platform.c
··· 73 73 void __iomem *addr; 74 74 struct net_device *dev; 75 75 struct c_can_priv *priv; 76 - struct resource *mem, *irq; 76 + struct resource *mem; 77 + int irq; 77 78 #ifdef CONFIG_HAVE_CLK 78 79 struct clk *clk; 79 80 ··· 89 88 90 89 /* get the platform data */ 91 90 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 92 - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 93 - if (!mem || (irq <= 0)) { 91 + irq = platform_get_irq(pdev, 0); 92 + if (!mem || irq <= 0) { 94 93 ret = -ENODEV; 95 94 goto exit_free_clk; 96 95 } ··· 118 117 119 118 priv = netdev_priv(dev); 120 119 121 - dev->irq = irq->start; 120 + dev->irq = irq; 122 121 priv->regs = addr; 123 122 #ifdef CONFIG_HAVE_CLK 124 123 priv->can.clock.freq = clk_get_rate(clk);
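
The c_can_platform change swaps a raw IORESOURCE_IRQ lookup for platform_get_irq(), which hands back the interrupt number itself instead of a struct resource. A minimal sketch of the idiom (my_probe is illustrative):

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* first IRQ of this device */

	if (irq <= 0)			/* missing resource or error code */
		return -ENODEV;

	/* irq can be stored and passed directly to request_irq() */
	return 0;
}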
+10 -4
drivers/net/cxgb3/cxgb3_main.c
··· 1983 1983 { 1984 1984 struct port_info *pi = netdev_priv(dev); 1985 1985 struct adapter *adapter = pi->adapter; 1986 - struct qset_params *qsp = &adapter->params.sge.qset[0]; 1987 - struct sge_qset *qs = &adapter->sge.qs[0]; 1986 + struct qset_params *qsp; 1987 + struct sge_qset *qs; 1988 + int i; 1988 1989 1989 1990 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) 1990 1991 return -EINVAL; 1991 1992 1992 - qsp->coalesce_usecs = c->rx_coalesce_usecs; 1993 - t3_update_qset_coalesce(qs, qsp); 1993 + for (i = 0; i < pi->nqsets; i++) { 1994 + qsp = &adapter->params.sge.qset[i]; 1995 + qs = &adapter->sge.qs[i]; 1996 + qsp->coalesce_usecs = c->rx_coalesce_usecs; 1997 + t3_update_qset_coalesce(qs, qsp); 1998 + } 1999 + 1994 2000 return 0; 1995 2001 } 1996 2002
+4 -4
drivers/net/dm9000.c
··· 621 621 /* change in wol state, update IRQ state */ 622 622 623 623 if (!dm->wake_state) 624 - set_irq_wake(dm->irq_wake, 1); 624 + irq_set_irq_wake(dm->irq_wake, 1); 625 625 else if (dm->wake_state & !opts) 626 - set_irq_wake(dm->irq_wake, 0); 626 + irq_set_irq_wake(dm->irq_wake, 0); 627 627 } 628 628 629 629 dm->wake_state = opts; ··· 1424 1424 } else { 1425 1425 1426 1426 /* test to see if irq is really wakeup capable */ 1427 - ret = set_irq_wake(db->irq_wake, 1); 1427 + ret = irq_set_irq_wake(db->irq_wake, 1); 1428 1428 if (ret) { 1429 1429 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", 1430 1430 db->irq_wake, ret); 1431 1431 ret = 0; 1432 1432 } else { 1433 - set_irq_wake(db->irq_wake, 0); 1433 + irq_set_irq_wake(db->irq_wake, 0); 1434 1434 db->wake_supported = 1; 1435 1435 } 1436 1436 }
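
Beyond the accessor rename, the dm9000 probe path shows a useful trick: testing whether an IRQ is genuinely wakeup-capable by enabling and immediately disabling wake on it. A sketch of that probe with an illustrative helper name:

#include <linux/interrupt.h>

static bool my_irq_can_wake(int irq)
{
	/* a failing enable means the irq chip has no wakeup support */
	if (irq_set_irq_wake(irq, 1))
		return false;
	irq_set_irq_wake(irq, 0);	/* balance the enable again */
	return true;
}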
+16 -14
drivers/net/jme.c
··· 273 273 { 274 274 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); 275 275 pci_set_power_state(jme->pdev, PCI_D0); 276 - pci_enable_wake(jme->pdev, PCI_D0, false); 276 + device_set_wakeup_enable(&jme->pdev->dev, false); 277 277 } 278 278 279 279 static int ··· 2538 2538 2539 2539 jwrite32(jme, JME_PMCS, jme->reg_pmcs); 2540 2540 2541 + device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs); 2542 + 2541 2543 return 0; 2542 2544 } 2543 2545 ··· 3174 3172 } 3175 3173 3176 3174 #ifdef CONFIG_PM 3177 - static int 3178 - jme_suspend(struct pci_dev *pdev, pm_message_t state) 3175 + static int jme_suspend(struct device *dev) 3179 3176 { 3177 + struct pci_dev *pdev = to_pci_dev(dev); 3180 3178 struct net_device *netdev = pci_get_drvdata(pdev); 3181 3179 struct jme_adapter *jme = netdev_priv(netdev); 3182 3180 ··· 3208 3206 tasklet_hi_enable(&jme->rxclean_task); 3209 3207 tasklet_hi_enable(&jme->rxempty_task); 3210 3208 3211 - pci_save_state(pdev); 3212 3209 jme_powersave_phy(jme); 3213 - pci_enable_wake(jme->pdev, PCI_D3hot, true); 3214 - pci_set_power_state(pdev, PCI_D3hot); 3215 3210 3216 3211 return 0; 3217 3212 } 3218 3213 3219 - static int 3220 - jme_resume(struct pci_dev *pdev) 3214 + static int jme_resume(struct device *dev) 3221 3215 { 3216 + struct pci_dev *pdev = to_pci_dev(dev); 3222 3217 struct net_device *netdev = pci_get_drvdata(pdev); 3223 3218 struct jme_adapter *jme = netdev_priv(netdev); 3224 3219 3225 - jme_clear_pm(jme); 3226 - pci_restore_state(pdev); 3220 + jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); 3227 3221 3228 3222 jme_phy_on(jme); 3229 3223 if (test_bit(JME_FLAG_SSET, &jme->flags)) ··· 3236 3238 3237 3239 return 0; 3238 3240 } 3241 + 3242 + static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); 3243 + #define JME_PM_OPS (&jme_pm_ops) 3244 + 3245 + #else 3246 + 3247 + #define JME_PM_OPS NULL 3239 3248 #endif 3240 3249 3241 3250 static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { ··· 3256 3251 .id_table = jme_pci_tbl, 3257 3252 .probe = jme_init_one, 3258 3253 .remove = __devexit_p(jme_remove_one), 3259 - #ifdef CONFIG_PM 3260 - .suspend = jme_suspend, 3261 - .resume = jme_resume, 3262 - #endif /* CONFIG_PM */ 3263 3254 .shutdown = jme_shutdown, 3255 + .driver.pm = JME_PM_OPS, 3264 3256 }; 3265 3257 3266 3258 static int __init
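
The jme conversion is the standard move from the legacy pci_driver .suspend/.resume hooks to struct dev_pm_ops: with SIMPLE_DEV_PM_OPS the PCI core saves and restores config space and handles the D-state transitions, which is why the pci_save_state()/pci_set_power_state() calls disappear from the driver. A minimal sketch of the shape (my_* names are placeholders):

#include <linux/pci.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the hardware only; the core saves state and powers down */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int my_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* reinitialize the hardware; config space is already restored */
	dev_dbg(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	.name = "my_pci_drv",
	.driver.pm = &my_pm_ops,	/* replaces .suspend/.resume */
};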
+1 -1
drivers/net/ksz884x.c
··· 4898 4898 goto unlock; 4899 4899 } 4900 4900 skb_copy_and_csum_dev(org_skb, skb->data); 4901 - org_skb->ip_summed = 0; 4901 + org_skb->ip_summed = CHECKSUM_NONE; 4902 4902 skb->len = org_skb->len; 4903 4903 copy_old_skb(org_skb, skb); 4904 4904 }
+3
drivers/net/mlx4/en_netdev.c
··· 742 742 0, MLX4_PROT_ETH)) 743 743 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 744 744 745 + /* Must redo promiscuous mode setup. */ 746 + priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 747 + 745 748 /* Schedule multicast task to populate multicast list */ 746 749 queue_work(mdev->workqueue, &priv->mcast_task); 747 750
+23 -14
drivers/net/myri10ge/myri10ge.c
··· 1312 1312 * page into an skb */ 1313 1313 1314 1314 static inline int 1315 - myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, 1316 - int bytes, int len, __wsum csum) 1315 + myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum, 1316 + int lro_enabled) 1317 1317 { 1318 1318 struct myri10ge_priv *mgp = ss->mgp; 1319 1319 struct sk_buff *skb; 1320 1320 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; 1321 - int i, idx, hlen, remainder; 1321 + struct myri10ge_rx_buf *rx; 1322 + int i, idx, hlen, remainder, bytes; 1322 1323 struct pci_dev *pdev = mgp->pdev; 1323 1324 struct net_device *dev = mgp->dev; 1324 1325 u8 *va; 1326 + 1327 + if (len <= mgp->small_bytes) { 1328 + rx = &ss->rx_small; 1329 + bytes = mgp->small_bytes; 1330 + } else { 1331 + rx = &ss->rx_big; 1332 + bytes = mgp->big_bytes; 1333 + } 1325 1334 1326 1335 len += MXGEFW_PAD; 1327 1336 idx = rx->cnt & rx->mask; ··· 1350 1341 remainder -= MYRI10GE_ALLOC_SIZE; 1351 1342 } 1352 1343 1353 - if (dev->features & NETIF_F_LRO) { 1344 + if (lro_enabled) { 1354 1345 rx_frags[0].page_offset += MXGEFW_PAD; 1355 1346 rx_frags[0].size -= MXGEFW_PAD; 1356 1347 len -= MXGEFW_PAD; ··· 1472 1463 { 1473 1464 struct myri10ge_rx_done *rx_done = &ss->rx_done; 1474 1465 struct myri10ge_priv *mgp = ss->mgp; 1475 - struct net_device *netdev = mgp->dev; 1466 + 1476 1467 unsigned long rx_bytes = 0; 1477 1468 unsigned long rx_packets = 0; 1478 1469 unsigned long rx_ok; ··· 1483 1474 u16 length; 1484 1475 __wsum checksum; 1485 1476 1477 + /* 1478 + * Prevent compiler from generating more than one ->features memory 1479 + * access to avoid theoretical race condition with functions that 1480 + * change NETIF_F_LRO flag at runtime. 1481 + */ 1482 + bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO; 1483 + 1486 1484 while (rx_done->entry[idx].length != 0 && work_done < budget) { 1487 1485 length = ntohs(rx_done->entry[idx].length); 1488 1486 rx_done->entry[idx].length = 0; 1489 1487 checksum = csum_unfold(rx_done->entry[idx].checksum); 1490 - if (length <= mgp->small_bytes) 1491 - rx_ok = myri10ge_rx_done(ss, &ss->rx_small, 1492 - mgp->small_bytes, 1493 - length, checksum); 1494 - else 1495 - rx_ok = myri10ge_rx_done(ss, &ss->rx_big, 1496 - mgp->big_bytes, 1497 - length, checksum); 1488 + rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled); 1498 1489 rx_packets += rx_ok; 1499 1490 rx_bytes += rx_ok * (unsigned long)length; 1500 1491 cnt++; ··· 1506 1497 ss->stats.rx_packets += rx_packets; 1507 1498 ss->stats.rx_bytes += rx_bytes; 1508 1499 1509 - if (netdev->features & NETIF_F_LRO) 1500 + if (lro_enabled) 1510 1501 lro_flush_all(&rx_done->lro_mgr); 1511 1502 1512 1503 /* restock receive rings if needed */
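
The myri10ge comment explains the point of the snapshot: read dev->features once per poll so the LRO decision cannot change between iterations if another context toggles the flag concurrently. A generic sketch of the ACCESS_ONCE idiom (on later kernels READ_ONCE() is the spelling); the my_* names are illustrative:

#include <linux/compiler.h>
#include <linux/types.h>

#define MY_F_LRO	(1UL << 0)

struct my_dev {
	unsigned long features;		/* may be flipped concurrently */
};

static int my_poll(struct my_dev *d, int budget)
{
	/* one real load; every iteration then sees the same value */
	bool lro = ACCESS_ONCE(d->features) & MY_F_LRO;
	int done;

	for (done = 0; done < budget; done++) {
		if (lro) {
			/* ... aggregate into the LRO manager ... */
		} else {
			/* ... plain receive path ... */
		}
	}
	return done;
}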
+1 -1
drivers/net/netxen/netxen_nic_ethtool.c
··· 871 871 struct netxen_adapter *adapter = netdev_priv(netdev); 872 872 int hw_lro; 873 873 874 - if (data & ~ETH_FLAG_LRO) 874 + if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) 875 875 return -EINVAL; 876 876 877 877 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
+1 -1
drivers/net/qlcnic/qlcnic_ethtool.c
··· 1003 1003 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1004 1004 int hw_lro; 1005 1005 1006 - if (data & ~ETH_FLAG_LRO) 1006 + if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) 1007 1007 return -EINVAL; 1008 1008 1009 1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+1 -1
drivers/net/s2io.c
··· 6726 6726 int rc = 0; 6727 6727 int changed = 0; 6728 6728 6729 - if (data & ~ETH_FLAG_LRO) 6729 + if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO)) 6730 6730 return -EINVAL; 6731 6731 6732 6732 if (data & ETH_FLAG_LRO) {
+3 -3
drivers/net/tg3.c
··· 48 48 #include <net/ip.h> 49 49 50 50 #include <asm/system.h> 51 - #include <asm/io.h> 51 + #include <linux/io.h> 52 52 #include <asm/byteorder.h> 53 - #include <asm/uaccess.h> 53 + #include <linux/uaccess.h> 54 54 55 55 #ifdef CONFIG_SPARC 56 56 #include <asm/idprom.h> ··· 13118 13118 13119 13119 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 13120 13120 13121 - static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13121 + static inline void vlan_features_add(struct net_device *dev, unsigned long flags) 13122 13122 { 13123 13123 dev->vlan_features |= flags; 13124 13124 }
+2 -2
drivers/net/vmxnet3/vmxnet3_ethtool.c
··· 304 304 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; 305 305 unsigned long flags; 306 306 307 - if (data & ~ETH_FLAG_LRO) 308 - return -EOPNOTSUPP; 307 + if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) 308 + return -EINVAL; 309 309 310 310 if (lro_requested ^ lro_present) { 311 311 /* toggle the LRO feature*/
+2 -2
drivers/net/vxge/vxge-ethtool.c
··· 1117 1117 struct vxgedev *vdev = netdev_priv(dev); 1118 1118 enum vxge_hw_status status; 1119 1119 1120 - if (data & ~ETH_FLAG_RXHASH) 1121 - return -EOPNOTSUPP; 1120 + if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH)) 1121 + return -EINVAL; 1122 1122 1123 1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) 1124 1124 return 0;
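
The netxen, qlcnic, s2io, vmxnet3 and vxge hunks all make the same substitution: ethtool_invalid_flags() checks the requested bits against the supported mask and standardizes the error to -EINVAL. A sketch of a set_flags handler built on it (my_set_flags is an illustrative name):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int my_set_flags(struct net_device *dev, u32 data)
{
	/* rejects any bit outside the supported set (here: LRO only) */
	if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
		return -EINVAL;

	if (data & ETH_FLAG_LRO)
		dev->features |= NETIF_F_LRO;
	else
		dev->features &= ~NETIF_F_LRO;

	return 0;
}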
+1 -2
drivers/net/wireless/p54/p54spi.c
··· 649 649 goto err_free_common; 650 650 } 651 651 652 - set_irq_type(gpio_to_irq(p54spi_gpio_irq), 653 - IRQ_TYPE_EDGE_RISING); 652 + irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); 654 653 655 654 disable_irq(gpio_to_irq(p54spi_gpio_irq)); 656 655
+1 -1
drivers/net/wireless/wl1251/sdio.c
··· 265 265 goto disable; 266 266 } 267 267 268 - set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 268 + irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 269 269 disable_irq(wl->irq); 270 270 271 271 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+1 -1
drivers/net/wireless/wl1251/spi.c
··· 286 286 goto out_free; 287 287 } 288 288 289 - set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 289 + irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 290 290 291 291 disable_irq(wl->irq); 292 292
+1 -1
drivers/parisc/eisa.c
··· 340 340 /* Reserve IRQ2 */ 341 341 setup_irq(2, &irq2_action); 342 342 for (i = 0; i < 16; i++) { 343 - set_irq_chip_and_handler(i, &eisa_interrupt_type, 343 + irq_set_chip_and_handler(i, &eisa_interrupt_type, 344 344 handle_simple_irq); 345 345 } 346 346
+2 -2
drivers/parisc/gsc.c
··· 152 152 if (irq > GSC_IRQ_MAX) 153 153 return NO_IRQ; 154 154 155 - set_irq_chip_and_handler(irq, type, handle_simple_irq); 156 - set_irq_chip_data(irq, data); 155 + irq_set_chip_and_handler(irq, type, handle_simple_irq); 156 + irq_set_chip_data(irq, data); 157 157 158 158 return irq++; 159 159 }
+2 -1
drivers/parisc/superio.c
··· 355 355 #endif 356 356 357 357 for (i = 0; i < 16; i++) { 358 - set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq); 358 + irq_set_chip_and_handler(i, &superio_interrupt_type, 359 + handle_simple_irq); 359 360 } 360 361 361 362 /*
+6 -6
drivers/pci/dmar.c
··· 1226 1226 1227 1227 void dmar_msi_unmask(struct irq_data *data) 1228 1228 { 1229 - struct intel_iommu *iommu = irq_data_get_irq_data(data); 1229 + struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); 1230 1230 unsigned long flag; 1231 1231 1232 1232 /* unmask it */ ··· 1240 1240 void dmar_msi_mask(struct irq_data *data) 1241 1241 { 1242 1242 unsigned long flag; 1243 - struct intel_iommu *iommu = irq_data_get_irq_data(data); 1243 + struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); 1244 1244 1245 1245 /* mask it */ 1246 1246 spin_lock_irqsave(&iommu->register_lock, flag); ··· 1252 1252 1253 1253 void dmar_msi_write(int irq, struct msi_msg *msg) 1254 1254 { 1255 - struct intel_iommu *iommu = get_irq_data(irq); 1255 + struct intel_iommu *iommu = irq_get_handler_data(irq); 1256 1256 unsigned long flag; 1257 1257 1258 1258 spin_lock_irqsave(&iommu->register_lock, flag); ··· 1264 1264 1265 1265 void dmar_msi_read(int irq, struct msi_msg *msg) 1266 1266 { 1267 - struct intel_iommu *iommu = get_irq_data(irq); 1267 + struct intel_iommu *iommu = irq_get_handler_data(irq); 1268 1268 unsigned long flag; 1269 1269 1270 1270 spin_lock_irqsave(&iommu->register_lock, flag); ··· 1382 1382 return -EINVAL; 1383 1383 } 1384 1384 1385 - set_irq_data(irq, iommu); 1385 + irq_set_handler_data(irq, iommu); 1386 1386 iommu->irq = irq; 1387 1387 1388 1388 ret = arch_setup_dmar_msi(irq); 1389 1389 if (ret) { 1390 - set_irq_data(irq, NULL); 1390 + irq_set_handler_data(irq, NULL); 1391 1391 iommu->irq = 0; 1392 1392 destroy_irq(irq); 1393 1393 return ret;
+8 -8
drivers/pci/htirq.c
··· 34 34 35 35 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) 36 36 { 37 - struct ht_irq_cfg *cfg = get_irq_data(irq); 37 + struct ht_irq_cfg *cfg = irq_get_handler_data(irq); 38 38 unsigned long flags; 39 39 spin_lock_irqsave(&ht_irq_lock, flags); 40 40 if (cfg->msg.address_lo != msg->address_lo) { ··· 53 53 54 54 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) 55 55 { 56 - struct ht_irq_cfg *cfg = get_irq_data(irq); 56 + struct ht_irq_cfg *cfg = irq_get_handler_data(irq); 57 57 *msg = cfg->msg; 58 58 } 59 59 60 60 void mask_ht_irq(struct irq_data *data) 61 61 { 62 - struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); 62 + struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); 63 63 struct ht_irq_msg msg = cfg->msg; 64 64 65 65 msg.address_lo |= 1; ··· 68 68 69 69 void unmask_ht_irq(struct irq_data *data) 70 70 { 71 - struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); 71 + struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); 72 72 struct ht_irq_msg msg = cfg->msg; 73 73 74 74 msg.address_lo &= ~1; ··· 126 126 kfree(cfg); 127 127 return -EBUSY; 128 128 } 129 - set_irq_data(irq, cfg); 129 + irq_set_handler_data(irq, cfg); 130 130 131 131 if (arch_setup_ht_irq(irq, dev) < 0) { 132 132 ht_destroy_irq(irq); ··· 162 162 { 163 163 struct ht_irq_cfg *cfg; 164 164 165 - cfg = get_irq_data(irq); 166 - set_irq_chip(irq, NULL); 167 - set_irq_data(irq, NULL); 165 + cfg = irq_get_handler_data(irq); 166 + irq_set_chip(irq, NULL); 167 + irq_set_handler_data(irq, NULL); 168 168 destroy_irq(irq); 169 169 170 170 kfree(cfg);
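
Most hunks from here on are the mechanical genirq rename: the old set_irq_*/get_irq_* accessors become irq_set_*/irq_get_*, with the ambiguous "data" split into handler_data, chip_data and msi_desc. A compact sketch of the new spellings (the cookie layout is illustrative):

#include <linux/irq.h>

static void my_setup(unsigned int irq, struct irq_chip *chip, void *cookie)
{
	/* formerly set_irq_chip_and_handler() and set_irq_data() */
	irq_set_chip_and_handler(irq, chip, handle_level_irq);
	irq_set_handler_data(irq, cookie);
}

static void *my_cookie(struct irq_data *d)
{
	/* formerly irq_data_get_irq_data() */
	return irq_data_get_irq_handler_data(d);
}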
+1 -1
drivers/pci/intel-iommu.c
··· 1206 1206 iommu_disable_translation(iommu); 1207 1207 1208 1208 if (iommu->irq) { 1209 - set_irq_data(iommu->irq, NULL); 1209 + irq_set_handler_data(iommu->irq, NULL); 1210 1210 /* This will mask the irq */ 1211 1211 free_irq(iommu->irq, iommu); 1212 1212 destroy_irq(iommu->irq);
+1 -1
drivers/pci/intr_remapping.c
··· 50 50 51 51 static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 52 52 { 53 - struct irq_cfg *cfg = get_irq_chip_data(irq); 53 + struct irq_cfg *cfg = irq_get_chip_data(irq); 54 54 return cfg ? &cfg->irq_2_iommu : NULL; 55 55 } 56 56
+5 -5
drivers/pci/msi.c
··· 236 236 237 237 void read_msi_msg(unsigned int irq, struct msi_msg *msg) 238 238 { 239 - struct msi_desc *entry = get_irq_msi(irq); 239 + struct msi_desc *entry = irq_get_msi_desc(irq); 240 240 241 241 __read_msi_msg(entry, msg); 242 242 } ··· 253 253 254 254 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) 255 255 { 256 - struct msi_desc *entry = get_irq_msi(irq); 256 + struct msi_desc *entry = irq_get_msi_desc(irq); 257 257 258 258 __get_cached_msi_msg(entry, msg); 259 259 } ··· 297 297 298 298 void write_msi_msg(unsigned int irq, struct msi_msg *msg) 299 299 { 300 - struct msi_desc *entry = get_irq_msi(irq); 300 + struct msi_desc *entry = irq_get_msi_desc(irq); 301 301 302 302 __write_msi_msg(entry, msg); 303 303 } ··· 354 354 if (!dev->msi_enabled) 355 355 return; 356 356 357 - entry = get_irq_msi(dev->irq); 357 + entry = irq_get_msi_desc(dev->irq); 358 358 pos = entry->msi_attrib.pos; 359 359 360 360 pci_intx_for_msi(dev, 0); ··· 519 519 PCI_MSIX_ENTRY_VECTOR_CTRL; 520 520 521 521 entries[i].vector = entry->irq; 522 - set_irq_msi(entry->irq, entry); 522 + irq_set_msi_desc(entry->irq, entry); 523 523 entry->masked = readl(entry->mask_base + offset); 524 524 msix_mask_irq(entry, 1); 525 525 i++;
+1 -1
drivers/pcmcia/bfin_cf_pcmcia.c
··· 235 235 cf->irq = irq; 236 236 cf->socket.pci_irq = irq; 237 237 238 - set_irq_type(irq, IRQF_TRIGGER_LOW); 238 + irq_set_irq_type(irq, IRQF_TRIGGER_LOW); 239 239 240 240 io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 241 241 attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+1 -1
drivers/pcmcia/db1xxx_ss.c
··· 181 181 /* all other (older) Db1x00 boards use a GPIO to show 182 182 * card detection status: use both-edge triggers. 183 183 */ 184 - set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); 184 + irq_set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); 185 185 ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq, 186 186 0, "pcmcia_carddetect", sock); 187 187
+50 -85
drivers/pcmcia/pxa2xx_colibri.c
··· 34 34 #define COLIBRI320_DETECT_GPIO 81 35 35 #define COLIBRI320_READY_GPIO 29 36 36 37 - static struct { 38 - int reset_gpio; 39 - int ppen_gpio; 40 - int bvd1_gpio; 41 - int bvd2_gpio; 42 - int detect_gpio; 43 - int ready_gpio; 44 - } colibri_pcmcia_gpio; 37 + enum { 38 + DETECT = 0, 39 + READY = 1, 40 + BVD1 = 2, 41 + BVD2 = 3, 42 + PPEN = 4, 43 + RESET = 5, 44 + }; 45 + 46 + /* Contents of this array are configured on-the-fly in init function */ 47 + static struct gpio colibri_pcmcia_gpios[] = { 48 + { 0, GPIOF_IN, "PCMCIA Detect" }, 49 + { 0, GPIOF_IN, "PCMCIA Ready" }, 50 + { 0, GPIOF_IN, "PCMCIA BVD1" }, 51 + { 0, GPIOF_IN, "PCMCIA BVD2" }, 52 + { 0, GPIOF_INIT_LOW, "PCMCIA PPEN" }, 53 + { 0, GPIOF_INIT_HIGH,"PCMCIA Reset" }, 54 + }; 45 55 46 56 static struct pcmcia_irqs colibri_irqs[] = { 47 57 { ··· 64 54 { 65 55 int ret; 66 56 67 - ret = gpio_request(colibri_pcmcia_gpio.detect_gpio, "DETECT"); 57 + ret = gpio_request_array(colibri_pcmcia_gpios, 58 + ARRAY_SIZE(colibri_pcmcia_gpios)); 68 59 if (ret) 69 60 goto err1; 70 - ret = gpio_direction_input(colibri_pcmcia_gpio.detect_gpio); 71 - if (ret) 72 - goto err2; 73 61 74 - ret = gpio_request(colibri_pcmcia_gpio.ready_gpio, "READY"); 75 - if (ret) 76 - goto err2; 77 - ret = gpio_direction_input(colibri_pcmcia_gpio.ready_gpio); 78 - if (ret) 79 - goto err3; 62 + colibri_irqs[0].irq = gpio_to_irq(colibri_pcmcia_gpios[DETECT].gpio); 63 + skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpios[READY].gpio); 80 64 81 - ret = gpio_request(colibri_pcmcia_gpio.bvd1_gpio, "BVD1"); 82 - if (ret) 83 - goto err3; 84 - ret = gpio_direction_input(colibri_pcmcia_gpio.bvd1_gpio); 85 - if (ret) 86 - goto err4; 87 - 88 - ret = gpio_request(colibri_pcmcia_gpio.bvd2_gpio, "BVD2"); 89 - if (ret) 90 - goto err4; 91 - ret = gpio_direction_input(colibri_pcmcia_gpio.bvd2_gpio); 92 - if (ret) 93 - goto err5; 94 - 95 - ret = gpio_request(colibri_pcmcia_gpio.ppen_gpio, "PPEN"); 96 - if (ret) 97 - goto err5; 98 - ret = gpio_direction_output(colibri_pcmcia_gpio.ppen_gpio, 0); 99 - if (ret) 100 - goto err6; 101 - 102 - ret = gpio_request(colibri_pcmcia_gpio.reset_gpio, "RESET"); 103 - if (ret) 104 - goto err6; 105 - ret = gpio_direction_output(colibri_pcmcia_gpio.reset_gpio, 1); 106 - if (ret) 107 - goto err7; 108 - 109 - colibri_irqs[0].irq = gpio_to_irq(colibri_pcmcia_gpio.detect_gpio); 110 - skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpio.ready_gpio); 111 - 112 - return soc_pcmcia_request_irqs(skt, colibri_irqs, 65 + ret = soc_pcmcia_request_irqs(skt, colibri_irqs, 113 66 ARRAY_SIZE(colibri_irqs)); 67 + if (ret) 68 + goto err2; 114 69 115 - err7: 116 - gpio_free(colibri_pcmcia_gpio.detect_gpio); 117 - err6: 118 - gpio_free(colibri_pcmcia_gpio.ready_gpio); 119 - err5: 120 - gpio_free(colibri_pcmcia_gpio.bvd1_gpio); 121 - err4: 122 - gpio_free(colibri_pcmcia_gpio.bvd2_gpio); 123 - err3: 124 - gpio_free(colibri_pcmcia_gpio.reset_gpio); 70 + return ret; 71 + 125 72 err2: 126 - gpio_free(colibri_pcmcia_gpio.ppen_gpio); 73 + gpio_free_array(colibri_pcmcia_gpios, 74 + ARRAY_SIZE(colibri_pcmcia_gpios)); 127 75 err1: 128 76 return ret; 129 77 } 130 78 131 79 static void colibri_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 132 80 { 133 - gpio_free(colibri_pcmcia_gpio.detect_gpio); 134 - gpio_free(colibri_pcmcia_gpio.ready_gpio); 135 - gpio_free(colibri_pcmcia_gpio.bvd1_gpio); 136 - gpio_free(colibri_pcmcia_gpio.bvd2_gpio); 137 - gpio_free(colibri_pcmcia_gpio.reset_gpio); 138 - gpio_free(colibri_pcmcia_gpio.ppen_gpio); 81 + 
gpio_free_array(colibri_pcmcia_gpios, 82 + ARRAY_SIZE(colibri_pcmcia_gpios)); 139 83 } 140 84 141 85 static void colibri_pcmcia_socket_state(struct soc_pcmcia_socket *skt, 142 86 struct pcmcia_state *state) 143 87 { 144 88 145 - state->detect = !!gpio_get_value(colibri_pcmcia_gpio.detect_gpio); 146 - state->ready = !!gpio_get_value(colibri_pcmcia_gpio.ready_gpio); 147 - state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpio.bvd1_gpio); 148 - state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpio.bvd2_gpio); 89 + state->detect = !!gpio_get_value(colibri_pcmcia_gpios[DETECT].gpio); 90 + state->ready = !!gpio_get_value(colibri_pcmcia_gpios[READY].gpio); 91 + state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpios[BVD1].gpio); 92 + state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpios[BVD2].gpio); 149 93 state->wrprot = 0; 150 94 state->vs_3v = 1; 151 95 state->vs_Xv = 0; ··· 109 145 colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, 110 146 const socket_state_t *state) 111 147 { 112 - gpio_set_value(colibri_pcmcia_gpio.ppen_gpio, 148 + gpio_set_value(colibri_pcmcia_gpios[PPEN].gpio, 113 149 !(state->Vcc == 33 && state->Vpp < 50)); 114 - gpio_set_value(colibri_pcmcia_gpio.reset_gpio, state->flags & SS_RESET); 150 + gpio_set_value(colibri_pcmcia_gpios[RESET].gpio, 151 + state->flags & SS_RESET); 115 152 return 0; 116 153 } 117 154 ··· 155 190 156 191 /* Colibri PXA270 */ 157 192 if (machine_is_colibri()) { 158 - colibri_pcmcia_gpio.reset_gpio = COLIBRI270_RESET_GPIO; 159 - colibri_pcmcia_gpio.ppen_gpio = COLIBRI270_PPEN_GPIO; 160 - colibri_pcmcia_gpio.bvd1_gpio = COLIBRI270_BVD1_GPIO; 161 - colibri_pcmcia_gpio.bvd2_gpio = COLIBRI270_BVD2_GPIO; 162 - colibri_pcmcia_gpio.detect_gpio = COLIBRI270_DETECT_GPIO; 163 - colibri_pcmcia_gpio.ready_gpio = COLIBRI270_READY_GPIO; 193 + colibri_pcmcia_gpios[RESET].gpio = COLIBRI270_RESET_GPIO; 194 + colibri_pcmcia_gpios[PPEN].gpio = COLIBRI270_PPEN_GPIO; 195 + colibri_pcmcia_gpios[BVD1].gpio = COLIBRI270_BVD1_GPIO; 196 + colibri_pcmcia_gpios[BVD2].gpio = COLIBRI270_BVD2_GPIO; 197 + colibri_pcmcia_gpios[DETECT].gpio = COLIBRI270_DETECT_GPIO; 198 + colibri_pcmcia_gpios[READY].gpio = COLIBRI270_READY_GPIO; 164 199 /* Colibri PXA320 */ 165 200 } else if (machine_is_colibri320()) { 166 - colibri_pcmcia_gpio.reset_gpio = COLIBRI320_RESET_GPIO; 167 - colibri_pcmcia_gpio.ppen_gpio = COLIBRI320_PPEN_GPIO; 168 - colibri_pcmcia_gpio.bvd1_gpio = COLIBRI320_BVD1_GPIO; 169 - colibri_pcmcia_gpio.bvd2_gpio = COLIBRI320_BVD2_GPIO; 170 - colibri_pcmcia_gpio.detect_gpio = COLIBRI320_DETECT_GPIO; 171 - colibri_pcmcia_gpio.ready_gpio = COLIBRI320_READY_GPIO; 201 + colibri_pcmcia_gpios[RESET].gpio = COLIBRI320_RESET_GPIO; 202 + colibri_pcmcia_gpios[PPEN].gpio = COLIBRI320_PPEN_GPIO; 203 + colibri_pcmcia_gpios[BVD1].gpio = COLIBRI320_BVD1_GPIO; 204 + colibri_pcmcia_gpios[BVD2].gpio = COLIBRI320_BVD2_GPIO; 205 + colibri_pcmcia_gpios[DETECT].gpio = COLIBRI320_DETECT_GPIO; 206 + colibri_pcmcia_gpios[READY].gpio = COLIBRI320_READY_GPIO; 172 207 } 173 208 174 209 ret = platform_device_add_data(colibri_pcmcia_device,
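
The PCMCIA board files in this section all collapse long gpio_request()/gpio_direction_*() error ladders into one table of struct gpio claimed by gpio_request_array(), which configures each pin from its flags and unwinds automatically on failure. A minimal sketch (GPIO numbers and labels are made up):

#include <linux/gpio.h>

static struct gpio my_gpios[] = {
	{ 10, GPIOF_IN,        "card detect"  },	/* input */
	{ 11, GPIOF_INIT_LOW,  "power enable" },	/* output, starts low */
	{ 12, GPIOF_INIT_HIGH, "reset"        },	/* output, starts high */
};

static int my_hw_init(void)
{
	/* claims and configures every pin, or frees the ones it got */
	return gpio_request_array(my_gpios, ARRAY_SIZE(my_gpios));
}

static void my_hw_shutdown(void)
{
	gpio_free_array(my_gpios, ARRAY_SIZE(my_gpios));
}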
+10 -32
drivers/pcmcia/pxa2xx_palmld.c
··· 4 4 * Driver for Palm LifeDrive PCMCIA 5 5 * 6 6 * Copyright (C) 2006 Alex Osborne <ato@meshy.org> 7 - * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com> 7 + * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com> 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License version 2 as ··· 20 20 #include <mach/palmld.h> 21 21 #include "soc_common.h" 22 22 23 + static struct gpio palmld_pcmcia_gpios[] = { 24 + { GPIO_NR_PALMLD_PCMCIA_POWER, GPIOF_INIT_LOW, "PCMCIA Power" }, 25 + { GPIO_NR_PALMLD_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" }, 26 + { GPIO_NR_PALMLD_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" }, 27 + }; 28 + 23 29 static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 24 30 { 25 31 int ret; 26 32 27 - ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_POWER, "PCMCIA PWR"); 28 - if (ret) 29 - goto err1; 30 - ret = gpio_direction_output(GPIO_NR_PALMLD_PCMCIA_POWER, 0); 31 - if (ret) 32 - goto err2; 33 - 34 - ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_RESET, "PCMCIA RST"); 35 - if (ret) 36 - goto err2; 37 - ret = gpio_direction_output(GPIO_NR_PALMLD_PCMCIA_RESET, 1); 38 - if (ret) 39 - goto err3; 40 - 41 - ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_READY, "PCMCIA RDY"); 42 - if (ret) 43 - goto err3; 44 - ret = gpio_direction_input(GPIO_NR_PALMLD_PCMCIA_READY); 45 - if (ret) 46 - goto err4; 33 + ret = gpio_request_array(palmld_pcmcia_gpios, 34 + ARRAY_SIZE(palmld_pcmcia_gpios)); 47 35 48 36 skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); 49 - return 0; 50 37 51 - err4: 52 - gpio_free(GPIO_NR_PALMLD_PCMCIA_READY); 53 - err3: 54 - gpio_free(GPIO_NR_PALMLD_PCMCIA_RESET); 55 - err2: 56 - gpio_free(GPIO_NR_PALMLD_PCMCIA_POWER); 57 - err1: 58 38 return ret; 59 39 } 60 40 61 41 static void palmld_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 62 42 { 63 - gpio_free(GPIO_NR_PALMLD_PCMCIA_READY); 64 - gpio_free(GPIO_NR_PALMLD_PCMCIA_RESET); 65 - gpio_free(GPIO_NR_PALMLD_PCMCIA_POWER); 43 + gpio_free_array(palmld_pcmcia_gpios, ARRAY_SIZE(palmld_pcmcia_gpios)); 66 44 } 67 45 68 46 static void palmld_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+13 -62
drivers/pcmcia/pxa2xx_palmtc.c
··· 4 4 * Driver for Palm Tungsten|C PCMCIA 5 5 * 6 6 * Copyright (C) 2008 Alex Osborne <ato@meshy.org> 7 - * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com> 7 + * Copyright (C) 2009-2011 Marek Vasut <marek.vasut@gmail.com> 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License version 2 as ··· 21 21 #include <mach/palmtc.h> 22 22 #include "soc_common.h" 23 23 24 + static struct gpio palmtc_pcmcia_gpios[] = { 25 + { GPIO_NR_PALMTC_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" }, 26 + { GPIO_NR_PALMTC_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" }, 27 + { GPIO_NR_PALMTC_PCMCIA_POWER3, GPIOF_INIT_LOW, "PCMCIA Power 3" }, 28 + { GPIO_NR_PALMTC_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" }, 29 + { GPIO_NR_PALMTC_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" }, 30 + { GPIO_NR_PALMTC_PCMCIA_PWRREADY, GPIOF_IN, "PCMCIA Power Ready" }, 31 + }; 32 + 24 33 static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 25 34 { 26 35 int ret; 27 36 28 - ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER1, "PCMCIA PWR1"); 29 - if (ret) 30 - goto err1; 31 - ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER1, 0); 32 - if (ret) 33 - goto err2; 34 - 35 - ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER2, "PCMCIA PWR2"); 36 - if (ret) 37 - goto err2; 38 - ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER2, 0); 39 - if (ret) 40 - goto err3; 41 - 42 - ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER3, "PCMCIA PWR3"); 43 - if (ret) 44 - goto err3; 45 - ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER3, 0); 46 - if (ret) 47 - goto err4; 48 - 49 - ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_RESET, "PCMCIA RST"); 50 - if (ret) 51 - goto err4; 52 - ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_RESET, 1); 53 - if (ret) 54 - goto err5; 55 - 56 - ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_READY, "PCMCIA RDY"); 57 - if (ret) 58 - goto err5; 59 - ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_READY); 60 - if (ret) 61 - goto err6; 62 - 63 - ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_PWRREADY, "PCMCIA PWRRDY"); 64 - if (ret) 65 - goto err6; 66 - ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_PWRREADY); 67 - if (ret) 68 - goto err7; 37 + ret = gpio_request_array(palmtc_pcmcia_gpios, 38 + ARRAY_SIZE(palmtc_pcmcia_gpios)); 69 39 70 40 skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY); 71 - return 0; 72 41 73 - err7: 74 - gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY); 75 - err6: 76 - gpio_free(GPIO_NR_PALMTC_PCMCIA_READY); 77 - err5: 78 - gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET); 79 - err4: 80 - gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3); 81 - err3: 82 - gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2); 83 - err2: 84 - gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1); 85 - err1: 86 42 return ret; 87 43 } 88 44 89 45 static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 90 46 { 91 - gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY); 92 - gpio_free(GPIO_NR_PALMTC_PCMCIA_READY); 93 - gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET); 94 - gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3); 95 - gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2); 96 - gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1); 47 + gpio_free_array(palmtc_pcmcia_gpios, ARRAY_SIZE(palmtc_pcmcia_gpios)); 97 48 } 98 49 99 50 static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+12 -45
drivers/pcmcia/pxa2xx_palmtx.c
··· 3 3 * 4 4 * Driver for Palm T|X PCMCIA 5 5 * 6 - * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com> 6 + * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com> 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify 9 9 * it under the terms of the GNU General Public License version 2 as ··· 13 13 14 14 #include <linux/module.h> 15 15 #include <linux/platform_device.h> 16 + #include <linux/gpio.h> 16 17 17 18 #include <asm/mach-types.h> 18 - 19 - #include <mach/gpio.h> 20 19 #include <mach/palmtx.h> 21 - 22 20 #include "soc_common.h" 21 + 22 + static struct gpio palmtx_pcmcia_gpios[] = { 23 + { GPIO_NR_PALMTX_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" }, 24 + { GPIO_NR_PALMTX_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" }, 25 + { GPIO_NR_PALMTX_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" }, 26 + { GPIO_NR_PALMTX_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" }, 27 + }; 23 28 24 29 static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 25 30 { 26 31 int ret; 27 32 28 - ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1"); 29 - if (ret) 30 - goto err1; 31 - ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER1, 0); 32 - if (ret) 33 - goto err2; 34 - 35 - ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2"); 36 - if (ret) 37 - goto err2; 38 - ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER2, 0); 39 - if (ret) 40 - goto err3; 41 - 42 - ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_RESET, "PCMCIA RST"); 43 - if (ret) 44 - goto err3; 45 - ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_RESET, 1); 46 - if (ret) 47 - goto err4; 48 - 49 - ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_READY, "PCMCIA RDY"); 50 - if (ret) 51 - goto err4; 52 - ret = gpio_direction_input(GPIO_NR_PALMTX_PCMCIA_READY); 53 - if (ret) 54 - goto err5; 33 + ret = gpio_request_array(palmtx_pcmcia_gpios, 34 + ARRAY_SIZE(palmtx_pcmcia_gpios)); 55 35 56 36 skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); 57 - return 0; 58 37 59 - err5: 60 - gpio_free(GPIO_NR_PALMTX_PCMCIA_READY); 61 - err4: 62 - gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET); 63 - err3: 64 - gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2); 65 - err2: 66 - gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1); 67 - err1: 68 38 return ret; 69 39 } 70 40 71 41 static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 72 42 { 73 - gpio_free(GPIO_NR_PALMTX_PCMCIA_READY); 74 - gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET); 75 - gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2); 76 - gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1); 43 + gpio_free_array(palmtx_pcmcia_gpios, ARRAY_SIZE(palmtx_pcmcia_gpios)); 77 44 } 78 45 79 46 static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+30 -80
drivers/pcmcia/pxa2xx_vpac270.c
··· 3 3 * 4 4 * Driver for Voipac PXA270 PCMCIA and CF sockets 5 5 * 6 - * Copyright (C) 2010 7 - * Marek Vasut <marek.vasut@gmail.com> 6 + * Copyright (C) 2010-2011 Marek Vasut <marek.vasut@gmail.com> 8 7 * 9 8 * This program is free software; you can redistribute it and/or modify 10 9 * it under the terms of the GNU General Public License version 2 as ··· 20 21 #include <mach/vpac270.h> 21 22 22 23 #include "soc_common.h" 24 + 25 + static struct gpio vpac270_pcmcia_gpios[] = { 26 + { GPIO84_VPAC270_PCMCIA_CD, GPIOF_IN, "PCMCIA Card Detect" }, 27 + { GPIO35_VPAC270_PCMCIA_RDY, GPIOF_IN, "PCMCIA Ready" }, 28 + { GPIO107_VPAC270_PCMCIA_PPEN, GPIOF_INIT_LOW, "PCMCIA PPEN" }, 29 + { GPIO11_VPAC270_PCMCIA_RESET, GPIOF_INIT_LOW, "PCMCIA Reset" }, 30 + }; 31 + 32 + static struct gpio vpac270_cf_gpios[] = { 33 + { GPIO17_VPAC270_CF_CD, GPIOF_IN, "CF Card Detect" }, 34 + { GPIO12_VPAC270_CF_RDY, GPIOF_IN, "CF Ready" }, 35 + { GPIO16_VPAC270_CF_RESET, GPIOF_INIT_LOW, "CF Reset" }, 36 + }; 23 37 24 38 static struct pcmcia_irqs cd_irqs[] = { 25 39 { ··· 52 40 int ret; 53 41 54 42 if (skt->nr == 0) { 55 - ret = gpio_request(GPIO84_VPAC270_PCMCIA_CD, "PCMCIA CD"); 56 - if (ret) 57 - goto err1; 58 - ret = gpio_direction_input(GPIO84_VPAC270_PCMCIA_CD); 59 - if (ret) 60 - goto err2; 61 - 62 - ret = gpio_request(GPIO35_VPAC270_PCMCIA_RDY, "PCMCIA RDY"); 63 - if (ret) 64 - goto err2; 65 - ret = gpio_direction_input(GPIO35_VPAC270_PCMCIA_RDY); 66 - if (ret) 67 - goto err3; 68 - 69 - ret = gpio_request(GPIO107_VPAC270_PCMCIA_PPEN, "PCMCIA PPEN"); 70 - if (ret) 71 - goto err3; 72 - ret = gpio_direction_output(GPIO107_VPAC270_PCMCIA_PPEN, 0); 73 - if (ret) 74 - goto err4; 75 - 76 - ret = gpio_request(GPIO11_VPAC270_PCMCIA_RESET, "PCMCIA RESET"); 77 - if (ret) 78 - goto err4; 79 - ret = gpio_direction_output(GPIO11_VPAC270_PCMCIA_RESET, 0); 80 - if (ret) 81 - goto err5; 43 + ret = gpio_request_array(vpac270_pcmcia_gpios, 44 + ARRAY_SIZE(vpac270_pcmcia_gpios)); 82 45 83 46 skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY); 84 47 85 - return soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1); 86 - 87 - err5: 88 - gpio_free(GPIO11_VPAC270_PCMCIA_RESET); 89 - err4: 90 - gpio_free(GPIO107_VPAC270_PCMCIA_PPEN); 91 - err3: 92 - gpio_free(GPIO35_VPAC270_PCMCIA_RDY); 93 - err2: 94 - gpio_free(GPIO84_VPAC270_PCMCIA_CD); 95 - err1: 96 - return ret; 97 - 48 + if (!ret) 49 + ret = soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1); 98 50 } else { 99 - ret = gpio_request(GPIO17_VPAC270_CF_CD, "CF CD"); 100 - if (ret) 101 - goto err6; 102 - ret = gpio_direction_input(GPIO17_VPAC270_CF_CD); 103 - if (ret) 104 - goto err7; 105 - 106 - ret = gpio_request(GPIO12_VPAC270_CF_RDY, "CF RDY"); 107 - if (ret) 108 - goto err7; 109 - ret = gpio_direction_input(GPIO12_VPAC270_CF_RDY); 110 - if (ret) 111 - goto err8; 112 - 113 - ret = gpio_request(GPIO16_VPAC270_CF_RESET, "CF RESET"); 114 - if (ret) 115 - goto err8; 116 - ret = gpio_direction_output(GPIO16_VPAC270_CF_RESET, 0); 117 - if (ret) 118 - goto err9; 51 + ret = gpio_request_array(vpac270_cf_gpios, 52 + ARRAY_SIZE(vpac270_cf_gpios)); 119 53 120 54 skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY); 121 55 122 - return soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1); 123 - 124 - err9: 125 - gpio_free(GPIO16_VPAC270_CF_RESET); 126 - err8: 127 - gpio_free(GPIO12_VPAC270_CF_RDY); 128 - err7: 129 - gpio_free(GPIO17_VPAC270_CF_CD); 130 - err6: 131 - return ret; 132 - 56 + if (!ret) 57 + ret = soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1); 133 58 } 59 + 60 + return ret; 134 61 } 135 62 
136 63 static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 137 64 { 138 - gpio_free(GPIO11_VPAC270_PCMCIA_RESET); 139 - gpio_free(GPIO107_VPAC270_PCMCIA_PPEN); 140 - gpio_free(GPIO35_VPAC270_PCMCIA_RDY); 141 - gpio_free(GPIO84_VPAC270_PCMCIA_CD); 142 - gpio_free(GPIO16_VPAC270_CF_RESET); 143 - gpio_free(GPIO12_VPAC270_CF_RDY); 144 - gpio_free(GPIO17_VPAC270_CF_CD); 65 + if (skt->nr == 0) 66 + gpio_free_array(vpac270_pcmcia_gpios, 67 + ARRAY_SIZE(vpac270_pcmcia_gpios)); 68 + else 69 + gpio_free_array(vpac270_cf_gpios, 70 + ARRAY_SIZE(vpac270_cf_gpios)); 145 71 } 146 72 147 73 static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+1 -1
drivers/pcmcia/sa1100_nanoengine.c
··· 86 86 GPDR &= ~nano_skts[i].input_pins; 87 87 GPDR |= nano_skts[i].output_pins; 88 88 GPCR = nano_skts[i].clear_outputs; 89 - set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); 89 + irq_set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); 90 90 skt->socket.pci_irq = nano_skts[i].pci_irq; 91 91 92 92 return soc_pcmcia_request_irqs(skt,
+7 -7
drivers/pcmcia/soc_common.c
··· 155 155 */ 156 156 if (skt->irq_state != 1 && state->io_irq) { 157 157 skt->irq_state = 1; 158 - set_irq_type(skt->socket.pci_irq, 159 - IRQ_TYPE_EDGE_FALLING); 158 + irq_set_irq_type(skt->socket.pci_irq, 159 + IRQ_TYPE_EDGE_FALLING); 160 160 } else if (skt->irq_state == 1 && state->io_irq == 0) { 161 161 skt->irq_state = 0; 162 - set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); 162 + irq_set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); 163 163 } 164 164 165 165 skt->cs_state = *state; ··· 537 537 IRQF_DISABLED, irqs[i].str, skt); 538 538 if (res) 539 539 break; 540 - set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); 540 + irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); 541 541 } 542 542 543 543 if (res) { ··· 570 570 571 571 for (i = 0; i < nr; i++) 572 572 if (irqs[i].sock == skt->nr) 573 - set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); 573 + irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); 574 574 } 575 575 EXPORT_SYMBOL(soc_pcmcia_disable_irqs); 576 576 ··· 581 581 582 582 for (i = 0; i < nr; i++) 583 583 if (irqs[i].sock == skt->nr) { 584 - set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); 585 - set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); 584 + irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); 585 + irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); 586 586 } 587 587 } 588 588 EXPORT_SYMBOL(soc_pcmcia_enable_irqs);
+1 -1
drivers/pcmcia/xxs1500_ss.c
··· 274 274 * edge detector. 275 275 */ 276 276 irq = gpio_to_irq(GPIO_CDA); 277 - set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); 277 + irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); 278 278 ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock); 279 279 if (ret) { 280 280 dev_err(&pdev->dev, "cannot setup cd irq\n");
+5 -3
drivers/platform/x86/intel_pmic_gpio.c
··· 257 257 } 258 258 259 259 for (i = 0; i < 8; i++) { 260 - set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, 261 - handle_simple_irq, "demux"); 262 - set_irq_chip_data(i + pg->irq_base, pg); 260 + irq_set_chip_and_handler_name(i + pg->irq_base, 261 + &pmic_irqchip, 262 + handle_simple_irq, 263 + "demux"); 264 + irq_set_chip_data(i + pg->irq_base, pg); 263 265 } 264 266 return 0; 265 267 err:
+2 -2
drivers/power/z2_battery.c
··· 215 215 if (ret) 216 216 goto err2; 217 217 218 - set_irq_type(gpio_to_irq(info->charge_gpio), 219 - IRQ_TYPE_EDGE_BOTH); 218 + irq_set_irq_type(gpio_to_irq(info->charge_gpio), 219 + IRQ_TYPE_EDGE_BOTH); 220 220 ret = request_irq(gpio_to_irq(info->charge_gpio), 221 221 z2_charge_switch_irq, IRQF_DISABLED, 222 222 "AC Detect", charger);
+3 -3
drivers/rtc/rtc-sh.c
··· 782 782 struct platform_device *pdev = to_platform_device(dev); 783 783 struct sh_rtc *rtc = platform_get_drvdata(pdev); 784 784 785 - set_irq_wake(rtc->periodic_irq, enabled); 785 + irq_set_irq_wake(rtc->periodic_irq, enabled); 786 786 787 787 if (rtc->carry_irq > 0) { 788 - set_irq_wake(rtc->carry_irq, enabled); 789 - set_irq_wake(rtc->alarm_irq, enabled); 788 + irq_set_irq_wake(rtc->carry_irq, enabled); 789 + irq_set_irq_wake(rtc->alarm_irq, enabled); 790 790 } 791 791 } 792 792
+9 -14
drivers/sh/intc/core.c
··· 63 63 64 64 static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) 65 65 { 66 - generic_handle_irq((unsigned int)get_irq_data(irq)); 66 + generic_handle_irq((unsigned int)irq_get_handler_data(irq)); 67 67 } 68 68 69 69 static void __init intc_register_irq(struct intc_desc *desc, ··· 116 116 irq_data = irq_get_irq_data(irq); 117 117 118 118 disable_irq_nosync(irq); 119 - set_irq_chip_and_handler_name(irq, &d->chip, 120 - handle_level_irq, "level"); 121 - set_irq_chip_data(irq, (void *)data[primary]); 119 + irq_set_chip_and_handler_name(irq, &d->chip, handle_level_irq, 120 + "level"); 121 + irq_set_chip_data(irq, (void *)data[primary]); 122 122 123 123 /* 124 124 * set priority level ··· 340 340 vect2->enum_id = 0; 341 341 342 342 /* redirect this interrupts to the first one */ 343 - set_irq_chip(irq2, &dummy_irq_chip); 344 - set_irq_chained_handler(irq2, intc_redirect_irq); 345 - set_irq_data(irq2, (void *)irq); 343 + irq_set_chip(irq2, &dummy_irq_chip); 344 + irq_set_chained_handler(irq2, intc_redirect_irq); 345 + irq_set_handler_data(irq2, (void *)irq); 346 346 } 347 347 } 348 348 ··· 387 387 /* enable wakeup irqs belonging to this intc controller */ 388 388 for_each_active_irq(irq) { 389 389 struct irq_data *data; 390 - struct irq_desc *desc; 391 390 struct irq_chip *chip; 392 391 393 392 data = irq_get_irq_data(irq); 394 393 chip = irq_data_get_irq_chip(data); 395 394 if (chip != &d->chip) 396 395 continue; 397 - desc = irq_to_desc(irq); 398 - if ((desc->status & IRQ_WAKEUP)) 396 + if (irqd_is_wakeup_set(data)) 399 397 chip->irq_enable(data); 400 398 } 401 399 } 402 - 403 400 return 0; 404 401 } 405 402 ··· 409 412 410 413 for_each_active_irq(irq) { 411 414 struct irq_data *data; 412 - struct irq_desc *desc; 413 415 struct irq_chip *chip; 414 416 415 417 data = irq_get_irq_data(irq); ··· 419 423 */ 420 424 if (chip != &d->chip) 421 425 continue; 422 - desc = irq_to_desc(irq); 423 - if (desc->status & IRQ_DISABLED) 426 + if (irqd_irq_disabled(data)) 424 427 chip->irq_disable(data); 425 428 else 426 429 chip->irq_enable(data);
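Note how the suspend/resume paths above stop reading desc->status bits (IRQ_WAKEUP, IRQ_DISABLED) directly and instead go through irq_data accessors. A sketch of that accessor style for a driver that wants to re-enable its wakeup sources on resume (the chip pointer and function name are illustrative):

    #include <linux/irq.h>

    static void demo_enable_wakeups(struct irq_chip *chip)
    {
            unsigned int irq;

            for_each_active_irq(irq) {
                    struct irq_data *data = irq_get_irq_data(irq);

                    if (irq_data_get_irq_chip(data) != chip)
                            continue;
                    /* accessor replaces the old desc->status & IRQ_WAKEUP test */
                    if (irqd_is_wakeup_set(data))
                            chip->irq_enable(data);
            }
    }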
+2 -2
drivers/sh/intc/internals.h
··· 86 86 87 87 static inline struct intc_desc_int *get_intc_desc(unsigned int irq) 88 88 { 89 - struct irq_chip *chip = get_irq_chip(irq); 89 + struct irq_chip *chip = irq_get_chip(irq); 90 90 91 91 return container_of(chip, struct intc_desc_int, chip); 92 92 } ··· 103 103 set_irq_flags(irq, IRQF_VALID); 104 104 #else 105 105 /* same effect on other architectures */ 106 - set_irq_noprobe(irq); 106 + irq_set_noprobe(irq); 107 107 #endif 108 108 } 109 109
+6 -6
drivers/sh/intc/virq.c
··· 110 110 { 111 111 struct irq_data *data = irq_get_irq_data(irq); 112 112 struct irq_chip *chip = irq_data_get_irq_chip(data); 113 - struct intc_virq_list *entry, *vlist = irq_data_get_irq_data(data); 113 + struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); 114 114 struct intc_desc_int *d = get_intc_desc(irq); 115 115 116 116 chip->irq_mask_ack(data); ··· 118 118 for_each_virq(entry, vlist) { 119 119 unsigned long addr, handle; 120 120 121 - handle = (unsigned long)get_irq_data(entry->irq); 121 + handle = (unsigned long)irq_get_handler_data(entry->irq); 122 122 addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); 123 123 124 124 if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) ··· 229 229 230 230 intc_irq_xlate_set(irq, entry->enum_id, d); 231 231 232 - set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq), 232 + irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq), 233 233 handle_simple_irq, "virq"); 234 - set_irq_chip_data(irq, get_irq_chip_data(entry->pirq)); 234 + irq_set_chip_data(irq, irq_get_chip_data(entry->pirq)); 235 235 236 - set_irq_data(irq, (void *)entry->handle); 236 + irq_set_handler_data(irq, (void *)entry->handle); 237 237 238 - set_irq_chained_handler(entry->pirq, intc_virq_handler); 238 + irq_set_chained_handler(entry->pirq, intc_virq_handler); 239 239 add_virq_to_pirq(entry->pirq, irq); 240 240 241 241 radix_tree_tag_clear(&d->tree, entry->enum_id,
+2 -2
drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
··· 341 341 if (error) 342 342 return -ENODEV; 343 343 344 - set_irq_wake(sdhcinfo->oob_irq, 1); 344 + irq_set_irq_wake(sdhcinfo->oob_irq, 1); 345 345 sdhcinfo->oob_irq_registered = true; 346 346 } 347 347 ··· 352 352 { 353 353 SDLX_MSG(("%s: Enter\n", __func__)); 354 354 355 - set_irq_wake(sdhcinfo->oob_irq, 0); 355 + irq_set_irq_wake(sdhcinfo->oob_irq, 0); 356 356 disable_irq(sdhcinfo->oob_irq); /* just in case.. */ 357 357 free_irq(sdhcinfo->oob_irq, NULL); 358 358 sdhcinfo->oob_irq_registered = false;
+1 -1
drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
··· 597 597 int result; 598 598 int irq_pin = AST_INT; 599 599 600 - set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); 600 + irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); 601 601 602 602 /* 603 603 * for shared IRQS must provide non NULL device ptr
+1 -1
drivers/tty/hvc/hvc_xen.c
··· 178 178 if (xencons_irq < 0) 179 179 xencons_irq = 0; /* NO_IRQ */ 180 180 else 181 - set_irq_noprobe(xencons_irq); 181 + irq_set_noprobe(xencons_irq); 182 182 183 183 hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); 184 184 if (IS_ERR(hp))
+2 -2
drivers/tty/serial/msm_serial_hs.c
··· 1644 1644 if (unlikely(uport->irq < 0)) 1645 1645 return -ENXIO; 1646 1646 1647 - if (unlikely(set_irq_wake(uport->irq, 1))) 1647 + if (unlikely(irq_set_irq_wake(uport->irq, 1))) 1648 1648 return -ENXIO; 1649 1649 1650 1650 if (pdata == NULL || pdata->rx_wakeup_irq < 0) ··· 1658 1658 if (unlikely(msm_uport->rx_wakeup.irq < 0)) 1659 1659 return -ENXIO; 1660 1660 1661 - if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1))) 1661 + if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1))) 1662 1662 return -ENXIO; 1663 1663 } 1664 1664
+7 -69
drivers/usb/gadget/pxa25x_udc.c
··· 139 139 static void pxa25x_ep_fifo_flush (struct usb_ep *ep); 140 140 static void nuke (struct pxa25x_ep *, int status); 141 141 142 - /* one GPIO should be used to detect VBUS from the host */ 143 - static int is_vbus_present(void) 144 - { 145 - struct pxa2xx_udc_mach_info *mach = the_controller->mach; 146 - 147 - if (gpio_is_valid(mach->gpio_vbus)) { 148 - int value = gpio_get_value(mach->gpio_vbus); 149 - 150 - if (mach->gpio_vbus_inverted) 151 - return !value; 152 - else 153 - return !!value; 154 - } 155 - if (mach->udc_is_connected) 156 - return mach->udc_is_connected(); 157 - return 1; 158 - } 159 - 160 142 /* one GPIO should control a D+ pullup, so host sees this device (or not) */ 161 143 static void pullup_off(void) 162 144 { ··· 1037 1055 "%s version: %s\nGadget driver: %s\nHost %s\n\n", 1038 1056 driver_name, DRIVER_VERSION SIZE_STR "(pio)", 1039 1057 dev->driver ? dev->driver->driver.name : "(none)", 1040 - is_vbus_present() ? "full speed" : "disconnected"); 1058 + dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected"); 1041 1059 1042 1060 /* registers for device and ep0 */ 1043 1061 seq_printf(m, ··· 1076 1094 (tmp & UDCCFR_ACM) ? " acm" : ""); 1077 1095 } 1078 1096 1079 - if (!is_vbus_present() || !dev->driver) 1097 + if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver) 1080 1098 goto done; 1081 1099 1082 1100 seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n", ··· 1417 1435 1418 1436 #endif 1419 1437 1420 - static irqreturn_t udc_vbus_irq(int irq, void *_dev) 1421 - { 1422 - struct pxa25x_udc *dev = _dev; 1423 - 1424 - pxa25x_udc_vbus_session(&dev->gadget, is_vbus_present()); 1425 - return IRQ_HANDLED; 1426 - } 1427 - 1428 1438 1429 1439 /*-------------------------------------------------------------------------*/ 1430 1440 ··· 1740 1766 if (unlikely(udccr & UDCCR_SUSIR)) { 1741 1767 udc_ack_int_UDCCR(UDCCR_SUSIR); 1742 1768 handled = 1; 1743 - DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present() 1744 - ?
"" : "+disconnect"); 1769 + DBG(DBG_VERBOSE, "USB suspend\n"); 1745 1770 1746 - if (!is_vbus_present()) 1747 - stop_activity(dev, dev->driver); 1748 - else if (dev->gadget.speed != USB_SPEED_UNKNOWN 1771 + if (dev->gadget.speed != USB_SPEED_UNKNOWN 1749 1772 && dev->driver 1750 1773 && dev->driver->suspend) 1751 1774 dev->driver->suspend(&dev->gadget); ··· 1757 1786 1758 1787 if (dev->gadget.speed != USB_SPEED_UNKNOWN 1759 1788 && dev->driver 1760 - && dev->driver->resume 1761 - && is_vbus_present()) 1789 + && dev->driver->resume) 1762 1790 dev->driver->resume(&dev->gadget); 1763 1791 } 1764 1792 ··· 2107 2137 static int __init pxa25x_udc_probe(struct platform_device *pdev) 2108 2138 { 2109 2139 struct pxa25x_udc *dev = &memory; 2110 - int retval, vbus_irq, irq; 2140 + int retval, irq; 2111 2141 u32 chiprev; 2112 2142 2113 2143 /* insist on Intel/ARM/XScale */ ··· 2169 2199 2170 2200 dev->transceiver = otg_get_transceiver(); 2171 2201 2172 - if (gpio_is_valid(dev->mach->gpio_vbus)) { 2173 - if ((retval = gpio_request(dev->mach->gpio_vbus, 2174 - "pxa25x_udc GPIO VBUS"))) { 2175 - dev_dbg(&pdev->dev, 2176 - "can't get vbus gpio %d, err: %d\n", 2177 - dev->mach->gpio_vbus, retval); 2178 - goto err_gpio_vbus; 2179 - } 2180 - gpio_direction_input(dev->mach->gpio_vbus); 2181 - vbus_irq = gpio_to_irq(dev->mach->gpio_vbus); 2182 - } else 2183 - vbus_irq = 0; 2184 - 2185 2202 if (gpio_is_valid(dev->mach->gpio_pullup)) { 2186 2203 if ((retval = gpio_request(dev->mach->gpio_pullup, 2187 2204 "pca25x_udc GPIO PULLUP"))) { ··· 2194 2237 udc_disable(dev); 2195 2238 udc_reinit(dev); 2196 2239 2197 - dev->vbus = !!is_vbus_present(); 2240 + dev->vbus = 0; 2198 2241 2199 2242 /* irq setup after old hardware state is cleaned up */ 2200 2243 retval = request_irq(irq, pxa25x_udc_irq, ··· 2230 2273 } 2231 2274 } else 2232 2275 #endif 2233 - if (vbus_irq) { 2234 - retval = request_irq(vbus_irq, udc_vbus_irq, 2235 - IRQF_DISABLED | IRQF_SAMPLE_RANDOM | 2236 - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 2237 - driver_name, dev); 2238 - if (retval != 0) { 2239 - pr_err("%s: can't get irq %i, err %d\n", 2240 - driver_name, vbus_irq, retval); 2241 - goto err_vbus_irq; 2242 - } 2243 - } 2244 2276 create_debug_files(dev); 2245 2277 2246 2278 return 0; 2247 2279 2248 - err_vbus_irq: 2249 2280 #ifdef CONFIG_ARCH_LUBBOCK 2250 2281 free_irq(LUBBOCK_USB_DISC_IRQ, dev); 2251 2282 err_irq_lub: ··· 2243 2298 if (gpio_is_valid(dev->mach->gpio_pullup)) 2244 2299 gpio_free(dev->mach->gpio_pullup); 2245 2300 err_gpio_pullup: 2246 - if (gpio_is_valid(dev->mach->gpio_vbus)) 2247 - gpio_free(dev->mach->gpio_vbus); 2248 - err_gpio_vbus: 2249 2301 if (dev->transceiver) { 2250 2302 otg_put_transceiver(dev->transceiver); 2251 2303 dev->transceiver = NULL; ··· 2279 2337 free_irq(LUBBOCK_USB_IRQ, dev); 2280 2338 } 2281 2339 #endif 2282 - if (gpio_is_valid(dev->mach->gpio_vbus)) { 2283 - free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev); 2284 - gpio_free(dev->mach->gpio_vbus); 2285 - } 2286 2340 if (gpio_is_valid(dev->mach->gpio_pullup)) 2287 2341 gpio_free(dev->mach->gpio_pullup); 2288 2342
+1 -1
drivers/usb/host/oxu210hp-hcd.c
··· 3832 3832 return -EBUSY; 3833 3833 } 3834 3834 3835 - ret = set_irq_type(irq, IRQF_TRIGGER_FALLING); 3835 + ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); 3836 3836 if (ret) { 3837 3837 dev_err(&pdev->dev, "error setting irq type\n"); 3838 3838 ret = -EFAULT;
+1 -1
drivers/usb/musb/tusb6010.c
··· 943 943 musb_writel(tbase, TUSB_INT_CTRL_CONF, 944 944 TUSB_INT_CTRL_CONF_INT_RELCYC(0)); 945 945 946 - set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); 946 + irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); 947 947 948 948 /* maybe force into the Default-A OTG state machine */ 949 949 if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
+89 -47
drivers/video/pxafb.c
··· 627 627 628 628 static void overlay1fb_disable(struct pxafb_layer *ofb) 629 629 { 630 - uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5); 630 + uint32_t lccr5; 631 + 632 + if (!(lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN)) 633 + return; 634 + 635 + lccr5 = lcd_readl(ofb->fbi, LCCR5); 631 636 632 637 lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] & ~OVLxC1_OEN); 633 638 ··· 690 685 691 686 static void overlay2fb_disable(struct pxafb_layer *ofb) 692 687 { 693 - uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5); 688 + uint32_t lccr5; 689 + 690 + if (!(lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN)) 691 + return; 692 + 693 + lccr5 = lcd_readl(ofb->fbi, LCCR5); 694 694 695 695 lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] & ~OVLxC1_OEN); 696 696 ··· 730 720 if (user == 0) 731 721 return -ENODEV; 732 722 733 - /* allow only one user at a time */ 734 - if (atomic_inc_and_test(&ofb->usage)) 735 - return -EBUSY; 723 + if (ofb->usage++ == 0) 724 + /* unblank the base framebuffer */ 725 + fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK); 736 726 737 - /* unblank the base framebuffer */ 738 - fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK); 739 727 return 0; 740 728 } 741 729 ··· 741 733 { 742 734 struct pxafb_layer *ofb = (struct pxafb_layer*) info; 743 735 744 - atomic_dec(&ofb->usage); 745 - ofb->ops->disable(ofb); 736 + if (ofb->usage == 1) { 737 + ofb->ops->disable(ofb); 738 + ofb->fb.var.height = -1; 739 + ofb->fb.var.width = -1; 740 + ofb->fb.var.xres = ofb->fb.var.xres_virtual = 0; 741 + ofb->fb.var.yres = ofb->fb.var.yres_virtual = 0; 746 742 747 - free_pages_exact(ofb->video_mem, ofb->video_mem_size); 748 - ofb->video_mem = NULL; 749 - ofb->video_mem_size = 0; 743 + ofb->usage--; 744 + } 750 745 return 0; 751 746 } 752 747 ··· 761 750 int xpos, ypos, pfor, bpp; 762 751 763 752 xpos = NONSTD_TO_XPOS(var->nonstd); 764 - ypos = NONSTD_TO_XPOS(var->nonstd); 753 + ypos = NONSTD_TO_YPOS(var->nonstd); 765 754 pfor = NONSTD_TO_PFOR(var->nonstd); 766 755 767 756 bpp = pxafb_var_to_bpp(var); ··· 805 794 return 0; 806 795 } 807 796 808 - static int overlayfb_map_video_memory(struct pxafb_layer *ofb) 797 + static int overlayfb_check_video_memory(struct pxafb_layer *ofb) 809 798 { 810 799 struct fb_var_screeninfo *var = &ofb->fb.var; 811 800 int pfor = NONSTD_TO_PFOR(var->nonstd); ··· 823 812 824 813 size = PAGE_ALIGN(ofb->fb.fix.line_length * var->yres_virtual); 825 814 826 - /* don't re-allocate if the original video memory is enough */ 827 815 if (ofb->video_mem) { 828 816 if (ofb->video_mem_size >= size) 829 817 return 0; 830 - 831 - free_pages_exact(ofb->video_mem, ofb->video_mem_size); 832 818 } 833 - 834 - ofb->video_mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); 835 - if (ofb->video_mem == NULL) 836 - return -ENOMEM; 837 - 838 - ofb->video_mem_phys = virt_to_phys(ofb->video_mem); 839 - ofb->video_mem_size = size; 840 - 841 - mutex_lock(&ofb->fb.mm_lock); 842 - ofb->fb.fix.smem_start = ofb->video_mem_phys; 843 - ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual; 844 - mutex_unlock(&ofb->fb.mm_lock); 845 - ofb->fb.screen_base = ofb->video_mem; 846 - return 0; 819 + return -EINVAL; 847 820 } 848 821 849 822 static int overlayfb_set_par(struct fb_info *info) ··· 836 841 struct fb_var_screeninfo *var = &info->var; 837 842 int xpos, ypos, pfor, bpp, ret; 838 843 839 - ret = overlayfb_map_video_memory(ofb); 844 + ret = overlayfb_check_video_memory(ofb); 840 845 if (ret) 841 846 return ret; 842 847 843 848 bpp = pxafb_var_to_bpp(var); 844 849 xpos = NONSTD_TO_XPOS(var->nonstd); 845 - ypos =
NONSTD_TO_XPOS(var->nonstd); 850 + ypos = NONSTD_TO_YPOS(var->nonstd); 846 851 pfor = NONSTD_TO_PFOR(var->nonstd); 847 852 848 853 ofb->control[0] = OVLxC1_PPL(var->xres) | OVLxC1_LPO(var->yres) | ··· 886 891 887 892 ofb->id = id; 888 893 ofb->ops = &ofb_ops[id]; 889 - atomic_set(&ofb->usage, 0); 894 + ofb->usage = 0; 890 895 ofb->fbi = fbi; 891 896 init_completion(&ofb->branch_done); 892 897 } ··· 899 904 return 0; 900 905 } 901 906 902 - static int __devinit pxafb_overlay_init(struct pxafb_info *fbi) 907 + static int __devinit pxafb_overlay_map_video_memory(struct pxafb_info *pxafb, 908 + struct pxafb_layer *ofb) 909 + { 910 + /* We assume that user will use at most video_mem_size for overlay fb, 911 + * anyway, it's useless to use 16bpp main plane and 24bpp overlay 912 + */ 913 + ofb->video_mem = alloc_pages_exact(PAGE_ALIGN(pxafb->video_mem_size), 914 + GFP_KERNEL | __GFP_ZERO); 915 + if (ofb->video_mem == NULL) 916 + return -ENOMEM; 917 + 918 + ofb->video_mem_phys = virt_to_phys(ofb->video_mem); 919 + ofb->video_mem_size = PAGE_ALIGN(pxafb->video_mem_size); 920 + 921 + mutex_lock(&ofb->fb.mm_lock); 922 + ofb->fb.fix.smem_start = ofb->video_mem_phys; 923 + ofb->fb.fix.smem_len = pxafb->video_mem_size; 924 + mutex_unlock(&ofb->fb.mm_lock); 925 + 926 + ofb->fb.screen_base = ofb->video_mem; 927 + 928 + return 0; 929 + } 930 + 931 + static void __devinit pxafb_overlay_init(struct pxafb_info *fbi) 903 932 { 904 933 int i, ret; 905 934 906 935 if (!pxafb_overlay_supported()) 907 - return 0; 936 + return; 908 937 909 938 for (i = 0; i < 2; i++) { 910 - init_pxafb_overlay(fbi, &fbi->overlay[i], i); 911 - ret = register_framebuffer(&fbi->overlay[i].fb); 939 + struct pxafb_layer *ofb = &fbi->overlay[i]; 940 + init_pxafb_overlay(fbi, ofb, i); 941 + ret = register_framebuffer(&ofb->fb); 912 942 if (ret) { 913 943 dev_err(fbi->dev, "failed to register overlay %d\n", i); 914 - return ret; 944 + continue; 915 945 } 946 + ret = pxafb_overlay_map_video_memory(fbi, ofb); 947 + if (ret) { 948 + dev_err(fbi->dev, 949 + "failed to map video memory for overlay %d\n", 950 + i); 951 + unregister_framebuffer(&ofb->fb); 952 + continue; 953 + } 954 + ofb->registered = 1; 916 955 } 917 956 918 957 /* mask all IU/BS/EOF/SOF interrupts */ 919 958 lcd_writel(fbi, LCCR5, ~0); 920 959 921 - /* place overlay(s) on top of base */ 922 - fbi->lccr0 |= LCCR0_OUC; 923 960 pr_info("PXA Overlay driver loaded successfully!\n"); 924 - return 0; 925 961 } 926 962 927 963 static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi) ··· 962 936 if (!pxafb_overlay_supported()) 963 937 return; 964 938 965 - for (i = 0; i < 2; i++) 966 - unregister_framebuffer(&fbi->overlay[i].fb); 939 + for (i = 0; i < 2; i++) { 940 + struct pxafb_layer *ofb = &fbi->overlay[i]; 941 + if (ofb->registered) { 942 + if (ofb->video_mem) 943 + free_pages_exact(ofb->video_mem, 944 + ofb->video_mem_size); 945 + unregister_framebuffer(&ofb->fb); 946 + } 947 + } 967 948 } 968 949 #else 969 950 static inline void pxafb_overlay_init(struct pxafb_info *fbi) {} ··· 1401 1368 (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) || 1402 1369 (lcd_readl(fbi, LCCR4) != fbi->reg_lccr4) || 1403 1370 (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) || 1404 - (lcd_readl(fbi, FDADR1) != fbi->fdadr[1])) 1371 + ((fbi->lccr0 & LCCR0_SDS) && 1372 + (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))) 1405 1373 pxafb_schedule_work(fbi, C_REENABLE); 1406 1374 1407 1375 return 0; ··· 1454 1420 lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); 1455 1421 1456 1422 lcd_writel(fbi, FDADR0,
fbi->fdadr[0]); 1457 - lcd_writel(fbi, FDADR1, fbi->fdadr[1]); 1423 + if (fbi->lccr0 & LCCR0_SDS) 1424 + lcd_writel(fbi, FDADR1, fbi->fdadr[1]); 1458 1425 lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); 1459 1426 } 1460 1427 ··· 1648 1613 1649 1614 switch (val) { 1650 1615 case CPUFREQ_PRECHANGE: 1651 - set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); 1616 + if (!fbi->overlay[0].usage && !fbi->overlay[1].usage) 1617 + set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); 1652 1618 break; 1653 1619 1654 1620 case CPUFREQ_POSTCHANGE: ··· 1841 1805 fbi->task_state = (u_char)-1; 1842 1806 1843 1807 pxafb_decode_mach_info(fbi, inf); 1808 + 1809 + #ifdef CONFIG_FB_PXA_OVERLAY 1810 + /* place overlay(s) on top of base */ 1811 + if (pxafb_overlay_supported()) 1812 + fbi->lccr0 |= LCCR0_OUC; 1813 + #endif 1844 1814 1845 1815 init_waitqueue_head(&fbi->ctrlr_wait); 1846 1816 INIT_WORK(&fbi->task, pxafb_task);
+2 -1
drivers/video/pxafb.h
··· 92 92 struct pxafb_layer { 93 93 struct fb_info fb; 94 94 int id; 95 - atomic_t usage; 95 + int registered; 96 + uint32_t usage; 96 97 uint32_t control[2]; 97 98 98 99 struct pxafb_layer_ops *ops;
+2 -2
drivers/w1/masters/ds1wm.c
··· 368 368 ds1wm_data->active_high = plat->active_high; 369 369 370 370 if (res->flags & IORESOURCE_IRQ_HIGHEDGE) 371 - set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); 371 + irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); 372 372 if (res->flags & IORESOURCE_IRQ_LOWEDGE) 373 - set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); 373 + irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); 374 374 375 375 ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, 376 376 "ds1wm", ds1wm_data);
+10 -12
drivers/watchdog/davinci_wdt.c
··· 202 202 static int __devinit davinci_wdt_probe(struct platform_device *pdev) 203 203 { 204 204 int ret = 0, size; 205 - struct resource *res; 206 205 struct device *dev = &pdev->dev; 207 206 208 207 wdt_clk = clk_get(dev, NULL); ··· 215 216 216 217 dev_info(dev, "heartbeat %d sec\n", heartbeat); 217 218 218 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 219 - if (res == NULL) { 219 + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 220 + if (wdt_mem == NULL) { 220 221 dev_err(dev, "failed to get memory region resource\n"); 221 222 return -ENOENT; 222 223 } 223 224 224 - size = resource_size(res); 225 - wdt_mem = request_mem_region(res->start, size, pdev->name); 226 - 227 - if (wdt_mem == NULL) { 225 + size = resource_size(wdt_mem); 226 + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { 228 227 dev_err(dev, "failed to get memory region\n"); 229 228 return -ENOENT; 230 229 } 231 230 232 - wdt_base = ioremap(res->start, size); 231 + wdt_base = ioremap(wdt_mem->start, size); 233 232 if (!wdt_base) { 234 233 dev_err(dev, "failed to map memory region\n"); 234 + release_mem_region(wdt_mem->start, size); 235 + wdt_mem = NULL; 235 236 return -ENOMEM; 236 237 } 237 238 238 239 ret = misc_register(&davinci_wdt_miscdev); 239 240 if (ret < 0) { 240 241 dev_err(dev, "cannot register misc device\n"); 241 - release_resource(wdt_mem); 242 - kfree(wdt_mem); 242 + release_mem_region(wdt_mem->start, size); 243 + wdt_mem = NULL; 243 244 } else { 244 245 set_bit(WDT_DEVICE_INITED, &wdt_status); 245 246 } ··· 252 253 { 253 254 misc_deregister(&davinci_wdt_miscdev); 254 255 if (wdt_mem) { 255 - release_resource(wdt_mem); 256 - kfree(wdt_mem); 256 + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); 257 257 wdt_mem = NULL; 258 258 } 259 259
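The watchdog hunks here and below share one fix: the struct resource returned by platform_get_resource() is owned by the platform device, so the old release_resource()/kfree() teardown freed memory the driver never allocated. The corrected shape keeps that pointer and pairs request_mem_region() with release_mem_region(). Sketched with hypothetical names:

    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static struct resource *demo_mem;   /* owned by the platform device */
    static void __iomem *demo_base;

    static int demo_probe(struct platform_device *pdev)
    {
            resource_size_t size;

            demo_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (demo_mem == NULL)
                    return -ENOENT;

            size = resource_size(demo_mem);
            if (!request_mem_region(demo_mem->start, size, pdev->name))
                    return -EBUSY;

            demo_base = ioremap(demo_mem->start, size);
            if (!demo_base) {
                    release_mem_region(demo_mem->start, size);
                    return -ENOMEM;
            }
            return 0;
    }

    static int demo_remove(struct platform_device *pdev)
    {
            iounmap(demo_base);
            /* release, never kfree(), the platform-owned resource */
            release_mem_region(demo_mem->start, resource_size(demo_mem));
            return 0;
    }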
+8 -12
drivers/watchdog/max63xx_wdt.c
··· 270 270 { 271 271 int ret = 0; 272 272 int size; 273 - struct resource *res; 274 273 struct device *dev = &pdev->dev; 275 274 struct max63xx_timeout *table; 276 275 ··· 293 294 294 295 max63xx_pdev = pdev; 295 296 296 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 297 - if (res == NULL) { 297 + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 298 + if (wdt_mem == NULL) { 298 299 dev_err(dev, "failed to get memory region resource\n"); 299 300 return -ENOENT; 300 301 } 301 302 302 - size = resource_size(res); 303 - wdt_mem = request_mem_region(res->start, size, pdev->name); 304 - 305 - if (wdt_mem == NULL) { 303 + size = resource_size(wdt_mem); 304 + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { 306 305 dev_err(dev, "failed to get memory region\n"); 307 306 return -ENOENT; 308 307 } 309 308 310 - wdt_base = ioremap(res->start, size); 309 + wdt_base = ioremap(wdt_mem->start, size); 311 310 if (!wdt_base) { 312 311 dev_err(dev, "failed to map memory region\n"); 313 312 ret = -ENOMEM; ··· 323 326 out_unmap: 324 327 iounmap(wdt_base); 325 328 out_request: 326 - release_resource(wdt_mem); 327 - kfree(wdt_mem); 329 + release_mem_region(wdt_mem->start, size); 330 + wdt_mem = NULL; 328 331 329 332 return ret; 330 333 } ··· 333 336 { 334 337 misc_deregister(&max63xx_wdt_miscdev); 335 338 if (wdt_mem) { 336 - release_resource(wdt_mem); 337 - kfree(wdt_mem); 339 + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); 338 340 wdt_mem = NULL; 339 341 } 340 342
+1 -1
drivers/watchdog/nv_tco.c
··· 302 302 * Init & exit routines 303 303 */ 304 304 305 - static unsigned char __init nv_tco_getdevice(void) 305 + static unsigned char __devinit nv_tco_getdevice(void) 306 306 { 307 307 struct pci_dev *dev = NULL; 308 308 u32 val;
+13 -15
drivers/watchdog/pnx4008_wdt.c
··· 254 254 static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) 255 255 { 256 256 int ret = 0, size; 257 - struct resource *res; 258 257 259 258 if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) 260 259 heartbeat = DEFAULT_HEARTBEAT; ··· 261 262 printk(KERN_INFO MODULE_NAME 262 263 "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); 263 264 264 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 265 - if (res == NULL) { 265 + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 266 + if (wdt_mem == NULL) { 266 267 printk(KERN_INFO MODULE_NAME 267 268 "failed to get memory region resource\n"); 268 269 return -ENOENT; 269 270 } 270 271 271 - size = resource_size(res); 272 - wdt_mem = request_mem_region(res->start, size, pdev->name); 272 + size = resource_size(wdt_mem); 273 273 274 - if (wdt_mem == NULL) { 274 + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { 275 275 printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); 276 276 return -ENOENT; 277 277 } 278 - wdt_base = (void __iomem *)IO_ADDRESS(res->start); 278 + wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start); 279 279 280 280 wdt_clk = clk_get(&pdev->dev, NULL); 281 281 if (IS_ERR(wdt_clk)) { 282 282 ret = PTR_ERR(wdt_clk); 283 - release_resource(wdt_mem); 284 - kfree(wdt_mem); 283 + release_mem_region(wdt_mem->start, size); 284 + wdt_mem = NULL; 285 285 goto out; 286 286 } 287 287 288 288 ret = clk_enable(wdt_clk); 289 289 if (ret) { 290 - release_resource(wdt_mem); 291 - kfree(wdt_mem); 290 + release_mem_region(wdt_mem->start, size); 291 + wdt_mem = NULL; 292 + clk_put(wdt_clk); 292 293 goto out; 293 294 } 294 295 295 296 ret = misc_register(&pnx4008_wdt_miscdev); 296 297 if (ret < 0) { 297 298 printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); 298 - release_resource(wdt_mem); 299 - kfree(wdt_mem); 299 + release_mem_region(wdt_mem->start, size); 300 + wdt_mem = NULL; 300 301 clk_disable(wdt_clk); 301 302 clk_put(wdt_clk); 302 303 } else { ··· 319 320 clk_put(wdt_clk); 320 321 321 322 if (wdt_mem) { 322 - release_resource(wdt_mem); 323 - kfree(wdt_mem); 323 + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); 324 324 wdt_mem = NULL; 325 325 } 326 326 return 0;
+8 -11
drivers/watchdog/s3c2410_wdt.c
··· 402 402 403 403 static int __devinit s3c2410wdt_probe(struct platform_device *pdev) 404 404 { 405 - struct resource *res; 406 405 struct device *dev; 407 406 unsigned int wtcon; 408 407 int started = 0; ··· 415 416 416 417 /* get the memory region for the watchdog timer */ 417 418 418 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 419 - if (res == NULL) { 419 + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 420 + if (wdt_mem == NULL) { 420 421 dev_err(dev, "no memory resource specified\n"); 421 422 return -ENOENT; 422 423 } 423 424 424 - size = resource_size(res); 425 - wdt_mem = request_mem_region(res->start, size, pdev->name); 426 - if (wdt_mem == NULL) { 425 + size = resource_size(wdt_mem); 426 + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { 427 427 dev_err(dev, "failed to get memory region\n"); 428 428 return -EBUSY; 429 429 } 430 430 431 - wdt_base = ioremap(res->start, size); 431 + wdt_base = ioremap(wdt_mem->start, size); 432 432 if (wdt_base == NULL) { 433 433 dev_err(dev, "failed to ioremap() region\n"); 434 434 ret = -EINVAL; ··· 522 524 iounmap(wdt_base); 523 525 524 526 err_req: 525 - release_resource(wdt_mem); 526 - kfree(wdt_mem); 527 + release_mem_region(wdt_mem->start, size); 528 + wdt_mem = NULL; 527 529 528 530 return ret; 529 531 } ··· 543 545 544 546 iounmap(wdt_base); 545 547 546 - release_resource(wdt_mem); 547 - kfree(wdt_mem); 548 + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); 548 549 wdt_mem = NULL; 549 550 return 0; 550 551 }
+13 -3
drivers/watchdog/softdog.c
··· 48 48 #include <linux/init.h> 49 49 #include <linux/jiffies.h> 50 50 #include <linux/uaccess.h> 51 + #include <linux/kernel.h> 51 52 52 53 #define PFX "SoftDog: " 53 54 ··· 76 75 "Softdog action, set to 1 to ignore reboots, 0 to reboot " 77 76 "(default depends on ONLY_TESTING)"); 78 77 78 + static int soft_panic; 79 + module_param(soft_panic, int, 0); 80 + MODULE_PARM_DESC(soft_panic, 81 + "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); 82 + 79 83 /* 80 84 * Our timer 81 85 */ ··· 104 98 105 99 if (soft_noboot) 106 100 printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); 107 - else { 101 + else if (soft_panic) { 102 + printk(KERN_CRIT PFX "Initiating panic.\n"); 103 + panic("Software Watchdog Timer expired."); 104 + } else { 108 105 printk(KERN_CRIT PFX "Initiating system reboot.\n"); 109 106 emergency_restart(); 110 107 printk(KERN_CRIT PFX "Reboot didn't ?????\n"); ··· 276 267 }; 277 268 278 269 static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " 279 - "initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n"; 270 + "initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d " 271 + "(nowayout= %d)\n"; 280 272 281 273 static int __init watchdog_init(void) 282 274 { ··· 308 298 return ret; 309 299 } 310 300 311 - printk(banner, soft_noboot, soft_margin, nowayout); 301 + printk(banner, soft_noboot, soft_margin, soft_panic, nowayout); 312 302 313 303 return 0; 314 304 }
+14 -2
drivers/watchdog/sp5100_tco.c
··· 42 42 #define PFX TCO_MODULE_NAME ": " 43 43 44 44 /* internal variables */ 45 + static u32 tcobase_phys; 45 46 static void __iomem *tcobase; 46 47 static unsigned int pm_iobase; 47 48 static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ ··· 306 305 /* Low three bits of BASE0 are reserved. */ 307 306 val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); 308 307 308 + if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, 309 + "SP5100 TCO")) { 310 + printk(KERN_ERR PFX "mmio address 0x%04x already in use\n", 311 + val); 312 + goto unreg_region; 313 + } 314 + tcobase_phys = val; 315 + 309 316 tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); 310 317 if (tcobase == 0) { 311 318 printk(KERN_ERR PFX "failed to get tcobase address\n"); 312 - goto unreg_region; 319 + goto unreg_mem_region; 313 320 } 314 321 315 322 /* Enable watchdog decode bit */ ··· 355 346 /* Done */ 356 347 return 1; 357 348 358 - iounmap(tcobase); 349 + unreg_mem_region: 350 + release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); 359 351 unreg_region: 360 352 release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); 361 353 exit: ··· 411 401 412 402 exit: 413 403 iounmap(tcobase); 404 + release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); 414 405 release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); 415 406 return ret; 416 407 } ··· 425 414 /* Deregister */ 426 415 misc_deregister(&sp5100_tco_miscdev); 427 416 iounmap(tcobase); 417 + release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); 428 418 release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); 429 419 } 430 420
+13 -13
drivers/xen/events.c
··· 122 122 /* Get info for IRQ */ 123 123 static struct irq_info *info_for_irq(unsigned irq) 124 124 { 125 - return get_irq_data(irq); 125 + return irq_get_handler_data(irq); 126 126 } 127 127 128 128 /* Constructors for packed IRQ information. */ ··· 403 403 404 404 info->type = IRQT_UNBOUND; 405 405 406 - set_irq_data(irq, info); 406 + irq_set_handler_data(irq, info); 407 407 408 408 list_add_tail(&info->list, &xen_irq_list_head); 409 409 } ··· 458 458 459 459 static void xen_free_irq(unsigned irq) 460 460 { 461 - struct irq_info *info = get_irq_data(irq); 461 + struct irq_info *info = irq_get_handler_data(irq); 462 462 463 463 list_del(&info->list); 464 464 465 - set_irq_data(irq, NULL); 465 + irq_set_handler_data(irq, NULL); 466 466 467 467 kfree(info); 468 468 ··· 585 585 { 586 586 int evtchn = evtchn_from_irq(data->irq); 587 587 588 - move_native_irq(data->irq); 588 + irq_move_irq(data); 589 589 590 590 if (VALID_EVTCHN(evtchn)) { 591 591 mask_evtchn(evtchn); ··· 639 639 if (irq < 0) 640 640 goto out; 641 641 642 - set_irq_chip_and_handler_name(irq, &xen_pirq_chip, 643 - handle_level_irq, name); 642 + irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, 643 + name); 644 644 645 645 irq_op.irq = irq; 646 646 irq_op.vector = 0; ··· 690 690 if (irq == -1) 691 691 goto out; 692 692 693 - set_irq_chip_and_handler_name(irq, &xen_pirq_chip, 694 - handle_level_irq, name); 693 + irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, 694 + name); 695 695 696 696 xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); 697 697 ret = irq_set_msi_desc(irq, msidesc); ··· 772 772 if (irq == -1) 773 773 goto out; 774 774 775 - set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 775 + irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, 776 776 handle_fasteoi_irq, "event"); 777 777 778 778 xen_irq_info_evtchn_init(irq, evtchn); ··· 799 799 if (irq < 0) 800 800 goto out; 801 801 802 - set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 802 + irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 803 803 handle_percpu_irq, "ipi"); 804 804 805 805 bind_ipi.vcpu = cpu; ··· 848 848 if (irq == -1) 849 849 goto out; 850 850 851 - set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 851 + irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 852 852 handle_percpu_irq, "virq"); 853 853 854 854 bind_virq.virq = virq; ··· 1339 1339 { 1340 1340 int evtchn = evtchn_from_irq(data->irq); 1341 1341 1342 - move_masked_irq(data->irq); 1342 + irq_move_masked_irq(data); 1343 1343 1344 1344 if (VALID_EVTCHN(evtchn)) 1345 1345 unmask_evtchn(evtchn);
+4 -2
drivers/xen/gntdev.c
··· 273 273 map->vma->vm_start + map->notify.addr; 274 274 err = copy_to_user(tmp, &err, 1); 275 275 if (err) 276 - return err; 276 + return -EFAULT; 277 277 map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; 278 278 } else if (pgno >= offset && pgno < offset + pages) { 279 279 uint8_t *tmp = kmap(map->pages[pgno]); ··· 662 662 if (map->flags) { 663 663 if ((vma->vm_flags & VM_WRITE) && 664 664 (map->flags & GNTMAP_readonly)) 665 - return -EINVAL; 665 + goto out_unlock_put; 666 666 } else { 667 667 map->flags = GNTMAP_host_map; 668 668 if (!(vma->vm_flags & VM_WRITE)) ··· 700 700 spin_unlock(&priv->lock); 701 701 return err; 702 702 703 + out_unlock_put: 704 + spin_unlock(&priv->lock); 703 705 out_put_map: 704 706 if (use_ptemod) 705 707 map->vma = NULL;
+1 -1
fs/ceph/addr.c
··· 92 92 ci->i_head_snapc = ceph_get_snap_context(snapc); 93 93 ++ci->i_wrbuffer_ref_head; 94 94 if (ci->i_wrbuffer_ref == 0) 95 - igrab(inode); 95 + ihold(inode); 96 96 ++ci->i_wrbuffer_ref; 97 97 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 98 98 "snapc %p seq %lld (%d snaps)\n",
+2 -2
fs/ceph/snap.c
··· 463 463 464 464 dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode, 465 465 capsnap, snapc); 466 - igrab(inode); 467 - 466 + ihold(inode); 467 + 468 468 atomic_set(&capsnap->nref, 1); 469 469 capsnap->ci = ci; 470 470 INIT_LIST_HEAD(&capsnap->ci_item);
+2 -1
fs/nfs/nfs4state.c
··· 590 590 state->owner = owner; 591 591 atomic_inc(&owner->so_count); 592 592 list_add(&state->inode_states, &nfsi->open_states); 593 - state->inode = igrab(inode); 593 + ihold(inode); 594 + state->inode = inode; 594 595 spin_unlock(&inode->i_lock); 595 596 /* Note: The reclaim code dictates that we add stateless 596 597 * and read-only stateids to the end of the list */
+5 -4
include/linux/can/core.h
··· 36 36 * @prot: pointer to struct proto structure. 37 37 */ 38 38 struct can_proto { 39 - int type; 40 - int protocol; 41 - struct proto_ops *ops; 42 - struct proto *prot; 39 + int type; 40 + int protocol; 41 + const struct proto_ops *ops; 42 + struct proto *prot; 43 43 }; 44 44 45 45 /* function prototypes for the CAN networklayer core (af_can.c) */ ··· 58 58 void *data); 59 59 60 60 extern int can_send(struct sk_buff *skb, int loop); 61 + extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 61 62 62 63 #endif /* CAN_CORE_H */
+1
include/linux/ethtool.h
··· 680 680 u32 ethtool_op_get_flags(struct net_device *dev); 681 681 int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); 682 682 void ethtool_ntuple_flush(struct net_device *dev); 683 + bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); 683 684 684 685 /** 685 686 * &ethtool_ops - Alter and report network device settings
-8
include/linux/interrupt.h
··· 338 338 /* IRQ wakeup (PM) control: */ 339 339 extern int irq_set_irq_wake(unsigned int irq, unsigned int on); 340 340 341 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 342 - /* Please do not use: Use the replacement functions instead */ 343 - static inline int set_irq_wake(unsigned int irq, unsigned int on) 344 - { 345 - return irq_set_irq_wake(irq, on); 346 - } 347 - #endif 348 - 349 341 static inline int enable_irq_wake(unsigned int irq) 350 342 { 351 343 return irq_set_irq_wake(irq, 1);
-128
include/linux/irq.h
··· 64 64 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) 65 65 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context 66 66 * IRQ_NESTED_THREAD - Interrupt nests into another thread 67 - * 68 - * Deprecated bits. They are kept updated as long as 69 - * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits 70 - * are internal state of the core code and if you really need to access 71 - * them then talk to the genirq maintainer instead of hacking 72 - * something weird. 73 - * 74 67 */ 75 68 enum { 76 69 IRQ_TYPE_NONE = 0x00000000, ··· 85 92 IRQ_NO_BALANCING = (1 << 13), 86 93 IRQ_MOVE_PCNTXT = (1 << 14), 87 94 IRQ_NESTED_THREAD = (1 << 15), 88 - 89 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 90 - IRQ_INPROGRESS = (1 << 16), 91 - IRQ_REPLAY = (1 << 17), 92 - IRQ_WAITING = (1 << 18), 93 - IRQ_DISABLED = (1 << 19), 94 - IRQ_PENDING = (1 << 20), 95 - IRQ_MASKED = (1 << 21), 96 - IRQ_MOVE_PENDING = (1 << 22), 97 - IRQ_AFFINITY_SET = (1 << 23), 98 - IRQ_WAKEUP = (1 << 24), 99 - #endif 100 95 }; 101 96 102 97 #define IRQF_MODIFY_MASK \ ··· 302 321 */ 303 322 struct irq_chip { 304 323 const char *name; 305 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 306 - unsigned int (*startup)(unsigned int irq); 307 - void (*shutdown)(unsigned int irq); 308 - void (*enable)(unsigned int irq); 309 - void (*disable)(unsigned int irq); 310 - 311 - void (*ack)(unsigned int irq); 312 - void (*mask)(unsigned int irq); 313 - void (*mask_ack)(unsigned int irq); 314 - void (*unmask)(unsigned int irq); 315 - void (*eoi)(unsigned int irq); 316 - 317 - void (*end)(unsigned int irq); 318 - int (*set_affinity)(unsigned int irq, 319 - const struct cpumask *dest); 320 - int (*retrigger)(unsigned int irq); 321 - int (*set_type)(unsigned int irq, unsigned int flow_type); 322 - int (*set_wake)(unsigned int irq, unsigned int on); 323 - 324 - void (*bus_lock)(unsigned int irq); 325 - void (*bus_sync_unlock)(unsigned int irq); 326 - #endif 327 324 unsigned int (*irq_startup)(struct irq_data *data); 328 325 void (*irq_shutdown)(struct irq_data *data); 329 326 void (*irq_enable)(struct irq_data *data); ··· 379 420 #ifdef CONFIG_GENERIC_HARDIRQS 380 421 381 422 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 382 - void move_native_irq(int irq); 383 - void move_masked_irq(int irq); 384 423 void irq_move_irq(struct irq_data *data); 385 424 void irq_move_masked_irq(struct irq_data *data); 386 425 #else 387 - static inline void move_native_irq(int irq) { } 388 - static inline void move_masked_irq(int irq) { } 389 426 static inline void irq_move_irq(struct irq_data *data) { } 390 427 static inline void irq_move_masked_irq(struct irq_data *data) { } 391 428 #endif ··· 543 588 { 544 589 return d->msi_desc; 545 590 } 546 - 547 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 548 - /* Please do not use: Use the replacement functions instead */ 549 - static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip) 550 - { 551 - return irq_set_chip(irq, chip); 552 - } 553 - static inline int set_irq_data(unsigned int irq, void *data) 554 - { 555 - return irq_set_handler_data(irq, data); 556 - } 557 - static inline int set_irq_chip_data(unsigned int irq, void *data) 558 - { 559 - return irq_set_chip_data(irq, data); 560 - } 561 - static inline int set_irq_type(unsigned int irq, unsigned int type) 562 - { 563 - return irq_set_irq_type(irq, type); 564 - } 565 - static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry) 566 - { 567 - return irq_set_msi_desc(irq,
entry); 568 - } 569 - static inline struct irq_chip *get_irq_chip(unsigned int irq) 570 - { 571 - return irq_get_chip(irq); 572 - } 573 - static inline void *get_irq_chip_data(unsigned int irq) 574 - { 575 - return irq_get_chip_data(irq); 576 - } 577 - static inline void *get_irq_data(unsigned int irq) 578 - { 579 - return irq_get_handler_data(irq); 580 - } 581 - static inline void *irq_data_get_irq_data(struct irq_data *d) 582 - { 583 - return irq_data_get_irq_handler_data(d); 584 - } 585 - static inline struct msi_desc *get_irq_msi(unsigned int irq) 586 - { 587 - return irq_get_msi_desc(irq); 588 - } 589 - static inline void set_irq_noprobe(unsigned int irq) 590 - { 591 - irq_set_noprobe(irq); 592 - } 593 - static inline void set_irq_probe(unsigned int irq) 594 - { 595 - irq_set_probe(irq); 596 - } 597 - static inline void set_irq_nested_thread(unsigned int irq, int nest) 598 - { 599 - irq_set_nested_thread(irq, nest); 600 - } 601 - static inline void 602 - set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, 603 - irq_flow_handler_t handle, const char *name) 604 - { 605 - irq_set_chip_and_handler_name(irq, chip, handle, name); 606 - } 607 - static inline void 608 - set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, 609 - irq_flow_handler_t handle) 610 - { 611 - irq_set_chip_and_handler(irq, chip, handle); 612 - } 613 - static inline void 614 - __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 615 - const char *name) 616 - { 617 - __irq_set_handler(irq, handle, is_chained, name); 618 - } 619 - static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle) 620 - { 621 - irq_set_handler(irq, handle); 622 - } 623 - static inline void 624 - set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle) 625 - { 626 - irq_set_chained_handler(irq, handle); 627 - } 628 - #endif 629 591 630 592 int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); 631 593 void irq_free_descs(unsigned int irq, unsigned int cnt);
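Dropping this compat block removes the last set_irq_*()/get_irq_*() spellings, so out-of-tree code has to move to the irq_set_*()/irq_get_*() names; the arguments are unchanged throughout. A sketch of setting up a demultiplexed child interrupt with the surviving API (chip, cookie, and name are placeholders):

    #include <linux/irq.h>

    static void demo_map_child_irq(unsigned int irq, void *cookie)
    {
            /* was: set_irq_chip_and_handler_name(...) */
            irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
                                          handle_simple_irq, "demo");
            /* was: set_irq_data(...) */
            irq_set_handler_data(irq, cookie);
            /* was: set_irq_noprobe(...) */
            irq_set_noprobe(irq);
    }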
+1 -59
include/linux/irqdesc.h
··· 35 35 * @name: flow handler name for /proc/interrupts output 36 36 */ 37 37 struct irq_desc { 38 - 39 - #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 40 38 struct irq_data irq_data; 41 - #else 42 - /* 43 - * This union will go away, once we fixed the direct access to 44 - * irq_desc all over the place. The direct fields are a 1:1 45 - * overlay of irq_data. 46 - */ 47 - union { 48 - struct irq_data irq_data; 49 - struct { 50 - unsigned int irq; 51 - unsigned int node; 52 - unsigned int pad_do_not_even_think_about_it; 53 - struct irq_chip *chip; 54 - void *handler_data; 55 - void *chip_data; 56 - struct msi_desc *msi_desc; 57 - #ifdef CONFIG_SMP 58 - cpumask_var_t affinity; 59 - #endif 60 - }; 61 - }; 62 - #endif 63 - 64 39 struct timer_rand_state *timer_rand_state; 65 40 unsigned int __percpu *kstat_irqs; 66 41 irq_flow_handler_t handle_irq; ··· 43 68 irq_preflow_handler_t preflow_handler; 44 69 #endif 45 70 struct irqaction *action; /* IRQ action list */ 46 - #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 47 71 unsigned int status_use_accessors; 48 - #else 49 - unsigned int status; /* IRQ status */ 50 - #endif 51 72 unsigned int core_internal_state__do_not_mess_with_it; 52 73 unsigned int depth; /* nested irq disables */ 53 74 unsigned int wake_depth; /* nested wake enables */ ··· 98 127 return desc->irq_data.msi_desc; 99 128 } 100 129 101 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 102 - static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc) 103 - { 104 - return irq_desc_get_chip(desc); 105 - } 106 - static inline void *get_irq_desc_data(struct irq_desc *desc) 107 - { 108 - return irq_desc_get_handler_data(desc); 109 - } 110 - 111 - static inline void *get_irq_desc_chip_data(struct irq_desc *desc) 112 - { 113 - return irq_desc_get_chip_data(desc); 114 - } 115 - 116 - static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc) 117 - { 118 - return irq_desc_get_msi_desc(desc); 119 - } 120 - #endif 121 - 122 130 /* 123 131 * Architectures call this to let the generic IRQ layer 124 132 * handle an interrupt. If the descriptor is attached to an ··· 144 194 desc->name = name; 145 195 } 146 196 147 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 148 - static inline void __set_irq_handler_unlocked(int irq, 149 - irq_flow_handler_t handler) 150 - { 151 - __irq_set_handler_locked(irq, handler); 152 - } 153 - 154 197 static inline int irq_balancing_disabled(unsigned int irq) 155 198 { 156 199 struct irq_desc *desc; 157 200 158 201 desc = irq_to_desc(irq); 159 - return desc->status & IRQ_NO_BALANCING_MASK; 202 + return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; 160 203 } 161 - #endif 162 204 163 205 static inline void 164 206 irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
include/linux/mfd/sh_mobile_sdhi.h include/linux/mmc/sh_mobile_sdhi.h
+63
include/linux/mmc/tmio.h
··· 1 + /* 2 + * include/linux/mmc/tmio.h 3 + * 4 + * Copyright (C) 2007 Ian Molton 5 + * Copyright (C) 2004 Ian Molton 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * Driver for the MMC / SD / SDIO cell found in: 12 + * 13 + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 14 + */ 15 + #ifndef _LINUX_MMC_TMIO_H_ 16 + #define _LINUX_MMC_TMIO_H_ 17 + 18 + #define CTL_SD_CMD 0x00 19 + #define CTL_ARG_REG 0x04 20 + #define CTL_STOP_INTERNAL_ACTION 0x08 21 + #define CTL_XFER_BLK_COUNT 0xa 22 + #define CTL_RESPONSE 0x0c 23 + #define CTL_STATUS 0x1c 24 + #define CTL_IRQ_MASK 0x20 25 + #define CTL_SD_CARD_CLK_CTL 0x24 26 + #define CTL_SD_XFER_LEN 0x26 27 + #define CTL_SD_MEM_CARD_OPT 0x28 28 + #define CTL_SD_ERROR_DETAIL_STATUS 0x2c 29 + #define CTL_SD_DATA_PORT 0x30 30 + #define CTL_TRANSACTION_CTL 0x34 31 + #define CTL_SDIO_STATUS 0x36 32 + #define CTL_SDIO_IRQ_MASK 0x38 33 + #define CTL_RESET_SD 0xe0 34 + #define CTL_SDIO_REGS 0x100 35 + #define CTL_CLK_AND_WAIT_CTL 0x138 36 + #define CTL_RESET_SDIO 0x1e0 37 + 38 + /* Definitions for values the CTRL_STATUS register can take. */ 39 + #define TMIO_STAT_CMDRESPEND 0x00000001 40 + #define TMIO_STAT_DATAEND 0x00000004 41 + #define TMIO_STAT_CARD_REMOVE 0x00000008 42 + #define TMIO_STAT_CARD_INSERT 0x00000010 43 + #define TMIO_STAT_SIGSTATE 0x00000020 44 + #define TMIO_STAT_WRPROTECT 0x00000080 45 + #define TMIO_STAT_CARD_REMOVE_A 0x00000100 46 + #define TMIO_STAT_CARD_INSERT_A 0x00000200 47 + #define TMIO_STAT_SIGSTATE_A 0x00000400 48 + #define TMIO_STAT_CMD_IDX_ERR 0x00010000 49 + #define TMIO_STAT_CRCFAIL 0x00020000 50 + #define TMIO_STAT_STOPBIT_ERR 0x00040000 51 + #define TMIO_STAT_DATATIMEOUT 0x00080000 52 + #define TMIO_STAT_RXOVERFLOW 0x00100000 53 + #define TMIO_STAT_TXUNDERRUN 0x00200000 54 + #define TMIO_STAT_CMDTIMEOUT 0x00400000 55 + #define TMIO_STAT_RXRDY 0x01000000 56 + #define TMIO_STAT_TXRQ 0x02000000 57 + #define TMIO_STAT_ILL_FUNC 0x20000000 58 + #define TMIO_STAT_CMD_BUSY 0x40000000 59 + #define TMIO_STAT_ILL_ACCESS 0x80000000 60 + 61 + #define TMIO_BBS 512 /* Boot block size */ 62 + 63 + #endif /* _LINUX_MMC_TMIO_H_ */
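The new header just centralizes the register offsets and status bits that the TMIO MMC drivers had been duplicating. Decoding a raw status word against these masks might look like the following sketch (how the 32-bit status is actually read back is controller-specific, so a plain value is assumed here):

    #include <linux/kernel.h>
    #include <linux/mmc/tmio.h>

    static void demo_decode_tmio_status(u32 status)
    {
            if (status & TMIO_STAT_CARD_INSERT)
                    pr_info("tmio: card inserted\n");
            if (status & TMIO_STAT_CARD_REMOVE)
                    pr_info("tmio: card removed\n");
            if (status & TMIO_STAT_CRCFAIL)
                    pr_info("tmio: CRC error on last transfer\n");
    }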
+7 -1
include/linux/skbuff.h
··· 122 122 123 123 struct sk_buff; 124 124 125 - /* To allow 64K frame to be packed as single skb without frag_list */ 125 + /* To allow 64K frame to be packed as single skb without frag_list. Since 126 + * GRO uses frags we allocate at least 16 regardless of page size. 127 + */ 128 + #if (65536/PAGE_SIZE + 2) < 16 129 + #define MAX_SKB_FRAGS 16 130 + #else 126 131 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) 132 + #endif 127 133 128 134 typedef struct skb_frag_struct skb_frag_t; 129 135
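The guard only matters on large-page systems: with 4 KiB pages the formula gives 65536/4096 + 2 = 18 and the #if leaves it alone, but with 64 KiB pages it collapses to 65536/65536 + 2 = 3, too few fragment slots for GRO, hence the clamp to 16. A compile-time check makes the invariant explicit (demo code, not from the patch):

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    static inline void demo_frag_slots_check(void)
    {
            /* e.g. PAGE_SIZE 4096 -> 18 slots; PAGE_SIZE 65536 -> clamped to 16 */
            BUILD_BUG_ON(MAX_SKB_FRAGS < 16);
    }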
+32
include/linux/vmalloc.h
··· 95 95 96 96 extern int map_vm_area(struct vm_struct *area, pgprot_t prot, 97 97 struct page ***pages); 98 + #ifdef CONFIG_MMU 98 99 extern int map_kernel_range_noflush(unsigned long start, unsigned long size, 99 100 pgprot_t prot, struct page **pages); 100 101 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); 101 102 extern void unmap_kernel_range(unsigned long addr, unsigned long size); 103 + #else 104 + static inline int 105 + map_kernel_range_noflush(unsigned long start, unsigned long size, 106 + pgprot_t prot, struct page **pages) 107 + { 108 + return size >> PAGE_SHIFT; 109 + } 110 + static inline void 111 + unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 112 + { 113 + } 114 + static inline void 115 + unmap_kernel_range(unsigned long addr, unsigned long size) 116 + { 117 + } 118 + #endif 102 119 103 120 /* Allocate/destroy a 'vmalloc' VM area. */ 104 121 extern struct vm_struct *alloc_vm_area(size_t size); ··· 133 116 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); 134 117 135 118 #ifdef CONFIG_SMP 119 + # ifdef CONFIG_MMU 136 120 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 137 121 const size_t *sizes, int nr_vms, 138 122 size_t align); 139 123 140 124 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); 125 + # else 126 + static inline struct vm_struct ** 127 + pcpu_get_vm_areas(const unsigned long *offsets, 128 + const size_t *sizes, int nr_vms, 129 + size_t align) 130 + { 131 + return NULL; 132 + } 133 + 134 + static inline void 135 + pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 136 + { 137 + } 138 + # endif 141 139 #endif 142 140 143 141 #endif /* _LINUX_VMALLOC_H */
+1 -1
include/net/dst.h
··· 345 345 346 346 static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb) 347 347 { 348 - struct dst_entry *child = skb_dst(skb)->child; 348 + struct dst_entry *child = dst_clone(skb_dst(skb)->child); 349 349 350 350 skb_dst_drop(skb); 351 351 return child;
+7 -1
include/net/rose.h
··· 14 14 15 15 #define ROSE_MIN_LEN 3 16 16 17 + #define ROSE_CALL_REQ_ADDR_LEN_OFF 3 18 + #define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */ 19 + #define ROSE_CALL_REQ_DEST_ADDR_OFF 4 20 + #define ROSE_CALL_REQ_SRC_ADDR_OFF 9 21 + #define ROSE_CALL_REQ_FACILITIES_OFF 14 22 + 17 23 #define ROSE_GFI 0x10 18 24 #define ROSE_Q_BIT 0x80 19 25 #define ROSE_D_BIT 0x40 ··· 220 214 extern int rose_validate_nr(struct sock *, unsigned short); 221 215 extern void rose_write_internal(struct sock *, int); 222 216 extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *); 223 - extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *); 217 + extern int rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *); 224 218 extern void rose_disconnect(struct sock *, int, int, int); 225 219 226 220 /* rose_timer.c */
+22
include/net/xfrm.h
··· 1601 1601 } 1602 1602 1603 1603 #ifdef CONFIG_XFRM_MIGRATE 1604 + static inline int xfrm_replay_clone(struct xfrm_state *x, 1605 + struct xfrm_state *orig) 1606 + { 1607 + x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn), 1608 + GFP_KERNEL); 1609 + if (!x->replay_esn) 1610 + return -ENOMEM; 1611 + 1612 + x->replay_esn->bmp_len = orig->replay_esn->bmp_len; 1613 + x->replay_esn->replay_window = orig->replay_esn->replay_window; 1614 + 1615 + x->preplay_esn = kmemdup(x->replay_esn, 1616 + xfrm_replay_state_esn_len(x->replay_esn), 1617 + GFP_KERNEL); 1618 + if (!x->preplay_esn) { 1619 + kfree(x->replay_esn); 1620 + return -ENOMEM; 1621 + } 1622 + 1623 + return 0; 1624 + } 1625 + 1604 1626 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) 1605 1627 { 1606 1628 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
-7
kernel/irq/Kconfig
··· 10 10 config GENERIC_HARDIRQS 11 11 def_bool y 12 12 13 - # Select this to disable the deprecated stuff 14 - config GENERIC_HARDIRQS_NO_DEPRECATED 15 - bool 16 - 17 - config GENERIC_HARDIRQS_NO_COMPAT 18 - bool 19 - 20 13 # Options selectable by the architecture code 21 14 22 15 # Make sparse irq Kconfig switch below available
+1 -3
kernel/irq/autoprobe.c
··· 70 70 raw_spin_lock_irq(&desc->lock); 71 71 if (!desc->action && irq_settings_can_probe(desc)) { 72 72 desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; 73 - if (irq_startup(desc)) { 74 - irq_compat_set_pending(desc); 73 + if (irq_startup(desc)) 75 74 desc->istate |= IRQS_PENDING; 76 - } 77 75 } 78 76 raw_spin_unlock_irq(&desc->lock); 79 77 }
+1 -130
kernel/irq/chip.c
··· 34 34 if (!chip) 35 35 chip = &no_irq_chip; 36 36 37 - irq_chip_set_defaults(chip); 38 37 desc->irq_data.chip = chip; 39 38 irq_put_desc_unlock(desc, flags); 40 39 /* ··· 140 141 static void irq_state_clr_disabled(struct irq_desc *desc) 141 142 { 142 143 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); 143 - irq_compat_clr_disabled(desc); 144 144 } 145 145 146 146 static void irq_state_set_disabled(struct irq_desc *desc) 147 147 { 148 148 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); 149 - irq_compat_set_disabled(desc); 150 149 } 151 150 152 151 static void irq_state_clr_masked(struct irq_desc *desc) 153 152 { 154 153 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); 155 - irq_compat_clr_masked(desc); 156 154 } 157 155 158 156 static void irq_state_set_masked(struct irq_desc *desc) 159 157 { 160 158 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); 161 - irq_compat_set_masked(desc); 162 159 } 163 160 164 161 int irq_startup(struct irq_desc *desc) ··· 202 207 desc->irq_data.chip->irq_disable(&desc->irq_data); 203 208 irq_state_set_masked(desc); 204 209 } 205 - } 206 - 207 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 208 - /* Temporary migration helpers */ 209 - static void compat_irq_mask(struct irq_data *data) 210 - { 211 - data->chip->mask(data->irq); 212 - } 213 - 214 - static void compat_irq_unmask(struct irq_data *data) 215 - { 216 - data->chip->unmask(data->irq); 217 - } 218 - 219 - static void compat_irq_ack(struct irq_data *data) 220 - { 221 - data->chip->ack(data->irq); 222 - } 223 - 224 - static void compat_irq_mask_ack(struct irq_data *data) 225 - { 226 - data->chip->mask_ack(data->irq); 227 - } 228 - 229 - static void compat_irq_eoi(struct irq_data *data) 230 - { 231 - data->chip->eoi(data->irq); 232 - } 233 - 234 - static void compat_irq_enable(struct irq_data *data) 235 - { 236 - data->chip->enable(data->irq); 237 - } 238 - 239 - static void compat_irq_disable(struct irq_data *data) 240 - { 241 - data->chip->disable(data->irq); 242 - } 243 - 244 - static void compat_irq_shutdown(struct irq_data *data) 245 - { 246 - data->chip->shutdown(data->irq); 247 - } 248 - 249 - static unsigned int compat_irq_startup(struct irq_data *data) 250 - { 251 - return data->chip->startup(data->irq); 252 - } 253 - 254 - static int compat_irq_set_affinity(struct irq_data *data, 255 - const struct cpumask *dest, bool force) 256 - { 257 - return data->chip->set_affinity(data->irq, dest); 258 - } 259 - 260 - static int compat_irq_set_type(struct irq_data *data, unsigned int type) 261 - { 262 - return data->chip->set_type(data->irq, type); 263 - } 264 - 265 - static int compat_irq_set_wake(struct irq_data *data, unsigned int on) 266 - { 267 - return data->chip->set_wake(data->irq, on); 268 - } 269 - 270 - static int compat_irq_retrigger(struct irq_data *data) 271 - { 272 - return data->chip->retrigger(data->irq); 273 - } 274 - 275 - static void compat_bus_lock(struct irq_data *data) 276 - { 277 - data->chip->bus_lock(data->irq); 278 - } 279 - 280 - static void compat_bus_sync_unlock(struct irq_data *data) 281 - { 282 - data->chip->bus_sync_unlock(data->irq); 283 - } 284 - #endif 285 - 286 - /* 287 - * Fixup enable/disable function pointers 288 - */ 289 - void irq_chip_set_defaults(struct irq_chip *chip) 290 - { 291 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 292 - if (chip->enable) 293 - chip->irq_enable = compat_irq_enable; 294 - if (chip->disable) 295 - chip->irq_disable = compat_irq_disable; 296 - if (chip->shutdown) 297 - chip->irq_shutdown = compat_irq_shutdown; 298 - if (chip->startup) 299 -
chip->irq_startup = compat_irq_startup; 300 - if (!chip->end) 301 - chip->end = dummy_irq_chip.end; 302 - if (chip->bus_lock) 303 - chip->irq_bus_lock = compat_bus_lock; 304 - if (chip->bus_sync_unlock) 305 - chip->irq_bus_sync_unlock = compat_bus_sync_unlock; 306 - if (chip->mask) 307 - chip->irq_mask = compat_irq_mask; 308 - if (chip->unmask) 309 - chip->irq_unmask = compat_irq_unmask; 310 - if (chip->ack) 311 - chip->irq_ack = compat_irq_ack; 312 - if (chip->mask_ack) 313 - chip->irq_mask_ack = compat_irq_mask_ack; 314 - if (chip->eoi) 315 - chip->irq_eoi = compat_irq_eoi; 316 - if (chip->set_affinity) 317 - chip->irq_set_affinity = compat_irq_set_affinity; 318 - if (chip->set_type) 319 - chip->irq_set_type = compat_irq_set_type; 320 - if (chip->set_wake) 321 - chip->irq_set_wake = compat_irq_set_wake; 322 - if (chip->retrigger) 323 - chip->irq_retrigger = compat_irq_retrigger; 324 - #endif 325 210 } 326 211 327 212 static inline void mask_ack_irq(struct irq_desc *desc) ··· 256 381 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) 257 382 goto out_unlock; 258 383 259 - irq_compat_set_progress(desc); 260 384 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); 261 385 raw_spin_unlock_irq(&desc->lock); 262 386 ··· 265 391 266 392 raw_spin_lock_irq(&desc->lock); 267 393 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 268 - irq_compat_clr_progress(desc); 269 394 270 395 out_unlock: 271 396 raw_spin_unlock_irq(&desc->lock); ··· 387 514 * then mask it and get out of here: 388 515 */ 389 516 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 390 - irq_compat_set_pending(desc); 391 517 desc->istate |= IRQS_PENDING; 392 518 mask_irq(desc); 393 519 goto out; ··· 439 567 if (unlikely(irqd_irq_disabled(&desc->irq_data) || 440 568 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { 441 569 if (!irq_check_poll(desc)) { 442 - irq_compat_set_pending(desc); 443 570 desc->istate |= IRQS_PENDING; 444 571 mask_ack_irq(desc); 445 572 goto out_unlock; ··· 514 643 } while ((desc->istate & IRQS_PENDING) && 515 644 !irqd_irq_disabled(&desc->irq_data)); 516 645 517 - out_unlock: 646 + out_eoi: 518 647 chip->irq_eoi(&desc->irq_data); 519 648 raw_spin_unlock(&desc->lock); 520 649 }
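The hunks above retire the GENERIC_HARDIRQS migration shims: the per-field compat_irq_* trampolines and irq_chip_set_defaults() disappear, and descriptor state is now flipped only through the irqd_* accessors on one canonical state word. A rough userspace illustration of that single-word accessor pattern follows; the demo_* names and bit values are ours, not kernel API.

#include <stdio.h>

/* Stand-ins for IRQD_IRQ_DISABLED / IRQD_IRQ_MASKED etc. */
enum {
	DEMO_IRQ_DISABLED = 1 << 0,
	DEMO_IRQ_MASKED   = 1 << 1,
};

struct demo_irq_data {
	unsigned int state_use_accessors;	/* touched only via helpers */
};

static void demo_irqd_set(struct demo_irq_data *d, unsigned int mask)
{
	d->state_use_accessors |= mask;
}

static void demo_irqd_clear(struct demo_irq_data *d, unsigned int mask)
{
	d->state_use_accessors &= ~mask;
}

int main(void)
{
	struct demo_irq_data d = { 0 };

	/* One update site, one word: no parallel desc->status copy
	 * (the irq_compat_set_masked() calls above) to keep in sync. */
	demo_irqd_set(&d, DEMO_IRQ_MASKED | DEMO_IRQ_DISABLED);
	demo_irqd_clear(&d, DEMO_IRQ_DISABLED);
	printf("state=%#x\n", d.state_use_accessors);
	return 0;
}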
-72
kernel/irq/compat.h
··· 1 - /* 2 - * Compat layer for transition period 3 - */ 4 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 5 - static inline void irq_compat_set_progress(struct irq_desc *desc) 6 - { 7 - desc->status |= IRQ_INPROGRESS; 8 - } 9 - 10 - static inline void irq_compat_clr_progress(struct irq_desc *desc) 11 - { 12 - desc->status &= ~IRQ_INPROGRESS; 13 - } 14 - static inline void irq_compat_set_disabled(struct irq_desc *desc) 15 - { 16 - desc->status |= IRQ_DISABLED; 17 - } 18 - static inline void irq_compat_clr_disabled(struct irq_desc *desc) 19 - { 20 - desc->status &= ~IRQ_DISABLED; 21 - } 22 - static inline void irq_compat_set_pending(struct irq_desc *desc) 23 - { 24 - desc->status |= IRQ_PENDING; 25 - } 26 - 27 - static inline void irq_compat_clr_pending(struct irq_desc *desc) 28 - { 29 - desc->status &= ~IRQ_PENDING; 30 - } 31 - static inline void irq_compat_set_masked(struct irq_desc *desc) 32 - { 33 - desc->status |= IRQ_MASKED; 34 - } 35 - 36 - static inline void irq_compat_clr_masked(struct irq_desc *desc) 37 - { 38 - desc->status &= ~IRQ_MASKED; 39 - } 40 - static inline void irq_compat_set_move_pending(struct irq_desc *desc) 41 - { 42 - desc->status |= IRQ_MOVE_PENDING; 43 - } 44 - 45 - static inline void irq_compat_clr_move_pending(struct irq_desc *desc) 46 - { 47 - desc->status &= ~IRQ_MOVE_PENDING; 48 - } 49 - static inline void irq_compat_set_affinity(struct irq_desc *desc) 50 - { 51 - desc->status |= IRQ_AFFINITY_SET; 52 - } 53 - 54 - static inline void irq_compat_clr_affinity(struct irq_desc *desc) 55 - { 56 - desc->status &= ~IRQ_AFFINITY_SET; 57 - } 58 - #else 59 - static inline void irq_compat_set_progress(struct irq_desc *desc) { } 60 - static inline void irq_compat_clr_progress(struct irq_desc *desc) { } 61 - static inline void irq_compat_set_disabled(struct irq_desc *desc) { } 62 - static inline void irq_compat_clr_disabled(struct irq_desc *desc) { } 63 - static inline void irq_compat_set_pending(struct irq_desc *desc) { } 64 - static inline void irq_compat_clr_pending(struct irq_desc *desc) { } 65 - static inline void irq_compat_set_masked(struct irq_desc *desc) { } 66 - static inline void irq_compat_clr_masked(struct irq_desc *desc) { } 67 - static inline void irq_compat_set_move_pending(struct irq_desc *desc) { } 68 - static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { } 69 - static inline void irq_compat_set_affinity(struct irq_desc *desc) { } 70 - static inline void irq_compat_clr_affinity(struct irq_desc *desc) { } 71 - #endif 72 -
+1 -1
kernel/irq/debug.h
··· 4 4 5 5 #include <linux/kallsyms.h> 6 6 7 - #define P(f) if (desc->status & f) printk("%14s set\n", #f) 7 + #define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f) 8 8 #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) 9 9 /* FIXME */ 10 10 #define PD(f) do { } while (0)
-9
kernel/irq/dummychip.c
··· 31 31 return 0; 32 32 } 33 33 34 - #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 35 - static void compat_noop(unsigned int irq) { } 36 - #define END_INIT .end = compat_noop 37 - #else 38 - #define END_INIT 39 - #endif 40 - 41 34 /* 42 35 * Generic no controller implementation 43 36 */ ··· 41 48 .irq_enable = noop, 42 49 .irq_disable = noop, 43 50 .irq_ack = ack_bad, 44 - END_INIT 45 51 }; 46 52 47 53 /* ··· 56 64 .irq_ack = noop, 57 65 .irq_mask = noop, 58 66 .irq_unmask = noop, 59 - END_INIT 60 67 };
-3
kernel/irq/handle.c
··· 175 175 struct irqaction *action = desc->action; 176 176 irqreturn_t ret; 177 177 178 - irq_compat_clr_pending(desc); 179 178 desc->istate &= ~IRQS_PENDING; 180 - irq_compat_set_progress(desc); 181 179 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); 182 180 raw_spin_unlock(&desc->lock); 183 181 ··· 183 185 184 186 raw_spin_lock(&desc->lock); 185 187 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 186 - irq_compat_clr_progress(desc); 187 188 return ret; 188 189 }
-10
kernel/irq/internals.h
··· 15 15 16 16 #define istate core_internal_state__do_not_mess_with_it 17 17 18 - #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT 19 - # define status status_use_accessors 20 - #endif 21 - 22 18 extern int noirqdebug; 23 19 24 20 /* ··· 57 61 IRQS_SUSPENDED = 0x00000800, 58 62 }; 59 63 60 - #include "compat.h" 61 64 #include "debug.h" 62 65 #include "settings.h" 63 66 64 67 #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) 65 - 66 - /* Set default functions for irq_chip structures: */ 67 - extern void irq_chip_set_defaults(struct irq_chip *chip); 68 68 69 69 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 70 70 unsigned long flags); ··· 148 156 static inline void irqd_set_move_pending(struct irq_data *d) 149 157 { 150 158 d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; 151 - irq_compat_set_move_pending(irq_data_to_desc(d)); 152 159 } 153 160 154 161 static inline void irqd_clr_move_pending(struct irq_data *d) 155 162 { 156 163 d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; 157 - irq_compat_clr_move_pending(irq_data_to_desc(d)); 158 164 } 159 165 160 166 static inline void irqd_clear(struct irq_data *d, unsigned int mask)
+2 -10
kernel/irq/manage.c
··· 132 132 } 133 133 #else 134 134 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } 135 - static inline bool irq_move_pending(struct irq_desc *data) { return false; } 135 + static inline bool irq_move_pending(struct irq_data *data) { return false; } 136 136 static inline void 137 137 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } 138 138 static inline void ··· 166 166 kref_get(&desc->affinity_notify->kref); 167 167 schedule_work(&desc->affinity_notify->work); 168 168 } 169 - irq_compat_set_affinity(desc); 170 169 irqd_set(data, IRQD_AFFINITY_SET); 171 170 172 171 return ret; ··· 296 297 if (cpumask_intersects(desc->irq_data.affinity, 297 298 cpu_online_mask)) 298 299 set = desc->irq_data.affinity; 299 - else { 300 - irq_compat_clr_affinity(desc); 300 + else 301 301 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); 302 - } 303 302 } 304 303 305 304 cpumask_and(mask, cpu_online_mask, set); ··· 584 587 irqd_set(&desc->irq_data, IRQD_LEVEL); 585 588 } 586 589 587 - if (chip != desc->irq_data.chip) 588 - irq_chip_set_defaults(desc->irq_data.chip); 589 590 ret = 0; 590 591 break; 591 592 default: ··· 780 785 * but AFAICT IRQS_PENDING should be fine as it 781 786 * retriggers the interrupt itself --- tglx 782 787 */ 783 - irq_compat_set_pending(desc); 784 788 desc->istate |= IRQS_PENDING; 785 789 raw_spin_unlock_irq(&desc->lock); 786 790 } else { ··· 975 981 new->thread_mask = 1 << ffz(thread_mask); 976 982 977 983 if (!shared) { 978 - irq_chip_set_defaults(desc->irq_data.chip); 979 - 980 984 init_waitqueue_head(&desc->wait_for_threads); 981 985 982 986 /* Setup the type (level, edge polarity) if configured: */
-10
kernel/irq/migration.c
··· 53 53 cpumask_clear(desc->pending_mask); 54 54 } 55 55 56 - void move_masked_irq(int irq) 57 - { 58 - irq_move_masked_irq(irq_get_irq_data(irq)); 59 - } 60 - 61 56 void irq_move_irq(struct irq_data *idata) 62 57 { 63 58 bool masked; ··· 74 79 irq_move_masked_irq(idata); 75 80 if (!masked) 76 81 idata->chip->irq_unmask(idata); 77 - } 78 - 79 - void move_native_irq(int irq) 80 - { 81 - irq_move_irq(irq_get_irq_data(irq)); 82 82 }
+6 -2
kernel/irq/proc.c
··· 364 364 return 0; 365 365 } 366 366 367 + #ifndef ACTUAL_NR_IRQS 368 + # define ACTUAL_NR_IRQS nr_irqs 369 + #endif 370 + 367 371 int show_interrupts(struct seq_file *p, void *v) 368 372 { 369 373 static int prec; ··· 377 373 struct irqaction *action; 378 374 struct irq_desc *desc; 379 375 380 - if (i > nr_irqs) 376 + if (i > ACTUAL_NR_IRQS) 381 377 return 0; 382 378 383 - if (i == nr_irqs) 379 + if (i == ACTUAL_NR_IRQS) 384 380 return arch_show_interrupts(p, prec); 385 381 386 382 /* print header and calculate the width of the first column */
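The #ifndef fallback lets an architecture define ACTUAL_NR_IRQS above nr_irqs, so arch_show_interrupts() summary rows can be appended past the per-IRQ lines; everyone else silently gets the plain nr_irqs default. The override-with-default idiom in isolation, with made-up names:

#include <stdio.h>

#define NR_DEMO_ROWS 16

/* An arch header could pre-define the knob before this point, e.g.
 * #define DEMO_ACTUAL_ROWS (NR_DEMO_ROWS + 2) for extra summary rows. */
#ifndef DEMO_ACTUAL_ROWS
# define DEMO_ACTUAL_ROWS NR_DEMO_ROWS	/* generic default */
#endif

int main(void)
{
	printf("iterate %d rows\n", DEMO_ACTUAL_ROWS);
	return 0;
}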
-1
kernel/irq/resend.c
··· 65 65 if (desc->istate & IRQS_REPLAY) 66 66 return; 67 67 if (desc->istate & IRQS_PENDING) { 68 - irq_compat_clr_pending(desc); 69 68 desc->istate &= ~IRQS_PENDING; 70 69 desc->istate |= IRQS_REPLAY; 71 70
+21 -34
kernel/irq/settings.h
··· 15 15 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, 16 16 }; 17 17 18 - #define IRQ_INPROGRESS GOT_YOU_MORON 19 - #define IRQ_REPLAY GOT_YOU_MORON 20 - #define IRQ_WAITING GOT_YOU_MORON 21 - #define IRQ_DISABLED GOT_YOU_MORON 22 - #define IRQ_PENDING GOT_YOU_MORON 23 - #define IRQ_MASKED GOT_YOU_MORON 24 - #define IRQ_WAKEUP GOT_YOU_MORON 25 - #define IRQ_MOVE_PENDING GOT_YOU_MORON 26 18 #define IRQ_PER_CPU GOT_YOU_MORON 27 19 #define IRQ_NO_BALANCING GOT_YOU_MORON 28 - #define IRQ_AFFINITY_SET GOT_YOU_MORON 29 20 #define IRQ_LEVEL GOT_YOU_MORON 30 21 #define IRQ_NOPROBE GOT_YOU_MORON 31 22 #define IRQ_NOREQUEST GOT_YOU_MORON ··· 28 37 static inline void 29 38 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) 30 39 { 31 - desc->status &= ~(clr & _IRQF_MODIFY_MASK); 32 - desc->status |= (set & _IRQF_MODIFY_MASK); 40 + desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK); 41 + desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); 33 42 } 34 43 35 44 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) 36 45 { 37 - return desc->status & _IRQ_PER_CPU; 46 + return desc->status_use_accessors & _IRQ_PER_CPU; 38 47 } 39 48 40 49 static inline void irq_settings_set_per_cpu(struct irq_desc *desc) 41 50 { 42 - desc->status |= _IRQ_PER_CPU; 51 + desc->status_use_accessors |= _IRQ_PER_CPU; 43 52 } 44 53 45 54 static inline void irq_settings_set_no_balancing(struct irq_desc *desc) 46 55 { 47 - desc->status |= _IRQ_NO_BALANCING; 56 + desc->status_use_accessors |= _IRQ_NO_BALANCING; 48 57 } 49 58 50 59 static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) 51 60 { 52 - return desc->status & _IRQ_NO_BALANCING; 61 + return desc->status_use_accessors & _IRQ_NO_BALANCING; 53 62 } 54 63 55 64 static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) 56 65 { 57 - return desc->status & IRQ_TYPE_SENSE_MASK; 66 + return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK; 58 67 } 59 68 60 69 static inline void 61 70 irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) 62 71 { 63 - desc->status &= ~IRQ_TYPE_SENSE_MASK; 64 - desc->status |= mask & IRQ_TYPE_SENSE_MASK; 72 + desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK; 73 + desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK; 65 74 } 66 75 67 76 static inline bool irq_settings_is_level(struct irq_desc *desc) 68 77 { 69 - return desc->status & _IRQ_LEVEL; 78 + return desc->status_use_accessors & _IRQ_LEVEL; 70 79 } 71 80 72 81 static inline void irq_settings_clr_level(struct irq_desc *desc) 73 82 { 74 - desc->status &= ~_IRQ_LEVEL; 83 + desc->status_use_accessors &= ~_IRQ_LEVEL; 75 84 } 76 85 77 86 static inline void irq_settings_set_level(struct irq_desc *desc) 78 87 { 79 - desc->status |= _IRQ_LEVEL; 88 + desc->status_use_accessors |= _IRQ_LEVEL; 80 89 } 81 90 82 91 static inline bool irq_settings_can_request(struct irq_desc *desc) 83 92 { 84 - return !(desc->status & _IRQ_NOREQUEST); 93 + return !(desc->status_use_accessors & _IRQ_NOREQUEST); 85 94 } 86 95 87 96 static inline void irq_settings_clr_norequest(struct irq_desc *desc) 88 97 { 89 - desc->status &= ~_IRQ_NOREQUEST; 98 + desc->status_use_accessors &= ~_IRQ_NOREQUEST; 90 99 } 91 100 92 101 static inline void irq_settings_set_norequest(struct irq_desc *desc) 93 102 { 94 - desc->status |= _IRQ_NOREQUEST; 103 + desc->status_use_accessors |= _IRQ_NOREQUEST; 95 104 } 96 105 97 106 static inline bool irq_settings_can_probe(struct irq_desc *desc) 98 107 { 99 - return !(desc->status & _IRQ_NOPROBE); 108 + return !(desc->status_use_accessors & _IRQ_NOPROBE); 100 109 } 101 110 102 111 static inline void irq_settings_clr_noprobe(struct irq_desc *desc) 103 112 { 104 - desc->status &= ~_IRQ_NOPROBE; 113 + desc->status_use_accessors &= ~_IRQ_NOPROBE; 105 114 } 106 115 107 116 static inline void irq_settings_set_noprobe(struct irq_desc *desc) 108 117 { 109 - desc->status |= _IRQ_NOPROBE; 118 + desc->status_use_accessors |= _IRQ_NOPROBE; 110 119 } 111 120 112 121 static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) 113 122 { 114 - return desc->status & _IRQ_MOVE_PCNTXT; 123 + return desc->status_use_accessors & _IRQ_MOVE_PCNTXT; 115 124 } 116 125 117 126 static inline bool irq_settings_can_autoenable(struct irq_desc *desc) 118 127 { 119 - return !(desc->status & _IRQ_NOAUTOEN); 128 + return !(desc->status_use_accessors & _IRQ_NOAUTOEN); 120 129 } 121 130 122 131 static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) 123 132 { 124 - return desc->status & _IRQ_NESTED_THREAD; 133 + return desc->status_use_accessors & _IRQ_NESTED_THREAD; 125 134 } 126 - 127 - /* Nothing should touch desc->status from now on */ 128 - #undef status 129 - #define status USE_THE_PROPER_WRAPPERS_YOU_MORON
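The GOT_YOU_MORON defines that remain are deliberate compile-time poison: now that the IRQ_* status bits live behind the irq_settings_*() helpers, any straggler still using a raw flag name expands to an undeclared identifier and the build fails at the offending line. The same trick reduced to a toy (names ours):

/* Retire a flag name by poisoning it. */
#define OLD_DEMO_FLAG THIS_NAME_IS_POISONED

int main(void)
{
	/* Re-enabling the next line breaks the build with
	 * "THIS_NAME_IS_POISONED undeclared" - which is the point. */
	/* int leftover = OLD_DEMO_FLAG; */
	return 0;
}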
-1
kernel/irq/spurious.c
··· 93 93 * Already running: If it is shared get the other 94 94 * CPU to go looking for our mystery interrupt too 95 95 */ 96 - irq_compat_set_pending(desc); 97 96 desc->istate |= IRQS_PENDING; 98 97 goto out; 99 98 }
+39 -13
mm/nommu.c
··· 1971 1971 } 1972 1972 EXPORT_SYMBOL(filemap_fault); 1973 1973 1974 - /* 1975 - * Access another process' address space. 1976 - * - source/target buffer must be kernel space 1977 - */ 1978 - int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 1974 + static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, 1975 + unsigned long addr, void *buf, int len, int write) 1979 1976 { 1980 1977 struct vm_area_struct *vma; 1981 - struct mm_struct *mm; 1982 - 1983 - if (addr + len < addr) 1984 - return 0; 1985 - 1986 - mm = get_task_mm(tsk); 1987 - if (!mm) 1988 - return 0; 1989 1978 1990 1979 down_read(&mm->mmap_sem); 1991 1980 ··· 1999 2010 } 2000 2011 2001 2012 up_read(&mm->mmap_sem); 2013 + 2014 + return len; 2015 + } 2016 + 2017 + /** 2018 + * @access_remote_vm - access another process' address space 2019 + * @mm: the mm_struct of the target address space 2020 + * @addr: start address to access 2021 + * @buf: source or destination buffer 2022 + * @len: number of bytes to transfer 2023 + * @write: whether the access is a write 2024 + * 2025 + * The caller must hold a reference on @mm. 2026 + */ 2027 + int access_remote_vm(struct mm_struct *mm, unsigned long addr, 2028 + void *buf, int len, int write) 2029 + { 2030 + return __access_remote_vm(NULL, mm, addr, buf, len, write); 2031 + } 2032 + 2033 + /* 2034 + * Access another process' address space. 2035 + * - source/target buffer must be kernel space 2036 + */ 2037 + int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 2038 + { 2039 + struct mm_struct *mm; 2040 + 2041 + if (addr + len < addr) 2042 + return 0; 2043 + 2044 + mm = get_task_mm(tsk); 2045 + if (!mm) 2046 + return 0; 2047 + 2048 + len = __access_remote_vm(tsk, mm, addr, buf, len, write); 2049 + 2002 2050 mmput(mm); 2003 2051 return len; 2004 2052 }
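The nommu side now matches the mm/memory.c split: __access_remote_vm() takes mmap_sem and does the copy, access_process_vm() keeps the get_task_mm()/mmput() bookkeeping, and the new access_remote_vm() serves callers that already hold an mm reference and may outlive the task. A hedged sketch of such a caller; peek_remote_word() is hypothetical, the mm helpers are the ones in the diff:

/* Sketch only - assumes the caller pinned mm earlier, e.g. with
 * get_task_mm(), and drops it later with mmput(). */
static int peek_remote_word(struct mm_struct *mm, unsigned long addr,
			    unsigned long *out)
{
	/* No task_struct needed: the mm reference is the license. */
	int copied = access_remote_vm(mm, addr, out, sizeof(*out), 0);

	return copied == sizeof(*out) ? 0 : -EIO;
}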
+1 -2
mm/percpu.c
··· 1008 1008 } 1009 1009 1010 1010 if (in_first_chunk) { 1011 - if ((unsigned long)addr < VMALLOC_START || 1012 - (unsigned long)addr >= VMALLOC_END) 1011 + if (!is_vmalloc_addr(addr)) 1013 1012 return __pa(addr); 1014 1013 else 1015 1014 return page_to_phys(vmalloc_to_page(addr));
+5 -1
net/bridge/br_if.c
··· 389 389 { 390 390 struct net_bridge_port *p; 391 391 int err = 0; 392 + bool changed_addr; 392 393 393 394 /* Don't allow bridging non-ethernet like devices */ 394 395 if ((dev->flags & IFF_LOOPBACK) || ··· 447 446 list_add_rcu(&p->list, &br->port_list); 448 447 449 448 spin_lock_bh(&br->lock); 450 - br_stp_recalculate_bridge_id(br); 449 + changed_addr = br_stp_recalculate_bridge_id(br); 451 450 br_features_recompute(br); 452 451 453 452 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && ··· 456 455 spin_unlock_bh(&br->lock); 457 456 458 457 br_ifinfo_notify(RTM_NEWLINK, p); 458 + 459 + if (changed_addr) 460 + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 459 461 460 462 dev_set_mtu(br->dev, br_min_mtu(br)); 461 463
+1 -1
net/bridge/br_private.h
··· 497 497 extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val); 498 498 extern void br_stp_enable_port(struct net_bridge_port *p); 499 499 extern void br_stp_disable_port(struct net_bridge_port *p); 500 - extern void br_stp_recalculate_bridge_id(struct net_bridge *br); 500 + extern bool br_stp_recalculate_bridge_id(struct net_bridge *br); 501 501 extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); 502 502 extern void br_stp_set_bridge_priority(struct net_bridge *br, 503 503 u16 newprio);
+6 -3
net/bridge/br_stp_if.c
··· 204 204 static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; 205 205 206 206 /* called under bridge lock */ 207 - void br_stp_recalculate_bridge_id(struct net_bridge *br) 207 + bool br_stp_recalculate_bridge_id(struct net_bridge *br) 208 208 { 209 209 const unsigned char *br_mac_zero = 210 210 (const unsigned char *)br_mac_zero_aligned; ··· 222 222 223 223 } 224 224 225 - if (compare_ether_addr(br->bridge_id.addr, addr)) 226 - br_stp_change_bridge_id(br, addr); 225 + if (compare_ether_addr(br->bridge_id.addr, addr) == 0) 226 + return false; /* no change */ 227 + 228 + br_stp_change_bridge_id(br, addr); 229 + return true; 227 230 } 228 231 229 232 /* called under bridge lock */
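Making br_stp_recalculate_bridge_id() report whether the bridge MAC actually changed is what lets br_add_if() above fire NETDEV_CHANGEADDR only when there is news. The mutator-reports-change shape as a standalone toy (names ours):

#include <stdio.h>
#include <string.h>

static unsigned char cur[6];

/* Returns nonzero only when the stored address really changed. */
static int set_addr(const unsigned char *a)
{
	if (memcmp(cur, a, 6) == 0)
		return 0;
	memcpy(cur, a, 6);
	return 1;
}

int main(void)
{
	const unsigned char a[6] = { 2, 0, 0, 0, 0, 1 };

	if (set_addr(a))
		printf("notify listeners\n");	/* first call: changed */
	if (!set_addr(a))
		printf("quiet, no change\n");	/* second call: same */
	return 0;
}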
+3 -6
net/can/af_can.c
··· 95 95 * af_can socket functions 96 96 */ 97 97 98 - static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 98 + int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 99 99 { 100 100 struct sock *sk = sock->sk; 101 101 ··· 108 108 return -ENOIOCTLCMD; 109 109 } 110 110 } 111 + EXPORT_SYMBOL(can_ioctl); 111 112 112 113 static void can_sock_destruct(struct sock *sk) 113 114 { ··· 699 698 printk(KERN_ERR "can: protocol %d already registered\n", 700 699 proto); 701 700 err = -EBUSY; 702 - } else { 701 + } else 703 702 proto_tab[proto] = cp; 704 703 705 - /* use generic ioctl function if not defined by module */ 706 - if (!cp->ops->ioctl) 707 - cp->ops->ioctl = can_ioctl; 708 - } 709 704 spin_unlock(&proto_tab_lock); 710 705 711 706 if (err < 0)
+2 -2
net/can/bcm.c
··· 1569 1569 return size; 1570 1570 } 1571 1571 1572 - static struct proto_ops bcm_ops __read_mostly = { 1572 + static const struct proto_ops bcm_ops = { 1573 1573 .family = PF_CAN, 1574 1574 .release = bcm_release, 1575 1575 .bind = sock_no_bind, ··· 1578 1578 .accept = sock_no_accept, 1579 1579 .getname = sock_no_getname, 1580 1580 .poll = datagram_poll, 1581 - .ioctl = NULL, /* use can_ioctl() from af_can.c */ 1581 + .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ 1582 1582 .listen = sock_no_listen, 1583 1583 .shutdown = sock_no_shutdown, 1584 1584 .setsockopt = sock_no_setsockopt,
+2 -2
net/can/raw.c
··· 742 742 return size; 743 743 } 744 744 745 - static struct proto_ops raw_ops __read_mostly = { 745 + static const struct proto_ops raw_ops = { 746 746 .family = PF_CAN, 747 747 .release = raw_release, 748 748 .bind = raw_bind, ··· 751 751 .accept = sock_no_accept, 752 752 .getname = raw_getname, 753 753 .poll = datagram_poll, 754 - .ioctl = NULL, /* use can_ioctl() from af_can.c */ 754 + .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ 755 755 .listen = sock_no_listen, 756 756 .shutdown = sock_no_shutdown, 757 757 .setsockopt = raw_setsockopt,
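With af_can.c exporting can_ioctl() and the protocols naming it statically, nothing writes to the ops table at registration time any more, so both raw_ops and bcm_ops can trade __read_mostly for const and live in rodata. The payoff of a fully static function-pointer table, in miniature (demo types ours):

#include <stdio.h>

struct demo_ops {
	int (*ioctl)(int cmd);
};

static int demo_ioctl(int cmd)
{
	return cmd;	/* a real handler would dispatch on cmd */
}

/* Complete at compile time, so it can be const: the linker may place
 * it in read-only memory, and late patching becomes impossible. */
static const struct demo_ops demo_proto_ops = {
	.ioctl = demo_ioctl,
};

int main(void)
{
	printf("%d\n", demo_proto_ops.ioctl(42));
	return 0;
}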
+1 -54
net/core/dev.c
··· 1140 1140 1141 1141 ASSERT_RTNL(); 1142 1142 1143 - /* 1144 - * Is it even present? 1145 - */ 1146 1143 if (!netif_device_present(dev)) 1147 1144 return -ENODEV; 1148 1145 ··· 1148 1151 if (ret) 1149 1152 return ret; 1150 1153 1151 - /* 1152 - * Call device private open method 1153 - */ 1154 1154 set_bit(__LINK_STATE_START, &dev->state); 1155 1155 1156 1156 if (ops->ndo_validate_addr) ··· 1156 1162 if (!ret && ops->ndo_open) 1157 1163 ret = ops->ndo_open(dev); 1158 1164 1159 - /* 1160 - * If it went open OK then: 1161 - */ 1162 - 1163 1165 if (ret) 1164 1166 clear_bit(__LINK_STATE_START, &dev->state); 1165 1167 else { 1166 - /* 1167 - * Set the flags. 1168 - */ 1169 1168 dev->flags |= IFF_UP; 1170 - 1171 - /* 1172 - * Enable NET_DMA 1173 - */ 1174 1169 net_dmaengine_get(); 1175 - 1176 - /* 1177 - * Initialize multicasting status 1178 - */ 1179 1170 dev_set_rx_mode(dev); 1180 - 1181 - /* 1182 - * Wakeup transmit queue engine 1183 - */ 1184 1171 dev_activate(dev); 1185 1172 } 1186 1173 ··· 1184 1209 { 1185 1210 int ret; 1186 1211 1187 - /* 1188 - * Is it already up? 1189 - */ 1190 1212 if (dev->flags & IFF_UP) 1191 1213 return 0; 1192 1214 1193 - /* 1194 - * Open device 1195 - */ 1196 1215 ret = __dev_open(dev); 1197 1216 if (ret < 0) 1198 1217 return ret; 1199 1218 1200 - /* 1201 - * ... and announce new interface. 1202 - */ 1203 1219 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); 1204 1220 call_netdevice_notifiers(NETDEV_UP, dev); 1205 1221 ··· 1206 1240 might_sleep(); 1207 1241 1208 1242 list_for_each_entry(dev, head, unreg_list) { 1209 - /* 1210 - * Tell people we are going down, so that they can 1211 - * prepare to death, when device is still operating. 1212 - */ 1213 1243 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1214 1244 1215 1245 clear_bit(__LINK_STATE_START, &dev->state); ··· 1234 1272 if (ops->ndo_stop) 1235 1273 ops->ndo_stop(dev); 1236 1274 1237 - /* 1238 - * Device is now down. 1239 - */ 1240 - 1241 1275 dev->flags &= ~IFF_UP; 1242 - 1243 - /* 1244 - * Shutdown NET_DMA 1245 - */ 1246 1276 net_dmaengine_put(); 1247 1277 } 1248 1278 ··· 1263 1309 1264 1310 __dev_close_many(head); 1265 1311 1266 - /* 1267 - * Tell people we are down 1268 - */ 1269 1312 list_for_each_entry(dev, head, unreg_list) { 1270 1313 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); 1271 1314 call_netdevice_notifiers(NETDEV_DOWN, dev); ··· 1321 1370 1322 1371 1323 1372 static int dev_boot_phase = 1; 1324 - 1325 - /* 1326 - * Device change register/unregister. These are not inline or static 1327 - * as we export them to the world. 1328 - */ 1329 1373 1330 1374 /** 1331 1375 * register_netdevice_notifier - register a network notifier block ··· 1423 1477 ASSERT_RTNL(); 1424 1478 return raw_notifier_call_chain(&netdev_chain, val, dev); 1425 1479 } 1480 + EXPORT_SYMBOL(call_netdevice_notifiers); 1426 1481 1427 1482 /* When > 0 there are consumers of rx skb time stamps */ 1428 1483 static atomic_t netstamp_needed = ATOMIC_INIT(0);
+16 -1
net/core/ethtool.c
··· 141 141 } 142 142 EXPORT_SYMBOL(ethtool_op_get_flags); 143 143 144 + /* Check if device can enable (or disable) particular feature coded in "data" 145 + * argument. Flags "supported" describe features that can be toggled by device. 146 + * If feature can not be toggled, it state (enabled or disabled) must match 147 + * hardcoded device features state, otherwise flags are marked as invalid. 148 + */ 149 + bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) 150 + { 151 + u32 features = dev->features & flags_dup_features; 152 + /* "data" can contain only flags_dup_features bits, 153 + * see __ethtool_set_flags */ 154 + 155 + return (features & ~supported) != (data & ~supported); 156 + } 157 + EXPORT_SYMBOL(ethtool_invalid_flags); 158 + 144 159 int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) 145 160 { 146 - if (data & ~supported) 161 + if (ethtool_invalid_flags(dev, data, supported)) 147 162 return -EINVAL; 148 163 149 164 dev->features = ((dev->features & ~flags_dup_features) |
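ethtool_invalid_flags() only rejects a request that tries to move a bit the driver did not list as toggleable: the device's current feature word and the requested word are compared on the non-'supported' bits. A standalone run of the same check with invented bit values:

#include <stdio.h>

#define F_LRO    0x1u
#define F_NTUPLE 0x2u
#define F_DUP    (F_LRO | F_NTUPLE)	/* stands in for flags_dup_features */

/* Same shape as the new helper: nonzero means the request is invalid. */
static int invalid_flags(unsigned int features, unsigned int data,
			 unsigned int supported)
{
	features &= F_DUP;
	return (features & ~supported) != (data & ~supported);
}

int main(void)
{
	/* LRO is on in hardware and not toggleable; only NTUPLE is. */
	printf("%d\n", invalid_flags(F_LRO, F_NTUPLE, F_NTUPLE));		/* 1: tries to clear LRO */
	printf("%d\n", invalid_flags(F_LRO, F_LRO | F_NTUPLE, F_NTUPLE));	/* 0: LRO bit matches */
	return 0;
}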
+2 -2
net/ipv4/fib_trie.c
··· 1365 1365 err = fib_props[fa->fa_type].error; 1366 1366 if (err) { 1367 1367 #ifdef CONFIG_IP_FIB_TRIE_STATS 1368 - t->stats.semantic_match_miss++; 1368 + t->stats.semantic_match_passed++; 1369 1369 #endif 1370 - return 1; 1370 + return err; 1371 1371 } 1372 1372 if (fi->fib_flags & RTNH_F_DEAD) 1373 1373 continue;
+3 -3
net/ipv4/ip_options.c
··· 140 140 } else { 141 141 dopt->ts_needtime = 0; 142 142 143 - if (soffset + 8 <= optlen) { 143 + if (soffset + 7 <= optlen) { 144 144 __be32 addr; 145 145 146 - memcpy(&addr, sptr+soffset-1, 4); 147 - if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_LOCAL) { 146 + memcpy(&addr, dptr+soffset-1, 4); 147 + if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) { 148 148 dopt->ts_needtime = 1; 149 149 soffset += 8; 150 150 }
+1
net/ipv4/raw.c
··· 569 569 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 570 570 if (IS_ERR(rt)) { 571 571 err = PTR_ERR(rt); 572 + rt = NULL; 572 573 goto done; 573 574 } 574 575 }
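The added 'rt = NULL' matters because the error path falls through to a common 'done:' label that releases rt: without it, the ERR_PTR cookie from ip_route_output_flow() would be handed to ip_rt_put(). The shape of the bug and the fix, as a plain C toy (the route type and helpers are ours):

#include <stdio.h>

struct route { int refcnt; };

static void route_put(struct route *rt)
{
	if (rt)			/* release path tolerates NULL, not garbage */
		rt->refcnt--;
}

/* ERR_PTR/IS_ERR stand-ins: small negative errnos encoded in the pointer. */
static int is_err(const struct route *rt)
{
	return (unsigned long)rt >= (unsigned long)-4095L;
}

int main(void)
{
	struct route *rt = (struct route *)-22L;	/* failed lookup */
	int err = 0;

	if (is_err(rt)) {
		err = -22;
		rt = NULL;	/* the fix: 'done:' must never see the ERR_PTR */
		goto done;
	}
done:
	route_put(rt);
	printf("err=%d\n", err);
	return 0;
}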
+1 -1
net/ipv6/ip6mr.c
··· 663 663 skb_pull(skb, (u8 *)encap - skb->data); 664 664 skb_reset_network_header(skb); 665 665 skb->protocol = htons(ETH_P_IPV6); 666 - skb->ip_summed = 0; 666 + skb->ip_summed = CHECKSUM_NONE; 667 667 skb->pkt_type = PACKET_HOST; 668 668 669 669 skb_tunnel_rx(skb, reg_dev);
+6
net/irda/iriap.c
··· 656 656 n = 1; 657 657 658 658 name_len = fp[n++]; 659 + 660 + IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;); 661 + 659 662 memcpy(name, fp+n, name_len); n+=name_len; 660 663 name[name_len] = '\0'; 661 664 662 665 attr_len = fp[n++]; 666 + 667 + IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;); 668 + 663 669 memcpy(attr, fp+n, attr_len); n+=attr_len; 664 670 attr[attr_len] = '\0'; 665 671
+3
net/irda/irnet/irnet_ppp.c
··· 105 105 while(isspace(start[length - 1])) 106 106 length--; 107 107 108 + DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, 109 + -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); 110 + 108 111 /* Copy the name for later reuse */ 109 112 memcpy(ap->rname, start + 5, length - 5); 110 113 ap->rname[length - 5] = '\0';
+4 -4
net/rose/af_rose.c
··· 978 978 struct sock *make; 979 979 struct rose_sock *make_rose; 980 980 struct rose_facilities_struct facilities; 981 - int n, len; 981 + int n; 982 982 983 983 skb->sk = NULL; /* Initially we don't know who it's for */ 984 984 ··· 987 987 */ 988 988 memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); 989 989 990 - len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1; 991 - len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1; 992 - if (!rose_parse_facilities(skb->data + len + 4, &facilities)) { 990 + if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, 991 + skb->len - ROSE_CALL_REQ_FACILITIES_OFF, 992 + &facilities)) { 993 993 rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); 994 994 return 0; 995 995 }
+12 -1
net/rose/rose_loopback.c
··· 73 73 unsigned int lci_i, lci_o; 74 74 75 75 while ((skb = skb_dequeue(&loopback_queue)) != NULL) { 76 + if (skb->len < ROSE_MIN_LEN) { 77 + kfree_skb(skb); 78 + continue; 79 + } 76 80 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 77 81 frametype = skb->data[2]; 78 - dest = (rose_address *)(skb->data + 4); 82 + if (frametype == ROSE_CALL_REQUEST && 83 + (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || 84 + skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != 85 + ROSE_CALL_REQ_ADDR_LEN_VAL)) { 86 + kfree_skb(skb); 87 + continue; 88 + } 89 + dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF); 79 90 lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; 80 91 81 92 skb_reset_transport_header(skb);
+13 -7
net/rose/rose_route.c
··· 861 861 unsigned int lci, new_lci; 862 862 unsigned char cause, diagnostic; 863 863 struct net_device *dev; 864 - int len, res = 0; 864 + int res = 0; 865 865 char buf[11]; 866 866 867 867 #if 0 ··· 869 869 return res; 870 870 #endif 871 871 872 + if (skb->len < ROSE_MIN_LEN) 873 + return res; 872 874 frametype = skb->data[2]; 873 875 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 874 - src_addr = (rose_address *)(skb->data + 9); 875 - dest_addr = (rose_address *)(skb->data + 4); 876 + if (frametype == ROSE_CALL_REQUEST && 877 + (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || 878 + skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != 879 + ROSE_CALL_REQ_ADDR_LEN_VAL)) 880 + return res; 881 + src_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF); 882 + dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF); 876 883 877 884 spin_lock_bh(&rose_neigh_list_lock); 878 885 spin_lock_bh(&rose_route_list_lock); ··· 1017 1010 goto out; 1018 1011 } 1019 1012 1020 - len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1; 1021 - len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1; 1022 - 1023 1013 memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); 1024 1014 1025 - if (!rose_parse_facilities(skb->data + len + 4, &facilities)) { 1015 + if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, 1016 + skb->len - ROSE_CALL_REQ_FACILITIES_OFF, 1017 + &facilities)) { 1026 1018 rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76); 1027 1019 goto out; 1028 1020 }
+66 -29
net/rose/rose_subr.c
··· 142 142 *dptr++ = ROSE_GFI | lci1; 143 143 *dptr++ = lci2; 144 144 *dptr++ = frametype; 145 - *dptr++ = 0xAA; 145 + *dptr++ = ROSE_CALL_REQ_ADDR_LEN_VAL; 146 146 memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN); 147 147 dptr += ROSE_ADDR_LEN; 148 148 memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); ··· 246 246 do { 247 247 switch (*p & 0xC0) { 248 248 case 0x00: 249 + if (len < 2) 250 + return -1; 249 251 p += 2; 250 252 n += 2; 251 253 len -= 2; 252 254 break; 253 255 254 256 case 0x40: 257 + if (len < 3) 258 + return -1; 255 259 if (*p == FAC_NATIONAL_RAND) 256 260 facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF); 257 261 p += 3; ··· 264 260 break; 265 261 266 262 case 0x80: 263 + if (len < 4) 264 + return -1; 267 265 p += 4; 268 266 n += 4; 269 267 len -= 4; 270 268 break; 271 269 272 270 case 0xC0: 271 + if (len < 2) 272 + return -1; 273 273 l = p[1]; 274 + if (len < 2 + l) 275 + return -1; 274 276 if (*p == FAC_NATIONAL_DEST_DIGI) { 275 277 if (!fac_national_digis_received) { 278 + if (l < AX25_ADDR_LEN) 279 + return -1; 276 280 memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN); 277 281 facilities->source_ndigis = 1; 278 282 } 279 283 } 280 284 else if (*p == FAC_NATIONAL_SRC_DIGI) { 281 285 if (!fac_national_digis_received) { 286 + if (l < AX25_ADDR_LEN) 287 + return -1; 282 288 memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN); 283 289 facilities->dest_ndigis = 1; 284 290 } 285 291 } 286 292 else if (*p == FAC_NATIONAL_FAIL_CALL) { 293 + if (l < AX25_ADDR_LEN) 294 + return -1; 287 295 memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN); 288 296 } 289 297 else if (*p == FAC_NATIONAL_FAIL_ADD) { 298 + if (l < 1 + ROSE_ADDR_LEN) 299 + return -1; 290 300 memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN); 291 301 } 292 302 else if (*p == FAC_NATIONAL_DIGIS) { 303 + if (l % AX25_ADDR_LEN) 304 + return -1; 293 305 fac_national_digis_received = 1; 294 306 facilities->source_ndigis = 0; 295 307 facilities->dest_ndigis = 0; 296 308 for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { 297 - if (pt[6] & AX25_HBIT) 309 + if (pt[6] & AX25_HBIT) { 310 + if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) 311 + return -1; 298 312 memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); 299 - else 313 + } else { 314 + if (facilities->source_ndigis >= ROSE_MAX_DIGIS) 315 + return -1; 300 316 memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); 317 + } 301 318 } 302 319 } 303 320 p += l + 2; ··· 339 314 do { 340 315 switch (*p & 0xC0) { 341 316 case 0x00: 317 + if (len < 2) 318 + return -1; 342 319 p += 2; 343 320 n += 2; 344 321 len -= 2; 345 322 break; 346 323 347 324 case 0x40: 325 + if (len < 3) 326 + return -1; 348 327 p += 3; 349 328 n += 3; 350 329 len -= 3; 351 330 break; 352 331 353 332 case 0x80: 333 + if (len < 4) 334 + return -1; 354 335 p += 4; 355 336 n += 4; 356 337 len -= 4; 357 338 break; 358 339 359 340 case 0xC0: 341 + if (len < 2) 342 + return -1; 360 343 l = p[1]; 344 + 345 + /* Prevent overflows*/ 346 + if (l < 10 || l > 20) 347 + return -1; 348 + 361 349 if (*p == FAC_CCITT_DEST_NSAP) { 362 350 memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); 363 351 memcpy(callsign, p + 12, l - 10); ··· 393 355 return n; 394 356 } 395 357 396 - int rose_parse_facilities(unsigned char *p, 358 + int rose_parse_facilities(unsigned char *p, unsigned packet_len, 397 359 struct rose_facilities_struct *facilities) 398 360 { 399 361 int facilities_len, len; 400 362 401 363 facilities_len = *p++; 402 364 403 - if (facilities_len == 0) 365 + if (facilities_len == 0 || (unsigned)facilities_len > packet_len) 404 366 return 0; 405 367 406 - while (facilities_len > 0) { 407 - if (*p == 0x00) { 408 - facilities_len--; 409 - p++; 368 + while (facilities_len >= 3 && *p == 0x00) { 369 + facilities_len--; 370 + p++; 410 371 411 - switch (*p) { 412 - case FAC_NATIONAL: /* National */ 413 - len = rose_parse_national(p + 1, facilities, facilities_len - 1); 414 - facilities_len -= len + 1; 415 - p += len + 1; 416 - break; 372 + switch (*p) { 373 + case FAC_NATIONAL: /* National */ 374 + len = rose_parse_national(p + 1, facilities, facilities_len - 1); 375 + break; 417 376 418 - case FAC_CCITT: /* CCITT */ 419 - len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); 420 - facilities_len -= len + 1; 421 - p += len + 1; 422 - break; 377 + case FAC_CCITT: /* CCITT */ 378 + len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); 379 + break; 423 380 424 - default: 425 - printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); 426 - facilities_len--; 427 - p++; 428 - break; 381 + default: 382 + printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); 383 + len = 1; 384 + break; 385 + } 386 + 387 + if (len < 0) 388 + return 0; 389 + if (WARN_ON(len >= facilities_len)) 390 + return 0; 391 + facilities_len -= len + 1; 392 + p += len + 1; 429 - } 430 - } else 431 - break; /* Error in facilities format */ 393 + } 432 394 433 - return 1; 395 + return facilities_len == 0; 434 396 435 397 } 436 398 437 399 static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
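Every arm of the parser now proves the bytes it is about to read actually exist, and rose_parse_facilities() itself caps facilities_len by the caller-supplied packet length, reporting success only when the element lengths add up exactly. The same bounds-checked TLV loop, boiled down to a runnable toy (the one-byte type/length framing is ours, not the ROSE wire format):

#include <stdio.h>

/* Returns bytes consumed, or -1 on a truncated element. */
static int parse_tlv(const unsigned char *p, int len)
{
	int n = 0;

	while (len >= 2) {			/* the 2-byte header must fit */
		int l = p[1];
		if (l > len - 2)		/* ...and so must the value */
			return -1;
		printf("type %u, %d byte(s)\n", p[0], l);
		p += 2 + l;
		n += 2 + l;
		len -= 2 + l;
	}
	return len == 0 ? n : -1;		/* partial trailing header: error */
}

int main(void)
{
	const unsigned char ok[]  = { 1, 2, 0xAA, 0xBB, 2, 0 };
	const unsigned char bad[] = { 1, 5, 0xAA };	/* claims 5, carries 1 */

	printf("ok:  %d\n", parse_tlv(ok, sizeof(ok)));
	printf("bad: %d\n", parse_tlv(bad, sizeof(bad)));
	return 0;
}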
+3 -1
net/xfrm/xfrm_input.c
··· 173 173 goto drop_unlock; 174 174 } 175 175 176 - if (x->props.replay_window && x->repl->check(x, skb, seq)) { 176 + if (x->repl->check(x, skb, seq)) { 177 177 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); 178 178 goto drop_unlock; 179 179 } ··· 189 189 190 190 XFRM_SKB_CB(skb)->seq.input.low = seq; 191 191 XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; 192 + 193 + skb_dst_force(skb); 192 194 193 195 nexthdr = x->type->input(x, skb); 194 196
+3 -1
net/xfrm/xfrm_output.c
··· 78 78 79 79 spin_unlock_bh(&x->lock); 80 80 81 + skb_dst_force(skb); 82 + 81 83 err = x->type->output(x, skb); 82 84 if (err == -EINPROGRESS) 83 85 goto out_exit; ··· 96 94 err = -EHOSTUNREACH; 97 95 goto error_nolock; 98 96 } 99 - skb_dst_set(skb, dst_clone(dst)); 97 + skb_dst_set(skb, dst); 100 98 x = dst->xfrm; 101 99 } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); 102 100
+15 -2
net/xfrm/xfrm_replay.c
··· 118 118 u32 diff; 119 119 u32 seq = ntohl(net_seq); 120 120 121 + if (!x->props.replay_window) 122 + return 0; 123 + 121 124 if (unlikely(seq == 0)) 122 125 goto err; 123 126 ··· 196 193 { 197 194 unsigned int bitnr, nr; 198 195 struct xfrm_replay_state_esn *replay_esn = x->replay_esn; 196 + u32 pos; 199 197 u32 seq = ntohl(net_seq); 200 198 u32 diff = replay_esn->seq - seq; 201 - u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; 199 + 200 + if (!replay_esn->replay_window) 201 + return 0; 202 + 203 + pos = (replay_esn->seq - 1) % replay_esn->replay_window; 202 204 203 205 if (unlikely(seq == 0)) 204 206 goto err; ··· 381 373 unsigned int bitnr, nr; 382 374 u32 diff; 383 375 struct xfrm_replay_state_esn *replay_esn = x->replay_esn; 376 + u32 pos; 384 377 u32 seq = ntohl(net_seq); 385 - u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; 386 378 u32 wsize = replay_esn->replay_window; 387 379 u32 top = replay_esn->seq; 388 380 u32 bottom = top - wsize + 1; 381 + 382 + if (!wsize) 383 + return 0; 384 + 385 + pos = (replay_esn->seq - 1) % replay_esn->replay_window; 389 386 390 387 if (unlikely(seq == 0 && replay_esn->seq_hi == 0 && 391 388 (replay_esn->seq < replay_esn->replay_window - 1)))
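Each check variant now bails out first when the replay window is zero, i.e. replay protection is off; xfrm_input() used to make that call, but only on the legacy path. A compact model of the window test the new guards protect (simplified, no ESN wrap handling):

#include <stdio.h>

/* Returns 0 if seq is acceptable, -1 for a replay or a too-old packet.
 * bitmap tracks the window of sequence numbers at or below top. */
static int replay_check(unsigned int seq, unsigned int top,
			unsigned int bitmap, unsigned int window)
{
	unsigned int diff;

	if (!window)		/* replay protection disabled: accept */
		return 0;
	if (seq == 0)
		return -1;
	if (seq > top)		/* advances the window: always fine */
		return 0;
	diff = top - seq;
	if (diff >= window)	/* fell off the left edge */
		return -1;
	return (bitmap & (1u << diff)) ? -1 : 0;	/* -1: already seen */
}

int main(void)
{
	printf("%d\n", replay_check(100, 100, 0x1, 32));	/* -1: replayed */
	printf("%d\n", replay_check(101, 100, 0x1, 32));	/*  0: new high */
	printf("%d\n", replay_check(50, 100, 0x1, 32));		/* -1: too old */
	printf("%d\n", replay_check(100, 100, 0x1, 0));		/*  0: disabled */
	return 0;
}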
+6
net/xfrm/xfrm_state.c
··· 1181 1181 goto error; 1182 1182 } 1183 1183 1184 + if (orig->replay_esn) { 1185 + err = xfrm_replay_clone(x, orig); 1186 + if (err) 1187 + goto error; 1188 + } 1189 + 1184 1190 memcpy(&x->mark, &orig->mark, sizeof(x->mark)); 1185 1191 1186 1192 err = xfrm_init_state(x);
+24
net/xfrm/xfrm_user.c
··· 127 127 if (!rt) 128 128 return 0; 129 129 130 + if (p->id.proto != IPPROTO_ESP) 131 + return -EINVAL; 132 + 130 133 if (p->replay_window != 0) 131 134 return -EINVAL; 132 135 ··· 360 357 361 358 strcpy(p->alg_name, algo->name); 362 359 *algpp = p; 360 + return 0; 361 + } 362 + 363 + static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, 364 + struct nlattr *rp) 365 + { 366 + struct xfrm_replay_state_esn *up; 367 + 368 + if (!replay_esn || !rp) 369 + return 0; 370 + 371 + up = nla_data(rp); 372 + 373 + if (xfrm_replay_state_esn_len(replay_esn) != 374 + xfrm_replay_state_esn_len(up)) 375 + return -EINVAL; 376 + 363 377 return 0; 364 378 } 365 379 ··· 1784 1764 return -ESRCH; 1785 1765 1786 1766 if (x->km.state != XFRM_STATE_VALID) 1767 + goto out; 1768 + 1769 + err = xfrm_replay_verify_len(x->replay_esn, rp); 1770 + if (err) 1787 1771 goto out; 1788 1772 1789 1773 spin_lock_bh(&x->lock);
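xfrm_replay_verify_len() closes a heap-overflow window: the ESN state is a variable-length object whose size follows from its bitmap length, so an update from userspace may only be copied over the existing state when both sizes agree. The size arithmetic being compared, sketched with an abbreviated struct (layout ours, not the uapi definition):

#include <stdio.h>

/* Abbreviated stand-in for struct xfrm_replay_state_esn: fixed header
 * plus bmp_len 32-bit bitmap words. */
struct esn_state {
	unsigned int bmp_len;
	unsigned int bmp[];	/* flexible array member */
};

static unsigned long esn_len(const struct esn_state *s)
{
	return sizeof(*s) + s->bmp_len * sizeof(unsigned int);
}

int main(void)
{
	struct esn_state cur = { .bmp_len = 4 };
	struct esn_state upd = { .bmp_len = 8 };

	/* Mismatch -> reject the update rather than memcpy a larger
	 * image over the smaller allocation. */
	printf("lengths match: %d\n", esn_len(&cur) == esn_len(&upd));
	return 0;
}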
+1 -1
sound/soc/soc-jack.c
··· 331 331 goto err; 332 332 333 333 if (gpios[i].wake) { 334 - ret = set_irq_wake(gpio_to_irq(gpios[i].gpio), 1); 334 + ret = irq_set_irq_wake(gpio_to_irq(gpios[i].gpio), 1); 335 335 if (ret != 0) 336 336 printk(KERN_ERR 337 337 "Failed to mark GPIO %d as wake source: %d\n",