Merge branches 'irq-cleanup-for-linus' and 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-cleanup-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
vlynq: Convert irq functions

* 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
genirq: Fix cleanup fallout
genirq: Fix typo and remove unused variable
genirq: Fix new kernel-doc warnings
genirq: Add setter for AFFINITY_SET in irq_data state
genirq: Provide setter inline for IRQD_IRQ_INPROGRESS
genirq: Remove handle_IRQ_event
arm: Ns9xxx: Remove private irq flow handler
powerpc: cell: Use the core flow handler
genirq: Provide edge_eoi flow handler
genirq: Move INPROGRESS, MASKED and DISABLED state flags to irq_data
genirq: Split irq_set_affinity() so it can be called with lock held.
genirq: Add chip flag for restricting cpu_on/offline calls
genirq: Add chip hooks for taking CPUs on/off line.
genirq: Add irq disabled flag to irq_data state
genirq: Reserve the irq when calling irq_set_chip()

+297 -235
+4 -54
arch/arm/mach-ns9xxx/irq.c
···
 	__raw_writel(ic, SYS_IC(prio / 4));
 }

-static void ns9xxx_ack_irq(struct irq_data *d)
+static void ns9xxx_eoi_irq(struct irq_data *d)
 {
 	__raw_writel(0, SYS_ISRADDR);
-}
-
-static void ns9xxx_maskack_irq(struct irq_data *d)
-{
-	ns9xxx_mask_irq(d);
-	ns9xxx_ack_irq(d);
 }

 static void ns9xxx_unmask_irq(struct irq_data *d)
···
 }

 static struct irq_chip ns9xxx_chip = {
-	.irq_ack	= ns9xxx_ack_irq,
+	.irq_eoi	= ns9xxx_eoi_irq,
 	.irq_mask	= ns9xxx_mask_irq,
-	.irq_mask_ack	= ns9xxx_maskack_irq,
 	.irq_unmask	= ns9xxx_unmask_irq,
 };
-
-#if 0
-#define handle_irq handle_level_irq
-#else
-static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
-{
-	struct irqaction *action;
-	irqreturn_t action_ret;
-
-	raw_spin_lock(&desc->lock);
-
-	BUG_ON(desc->status & IRQ_INPROGRESS);
-
-	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_incr_irqs_this_cpu(irq, desc);
-
-	action = desc->action;
-	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
-		goto out_mask;
-
-	desc->status |= IRQ_INPROGRESS;
-	raw_spin_unlock(&desc->lock);
-
-	action_ret = handle_IRQ_event(irq, action);
-
-	/* XXX: There is no direct way to access noirqdebug, so check
-	 * unconditionally for spurious irqs...
-	 * Maybe this function should go to kernel/irq/chip.c? */
-	note_interrupt(irq, desc, action_ret);
-
-	raw_spin_lock(&desc->lock);
-	desc->status &= ~IRQ_INPROGRESS;
-
-	if (desc->status & IRQ_DISABLED)
-out_mask:
-		desc->irq_data.chip->irq_mask(&desc->irq_data);
-
-	/* ack unconditionally to unmask lower prio irqs */
-	desc->irq_data.chip->irq_ack(&desc->irq_data);
-
-	raw_spin_unlock(&desc->lock);
-}
-#define handle_irq handle_prio_irq
-#endif

 void __init ns9xxx_init_irq(void)
 {
···

 	for (i = 0; i <= 31; ++i) {
 		set_irq_chip(i, &ns9xxx_chip);
-		set_irq_handler(i, handle_irq);
+		set_irq_handler(i, handle_fasteoi_irq);
 		set_irq_flags(i, IRQF_VALID);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
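The conversion above replaces the open-coded priority flow handler with the generic fasteoi flow: the chip's irq_eoi() callback now performs the unconditional ack that re-enables lower-priority sources. A minimal sketch of the same pattern on this kernel's API, assuming an invented "foo" platform (FOO_ISRADDR, NR_FOO_IRQS and the mask/unmask callbacks are hypothetical and omitted):

	static void foo_eoi_irq(struct irq_data *d)
	{
		/* runs once per handled irq, like ns9xxx_eoi_irq() above */
		__raw_writel(0, FOO_ISRADDR);
	}

	static struct irq_chip foo_chip = {
		.irq_eoi	= foo_eoi_irq,
		.irq_mask	= foo_mask_irq,
		.irq_unmask	= foo_unmask_irq,
	};

	void __init foo_init_irq(void)
	{
		int i;

		for (i = 0; i < NR_FOO_IRQS; ++i) {
			set_irq_chip(i, &foo_chip);
			set_irq_handler(i, handle_fasteoi_irq);
			/* IRQ_LEVEL keeps the core's level semantics that
			 * the removed handle_prio_irq() provided */
			irq_set_status_flags(i, IRQ_LEVEL);
		}
	}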
+1
arch/powerpc/platforms/cell/Kconfig
···
 	select PPC_INDIRECT_IO
 	select PPC_NATIVE
 	select PPC_RTAS
+	select IRQ_EDGE_EOI_HANDLER

 config PPC_CELL_NATIVE
 	bool
+1 -49
arch/powerpc/platforms/cell/interrupt.c
···
 		"IBM,CBEA-Internal-Interrupt-Controller");
 }

-extern int noirqdebug;
-
-static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = get_irq_desc_chip(desc);
-
-	raw_spin_lock(&desc->lock);
-
-	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-
-	/*
-	 * If we're currently running this IRQ, or its disabled,
-	 * we shouldn't process the IRQ. Mark it pending, handle
-	 * the necessary masking and go out
-	 */
-	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
-		    !desc->action)) {
-		desc->status |= IRQ_PENDING;
-		goto out_eoi;
-	}
-
-	kstat_incr_irqs_this_cpu(irq, desc);
-
-	/* Mark the IRQ currently in progress.*/
-	desc->status |= IRQ_INPROGRESS;
-
-	do {
-		struct irqaction *action = desc->action;
-		irqreturn_t action_ret;
-
-		if (unlikely(!action))
-			goto out_eoi;
-
-		desc->status &= ~IRQ_PENDING;
-		raw_spin_unlock(&desc->lock);
-		action_ret = handle_IRQ_event(irq, action);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
-		raw_spin_lock(&desc->lock);
-
-	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
-
-	desc->status &= ~IRQ_INPROGRESS;
-out_eoi:
-	chip->irq_eoi(&desc->irq_data);
-	raw_spin_unlock(&desc->lock);
-}
-
 static int iic_host_map(struct irq_host *h, unsigned int virq,
 			irq_hw_number_t hw)
 {
···
 					 handle_iic_irq);
 		break;
 	default:
-		set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
+		set_irq_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
 	return 0;
 }
+31 -33
drivers/vlynq/vlynq.c
···
 	msleep(5);
 }

-static void vlynq_irq_unmask(unsigned int irq)
+static void vlynq_irq_unmask(struct irq_data *d)
 {
-	u32 val;
-	struct vlynq_device *dev = get_irq_chip_data(irq);
+	struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
 	int virq;
+	u32 val;

 	BUG_ON(!dev);
-	virq = irq - dev->irq_start;
+	virq = d->irq - dev->irq_start;
 	val = readl(&dev->remote->int_device[virq >> 2]);
 	val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq);
 	writel(val, &dev->remote->int_device[virq >> 2]);
 }

-static void vlynq_irq_mask(unsigned int irq)
+static void vlynq_irq_mask(struct irq_data *d)
 {
-	u32 val;
-	struct vlynq_device *dev = get_irq_chip_data(irq);
+	struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
 	int virq;
+	u32 val;

 	BUG_ON(!dev);
-	virq = irq - dev->irq_start;
+	virq = d->irq - dev->irq_start;
 	val = readl(&dev->remote->int_device[virq >> 2]);
 	val &= ~(VINT_ENABLE << VINT_OFFSET(virq));
 	writel(val, &dev->remote->int_device[virq >> 2]);
 }

-static int vlynq_irq_type(unsigned int irq, unsigned int flow_type)
+static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	u32 val;
-	struct vlynq_device *dev = get_irq_chip_data(irq);
+	struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
 	int virq;
+	u32 val;

 	BUG_ON(!dev);
-	virq = irq - dev->irq_start;
+	virq = d->irq - dev->irq_start;
 	val = readl(&dev->remote->int_device[virq >> 2]);
 	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
 	case IRQ_TYPE_EDGE_RISING:
···
 	return 0;
 }

-static void vlynq_local_ack(unsigned int irq)
+static void vlynq_local_ack(struct irq_data *d)
 {
-	struct vlynq_device *dev = get_irq_chip_data(irq);
-
+	struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
 	u32 status = readl(&dev->local->status);

 	pr_debug("%s: local status: 0x%08x\n",
···
 	writel(status, &dev->local->status);
 }

-static void vlynq_remote_ack(unsigned int irq)
+static void vlynq_remote_ack(struct irq_data *d)
 {
-	struct vlynq_device *dev = get_irq_chip_data(irq);
-
+	struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
 	u32 status = readl(&dev->remote->status);

 	pr_debug("%s: remote status: 0x%08x\n",
···

 static struct irq_chip vlynq_irq_chip = {
 	.name = "vlynq",
-	.unmask = vlynq_irq_unmask,
-	.mask = vlynq_irq_mask,
-	.set_type = vlynq_irq_type,
+	.irq_unmask = vlynq_irq_unmask,
+	.irq_mask = vlynq_irq_mask,
+	.irq_set_type = vlynq_irq_type,
 };

 static struct irq_chip vlynq_local_chip = {
 	.name = "vlynq local error",
-	.unmask = vlynq_irq_unmask,
-	.mask = vlynq_irq_mask,
-	.ack = vlynq_local_ack,
+	.irq_unmask = vlynq_irq_unmask,
+	.irq_mask = vlynq_irq_mask,
+	.irq_ack = vlynq_local_ack,
 };

 static struct irq_chip vlynq_remote_chip = {
 	.name = "vlynq local error",
-	.unmask = vlynq_irq_unmask,
-	.mask = vlynq_irq_mask,
-	.ack = vlynq_remote_ack,
+	.irq_unmask = vlynq_irq_unmask,
+	.irq_mask = vlynq_irq_mask,
+	.irq_ack = vlynq_remote_ack,
 };

 static int vlynq_setup_irq(struct vlynq_device *dev)
···
 	for (i = dev->irq_start; i <= dev->irq_end; i++) {
 		virq = i - dev->irq_start;
 		if (virq == dev->local_irq) {
-			set_irq_chip_and_handler(i, &vlynq_local_chip,
+			irq_set_chip_and_handler(i, &vlynq_local_chip,
 						 handle_level_irq);
-			set_irq_chip_data(i, dev);
+			irq_set_chip_data(i, dev);
 		} else if (virq == dev->remote_irq) {
-			set_irq_chip_and_handler(i, &vlynq_remote_chip,
+			irq_set_chip_and_handler(i, &vlynq_remote_chip,
 						 handle_level_irq);
-			set_irq_chip_data(i, dev);
+			irq_set_chip_data(i, dev);
 		} else {
-			set_irq_chip_and_handler(i, &vlynq_irq_chip,
+			irq_set_chip_and_handler(i, &vlynq_irq_chip,
 						 handle_simple_irq);
-			set_irq_chip_data(i, dev);
+			irq_set_chip_data(i, dev);
 			writel(0, &dev->remote->int_device[virq >> 2]);
 		}
 	}
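The vlynq changes are a textbook instance of the interface conversion done across this merge: chip callbacks now receive a struct irq_data pointer instead of a bare irq number, per-chip data comes from irq_data_get_irq_chip_data(), the irq number from d->irq, and the methods move to the irq_-prefixed struct irq_chip fields. A hedged before/after sketch with an invented "foo" chip (struct foo_priv and its mask_set register are hypothetical):

	struct foo_priv {
		void __iomem *mask_set;		/* hypothetical register */
	};

	/* old style:
	 *	static void foo_mask(unsigned int irq)
	 *	{
	 *		struct foo_priv *p = get_irq_chip_data(irq);
	 *		...
	 *	}
	 * new style: */
	static void foo_mask(struct irq_data *d)
	{
		struct foo_priv *p = irq_data_get_irq_chip_data(d);

		writel(1 << (d->irq & 31), p->mask_set);
	}

	static struct irq_chip foo_chip = {
		.name		= "foo",
		.irq_mask	= foo_mask,	/* was .mask */
		/* .irq_unmask, .irq_ack and .irq_set_type convert the
		 * same way, as the vlynq callbacks above show */
	};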
+55 -4
include/linux/irq.h
···
  * struct irq_data - per irq and irq chip data passed down to chip functions
  * @irq:		interrupt number
  * @node:		node index useful for balancing
- * @state_use_accessor: status information for irq chip functions.
+ * @state_use_accessors: status information for irq chip functions.
  *			Use accessor functions to deal with it
  * @chip:		low level interrupt hardware access
  * @handler_data:	per-IRQ data for the irq_chip methods
···
  *				  from suspend
  * IRDQ_MOVE_PCNTXT		- Interrupt can be moved in process
  *				  context
+ * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
+ * IRQD_IRQ_MASKED		- Masked state of the interrupt
+ * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
···
 	IRQD_LEVEL			= (1 << 13),
 	IRQD_WAKEUP_STATE		= (1 << 14),
 	IRQD_MOVE_PCNTXT		= (1 << 15),
+	IRQD_IRQ_DISABLED		= (1 << 16),
+	IRQD_IRQ_MASKED			= (1 << 17),
+	IRQD_IRQ_INPROGRESS		= (1 << 18),
 };

 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
···
 static inline bool irqd_affinity_was_set(struct irq_data *d)
 {
 	return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
+static inline void irqd_mark_affinity_was_set(struct irq_data *d)
+{
+	d->state_use_accessors |= IRQD_AFFINITY_SET;
 }

 static inline u32 irqd_get_trigger_type(struct irq_data *d)
···
 static inline bool irqd_can_move_in_process_context(struct irq_data *d)
 {
 	return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+}
+
+static inline bool irqd_irq_disabled(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_IRQ_DISABLED;
+}
+
+static inline bool irqd_irq_masked(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_IRQ_MASKED;
+}
+
+static inline bool irqd_irq_inprogress(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
+}
+
+/*
+ * Functions for chained handlers which can be enabled/disabled by the
+ * standard disable_irq/enable_irq calls. Must be called with
+ * irq_desc->lock held.
+ */
+static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
+{
+	d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
+}
+
+static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
+{
+	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
 }

 /**
···
  * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
  * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
  * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
+ * @irq_cpu_online:	configure an interrupt source for a secondary CPU
+ * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
  * @irq_print_chip:	optional to print special chip info in show_interrupts
  * @flags:		chip specific flags
  *
···
 	void		(*irq_bus_lock)(struct irq_data *data);
 	void		(*irq_bus_sync_unlock)(struct irq_data *data);

+	void		(*irq_cpu_online)(struct irq_data *data);
+	void		(*irq_cpu_offline)(struct irq_data *data);
+
 	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);

 	unsigned long	flags;
···
  * IRQCHIP_SET_TYPE_MASKED:	Mask before calling chip.irq_set_type()
  * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
  * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
+ * IRQCHIP_ONOFFLINE_ENABLED:	Only call irq_on/off_line callbacks
+ *				when irq enabled
  */
 enum {
 	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
 	IRQCHIP_EOI_IF_HANDLED		= (1 << 1),
 	IRQCHIP_MASK_ON_SUSPEND		= (1 << 2),
+	IRQCHIP_ONOFFLINE_ENABLED	= (1 << 3),
 };

 /* This include will go away once we isolated irq_desc usage to core code */
···
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);

+extern void irq_cpu_online(void);
+extern void irq_cpu_offline(void);
+extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
+
 #ifdef CONFIG_GENERIC_HARDIRQS

 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
···

 extern int no_irq_affinity;

-/* Handle irq action chains: */
-extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
-
 /*
  * Built-in IRQ handlers for various IRQ types,
  * callable via desc->handle_irq()
···
 extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
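The chained-handler helpers added above are documented only by their comment, so here is a hedged sketch of the intended usage: a demultiplexing flow handler that honors disable_irq()/enable_irq() on its parent interrupt by tracking IRQD_IRQ_INPROGRESS itself. The handler and the demux step are invented for illustration:

	static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		raw_spin_lock(&desc->lock);

		/* honor disable_irq() on the parent: bail out if disabled */
		if (irqd_irq_disabled(&desc->irq_data)) {
			raw_spin_unlock(&desc->lock);
			return;
		}

		/* both helpers require desc->lock held, per the comment */
		irqd_set_chained_irq_inprogress(&desc->irq_data);
		raw_spin_unlock(&desc->lock);

		/* read the demux register and generic_handle_irq() each
		 * pending child interrupt here */

		raw_spin_lock(&desc->lock);
		irqd_clr_chained_irq_inprogress(&desc->irq_data);
		raw_spin_unlock(&desc->lock);
	}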
+4
kernel/irq/Kconfig
···
 config IRQ_PREFLOW_FASTEOI
 	bool

+# Edge style eoi based handler (cell)
+config IRQ_EDGE_EOI_HANDLER
+	bool
+
 # Support forced irq threading
 config IRQ_FORCED_THREADING
 	bool
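IRQ_EDGE_EOI_HANDLER has no prompt, so it is opt-in by select only: an architecture or platform that wants handle_edge_eoi_irq() compiled in selects it from its own Kconfig, exactly as PPC_CELL does above. An illustrative (hypothetical) platform entry:

	config ARCH_FOO
		bool "Foo platform"
		select IRQ_EDGE_EOI_HANDLER

with the corresponding handler registration in the platform's irq setup code:

	set_irq_chip_and_handler(virq, &foo_chip, handle_edge_eoi_irq);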
+133 -21
kernel/irq/chip.c
···
 	irq_chip_set_defaults(chip);
 	desc->irq_data.chip = chip;
 	irq_put_desc_unlock(desc, flags);
+	/*
+	 * For !CONFIG_SPARSE_IRQ make the irq show up in
+	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
+	 * already marked, and this call is harmless.
+	 */
+	irq_reserve_irq(irq);
 	return 0;
 }
 EXPORT_SYMBOL(irq_set_chip);
···

 static void irq_state_clr_disabled(struct irq_desc *desc)
 {
-	desc->istate &= ~IRQS_DISABLED;
+	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
 	irq_compat_clr_disabled(desc);
 }

 static void irq_state_set_disabled(struct irq_desc *desc)
 {
-	desc->istate |= IRQS_DISABLED;
+	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
 	irq_compat_set_disabled(desc);
 }

 static void irq_state_clr_masked(struct irq_desc *desc)
 {
-	desc->istate &= ~IRQS_MASKED;
+	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
 	irq_compat_clr_masked(desc);
 }

 static void irq_state_set_masked(struct irq_desc *desc)
 {
-	desc->istate |= IRQS_MASKED;
+	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 	irq_compat_set_masked(desc);
 }

···
 	kstat_incr_irqs_this_cpu(irq, desc);

 	action = desc->action;
-	if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
+	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
 		goto out_unlock;

 	irq_compat_set_progress(desc);
-	desc->istate |= IRQS_INPROGRESS;
+	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock_irq(&desc->lock);

 	action_ret = action->thread_fn(action->irq, action->dev_id);
···
 		note_interrupt(irq, desc, action_ret);

 	raw_spin_lock_irq(&desc->lock);
-	desc->istate &= ~IRQS_INPROGRESS;
+	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	irq_compat_clr_progress(desc);

 out_unlock:
···
 {
 	raw_spin_lock(&desc->lock);

-	if (unlikely(desc->istate & IRQS_INPROGRESS))
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
 		if (!irq_check_poll(desc))
 			goto out_unlock;

 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);

-	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
 		goto out_unlock;

 	handle_irq_event(desc);
···
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);

-	if (unlikely(desc->istate & IRQS_INPROGRESS))
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
 		if (!irq_check_poll(desc))
 			goto out_unlock;

···
 	 * If its disabled or no action available
 	 * keep it masked and get out of here
 	 */
-	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
 		goto out_unlock;

 	handle_irq_event(desc);

-	if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
+	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
 		unmask_irq(desc);
 out_unlock:
 	raw_spin_unlock(&desc->lock);
···
 {
 	raw_spin_lock(&desc->lock);

-	if (unlikely(desc->istate & IRQS_INPROGRESS))
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
 		if (!irq_check_poll(desc))
 			goto out;

···
 	 * If its disabled or no action available
 	 * then mask it and get out of here:
 	 */
-	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 		irq_compat_set_pending(desc);
 		desc->istate |= IRQS_PENDING;
 		mask_irq(desc);
···
 	 * we shouldn't process the IRQ. Mark it pending, handle
 	 * the necessary masking and go out
 	 */
-	if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
-		      !desc->action))) {
+	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
 		if (!irq_check_poll(desc)) {
 			irq_compat_set_pending(desc);
 			desc->istate |= IRQS_PENDING;
···
 		 * Renable it, if it was not disabled in meantime.
 		 */
 		if (unlikely(desc->istate & IRQS_PENDING)) {
-			if (!(desc->istate & IRQS_DISABLED) &&
-			    (desc->istate & IRQS_MASKED))
+			if (!irqd_irq_disabled(&desc->irq_data) &&
+			    irqd_irq_masked(&desc->irq_data))
 				unmask_irq(desc);
 		}

 		handle_irq_event(desc);

 	} while ((desc->istate & IRQS_PENDING) &&
-		 !(desc->istate & IRQS_DISABLED));
+		 !irqd_irq_disabled(&desc->irq_data));

 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
+
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
+/**
+ * handle_edge_eoi_irq - edge eoi type IRQ handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Similar as the above handle_edge_irq, but using eoi and w/o the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	raw_spin_lock(&desc->lock);
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+	/*
+	 * If we're currently running this IRQ, or its disabled,
+	 * we shouldn't process the IRQ. Mark it pending, handle
+	 * the necessary masking and go out
+	 */
+	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+		if (!irq_check_poll(desc)) {
+			desc->istate |= IRQS_PENDING;
+			goto out_eoi;
+		}
+	}
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	do {
+		if (unlikely(!desc->action))
+			goto out_eoi;
+
+		handle_irq_event(desc);
+
+	} while ((desc->istate & IRQS_PENDING) &&
+		 !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+	chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+#endif

 /**
  * handle_percpu_irq - Per CPU local irq handler
···
 	if (handle == handle_bad_irq) {
 		if (desc->irq_data.chip != &no_irq_chip)
 			mask_ack_irq(desc);
-		irq_compat_set_disabled(desc);
-		desc->istate |= IRQS_DISABLED;
+		irq_state_set_disabled(desc);
 		desc->depth = 1;
 	}
 	desc->handle_irq = handle;
···
 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
 	if (irq_settings_can_move_pcntxt(desc))
 		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
+	if (irq_settings_is_level(desc))
+		irqd_set(&desc->irq_data, IRQD_LEVEL);

 	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

 	irq_put_desc_unlock(desc, flags);
 }
+
+/**
+ * irq_cpu_online - Invoke all irq_cpu_online functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_online()
+ * for each.
+ */
+void irq_cpu_online(void)
+{
+	struct irq_desc *desc;
+	struct irq_chip *chip;
+	unsigned long flags;
+	unsigned int irq;
+
+	for_each_active_irq(irq) {
+		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		chip = irq_data_get_irq_chip(&desc->irq_data);
+		if (chip && chip->irq_cpu_online &&
+		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+		     !irqd_irq_disabled(&desc->irq_data)))
+			chip->irq_cpu_online(&desc->irq_data);
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+}
+
+/**
+ * irq_cpu_offline - Invoke all irq_cpu_offline functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_offline()
+ * for each.
+ */
+void irq_cpu_offline(void)
+{
+	struct irq_desc *desc;
+	struct irq_chip *chip;
+	unsigned long flags;
+	unsigned int irq;
+
+	for_each_active_irq(irq) {
+		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		chip = irq_data_get_irq_chip(&desc->irq_data);
+		if (chip && chip->irq_cpu_offline &&
+		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+		     !irqd_irq_disabled(&desc->irq_data)))
+			chip->irq_cpu_offline(&desc->irq_data);
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+}
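The consumer side of the new hotplug hooks is not shown in this merge, so as a hedged sketch: an irq chip opts in through .irq_cpu_online/.irq_cpu_offline (optionally gated by IRQCHIP_ONOFFLINE_ENABLED), and the architecture's CPU bring-up and tear-down paths call the iterators. All "foo" names are invented:

	static void foo_irq_cpu_online(struct irq_data *d)
	{
		/* reroute this source to the CPU coming up; called by
		 * irq_cpu_online() with desc->lock held and irqs off */
	}

	static struct irq_chip foo_chip = {
		.name		= "foo",
		.irq_cpu_online	= foo_irq_cpu_online,
		/* skip sources that are currently disabled */
		.flags		= IRQCHIP_ONOFFLINE_ENABLED,
	};

	/* arch secondary-CPU init, before enabling interrupts: */
	void __cpuinit foo_secondary_init(unsigned int cpu)
	{
		irq_cpu_online();
	}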
+7 -3
kernel/irq/debug.h
···

 #define P(f) if (desc->status & f) printk("%14s set\n", #f)
 #define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+/* FIXME */
+#define PD(f) do { } while (0)

 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
···
 	P(IRQ_NOAUTOEN);

 	PS(IRQS_AUTODETECT);
-	PS(IRQS_INPROGRESS);
 	PS(IRQS_REPLAY);
 	PS(IRQS_WAITING);
-	PS(IRQS_DISABLED);
 	PS(IRQS_PENDING);
-	PS(IRQS_MASKED);
+
+	PD(IRQS_INPROGRESS);
+	PD(IRQS_DISABLED);
+	PD(IRQS_MASKED);
 }

 #undef P
 #undef PS
+#undef PD
+2 -14
kernel/irq/handle.c
···
 	irq_compat_clr_pending(desc);
 	desc->istate &= ~IRQS_PENDING;
 	irq_compat_set_progress(desc);
-	desc->istate |= IRQS_INPROGRESS;
+	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock(&desc->lock);

 	ret = handle_irq_event_percpu(desc, action);

 	raw_spin_lock(&desc->lock);
-	desc->istate &= ~IRQS_INPROGRESS;
+	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	irq_compat_clr_progress(desc);
 	return ret;
-}
-
-/**
- * handle_IRQ_event - irq action chain handler
- * @irq:	the interrupt number
- * @action:	the interrupt action chain for this irq
- *
- * Handles the action chain of an irq event
- */
-irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
-{
-	return handle_irq_event_percpu(irq_to_desc(irq), action);
 }
-6
kernel/irq/internals.h
···
  * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
  *				  detection
  * IRQS_POLL_INPROGRESS		- polling in progress
- * IRQS_INPROGRESS		- Interrupt in progress
  * IRQS_ONESHOT			- irq is not unmasked in primary handler
  * IRQS_REPLAY			- irq is replayed
  * IRQS_WAITING			- irq is waiting
- * IRQS_DISABLED		- irq is disabled
  * IRQS_PENDING			- irq is pending and replayed later
- * IRQS_MASKED			- irq is masked
  * IRQS_SUSPENDED		- irq is suspended
  */
 enum {
 	IRQS_AUTODETECT		= 0x00000001,
 	IRQS_SPURIOUS_DISABLED	= 0x00000002,
 	IRQS_POLL_INPROGRESS	= 0x00000008,
-	IRQS_INPROGRESS		= 0x00000010,
 	IRQS_ONESHOT		= 0x00000020,
 	IRQS_REPLAY		= 0x00000040,
 	IRQS_WAITING		= 0x00000080,
-	IRQS_DISABLED		= 0x00000100,
 	IRQS_PENDING		= 0x00000200,
-	IRQS_MASKED		= 0x00000400,
 	IRQS_SUSPENDED		= 0x00000800,
 };
+1 -2
kernel/irq/irqdesc.c
···
 	desc->irq_data.handler_data = NULL;
 	desc->irq_data.msi_desc = NULL;
 	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
-	desc->istate = IRQS_DISABLED;
+	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
 	desc->handle_irq = handle_bad_irq;
 	desc->depth = 1;
 	desc->irq_count = 0;
···

 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
-		.istate		= IRQS_DISABLED,
 		.handle_irq	= handle_bad_irq,
 		.depth		= 1,
 		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
+51 -41
kernel/irq/manage.c
···
 void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned int state;
+	bool inprogress;

 	if (!desc)
 		return;
···
 		 * Wait until we're out of the critical section. This might
 		 * give the wrong answer due to the lack of memory barriers.
 		 */
-		while (desc->istate & IRQS_INPROGRESS)
+		while (irqd_irq_inprogress(&desc->irq_data))
 			cpu_relax();

 		/* Ok, that indicated we're done: double-check carefully. */
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		state = desc->istate;
+		inprogress = irqd_irq_inprogress(&desc->irq_data);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);

 		/* Oops, that failed? */
-	} while (state & IRQS_INPROGRESS);
+	} while (inprogress);

 	/*
 	 * We made sure that no hardirq handler is running. Now verify
···
 }

 #ifdef CONFIG_GENERIC_PENDING_IRQ
-static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
 {
-	return irq_settings_can_move_pcntxt(desc);
+	return irqd_can_move_in_process_context(data);
 }
-static inline bool irq_move_pending(struct irq_desc *desc)
+static inline bool irq_move_pending(struct irq_data *data)
 {
-	return irqd_is_setaffinity_pending(&desc->irq_data);
+	return irqd_is_setaffinity_pending(data);
 }
 static inline void
 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
···
 	cpumask_copy(mask, desc->pending_mask);
 }
 #else
-static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
-static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
+static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
+static inline bool irq_move_pending(struct irq_desc *data) { return false; }
 static inline void
 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif

-/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq:	Interrupt to set affinity
- * @cpumask:	cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_chip *chip = desc->irq_data.chip;
-	unsigned long flags;
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	struct irq_desc *desc = irq_data_to_desc(data);
 	int ret = 0;

-	if (!chip->irq_set_affinity)
+	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;

-	raw_spin_lock_irqsave(&desc->lock, flags);
-
-	if (irq_can_move_pcntxt(desc)) {
-		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+	if (irq_can_move_pcntxt(data)) {
+		ret = chip->irq_set_affinity(data, mask, false);
 		switch (ret) {
 		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, mask);
+			cpumask_copy(data->affinity, mask);
 		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
 			ret = 0;
 		}
 	} else {
-		irqd_set_move_pending(&desc->irq_data);
+		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
 	}

···
 		schedule_work(&desc->affinity_notify->work);
 	}
 	irq_compat_set_affinity(desc);
-	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
+	irqd_set(data, IRQD_AFFINITY_SET);
+
+	return ret;
+}
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq:	Interrupt to set affinity
+ * @mask:	cpumask
+ *
+ */
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
···
 		goto out;

 	raw_spin_lock_irqsave(&desc->lock, flags);
-	if (irq_move_pending(desc))
+	if (irq_move_pending(&desc->irq_data))
 		irq_get_pending(cpumask, desc);
 	else
 		cpumask_copy(cpumask, desc->irq_data.affinity);
···
 	flags &= IRQ_TYPE_SENSE_MASK;

 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
-		if (!(desc->istate & IRQS_MASKED))
+		if (!irqd_irq_masked(&desc->irq_data))
 			mask_irq(desc);
-		if (!(desc->istate & IRQS_DISABLED))
+		if (!irqd_irq_disabled(&desc->irq_data))
 			unmask = 1;
 	}

···
 	 * irq_wake_thread(). See the comment there which explains the
 	 * serialization.
 	 */
-	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 		raw_spin_unlock_irq(&desc->lock);
 		chip_bus_sync_unlock(desc);
 		cpu_relax();
···

 	desc->threads_oneshot &= ~action->thread_mask;

-	if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
-	    (desc->istate & IRQS_MASKED)) {
-		irq_compat_clr_masked(desc);
-		desc->istate &= ~IRQS_MASKED;
-		desc->irq_data.chip->irq_unmask(&desc->irq_data);
-	}
+	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
+	    irqd_irq_masked(&desc->irq_data))
+		unmask_irq(desc);
+
 out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(desc);
···
 	atomic_inc(&desc->threads_active);

 	raw_spin_lock_irq(&desc->lock);
-	if (unlikely(desc->istate & IRQS_DISABLED)) {
+	if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
 		/*
 		 * CHECKME: We might need a dedicated
 		 * IRQ_THREAD_PENDING flag here, which
···
 	}

 	desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
-			  IRQS_INPROGRESS | IRQS_ONESHOT | \
-			  IRQS_WAITING);
+			  IRQS_ONESHOT | IRQS_WAITING);
+	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

 	if (new->flags & IRQF_PERCPU) {
 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
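The reason irq_set_affinity() is split is visible where the two new features meet: irq_cpu_online() invokes the chip callback with desc->lock already held, so such a callback must use the locked affinity setter. Extending the hypothetical foo_irq_cpu_online() sketched after the chip.c diff:

	static void foo_irq_cpu_online(struct irq_data *d)
	{
		/* desc->lock is held here; plain irq_set_affinity()
		 * would self-deadlock on the same lock */
		__irq_set_affinity_locked(d, cpumask_of(smp_processor_id()));
	}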
+2 -3
kernel/irq/migration.c
···

 void irq_move_irq(struct irq_data *idata)
 {
-	struct irq_desc *desc = irq_data_to_desc(idata);
 	bool masked;

 	if (likely(!irqd_is_setaffinity_pending(idata)))
 		return;

-	if (unlikely(desc->istate & IRQS_DISABLED))
+	if (unlikely(irqd_irq_disabled(idata)))
 		return;

 	/*
···
 	 * threaded interrupt with ONESHOT set, we can end up with an
 	 * interrupt storm.
 	 */
-	masked = desc->istate & IRQS_MASKED;
+	masked = irqd_irq_masked(idata);
 	if (!masked)
 		idata->chip->irq_mask(idata);
 	irq_move_masked_irq(idata);
+5 -5
kernel/irq/spurious.c
···
 #ifdef CONFIG_SMP
 	do {
 		raw_spin_unlock(&desc->lock);
-		while (desc->istate & IRQS_INPROGRESS)
+		while (irqd_irq_inprogress(&desc->irq_data))
 			cpu_relax();
 		raw_spin_lock(&desc->lock);
-	} while (desc->istate & IRQS_INPROGRESS);
+	} while (irqd_irq_inprogress(&desc->irq_data));
 	/* Might have been disabled in meantime */
-	return !(desc->istate & IRQS_DISABLED) && desc->action;
+	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
 #else
 	return false;
 #endif
···
 	 * Do not poll disabled interrupts unless the spurious
 	 * disabled poller asks explicitely.
 	 */
-	if ((desc->istate & IRQS_DISABLED) && !force)
+	if (irqd_irq_disabled(&desc->irq_data) && !force)
 		goto out;

 	/*
···
 		goto out;

 	/* Already running on another processor */
-	if (desc->istate & IRQS_INPROGRESS) {
+	if (irqd_irq_inprogress(&desc->irq_data)) {
 		/*
 		 * Already running: If it is shared get the other
 		 * CPU to go looking for our mystery interrupt too