genirq: Remove compat code

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
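The wrappers deleted below were one-to-one renames, so any remaining caller converts mechanically to the irq_* accessors that stay in <linux/irq.h>. A minimal sketch of that conversion, assuming a hypothetical driver (foo_init_irq, foo_irq and foo_chip are made-up names, not taken from this patch):

#include <linux/irq.h>

/* Hypothetical setup path; the foo_* identifiers are placeholders. */
static void foo_init_irq(unsigned int foo_irq, struct irq_chip *foo_chip)
{
	/* Before this patch (compat names, now gone):
	 *	set_irq_chip_and_handler(foo_irq, foo_chip, handle_level_irq);
	 *	set_irq_type(foo_irq, IRQ_TYPE_LEVEL_HIGH);
	 *	set_irq_chip_data(foo_irq, NULL);
	 */
	irq_set_chip_and_handler(foo_irq, foo_chip, handle_level_irq);
	irq_set_irq_type(foo_irq, IRQ_TYPE_LEVEL_HIGH);
	irq_set_chip_data(foo_irq, NULL);
}

The same rename pattern applies to the other get_irq_*/set_irq_* wrappers removed in include/linux/irq.h below.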

+25 -452
-117
include/linux/irq.h
···
	IRQ_NO_BALANCING = (1 << 13),
	IRQ_MOVE_PCNTXT = (1 << 14),
	IRQ_NESTED_THREAD = (1 << 15),
-
- #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-	IRQ_INPROGRESS = (1 << 16),
-	IRQ_REPLAY = (1 << 17),
-	IRQ_WAITING = (1 << 18),
-	IRQ_DISABLED = (1 << 19),
-	IRQ_PENDING = (1 << 20),
-	IRQ_MASKED = (1 << 21),
-	IRQ_MOVE_PENDING = (1 << 22),
-	IRQ_AFFINITY_SET = (1 << 23),
-	IRQ_WAKEUP = (1 << 24),
- #endif
};

#define IRQF_MODIFY_MASK \
···
 */
struct irq_chip {
	const char *name;
- #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-	unsigned int (*startup)(unsigned int irq);
-	void (*shutdown)(unsigned int irq);
-	void (*enable)(unsigned int irq);
-	void (*disable)(unsigned int irq);
-
-	void (*ack)(unsigned int irq);
-	void (*mask)(unsigned int irq);
-	void (*mask_ack)(unsigned int irq);
-	void (*unmask)(unsigned int irq);
-	void (*eoi)(unsigned int irq);
-
-	void (*end)(unsigned int irq);
-	int (*set_affinity)(unsigned int irq,
-			const struct cpumask *dest);
-	int (*retrigger)(unsigned int irq);
-	int (*set_type)(unsigned int irq, unsigned int flow_type);
-	int (*set_wake)(unsigned int irq, unsigned int on);
-
-	void (*bus_lock)(unsigned int irq);
-	void (*bus_sync_unlock)(unsigned int irq);
- #endif
	unsigned int (*irq_startup)(struct irq_data *data);
	void (*irq_shutdown)(struct irq_data *data);
	void (*irq_enable)(struct irq_data *data);
···
{
	return d->msi_desc;
}
-
- #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
- /* Please do not use: Use the replacement functions instead */
- static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip)
- {
-	return irq_set_chip(irq, chip);
- }
- static inline int set_irq_data(unsigned int irq, void *data)
- {
-	return irq_set_handler_data(irq, data);
- }
- static inline int set_irq_chip_data(unsigned int irq, void *data)
- {
-	return irq_set_chip_data(irq, data);
- }
- static inline int set_irq_type(unsigned int irq, unsigned int type)
- {
-	return irq_set_irq_type(irq, type);
- }
- static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry)
- {
-	return irq_set_msi_desc(irq, entry);
- }
- static inline struct irq_chip *get_irq_chip(unsigned int irq)
- {
-	return irq_get_chip(irq);
- }
- static inline void *get_irq_chip_data(unsigned int irq)
- {
-	return irq_get_chip_data(irq);
- }
- static inline void *get_irq_data(unsigned int irq)
- {
-	return irq_get_handler_data(irq);
- }
- static inline void *irq_data_get_irq_data(struct irq_data *d)
- {
-	return irq_data_get_irq_handler_data(d);
- }
- static inline struct msi_desc *get_irq_msi(unsigned int irq)
- {
-	return irq_get_msi_desc(irq);
- }
- static inline void set_irq_noprobe(unsigned int irq)
- {
-	irq_set_noprobe(irq);
- }
- static inline void set_irq_probe(unsigned int irq)
- {
-	irq_set_probe(irq);
- }
- static inline void set_irq_nested_thread(unsigned int irq, int nest)
- {
-	irq_set_nested_thread(irq, nest);
- }
- static inline void
- set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
-			irq_flow_handler_t handle, const char *name)
- {
-	irq_set_chip_and_handler_name(irq, chip, handle, name);
- }
- static inline void
- set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
-			irq_flow_handler_t handle)
- {
-	irq_set_chip_and_handler(irq, chip, handle);
- }
- static inline void
- __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
-			const char *name)
- {
-	__irq_set_handler(irq, handle, is_chained, name);
- }
- static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
- {
-	irq_set_handler(irq, handle);
- }
- static inline void
- set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle)
- {
-	irq_set_chained_handler(irq, handle);
- }
- #endif

int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
void irq_free_descs(unsigned int irq, unsigned int cnt);
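With the deprecated members gone from struct irq_chip, an interrupt controller implements only the irq_* callbacks that receive a struct irq_data pointer. A minimal sketch of such a chip, assuming a hypothetical memory-mapped mask register (the foo_* names, the register layout and the use of d->chip_data / d->irq are illustrative, not from this patch):

#include <linux/io.h>
#include <linux/irq.h>

/* Hypothetical controller state, stored as chip_data for each line. */
struct foo_pic {
	void __iomem *mask_reg;
	unsigned int irq_base;
};

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_pic *pic = d->chip_data;
	u32 bit = 1U << (d->irq - pic->irq_base);

	/* Set the mask bit for this line. */
	writel(readl(pic->mask_reg) | bit, pic->mask_reg);
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct foo_pic *pic = d->chip_data;
	u32 bit = 1U << (d->irq - pic->irq_base);

	/* Clear the mask bit for this line. */
	writel(readl(pic->mask_reg) & ~bit, pic->mask_reg);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

The old .mask/.unmask/.ack members that took a bare unsigned int irq no longer exist, so a chip that still sets them simply fails to build against this header.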
+1 -59
include/linux/irqdesc.h
···
 * @name:	flow handler name for /proc/interrupts output
 */
struct irq_desc {
-
- #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
	struct irq_data irq_data;
- #else
-	/*
-	 * This union will go away, once we fixed the direct access to
-	 * irq_desc all over the place. The direct fields are a 1:1
-	 * overlay of irq_data.
-	 */
-	union {
-		struct irq_data irq_data;
-		struct {
-			unsigned int irq;
-			unsigned int node;
-			unsigned int pad_do_not_even_think_about_it;
-			struct irq_chip *chip;
-			void *handler_data;
-			void *chip_data;
-			struct msi_desc *msi_desc;
- #ifdef CONFIG_SMP
-			cpumask_var_t affinity;
- #endif
-		};
-	};
- #endif
-
	struct timer_rand_state *timer_rand_state;
	unsigned int __percpu *kstat_irqs;
	irq_flow_handler_t handle_irq;
···
	irq_preflow_handler_t preflow_handler;
#endif
	struct irqaction *action;	/* IRQ action list */
- #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
	unsigned int status_use_accessors;
- #else
-	unsigned int status;		/* IRQ status */
- #endif
	unsigned int core_internal_state__do_not_mess_with_it;
	unsigned int depth;		/* nested irq disables */
	unsigned int wake_depth;	/* nested wake enables */
···
	return desc->irq_data.msi_desc;
}

- #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
- static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
- {
-	return irq_desc_get_chip(desc);
- }
- static inline void *get_irq_desc_data(struct irq_desc *desc)
- {
-	return irq_desc_get_handler_data(desc);
- }
-
- static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
- {
-	return irq_desc_get_chip_data(desc);
- }
-
- static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
- {
-	return irq_desc_get_msi_desc(desc);
- }
- #endif
-
/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
···
	desc->name = name;
}

- #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
- static inline void __set_irq_handler_unlocked(int irq,
-			irq_flow_handler_t handler)
- {
-	__irq_set_handler_locked(irq, handler);
- }
-
static inline int irq_balancing_disabled(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
-	return desc->status & IRQ_NO_BALANCING_MASK;
+	return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}
- #endif

static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
-4
kernel/irq/Kconfig
···
config GENERIC_HARDIRQS
	def_bool y

- # Select this to disable the deprecated stuff
- config GENERIC_HARDIRQS_NO_DEPRECATED
-	bool
-
config GENERIC_HARDIRQS_NO_COMPAT
	bool

+1 -3
kernel/irq/autoprobe.c
···
		raw_spin_lock_irq(&desc->lock);
		if (!desc->action && irq_settings_can_probe(desc)) {
			desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
-			if (irq_startup(desc)) {
-				irq_compat_set_pending(desc);
+			if (irq_startup(desc))
				desc->istate |= IRQS_PENDING;
-			}
		}
		raw_spin_unlock_irq(&desc->lock);
	}
-129
kernel/irq/chip.c
···
	if (!chip)
		chip = &no_irq_chip;

-	irq_chip_set_defaults(chip);
	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
···
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
-	irq_compat_clr_disabled(desc);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
-	irq_compat_set_disabled(desc);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
-	irq_compat_clr_masked(desc);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
-	irq_compat_set_masked(desc);
}

int irq_startup(struct irq_desc *desc)
···
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
- }
-
- #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
- /* Temporary migration helpers */
- static void compat_irq_mask(struct irq_data *data)
- {
-	data->chip->mask(data->irq);
- }
-
- static void compat_irq_unmask(struct irq_data *data)
- {
-	data->chip->unmask(data->irq);
- }
-
- static void compat_irq_ack(struct irq_data *data)
- {
-	data->chip->ack(data->irq);
- }
-
- static void compat_irq_mask_ack(struct irq_data *data)
- {
-	data->chip->mask_ack(data->irq);
- }
-
- static void compat_irq_eoi(struct irq_data *data)
- {
-	data->chip->eoi(data->irq);
- }
-
- static void compat_irq_enable(struct irq_data *data)
- {
-	data->chip->enable(data->irq);
- }
-
- static void compat_irq_disable(struct irq_data *data)
- {
-	data->chip->disable(data->irq);
- }
-
- static void compat_irq_shutdown(struct irq_data *data)
- {
-	data->chip->shutdown(data->irq);
- }
-
- static unsigned int compat_irq_startup(struct irq_data *data)
- {
-	return data->chip->startup(data->irq);
- }
-
- static int compat_irq_set_affinity(struct irq_data *data,
-			const struct cpumask *dest, bool force)
- {
-	return data->chip->set_affinity(data->irq, dest);
- }
-
- static int compat_irq_set_type(struct irq_data *data, unsigned int type)
- {
-	return data->chip->set_type(data->irq, type);
- }
-
- static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
- {
-	return data->chip->set_wake(data->irq, on);
- }
-
- static int compat_irq_retrigger(struct irq_data *data)
- {
-	return data->chip->retrigger(data->irq);
- }
-
- static void compat_bus_lock(struct irq_data *data)
- {
-	data->chip->bus_lock(data->irq);
- }
-
- static void compat_bus_sync_unlock(struct irq_data *data)
- {
-	data->chip->bus_sync_unlock(data->irq);
- }
- #endif
-
- /*
-  * Fixup enable/disable function pointers
-  */
- void irq_chip_set_defaults(struct irq_chip *chip)
- {
- #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-	if (chip->enable)
-		chip->irq_enable = compat_irq_enable;
-	if (chip->disable)
-		chip->irq_disable = compat_irq_disable;
-	if (chip->shutdown)
-		chip->irq_shutdown = compat_irq_shutdown;
-	if (chip->startup)
-		chip->irq_startup = compat_irq_startup;
-	if (!chip->end)
-		chip->end = dummy_irq_chip.end;
-	if (chip->bus_lock)
-		chip->irq_bus_lock = compat_bus_lock;
-	if (chip->bus_sync_unlock)
-		chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
-	if (chip->mask)
-		chip->irq_mask = compat_irq_mask;
-	if (chip->unmask)
-		chip->irq_unmask = compat_irq_unmask;
-	if (chip->ack)
-		chip->irq_ack = compat_irq_ack;
-	if (chip->mask_ack)
-		chip->irq_mask_ack = compat_irq_mask_ack;
-	if (chip->eoi)
-		chip->irq_eoi = compat_irq_eoi;
-	if (chip->set_affinity)
-		chip->irq_set_affinity = compat_irq_set_affinity;
-	if (chip->set_type)
-		chip->irq_set_type = compat_irq_set_type;
-	if (chip->set_wake)
-		chip->irq_set_wake = compat_irq_set_wake;
-	if (chip->retrigger)
-		chip->irq_retrigger = compat_irq_retrigger;
- #endif
}

static inline void mask_ack_irq(struct irq_desc *desc)
···
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

-	irq_compat_set_progress(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);
···

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
-	irq_compat_clr_progress(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
···
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
-		irq_compat_set_pending(desc);
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
···
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
-			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
-72
kernel/irq/compat.h
···
- /*
-  * Compat layer for transition period
-  */
- #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
- static inline void irq_compat_set_progress(struct irq_desc *desc)
- {
-	desc->status |= IRQ_INPROGRESS;
- }
-
- static inline void irq_compat_clr_progress(struct irq_desc *desc)
- {
-	desc->status &= ~IRQ_INPROGRESS;
- }
- static inline void irq_compat_set_disabled(struct irq_desc *desc)
- {
-	desc->status |= IRQ_DISABLED;
- }
- static inline void irq_compat_clr_disabled(struct irq_desc *desc)
- {
-	desc->status &= ~IRQ_DISABLED;
- }
- static inline void irq_compat_set_pending(struct irq_desc *desc)
- {
-	desc->status |= IRQ_PENDING;
- }
-
- static inline void irq_compat_clr_pending(struct irq_desc *desc)
- {
-	desc->status &= ~IRQ_PENDING;
- }
- static inline void irq_compat_set_masked(struct irq_desc *desc)
- {
-	desc->status |= IRQ_MASKED;
- }
-
- static inline void irq_compat_clr_masked(struct irq_desc *desc)
- {
-	desc->status &= ~IRQ_MASKED;
- }
- static inline void irq_compat_set_move_pending(struct irq_desc *desc)
- {
-	desc->status |= IRQ_MOVE_PENDING;
- }
-
- static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
- {
-	desc->status &= ~IRQ_MOVE_PENDING;
- }
- static inline void irq_compat_set_affinity(struct irq_desc *desc)
- {
-	desc->status |= IRQ_AFFINITY_SET;
- }
-
- static inline void irq_compat_clr_affinity(struct irq_desc *desc)
- {
-	desc->status &= ~IRQ_AFFINITY_SET;
- }
- #else
- static inline void irq_compat_set_progress(struct irq_desc *desc) { }
- static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
- static inline void irq_compat_set_disabled(struct irq_desc *desc) { }
- static inline void irq_compat_clr_disabled(struct irq_desc *desc) { }
- static inline void irq_compat_set_pending(struct irq_desc *desc) { }
- static inline void irq_compat_clr_pending(struct irq_desc *desc) { }
- static inline void irq_compat_set_masked(struct irq_desc *desc) { }
- static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
- static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
- static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
- static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
- static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
- #endif
-
+1 -1
kernel/irq/debug.h
···

#include <linux/kallsyms.h>

- #define P(f) if (desc->status & f) printk("%14s set\n", #f)
+ #define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
/* FIXME */
#define PD(f) do { } while (0)
-9
kernel/irq/dummychip.c
···
	return 0;
}

- #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
- static void compat_noop(unsigned int irq) { }
- #define END_INIT .end = compat_noop
- #else
- #define END_INIT
- #endif
-
/*
 * Generic no controller implementation
 */
···
	.irq_enable = noop,
	.irq_disable = noop,
	.irq_ack = ack_bad,
-	END_INIT
};

/*
···
	.irq_ack = noop,
	.irq_mask = noop,
	.irq_unmask = noop,
-	END_INIT
};
-3
kernel/irq/handle.c
···
	struct irqaction *action = desc->action;
	irqreturn_t ret;

-	irq_compat_clr_pending(desc);
	desc->istate &= ~IRQS_PENDING;
-	irq_compat_set_progress(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);
···

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
-	irq_compat_clr_progress(desc);
	return ret;
}
-10
kernel/irq/internals.h
···

#define istate core_internal_state__do_not_mess_with_it

- #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
- # define status status_use_accessors
- #endif
-
extern int noirqdebug;

/*
···
	IRQS_SUSPENDED		= 0x00000800,
};

- #include "compat.h"
#include "debug.h"
#include "settings.h"

#define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
-
- /* Set default functions for irq_chip structures: */
- extern void irq_chip_set_defaults(struct irq_chip *chip);

extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags);
···
static inline void irqd_set_move_pending(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
-	irq_compat_set_move_pending(irq_data_to_desc(d));
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
-	irq_compat_clr_move_pending(irq_data_to_desc(d));
}

static inline void irqd_clear(struct irq_data *d, unsigned int mask)
+1 -9
kernel/irq/manage.c
···
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
-	irq_compat_set_affinity(desc);
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
···
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
-		else {
-			irq_compat_clr_affinity(desc);
+		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
-		}
	}

	cpumask_and(mask, cpu_online_mask, set);
···
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

-		if (chip != desc->irq_data.chip)
-			irq_chip_set_defaults(desc->irq_data.chip);
		ret = 0;
		break;
	default:
···
			 * but AFAICT IRQS_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
-			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
···
		new->thread_mask = 1 << ffz(thread_mask);

	if (!shared) {
-		irq_chip_set_defaults(desc->irq_data.chip);
-
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
-1
kernel/irq/resend.c
···
	if (desc->istate & IRQS_REPLAY)
		return;
	if (desc->istate & IRQS_PENDING) {
-		irq_compat_clr_pending(desc);
		desc->istate &= ~IRQS_PENDING;
		desc->istate |= IRQS_REPLAY;

+21 -34
kernel/irq/settings.h
···
	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
};

- #define IRQ_INPROGRESS		GOT_YOU_MORON
- #define IRQ_REPLAY		GOT_YOU_MORON
- #define IRQ_WAITING		GOT_YOU_MORON
- #define IRQ_DISABLED		GOT_YOU_MORON
- #define IRQ_PENDING		GOT_YOU_MORON
- #define IRQ_MASKED		GOT_YOU_MORON
- #define IRQ_WAKEUP		GOT_YOU_MORON
- #define IRQ_MOVE_PENDING	GOT_YOU_MORON
#define IRQ_PER_CPU		GOT_YOU_MORON
#define IRQ_NO_BALANCING	GOT_YOU_MORON
- #define IRQ_AFFINITY_SET	GOT_YOU_MORON
#define IRQ_LEVEL		GOT_YOU_MORON
#define IRQ_NOPROBE		GOT_YOU_MORON
#define IRQ_NOREQUEST		GOT_YOU_MORON
···
static inline void
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
{
-	desc->status &= ~(clr & _IRQF_MODIFY_MASK);
-	desc->status |= (set & _IRQF_MODIFY_MASK);
+	desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
+	desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}

static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
-	return desc->status & _IRQ_PER_CPU;
+	return desc->status_use_accessors & _IRQ_PER_CPU;
}

static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
{
-	desc->status |= _IRQ_PER_CPU;
+	desc->status_use_accessors |= _IRQ_PER_CPU;
}

static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
{
-	desc->status |= _IRQ_NO_BALANCING;
+	desc->status_use_accessors |= _IRQ_NO_BALANCING;
}

static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
{
-	return desc->status & _IRQ_NO_BALANCING;
+	return desc->status_use_accessors & _IRQ_NO_BALANCING;
}

static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
{
-	return desc->status & IRQ_TYPE_SENSE_MASK;
+	return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
}

static inline void
irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
{
-	desc->status &= ~IRQ_TYPE_SENSE_MASK;
-	desc->status |= mask & IRQ_TYPE_SENSE_MASK;
+	desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
+	desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
}

static inline bool irq_settings_is_level(struct irq_desc *desc)
{
-	return desc->status & _IRQ_LEVEL;
+	return desc->status_use_accessors & _IRQ_LEVEL;
}

static inline void irq_settings_clr_level(struct irq_desc *desc)
{
-	desc->status &= ~_IRQ_LEVEL;
+	desc->status_use_accessors &= ~_IRQ_LEVEL;
}

static inline void irq_settings_set_level(struct irq_desc *desc)
{
-	desc->status |= _IRQ_LEVEL;
+	desc->status_use_accessors |= _IRQ_LEVEL;
}

static inline bool irq_settings_can_request(struct irq_desc *desc)
{
-	return !(desc->status & _IRQ_NOREQUEST);
+	return !(desc->status_use_accessors & _IRQ_NOREQUEST);
}

static inline void irq_settings_clr_norequest(struct irq_desc *desc)
{
-	desc->status &= ~_IRQ_NOREQUEST;
+	desc->status_use_accessors &= ~_IRQ_NOREQUEST;
}

static inline void irq_settings_set_norequest(struct irq_desc *desc)
{
-	desc->status |= _IRQ_NOREQUEST;
+	desc->status_use_accessors |= _IRQ_NOREQUEST;
}

static inline bool irq_settings_can_probe(struct irq_desc *desc)
{
-	return !(desc->status & _IRQ_NOPROBE);
+	return !(desc->status_use_accessors & _IRQ_NOPROBE);
}

static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
{
-	desc->status &= ~_IRQ_NOPROBE;
+	desc->status_use_accessors &= ~_IRQ_NOPROBE;
}

static inline void irq_settings_set_noprobe(struct irq_desc *desc)
{
-	desc->status |= _IRQ_NOPROBE;
+	desc->status_use_accessors |= _IRQ_NOPROBE;
}

static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
{
-	return desc->status & _IRQ_MOVE_PCNTXT;
+	return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
}

static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
{
-	return !(desc->status & _IRQ_NOAUTOEN);
+	return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
}

static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
{
-	return desc->status & _IRQ_NESTED_THREAD;
+	return desc->status_use_accessors & _IRQ_NESTED_THREAD;
}
-
- /* Nothing should touch desc->status from now on */
- #undef status
- #define status USE_THE_PROPER_WRAPPERS_YOU_MORON
-1
kernel/irq/spurious.c
···
	 * Already running: If it is shared get the other
	 * CPU to go looking for our mystery interrupt too
	 */
-	irq_compat_set_pending(desc);
	desc->istate |= IRQS_PENDING;
	goto out;
}
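With desc->status gone from the core paths, state is queried through the irqd_* helpers on irq_data and, inside kernel/irq/ only, through the desc->istate bits seen in the hunks above. A small sketch under that assumption, written as if it were compiled as part of kernel/irq/ (foo_check is a made-up helper, not part of this patch):

#include <linux/irq.h>
#include "internals.h"	/* kernel/irq/ internal: istate, IRQS_* bits */

/* Hypothetical check: line is disabled or has a pending replay queued. */
static bool foo_check(struct irq_desc *desc)
{
	return irqd_irq_disabled(&desc->irq_data) ||
	       (desc->istate & IRQS_PENDING);
}

Code outside kernel/irq/ has no equivalent for the istate half; it is deliberately private, which is exactly what the removed compat bits used to leak.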