Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

genirq: Add support for per-cpu dev_id interrupts

The ARM GIC interrupt controller offers per CPU interrupts (PPIs),
which are usually used to connect local timers to each core. Each CPU
has its own private interface to the GIC, and only sees the PPIs that
are directly connected to it.

While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.

For these devices, request_irq() is not the right API as it assumes
that an IRQ number is visible by a number of CPUs (through the
affinity setting), but makes it very awkward to express that an IRQ
number can be handled by all CPUs, and yet be a different interrupt
line on each CPU, requiring a different dev_id cookie to be passed
back to the handler.

The *_percpu_irq() functions are designed to overcome these
limitations, by providing a per-cpu dev_id vector:

int request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);

The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs

Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.

Based on an initial patch by Thomas Gleixner.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

authored by

Marc Zyngier and committed by
Thomas Gleixner
31d9d9b6 60f96b41

+345 -34
+27 -11
include/linux/interrupt.h
··· 95 95 * @flags: flags (see IRQF_* above) 96 96 * @name: name of the device 97 97 * @dev_id: cookie to identify the device 98 + * @percpu_dev_id: cookie to identify the device 98 99 * @next: pointer to the next irqaction for shared interrupts 99 100 * @irq: interrupt number 100 101 * @dir: pointer to the proc/irq/NN/name entry ··· 105 104 * @thread_mask: bitmask for keeping track of @thread activity 106 105 */ 107 106 struct irqaction { 108 - irq_handler_t handler; 109 - unsigned long flags; 110 - void *dev_id; 111 - struct irqaction *next; 112 - int irq; 113 - irq_handler_t thread_fn; 114 - struct task_struct *thread; 115 - unsigned long thread_flags; 116 - unsigned long thread_mask; 117 - const char *name; 118 - struct proc_dir_entry *dir; 107 + irq_handler_t handler; 108 + unsigned long flags; 109 + void *dev_id; 110 + void __percpu *percpu_dev_id; 111 + struct irqaction *next; 112 + int irq; 113 + irq_handler_t thread_fn; 114 + struct task_struct *thread; 115 + unsigned long thread_flags; 116 + unsigned long thread_mask; 117 + const char *name; 118 + struct proc_dir_entry *dir; 119 119 } ____cacheline_internodealigned_in_smp; 120 120 121 121 extern irqreturn_t no_action(int cpl, void *dev_id); ··· 137 135 extern int __must_check 138 136 request_any_context_irq(unsigned int irq, irq_handler_t handler, 139 137 unsigned long flags, const char *name, void *dev_id); 138 + 139 + extern int __must_check 140 + request_percpu_irq(unsigned int irq, irq_handler_t handler, 141 + const char *devname, void __percpu *percpu_dev_id); 140 142 141 143 extern void exit_irq_thread(void); 142 144 #else ··· 170 164 return request_irq(irq, handler, flags, name, dev_id); 171 165 } 172 166 167 + static inline int __must_check 168 + request_percpu_irq(unsigned int irq, irq_handler_t handler, 169 + const char *devname, void __percpu *percpu_dev_id) 170 + { 171 + return request_irq(irq, handler, 0, devname, percpu_dev_id); 172 + } 173 + 173 174 static inline void exit_irq_thread(void) 
{ } 174 175 #endif 175 176 176 177 extern void free_irq(unsigned int, void *); 178 + extern void free_percpu_irq(unsigned int, void __percpu *); 177 179 178 180 struct device; 179 181 ··· 221 207 222 208 extern void disable_irq_nosync(unsigned int irq); 223 209 extern void disable_irq(unsigned int irq); 210 + extern void disable_percpu_irq(unsigned int irq); 224 211 extern void enable_irq(unsigned int irq); 212 + extern void enable_percpu_irq(unsigned int irq); 225 213 226 214 /* The following three functions are for the core kernel use only. */ 227 215 #ifdef CONFIG_GENERIC_HARDIRQS
+15 -1
include/linux/irq.h
··· 66 66 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) 67 67 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context 68 68 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread 69 + * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable 69 70 */ 70 71 enum { 71 72 IRQ_TYPE_NONE = 0x00000000, ··· 89 88 IRQ_MOVE_PCNTXT = (1 << 14), 90 89 IRQ_NESTED_THREAD = (1 << 15), 91 90 IRQ_NOTHREAD = (1 << 16), 91 + IRQ_PER_CPU_DEVID = (1 << 17), 92 92 }; 93 93 94 94 #define IRQF_MODIFY_MASK \ 95 95 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 96 96 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ 97 - IRQ_PER_CPU | IRQ_NESTED_THREAD) 97 + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) 98 98 99 99 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) 100 100 ··· 369 367 struct irqaction; 370 368 extern int setup_irq(unsigned int irq, struct irqaction *new); 371 369 extern void remove_irq(unsigned int irq, struct irqaction *act); 370 + extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); 371 + extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); 372 372 373 373 extern void irq_cpu_online(void); 374 374 extern void irq_cpu_offline(void); ··· 398 394 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); 399 395 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); 400 396 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); 397 + extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); 401 398 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 402 399 extern void handle_nested_irq(unsigned int irq); 403 400 ··· 426 421 { 427 422 irq_set_chip_and_handler_name(irq, chip, handle, NULL); 428 423 } 424 + 425 + extern int irq_set_percpu_devid(unsigned int irq); 429 426 430 427 extern void 431 428 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int 
is_chained, ··· 488 481 irq_set_status_flags(irq, IRQ_NESTED_THREAD); 489 482 else 490 483 irq_clear_status_flags(irq, IRQ_NESTED_THREAD); 484 + } 485 + 486 + static inline void irq_set_percpu_devid_flags(unsigned int irq) 487 + { 488 + irq_set_status_flags(irq, 489 + IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD | 490 + IRQ_NOPROBE | IRQ_PER_CPU_DEVID); 491 491 } 492 492 493 493 /* Handle dynamic irq creation and destruction */
+1
include/linux/irqdesc.h
··· 53 53 unsigned long last_unhandled; /* Aging timer for unhandled count */ 54 54 unsigned int irqs_unhandled; 55 55 raw_spinlock_t lock; 56 + struct cpumask *percpu_enabled; 56 57 #ifdef CONFIG_SMP 57 58 const struct cpumask *affinity_hint; 58 59 struct irq_affinity_notify *affinity_notify;
+57 -7
kernel/irq/chip.c
··· 26 26 int irq_set_chip(unsigned int irq, struct irq_chip *chip) 27 27 { 28 28 unsigned long flags; 29 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 29 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 30 30 31 31 if (!desc) 32 32 return -EINVAL; ··· 54 54 int irq_set_irq_type(unsigned int irq, unsigned int type) 55 55 { 56 56 unsigned long flags; 57 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 57 + struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 58 58 int ret = 0; 59 59 60 60 if (!desc) ··· 78 78 int irq_set_handler_data(unsigned int irq, void *data) 79 79 { 80 80 unsigned long flags; 81 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 81 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 82 82 83 83 if (!desc) 84 84 return -EINVAL; ··· 98 98 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) 99 99 { 100 100 unsigned long flags; 101 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 101 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 102 102 103 103 if (!desc) 104 104 return -EINVAL; ··· 119 119 int irq_set_chip_data(unsigned int irq, void *data) 120 120 { 121 121 unsigned long flags; 122 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 122 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 123 123 124 124 if (!desc) 125 125 return -EINVAL; ··· 202 202 desc->irq_data.chip->irq_disable(&desc->irq_data); 203 203 irq_state_set_masked(desc); 204 204 } 205 + } 206 + 207 + void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) 208 + { 209 + if (desc->irq_data.chip->irq_enable) 210 + desc->irq_data.chip->irq_enable(&desc->irq_data); 211 + else 212 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 213 + cpumask_set_cpu(cpu, desc->percpu_enabled); 214 + } 215 + 216 + void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) 217 + { 218 + if (desc->irq_data.chip->irq_disable) 219 + 
desc->irq_data.chip->irq_disable(&desc->irq_data); 220 + else 221 + desc->irq_data.chip->irq_mask(&desc->irq_data); 222 + cpumask_clear_cpu(cpu, desc->percpu_enabled); 205 223 } 206 224 207 225 static inline void mask_ack_irq(struct irq_desc *desc) ··· 562 544 chip->irq_eoi(&desc->irq_data); 563 545 } 564 546 547 + /** 548 + * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids 549 + * @irq: the interrupt number 550 + * @desc: the interrupt description structure for this irq 551 + * 552 + * Per CPU interrupts on SMP machines without locking requirements. Same as 553 + * handle_percpu_irq() above but with the following extras: 554 + * 555 + * action->percpu_dev_id is a pointer to percpu variables which 556 + * contain the real device id for the cpu on which this handler is 557 + * called 558 + */ 559 + void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) 560 + { 561 + struct irq_chip *chip = irq_desc_get_chip(desc); 562 + struct irqaction *action = desc->action; 563 + void *dev_id = __this_cpu_ptr(action->percpu_dev_id); 564 + irqreturn_t res; 565 + 566 + kstat_incr_irqs_this_cpu(irq, desc); 567 + 568 + if (chip->irq_ack) 569 + chip->irq_ack(&desc->irq_data); 570 + 571 + trace_irq_handler_entry(irq, action); 572 + res = action->handler(irq, dev_id); 573 + trace_irq_handler_exit(irq, action, res); 574 + 575 + if (chip->irq_eoi) 576 + chip->irq_eoi(&desc->irq_data); 577 + } 578 + 565 579 void 566 580 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 567 581 const char *name) 568 582 { 569 583 unsigned long flags; 570 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 584 + struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); 571 585 572 586 if (!desc) 573 587 return; ··· 643 593 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) 644 594 { 645 595 unsigned long flags; 646 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 596 + struct irq_desc *desc 
= irq_get_desc_lock(irq, &flags, 0); 647 597 648 598 if (!desc) 649 599 return;
+14 -5
kernel/irq/internals.h
··· 71 71 extern void irq_shutdown(struct irq_desc *desc); 72 72 extern void irq_enable(struct irq_desc *desc); 73 73 extern void irq_disable(struct irq_desc *desc); 74 + extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); 75 + extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); 74 76 extern void mask_irq(struct irq_desc *desc); 75 77 extern void unmask_irq(struct irq_desc *desc); 76 78 ··· 116 114 desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); 117 115 } 118 116 117 + #define _IRQ_DESC_CHECK (1 << 0) 118 + #define _IRQ_DESC_PERCPU (1 << 1) 119 + 120 + #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) 121 + #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) 122 + 119 123 struct irq_desc * 120 - __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus); 124 + __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 125 + unsigned int check); 121 126 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); 122 127 123 128 static inline struct irq_desc * 124 - irq_get_desc_buslock(unsigned int irq, unsigned long *flags) 129 + irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check) 125 130 { 126 - return __irq_get_desc_lock(irq, flags, true); 131 + return __irq_get_desc_lock(irq, flags, true, check); 127 132 } 128 133 129 134 static inline void ··· 140 131 } 141 132 142 133 static inline struct irq_desc * 143 - irq_get_desc_lock(unsigned int irq, unsigned long *flags) 134 + irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check) 144 135 { 145 - return __irq_get_desc_lock(irq, flags, false); 136 + return __irq_get_desc_lock(irq, flags, false, check); 146 137 } 147 138 148 139 static inline void
+31 -1
kernel/irq/irqdesc.c
··· 424 424 } 425 425 426 426 struct irq_desc * 427 - __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus) 427 + __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 428 + unsigned int check) 428 429 { 429 430 struct irq_desc *desc = irq_to_desc(irq); 430 431 431 432 if (desc) { 433 + if (check & _IRQ_DESC_CHECK) { 434 + if ((check & _IRQ_DESC_PERCPU) && 435 + !irq_settings_is_per_cpu_devid(desc)) 436 + return NULL; 437 + 438 + if (!(check & _IRQ_DESC_PERCPU) && 439 + irq_settings_is_per_cpu_devid(desc)) 440 + return NULL; 441 + } 442 + 432 443 if (bus) 433 444 chip_bus_lock(desc); 434 445 raw_spin_lock_irqsave(&desc->lock, *flags); ··· 452 441 raw_spin_unlock_irqrestore(&desc->lock, flags); 453 442 if (bus) 454 443 chip_bus_sync_unlock(desc); 444 + } 445 + 446 + int irq_set_percpu_devid(unsigned int irq) 447 + { 448 + struct irq_desc *desc = irq_to_desc(irq); 449 + 450 + if (!desc) 451 + return -EINVAL; 452 + 453 + if (desc->percpu_enabled) 454 + return -EINVAL; 455 + 456 + desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); 457 + 458 + if (!desc->percpu_enabled) 459 + return -ENOMEM; 460 + 461 + irq_set_percpu_devid_flags(irq); 462 + return 0; 455 463 } 456 464 457 465 /**
+193 -9
kernel/irq/manage.c
··· 195 195 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) 196 196 { 197 197 unsigned long flags; 198 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 198 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 199 199 200 200 if (!desc) 201 201 return -EINVAL; ··· 356 356 static int __disable_irq_nosync(unsigned int irq) 357 357 { 358 358 unsigned long flags; 359 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 359 + struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 360 360 361 361 if (!desc) 362 362 return -EINVAL; ··· 448 448 void enable_irq(unsigned int irq) 449 449 { 450 450 unsigned long flags; 451 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 451 + struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 452 452 453 453 if (!desc) 454 454 return; ··· 491 491 int irq_set_irq_wake(unsigned int irq, unsigned int on) 492 492 { 493 493 unsigned long flags; 494 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 494 + struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 495 495 int ret = 0; 496 496 497 497 if (!desc) ··· 532 532 int can_request_irq(unsigned int irq, unsigned long irqflags) 533 533 { 534 534 unsigned long flags; 535 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 535 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 536 536 int canrequest = 0; 537 537 538 538 if (!desc) ··· 1121 1121 int retval; 1122 1122 struct irq_desc *desc = irq_to_desc(irq); 1123 1123 1124 + if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1125 + return -EINVAL; 1124 1126 chip_bus_lock(desc); 1125 1127 retval = __setup_irq(irq, desc, act); 1126 1128 chip_bus_sync_unlock(desc); ··· 1131 1129 } 1132 1130 EXPORT_SYMBOL_GPL(setup_irq); 1133 1131 1134 - /* 1132 + /* 1135 1133 * Internal function to unregister an irqaction - used to free 1136 1134 * regular and special 
interrupts that are part of the architecture. 1137 1135 */ ··· 1229 1227 */ 1230 1228 void remove_irq(unsigned int irq, struct irqaction *act) 1231 1229 { 1232 - __free_irq(irq, act->dev_id); 1230 + struct irq_desc *desc = irq_to_desc(irq); 1231 + 1232 + if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1233 + __free_irq(irq, act->dev_id); 1233 1234 } 1234 1235 EXPORT_SYMBOL_GPL(remove_irq); 1235 1236 ··· 1254 1249 { 1255 1250 struct irq_desc *desc = irq_to_desc(irq); 1256 1251 1257 - if (!desc) 1252 + if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1258 1253 return; 1259 1254 1260 1255 #ifdef CONFIG_SMP ··· 1332 1327 if (!desc) 1333 1328 return -EINVAL; 1334 1329 1335 - if (!irq_settings_can_request(desc)) 1330 + if (!irq_settings_can_request(desc) || 1331 + WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1336 1332 return -EINVAL; 1337 1333 1338 1334 if (!handler) { ··· 1418 1412 return !ret ? IRQC_IS_HARDIRQ : ret; 1419 1413 } 1420 1414 EXPORT_SYMBOL_GPL(request_any_context_irq); 1415 + 1416 + void enable_percpu_irq(unsigned int irq) 1417 + { 1418 + unsigned int cpu = smp_processor_id(); 1419 + unsigned long flags; 1420 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 1421 + 1422 + if (!desc) 1423 + return; 1424 + 1425 + irq_percpu_enable(desc, cpu); 1426 + irq_put_desc_unlock(desc, flags); 1427 + } 1428 + 1429 + void disable_percpu_irq(unsigned int irq) 1430 + { 1431 + unsigned int cpu = smp_processor_id(); 1432 + unsigned long flags; 1433 + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 1434 + 1435 + if (!desc) 1436 + return; 1437 + 1438 + irq_percpu_disable(desc, cpu); 1439 + irq_put_desc_unlock(desc, flags); 1440 + } 1441 + 1442 + /* 1443 + * Internal function to unregister a percpu irqaction. 
1444 + */ 1445 + static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) 1446 + { 1447 + struct irq_desc *desc = irq_to_desc(irq); 1448 + struct irqaction *action; 1449 + unsigned long flags; 1450 + 1451 + WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 1452 + 1453 + if (!desc) 1454 + return NULL; 1455 + 1456 + raw_spin_lock_irqsave(&desc->lock, flags); 1457 + 1458 + action = desc->action; 1459 + if (!action || action->percpu_dev_id != dev_id) { 1460 + WARN(1, "Trying to free already-free IRQ %d\n", irq); 1461 + goto bad; 1462 + } 1463 + 1464 + if (!cpumask_empty(desc->percpu_enabled)) { 1465 + WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", 1466 + irq, cpumask_first(desc->percpu_enabled)); 1467 + goto bad; 1468 + } 1469 + 1470 + /* Found it - now remove it from the list of entries: */ 1471 + desc->action = NULL; 1472 + 1473 + raw_spin_unlock_irqrestore(&desc->lock, flags); 1474 + 1475 + unregister_handler_proc(irq, action); 1476 + 1477 + module_put(desc->owner); 1478 + return action; 1479 + 1480 + bad: 1481 + raw_spin_unlock_irqrestore(&desc->lock, flags); 1482 + return NULL; 1483 + } 1484 + 1485 + /** 1486 + * remove_percpu_irq - free a per-cpu interrupt 1487 + * @irq: Interrupt line to free 1488 + * @act: irqaction for the interrupt 1489 + * 1490 + * Used to remove interrupts statically setup by the early boot process. 1491 + */ 1492 + void remove_percpu_irq(unsigned int irq, struct irqaction *act) 1493 + { 1494 + struct irq_desc *desc = irq_to_desc(irq); 1495 + 1496 + if (desc && irq_settings_is_per_cpu_devid(desc)) 1497 + __free_percpu_irq(irq, act->percpu_dev_id); 1498 + } 1499 + 1500 + /** 1501 + * free_percpu_irq - free an interrupt allocated with request_percpu_irq 1502 + * @irq: Interrupt line to free 1503 + * @dev_id: Device identity to free 1504 + * 1505 + * Remove a percpu interrupt handler. The handler is removed, but 1506 + * the interrupt line is not disabled. 
This must be done on each 1507 + * CPU before calling this function. The function does not return 1508 + * until any executing interrupts for this IRQ have completed. 1509 + * 1510 + * This function must not be called from interrupt context. 1511 + */ 1512 + void free_percpu_irq(unsigned int irq, void __percpu *dev_id) 1513 + { 1514 + struct irq_desc *desc = irq_to_desc(irq); 1515 + 1516 + if (!desc || !irq_settings_is_per_cpu_devid(desc)) 1517 + return; 1518 + 1519 + chip_bus_lock(desc); 1520 + kfree(__free_percpu_irq(irq, dev_id)); 1521 + chip_bus_sync_unlock(desc); 1522 + } 1523 + 1524 + /** 1525 + * setup_percpu_irq - setup a per-cpu interrupt 1526 + * @irq: Interrupt line to setup 1527 + * @act: irqaction for the interrupt 1528 + * 1529 + * Used to statically setup per-cpu interrupts in the early boot process. 1530 + */ 1531 + int setup_percpu_irq(unsigned int irq, struct irqaction *act) 1532 + { 1533 + struct irq_desc *desc = irq_to_desc(irq); 1534 + int retval; 1535 + 1536 + if (!desc || !irq_settings_is_per_cpu_devid(desc)) 1537 + return -EINVAL; 1538 + chip_bus_lock(desc); 1539 + retval = __setup_irq(irq, desc, act); 1540 + chip_bus_sync_unlock(desc); 1541 + 1542 + return retval; 1543 + } 1544 + 1545 + /** 1546 + * request_percpu_irq - allocate a percpu interrupt line 1547 + * @irq: Interrupt line to allocate 1548 + * @handler: Function to be called when the IRQ occurs. 1549 + * @devname: An ascii name for the claiming device 1550 + * @dev_id: A percpu cookie passed back to the handler function 1551 + * 1552 + * This call allocates interrupt resources, but doesn't 1553 + * automatically enable the interrupt. It has to be done on each 1554 + * CPU using enable_percpu_irq(). 1555 + * 1556 + * Dev_id must be globally unique. It is a per-cpu variable, and 1557 + * the handler gets called with the interrupted CPU's instance of 1558 + * that variable. 
1559 + */ 1560 + int request_percpu_irq(unsigned int irq, irq_handler_t handler, 1561 + const char *devname, void __percpu *dev_id) 1562 + { 1563 + struct irqaction *action; 1564 + struct irq_desc *desc; 1565 + int retval; 1566 + 1567 + if (!dev_id) 1568 + return -EINVAL; 1569 + 1570 + desc = irq_to_desc(irq); 1571 + if (!desc || !irq_settings_can_request(desc) || 1572 + !irq_settings_is_per_cpu_devid(desc)) 1573 + return -EINVAL; 1574 + 1575 + action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 1576 + if (!action) 1577 + return -ENOMEM; 1578 + 1579 + action->handler = handler; 1580 + action->flags = IRQF_PERCPU; 1581 + action->name = devname; 1582 + action->percpu_dev_id = dev_id; 1583 + 1584 + chip_bus_lock(desc); 1585 + retval = __setup_irq(irq, desc, action); 1586 + chip_bus_sync_unlock(desc); 1587 + 1588 + if (retval) 1589 + kfree(action); 1590 + 1591 + return retval; 1592 + }
+7
kernel/irq/settings.h
··· 13 13 _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, 14 14 _IRQ_NO_BALANCING = IRQ_NO_BALANCING, 15 15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, 16 + _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, 16 17 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, 17 18 }; 18 19 ··· 25 24 #define IRQ_NOTHREAD GOT_YOU_MORON 26 25 #define IRQ_NOAUTOEN GOT_YOU_MORON 27 26 #define IRQ_NESTED_THREAD GOT_YOU_MORON 27 + #define IRQ_PER_CPU_DEVID GOT_YOU_MORON 28 28 #undef IRQF_MODIFY_MASK 29 29 #define IRQF_MODIFY_MASK GOT_YOU_MORON 30 30 ··· 39 37 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) 40 38 { 41 39 return desc->status_use_accessors & _IRQ_PER_CPU; 40 + } 41 + 42 + static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc) 43 + { 44 + return desc->status_use_accessors & _IRQ_PER_CPU_DEVID; 42 45 } 43 46 44 47 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)