/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.txt
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices, users need to implement wakeup detection
 *                     in their interrupt handlers.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	per-CPU cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
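
/*
 * Editor's example (not part of the original header): a minimal sketch of
 * how a driver typically uses request_threaded_irq(). The handler runs in
 * hard interrupt context and only checks/acks the device; the thread
 * function does the sleepable work. All example_* names are hypothetical.
 */
static inline irqreturn_t example_quick_check(int irq, void *dev_id)
{
	/* Hardirq context: must not sleep. Tell the core to wake the thread. */
	return IRQ_WAKE_THREAD;
}

static inline irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* Thread context: may sleep, e.g. for bus transactions. */
	return IRQ_HANDLED;
}

static inline int __must_check example_request(unsigned int irq, void *dev)
{
	/*
	 * IRQF_ONESHOT keeps the line masked until example_thread_fn()
	 * returns; it is mandatory when the hardirq handler is NULL.
	 */
	return request_threaded_irq(irq, example_quick_check,
				    example_thread_fn, IRQF_ONESHOT,
				    "example", dev);
}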

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}
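
/*
 * Editor's example (not part of the original header): unlike request_irq(),
 * the per-CPU variants take a __percpu cookie, so each CPU's handler sees
 * its own instance. example_request_percpu() is a hypothetical wrapper.
 */
static inline int __must_check
example_request_percpu(unsigned int irq, irq_handler_t handler,
		       void __percpu *pcpu_cookie)
{
	/* The IRQ must have been set up as per-CPU (see IRQF_PERCPU). */
	return request_percpu_irq(irq, handler, "example-percpu",
				  pcpu_cookie);
}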

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
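
/*
 * Editor's example (not part of the original header): with the devm_*
 * variants the IRQ is released automatically when @dev is unbound, so a
 * probe() routine needs no matching free_irq(). example_probe_irq() and
 * its arguments are hypothetical; @priv must be non-NULL for IRQF_SHARED.
 */
static inline int example_probe_irq(struct device *dev, unsigned int irq,
				    irq_handler_t handler, void *priv)
{
	/* On success, no explicit cleanup is needed in remove(). */
	return devm_request_irq(dev, irq, handler, IRQF_SHARED,
				"example-dev", priv);
}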

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* The following two functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		Number of entries in the @sets array
 * @sets:		Array holding the size of each affinitized set
 */
struct irq_affinity {
	int	pre_vectors;
	int	post_vectors;
	int	nr_sets;
	int	*sets;
};

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below. */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
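
/*
 * Editor's example (not part of the original header): pinning an interrupt
 * to a single CPU with irq_set_affinity(). example_pin_irq() is a
 * hypothetical helper; cpumask_of() comes from linux/cpumask.h.
 */
static inline int example_pin_irq(unsigned int irq, unsigned int cpu)
{
	/* Fails with an error if @cpu is not online. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}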

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);

int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
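
/*
 * Editor's example (not part of the original header): spreading MSI-X
 * vectors across CPUs while keeping one pre-vector (e.g. an admin queue)
 * out of the spread. example_spread_msix() is hypothetical; the caller is
 * responsible for kfree()ing the returned array.
 */
static inline struct irq_affinity_desc *example_spread_msix(int nvec)
{
	const struct irq_affinity affd = { .pre_vectors = 1 };

	return irq_create_affinity_masks(nvec, &affd);
}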

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
	return NULL;
}

static inline int
irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs where it is known
 * that a particular irq context is disabled and is the only
 * irq-context user of a lock, so it is safe to take the lock in
 * the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
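
/*
 * Editor's example (not part of the original header): arming an IRQ as a
 * system wakeup source from a driver's suspend callback and disarming it
 * on resume. example_suspend()/example_resume() and @wake_irq are
 * hypothetical.
 */
static inline int example_suspend(unsigned int wake_irq)
{
	/* Calls nest: each enable_irq_wake() needs a disable_irq_wake(). */
	return enable_irq_wake(wake_irq);
}

static inline int example_resume(unsigned int wake_irq)
{
	return disable_irq_wake(wake_irq);
}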

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
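
/*
 * Editor's example (not part of the original header): querying whether an
 * interrupt is pending at the irqchip level. example_irq_is_pending() is
 * hypothetical; on error it conservatively reports "not pending".
 */
static inline bool example_irq_is_pending(unsigned int irq)
{
	bool pending = false;

	/* irq_get_irqchip_state() returns 0 on success, -errno otherwise. */
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;
	return pending;
}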

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/*
 * PLEASE avoid allocating new softirqs unless you really need
 * extremely high-frequency threaded job scheduling. For almost all
 * purposes tasklets are more than enough; e.g. all serial device BHs
 * et al. should be converted to tasklets, not softirqs.
 */

enum
{
	HI_SOFTIRQ = 0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,	/* Unused, but kept as tools rely on the
				   numbering. Sigh! */
	RCU_SOFTIRQ,		/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* The softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
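
/*
 * Editor's example (not part of the original header): how a subsystem
 * raises its softirq once its action has been registered via
 * open_softirq(). example_net_rx_kick() is hypothetical; NET_RX_SOFTIRQ's
 * action is registered by the network stack.
 */
static inline void example_net_rx_kick(void)
{
	/*
	 * raise_softirq() marks NET_RX_SOFTIRQ pending on this CPU; the
	 * registered action runs on softirq exit or in ksoftirqd.
	 */
	raise_softirq(NET_RX_SOFTIRQ);
}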

/* Tasklets --- the multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If the client needs some inter-task
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
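
/*
 * Editor's example (not part of the original header): setting up a tasklet
 * at runtime, scheduling it (typically from a hardirq handler), and killing
 * it on teardown. example_tasklet_fn(), example_tasklet_setup() and
 * example_tasklet_teardown() are hypothetical.
 */
static inline void example_tasklet_fn(unsigned long data)
{
	/* Runs in softirq context: must not sleep. */
}

static inline void example_tasklet_setup(struct tasklet_struct *t)
{
	tasklet_init(t, example_tasklet_fn, 0);
	tasklet_schedule(t);	/* queue it for softirq execution */
}

static inline void example_tasklet_teardown(struct tasklet_struct *t)
{
	tasklet_kill(t);	/* wait out any scheduled/running instance */
}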

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			   const enum hrtimer_mode mode)
{
	hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
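
/*
 * Editor's example (not part of the original header): arming a
 * tasklet_hrtimer so that @fn runs from tasklet (softirq) context after
 * @delay_ns. example_ttimer_arm() is hypothetical; ns_to_ktime() and
 * HRTIMER_MODE_REL come from linux/hrtimer.h.
 */
static inline void example_ttimer_arm(struct tasklet_hrtimer *ttimer,
				      enum hrtimer_restart (*fn)(struct hrtimer *),
				      u64 delay_ns)
{
	tasklet_hrtimer_init(ttimer, fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(ttimer, ns_to_ktime(delay_ns),
			      HRTIMER_MODE_REL);
}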

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
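
/*
 * Editor's example (not part of the original header): the probe sequence
 * from the comment above, condensed. example_probe() is hypothetical; the
 * device enable/quiesce steps are stubbed out.
 */
static inline int example_probe(void)
{
	unsigned long irqs;
	int irq;

	irqs = probe_irq_on();		/* take over unassigned idle IRQs */
	/* ... enable the device so it raises its interrupt, then wait ... */
	irq = probe_irq_off(irqs);	/* 0 = none, negative = multiple */

	return irq > 0 ? irq : -ENODEV;
}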

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry	__attribute__((__section__(".irqentry.text")))
#define __softirq_entry	\
	__attribute__((__section__(".softirqentry.text")))

#endif /* _LINUX_INTERRUPT_H */