Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/cpumask_types.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE 0x00000000
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING 0x00000002
#define IRQF_TRIGGER_HIGH 0x00000004
#define IRQF_TRIGGER_LOW 0x00000008
#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
			   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE 0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices, users need to implement wakeup detection in
 *                     their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
 *                  Users will enable it explicitly by enable_irq() or enable_nmi()
 *                  later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                 depends on IRQF_PERCPU.
 * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
 *                     interrupt.
 */
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_NO_AUTOEN 0x00080000
#define IRQF_NO_DEBUG 0x00100000
#define IRQF_COND_ONESHOT 0x00200000

#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ = 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler: interrupt handler function
 * @name: name of the device
 * @dev_id: cookie to identify the device
 * @percpu_dev_id: cookie to identify the device
 * @affinity: CPUs this irqaction is allowed to run on
 * @next: pointer to the next irqaction for shared interrupts
 * @irq: interrupt number
 * @flags: flags (see IRQF_* above)
 * @thread_fn: interrupt handler function for threaded interrupts
 * @thread: thread pointer for threaded interrupts
 * @secondary: pointer to secondary irqaction (force threading)
 * @thread_flags: flags related to @thread
 * @thread_mask: bitmask for keeping track of @thread activity
 * @dir: pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t handler;
	union {
		void *dev_id;
		void __percpu *percpu_dev_id;
	};
	const struct cpumask *affinity;
	struct irqaction *next;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	struct irqaction *secondary;
	unsigned int irq;
	unsigned int flags;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED (1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq: The interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Primary handler for threaded interrupts.
 *           If NULL, the default primary handler is installed.
 * @flags: Handling flags
 * @name: Name of the device generating this interrupt
 * @dev: A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev);
}
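
/*
 * Example usage (illustrative sketch, not part of the original header):
 * a hypothetical "foo" driver sharing a level-triggered interrupt line.
 * All foo_* names are assumptions for illustration only.
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// shared line, not our device
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(foo->irq, foo_isr, IRQF_SHARED | IRQF_TRIGGER_LOW,
 *			  "foo", foo);
 *	if (err)
 *		return err;
 *	...
 *	free_irq(foo->irq, foo);	// pass the same dev_id cookie
 */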

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     const cpumask_t *affinity, void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, NULL, percpu_dev_id);
}
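
/*
 * Example usage (illustrative sketch): a hypothetical per-CPU interrupt
 * user, e.g. a PMU-style driver. Per-CPU interrupts take a __percpu cookie
 * and are enabled/disabled on each CPU individually. foo_percpu_isr() and
 * struct foo_cpu_state are assumptions for illustration only.
 *
 *	static DEFINE_PER_CPU(struct foo_cpu_state, foo_state);
 *
 *	err = request_percpu_irq(irq, foo_percpu_isr, "foo", &foo_state);
 *	if (err)
 *		return err;
 *
 *	// typically from a CPU hotplug "online" callback, on each CPU:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_irq(irq);
 *	free_percpu_irq(irq, &foo_state);
 */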

static inline int __must_check
request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
			    const char *devname, const cpumask_t *affinity,
			    void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, affinity, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
		   const struct cpumask *affinity, void __percpu *dev_id);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
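
/*
 * Example usage (illustrative sketch): requesting a threaded interrupt from
 * a probe() routine with device-managed cleanup. The primary handler runs in
 * hard-irq context and only checks/acks the device; the bulk of the work runs
 * in process context in the thread function. IRQF_ONESHOT keeps the line
 * masked until the thread has finished. foo_* names are assumptions.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;		// defer to foo_irq_thread()
 *	}
 *
 *	static irqreturn_t foo_irq_thread(int irq, void *dev_id)
 *	{
 *		foo_process_events(dev_id);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	// in foo_probe(): no explicit free_irq() needed on the remove path
 *	err = devm_request_threaded_irq(&pdev->dev, irq, foo_hardirq,
 *					foo_irq_thread, IRQF_ONESHOT,
 *					"foo", foo);
 */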

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

DEFINE_LOCK_GUARD_1(disable_irq, int,
		    disable_irq(*_T->lock), enable_irq(*_T->lock))

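/*
 * Example usage (illustrative sketch): the disable_irq guard defined above
 * works with the linux/cleanup.h scoped helpers, so the line is re-enabled
 * automatically when the scope is left. "irq" is assumed to be an int holding
 * a valid Linux interrupt number; foo_* names are assumptions.
 *
 *	int irq = foo->irq;
 *
 *	scoped_guard(disable_irq, &irq) {
 *		// the interrupt line is disabled (and synchronized) here
 *		foo_reprogram_hw(foo);
 *	}	// enable_irq(irq) runs automatically on scope exit
 */
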
extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq: Interrupt to which notification applies
 * @kref: Reference count, for internal use
 * @work: Work item, for internal use
 * @notify: Function to be called on change. This will be
 *          called in process context.
 * @release: Function to be called on release. This will be
 *           called in process context. Once registered, the
 *           structure must only be freed when this function is
 *           called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};
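
/*
 * Example usage (illustrative sketch): receiving affinity-change
 * notifications. @notify and @release run in process context; @release is
 * required because the core drops its reference via the embedded kref.
 * struct foo_queue and the foo_* helpers are assumptions for illustration.
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_queue *q = container_of(notify, struct foo_queue,
 *						   affinity_notify);
 *		foo_requeue_work_on(q, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// last reference dropped; the structure may be freed now
 *	}
 *
 *	q->affinity_notify.notify = foo_affinity_notify;
 *	q->affinity_notify.release = foo_affinity_release;
 *	err = irq_set_affinity_notifier(irq, &q->affinity_notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// unregister before freeing
 */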

#define IRQ_AFFINITY_MAX_SETS 4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
 *               the MSI(-X) vector space
 * @post_vectors: Don't apply affinity to @post_vectors at end of
 *                the MSI(-X) vector space
 * @nr_sets: The number of interrupt sets for which affinity
 *           spreading is required
 * @set_size: Array holding the size of each interrupt set
 * @calc_sets: Callback for calculating the number and size
 *             of interrupt sets
 * @priv: Private data for usage by @calc_sets, usually a
 *        pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int pre_vectors;
	unsigned int post_vectors;
	unsigned int nr_sets;
	unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
	void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void *priv;
};
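
/*
 * Example usage (illustrative sketch): a typical multi-queue device that
 * reserves one non-spread vector for its admin interrupt and lets the core
 * spread the remaining queue vectors across CPUs, e.g. through the PCI
 * helper pci_alloc_irq_vectors_affinity(). The values and the max_queues
 * variable are assumptions for illustration only.
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors = 1,	// vector 0: admin interrupt, not spread
 *	};
 *	int nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_queues + 1,
 *					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
 *					       &affd);
 */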

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask: cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask mask;
	unsigned int is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq: Interrupt to update
 * @m: cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}

/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *			       cpumask to the interrupt
 * @irq: Interrupt to update
 * @m: cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and, if @m is not NULL, applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}
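
/*
 * Example usage (illustrative sketch): a multi-queue driver hinting that each
 * queue interrupt should be serviced near the device's NUMA node, using the
 * generic cpumask_local_spread() and PCI pci_irq_vector() helpers. The
 * nr_queues variable and loop structure are assumptions for illustration.
 *
 *	for (i = 0; i < nr_queues; i++) {
 *		int cpu = cpumask_local_spread(i, dev_to_node(&pdev->dev));
 *
 *		irq_update_affinity_hint(pci_irq_vector(pdev, i),
 *					 cpumask_of(cpu));
 *	}
 *	...
 *	// clear the hints again before freeing the vectors
 *	for (i = 0; i < nr_queues; i++)
 *		irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL);
 */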

extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs which know that a
 * particular irq context is disabled and is the only irq-context
 * user of a lock, so that it is safe to take the lock in the
 * irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_save(*flags);
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
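
/*
 * Example usage (illustrative sketch): marking a wakeup-capable interrupt in
 * a driver's system suspend/resume callbacks. The foo_* names are assumptions
 * for illustration only.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */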

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
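
/*
 * Example usage (illustrative sketch): polling whether a level interrupt is
 * still pending at the irqchip, e.g. while quiescing a device, and clearing
 * it if so. "irq" is assumed to hold a valid Linux interrupt number.
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */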

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
# define force_irqthreads() (true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads() (false)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable() do { } while(0)
#endif

/* PLEASE avoid allocating new softirqs unless you _really_ need high
   frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably, RCU should always be the last softirq */

	NR_SOFTIRQS
};

/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 *
 * _ RCU:
 *	1) rcutree_migrate_callbacks() migrates the queue.
 *	2) rcutree_report_cpu_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 *
 * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
				   BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void (*action)(void);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef CONFIG_PREEMPT_RT
extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
	do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

/*
 * With forced-threaded interrupts enabled a raised softirq is deferred to
 * ksoftirqd unless it can be handled within the threaded interrupt. This
 * affects timer_list timers and hrtimers which are explicitly marked with
 * HRTIMER_MODE_SOFT.
 * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing,
 * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD.
 * Userspace controlled timers (like the clock_nanosleep() interface) are
 * divided into two categories: tasks with an elevated scheduling policy
 * (SCHED_{FIFO|RR|DL}) and the remaining scheduling policies. Tasks with an
 * elevated scheduling policy are woken up directly from the HARDIRQ while all
 * other wake ups are delayed to softirq and so to ksoftirqd.
 *
 * The ksoftirqd runs at SCHED_OTHER policy, at which it should remain, since
 * it handles the softirqs in an overloaded situation (when not everything was
 * handled within its last run).
 * If the timers are handled at SCHED_OTHER priority then they compete with
 * all other SCHED_OTHER tasks for CPU resources and are possibly delayed.
 * Moving the timer softirqs to a low priority SCHED_FIFO thread instead
 * ensures that timers are handled before scheduling any SCHED_OTHER thread.
 */
DECLARE_PER_CPU(struct task_struct *, ktimerd);
DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
void raise_ktimers_thread(unsigned int nr);

static inline unsigned int local_timers_pending_force_th(void)
{
	return __this_cpu_read(pending_timer_softirq);
}

static inline void raise_timer_softirq(unsigned int nr)
{
	lockdep_assert_in_irq();
	if (force_irqthreads())
		raise_ktimers_thread(nr);
	else
		__raise_softirq_irqoff(nr);
}

static inline unsigned int local_timers_pending(void)
{
	if (force_irqthreads())
		return local_timers_pending_force_th();
	else
		return local_softirq_pending();
}

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead:
   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it, e.g. with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback) \
struct tasklet_struct name = { \
	.count = ATOMIC_INIT(0), \
	.callback = _callback, \
	.use_callback = true, \
}

#define DECLARE_TASKLET_DISABLED(name, _callback) \
struct tasklet_struct name = { \
	.count = ATOMIC_INIT(1), \
	.callback = _callback, \
	.use_callback = true, \
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func) \
struct tasklet_struct name = { \
	.count = ATOMIC_INIT(0), \
	.func = _func, \
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
struct tasklet_struct name = { \
	.count = ATOMIC_INIT(1), \
	.func = _func, \
}

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));
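
/*
 * Example usage (illustrative sketch of this deprecated API, kept for
 * reading existing code; new code should prefer threaded IRQs or
 * workqueues): a tasklet scheduled from a hard-irq handler to do the
 * bottom-half work. The foo_* names are assumptions for illustration only.
 *
 *	static void foo_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct foo_dev *foo = from_tasklet(foo, t, tasklet);
 *
 *		foo_process_ring(foo);			// runs in softirq context
 *	}
 *
 *	tasklet_setup(&foo->tasklet, foo_tasklet_fn);	// during init
 *	...
 *	tasklet_schedule(&foo->tasklet);		// from the irq handler
 *	...
 *	tasklet_kill(&foo->tasklet);			// during teardown
 */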

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
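
/*
 * Example usage (illustrative sketch of the recipe above, for a legacy
 * ISA-style device): foo_mask_irq(), foo_trigger_irq() and foo_ack_irq()
 * are hypothetical device-specific helpers; the short delay is an assumption.
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	foo_mask_irq(foo);		// step 1: quiesce the device
 *	mask = probe_irq_on();		// step 3: grab unassigned IRQs
 *	foo_trigger_irq(foo);		// step 4: make the device interrupt
 *	msleep(10);			// step 5: give it time to fire
 *	irq = probe_irq_off(mask);	// step 6: 0 = none, <0 = multiple
 *	foo_ack_irq(foo);		// step 7: clear the pending interrupt
 */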

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry __section(".irqentry.text")
#endif

#define __softirq_entry __section(".softirqentry.text")

#endif