/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H

#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/mutex.h>

/*
 * Core internal functions to deal with irq descriptors
 */

struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
struct irq_domain;
struct pt_regs;

/**
 * struct irqstat - interrupt statistics
 * @cnt: real-time interrupt count
 * @ref: snapshot of interrupt count
 */
struct irqstat {
	unsigned int cnt;
#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
	unsigned int ref;
#endif
};

/**
 * struct irq_desc - interrupt descriptor
 * @irq_common_data: per irq and chip data passed down to chip functions
 * @kstat_irqs: irq stats per cpu
 * @handle_irq: highlevel irq-events handler
 * @action: the irq action chain
 * @status_use_accessors: status information
 * @core_internal_state__do_not_mess_with_it: core internal status information
 * @depth: disable-depth, for nested irq_disable() calls
 * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
 * @tot_count: stats field for non-percpu irqs
 * @irq_count: stats field to detect stalled irqs
 * @last_unhandled: aging timer for unhandled count
 * @irqs_unhandled: stats field for spurious unhandled interrupts
 * @threads_handled: stats field for deferred spurious detection of threaded handlers
 * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
 * @lock: locking for SMP
 * @affinity_hint: hint to user space for preferred irq affinity
 * @affinity_notify: context for notification of affinity changes
 * @pending_mask: pending rebalanced interrupts
 * @threads_oneshot: bitfield to handle shared oneshot threads
 * @threads_active: number of irqaction threads currently running
 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
 * @nr_actions: number of installed actions on this descriptor
 * @no_suspend_depth: number of irqactions on an irq descriptor with
 *                    IRQF_NO_SUSPEND set
 * @force_resume_depth: number of irqactions on an irq descriptor with
 *                    IRQF_FORCE_RESUME set
 * @rcu: rcu head for delayed free
 * @kobj: kobject used to represent this struct in sysfs
 * @request_mutex: mutex to protect request/free before locking desc->lock
 * @dir: /proc/irq/ procfs entry
 * @debugfs_file: dentry for the debugfs file
 * @name: flow handler name for /proc/interrupts output
 */
struct irq_desc {
	struct irq_common_data irq_common_data;
	struct irq_data irq_data;
	struct irqstat __percpu *kstat_irqs;
	irq_flow_handler_t handle_irq;
	struct irqaction *action;	/* IRQ action list */
	unsigned int status_use_accessors;
	unsigned int core_internal_state__do_not_mess_with_it;
	unsigned int depth;		/* nested irq disables */
	unsigned int wake_depth;	/* nested wake enables */
	unsigned int tot_count;
	unsigned int irq_count;		/* For detecting broken IRQs */
	unsigned long last_unhandled;	/* Aging timer for unhandled count */
	unsigned int irqs_unhandled;
	atomic_t threads_handled;
	int threads_handled_last;
	raw_spinlock_t lock;
	struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
	const struct cpumask *affinity_hint;
	struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t pending_mask;
#endif
#endif
	unsigned long threads_oneshot;
	atomic_t threads_active;
	wait_queue_head_t wait_for_threads;
#ifdef CONFIG_PM_SLEEP
	unsigned int nr_actions;
	unsigned int no_suspend_depth;
	unsigned int cond_suspend_depth;
	unsigned int force_resume_depth;
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *dir;
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	struct dentry *debugfs_file;
	const char *dev_name;
#endif
#ifdef CONFIG_SPARSE_IRQ
	struct rcu_head rcu;
	struct kobject kobj;
#endif
	struct mutex request_mutex;
	int parent_irq;
	struct module *owner;
	const char *name;
#ifdef CONFIG_HARDIRQS_SW_RESEND
	struct hlist_node resend_node;
#endif
} ____cacheline_internodealigned_in_smp;

#ifdef CONFIG_SPARSE_IRQ
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif

static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
					      unsigned int cpu)
{
	return desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
}

static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
	return container_of(data->common, struct irq_desc, irq_common_data);
}

static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
{
	return desc->irq_data.irq;
}

static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
	return &desc->irq_data;
}

static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
	return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
	return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
	return desc->irq_common_data.handler_data;
}

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt.
 */
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
	desc->handle_irq(desc);
}

int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
int generic_handle_irq_safe(unsigned int irq);

#ifdef CONFIG_IRQ_DOMAIN
/*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
 * and handle the resulting interrupt number. Return -EINVAL if the
 * conversion failed.
 */
int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq);
#endif
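/*
 * Illustrative sketch (not part of this header, hence guarded by "#if 0"):
 * a minimal example of how a chained irqchip driver might demultiplex a
 * parent interrupt into the generic layer via generic_handle_domain_irq().
 * struct my_mux, the MY_MUX_STATUS register offset and the handler below
 * are hypothetical stand-ins for driver-specific code.
 */
#if 0
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/irqchip/chained_irq.h>

#define MY_MUX_STATUS	0x00	/* hypothetical pending-status register */

struct my_mux {
	struct irq_domain *domain;
	unsigned int nr_irqs;
	void __iomem *base;
};

static void my_mux_handle_irq(struct irq_desc *desc)
{
	struct my_mux *mux = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	unsigned int hwirq;

	chained_irq_enter(chip, desc);

	/* Each set bit corresponds to one pending child interrupt. */
	pending = readl(mux->base + MY_MUX_STATUS);
	for_each_set_bit(hwirq, &pending, mux->nr_irqs)
		generic_handle_domain_irq(mux->domain, hwirq);

	chained_irq_exit(chip, desc);
}
#endif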
/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
	return desc && desc->action != NULL;
}

/**
 * irq_set_handler_locked - Set irq handler from a locked region
 * @data:	Pointer to the irq_data structure which identifies the irq
 * @handler:	Flow control handler function for this interrupt
 *
 * Sets the handler in the irq descriptor associated with @data.
 *
 * Must be called with irq_desc locked and valid parameters. Typical
 * call site is the irq_set_type() callback.
 */
static inline void irq_set_handler_locked(struct irq_data *data,
					  irq_flow_handler_t handler)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->handle_irq = handler;
}

/**
 * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
 * @data:	Pointer to the irq_data structure for which the chip is set
 * @chip:	Pointer to the new irq chip
 * @handler:	Flow control handler function for this interrupt
 * @name:	Name of the interrupt
 *
 * Replaces the irq chip at the proper hierarchy level in @data and
 * sets the handler and name in the associated irq descriptor.
 *
 * Must be called with irq_desc locked and valid parameters.
 */
static inline void
irq_set_chip_handler_name_locked(struct irq_data *data,
				 const struct irq_chip *chip,
				 irq_flow_handler_t handler, const char *name)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->handle_irq = handler;
	desc->name = name;
	data->chip = (struct irq_chip *)chip;
}
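/*
 * Illustrative sketch (not part of this header, hence guarded by "#if 0"):
 * the typical locked call site mentioned above, an irq_chip::irq_set_type()
 * callback that switches the flow handler to match the requested trigger.
 * my_chip_set_type() is a hypothetical driver callback; handle_level_irq()
 * and handle_edge_irq() are the generic flow handlers from <linux/irq.h>.
 */
#if 0
static int my_chip_set_type(struct irq_data *data, unsigned int type)
{
	/* Hardware trigger-mode programming would go here, then: */
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_HIGH:
	case IRQ_TYPE_LEVEL_LOW:
		irq_set_handler_locked(data, handle_level_irq);
		break;
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_EDGE_BOTH:
		irq_set_handler_locked(data, handle_edge_irq);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif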
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);

static inline bool irq_balancing_disabled(unsigned int irq)
{
	return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK);
}

static inline bool irq_is_percpu(unsigned int irq)
{
	return irq_check_status_bit(irq, IRQ_PER_CPU);
}

static inline bool irq_is_percpu_devid(unsigned int irq)
{
	return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
}

void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
			     struct lock_class_key *request_class);
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
		      struct lock_class_key *request_class)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		__irq_set_lockdep_class(irq, lock_class, request_class);
}

#endif
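/*
 * Illustrative sketch (not part of this header, hence guarded by "#if 0"):
 * a driver that demultiplexes interrupts can give its child irqs their own
 * lockdep classes so that nested request/free of parent and child irqs does
 * not produce false-positive lockdep reports. The my_mux_* names are
 * hypothetical; irq_set_lockdep_class() is a no-op unless CONFIG_LOCKDEP
 * is enabled.
 */
#if 0
static struct lock_class_key my_mux_lock_class;
static struct lock_class_key my_mux_request_class;

static void my_mux_setup_child_irq(unsigned int virq)
{
	irq_set_lockdep_class(virq, &my_mux_lock_class,
			      &my_mux_request_class);
}
#endif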