linux/interrupt.h at v5.13
/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has
 *                finished. Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable the IRQ or NMI automatically when users request
 *                  it. Users will enable it explicitly by enable_irq() or
 *                  enable_nmi() later.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
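/*
 * Usage sketch for the trigger and sharing flags above (illustrative
 * only; the foo_* names are hypothetical, not part of this header):
 *
 *	static irqreturn_t foo_handler(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// shared line: not ours
 *			return IRQ_NONE;
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(foo->irq, foo_handler,
 *			  IRQF_TRIGGER_FALLING | IRQF_SHARED,
 *			  "foo", foo);
 *
 * On a shared line the handler must check whether its own device raised
 * the interrupt and return IRQ_NONE if not, so the core can try the other
 * handlers registered on the same line.
 */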
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);
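/*
 * Sketch of the split threaded-interrupt model, using the same
 * hypothetical foo driver as above. The primary handler runs in hardirq
 * context, does the minimum needed to quiesce the line, and defers the
 * rest to the irq thread:
 *
 *	static irqreturn_t foo_quick(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;	// defer to foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_quick, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 *
 * IRQF_ONESHOT keeps the line masked until foo_thread_fn() returns; it is
 * mandatory when a NULL primary handler is used with a level-triggered
 * interrupt.
 */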
/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
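/*
 * Sketch of the device-managed variant in a probe path (hypothetical foo
 * driver; error handling trimmed to the essentials):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_device *foo;
 *		int irq, err;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		err = devm_request_irq(&pdev->dev, irq, foo_handler, 0,
 *				       dev_name(&pdev->dev), foo);
 *		if (err)
 *			return err;
 *		...
 *	}
 *
 * The managed variant frees the IRQ automatically when the device is
 * unbound, so no explicit free_irq() is needed in the remove path; the
 * handler must remain safe to run until that point.
 */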
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define IRQ_AFFINITY_MAX_SETS	4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed:	1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};
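/*
 * Sketch of how struct irq_affinity is typically consumed, here via the
 * PCI MSI-X helper pci_alloc_irq_vectors_affinity() (a real API, though
 * the vector counts below are made up). One pre-vector is reserved for a
 * config/admin interrupt that should not take part in the automatic
 * spreading of the remaining queue vectors:
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors = 1,	// vector 0: config/admin, not spread
 *	};
 *	int nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, 33,
 *					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
 *					       &affd);
 *
 * The core then spreads the remaining vectors across the possible CPUs
 * and marks them as managed (see struct irq_affinity_desc above).
 */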
#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below. */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */
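/*
 * Minimal sketch of pinning an interrupt to one CPU with the helper
 * above (the IRQ number and CPU id are arbitrary examples):
 *
 *	int err;
 *
 *	err = irq_set_affinity(irq, cpumask_of(2));
 *	if (err)
 *		pr_warn("IRQ %u: cannot move to CPU2 (%d)\n", irq, err);
 *
 * This is the same operation that writing a mask to
 * /proc/irq/<irq>/smp_affinity performs from user space; it fails if the
 * mask contains no online CPU.
 */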
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used by locking constructs which know that a
 * particular irq context is disabled and is the only irq-context
 * user of a lock, so that it is safe to take the lock in the
 * irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
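/*
 * Sketch of the usual wake-IRQ pairing in a driver's system-sleep
 * callbacks (hypothetical foo driver; device_may_wakeup() is the
 * standard PM predicate):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 * enable_irq_wake()/disable_irq_wake() calls must be balanced; the irq
 * core keeps a per-interrupt wake-depth count.
 */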
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads	(true)
# else
extern bool force_irqthreads;
# endif
#else
#define force_irqthreads	(0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE avoid allocating new softirqs unless you _really_ need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably, RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
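/*
 * Registration/raise pattern for the softirq slots above, as used by the
 * core subsystems that own them (a sketch mirroring kernel/softirq.c
 * usage; FOO_SOFTIRQ is hypothetical - drivers should use tasklets or
 * threaded irqs instead, per the note above):
 *
 *	static void foo_softirq_action(struct softirq_action *h)
 *	{
 *		// runs per CPU, with bottom halves disabled
 *	}
 *
 *	// boot-time setup, e.g. from a subsystem init function:
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *
 *	// later, typically from hardirq context:
 *	raise_softirq(FOO_SOFTIRQ);	// marks it pending on this CPU
 *
 * A real user would also have to add an entry to the enum above and to
 * softirq_to_name[].
 */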
/* Tasklets --- multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead:
   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not wrt other
     tasklets. If a client needs inter-tasklet synchronization, it has to
     provide it itself, e.g. with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));
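/*
 * Sketch of the modern callback-style tasklet API above (hypothetical
 * foo driver; from_tasklet() recovers the containing structure, like
 * container_of()):
 *
 *	struct foo_device {
 *		struct tasklet_struct tasklet;
 *		...
 *	};
 *
 *	static void foo_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct foo_device *foo = from_tasklet(foo, t, tasklet);
 *
 *		foo_process_completions(foo);	// atomic context: no sleeping
 *	}
 *
 *	// in foo_probe():
 *	tasklet_setup(&foo->tasklet, foo_tasklet_fn);
 *
 *	// from the interrupt handler:
 *	tasklet_schedule(&foo->tasklet);
 *
 *	// in foo_remove(), after the irq is freed:
 *	tasklet_kill(&foo->tasklet);
 *
 * Per the deprecation note above, new code should normally use a
 * threaded interrupt or a workqueue instead.
 */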
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry	__section(".irqentry.text")
#endif

#define __softirq_entry	__section(".softirqentry.text")

#endif /* _LINUX_INTERRUPT_H */