/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
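/*
 * Illustrative sketch (not part of the original header): how a driver
 * might combine the trigger and behaviour flags above when requesting
 * a line with request_irq(), which is declared further down. "foo_isr"
 * and "foo_dev" are hypothetical driver names.
 *
 *	int ret = request_irq(irq, foo_isr,
 *			      IRQF_TRIGGER_FALLING | IRQF_SHARED,
 *			      "foo", foo_dev);
 *	if (ret)
 *		return ret;
 */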
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);
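/*
 * Illustrative sketch (not part of the original header): a minimal
 * irq_handler_t. "foo_isr", "struct foo" and foo_irq_pending() are
 * hypothetical; a handler on a shared line must check whether its own
 * device raised the interrupt and return IRQ_NONE if it did not.
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		return IRQ_HANDLED;
 *	}
 */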
#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}
#endif

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
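/*
 * Illustrative sketch (not part of the original header): a managed,
 * threaded request from a driver probe routine. The hardirq handler
 * "foo_quick_check" only acknowledges the device; the real work runs
 * in "foo_thread_fn", and with IRQF_ONESHOT the line stays masked
 * until the thread finishes. All foo_* names are hypothetical.
 *
 *	ret = devm_request_threaded_irq(&pdev->dev, irq,
 *					foo_quick_check, foo_thread_fn,
 *					IRQF_ONESHOT, "foo", foo);
 *	if (ret)
 *		return ret;
 */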
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);

/* The following three functions are for core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

static inline void irq_run_affinity_notifiers(void)
{
	flush_scheduled_work();
}

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
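/*
 * Illustrative sketch (not part of the original header): steering an
 * interrupt to a single CPU, e.g. from board or driver setup code.
 * The irq number and the choice of CPU 2 are made up.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(2));
 */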
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled, and which is the only
 * irq-context user of a lock, so that it's safe to take the lock
 * in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */


#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* Please avoid allocating new softirqs unless you need _really_ high
   frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
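/*
 * Illustrative sketch (not part of the original header): the core
 * kernel registers a handler for one of the softirq slots above
 * during init and raises it later; open_softirq() and raise_softirq()
 * are declared below. "foo_softirq_action" is a hypothetical handler;
 * drivers should use tasklets instead of new softirqs.
 *
 *	static void foo_softirq_action(struct softirq_action *h)
 *	{
 *		... bounded amount of work, runs with irqs enabled ...
 *	}
 *
 *	open_softirq(SCHED_SOFTIRQ, foo_softirq_action);
 *	...
 *	raise_softirq(SCHED_SOFTIRQ);
 */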
/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some inter-tasklet
     synchronization, it has to provide it with spinlocks.
 */
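/*
 * Illustrative sketch (not part of the original header): declaring a
 * tasklet and scheduling it from a hardirq handler so the bulk of the
 * work runs later in softirq context. All foo_* names are hypothetical.
 *
 *	static void foo_do_work(unsigned long data)
 *	{
 *		struct foo *foo = (struct foo *)data;
 *		... deferred processing ...
 *	}
 *
 *	static DECLARE_TASKLET(foo_tasklet, foo_do_work,
 *			       (unsigned long)&foo_dev);
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		... ack the hardware ...
 *		tasklet_schedule(&foo_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */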
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}
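/*
 * Illustrative sketch (not part of the original header):
 * tasklet_disable() (defined below) increments the count and then
 * waits for any running instance to finish, so hardware can be
 * reprogrammed safely before tasklet_enable() re-arms the tasklet.
 * "foo_tasklet" and foo_reprogram_hw() are hypothetical.
 *
 *	tasklet_disable(&foo_tasklet);
 *	foo_reprogram_hw(foo);
 *	tasklet_enable(&foo_tasklet);
 */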
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. local_irq_enable();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
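/*
 * Illustrative sketch (not part of the original header) of the probing
 * recipe above; foo_mask_irq(), foo_trigger_irq() and foo_ack_irq()
 * stand in for hypothetical device-specific steps.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_irq(foo);
 *	local_irq_enable();
 *	irqs = probe_irq_on();
 *	foo_trigger_irq(foo);
 *	mdelay(20);
 *	irq = probe_irq_off(irqs);
 *	foo_ack_irq(foo);
 *	if (irq <= 0)
 *		... probing failed or multiple irqs fired ...
 */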
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif