Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

include/linux: Remove all users of FASTCALL() macro

FASTCALL() is always expanded to empty, remove it.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Harvey Harrison and committed by Linus Torvalds.
Commit IDs: b3c97528 aa02cd2d

+95 -100
+10 -10
include/linux/aio.h
··· 206 206 /* prototypes */ 207 207 extern unsigned aio_max_size; 208 208 209 - extern ssize_t FASTCALL(wait_on_sync_kiocb(struct kiocb *iocb)); 210 - extern int FASTCALL(aio_put_req(struct kiocb *iocb)); 211 - extern void FASTCALL(kick_iocb(struct kiocb *iocb)); 212 - extern int FASTCALL(aio_complete(struct kiocb *iocb, long res, long res2)); 213 - extern void FASTCALL(__put_ioctx(struct kioctx *ctx)); 209 + extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); 210 + extern int aio_put_req(struct kiocb *iocb); 211 + extern void kick_iocb(struct kiocb *iocb); 212 + extern int aio_complete(struct kiocb *iocb, long res, long res2); 213 + extern void __put_ioctx(struct kioctx *ctx); 214 214 struct mm_struct; 215 - extern void FASTCALL(exit_aio(struct mm_struct *mm)); 215 + extern void exit_aio(struct mm_struct *mm); 216 216 extern struct kioctx *lookup_ioctx(unsigned long ctx_id); 217 - extern int FASTCALL(io_submit_one(struct kioctx *ctx, 218 - struct iocb __user *user_iocb, struct iocb *iocb)); 217 + extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 218 + struct iocb *iocb); 219 219 220 220 /* semi private, but used by the 32bit emulations: */ 221 221 struct kioctx *lookup_ioctx(unsigned long ctx_id); 222 - int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 223 - struct iocb *iocb)); 222 + int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 223 + struct iocb *iocb); 224 224 225 225 #define get_ioctx(kioctx) do { \ 226 226 BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
+3 -3
include/linux/buffer_head.h
··· 144 144 * Declarations 145 145 */ 146 146 147 - void FASTCALL(mark_buffer_dirty(struct buffer_head *bh)); 147 + void mark_buffer_dirty(struct buffer_head *bh); 148 148 void init_buffer(struct buffer_head *, bh_end_io_t *, void *); 149 149 void set_bh_page(struct buffer_head *bh, 150 150 struct page *page, unsigned long offset); ··· 185 185 void invalidate_bh_lrus(void); 186 186 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); 187 187 void free_buffer_head(struct buffer_head * bh); 188 - void FASTCALL(unlock_buffer(struct buffer_head *bh)); 189 - void FASTCALL(__lock_buffer(struct buffer_head *bh)); 188 + void unlock_buffer(struct buffer_head *bh); 189 + void __lock_buffer(struct buffer_head *bh); 190 190 void ll_rw_block(int, int, struct buffer_head * bh[]); 191 191 int sync_dirty_buffer(struct buffer_head *bh); 192 192 int submit_bh(int, struct buffer_head *);
+8 -8
include/linux/file.h
··· 59 59 60 60 extern struct kmem_cache *filp_cachep; 61 61 62 - extern void FASTCALL(__fput(struct file *)); 63 - extern void FASTCALL(fput(struct file *)); 62 + extern void __fput(struct file *); 63 + extern void fput(struct file *); 64 64 65 65 struct file_operations; 66 66 struct vfsmount; ··· 77 77 fput(file); 78 78 } 79 79 80 - extern struct file * FASTCALL(fget(unsigned int fd)); 81 - extern struct file * FASTCALL(fget_light(unsigned int fd, int *fput_needed)); 82 - extern void FASTCALL(set_close_on_exec(unsigned int fd, int flag)); 80 + extern struct file *fget(unsigned int fd); 81 + extern struct file *fget_light(unsigned int fd, int *fput_needed); 82 + extern void set_close_on_exec(unsigned int fd, int flag); 83 83 extern void put_filp(struct file *); 84 84 extern int get_unused_fd(void); 85 85 extern int get_unused_fd_flags(int flags); 86 - extern void FASTCALL(put_unused_fd(unsigned int fd)); 86 + extern void put_unused_fd(unsigned int fd); 87 87 struct kmem_cache; 88 88 89 89 extern int expand_files(struct files_struct *, int nr); ··· 110 110 */ 111 111 #define fcheck(fd) fcheck_files(current->files, fd) 112 112 113 - extern void FASTCALL(fd_install(unsigned int fd, struct file * file)); 113 + extern void fd_install(unsigned int fd, struct file *file); 114 114 115 115 struct task_struct; 116 116 117 117 struct files_struct *get_files_struct(struct task_struct *); 118 - void FASTCALL(put_files_struct(struct files_struct *fs)); 118 + void put_files_struct(struct files_struct *fs); 119 119 void reset_files_struct(struct task_struct *, struct files_struct *); 120 120 121 121 extern struct kmem_cache *files_cachep;
+7 -8
include/linux/gfp.h
··· 172 172 static inline void arch_alloc_page(struct page *page, int order) { } 173 173 #endif 174 174 175 - extern struct page * 176 - FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *)); 175 + extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *); 177 176 178 177 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 179 178 unsigned int order) ··· 208 209 #endif 209 210 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 210 211 211 - extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order)); 212 - extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask)); 212 + extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 213 + extern unsigned long get_zeroed_page(gfp_t gfp_mask); 213 214 214 215 #define __get_free_page(gfp_mask) \ 215 216 __get_free_pages((gfp_mask),0) ··· 217 218 #define __get_dma_pages(gfp_mask, order) \ 218 219 __get_free_pages((gfp_mask) | GFP_DMA,(order)) 219 220 220 - extern void FASTCALL(__free_pages(struct page *page, unsigned int order)); 221 - extern void FASTCALL(free_pages(unsigned long addr, unsigned int order)); 222 - extern void FASTCALL(free_hot_page(struct page *page)); 223 - extern void FASTCALL(free_cold_page(struct page *page)); 221 + extern void __free_pages(struct page *page, unsigned int order); 222 + extern void free_pages(unsigned long addr, unsigned int order); 223 + extern void free_hot_page(struct page *page); 224 + extern void free_cold_page(struct page *page); 224 225 225 226 #define __free_page(page) __free_pages((page), 0) 226 227 #define free_page(addr) free_pages((addr),0)
+4 -4
include/linux/interrupt.h
··· 273 273 extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); 274 274 extern void softirq_init(void); 275 275 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) 276 - extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); 277 - extern void FASTCALL(raise_softirq(unsigned int nr)); 276 + extern void raise_softirq_irqoff(unsigned int nr); 277 + extern void raise_softirq(unsigned int nr); 278 278 279 279 280 280 /* Tasklets --- multithreaded analogue of BHs. ··· 341 341 #define tasklet_unlock(t) do { } while (0) 342 342 #endif 343 343 344 - extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t)); 344 + extern void __tasklet_schedule(struct tasklet_struct *t); 345 345 346 346 static inline void tasklet_schedule(struct tasklet_struct *t) 347 347 { ··· 349 349 __tasklet_schedule(t); 350 350 } 351 351 352 - extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t)); 352 + extern void __tasklet_hi_schedule(struct tasklet_struct *t); 353 353 354 354 static inline void tasklet_hi_schedule(struct tasklet_struct *t) 355 355 {
+2 -2
include/linux/mm.h
··· 786 786 int __set_page_dirty_no_writeback(struct page *page); 787 787 int redirty_page_for_writepage(struct writeback_control *wbc, 788 788 struct page *page); 789 - int FASTCALL(set_page_dirty(struct page *page)); 789 + int set_page_dirty(struct page *page); 790 790 int set_page_dirty_lock(struct page *page); 791 791 int clear_page_dirty_for_io(struct page *page); 792 792 ··· 829 829 830 830 int vma_wants_writenotify(struct vm_area_struct *vma); 831 831 832 - extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)); 832 + extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); 833 833 834 834 #ifdef __PAGETABLE_PUD_FOLDED 835 835 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+1 -1
include/linux/mutex-debug.h
··· 18 18 __mutex_init((mutex), #mutex, &__key); \ 19 19 } while (0) 20 20 21 - extern void FASTCALL(mutex_destroy(struct mutex *lock)); 21 + extern void mutex_destroy(struct mutex *lock); 22 22 23 23 #endif
+3 -3
include/linux/namei.h
··· 62 62 #define LOOKUP_ACCESS (0x0400) 63 63 #define LOOKUP_CHDIR (0x0800) 64 64 65 - extern int FASTCALL(__user_walk(const char __user *, unsigned, struct nameidata *)); 66 - extern int FASTCALL(__user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *)); 65 + extern int __user_walk(const char __user *, unsigned, struct nameidata *); 66 + extern int __user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *); 67 67 #define user_path_walk(name,nd) \ 68 68 __user_walk_fd(AT_FDCWD, name, LOOKUP_FOLLOW, nd) 69 69 #define user_path_walk_link(name,nd) \ 70 70 __user_walk_fd(AT_FDCWD, name, 0, nd) 71 - extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *)); 71 + extern int path_lookup(const char *, unsigned, struct nameidata *); 72 72 extern int vfs_path_lookup(struct dentry *, struct vfsmount *, 73 73 const char *, unsigned int, struct nameidata *); 74 74 extern void path_release(struct nameidata *);
+1 -1
include/linux/netdevice.h
··· 322 322 NAPI_STATE_DISABLE, /* Disable pending */ 323 323 }; 324 324 325 - extern void FASTCALL(__napi_schedule(struct napi_struct *n)); 325 + extern void __napi_schedule(struct napi_struct *n); 326 326 327 327 static inline int napi_disable_pending(struct napi_struct *n) 328 328 {
+5 -5
include/linux/pagemap.h
··· 156 156 return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); 157 157 } 158 158 159 - extern void FASTCALL(__lock_page(struct page *page)); 160 - extern int FASTCALL(__lock_page_killable(struct page *page)); 161 - extern void FASTCALL(__lock_page_nosync(struct page *page)); 162 - extern void FASTCALL(unlock_page(struct page *page)); 159 + extern void __lock_page(struct page *page); 160 + extern int __lock_page_killable(struct page *page); 161 + extern void __lock_page_nosync(struct page *page); 162 + extern void unlock_page(struct page *page); 163 163 164 164 /* 165 165 * lock_page may only be called if we have the page's inode pinned. ··· 199 199 * This is exported only for wait_on_page_locked/wait_on_page_writeback. 200 200 * Never use this directly! 201 201 */ 202 - extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr)); 202 + extern void wait_on_page_bit(struct page *page, int bit_nr); 203 203 204 204 /* 205 205 * Wait for a page to be unlocked.
+10 -11
include/linux/pid.h
··· 79 79 return pid; 80 80 } 81 81 82 - extern void FASTCALL(put_pid(struct pid *pid)); 83 - extern struct task_struct *FASTCALL(pid_task(struct pid *pid, enum pid_type)); 84 - extern struct task_struct *FASTCALL(get_pid_task(struct pid *pid, 85 - enum pid_type)); 82 + extern void put_pid(struct pid *pid); 83 + extern struct task_struct *pid_task(struct pid *pid, enum pid_type); 84 + extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); 86 85 87 86 extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); 88 87 ··· 89 90 * attach_pid() and detach_pid() must be called with the tasklist_lock 90 91 * write-held. 91 92 */ 92 - extern int FASTCALL(attach_pid(struct task_struct *task, 93 - enum pid_type type, struct pid *pid)); 94 - extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type)); 95 - extern void FASTCALL(transfer_pid(struct task_struct *old, 96 - struct task_struct *new, enum pid_type)); 93 + extern int attach_pid(struct task_struct *task, enum pid_type type, 94 + struct pid *pid); 95 + extern void detach_pid(struct task_struct *task, enum pid_type); 96 + extern void transfer_pid(struct task_struct *old, struct task_struct *new, 97 + enum pid_type); 97 98 98 99 struct pid_namespace; 99 100 extern struct pid_namespace init_pid_ns; ··· 108 109 * 109 110 * see also find_task_by_pid() set in include/linux/sched.h 110 111 */ 111 - extern struct pid *FASTCALL(find_pid_ns(int nr, struct pid_namespace *ns)); 112 + extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); 112 113 extern struct pid *find_vpid(int nr); 113 114 extern struct pid *find_pid(int nr); 114 115 ··· 120 121 int next_pidmap(struct pid_namespace *pid_ns, int last); 121 122 122 123 extern struct pid *alloc_pid(struct pid_namespace *ns); 123 - extern void FASTCALL(free_pid(struct pid *pid)); 124 + extern void free_pid(struct pid *pid); 124 125 125 126 /* 126 127 * the helpers to get the pid's id seen from different namespaces
+8 -8
include/linux/rwsem-spinlock.h
··· 60 60 __init_rwsem((sem), #sem, &__key); \ 61 61 } while (0) 62 62 63 - extern void FASTCALL(__down_read(struct rw_semaphore *sem)); 64 - extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem)); 65 - extern void FASTCALL(__down_write(struct rw_semaphore *sem)); 66 - extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass)); 67 - extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem)); 68 - extern void FASTCALL(__up_read(struct rw_semaphore *sem)); 69 - extern void FASTCALL(__up_write(struct rw_semaphore *sem)); 70 - extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem)); 63 + extern void __down_read(struct rw_semaphore *sem); 64 + extern int __down_read_trylock(struct rw_semaphore *sem); 65 + extern void __down_write(struct rw_semaphore *sem); 66 + extern void __down_write_nested(struct rw_semaphore *sem, int subclass); 67 + extern int __down_write_trylock(struct rw_semaphore *sem); 68 + extern void __up_read(struct rw_semaphore *sem); 69 + extern void __up_write(struct rw_semaphore *sem); 70 + extern void __downgrade_write(struct rw_semaphore *sem); 71 71 72 72 static inline int rwsem_is_locked(struct rw_semaphore *sem) 73 73 {
+7 -7
include/linux/sched.h
··· 323 323 extern int in_sched_functions(unsigned long addr); 324 324 325 325 #define MAX_SCHEDULE_TIMEOUT LONG_MAX 326 - extern signed long FASTCALL(schedule_timeout(signed long timeout)); 326 + extern signed long schedule_timeout(signed long timeout); 327 327 extern signed long schedule_timeout_interruptible(signed long timeout); 328 328 extern signed long schedule_timeout_killable(signed long timeout); 329 329 extern signed long schedule_timeout_uninterruptible(signed long timeout); ··· 1648 1648 1649 1649 extern void do_timer(unsigned long ticks); 1650 1650 1651 - extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state)); 1652 - extern int FASTCALL(wake_up_process(struct task_struct * tsk)); 1653 - extern void FASTCALL(wake_up_new_task(struct task_struct * tsk, 1654 - unsigned long clone_flags)); 1651 + extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1652 + extern int wake_up_process(struct task_struct *tsk); 1653 + extern void wake_up_new_task(struct task_struct *tsk, 1654 + unsigned long clone_flags); 1655 1655 #ifdef CONFIG_SMP 1656 1656 extern void kick_process(struct task_struct *tsk); 1657 1657 #else ··· 1741 1741 extern struct mm_struct * mm_alloc(void); 1742 1742 1743 1743 /* mmdrop drops the mm and the page tables */ 1744 - extern void FASTCALL(__mmdrop(struct mm_struct *)); 1744 + extern void __mmdrop(struct mm_struct *); 1745 1745 static inline void mmdrop(struct mm_struct * mm) 1746 1746 { 1747 1747 if (unlikely(atomic_dec_and_test(&mm->mm_count))) ··· 1925 1925 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); 1926 1926 } 1927 1927 1928 - extern int FASTCALL(__fatal_signal_pending(struct task_struct *p)); 1928 + extern int __fatal_signal_pending(struct task_struct *p); 1929 1929 1930 1930 static inline int fatal_signal_pending(struct task_struct *p) 1931 1931 {
+4 -4
include/linux/swap.h
··· 171 171 172 172 173 173 /* linux/mm/swap.c */ 174 - extern void FASTCALL(lru_cache_add(struct page *)); 175 - extern void FASTCALL(lru_cache_add_active(struct page *)); 176 - extern void FASTCALL(activate_page(struct page *)); 177 - extern void FASTCALL(mark_page_accessed(struct page *)); 174 + extern void lru_cache_add(struct page *); 175 + extern void lru_cache_add_active(struct page *); 176 + extern void activate_page(struct page *); 177 + extern void mark_page_accessed(struct page *); 178 178 extern void lru_add_drain(void); 179 179 extern int lru_add_drain_all(void); 180 180 extern int rotate_reclaimable_page(struct page *page);
+16 -18
include/linux/wait.h
··· 117 117 */ 118 118 #define is_sync_wait(wait) (!(wait) || ((wait)->private)) 119 119 120 - extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); 121 - extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); 122 - extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); 120 + extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 121 + extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); 122 + extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 123 123 124 124 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) 125 125 { ··· 141 141 list_del(&old->task_list); 142 142 } 143 143 144 - void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key)); 145 - extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode)); 146 - extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)); 147 - void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int)); 148 - int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned)); 149 - int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned)); 150 - void FASTCALL(wake_up_bit(void *, int)); 151 - int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned)); 152 - int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned)); 153 - wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int)); 144 + void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 145 + extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); 146 + extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 147 + void __wake_up_bit(wait_queue_head_t *, void *, int); 148 + int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 149 + int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 150 + void wake_up_bit(void *, int); 151 + int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); 152 + int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); 153 + wait_queue_head_t *bit_waitqueue(void *, int); 154 154 155 155 #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) 156 156 #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) ··· 437 437 /* 438 438 * Waitqueues which are removed from the waitqueue_head at wakeup time 439 439 */ 440 - void FASTCALL(prepare_to_wait(wait_queue_head_t *q, 441 - wait_queue_t *wait, int state)); 442 - void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q, 443 - wait_queue_t *wait, int state)); 444 - void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait)); 440 + void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); 441 + void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); 442 + void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); 445 443 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 446 444 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 447 445
+6 -7
include/linux/workqueue.h
··· 178 178 179 179 extern void destroy_workqueue(struct workqueue_struct *wq); 180 180 181 - extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); 182 - extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, 183 - struct delayed_work *work, unsigned long delay)); 181 + extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); 182 + extern int queue_delayed_work(struct workqueue_struct *wq, 183 + struct delayed_work *work, unsigned long delay); 184 184 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 185 185 struct delayed_work *work, unsigned long delay); 186 186 187 - extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); 187 + extern void flush_workqueue(struct workqueue_struct *wq); 188 188 extern void flush_scheduled_work(void); 189 189 190 - extern int FASTCALL(schedule_work(struct work_struct *work)); 191 - extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, 192 - unsigned long delay)); 190 + extern int schedule_work(struct work_struct *work); 191 + extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); 193 192 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, 194 193 unsigned long delay); 195 194 extern int schedule_on_each_cpu(work_func_t func);