/* include/linux/poll.h — Linux kernel v2.6.39 (3.8 kB) */
#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H

#include <asm/poll.h>

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>

extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
/* Stack budget left over for the on-stack poll_table_entry array below. */
#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)

struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

typedef struct poll_table_struct {
	poll_queue_proc qproc;		/* callback invoked by poll_wait() */
	unsigned long key;		/* event mask of interest; ~0UL = all */
} poll_table;

/*
 * Called by f_op->poll implementations: hand @wait_address to the table's
 * qproc callback so the caller can be queued on it.  A NULL table or a
 * NULL wait queue makes this a no-op.
 */
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
	if (p && wait_address)
		p->qproc(filp, wait_address, p);
}

/* Initialize a poll_table with the given queueing callback. */
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->qproc = qproc;
	pt->key   = ~0UL; /* all events enabled */
}

struct poll_table_entry {
	struct file *filp;
	unsigned long key;
	wait_queue_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;		/* next free slot in inline_entries */
	/* on-stack storage for the first N_INLINE_POLL_ENTRIES entries */
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack);
extern long select_estimate_accuracy(struct timespec *tv);


/* Sleep with no timeout: poll_schedule_timeout() with NULL expiry. */
static inline int poll_schedule(struct poll_wqueues *pwq, int state)
{
	return poll_schedule_timeout(pwq, state, NULL, 0);
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG (8*sizeof(long))
#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 *
 * Copy the first @nr bits of the user fd_set @ufdset into kernel buffer
 * @fdset; a NULL @ufdset zero-fills the buffer instead.  Returns 0 on
 * success, -EFAULT if the user copy faults.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

/*
 * Copy a kernel result fd_set back to user space.  Returns the number of
 * bytes left uncopied (0 on success); a NULL @ufdset is a no-op.
 */
static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

/* Clear the first @nr bits of a kernel fd_set buffer. */
static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
		       struct timespec *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time);

extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);

#endif /* KERNEL */

#endif /* _LINUX_POLL_H */