Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: lustre: remove l_wait_event() and related code

These macros are no longer used, so they can
be removed.

Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Patrick Farrell <paf@cray.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by NeilBrown and committed by Greg Kroah-Hartman
058643de 1c6ce082

-249
-249
drivers/staging/lustre/lustre/include/lustre_lib.h
··· 76 76 77 77 void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id); 78 78 79 - /* 80 - * l_wait_event is a flexible sleeping function, permitting simple caller 81 - * configuration of interrupt and timeout sensitivity along with actions to 82 - * be performed in the event of either exception. 83 - * 84 - * The first form of usage looks like this: 85 - * 86 - * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler, 87 - * intr_handler, callback_data); 88 - * rc = l_wait_event(waitq, condition, &lwi); 89 - * 90 - * l_wait_event() makes the current process wait on 'waitq' until 'condition' 91 - * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It 92 - * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before 93 - * 'condition' becomes true, it optionally calls the specified 'intr_handler' 94 - * if not NULL, and returns -EINTR. 95 - * 96 - * If a non-zero timeout is specified, signals are ignored until the timeout 97 - * has expired. At this time, if 'timeout_handler' is not NULL it is called. 98 - * If it returns FALSE l_wait_event() continues to wait as described above with 99 - * signals enabled. Otherwise it returns -ETIMEDOUT. 100 - * 101 - * LWI_INTR(intr_handler, callback_data) is shorthand for 102 - * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data) 103 - * 104 - * The second form of usage looks like this: 105 - * 106 - * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler); 107 - * rc = l_wait_event(waitq, condition, &lwi); 108 - * 109 - * This form is the same as the first except that it COMPLETELY IGNORES 110 - * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if 111 - * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that 112 - * can unblock the current process is 'condition' becoming TRUE. 
113 - * 114 - * Another form of usage is: 115 - * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval, 116 - * timeout_handler); 117 - * rc = l_wait_event(waitq, condition, &lwi); 118 - * This is the same as previous case, but condition is checked once every 119 - * 'interval' jiffies (if non-zero). 120 - * 121 - * Subtle synchronization point: this macro does *not* necessary takes 122 - * wait-queue spin-lock before returning, and, hence, following idiom is safe 123 - * ONLY when caller provides some external locking: 124 - * 125 - * Thread1 Thread2 126 - * 127 - * l_wait_event(&obj->wq, ....); (1) 128 - * 129 - * wake_up(&obj->wq): (2) 130 - * spin_lock(&q->lock); (2.1) 131 - * __wake_up_common(q, ...); (2.2) 132 - * spin_unlock(&q->lock, flags); (2.3) 133 - * 134 - * kfree(obj); (3) 135 - * 136 - * As l_wait_event() may "short-cut" execution and return without taking 137 - * wait-queue spin-lock, some additional synchronization is necessary to 138 - * guarantee that step (3) can begin only after (2.3) finishes. 139 - * 140 - * XXX nikita: some ptlrpc daemon threads have races of that sort. 
141 - * 142 - */ 143 - 144 - #define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1)) 145 - 146 - struct l_wait_info { 147 - long lwi_timeout; 148 - long lwi_interval; 149 - int lwi_allow_intr; 150 - int (*lwi_on_timeout)(void *); 151 - void (*lwi_on_signal)(void *); 152 - void *lwi_cb_data; 153 - }; 154 - 155 - /* NB: LWI_TIMEOUT ignores signals completely */ 156 - #define LWI_TIMEOUT(time, cb, data) \ 157 - ((struct l_wait_info) { \ 158 - .lwi_timeout = time, \ 159 - .lwi_on_timeout = cb, \ 160 - .lwi_cb_data = data, \ 161 - .lwi_interval = 0, \ 162 - .lwi_allow_intr = 0 \ 163 - }) 164 - 165 - #define LWI_TIMEOUT_INTERVAL(time, interval, cb, data) \ 166 - ((struct l_wait_info) { \ 167 - .lwi_timeout = time, \ 168 - .lwi_on_timeout = cb, \ 169 - .lwi_cb_data = data, \ 170 - .lwi_interval = interval, \ 171 - .lwi_allow_intr = 0 \ 172 - }) 173 - 174 - #define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data) \ 175 - ((struct l_wait_info) { \ 176 - .lwi_timeout = time, \ 177 - .lwi_on_timeout = time_cb, \ 178 - .lwi_on_signal = sig_cb, \ 179 - .lwi_cb_data = data, \ 180 - .lwi_interval = 0, \ 181 - .lwi_allow_intr = 0 \ 182 - }) 183 - 184 - #define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data) \ 185 - ((struct l_wait_info) { \ 186 - .lwi_timeout = time, \ 187 - .lwi_on_timeout = time_cb, \ 188 - .lwi_on_signal = sig_cb, \ 189 - .lwi_cb_data = data, \ 190 - .lwi_interval = 0, \ 191 - .lwi_allow_intr = 1 \ 192 - }) 193 - 194 - #define LWI_INTR(cb, data) LWI_TIMEOUT_INTR(0, NULL, cb, data) 195 - 196 79 #define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | \ 197 80 sigmask(SIGTERM) | sigmask(SIGQUIT) | \ 198 81 sigmask(SIGALRM)) ··· 83 200 { 84 201 return signal_pending(p) && sigtestsetmask(&p->pending.signal, LUSTRE_FATAL_SIGS); 85 202 } 86 - 87 - /** 88 - * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively 89 - * waiting threads, which is not always desirable because all threads will 90 - * be waken up again and again, even user only 
needs a few of them to be 91 - * active most time. This is not good for performance because cache can 92 - * be polluted by different threads. 93 - * 94 - * LIFO list can resolve this problem because we always wakeup the most 95 - * recent active thread by default. 96 - * 97 - * NB: please don't call non-exclusive & exclusive wait on the same 98 - * waitq if add_wait_queue_exclusive_head is used. 99 - */ 100 - #define add_wait_queue_exclusive_head(waitq, link) \ 101 - { \ 102 - unsigned long flags; \ 103 - \ 104 - spin_lock_irqsave(&((waitq)->lock), flags); \ 105 - __add_wait_queue_exclusive(waitq, link); \ 106 - spin_unlock_irqrestore(&((waitq)->lock), flags); \ 107 - } 108 - 109 - /* 110 - * wait for @condition to become true, but no longer than timeout, specified 111 - * by @info. 112 - */ 113 - #define __l_wait_event(wq, condition, info, ret, l_add_wait) \ 114 - do { \ 115 - wait_queue_entry_t __wait; \ 116 - long __timeout = info->lwi_timeout; \ 117 - sigset_t __blocked; \ 118 - int __allow_intr = info->lwi_allow_intr; \ 119 - \ 120 - ret = 0; \ 121 - if (condition) \ 122 - break; \ 123 - \ 124 - init_waitqueue_entry(&__wait, current); \ 125 - l_add_wait(&wq, &__wait); \ 126 - \ 127 - /* Block all signals (just the non-fatal ones if no timeout). */ \ 128 - if (info->lwi_on_signal && (__timeout == 0 || __allow_intr)) \ 129 - __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \ 130 - else \ 131 - __blocked = cfs_block_sigsinv(0); \ 132 - \ 133 - for (;;) { \ 134 - if (condition) \ 135 - break; \ 136 - \ 137 - set_current_state(TASK_INTERRUPTIBLE); \ 138 - \ 139 - if (__timeout == 0) { \ 140 - schedule(); \ 141 - } else { \ 142 - long interval = info->lwi_interval ? 
\ 143 - min_t(long, \ 144 - info->lwi_interval, __timeout) : \ 145 - __timeout; \ 146 - long remaining = schedule_timeout(interval);\ 147 - __timeout = cfs_time_sub(__timeout, \ 148 - cfs_time_sub(interval, remaining));\ 149 - if (__timeout == 0) { \ 150 - if (!info->lwi_on_timeout || \ 151 - info->lwi_on_timeout(info->lwi_cb_data)) { \ 152 - ret = -ETIMEDOUT; \ 153 - break; \ 154 - } \ 155 - /* Take signals after the timeout expires. */ \ 156 - if (info->lwi_on_signal) \ 157 - (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\ 158 - } \ 159 - } \ 160 - \ 161 - set_current_state(TASK_RUNNING); \ 162 - \ 163 - if (condition) \ 164 - break; \ 165 - if (signal_pending(current)) { \ 166 - if (info->lwi_on_signal && \ 167 - (__timeout == 0 || __allow_intr)) { \ 168 - if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \ 169 - info->lwi_on_signal(info->lwi_cb_data);\ 170 - ret = -EINTR; \ 171 - break; \ 172 - } \ 173 - /* We have to do this here because some signals */ \ 174 - /* are not blockable - ie from strace(1). */ \ 175 - /* In these cases we want to schedule_timeout() */ \ 176 - /* again, because we don't want that to return */ \ 177 - /* -EINTR when the RPC actually succeeded. */ \ 178 - /* the recalc_sigpending() below will deliver the */ \ 179 - /* signal properly. 
*/ \ 180 - cfs_clear_sigpending(); \ 181 - } \ 182 - } \ 183 - \ 184 - cfs_restore_sigs(__blocked); \ 185 - \ 186 - remove_wait_queue(&wq, &__wait); \ 187 - } while (0) 188 - 189 - #define l_wait_event(wq, condition, info) \ 190 - ({ \ 191 - int __ret; \ 192 - struct l_wait_info *__info = (info); \ 193 - \ 194 - __l_wait_event(wq, condition, __info, \ 195 - __ret, add_wait_queue); \ 196 - __ret; \ 197 - }) 198 - 199 - #define l_wait_event_exclusive(wq, condition, info) \ 200 - ({ \ 201 - int __ret; \ 202 - struct l_wait_info *__info = (info); \ 203 - \ 204 - __l_wait_event(wq, condition, __info, \ 205 - __ret, add_wait_queue_exclusive); \ 206 - __ret; \ 207 - }) 208 - 209 - #define l_wait_event_exclusive_head(wq, condition, info) \ 210 - ({ \ 211 - int __ret; \ 212 - struct l_wait_info *__info = (info); \ 213 - \ 214 - __l_wait_event(wq, condition, __info, \ 215 - __ret, add_wait_queue_exclusive_head); \ 216 - __ret; \ 217 - }) 218 203 219 204 /** @} lib */ 220 205