Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

SUNRPC: Add function rpc_sleep_on_timeout()

Clean up the RPC task sleep interfaces by replacing the task->tk_timeout
'hidden parameter' to rpc_sleep_on() with a new function that takes an
absolute timeout.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Authored by Trond Myklebust; committed by Anna Schumaker.
6b2e6856 8357a9b6

+103 -42
+12 -6
fs/nfs/nfs4proc.c
··· 978 978 if (res->sr_slot != NULL) 979 979 goto out_start; 980 980 981 - if (session) { 981 + if (session) 982 982 tbl = &session->fc_slot_table; 983 - task->tk_timeout = 0; 984 - } 985 983 986 984 spin_lock(&tbl->slot_tbl_lock); 987 985 /* The state manager will wait until the slot table is empty */ ··· 988 990 989 991 slot = nfs4_alloc_slot(tbl); 990 992 if (IS_ERR(slot)) { 991 - /* Try again in 1/4 second */ 992 993 if (slot == ERR_PTR(-ENOMEM)) 993 - task->tk_timeout = HZ >> 2; 994 + goto out_sleep_timeout; 994 995 goto out_sleep; 995 996 } 996 997 spin_unlock(&tbl->slot_tbl_lock); ··· 1001 1004 nfs41_sequence_res_init(res); 1002 1005 rpc_call_start(task); 1003 1006 return 0; 1004 - 1007 + out_sleep_timeout: 1008 + /* Try again in 1/4 second */ 1009 + if (args->sa_privileged) 1010 + rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task, 1011 + jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED); 1012 + else 1013 + rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task, 1014 + NULL, jiffies + (HZ >> 2)); 1015 + spin_unlock(&tbl->slot_tbl_lock); 1016 + return -EAGAIN; 1005 1017 out_sleep: 1006 1018 if (args->sa_privileged) 1007 1019 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
+8 -1
include/linux/sunrpc/sched.h
··· 35 35 struct list_head list; /* wait queue links */ 36 36 struct list_head links; /* Links to related tasks */ 37 37 struct list_head timer_list; /* Timer list */ 38 - unsigned long expires; 39 38 }; 40 39 41 40 /* ··· 226 227 void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); 227 228 void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); 228 229 void rpc_destroy_wait_queue(struct rpc_wait_queue *); 230 + void rpc_sleep_on_timeout(struct rpc_wait_queue *queue, 231 + struct rpc_task *task, 232 + rpc_action action, 233 + unsigned long timeout); 229 234 void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, 230 235 rpc_action action); 236 + void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue, 237 + struct rpc_task *task, 238 + unsigned long timeout, 239 + int priority); 231 240 void rpc_sleep_on_priority(struct rpc_wait_queue *, 232 241 struct rpc_task *, 233 242 int priority);
+2 -3
net/sunrpc/auth_gss/auth_gss.c
··· 581 581 /* XXX: warning on the first, under the assumption we 582 582 * shouldn't normally hit this case on a refresh. */ 583 583 warn_gssd(); 584 - task->tk_timeout = 15*HZ; 585 - rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL); 584 + rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue, 585 + task, NULL, jiffies + (15 * HZ)); 586 586 err = -EAGAIN; 587 587 goto out; 588 588 } ··· 595 595 if (gss_cred->gc_upcall != NULL) 596 596 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); 597 597 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { 598 - task->tk_timeout = 0; 599 598 gss_cred->gc_upcall = gss_msg; 600 599 /* gss_upcall_callback will release the reference to gss_upcall_msg */ 601 600 refcount_inc(&gss_msg->count);
-1
net/sunrpc/clnt.c
··· 1851 1851 if (!xprt_prepare_transmit(task)) 1852 1852 return; 1853 1853 1854 - task->tk_timeout = xprt->bind_timeout; 1855 1854 xprt->ops->rpcbind(task); 1856 1855 } 1857 1856
+2 -1
net/sunrpc/rpcb_clnt.c
··· 694 694 695 695 /* Put self on the wait queue to ensure we get notified if 696 696 * some other task is already attempting to bind the port */ 697 - rpc_sleep_on(&xprt->binding, task, NULL); 697 + rpc_sleep_on_timeout(&xprt->binding, task, 698 + NULL, jiffies + xprt->bind_timeout); 698 699 699 700 if (xprt_test_and_set_binding(xprt)) { 700 701 dprintk("RPC: %5u %s: waiting for another binder\n",
+58 -15
net/sunrpc/sched.c
··· 66 66 static void 67 67 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) 68 68 { 69 - if (task->tk_timeout == 0) 69 + if (list_empty(&task->u.tk_wait.timer_list)) 70 70 return; 71 71 dprintk("RPC: %5u disabling timer\n", task->tk_pid); 72 72 task->tk_timeout = 0; ··· 86 86 * Set up a timer for the current task. 87 87 */ 88 88 static void 89 - __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) 89 + __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, 90 + unsigned long timeout) 90 91 { 91 - if (!task->tk_timeout) 92 - return; 93 - 94 92 dprintk("RPC: %5u setting alarm for %u ms\n", 95 - task->tk_pid, jiffies_to_msecs(task->tk_timeout)); 93 + task->tk_pid, jiffies_to_msecs(timeout - jiffies)); 96 94 97 - task->u.tk_wait.expires = jiffies + task->tk_timeout; 98 - if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires)) 99 - rpc_set_queue_timer(queue, task->u.tk_wait.expires); 95 + task->tk_timeout = timeout; 96 + if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires)) 97 + rpc_set_queue_timer(queue, timeout); 100 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); 101 99 } 102 100 ··· 186 188 if (RPC_IS_QUEUED(task)) 187 189 return; 188 190 191 + INIT_LIST_HEAD(&task->u.tk_wait.timer_list); 189 192 if (RPC_IS_PRIORITY(queue)) 190 193 __rpc_add_wait_queue_priority(queue, task, queue_priority); 191 194 else if (RPC_IS_SWAPPER(task)) ··· 370 371 371 372 __rpc_add_wait_queue(q, task, queue_priority); 372 373 373 - __rpc_add_timer(q, task); 374 + } 375 + 376 + static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, 377 + struct rpc_task *task, unsigned long timeout, 378 + unsigned char queue_priority) 379 + { 380 + if (time_is_after_jiffies(timeout)) { 381 + __rpc_sleep_on_priority(q, task, queue_priority); 382 + __rpc_add_timer(q, task, timeout); 383 + } else 384 + task->tk_status = -ETIMEDOUT; 
374 385 } 375 386 376 387 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action) ··· 400 391 return true; 401 392 } 402 393 403 - void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, 404 - rpc_action action) 394 + void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task, 395 + rpc_action action, unsigned long timeout) 405 396 { 406 397 if (!rpc_sleep_check_activated(task)) 407 398 return; ··· 412 403 * Protect the queue operations. 413 404 */ 414 405 spin_lock_bh(&q->lock); 406 + __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority); 407 + spin_unlock_bh(&q->lock); 408 + } 409 + EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout); 410 + 411 + void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, 412 + rpc_action action) 413 + { 414 + if (!rpc_sleep_check_activated(task)) 415 + return; 416 + 417 + rpc_set_tk_callback(task, action); 418 + 419 + WARN_ON_ONCE(task->tk_timeout != 0); 420 + /* 421 + * Protect the queue operations. 422 + */ 423 + spin_lock_bh(&q->lock); 415 424 __rpc_sleep_on_priority(q, task, task->tk_priority); 416 425 spin_unlock_bh(&q->lock); 417 426 } 418 427 EXPORT_SYMBOL_GPL(rpc_sleep_on); 428 + 429 + void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, 430 + struct rpc_task *task, unsigned long timeout, int priority) 431 + { 432 + if (!rpc_sleep_check_activated(task)) 433 + return; 434 + 435 + priority -= RPC_PRIORITY_LOW; 436 + /* 437 + * Protect the queue operations. 
438 + */ 439 + spin_lock_bh(&q->lock); 440 + __rpc_sleep_on_priority_timeout(q, task, timeout, priority); 441 + spin_unlock_bh(&q->lock); 442 + } 443 + EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout); 419 444 420 445 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, 421 446 int priority) ··· 457 414 if (!rpc_sleep_check_activated(task)) 458 415 return; 459 416 417 + WARN_ON_ONCE(task->tk_timeout != 0); 460 418 priority -= RPC_PRIORITY_LOW; 461 419 /* 462 420 * Protect the queue operations. ··· 755 711 spin_lock(&queue->lock); 756 712 expires = now = jiffies; 757 713 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { 758 - timeo = task->u.tk_wait.expires; 714 + timeo = task->tk_timeout; 759 715 if (time_after_eq(now, timeo)) { 760 716 dprintk("RPC: %5u timeout\n", task->tk_pid); 761 717 task->tk_status = -ETIMEDOUT; ··· 781 737 */ 782 738 void rpc_delay(struct rpc_task *task, unsigned long delay) 783 739 { 784 - task->tk_timeout = delay; 785 - rpc_sleep_on(&delay_queue, task, __rpc_atrun); 740 + rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay); 786 741 } 787 742 EXPORT_SYMBOL_GPL(rpc_delay); 788 743
+21 -15
net/sunrpc/xprt.c
··· 209 209 out_sleep: 210 210 dprintk("RPC: %5u failed to lock transport %p\n", 211 211 task->tk_pid, xprt); 212 - task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0; 213 212 task->tk_status = -EAGAIN; 214 - rpc_sleep_on(&xprt->sending, task, NULL); 213 + if (RPC_IS_SOFT(task)) 214 + rpc_sleep_on_timeout(&xprt->sending, task, NULL, 215 + jiffies + req->rq_timeout); 216 + else 217 + rpc_sleep_on(&xprt->sending, task, NULL); 215 218 return 0; 216 219 } 217 220 EXPORT_SYMBOL_GPL(xprt_reserve_xprt); ··· 276 273 xprt_clear_locked(xprt); 277 274 out_sleep: 278 275 dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); 279 - task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0; 280 276 task->tk_status = -EAGAIN; 281 - rpc_sleep_on(&xprt->sending, task, NULL); 277 + if (RPC_IS_SOFT(task)) 278 + rpc_sleep_on_timeout(&xprt->sending, task, NULL, 279 + jiffies + req->rq_timeout); 280 + else 281 + rpc_sleep_on(&xprt->sending, task, NULL); 282 282 return 0; 283 283 } 284 284 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); ··· 793 787 xprt->ops->close(xprt); 794 788 795 789 if (!xprt_connected(xprt)) { 796 - task->tk_timeout = task->tk_rqstp->rq_timeout; 797 790 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; 798 - rpc_sleep_on(&xprt->pending, task, NULL); 791 + rpc_sleep_on_timeout(&xprt->pending, task, NULL, 792 + jiffies + task->tk_rqstp->rq_timeout); 799 793 800 794 if (test_bit(XPRT_CLOSING, &xprt->state)) 801 795 return; ··· 1086 1080 { 1087 1081 struct rpc_rqst *req = task->tk_rqstp; 1088 1082 1089 - task->tk_timeout = req->rq_timeout; 1090 - rpc_sleep_on(&req->rq_xprt->pending, task, xprt_timer); 1083 + rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, 1084 + jiffies + req->rq_timeout); 1091 1085 } 1092 1086 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def); 1093 1087 ··· 1105 1099 struct rpc_rtt *rtt = clnt->cl_rtt; 1106 1100 struct rpc_rqst *req = task->tk_rqstp; 1107 1101 unsigned long max_timeout = 
clnt->cl_timeout->to_maxval; 1102 + unsigned long timeout; 1108 1103 1109 - task->tk_timeout = rpc_calc_rto(rtt, timer); 1110 - task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; 1111 - if (task->tk_timeout > max_timeout || task->tk_timeout == 0) 1112 - task->tk_timeout = max_timeout; 1113 - rpc_sleep_on(&req->rq_xprt->pending, task, xprt_timer); 1104 + timeout = rpc_calc_rto(rtt, timer); 1105 + timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; 1106 + if (timeout > max_timeout || timeout == 0) 1107 + timeout = max_timeout; 1108 + rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer, 1109 + jiffies + timeout); 1114 1110 } 1115 1111 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt); 1116 1112 ··· 1664 1656 if (task->tk_rqstp != NULL) 1665 1657 return; 1666 1658 1667 - task->tk_timeout = 0; 1668 1659 task->tk_status = -EAGAIN; 1669 1660 if (!xprt_throttle_congested(xprt, task)) 1670 1661 xprt_do_reserve(xprt, task); ··· 1686 1679 if (task->tk_rqstp != NULL) 1687 1680 return; 1688 1681 1689 - task->tk_timeout = 0; 1690 1682 task->tk_status = -EAGAIN; 1691 1683 xprt_do_reserve(xprt, task); 1692 1684 }