Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

SUNRPC: Refactor rpc_sleep_on()

rpc_sleep_on() does not need to set the task->tk_callback under the
queue lock, so move that out.
Also refactor the check for whether the task is active.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

authored by

Trond Myklebust and committed by
Anna Schumaker
87150aae 8ba6a92d

+25 -17
+25 -17
net/sunrpc/sched.c
@@ -362,7 +362,6 @@
  */
 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 		struct rpc_task *task,
-		rpc_action action,
 		unsigned char queue_priority)
 {
 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
@@ -371,21 +372,33 @@
 
 	__rpc_add_wait_queue(q, task, queue_priority);
 
-	WARN_ON_ONCE(task->tk_callback != NULL);
-	task->tk_callback = action;
 	__rpc_add_timer(q, task);
+}
+
+static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
+{
+	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
+		task->tk_callback = action;
+}
+
+static bool rpc_sleep_check_activated(struct rpc_task *task)
+{
+	/* We shouldn't ever put an inactive task to sleep */
+	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
+		task->tk_status = -EIO;
+		rpc_put_task_async(task);
+		return false;
+	}
+	return true;
 }
 
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 		rpc_action action)
 {
-	/* We shouldn't ever put an inactive task to sleep */
-	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-	if (!RPC_IS_ACTIVATED(task)) {
-		task->tk_status = -EIO;
-		rpc_put_task_async(task);
+	if (!rpc_sleep_check_activated(task))
 		return;
-	}
+
+	rpc_set_tk_callback(task, action);
 
 	/*
 	 * Protect the queue operations.
 	 */
 	spin_lock_bh(&q->lock);
-	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
+	__rpc_sleep_on_priority(q, task, task->tk_priority);
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
@@ -400,20 +411,16 @@
 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 		rpc_action action, int priority)
 {
-	/* We shouldn't ever put an inactive task to sleep */
-	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-	if (!RPC_IS_ACTIVATED(task)) {
-		task->tk_status = -EIO;
-		rpc_put_task_async(task);
+	if (!rpc_sleep_check_activated(task))
 		return;
-	}
+
+	rpc_set_tk_callback(task, action);
 
 	/*
 	 * Protect the queue operations.
 	 */
 	spin_lock_bh(&q->lock);
-	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
+	__rpc_sleep_on_priority(q, task, priority - RPC_PRIORITY_LOW);
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);