Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

NFS: Add an iocounter wait function for async RPC tasks

By sleeping on a new NFS Unlock-On-Close waitqueue, rpc tasks may wait for
a lock context's iocounter to reach zero. The rpc waitqueue is only woken
when the open_context has the NFS_CONTEXT_UNLOCK flag set in order to
mitigate spurious wake-ups for any iocounter reaching zero.

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>

Authored by Benjamin Coddington; committed by Trond Myklebust.
7d6ddf88 50f2112c

+37 -1
+1
fs/nfs/client.c
··· 218 218 static void pnfs_init_server(struct nfs_server *server) 219 219 { 220 220 rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); 221 + rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC"); 221 222 } 222 223 223 224 #else
+33 -1
fs/nfs/pagelist.c
··· 102 102 TASK_KILLABLE); 103 103 } 104 104 105 + /** 106 + * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O 107 + * to complete 108 + * @task: the rpc_task that should wait 109 + * @l_ctx: nfs_lock_context with io_counter to check 110 + * 111 + * Returns true if there is outstanding I/O to wait on and the 112 + * task has been put to sleep. 113 + */ 114 + bool 115 + nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx) 116 + { 117 + struct inode *inode = d_inode(l_ctx->open_context->dentry); 118 + bool ret = false; 119 + 120 + if (atomic_read(&l_ctx->io_count) > 0) { 121 + rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL); 122 + ret = true; 123 + } 124 + 125 + if (atomic_read(&l_ctx->io_count) == 0) { 126 + rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task); 127 + ret = false; 128 + } 129 + 130 + return ret; 131 + } 132 + EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); 133 + 105 134 /* 106 135 * nfs_page_group_lock - lock the head of the page group 107 136 * @req - request in group that is to be locked ··· 414 385 req->wb_page = NULL; 415 386 } 416 387 if (l_ctx != NULL) { 417 - if (atomic_dec_and_test(&l_ctx->io_count)) 388 + if (atomic_dec_and_test(&l_ctx->io_count)) { 418 389 wake_up_atomic_t(&l_ctx->io_count); 390 + if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags)) 391 + rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq); 392 + } 419 393 nfs_put_lock_context(l_ctx); 420 394 req->wb_lock_context = NULL; 421 395 }
+1
include/linux/nfs_fs.h
··· 76 76 #define NFS_CONTEXT_ERROR_WRITE (0) 77 77 #define NFS_CONTEXT_RESEND_WRITES (1) 78 78 #define NFS_CONTEXT_BAD (2) 79 + #define NFS_CONTEXT_UNLOCK (3) 79 80 int error; 80 81 81 82 struct list_head list;
+1
include/linux/nfs_fs_sb.h
··· 222 222 u32 mountd_version; 223 223 unsigned short mountd_port; 224 224 unsigned short mountd_protocol; 225 + struct rpc_wait_queue uoc_rpcwaitq; 225 226 }; 226 227 227 228 /* Server capabilities */
+1
include/linux/nfs_page.h
··· 141 141 extern void nfs_page_group_lock_wait(struct nfs_page *); 142 142 extern void nfs_page_group_unlock(struct nfs_page *); 143 143 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); 144 + extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); 144 145 145 146 /* 146 147 * Lock the page of an asynchronous request