Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

NFS add session back channel draining

Currently session draining only drains the fore channel.
The back channel processing must also be drained.

Use the back channel highest_used_slotid to indicate that a callback is being
processed by the callback thread. Move the session complete to be per channel.

When the session is draining, wait for any current back channel processing
to complete and stop all new back channel processing by returning NFS4ERR_DELAY
to the back channel client.

Drain the back channel, then the fore channel.

Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>

authored by

Andy Adamson and committed by
Trond Myklebust
42acd021 ece0de63

+86 -16
+2 -1
fs/nfs/callback.h
··· 138 138 void *dummy, 139 139 struct cb_process_state *cps); 140 140 141 + extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); 142 + extern void nfs4_cb_take_slot(struct nfs_client *clp); 141 143 #endif /* CONFIG_NFS_V4_1 */ 142 144 143 145 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, ··· 147 145 struct cb_process_state *cps); 148 146 extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, 149 147 struct cb_process_state *cps); 150 - 151 148 #ifdef CONFIG_NFS_V4 152 149 extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); 153 150 extern void nfs_callback_down(int minorversion);
+7
fs/nfs/callback_proc.c
··· 253 253 if (clp == NULL) 254 254 goto out; 255 255 256 + /* state manager is resetting the session */ 257 + if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { 258 + status = NFS4ERR_DELAY; 259 + goto out; 260 + } 261 + 256 262 status = validate_seqid(&clp->cl_session->bc_slot_table, args); 257 263 if (status) 258 264 goto out; ··· 279 273 res->csr_slotid = args->csa_slotid; 280 274 res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; 281 275 res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; 276 + nfs4_cb_take_slot(clp); 282 277 cps->clp = clp; /* put in nfs4_callback_compound */ 283 278 284 279 out:
+35
fs/nfs/callback_xdr.c
··· 596 596 return htonl(NFS_OK); 597 597 } 598 598 599 + static void nfs4_callback_free_slot(struct nfs4_session *session) 600 + { 601 + struct nfs4_slot_table *tbl = &session->bc_slot_table; 602 + 603 + spin_lock(&tbl->slot_tbl_lock); 604 + /* 605 + * Let the state manager know callback processing done. 606 + * A single slot, so highest used slotid is either 0 or -1 607 + */ 608 + tbl->highest_used_slotid--; 609 + nfs4_check_drain_bc_complete(session); 610 + spin_unlock(&tbl->slot_tbl_lock); 611 + } 612 + 613 + static void nfs4_cb_free_slot(struct nfs_client *clp) 614 + { 615 + if (clp && clp->cl_session) 616 + nfs4_callback_free_slot(clp->cl_session); 617 + } 618 + 619 + /* A single slot, so highest used slotid is either 0 or -1 */ 620 + void nfs4_cb_take_slot(struct nfs_client *clp) 621 + { 622 + struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table; 623 + 624 + spin_lock(&tbl->slot_tbl_lock); 625 + tbl->highest_used_slotid++; 626 + BUG_ON(tbl->highest_used_slotid != 0); 627 + spin_unlock(&tbl->slot_tbl_lock); 628 + } 629 + 599 630 #else /* CONFIG_NFS_V4_1 */ 600 631 601 632 static __be32 ··· 635 604 return htonl(NFS4ERR_MINOR_VERS_MISMATCH); 636 605 } 637 606 607 + static void nfs4_cb_free_slot(struct nfs_client *clp) 608 + { 609 + } 638 610 #endif /* CONFIG_NFS_V4_1 */ 639 611 640 612 static __be32 ··· 758 724 759 725 *hdr_res.status = status; 760 726 *hdr_res.nops = htonl(nops); 727 + nfs4_cb_free_slot(cps.clp); 761 728 nfs_put_client(cps.clp); 762 729 dprintk("%s: done, status = %u\n", __func__, ntohl(status)); 763 730 return rpc_success;
+19 -7
fs/nfs/nfs4proc.c
··· 356 356 } 357 357 358 358 /* 359 - * Signal state manager thread if session is drained 359 + * Signal state manager thread if session fore channel is drained 360 360 */ 361 - static void nfs41_check_drain_session_complete(struct nfs4_session *ses) 361 + static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) 362 362 { 363 363 struct rpc_task *task; 364 364 ··· 372 372 if (ses->fc_slot_table.highest_used_slotid != -1) 373 373 return; 374 374 375 - dprintk("%s COMPLETE: Session Drained\n", __func__); 376 - complete(&ses->complete); 375 + dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__); 376 + complete(&ses->fc_slot_table.complete); 377 + } 378 + 379 + /* 380 + * Signal state manager thread if session back channel is drained 381 + */ 382 + void nfs4_check_drain_bc_complete(struct nfs4_session *ses) 383 + { 384 + if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) || 385 + ses->bc_slot_table.highest_used_slotid != -1) 386 + return; 387 + dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__); 388 + complete(&ses->bc_slot_table.complete); 377 389 } 378 390 379 391 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) ··· 402 390 403 391 spin_lock(&tbl->slot_tbl_lock); 404 392 nfs4_free_slot(tbl, res->sr_slot); 405 - nfs41_check_drain_session_complete(res->sr_session); 393 + nfs4_check_drain_fc_complete(res->sr_session); 406 394 spin_unlock(&tbl->slot_tbl_lock); 407 395 res->sr_slot = NULL; 408 396 } ··· 4789 4777 if (!session) 4790 4778 return NULL; 4791 4779 4792 - init_completion(&session->complete); 4793 - 4794 4780 tbl = &session->fc_slot_table; 4795 4781 tbl->highest_used_slotid = -1; 4796 4782 spin_lock_init(&tbl->slot_tbl_lock); 4797 4783 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); 4784 + init_completion(&tbl->complete); 4798 4785 4799 4786 tbl = &session->bc_slot_table; 4800 4787 tbl->highest_used_slotid = -1; 4801 4788 spin_lock_init(&tbl->slot_tbl_lock); 4802 4789 
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); 4790 + init_completion(&tbl->complete); 4803 4791 4804 4792 session->session_state = 1<<NFS4_SESSION_INITING; 4805 4793
+22 -7
fs/nfs/nfs4state.c
··· 142 142 return status; 143 143 } 144 144 145 + /* 146 + * Back channel returns NFS4ERR_DELAY for new requests when 147 + * NFS4_SESSION_DRAINING is set so there is no work to be done when draining 148 + * is ended. 149 + */ 145 150 static void nfs4_end_drain_session(struct nfs_client *clp) 146 151 { 147 152 struct nfs4_session *ses = clp->cl_session; ··· 170 165 } 171 166 } 172 167 173 - static int nfs4_begin_drain_session(struct nfs_client *clp) 168 + static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl) 174 169 { 175 - struct nfs4_session *ses = clp->cl_session; 176 - struct nfs4_slot_table *tbl = &ses->fc_slot_table; 177 - 178 170 spin_lock(&tbl->slot_tbl_lock); 179 - set_bit(NFS4_SESSION_DRAINING, &ses->session_state); 180 171 if (tbl->highest_used_slotid != -1) { 181 - INIT_COMPLETION(ses->complete); 172 + INIT_COMPLETION(tbl->complete); 182 173 spin_unlock(&tbl->slot_tbl_lock); 183 - return wait_for_completion_interruptible(&ses->complete); 174 + return wait_for_completion_interruptible(&tbl->complete); 184 175 } 185 176 spin_unlock(&tbl->slot_tbl_lock); 186 177 return 0; 178 + } 179 + 180 + static int nfs4_begin_drain_session(struct nfs_client *clp) 181 + { 182 + struct nfs4_session *ses = clp->cl_session; 183 + int ret = 0; 184 + 185 + set_bit(NFS4_SESSION_DRAINING, &ses->session_state); 186 + /* back channel */ 187 + ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table); 188 + if (ret) 189 + return ret; 190 + /* fore channel */ 191 + return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); 187 192 } 188 193 189 194 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+1 -1
include/linux/nfs_fs_sb.h
··· 197 197 * op for dynamic resizing */ 198 198 int target_max_slots; /* Set by CB_RECALL_SLOT as 199 199 * the new max_slots */ 200 + struct completion complete; 200 201 }; 201 202 202 203 static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp) ··· 214 213 unsigned long session_state; 215 214 u32 hash_alg; 216 215 u32 ssv_len; 217 - struct completion complete; 218 216 219 217 /* The fore and back channel */ 220 218 struct nfs4_channel_attrs fc_attrs;