Merge branch 'nfs-for-2.6.34' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6

* 'nfs-for-2.6.34' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (44 commits)
NFS: Remove requirement for inode->i_mutex from nfs_invalidate_mapping
NFS: Clean up nfs_sync_mapping
NFS: Simplify nfs_wb_page()
NFS: Replace __nfs_write_mapping with sync_inode()
NFS: Simplify nfs_wb_page_cancel()
NFS: Ensure inode is always marked I_DIRTY_DATASYNC, if it has unstable pages
NFS: Run COMMIT as an asynchronous RPC call when wbc->for_background is set
NFS: Reduce the number of unnecessary COMMIT calls
NFS: Add a count of the number of unstable writes carried by an inode
NFS: Cleanup - move nfs_write_inode() into fs/nfs/write.c
nfs41 fix NFS4ERR_CLID_INUSE for exchange id
NFS: Fix an allocation-under-spinlock bug
SUNRPC: Handle EINVAL error returns from the TCP connect operation
NFSv4.1: Various fixes to the sequence flag error handling
nfs4: renewd renew operations should take/put a client reference
nfs41: renewd sequence operations should take/put client reference
nfs: prevent backlogging of renewd requests
nfs: kill renewd before clearing client minor version
NFS: Make close(2) asynchronous when closing NFS O_DIRECT files
NFS: Improve NFS iostat byte count accuracy for writes
...

+664 -465
+8
fs/nfs/callback.h
···
 };
 
 extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
+
+struct cb_recallslotargs {
+	struct sockaddr	*crsa_addr;
+	uint32_t	crsa_target_max_slots;
+};
+extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
+					 void *dummy);
+
 #endif /* CONFIG_NFS_V4_1 */
 
 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
+137 -28
fs/nfs/callback_proc.c
···
  * Return success if the sequenceID is one more than what we last saw on
  * this slot, accounting for wraparound. Increments the slot's sequence.
  *
- * We don't yet implement a duplicate request cache, so at this time
- * we will log replays, and process them as if we had not seen them before,
- * but we don't bump the sequence in the slot. Not too worried about it,
+ * We don't yet implement a duplicate request cache, instead we set the
+ * back channel ca_maxresponsesize_cached to zero. This is OK for now
  * since we only currently implement idempotent callbacks anyway.
  *
  * We have a single slot backchannel at this time, so we don't bother
  * checking the used_slots bit array on the table. The lower layer guarantees
  * a single outstanding callback request at a time.
  */
-static int
-validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
+static __be32
+validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
 {
 	struct nfs4_slot *slot;
 
 	dprintk("%s enter. slotid %d seqid %d\n",
-		__func__, slotid, seqid);
+		__func__, args->csa_slotid, args->csa_sequenceid);
 
-	if (slotid > NFS41_BC_MAX_CALLBACKS)
+	if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
 		return htonl(NFS4ERR_BADSLOT);
 
-	slot = tbl->slots + slotid;
+	slot = tbl->slots + args->csa_slotid;
 	dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);
 
 	/* Normal */
-	if (likely(seqid == slot->seq_nr + 1)) {
+	if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
 		slot->seq_nr++;
 		return htonl(NFS4_OK);
 	}
 
 	/* Replay */
-	if (seqid == slot->seq_nr) {
-		dprintk("%s seqid %d is a replay - no DRC available\n",
-			__func__, seqid);
-		return htonl(NFS4_OK);
+	if (args->csa_sequenceid == slot->seq_nr) {
+		dprintk("%s seqid %d is a replay\n",
+			__func__, args->csa_sequenceid);
+		/* Signal process_op to set this error on next op */
+		if (args->csa_cachethis == 0)
+			return htonl(NFS4ERR_RETRY_UNCACHED_REP);
+
+		/* The ca_maxresponsesize_cached is 0 with no DRC */
+		else if (args->csa_cachethis == 1)
+			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
 	}
 
 	/* Wraparound */
-	if (seqid == 1 && (slot->seq_nr + 1) == 0) {
+	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
 		slot->seq_nr = 1;
 		return htonl(NFS4_OK);
 	}
···
 	return NULL;
 }
 
-/* FIXME: referring calls should be processed */
-unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+/*
+ * For each referring call triple, check the session's slot table for
+ * a match.  If the slot is in use and the sequence numbers match, the
+ * client is still waiting for a response to the original request.
+ */
+static bool referring_call_exists(struct nfs_client *clp,
+				  uint32_t nrclists,
+				  struct referring_call_list *rclists)
+{
+	bool status = 0;
+	int i, j;
+	struct nfs4_session *session;
+	struct nfs4_slot_table *tbl;
+	struct referring_call_list *rclist;
+	struct referring_call *ref;
+
+	/*
+	 * XXX When client trunking is implemented, this becomes
+	 * a session lookup from within the loop
+	 */
+	session = clp->cl_session;
+	tbl = &session->fc_slot_table;
+
+	for (i = 0; i < nrclists; i++) {
+		rclist = &rclists[i];
+		if (memcmp(session->sess_id.data,
+			   rclist->rcl_sessionid.data,
+			   NFS4_MAX_SESSIONID_LEN) != 0)
+			continue;
+
+		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
+			ref = &rclist->rcl_refcalls[j];
+
+			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
+				"slotid %u\n", __func__,
+				((u32 *)&rclist->rcl_sessionid.data)[0],
+				((u32 *)&rclist->rcl_sessionid.data)[1],
+				((u32 *)&rclist->rcl_sessionid.data)[2],
+				((u32 *)&rclist->rcl_sessionid.data)[3],
+				ref->rc_sequenceid, ref->rc_slotid);
+
+			spin_lock(&tbl->slot_tbl_lock);
+			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
+				  tbl->slots[ref->rc_slotid].seq_nr ==
+				  ref->rc_sequenceid);
+			spin_unlock(&tbl->slot_tbl_lock);
+			if (status)
+				goto out;
+		}
+	}
+
+out:
+	return status;
+}
+
+__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 				struct cb_sequenceres *res)
 {
 	struct nfs_client *clp;
-	int i, status;
-
-	for (i = 0; i < args->csa_nrclists; i++)
-		kfree(args->csa_rclists[i].rcl_refcalls);
-	kfree(args->csa_rclists);
+	int i;
+	__be32 status;
 
 	status = htonl(NFS4ERR_BADSESSION);
 	clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
 	if (clp == NULL)
 		goto out;
 
-	status = validate_seqid(&clp->cl_session->bc_slot_table,
-			args->csa_slotid, args->csa_sequenceid);
+	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
 	if (status)
 		goto out_putclient;
+
+	/*
+	 * Check for pending referring calls.  If a match is found, a
+	 * related callback was received before the response to the original
+	 * call.
+	 */
+	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
+		status = htonl(NFS4ERR_DELAY);
+		goto out_putclient;
+	}
 
 	memcpy(&res->csr_sessionid, &args->csa_sessionid,
 	       sizeof(res->csr_sessionid));
···
 out_putclient:
 	nfs_put_client(clp);
 out:
-	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
-	res->csr_status = status;
-	return res->csr_status;
+	for (i = 0; i < args->csa_nrclists; i++)
+		kfree(args->csa_rclists[i].rcl_refcalls);
+	kfree(args->csa_rclists);
+
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP))
+		res->csr_status = 0;
+	else
+		res->csr_status = status;
+	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
+		ntohl(status), ntohl(res->csr_status));
+	return status;
 }
 
-unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
 {
 	struct nfs_client *clp;
-	int status;
+	__be32 status;
 	fmode_t flags = 0;
 
 	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
···
 	if (flags)
 		nfs_expire_all_delegation_types(clp, flags);
 	status = htonl(NFS4_OK);
+out:
+	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+	return status;
+}
+
+/* Reduce the fore channel's max_slots to the target value */
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy)
+{
+	struct nfs_client *clp;
+	struct nfs4_slot_table *fc_tbl;
+	__be32 status;
+
+	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	clp = nfs_find_client(args->crsa_addr, 4);
+	if (clp == NULL)
+		goto out;
+
+	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
+		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+		args->crsa_target_max_slots);
+
+	fc_tbl = &clp->cl_session->fc_slot_table;
+
+	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
+	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
+	    args->crsa_target_max_slots < 1)
+		goto out_putclient;
+
+	status = htonl(NFS4_OK);
+	if (args->crsa_target_max_slots == fc_tbl->max_slots)
+		goto out_putclient;
+
+	fc_tbl->target_max_slots = args->crsa_target_max_slots;
+	nfs41_handle_recall_slot(clp);
+out_putclient:
+	nfs_put_client(clp);	/* balance nfs_find_client */
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
+73 -32
fs/nfs/callback_xdr.c
···
 #define CB_OP_SEQUENCE_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
 					4 + 1 + 3)
 #define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_RECALLSLOT_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 #endif /* CONFIG_NFS_V4_1 */
 
 #define NFSDBG_FACILITY NFSDBG_CALLBACK
+
+/* Internal error code */
+#define NFS4ERR_RESOURCE_HDR	11050
 
 typedef __be32 (*callback_process_op_t)(void *, void *);
 typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
···
 	__be32 *p;
 	p = read_buf(xdr, 4);
 	if (unlikely(p == NULL))
-		return htonl(NFS4ERR_RESOURCE);
+		return htonl(NFS4ERR_RESOURCE_HDR);
 	*op = ntohl(*p);
 	return 0;
 }
···
 
 #if defined(CONFIG_NFS_V4_1)
 
-static unsigned decode_sessionid(struct xdr_stream *xdr,
+static __be32 decode_sessionid(struct xdr_stream *xdr,
 				 struct nfs4_sessionid *sid)
 {
-	uint32_t *p;
+	__be32 *p;
 	int len = NFS4_MAX_SESSIONID_LEN;
 
 	p = read_buf(xdr, len);
···
 	return 0;
 }
 
-static unsigned decode_rc_list(struct xdr_stream *xdr,
+static __be32 decode_rc_list(struct xdr_stream *xdr,
 			       struct referring_call_list *rc_list)
 {
-	uint32_t *p;
+	__be32 *p;
 	int i;
-	unsigned status;
+	__be32 status;
 
 	status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
 	if (status)
···
 	return status;
 }
 
-static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
+static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
 					struct xdr_stream *xdr,
 					struct cb_sequenceargs *args)
 {
-	uint32_t *p;
+	__be32 *p;
 	int i;
-	unsigned status;
+	__be32 status;
 
 	status = decode_sessionid(xdr, &args->csa_sessionid);
 	if (status)
···
 	goto out;
 }
 
-static unsigned decode_recallany_args(struct svc_rqst *rqstp,
+static __be32 decode_recallany_args(struct svc_rqst *rqstp,
 				      struct xdr_stream *xdr,
 				      struct cb_recallanyargs *args)
 {
-	uint32_t *p;
+	__be32 *p;
 
 	args->craa_addr = svc_addr(rqstp);
 	p = read_buf(xdr, 4);
···
 		return htonl(NFS4ERR_BADXDR);
 	args->craa_type_mask = ntohl(*p);
 
+	return 0;
+}
+
+static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					struct cb_recallslotargs *args)
+{
+	__be32 *p;
+
+	args->crsa_addr = svc_addr(rqstp);
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_BADXDR);
+	args->crsa_target_max_slots = ntohl(*p++);
 	return 0;
 }
 
···
 
 	p = xdr_reserve_space(xdr, 8);
 	if (unlikely(p == NULL))
-		return htonl(NFS4ERR_RESOURCE);
+		return htonl(NFS4ERR_RESOURCE_HDR);
 	*p++ = htonl(op);
 	*p = res;
 	return 0;
···
 
 #if defined(CONFIG_NFS_V4_1)
 
-static unsigned encode_sessionid(struct xdr_stream *xdr,
+static __be32 encode_sessionid(struct xdr_stream *xdr,
 				 const struct nfs4_sessionid *sid)
 {
-	uint32_t *p;
+	__be32 *p;
 	int len = NFS4_MAX_SESSIONID_LEN;
 
 	p = xdr_reserve_space(xdr, len);
···
 	return 0;
 }
 
-static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp,
+static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
 				       struct xdr_stream *xdr,
 				       const struct cb_sequenceres *res)
 {
-	uint32_t *p;
+	__be32 *p;
 	unsigned status = res->csr_status;
 
 	if (unlikely(status != 0))
···
 	case OP_CB_RECALL:
 	case OP_CB_SEQUENCE:
 	case OP_CB_RECALL_ANY:
+	case OP_CB_RECALL_SLOT:
 		*op = &callback_ops[op_nr];
 		break;
···
 	case OP_CB_NOTIFY:
 	case OP_CB_PUSH_DELEG:
 	case OP_CB_RECALLABLE_OBJ_AVAIL:
-	case OP_CB_RECALL_SLOT:
 	case OP_CB_WANTS_CANCELLED:
 	case OP_CB_NOTIFY_LOCK:
 		return htonl(NFS4ERR_NOTSUPP);
···
 static __be32 process_op(uint32_t minorversion, int nop,
 		struct svc_rqst *rqstp,
 		struct xdr_stream *xdr_in, void *argp,
-		struct xdr_stream *xdr_out, void *resp)
+		struct xdr_stream *xdr_out, void *resp, int* drc_status)
 {
 	struct callback_op *op = &callback_ops[0];
-	unsigned int op_nr = OP_CB_ILLEGAL;
+	unsigned int op_nr;
 	__be32 status;
 	long maxlen;
 	__be32 res;
 
 	dprintk("%s: start\n", __func__);
 	status = decode_op_hdr(xdr_in, &op_nr);
-	if (unlikely(status)) {
-		status = htonl(NFS4ERR_OP_ILLEGAL);
-		goto out;
-	}
+	if (unlikely(status))
+		return status;
 
 	dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
 		__func__, minorversion, nop, op_nr);
···
 	preprocess_nfs4_op(op_nr, &op);
 	if (status == htonl(NFS4ERR_OP_ILLEGAL))
 		op_nr = OP_CB_ILLEGAL;
-out:
+	if (status)
+		goto encode_hdr;
+
+	if (*drc_status) {
+		status = *drc_status;
+		goto encode_hdr;
+	}
+
 	maxlen = xdr_out->end - xdr_out->p;
 	if (maxlen > 0 && maxlen < PAGE_SIZE) {
-		if (likely(status == 0 && op->decode_args != NULL))
-			status = op->decode_args(rqstp, xdr_in, argp);
-		if (likely(status == 0 && op->process_op != NULL))
+		status = op->decode_args(rqstp, xdr_in, argp);
+		if (likely(status == 0))
 			status = op->process_op(argp, resp);
 	} else
 		status = htonl(NFS4ERR_RESOURCE);
 
+	/* Only set by OP_CB_SEQUENCE processing */
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+		*drc_status = status;
+		status = 0;
+	}
+
+encode_hdr:
 	res = encode_op_hdr(xdr_out, op_nr, status);
-	if (status == 0)
-		status = res;
+	if (unlikely(res))
+		return res;
 	if (op->encode_res != NULL && status == 0)
 		status = op->encode_res(rqstp, xdr_out, resp);
 	dprintk("%s: done, status = %d\n", __func__, ntohl(status));
···
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
 	__be32 *p;
-	__be32 status;
+	__be32 status, drc_status = 0;
 	unsigned int nops = 0;
 
 	dprintk("%s: start\n", __func__);
···
 		return rpc_system_err;
 
 	while (status == 0 && nops != hdr_arg.nops) {
-		status = process_op(hdr_arg.minorversion, nops,
-				rqstp, &xdr_in, argp, &xdr_out, resp);
+		status = process_op(hdr_arg.minorversion, nops, rqstp,
+				&xdr_in, argp, &xdr_out, resp, &drc_status);
 		nops++;
+	}
+
+	/* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return
+	 * resource error in cb_compound status without returning op */
+	if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
+		status = htonl(NFS4ERR_RESOURCE);
+		nops--;
 	}
 
 	*hdr_res.status = status;
···
 		.process_op = (callback_process_op_t)nfs4_callback_recallany,
 		.decode_args = (callback_decode_arg_t)decode_recallany_args,
 		.res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
+	},
+	[OP_CB_RECALL_SLOT] = {
+		.process_op = (callback_process_op_t)nfs4_callback_recallslot,
+		.decode_args = (callback_decode_arg_t)decode_recallslot_args,
+		.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
 	},
 #endif /* CONFIG_NFS_V4_1 */
 };
+26 -26
fs/nfs/client.c
···
 	return ERR_PTR(err);
 }
 
-static void nfs4_shutdown_client(struct nfs_client *clp)
-{
 #ifdef CONFIG_NFS_V4
-	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
-		nfs4_kill_renewd(clp);
-	BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
-	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
-		nfs_idmap_delete(clp);
-
-	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
-#endif
-}
-
-/*
- * Destroy the NFS4 callback service
- */
-static void nfs4_destroy_callback(struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4
-	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
-		nfs_callback_down(clp->cl_minorversion);
-#endif /* CONFIG_NFS_V4 */
-}
-
 /*
  * Clears/puts all minor version specific parts from an nfs_client struct
  * reverting it to minorversion 0.
···
 
 	clp->cl_call_sync = _nfs4_call_sync;
 #endif /* CONFIG_NFS_V4_1 */
-
-	nfs4_destroy_callback(clp);
 }
+
+/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+		nfs_callback_down(clp->cl_minorversion);
+}
+
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
+		nfs4_kill_renewd(clp);
+	nfs4_clear_client_minor_version(clp);
+	nfs4_destroy_callback(clp);
+	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
+		nfs_idmap_delete(clp);
+
+	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+}
+#else
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4 */
 
 /*
  * Destroy a shared client record
···
 {
 	dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
 
-	nfs4_clear_client_minor_version(clp);
 	nfs4_shutdown_client(clp);
 
 	nfs_fscache_release_client_cookie(clp);
+1 -1
fs/nfs/dir.c
···
 	desc->entry = &my_entry;
 
 	nfs_block_sillyrename(dentry);
-	res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
+	res = nfs_revalidate_mapping(inode, filp->f_mapping);
 	if (res < 0)
 		goto out;
 
+15 -3
fs/nfs/dns_resolve.c
···
 };
 
 
+static void nfs_dns_ent_update(struct cache_head *cnew,
+		struct cache_head *ckey)
+{
+	struct nfs_dns_ent *new;
+	struct nfs_dns_ent *key;
+
+	new = container_of(cnew, struct nfs_dns_ent, h);
+	key = container_of(ckey, struct nfs_dns_ent, h);
+
+	memcpy(&new->addr, &key->addr, key->addrlen);
+	new->addrlen = key->addrlen;
+}
+
 static void nfs_dns_ent_init(struct cache_head *cnew,
 		struct cache_head *ckey)
 {
···
 	new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
 	if (new->hostname) {
 		new->namelen = key->namelen;
-		memcpy(&new->addr, &key->addr, key->addrlen);
-		new->addrlen = key->addrlen;
+		nfs_dns_ent_update(cnew, ckey);
 	} else {
 		new->namelen = 0;
 		new->addrlen = 0;
···
 	.cache_show = nfs_dns_show,
 	.match = nfs_dns_match,
 	.init = nfs_dns_ent_init,
-	.update = nfs_dns_ent_init,
+	.update = nfs_dns_ent_update,
 	.alloc = nfs_dns_ent_alloc,
 };
+22 -8
fs/nfs/file.c
···
 			filp->f_path.dentry->d_parent->d_name.name,
 			filp->f_path.dentry->d_name.name);
 
+	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 	res = nfs_check_flags(filp->f_flags);
 	if (res)
 		return res;
 
-	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 	res = nfs_open(inode, filp);
 	return res;
 }
···
 			dentry->d_parent->d_name.name,
 			dentry->d_name.name);
 
+	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
 	if ((file->f_mode & FMODE_WRITE) == 0)
 		return 0;
-	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
 
 	/* Flush writes to the server and return any errors */
 	return nfs_do_fsync(ctx, inode);
···
 		(unsigned long) count, (unsigned long) pos);
 
 	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
-	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
-	if (!result)
+	if (!result) {
 		result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+		if (result > 0)
+			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
+	}
 	return result;
 }
···
 		(unsigned long) count, (unsigned long long) *ppos);
 
 	res = nfs_revalidate_mapping(inode, filp->f_mapping);
-	if (!res)
+	if (!res) {
 		res = generic_file_splice_read(filp, ppos, pipe, count, flags);
+		if (res > 0)
+			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
+	}
 	return res;
 }
···
 {
 	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
 	struct inode * inode = dentry->d_inode;
+	unsigned long written = 0;
 	ssize_t result;
 	size_t count = iov_length(iov, nr_segs);
···
 	if (!count)
 		goto out;
 
-	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
 	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+	if (result > 0)
+		written = result;
+
 	/* Return error values for O_DSYNC and IS_SYNC() */
 	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
 		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
 		if (err < 0)
 			result = err;
 	}
+	if (result > 0)
+		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
 out:
 	return result;
 
···
 {
 	struct dentry *dentry = filp->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
+	unsigned long written = 0;
 	ssize_t ret;
 
 	dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
···
 	 * The combination of splice and an O_APPEND destination is disallowed.
 	 */
 
-	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
-
 	ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
+	if (ret > 0)
+		written = ret;
+
 	if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
 		int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
 		if (err < 0)
 			ret = err;
 	}
+	if (ret > 0)
+		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
 	return ret;
 }
 
+13 -77
fs/nfs/inode.c
···
 	return ino;
 }
 
-int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
-	int ret;
-
-	ret = nfs_commit_inode(inode,
-			wbc->sync_mode == WB_SYNC_ALL ? FLUSH_SYNC : 0);
-	if (ret >= 0)
-		return 0;
-	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-	return ret;
-}
-
 void nfs_clear_inode(struct inode *inode)
 {
 	/*
···
  */
 int nfs_sync_mapping(struct address_space *mapping)
 {
-	int ret;
+	int ret = 0;
 
-	if (mapping->nrpages == 0)
-		return 0;
-	unmap_mapping_range(mapping, 0, 0, 0);
-	ret = filemap_write_and_wait(mapping);
-	if (ret != 0)
-		goto out;
-	ret = nfs_wb_all(mapping->host);
-out:
+	if (mapping->nrpages != 0) {
+		unmap_mapping_range(mapping, 0, 0, 0);
+		ret = nfs_wb_all(mapping->host);
+	}
 	return ret;
 }
···
 	int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
 	int err;
 
-	/*
-	 * Flush out writes to the server in order to update c/mtime.
-	 *
-	 * Hold the i_mutex to suspend application writes temporarily;
-	 * this prevents long-running writing applications from blocking
-	 * nfs_wb_nocommit.
-	 */
+	/* Flush out writes to the server in order to update c/mtime. */
 	if (S_ISREG(inode->i_mode)) {
-		mutex_lock(&inode->i_mutex);
-		nfs_wb_nocommit(inode);
-		mutex_unlock(&inode->i_mutex);
+		err = filemap_write_and_wait(inode->i_mapping);
+		if (err)
+			goto out;
 	}
 
 	/*
···
 		generic_fillattr(inode, stat);
 		stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
 	}
+out:
 	return err;
 }
···
 	__put_nfs_open_context(ctx, 0);
 }
 
-static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
-{
-	__put_nfs_open_context(ctx, 1);
-}
-
 /*
  * Ensure that mmap has a recent RPC credential for use when writing out
  * shared pages
···
 		spin_lock(&inode->i_lock);
 		list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
 		spin_unlock(&inode->i_lock);
-		put_nfs_open_context_sync(ctx);
+		__put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1);
 	}
 }
···
 	return __nfs_revalidate_inode(server, inode);
 }
 
-static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
+static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
···
 	return 0;
 }
 
-static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
-{
-	int ret = 0;
-
-	mutex_lock(&inode->i_mutex);
-	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_DATA) {
-		ret = nfs_sync_mapping(mapping);
-		if (ret == 0)
-			ret = nfs_invalidate_mapping_nolock(inode, mapping);
-	}
-	mutex_unlock(&inode->i_mutex);
-	return ret;
-}
-
-/**
- * nfs_revalidate_mapping_nolock - Revalidate the pagecache
- * @inode - pointer to host inode
- * @mapping - pointer to mapping
- */
-int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	int ret = 0;
-
-	if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
-			|| nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
-		ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
-		if (ret < 0)
-			goto out;
-	}
-	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-		ret = nfs_invalidate_mapping_nolock(inode, mapping);
-out:
-	return ret;
-}
-
 /**
  * nfs_revalidate_mapping - Revalidate the pagecache
  * @inode - pointer to host inode
  * @mapping - pointer to mapping
- *
- * This version of the function will take the inode->i_mutex and attempt to
- * flush out all dirty data if it needs to invalidate the page cache.
  */
 int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
···
 	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
 	INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
 	nfsi->npages = 0;
+	nfsi->ncommit = 0;
 	atomic_set(&nfsi->silly_count, 1);
 	INIT_HLIST_HEAD(&nfsi->silly_list);
 	init_waitqueue_head(&nfsi->waitqueue);
+5 -4
fs/nfs/nfs3proc.c
···
 
 #define NFSDBG_FACILITY NFSDBG_PROC
 
-/* A wrapper to handle the EJUKEBOX error message */
+/* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error messages */
 static int
 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 {
 	int res;
 	do {
 		res = rpc_call_sync(clnt, msg, flags);
-		if (res != -EJUKEBOX)
+		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
 			break;
 		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
···
 static int
 nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
 {
-	if (task->tk_status != -EJUKEBOX)
+	if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED)
 		return 0;
-	nfs_inc_stats(inode, NFSIOS_DELAY);
+	if (task->tk_status == -EJUKEBOX)
+		nfs_inc_stats(inode, NFSIOS_DELAY);
 	task->tk_status = 0;
 	rpc_restart_call(task);
 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+2
fs/nfs/nfs4_fs.h
···
 	NFS4CLNT_DELEGRETURN,
 	NFS4CLNT_SESSION_RESET,
 	NFS4CLNT_SESSION_DRAINING,
+	NFS4CLNT_RECALL_SLOT,
 };
 
 /*
···
 extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
 extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
+extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
+68 -35
fs/nfs/nfs4proc.c
···
 		}
 		case -NFS4ERR_GRACE:
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			ret = nfs4_delay(server->client, &exception->timeout);
 			if (ret != 0)
 				break;
···
 		clp->cl_last_renewal = timestamp;
 		spin_unlock(&clp->cl_lock);
 		/* Check sequence flags */
-		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+		if (atomic_read(&clp->cl_count) > 1)
+			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
 	}
 out:
 	/* The session may be reset by one of the error handlers. */
···
 	int err;
 	do {
 		err = _nfs4_do_open_reclaim(ctx, state);
-		if (err != -NFS4ERR_DELAY)
+		if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
 			break;
 		nfs4_handle_exception(server, err, &exception);
 	} while (exception.retry);
···
 			goto out;
 		case -NFS4ERR_GRACE:
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			nfs4_handle_exception(server, err, &exception);
 			err = 0;
 	}
···
  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
  * standalone procedure for queueing an asynchronous RENEW.
  */
+static void nfs4_renew_release(void *data)
+{
+	struct nfs_client *clp = data;
+
+	if (atomic_read(&clp->cl_count) > 1)
+		nfs4_schedule_state_renewal(clp);
+	nfs_put_client(clp);
+}
+
 static void nfs4_renew_done(struct rpc_task *task, void *data)
 {
-	struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp;
-	unsigned long timestamp = (unsigned long)data;
+	struct nfs_client *clp = data;
+	unsigned long timestamp = task->tk_start;
 
 	if (task->tk_status < 0) {
 		/* Unless we're shutting down, schedule state recovery! */
···
 
 static const struct rpc_call_ops nfs4_renew_ops = {
 	.rpc_call_done = nfs4_renew_done,
+	.rpc_release = nfs4_renew_release,
 };
 
 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
···
 		.rpc_cred = cred,
 	};
 
+	if (!atomic_inc_not_zero(&clp->cl_count))
+		return -EIO;
 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
-			&nfs4_renew_ops, (void *)jiffies);
+			&nfs4_renew_ops, clp);
 }
 
 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
···
 			if (server)
 				nfs_inc_server_stats(server, NFSIOS_DELAY);
 		case -NFS4ERR_GRACE:
+		case -EKEYEXPIRED:
 			rpc_delay(task, NFS4_POLL_RETRY_MAX);
 			task->tk_status = 0;
 			return -EAGAIN;
···
 		case -NFS4ERR_RESOURCE:
 			/* The IBM lawyers misread another document! */
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			err = nfs4_delay(clp->cl_rpcclient, &timeout);
 		}
 	} while (err == 0);
···
 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
 			return 0;
 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
-		if (err != -NFS4ERR_DELAY)
+		if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
 			break;
 		nfs4_handle_exception(server, err, &exception);
 	} while (exception.retry);
···
 			goto out;
 		case -NFS4ERR_GRACE:
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			nfs4_handle_exception(server, err, &exception);
 			err = 0;
 	}
···
 			err = 0;
 			goto out;
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			break;
 		}
 		err = nfs4_handle_exception(server, err, &exception);
···
 
 		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
 
-		if (status != NFS4ERR_CLID_INUSE)
+		if (status != -NFS4ERR_CLID_INUSE)
 			break;
 
 		if (signalled())
···
 	switch (task->tk_status) {
 	case -NFS4ERR_DELAY:
 	case -NFS4ERR_GRACE:
+	case -EKEYEXPIRED:
 		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
 		task->tk_status = 0;
···
 /*
  * Reset a slot table
  */
-static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
-		int old_max_slots, int ivalue)
+static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
+				 int ivalue)
 {
+	struct nfs4_slot *new = NULL;
 	int i;
 	int ret = 0;
 
-	dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl);
+	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
+		max_reqs, tbl->max_slots);
 
-	/*
-	 * Until we have dynamic slot table adjustment, insist
-	 * upon the same slot table size
-	 */
-	if (max_slots != old_max_slots) {
-		dprintk("%s reset slot table does't match old\n",
-			__func__);
-		ret = -EINVAL;	/*XXX NFS4ERR_REQ_TOO_BIG ? */
-		goto out;
+	/* Does the newly negotiated max_reqs match the existing slot table? */
+	if (max_reqs != tbl->max_slots) {
+		ret = -ENOMEM;
+		new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
+			      GFP_KERNEL);
+		if (!new)
+			goto out;
+		ret = 0;
+		kfree(tbl->slots);
 	}
 	spin_lock(&tbl->slot_tbl_lock);
-	for (i = 0; i < max_slots; ++i)
+	if (new) {
+		tbl->slots = new;
+		tbl->max_slots = max_reqs;
+	}
+	for (i = 0; i < tbl->max_slots; ++i)
 		tbl->slots[i].seq_nr = ivalue;
 	spin_unlock(&tbl->slot_tbl_lock);
 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
···
 	int status;
 
 	status = nfs4_reset_slot_table(&session->fc_slot_table,
-			session->fc_attrs.max_reqs,
-			session->fc_slot_table.max_slots,
-			1);
+			session->fc_attrs.max_reqs, 1);
 	if (status)
 		return status;
 
 	status = nfs4_reset_slot_table(&session->bc_slot_table,
-			session->bc_attrs.max_reqs,
-			session->bc_slot_table.max_slots,
-			0);
+			session->bc_attrs.max_reqs, 0);
 	return status;
 }
···
 	args->fc_attrs.headerpadsz = 0;
 	args->fc_attrs.max_rqst_sz = mxrqst_sz;
 	args->fc_attrs.max_resp_sz = mxresp_sz;
-	args->fc_attrs.max_resp_sz_cached = mxresp_sz;
 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
 	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
 
 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
-		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+		"max_ops=%u max_reqs=%u\n",
 		__func__,
 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
-		args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops,
-		args->fc_attrs.max_reqs);
+		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
 
 	/* Back channel attributes */
 	args->bc_attrs.headerpadsz = 0;
···
 			&res, args.sa_cache_this, 1);
 }
 
-void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+static void nfs41_sequence_release(void *data)
+{
+	struct nfs_client *clp = (struct nfs_client *)data;
+
+	if (atomic_read(&clp->cl_count) > 1)
+		nfs4_schedule_state_renewal(clp);
+	nfs_put_client(clp);
+}
+
+static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 {
 	struct nfs_client *clp = (struct nfs_client *)data;
···
 
 	if (task->tk_status < 0) {
 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
+		if (atomic_read(&clp->cl_count) == 1)
+			goto out;
 
 		if (_nfs4_async_handle_error(task, NULL, clp, NULL)
 				== -EAGAIN) {
···
 		}
 	}
 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
-
+out:
 	kfree(task->tk_msg.rpc_argp);
 	kfree(task->tk_msg.rpc_resp);
···
 static const struct rpc_call_ops nfs41_sequence_ops = {
 	.rpc_call_done = nfs41_sequence_call_done,
 	.rpc_call_prepare = nfs41_sequence_prepare,
+	.rpc_release = nfs41_sequence_release,
 };
 
 static int nfs41_proc_async_sequence(struct nfs_client *clp,
···
 		.rpc_cred = cred,
 	};
 
+	if (!atomic_inc_not_zero(&clp->cl_count))
+		return -EIO;
 	args = kzalloc(sizeof(*args), GFP_KERNEL);
-	if (!args)
-		return -ENOMEM;
 	res = kzalloc(sizeof(*res), GFP_KERNEL);
-	if (!res) {
+	if (!args || !res) {
 		kfree(args);
+		nfs_put_client(clp);
 		return -ENOMEM;
 	}
 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+7 -17
fs/nfs/nfs4renewd.c
···
  * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
  * context. There is one renewd per nfs_server.
  *
- * TODO: If the send queue gets backlogged (e.g., if the server goes down),
- * we will keep filling the queue with periodic RENEW requests. We need a
- * mechanism for ensuring that if renewd successfully sends off a request,
- * then it only wakes up when the request is finished. Maybe use the
- * child task framework of the RPC layer?
  */
 
 #include <linux/mm.h>
···
 	struct nfs_client *clp =
 		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
-	long lease, timeout;
+	long lease;
 	unsigned long last, now;
 
 	ops = nfs4_state_renewal_ops[clp->cl_minorversion];
···
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;
 	now = jiffies;
-	timeout = (2 * lease) / 3 + (long)last - (long)now;
 	/* Are we close to a lease timeout? */
 	if (time_after(now, last + lease/3)) {
 		cred = ops->get_state_renewal_cred_locked(clp);
···
 			/* Queue an asynchronous RENEW. */
 			ops->sched_state_renewal(clp, cred);
 			put_rpccred(cred);
+			goto out_exp;
 		}
-		timeout = (2 * lease) / 3;
-		spin_lock(&clp->cl_lock);
-	} else
+	} else {
 		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
 				__func__);
-	if (timeout < 5 * HZ)    /* safeguard */
-		timeout = 5 * HZ;
-	dprintk("%s: requeueing work. Lease period = %ld\n",
-			__func__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
-	spin_unlock(&clp->cl_lock);
+		spin_unlock(&clp->cl_lock);
+	}
+	nfs4_schedule_state_renewal(clp);
+out_exp:
 	nfs_expire_unreferenced_delegations(clp);
 out:
 	dprintk("%s: done\n", __func__);
+98 -20
fs/nfs/nfs4state.c
···
 }
 
 #ifdef CONFIG_NFS_V4_1
+void nfs41_handle_recall_slot(struct nfs_client *clp)
+{
+	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+	nfs4_schedule_state_recovery(clp);
+}
+
+static void nfs4_reset_all_state(struct nfs_client *clp)
+{
+	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
+		clp->cl_boot_time = CURRENT_TIME;
+		nfs4_state_start_reclaim_nograce(clp);
+		nfs4_schedule_state_recovery(clp);
+	}
+}
+
+static void nfs41_handle_server_reboot(struct nfs_client *clp)
+{
+	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
+		nfs4_state_start_reclaim_reboot(clp);
+		nfs4_schedule_state_recovery(clp);
+	}
+}
+
+static void nfs41_handle_state_revoked(struct nfs_client *clp)
+{
+	/* Temporary */
+	nfs4_reset_all_state(clp);
+}
+
+static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
+{
+	/* This will need to handle layouts too */
+	nfs_expire_all_delegations(clp);
+}
+
+static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+{
+	nfs_expire_all_delegations(clp);
+	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
+		nfs4_schedule_state_recovery(clp);
+}
+
 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
 	if (!flags)
 		return;
-	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) {
-		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		nfs4_state_start_reclaim_reboot(clp);
-		nfs4_schedule_state_recovery(clp);
-	} else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+		nfs41_handle_server_reboot(clp);
+	else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
 			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
 			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
-			    SEQ4_STATUS_RECALLABLE_STATE_REVOKED |
-			    SEQ4_STATUS_LEASE_MOVED)) {
-		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		nfs4_state_start_reclaim_nograce(clp);
-		nfs4_schedule_state_recovery(clp);
-	} else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+			    SEQ4_STATUS_LEASE_MOVED))
+		nfs41_handle_state_revoked(clp);
+	else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+		nfs41_handle_recallable_state_revoked(clp);
+	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
 			    SEQ4_STATUS_BACKCHANNEL_FAULT |
 			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
-		nfs_expire_all_delegations(clp);
+		nfs41_handle_cb_path_down(clp);
 }
 
 static int nfs4_reset_session(struct nfs_client *clp)
···
 
 	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
 	status = nfs4_proc_create_session(clp);
-	if (status)
+	if (status) {
 		status = nfs4_recovery_handle_error(clp, status);
+		goto out;
+	}
+	/* create_session negotiated new slot table */
+	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
 
-out:
-	/*
-	 * Let the state manager reestablish state
-	 */
-	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
-	    status == 0)
+	/* Let the state manager reestablish state */
+	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
 		nfs41_setup_state_renewal(clp);
-
+out:
 	return status;
+}
+
+static int nfs4_recall_slot(struct nfs_client *clp)
+{
+	struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
+	struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
+	struct nfs4_slot *new, *old;
+	int i;
+
+	nfs4_begin_drain_session(clp);
+	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
+		      GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
+
+	spin_lock(&fc_tbl->slot_tbl_lock);
+	for (i = 0; i < fc_tbl->target_max_slots; i++)
+		new[i].seq_nr = fc_tbl->slots[i].seq_nr;
+	old = fc_tbl->slots;
+	fc_tbl->slots = new;
+	fc_tbl->max_slots = fc_tbl->target_max_slots;
+	fc_tbl->target_max_slots = 0;
+	fc_attrs->max_reqs = fc_tbl->max_slots;
+	spin_unlock(&fc_tbl->slot_tbl_lock);
+
+	kfree(old);
+	nfs4_end_drain_session(clp);
+	return 0;
 }
 
 #else /* CONFIG_NFS_V4_1 */
 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
+static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
 #endif /* CONFIG_NFS_V4_1 */
 
 /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
···
 		case -NFS4ERR_DELAY:
 		case -NFS4ERR_CLID_INUSE:
 		case -EAGAIN:
+		case -EKEYEXPIRED:
 			break;
 
 		case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
···
 			nfs_client_return_marked_delegations(clp);
 			continue;
 		}
+		/* Recall session slots */
+		if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
+		   && nfs4_has_session(clp)) {
+			status = nfs4_recall_slot(clp);
+			if (status < 0)
+				goto out_error;
+			continue;
+		}
+
 
 		nfs4_clear_state_manager_bit(clp);
 		/* Did we race with an attempt to give us more work? */
+9 -1
fs/nfs/nfs4xdr.c
···
 	char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
 	uint32_t len;
 	struct nfs_client *clp = args->client;
+	u32 max_resp_sz_cached;
+
+	/*
+	 * Assumes OPEN is the biggest non-idempotent compound.
+	 * 2 is the verifier.
+	 */
+	max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
+			      RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
 
 	len = scnprintf(machine_name, sizeof(machine_name), "%s",
 			clp->cl_ipaddr);
···
 	*p++ = cpu_to_be32(args->fc_attrs.headerpadsz);	/* header padding size */
 	*p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz);	/* max req size */
 	*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz);	/* max resp size */
-	*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached);	/* Max resp sz cached */
+	*p++ = cpu_to_be32(max_resp_sz_cached);	/* Max resp sz cached */
 	*p++ = cpu_to_be32(args->fc_attrs.max_ops);	/* max operations */
 	*p++ = cpu_to_be32(args->fc_attrs.max_reqs);	/* max requests */
 	*p++ = cpu_to_be32(0);	/* rdmachannel_attrs */
+41
fs/nfs/proc.c
···
 #define NFSDBG_FACILITY NFSDBG_PROC
 
 /*
+ * wrapper to handle the -EKEYEXPIRED error message. This should generally
+ * only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't
+ * support the NFSERR_JUKEBOX error code, but we handle this situation in the
+ * same way that we handle that error with NFSv3.
+ */
+static int
+nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
+{
+	int res;
+	do {
+		res = rpc_call_sync(clnt, msg, flags);
+		if (res != -EKEYEXPIRED)
+			break;
+		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		res = -ERESTARTSYS;
+	} while (!fatal_signal_pending(current));
+	return res;
+}
+
+#define rpc_call_sync(clnt, msg, flags)	nfs_rpc_wrapper(clnt, msg, flags)
+
+static int
+nfs_async_handle_expired_key(struct rpc_task *task)
+{
+	if (task->tk_status != -EKEYEXPIRED)
+		return 0;
+	task->tk_status = 0;
+	rpc_restart_call(task);
+	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+	return 1;
+}
+
+/*
  * Bare-bones access to getattr: this is for nfs_read_super.
  */
 static int
···
 
 static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
 {
+	if (nfs_async_handle_expired_key(task))
+		return 0;
 	nfs_mark_for_revalidate(dir);
 	return 1;
 }
···
 
 static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
 {
+	if (nfs_async_handle_expired_key(task))
+		return -EAGAIN;
+
 	nfs_invalidate_atime(data->inode);
 	if (task->tk_status >= 0) {
 		nfs_refresh_inode(data->inode, data->res.fattr);
···
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
+	if (nfs_async_handle_expired_key(task))
+		return -EAGAIN;
+
 	if (task->tk_status >= 0)
 		nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
 	return 0;
+1 -1
fs/nfs/symlink.c
···
 	struct page *page;
 	void *err;
 
-	err = ERR_PTR(nfs_revalidate_mapping_nolock(inode, inode->i_mapping));
+	err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
 	if (err)
 		goto read_failed;
 	page = read_cache_page(&inode->i_data, 0,
+96 -191
fs/nfs/write.c
··· 438 438 radix_tree_tag_set(&nfsi->nfs_page_tree, 439 439 req->wb_index, 440 440 NFS_PAGE_TAG_COMMIT); 441 + nfsi->ncommit++; 441 442 spin_unlock(&inode->i_lock); 442 443 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 443 444 inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); ··· 502 501 } 503 502 #endif 504 503 505 - /* 506 - * Wait for a request to complete. 507 - * 508 - * Interruptible by fatal signals only. 509 - */ 510 - static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages) 511 - { 512 - struct nfs_inode *nfsi = NFS_I(inode); 513 - struct nfs_page *req; 514 - pgoff_t idx_end, next; 515 - unsigned int res = 0; 516 - int error; 517 - 518 - if (npages == 0) 519 - idx_end = ~0; 520 - else 521 - idx_end = idx_start + npages - 1; 522 - 523 - next = idx_start; 524 - while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) { 525 - if (req->wb_index > idx_end) 526 - break; 527 - 528 - next = req->wb_index + 1; 529 - BUG_ON(!NFS_WBACK_BUSY(req)); 530 - 531 - kref_get(&req->wb_kref); 532 - spin_unlock(&inode->i_lock); 533 - error = nfs_wait_on_request(req); 534 - nfs_release_request(req); 535 - spin_lock(&inode->i_lock); 536 - if (error < 0) 537 - return error; 538 - res++; 539 - } 540 - return res; 541 - } 542 - 543 - static void nfs_cancel_commit_list(struct list_head *head) 544 - { 545 - struct nfs_page *req; 546 - 547 - while(!list_empty(head)) { 548 - req = nfs_list_entry(head->next); 549 - nfs_list_remove_request(req); 550 - nfs_clear_request_commit(req); 551 - nfs_inode_remove_request(req); 552 - nfs_unlock_request(req); 553 - } 554 - } 555 - 556 504 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 557 505 static int 558 506 nfs_need_commit(struct nfs_inode *nfsi) ··· 523 573 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) 524 574 { 525 575 struct nfs_inode *nfsi = NFS_I(inode); 576 + int ret; 526 577 527 578 if (!nfs_need_commit(nfsi)) 528 579 return 0; 529 580 530 - return nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); 581 + ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); 582 + if (ret > 0) 583 + nfsi->ncommit -= ret; 584 + if (nfs_need_commit(NFS_I(inode))) 585 + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 586 + return ret; 531 587 } 532 588 #else 533 589 static inline int nfs_need_commit(struct nfs_inode *nfsi) ··· 598 642 spin_lock(&inode->i_lock); 599 643 } 600 644 601 - if (nfs_clear_request_commit(req)) 602 - radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, 603 - req->wb_index, NFS_PAGE_TAG_COMMIT); 645 + if (nfs_clear_request_commit(req) && 646 + radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, 647 + req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) 648 + NFS_I(inode)->ncommit--; 604 649 605 650 /* Okay, the request matches. 
Update the region */ 606 651 if (offset < req->wb_offset) { ··· 1348 1391 .rpc_release = nfs_commit_release, 1349 1392 }; 1350 1393 1351 - int nfs_commit_inode(struct inode *inode, int how) 1394 + static int nfs_commit_inode(struct inode *inode, int how) 1352 1395 { 1353 1396 LIST_HEAD(head); 1354 1397 int res; ··· 1363 1406 } 1364 1407 return res; 1365 1408 } 1409 + 1410 + static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) 1411 + { 1412 + struct nfs_inode *nfsi = NFS_I(inode); 1413 + int flags = FLUSH_SYNC; 1414 + int ret = 0; 1415 + 1416 + /* Don't commit yet if this is a non-blocking flush and there are 1417 + * lots of outstanding writes for this mapping. 1418 + */ 1419 + if (wbc->sync_mode == WB_SYNC_NONE && 1420 + nfsi->ncommit <= (nfsi->npages >> 1)) 1421 + goto out_mark_dirty; 1422 + 1423 + if (wbc->nonblocking || wbc->for_background) 1424 + flags = 0; 1425 + ret = nfs_commit_inode(inode, flags); 1426 + if (ret >= 0) { 1427 + if (wbc->sync_mode == WB_SYNC_NONE) { 1428 + if (ret < wbc->nr_to_write) 1429 + wbc->nr_to_write -= ret; 1430 + else 1431 + wbc->nr_to_write = 0; 1432 + } 1433 + return 0; 1434 + } 1435 + out_mark_dirty: 1436 + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1437 + return ret; 1438 + } 1366 1439 #else 1367 - static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) 1440 + static int nfs_commit_inode(struct inode *inode, int how) 1441 + { 1442 + return 0; 1443 + } 1444 + 1445 + static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) 1368 1446 { 1369 1447 return 0; 1370 1448 } 1371 1449 #endif 1372 1450 1373 - long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) 1451 + int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1374 1452 { 1375 - struct inode *inode = mapping->host; 1376 - pgoff_t idx_start, idx_end; 1377 - unsigned int npages = 0; 1378 - LIST_HEAD(head); 1379 - int nocommit = how & FLUSH_NOCOMMIT; 1380 - long pages, ret; 1381 - 1382 - /* FIXME */ 1383 - if (wbc->range_cyclic) 1384 - idx_start = 0; 1385 - else { 1386 - idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; 1387 - idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; 1388 - if (idx_end > idx_start) { 1389 - pgoff_t l_npages = 1 + idx_end - idx_start; 1390 - npages = l_npages; 1391 - if (sizeof(npages) != sizeof(l_npages) && 1392 - (pgoff_t)npages != l_npages) 1393 - npages = 0; 1394 - } 1395 - } 1396 - how &= ~FLUSH_NOCOMMIT; 1397 - spin_lock(&inode->i_lock); 1398 - do { 1399 - ret = nfs_wait_on_requests_locked(inode, idx_start, npages); 1400 - if (ret != 0) 1401 - continue; 1402 - if (nocommit) 1403 - break; 1404 - pages = nfs_scan_commit(inode, &head, idx_start, npages); 1405 - if (pages == 0) 1406 - break; 1407 - if (how & FLUSH_INVALIDATE) { 1408 - spin_unlock(&inode->i_lock); 1409 - nfs_cancel_commit_list(&head); 1410 - ret = pages; 1411 - spin_lock(&inode->i_lock); 1412 - continue; 1413 - } 1414 - pages += nfs_scan_commit(inode, &head, 0, 0); 1415 - spin_unlock(&inode->i_lock); 1416 - ret = nfs_commit_list(inode, &head, how); 1417 - spin_lock(&inode->i_lock); 1418 - 1419 - } while (ret >= 0); 1420 - spin_unlock(&inode->i_lock); 1421 - return ret; 1422 - } 1423 - 1424 - static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) 1425 - { 1426 - int ret; 1427 - 1428 - ret = nfs_writepages(mapping, wbc); 1429 - if (ret < 0) 1430 - goto out; 1431 - ret = nfs_sync_mapping_wait(mapping, wbc, how); 
1432 - if (ret < 0) 1433 - goto out; 1434 - return 0; 1435 - out: 1436 - __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1437 - return ret; 1438 - } 1439 - 1440 - /* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */ 1441 - static int nfs_write_mapping(struct address_space *mapping, int how) 1442 - { 1443 - struct writeback_control wbc = { 1444 - .bdi = mapping->backing_dev_info, 1445 - .sync_mode = WB_SYNC_ALL, 1446 - .nr_to_write = LONG_MAX, 1447 - .range_start = 0, 1448 - .range_end = LLONG_MAX, 1449 - }; 1450 - 1451 - return __nfs_write_mapping(mapping, &wbc, how); 1453 + return nfs_commit_unstable_pages(inode, wbc); 1452 1454 } 1453 1455 1454 1456 /* ··· 1415 1499 */ 1416 1500 int nfs_wb_all(struct inode *inode) 1417 1501 { 1418 - return nfs_write_mapping(inode->i_mapping, 0); 1419 - } 1502 + struct writeback_control wbc = { 1503 + .sync_mode = WB_SYNC_ALL, 1504 + .nr_to_write = LONG_MAX, 1505 + .range_start = 0, 1506 + .range_end = LLONG_MAX, 1507 + }; 1420 1508 1421 - int nfs_wb_nocommit(struct inode *inode) 1422 - { 1423 - return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT); 1509 + return sync_inode(inode, &wbc); 1424 1510 } 1425 1511 1426 1512 int nfs_wb_page_cancel(struct inode *inode, struct page *page) 1427 1513 { 1428 1514 struct nfs_page *req; 1429 - loff_t range_start = page_offset(page); 1430 - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 1431 - struct writeback_control wbc = { 1432 - .bdi = page->mapping->backing_dev_info, 1433 - .sync_mode = WB_SYNC_ALL, 1434 - .nr_to_write = LONG_MAX, 1435 - .range_start = range_start, 1436 - .range_end = range_end, 1437 - }; 1438 1515 int ret = 0; 1439 1516 1440 1517 BUG_ON(!PageLocked(page)); 1441 1518 for (;;) { 1442 1519 req = nfs_page_find_request(page); 1443 1520 if (req == NULL) 1444 - goto out; 1445 - if (test_bit(PG_CLEAN, &req->wb_flags)) { 1446 - nfs_release_request(req); 1447 1521 break; 1448 - } 1449 1522 if (nfs_lock_request_dontget(req)) { 1450 1523 nfs_inode_remove_request(req); 1451 1524 /* ··· 1448 1543 ret = nfs_wait_on_request(req); 1449 1544 nfs_release_request(req); 1450 1545 if (ret < 0) 1451 - goto out; 1452 - } 1453 - if (!PagePrivate(page)) 1454 - return 0; 1455 - ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE); 1456 - out: 1457 - return ret; 1458 - } 1459 - 1460 - static int nfs_wb_page_priority(struct inode *inode, struct page *page, 1461 - int how) 1462 - { 1463 - loff_t range_start = page_offset(page); 1464 - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 1465 - struct writeback_control wbc = { 1466 - .bdi = page->mapping->backing_dev_info, 1467 - .sync_mode = WB_SYNC_ALL, 1468 - .nr_to_write = LONG_MAX, 1469 - .range_start = range_start, 1470 - .range_end = range_end, 1471 - }; 1472 - int ret; 1473 - 1474 - do { 1475 - if (clear_page_dirty_for_io(page)) { 1476 - ret = nfs_writepage_locked(page, &wbc); 1477 - if (ret < 0) 1478 - goto out_error; 1479 - } else if (!PagePrivate(page)) 1480 1546 break; 1481 - ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); 1482 - if (ret < 0) 1483 - goto out_error; 1484 - } while (PagePrivate(page)); 1485 - return 0; 1486 - out_error: 1487 - __mark_inode_dirty(inode, I_DIRTY_PAGES); 1547 + } 1488 1548 return ret; 1489 1549 } 1490 1550 1491 1551 /* 1492 1552 * Write back all requests on one page - we do this before reading it. 
1493 1553 */ 1494 - int nfs_wb_page(struct inode *inode, struct page* page) 1554 + int nfs_wb_page(struct inode *inode, struct page *page) 1495 1555 { 1496 - return nfs_wb_page_priority(inode, page, FLUSH_STABLE); 1556 + loff_t range_start = page_offset(page); 1557 + loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 1558 + struct writeback_control wbc = { 1559 + .sync_mode = WB_SYNC_ALL, 1560 + .nr_to_write = 0, 1561 + .range_start = range_start, 1562 + .range_end = range_end, 1563 + }; 1564 + struct nfs_page *req; 1565 + int need_commit; 1566 + int ret; 1567 + 1568 + while(PagePrivate(page)) { 1569 + if (clear_page_dirty_for_io(page)) { 1570 + ret = nfs_writepage_locked(page, &wbc); 1571 + if (ret < 0) 1572 + goto out_error; 1573 + } 1574 + req = nfs_find_and_lock_request(page); 1575 + if (!req) 1576 + break; 1577 + if (IS_ERR(req)) { 1578 + ret = PTR_ERR(req); 1579 + goto out_error; 1580 + } 1581 + need_commit = test_bit(PG_CLEAN, &req->wb_flags); 1582 + nfs_clear_page_tag_locked(req); 1583 + if (need_commit) { 1584 + ret = nfs_commit_inode(inode, FLUSH_SYNC); 1585 + if (ret < 0) 1586 + goto out_error; 1587 + } 1588 + } 1589 + return 0; 1590 + out_error: 1591 + return ret; 1497 1592 } 1498 1593 1499 1594 #ifdef CONFIG_MIGRATION
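The nfs_commit_unstable_pages() path above only sends a COMMIT from a WB_SYNC_NONE flush once unstable pages exceed half of the inode's cached pages, runs the COMMIT asynchronously for background or non-blocking writeback, and otherwise just re-marks the inode I_DIRTY_DATASYNC so a later pass retries. A stand-alone sketch of that decision follows; the parameter names are illustrative stand-ins for nfsi->ncommit, nfsi->npages and the writeback_control fields, not the kernel's own code:

	/* Sketch of the COMMIT-throttling decision, assuming boolean
	 * stand-ins for wbc->sync_mode == WB_SYNC_NONE, wbc->nonblocking
	 * and wbc->for_background. */
	#include <stdbool.h>

	enum commit_action {
		COMMIT_SYNC,		/* like nfs_commit_inode(inode, FLUSH_SYNC) */
		COMMIT_ASYNC,		/* like nfs_commit_inode(inode, 0) */
		DEFER_AND_REDIRTY,	/* leave the inode marked dirty, retry later */
	};

	static enum commit_action
	commit_decision(unsigned long unstable, unsigned long cached,
			bool sync_none, bool nonblocking, bool for_background)
	{
		/* Non-blocking flush with no more than half the cached pages
		 * awaiting COMMIT: defer, the inode stays dirty. */
		if (sync_none && unstable <= (cached >> 1))
			return DEFER_AND_REDIRTY;

		/* Background or non-blocking writeback commits asynchronously;
		 * everything else waits for the COMMIT to complete. */
		if (nonblocking || for_background)
			return COMMIT_ASYNC;
		return COMMIT_SYNC;
	}

On success the number of committed requests is credited against wbc->nr_to_write, so a WB_SYNC_NONE writeback pass can account for the work and stop early.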
+1 -13
include/linux/nfs_fs.h
··· 33 33 #define FLUSH_STABLE 4 /* commit to stable storage */ 34 34 #define FLUSH_LOWPRI 8 /* low priority background flush */ 35 35 #define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */ 36 - #define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */ 37 - #define FLUSH_INVALIDATE 64 /* Invalidate the page cache */ 38 - #define FLUSH_NOWRITEPAGE 128 /* Don't call writepage() */ 39 36 40 37 #ifdef __KERNEL__ 41 38 ··· 163 166 struct radix_tree_root nfs_page_tree; 164 167 165 168 unsigned long npages; 169 + unsigned long ncommit; 166 170 167 171 /* Open contexts for shared mmap writes */ 168 172 struct list_head open_files; ··· 347 349 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); 348 350 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); 349 351 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); 350 - extern int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping); 351 352 extern int nfs_setattr(struct dentry *, struct iattr *); 352 353 extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); 353 354 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); ··· 474 477 * Try to write back everything synchronously (but check the 475 478 * return value!) 476 479 */ 477 - extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int); 478 480 extern int nfs_wb_all(struct inode *inode); 479 - extern int nfs_wb_nocommit(struct inode *inode); 480 481 extern int nfs_wb_page(struct inode *inode, struct page* page); 481 482 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); 482 483 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 483 - extern int nfs_commit_inode(struct inode *, int); 484 484 extern struct nfs_write_data *nfs_commitdata_alloc(void); 485 485 extern void nfs_commit_free(struct nfs_write_data *wdata); 486 - #else 487 - static inline int 488 - nfs_commit_inode(struct inode *inode, int how) 489 - { 490 - return 0; 491 - } 492 486 #endif 493 487 494 488 static inline int
+2
include/linux/nfs_fs_sb.h
··· 193 193 int max_slots; /* # slots in table */ 194 194 int highest_used_slotid; /* sent to server on each SEQ. 195 195 * op for dynamic resizing */ 196 + int target_max_slots; /* Set by CB_RECALL_SLOT as 197 + * the new max_slots */ 196 198 }; 197 199 198 200 static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
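The new target_max_slots field lets the CB_RECALL_SLOT callback record the server's requested limit without shrinking the slot table from callback context; the resize is applied later, once the fore channel is quiescent. A hedged sketch of that record-now/apply-later split, using illustrative structure and function names rather than the kernel's:

	/* Hypothetical sketch: the callback stores the recalled target and
	 * the fore channel applies it the next time no slots are in use. */
	struct slot_table_sketch {
		int max_slots;
		int target_max_slots;	/* 0 means no recall pending */
	};

	static void record_recall(struct slot_table_sketch *tbl, int target)
	{
		tbl->target_max_slots = target;
	}

	static void apply_recall_if_idle(struct slot_table_sketch *tbl, int slots_in_use)
	{
		if (tbl->target_max_slots && slots_in_use == 0) {
			tbl->max_slots = tbl->target_max_slots;
			tbl->target_max_slots = 0;
		}
	}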
+15
include/linux/sunrpc/bc_xprt.h
··· 38 38 void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); 39 39 void bc_release_request(struct rpc_task *); 40 40 int bc_send(struct rpc_rqst *req); 41 + 42 + /* 43 + * Determine if a shared backchannel is in use 44 + */ 45 + static inline int svc_is_backchannel(const struct svc_rqst *rqstp) 46 + { 47 + if (rqstp->rq_server->bc_xprt) 48 + return 1; 49 + return 0; 50 + } 41 51 #else /* CONFIG_NFS_V4_1 */ 42 52 static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, 43 53 unsigned int min_reqs) 54 + { 55 + return 0; 56 + } 57 + 58 + static inline int svc_is_backchannel(const struct svc_rqst *rqstp) 44 59 { 45 60 return 0; 46 61 }
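svc_is_backchannel() is paired with a stub that always returns 0 when CONFIG_NFS_V4_1 is disabled, so the buffer-allocation path changed in the net/sunrpc/svc.c hunk below needs no #ifdefs and the compiler discards the dead branch. A generic illustration of the pattern, with hypothetical names:

	/* Illustrative only: a feature test compiled to a constant-0 stub
	 * when the feature is configured out. */
	#include <stddef.h>

	struct ctx { void *feature_data; };

	#ifdef CONFIG_FEATURE_X
	static inline int feature_x_active(const struct ctx *c)
	{
		return c->feature_data != NULL;
	}
	#else
	static inline int feature_x_active(const struct ctx *c)
	{
		return 0;
	}
	#endif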
+4 -4
net/sunrpc/addr.c
··· 71 71 if (unlikely(len == 0)) 72 72 return len; 73 73 74 - if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 75 - !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) 74 + if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 75 + return len; 76 + if (sin6->sin6_scope_id == 0) 76 77 return len; 77 78 78 79 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", ··· 166 165 if (*delim != IPV6_SCOPE_DELIMITER) 167 166 return 0; 168 167 169 - if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 170 - !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) 168 + if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 171 169 return 0; 172 170 173 171 len = (buf + buflen) - delim - 1;
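With this change only link-local addresses carry a "%scope" suffix, a zero sin6_scope_id suppresses the suffix entirely, and site-local addresses are printed and parsed without one. A user-space illustration of the same rule (not the kernel helpers):

	/* Illustration: append "%<scope>" only for link-local addresses with
	 * a non-zero scope id, e.g. "fe80::1%2" vs "2001:db8::1". */
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <stdio.h>

	static void print_with_scope(const struct sockaddr_in6 *sin6)
	{
		char buf[INET6_ADDRSTRLEN];

		inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf));
		if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) && sin6->sin6_scope_id != 0)
			printf("%s%%%u\n", buf, (unsigned)sin6->sin6_scope_id);
		else
			printf("%s\n", buf);
	}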
+9 -2
net/sunrpc/auth_gss/auth_gss.c
··· 206 206 ctx->gc_win = window_size; 207 207 /* gssd signals an error by passing ctx->gc_win = 0: */ 208 208 if (ctx->gc_win == 0) { 209 - /* in which case, p points to an error code which we ignore */ 210 - p = ERR_PTR(-EACCES); 209 + /* 210 + * in which case, p points to an error code. Anything other 211 + * than -EKEYEXPIRED gets converted to -EACCES. 212 + */ 213 + p = simple_get_bytes(p, end, &ret, sizeof(ret)); 214 + if (!IS_ERR(p)) 215 + p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) : 216 + ERR_PTR(-EACCES); 211 217 goto err; 212 218 } 213 219 /* copy the opaque wire context */ ··· 652 646 err = PTR_ERR(p); 653 647 switch (err) { 654 648 case -EACCES: 649 + case -EKEYEXPIRED: 655 650 gss_msg->msg.errno = err; 656 651 err = mlen; 657 652 break;
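In other words, the error code gssd writes after a zero window size now survives as -EKEYEXPIRED (an expired credential), while every other value still collapses to -EACCES; the downcall switch in the second hunk then queues -EKEYEXPIRED the same way it queues -EACCES. A minimal sketch of that mapping, not the kernel function itself:

	#include <errno.h>

	/* Sketch: preserve an expired-credential report, fold everything
	 * else from the upcall into a plain permission failure. */
	static int gss_map_upcall_error(int err)
	{
		return (err == -EKEYEXPIRED) ? -EKEYEXPIRED : -EACCES;
	}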
+4
net/sunrpc/svc.c
··· 506 506 { 507 507 unsigned int pages, arghi; 508 508 509 + /* bc_xprt uses fore channel allocated buffers */ 510 + if (svc_is_backchannel(rqstp)) 511 + return 1; 512 + 509 513 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 510 514 * We assume one is at most one page 511 515 */
+7 -2
net/sunrpc/xprtsock.c
··· 1912 1912 case -EALREADY: 1913 1913 xprt_clear_connecting(xprt); 1914 1914 return; 1915 + case -EINVAL: 1916 + /* Happens, for instance, if the user specified a link 1917 + * local IPv6 address without a scope-id. 1918 + */ 1919 + goto out; 1915 1920 } 1916 1921 out_eagain: 1917 1922 status = -EAGAIN; ··· 2105 2100 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2106 2101 * to use the server side send routines. 2107 2102 */ 2108 - void *bc_malloc(struct rpc_task *task, size_t size) 2103 + static void *bc_malloc(struct rpc_task *task, size_t size) 2109 2104 { 2110 2105 struct page *page; 2111 2106 struct rpc_buffer *buf; ··· 2125 2120 /* 2126 2121 * Free the space allocated in the bc_alloc routine 2127 2122 */ 2128 - void bc_free(void *buffer) 2123 + static void bc_free(void *buffer) 2129 2124 { 2130 2125 struct rpc_buffer *buf; 2131 2126
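The new -EINVAL case turns an immediate connect failure, such as a link-local IPv6 address given without a scope id, into a hard error instead of looping on EAGAIN. A small user-space program showing that failure mode (illustrative only; the address and port are arbitrary, and the exact errno can vary by platform):

	#include <arpa/inet.h>
	#include <errno.h>
	#include <netinet/in.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6,
					     .sin6_port = htons(2049) };
		int sock = socket(AF_INET6, SOCK_STREAM, 0);

		if (sock < 0)
			return 1;
		inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
		/* sin6.sin6_scope_id deliberately left at 0: no interface given */
		if (connect(sock, (struct sockaddr *)&sin6, sizeof(sin6)) < 0)
			printf("connect: %s\n", strerror(errno));	/* typically "Invalid argument" */
		close(sock);
		return 0;
	}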