Merge branch 'nfs-for-2.6.34' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6

* 'nfs-for-2.6.34' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (44 commits)
NFS: Remove requirement for inode->i_mutex from nfs_invalidate_mapping
NFS: Clean up nfs_sync_mapping
NFS: Simplify nfs_wb_page()
NFS: Replace __nfs_write_mapping with sync_inode()
NFS: Simplify nfs_wb_page_cancel()
NFS: Ensure inode is always marked I_DIRTY_DATASYNC, if it has unstable pages
NFS: Run COMMIT as an asynchronous RPC call when wbc->for_background is set
NFS: Reduce the number of unnecessary COMMIT calls
NFS: Add a count of the number of unstable writes carried by an inode
NFS: Cleanup - move nfs_write_inode() into fs/nfs/write.c
nfs41 fix NFS4ERR_CLID_INUSE for exchange id
NFS: Fix an allocation-under-spinlock bug
SUNRPC: Handle EINVAL error returns from the TCP connect operation
NFSv4.1: Various fixes to the sequence flag error handling
nfs4: renewd renew operations should take/put a client reference
nfs41: renewd sequence operations should take/put client reference
nfs: prevent backlogging of renewd requests
nfs: kill renewd before clearing client minor version
NFS: Make close(2) asynchronous when closing NFS O_DIRECT files
NFS: Improve NFS iostat byte count accuracy for writes
...

+664 -465
+8
fs/nfs/callback.h
···
119 };
120
121 extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
122 #endif /* CONFIG_NFS_V4_1 */
123
124 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
···
119 };
120
121 extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
122 +
123 + struct cb_recallslotargs {
124 + 	struct sockaddr	*crsa_addr;
125 + 	uint32_t	crsa_target_max_slots;
126 + };
127 + extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
128 + 					void *dummy);
129 +
130 #endif /* CONFIG_NFS_V4_1 */
131
132 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
+137 -28
fs/nfs/callback_proc.c
··· 143 * Return success if the sequenceID is one more than what we last saw on 144 * this slot, accounting for wraparound. Increments the slot's sequence. 145 * 146 - * We don't yet implement a duplicate request cache, so at this time 147 - * we will log replays, and process them as if we had not seen them before, 148 - * but we don't bump the sequence in the slot. Not too worried about it, 149 * since we only currently implement idempotent callbacks anyway. 150 * 151 * We have a single slot backchannel at this time, so we don't bother 152 * checking the used_slots bit array on the table. The lower layer guarantees 153 * a single outstanding callback request at a time. 154 */ 155 - static int 156 - validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid) 157 { 158 struct nfs4_slot *slot; 159 160 dprintk("%s enter. slotid %d seqid %d\n", 161 - __func__, slotid, seqid); 162 163 - if (slotid > NFS41_BC_MAX_CALLBACKS) 164 return htonl(NFS4ERR_BADSLOT); 165 166 - slot = tbl->slots + slotid; 167 dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr); 168 169 /* Normal */ 170 - if (likely(seqid == slot->seq_nr + 1)) { 171 slot->seq_nr++; 172 return htonl(NFS4_OK); 173 } 174 175 /* Replay */ 176 - if (seqid == slot->seq_nr) { 177 - dprintk("%s seqid %d is a replay - no DRC available\n", 178 - __func__, seqid); 179 - return htonl(NFS4_OK); 180 } 181 182 /* Wraparound */ 183 - if (seqid == 1 && (slot->seq_nr + 1) == 0) { 184 slot->seq_nr = 1; 185 return htonl(NFS4_OK); 186 } ··· 230 return NULL; 231 } 232 233 - /* FIXME: referring calls should be processed */ 234 - unsigned nfs4_callback_sequence(struct cb_sequenceargs *args, 235 struct cb_sequenceres *res) 236 { 237 struct nfs_client *clp; 238 - int i, status; 239 - 240 - for (i = 0; i < args->csa_nrclists; i++) 241 - kfree(args->csa_rclists[i].rcl_refcalls); 242 - kfree(args->csa_rclists); 243 244 status = htonl(NFS4ERR_BADSESSION); 245 clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid); 246 if (clp == NULL) 247 goto out; 248 249 - status = validate_seqid(&clp->cl_session->bc_slot_table, 250 - args->csa_slotid, args->csa_sequenceid); 251 if (status) 252 goto out_putclient; 253 254 memcpy(&res->csr_sessionid, &args->csa_sessionid, 255 sizeof(res->csr_sessionid)); ··· 321 out_putclient: 322 nfs_put_client(clp); 323 out: 324 - dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 325 - res->csr_status = status; 326 - return res->csr_status; 327 } 328 329 - unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy) 330 { 331 struct nfs_client *clp; 332 - int status; 333 fmode_t flags = 0; 334 335 status = htonl(NFS4ERR_OP_NOT_IN_SESSION); ··· 358 if (flags) 359 nfs_expire_all_delegation_types(clp, flags); 360 status = htonl(NFS4_OK); 361 out: 362 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 363 return status;
··· 143 * Return success if the sequenceID is one more than what we last saw on 144 * this slot, accounting for wraparound. Increments the slot's sequence. 145 * 146 + * We don't yet implement a duplicate request cache, instead we set the 147 + * back channel ca_maxresponsesize_cached to zero. This is OK for now 148 * since we only currently implement idempotent callbacks anyway. 149 * 150 * We have a single slot backchannel at this time, so we don't bother 151 * checking the used_slots bit array on the table. The lower layer guarantees 152 * a single outstanding callback request at a time. 153 */ 154 + static __be32 155 + validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) 156 { 157 struct nfs4_slot *slot; 158 159 dprintk("%s enter. slotid %d seqid %d\n", 160 + __func__, args->csa_slotid, args->csa_sequenceid); 161 162 + if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS) 163 return htonl(NFS4ERR_BADSLOT); 164 165 + slot = tbl->slots + args->csa_slotid; 166 dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr); 167 168 /* Normal */ 169 + if (likely(args->csa_sequenceid == slot->seq_nr + 1)) { 170 slot->seq_nr++; 171 return htonl(NFS4_OK); 172 } 173 174 /* Replay */ 175 + if (args->csa_sequenceid == slot->seq_nr) { 176 + dprintk("%s seqid %d is a replay\n", 177 + __func__, args->csa_sequenceid); 178 + /* Signal process_op to set this error on next op */ 179 + if (args->csa_cachethis == 0) 180 + return htonl(NFS4ERR_RETRY_UNCACHED_REP); 181 + 182 + /* The ca_maxresponsesize_cached is 0 with no DRC */ 183 + else if (args->csa_cachethis == 1) 184 + return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE); 185 } 186 187 /* Wraparound */ 188 + if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) { 189 slot->seq_nr = 1; 190 return htonl(NFS4_OK); 191 } ··· 225 return NULL; 226 } 227 228 + /* 229 + * For each referring call triple, check the session's slot table for 230 + * a match. If the slot is in use and the sequence numbers match, the 231 + * client is still waiting for a response to the original request. 
232 + */ 233 + static bool referring_call_exists(struct nfs_client *clp, 234 + uint32_t nrclists, 235 + struct referring_call_list *rclists) 236 + { 237 + bool status = 0; 238 + int i, j; 239 + struct nfs4_session *session; 240 + struct nfs4_slot_table *tbl; 241 + struct referring_call_list *rclist; 242 + struct referring_call *ref; 243 + 244 + /* 245 + * XXX When client trunking is implemented, this becomes 246 + * a session lookup from within the loop 247 + */ 248 + session = clp->cl_session; 249 + tbl = &session->fc_slot_table; 250 + 251 + for (i = 0; i < nrclists; i++) { 252 + rclist = &rclists[i]; 253 + if (memcmp(session->sess_id.data, 254 + rclist->rcl_sessionid.data, 255 + NFS4_MAX_SESSIONID_LEN) != 0) 256 + continue; 257 + 258 + for (j = 0; j < rclist->rcl_nrefcalls; j++) { 259 + ref = &rclist->rcl_refcalls[j]; 260 + 261 + dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u " 262 + "slotid %u\n", __func__, 263 + ((u32 *)&rclist->rcl_sessionid.data)[0], 264 + ((u32 *)&rclist->rcl_sessionid.data)[1], 265 + ((u32 *)&rclist->rcl_sessionid.data)[2], 266 + ((u32 *)&rclist->rcl_sessionid.data)[3], 267 + ref->rc_sequenceid, ref->rc_slotid); 268 + 269 + spin_lock(&tbl->slot_tbl_lock); 270 + status = (test_bit(ref->rc_slotid, tbl->used_slots) && 271 + tbl->slots[ref->rc_slotid].seq_nr == 272 + ref->rc_sequenceid); 273 + spin_unlock(&tbl->slot_tbl_lock); 274 + if (status) 275 + goto out; 276 + } 277 + } 278 + 279 + out: 280 + return status; 281 + } 282 + 283 + __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, 284 struct cb_sequenceres *res) 285 { 286 struct nfs_client *clp; 287 + int i; 288 + __be32 status; 289 290 status = htonl(NFS4ERR_BADSESSION); 291 clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid); 292 if (clp == NULL) 293 goto out; 294 295 + status = validate_seqid(&clp->cl_session->bc_slot_table, args); 296 if (status) 297 goto out_putclient; 298 + 299 + /* 300 + * Check for pending referring calls. If a match is found, a 301 + * related callback was received before the response to the original 302 + * call. 
303 + */ 304 + if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { 305 + status = htonl(NFS4ERR_DELAY); 306 + goto out_putclient; 307 + } 308 309 memcpy(&res->csr_sessionid, &args->csa_sessionid, 310 sizeof(res->csr_sessionid)); ··· 256 out_putclient: 257 nfs_put_client(clp); 258 out: 259 + for (i = 0; i < args->csa_nrclists; i++) 260 + kfree(args->csa_rclists[i].rcl_refcalls); 261 + kfree(args->csa_rclists); 262 + 263 + if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) 264 + res->csr_status = 0; 265 + else 266 + res->csr_status = status; 267 + dprintk("%s: exit with status = %d res->csr_status %d\n", __func__, 268 + ntohl(status), ntohl(res->csr_status)); 269 + return status; 270 } 271 272 + __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy) 273 { 274 struct nfs_client *clp; 275 + __be32 status; 276 fmode_t flags = 0; 277 278 status = htonl(NFS4ERR_OP_NOT_IN_SESSION); ··· 285 if (flags) 286 nfs_expire_all_delegation_types(clp, flags); 287 status = htonl(NFS4_OK); 288 + out: 289 + dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 290 + return status; 291 + } 292 + 293 + /* Reduce the fore channel's max_slots to the target value */ 294 + __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy) 295 + { 296 + struct nfs_client *clp; 297 + struct nfs4_slot_table *fc_tbl; 298 + __be32 status; 299 + 300 + status = htonl(NFS4ERR_OP_NOT_IN_SESSION); 301 + clp = nfs_find_client(args->crsa_addr, 4); 302 + if (clp == NULL) 303 + goto out; 304 + 305 + dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", 306 + rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 307 + args->crsa_target_max_slots); 308 + 309 + fc_tbl = &clp->cl_session->fc_slot_table; 310 + 311 + status = htonl(NFS4ERR_BAD_HIGH_SLOT); 312 + if (args->crsa_target_max_slots > fc_tbl->max_slots || 313 + args->crsa_target_max_slots < 1) 314 + goto out_putclient; 315 + 316 + status = htonl(NFS4_OK); 317 + if (args->crsa_target_max_slots == fc_tbl->max_slots) 318 + goto out_putclient; 319 + 320 + fc_tbl->target_max_slots = args->crsa_target_max_slots; 321 + nfs41_handle_recall_slot(clp); 322 + out_putclient: 323 + nfs_put_client(clp); /* balance nfs_find_client */ 324 out: 325 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 326 return status;
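The slot sequencing rules in validate_seqid() above (accept seq_nr + 1, treat an equal seqid as a replay that cannot be served from a cache, allow wraparound) can be summarised in a small standalone form. The following is a simplified userspace sketch of that decision table, not the kernel code; the enum values merely stand in for the NFS4ERR_* constants, and CB_SEQ_MISORDERED is assumed for the fall-through case.

#include <stdbool.h>
#include <stdint.h>

enum cb_seq_status {
	CB_SEQ_OK,
	CB_SEQ_RETRY_UNCACHED_REP,	/* replay, caller did not ask for caching */
	CB_SEQ_REP_TOO_BIG_TO_CACHE,	/* replay, caching requested but DRC size is 0 */
	CB_SEQ_MISORDERED,
};

struct bc_slot {
	uint32_t seq_nr;		/* last sequence id accepted on this slot */
};

static enum cb_seq_status bc_validate_seqid(struct bc_slot *slot,
					    uint32_t seqid, bool cachethis)
{
	/* Normal case: the next sequence id in order. */
	if (seqid == slot->seq_nr + 1) {
		slot->seq_nr++;
		return CB_SEQ_OK;
	}
	/* Replay of the last request; with no DRC it cannot be answered from cache. */
	if (seqid == slot->seq_nr)
		return cachethis ? CB_SEQ_REP_TOO_BIG_TO_CACHE
				 : CB_SEQ_RETRY_UNCACHED_REP;
	/* Wraparound: seq_nr was UINT32_MAX and the client restarts at 1. */
	if (seqid == 1 && slot->seq_nr + 1 == 0) {
		slot->seq_nr = 1;
		return CB_SEQ_OK;
	}
	return CB_SEQ_MISORDERED;
}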
+73 -32
fs/nfs/callback_xdr.c
··· 24 #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ 25 4 + 1 + 3) 26 #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 27 #endif /* CONFIG_NFS_V4_1 */ 28 29 #define NFSDBG_FACILITY NFSDBG_CALLBACK 30 31 typedef __be32 (*callback_process_op_t)(void *, void *); 32 typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); ··· 177 __be32 *p; 178 p = read_buf(xdr, 4); 179 if (unlikely(p == NULL)) 180 - return htonl(NFS4ERR_RESOURCE); 181 *op = ntohl(*p); 182 return 0; 183 } ··· 219 220 #if defined(CONFIG_NFS_V4_1) 221 222 - static unsigned decode_sessionid(struct xdr_stream *xdr, 223 struct nfs4_sessionid *sid) 224 { 225 - uint32_t *p; 226 int len = NFS4_MAX_SESSIONID_LEN; 227 228 p = read_buf(xdr, len); ··· 233 return 0; 234 } 235 236 - static unsigned decode_rc_list(struct xdr_stream *xdr, 237 struct referring_call_list *rc_list) 238 { 239 - uint32_t *p; 240 int i; 241 - unsigned status; 242 243 status = decode_sessionid(xdr, &rc_list->rcl_sessionid); 244 if (status) ··· 271 return status; 272 } 273 274 - static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp, 275 struct xdr_stream *xdr, 276 struct cb_sequenceargs *args) 277 { 278 - uint32_t *p; 279 int i; 280 - unsigned status; 281 282 status = decode_sessionid(xdr, &args->csa_sessionid); 283 if (status) ··· 331 goto out; 332 } 333 334 - static unsigned decode_recallany_args(struct svc_rqst *rqstp, 335 struct xdr_stream *xdr, 336 struct cb_recallanyargs *args) 337 { 338 - uint32_t *p; 339 340 args->craa_addr = svc_addr(rqstp); 341 p = read_buf(xdr, 4); ··· 347 return htonl(NFS4ERR_BADXDR); 348 args->craa_type_mask = ntohl(*p); 349 350 return 0; 351 } 352 ··· 483 484 p = xdr_reserve_space(xdr, 8); 485 if (unlikely(p == NULL)) 486 - return htonl(NFS4ERR_RESOURCE); 487 *p++ = htonl(op); 488 *p = res; 489 return 0; ··· 517 518 #if defined(CONFIG_NFS_V4_1) 519 520 - static unsigned encode_sessionid(struct xdr_stream *xdr, 521 const struct nfs4_sessionid *sid) 522 { 523 - uint32_t *p; 524 int len = NFS4_MAX_SESSIONID_LEN; 525 526 p = xdr_reserve_space(xdr, len); ··· 531 return 0; 532 } 533 534 - static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp, 535 struct xdr_stream *xdr, 536 const struct cb_sequenceres *res) 537 { 538 - uint32_t *p; 539 unsigned status = res->csr_status; 540 541 if (unlikely(status != 0)) ··· 572 case OP_CB_RECALL: 573 case OP_CB_SEQUENCE: 574 case OP_CB_RECALL_ANY: 575 *op = &callback_ops[op_nr]; 576 break; 577 ··· 581 case OP_CB_NOTIFY: 582 case OP_CB_PUSH_DELEG: 583 case OP_CB_RECALLABLE_OBJ_AVAIL: 584 - case OP_CB_RECALL_SLOT: 585 case OP_CB_WANTS_CANCELLED: 586 case OP_CB_NOTIFY_LOCK: 587 return htonl(NFS4ERR_NOTSUPP); ··· 620 static __be32 process_op(uint32_t minorversion, int nop, 621 struct svc_rqst *rqstp, 622 struct xdr_stream *xdr_in, void *argp, 623 - struct xdr_stream *xdr_out, void *resp) 624 { 625 struct callback_op *op = &callback_ops[0]; 626 - unsigned int op_nr = OP_CB_ILLEGAL; 627 __be32 status; 628 long maxlen; 629 __be32 res; 630 631 dprintk("%s: start\n", __func__); 632 status = decode_op_hdr(xdr_in, &op_nr); 633 - if (unlikely(status)) { 634 - status = htonl(NFS4ERR_OP_ILLEGAL); 635 - goto out; 636 - } 637 638 dprintk("%s: minorversion=%d nop=%d op_nr=%u\n", 639 __func__, minorversion, nop, op_nr); ··· 640 preprocess_nfs4_op(op_nr, &op); 641 if (status == htonl(NFS4ERR_OP_ILLEGAL)) 642 op_nr = OP_CB_ILLEGAL; 643 - out: 644 maxlen = xdr_out->end - xdr_out->p; 645 if (maxlen > 0 && maxlen < PAGE_SIZE) { 646 - if (likely(status == 0 && 
op->decode_args != NULL)) 647 - status = op->decode_args(rqstp, xdr_in, argp); 648 - if (likely(status == 0 && op->process_op != NULL)) 649 status = op->process_op(argp, resp); 650 } else 651 status = htonl(NFS4ERR_RESOURCE); 652 653 res = encode_op_hdr(xdr_out, op_nr, status); 654 - if (status == 0) 655 - status = res; 656 if (op->encode_res != NULL && status == 0) 657 status = op->encode_res(rqstp, xdr_out, resp); 658 dprintk("%s: done, status = %d\n", __func__, ntohl(status)); ··· 681 struct cb_compound_hdr_res hdr_res = { NULL }; 682 struct xdr_stream xdr_in, xdr_out; 683 __be32 *p; 684 - __be32 status; 685 unsigned int nops = 0; 686 687 dprintk("%s: start\n", __func__); ··· 701 return rpc_system_err; 702 703 while (status == 0 && nops != hdr_arg.nops) { 704 - status = process_op(hdr_arg.minorversion, nops, 705 - rqstp, &xdr_in, argp, &xdr_out, resp); 706 nops++; 707 } 708 709 *hdr_res.status = status; ··· 748 .process_op = (callback_process_op_t)nfs4_callback_recallany, 749 .decode_args = (callback_decode_arg_t)decode_recallany_args, 750 .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ, 751 }, 752 #endif /* CONFIG_NFS_V4_1 */ 753 };
··· 24 #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ 25 4 + 1 + 3) 26 #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 27 + #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 28 #endif /* CONFIG_NFS_V4_1 */ 29 30 #define NFSDBG_FACILITY NFSDBG_CALLBACK 31 + 32 + /* Internal error code */ 33 + #define NFS4ERR_RESOURCE_HDR 11050 34 35 typedef __be32 (*callback_process_op_t)(void *, void *); 36 typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); ··· 173 __be32 *p; 174 p = read_buf(xdr, 4); 175 if (unlikely(p == NULL)) 176 + return htonl(NFS4ERR_RESOURCE_HDR); 177 *op = ntohl(*p); 178 return 0; 179 } ··· 215 216 #if defined(CONFIG_NFS_V4_1) 217 218 + static __be32 decode_sessionid(struct xdr_stream *xdr, 219 struct nfs4_sessionid *sid) 220 { 221 + __be32 *p; 222 int len = NFS4_MAX_SESSIONID_LEN; 223 224 p = read_buf(xdr, len); ··· 229 return 0; 230 } 231 232 + static __be32 decode_rc_list(struct xdr_stream *xdr, 233 struct referring_call_list *rc_list) 234 { 235 + __be32 *p; 236 int i; 237 + __be32 status; 238 239 status = decode_sessionid(xdr, &rc_list->rcl_sessionid); 240 if (status) ··· 267 return status; 268 } 269 270 + static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, 271 struct xdr_stream *xdr, 272 struct cb_sequenceargs *args) 273 { 274 + __be32 *p; 275 int i; 276 + __be32 status; 277 278 status = decode_sessionid(xdr, &args->csa_sessionid); 279 if (status) ··· 327 goto out; 328 } 329 330 + static __be32 decode_recallany_args(struct svc_rqst *rqstp, 331 struct xdr_stream *xdr, 332 struct cb_recallanyargs *args) 333 { 334 + __be32 *p; 335 336 args->craa_addr = svc_addr(rqstp); 337 p = read_buf(xdr, 4); ··· 343 return htonl(NFS4ERR_BADXDR); 344 args->craa_type_mask = ntohl(*p); 345 346 + return 0; 347 + } 348 + 349 + static __be32 decode_recallslot_args(struct svc_rqst *rqstp, 350 + struct xdr_stream *xdr, 351 + struct cb_recallslotargs *args) 352 + { 353 + __be32 *p; 354 + 355 + args->crsa_addr = svc_addr(rqstp); 356 + p = read_buf(xdr, 4); 357 + if (unlikely(p == NULL)) 358 + return htonl(NFS4ERR_BADXDR); 359 + args->crsa_target_max_slots = ntohl(*p++); 360 return 0; 361 } 362 ··· 465 466 p = xdr_reserve_space(xdr, 8); 467 if (unlikely(p == NULL)) 468 + return htonl(NFS4ERR_RESOURCE_HDR); 469 *p++ = htonl(op); 470 *p = res; 471 return 0; ··· 499 500 #if defined(CONFIG_NFS_V4_1) 501 502 + static __be32 encode_sessionid(struct xdr_stream *xdr, 503 const struct nfs4_sessionid *sid) 504 { 505 + __be32 *p; 506 int len = NFS4_MAX_SESSIONID_LEN; 507 508 p = xdr_reserve_space(xdr, len); ··· 513 return 0; 514 } 515 516 + static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, 517 struct xdr_stream *xdr, 518 const struct cb_sequenceres *res) 519 { 520 + __be32 *p; 521 unsigned status = res->csr_status; 522 523 if (unlikely(status != 0)) ··· 554 case OP_CB_RECALL: 555 case OP_CB_SEQUENCE: 556 case OP_CB_RECALL_ANY: 557 + case OP_CB_RECALL_SLOT: 558 *op = &callback_ops[op_nr]; 559 break; 560 ··· 562 case OP_CB_NOTIFY: 563 case OP_CB_PUSH_DELEG: 564 case OP_CB_RECALLABLE_OBJ_AVAIL: 565 case OP_CB_WANTS_CANCELLED: 566 case OP_CB_NOTIFY_LOCK: 567 return htonl(NFS4ERR_NOTSUPP); ··· 602 static __be32 process_op(uint32_t minorversion, int nop, 603 struct svc_rqst *rqstp, 604 struct xdr_stream *xdr_in, void *argp, 605 + struct xdr_stream *xdr_out, void *resp, int* drc_status) 606 { 607 struct callback_op *op = &callback_ops[0]; 608 + unsigned int op_nr; 609 __be32 status; 610 long maxlen; 611 __be32 res; 612 613 dprintk("%s: 
start\n", __func__); 614 status = decode_op_hdr(xdr_in, &op_nr); 615 + if (unlikely(status)) 616 + return status; 617 618 dprintk("%s: minorversion=%d nop=%d op_nr=%u\n", 619 __func__, minorversion, nop, op_nr); ··· 624 preprocess_nfs4_op(op_nr, &op); 625 if (status == htonl(NFS4ERR_OP_ILLEGAL)) 626 op_nr = OP_CB_ILLEGAL; 627 + if (status) 628 + goto encode_hdr; 629 + 630 + if (*drc_status) { 631 + status = *drc_status; 632 + goto encode_hdr; 633 + } 634 + 635 maxlen = xdr_out->end - xdr_out->p; 636 if (maxlen > 0 && maxlen < PAGE_SIZE) { 637 + status = op->decode_args(rqstp, xdr_in, argp); 638 + if (likely(status == 0)) 639 status = op->process_op(argp, resp); 640 } else 641 status = htonl(NFS4ERR_RESOURCE); 642 643 + /* Only set by OP_CB_SEQUENCE processing */ 644 + if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) { 645 + *drc_status = status; 646 + status = 0; 647 + } 648 + 649 + encode_hdr: 650 res = encode_op_hdr(xdr_out, op_nr, status); 651 + if (unlikely(res)) 652 + return res; 653 if (op->encode_res != NULL && status == 0) 654 status = op->encode_res(rqstp, xdr_out, resp); 655 dprintk("%s: done, status = %d\n", __func__, ntohl(status)); ··· 652 struct cb_compound_hdr_res hdr_res = { NULL }; 653 struct xdr_stream xdr_in, xdr_out; 654 __be32 *p; 655 + __be32 status, drc_status = 0; 656 unsigned int nops = 0; 657 658 dprintk("%s: start\n", __func__); ··· 672 return rpc_system_err; 673 674 while (status == 0 && nops != hdr_arg.nops) { 675 + status = process_op(hdr_arg.minorversion, nops, rqstp, 676 + &xdr_in, argp, &xdr_out, resp, &drc_status); 677 nops++; 678 + } 679 + 680 + /* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return 681 + * resource error in cb_compound status without returning op */ 682 + if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) { 683 + status = htonl(NFS4ERR_RESOURCE); 684 + nops--; 685 } 686 687 *hdr_res.status = status; ··· 712 .process_op = (callback_process_op_t)nfs4_callback_recallany, 713 .decode_args = (callback_decode_arg_t)decode_recallany_args, 714 .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ, 715 + }, 716 + [OP_CB_RECALL_SLOT] = { 717 + .process_op = (callback_process_op_t)nfs4_callback_recallslot, 718 + .decode_args = (callback_decode_arg_t)decode_recallslot_args, 719 + .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ, 720 }, 721 #endif /* CONFIG_NFS_V4_1 */ 722 };
+26 -26
fs/nfs/client.c
··· 164 return ERR_PTR(err); 165 } 166 167 - static void nfs4_shutdown_client(struct nfs_client *clp) 168 - { 169 #ifdef CONFIG_NFS_V4 170 - if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) 171 - nfs4_kill_renewd(clp); 172 - BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners)); 173 - if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) 174 - nfs_idmap_delete(clp); 175 - 176 - rpc_destroy_wait_queue(&clp->cl_rpcwaitq); 177 - #endif 178 - } 179 - 180 - /* 181 - * Destroy the NFS4 callback service 182 - */ 183 - static void nfs4_destroy_callback(struct nfs_client *clp) 184 - { 185 - #ifdef CONFIG_NFS_V4 186 - if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) 187 - nfs_callback_down(clp->cl_minorversion); 188 - #endif /* CONFIG_NFS_V4 */ 189 - } 190 - 191 /* 192 * Clears/puts all minor version specific parts from an nfs_client struct 193 * reverting it to minorversion 0. ··· 179 180 clp->cl_call_sync = _nfs4_call_sync; 181 #endif /* CONFIG_NFS_V4_1 */ 182 - 183 - nfs4_destroy_callback(clp); 184 } 185 186 /* 187 * Destroy a shared client record ··· 214 { 215 dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version); 216 217 - nfs4_clear_client_minor_version(clp); 218 nfs4_shutdown_client(clp); 219 220 nfs_fscache_release_client_cookie(clp);
··· 164 return ERR_PTR(err); 165 } 166 167 #ifdef CONFIG_NFS_V4 168 /* 169 * Clears/puts all minor version specific parts from an nfs_client struct 170 * reverting it to minorversion 0. ··· 202 203 clp->cl_call_sync = _nfs4_call_sync; 204 #endif /* CONFIG_NFS_V4_1 */ 205 } 206 + 207 + /* 208 + * Destroy the NFS4 callback service 209 + */ 210 + static void nfs4_destroy_callback(struct nfs_client *clp) 211 + { 212 + if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) 213 + nfs_callback_down(clp->cl_minorversion); 214 + } 215 + 216 + static void nfs4_shutdown_client(struct nfs_client *clp) 217 + { 218 + if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) 219 + nfs4_kill_renewd(clp); 220 + nfs4_clear_client_minor_version(clp); 221 + nfs4_destroy_callback(clp); 222 + if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) 223 + nfs_idmap_delete(clp); 224 + 225 + rpc_destroy_wait_queue(&clp->cl_rpcwaitq); 226 + } 227 + #else 228 + static void nfs4_shutdown_client(struct nfs_client *clp) 229 + { 230 + } 231 + #endif /* CONFIG_NFS_V4 */ 232 233 /* 234 * Destroy a shared client record ··· 213 { 214 dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version); 215 216 nfs4_shutdown_client(clp); 217 218 nfs_fscache_release_client_cookie(clp);
+1 -1
fs/nfs/dir.c
···
560 	desc->entry = &my_entry;
561
562 	nfs_block_sillyrename(dentry);
563 - 	res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
564 	if (res < 0)
565 		goto out;
566
···
560 	desc->entry = &my_entry;
561
562 	nfs_block_sillyrename(dentry);
563 + 	res = nfs_revalidate_mapping(inode, filp->f_mapping);
564 	if (res < 0)
565 		goto out;
566
+15 -3
fs/nfs/dns_resolve.c
··· 36 }; 37 38 39 static void nfs_dns_ent_init(struct cache_head *cnew, 40 struct cache_head *ckey) 41 { ··· 62 new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL); 63 if (new->hostname) { 64 new->namelen = key->namelen; 65 - memcpy(&new->addr, &key->addr, key->addrlen); 66 - new->addrlen = key->addrlen; 67 } else { 68 new->namelen = 0; 69 new->addrlen = 0; ··· 246 .cache_show = nfs_dns_show, 247 .match = nfs_dns_match, 248 .init = nfs_dns_ent_init, 249 - .update = nfs_dns_ent_init, 250 .alloc = nfs_dns_ent_alloc, 251 }; 252
··· 36 }; 37 38 39 + static void nfs_dns_ent_update(struct cache_head *cnew, 40 + struct cache_head *ckey) 41 + { 42 + struct nfs_dns_ent *new; 43 + struct nfs_dns_ent *key; 44 + 45 + new = container_of(cnew, struct nfs_dns_ent, h); 46 + key = container_of(ckey, struct nfs_dns_ent, h); 47 + 48 + memcpy(&new->addr, &key->addr, key->addrlen); 49 + new->addrlen = key->addrlen; 50 + } 51 + 52 static void nfs_dns_ent_init(struct cache_head *cnew, 53 struct cache_head *ckey) 54 { ··· 49 new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL); 50 if (new->hostname) { 51 new->namelen = key->namelen; 52 + nfs_dns_ent_update(cnew, ckey); 53 } else { 54 new->namelen = 0; 55 new->addrlen = 0; ··· 234 .cache_show = nfs_dns_show, 235 .match = nfs_dns_match, 236 .init = nfs_dns_ent_init, 237 + .update = nfs_dns_ent_update, 238 .alloc = nfs_dns_ent_alloc, 239 }; 240
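The point of splitting nfs_dns_ent_update() out of nfs_dns_ent_init() is that a cache update must only refresh the resolved address and never reallocate or re-derive the hostname key. A rough userspace sketch of that split follows; struct dns_ent and the function names are hypothetical stand-ins for nfs_dns_ent and its callbacks.

#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

struct dns_ent {
	char *hostname;			/* lookup key */
	size_t namelen;
	struct sockaddr_storage addr;	/* data refreshed by an upcall */
	size_t addrlen;
};

/* update: only the resolved address changes */
static void dns_ent_update(struct dns_ent *new, const struct dns_ent *key)
{
	memcpy(&new->addr, &key->addr, key->addrlen);
	new->addrlen = key->addrlen;
}

/* init: duplicate the key, then reuse update for the address part */
static void dns_ent_init(struct dns_ent *new, const struct dns_ent *key)
{
	new->hostname = strndup(key->hostname, key->namelen);
	if (new->hostname) {
		new->namelen = key->namelen;
		dns_ent_update(new, key);
	} else {
		new->namelen = 0;
		new->addrlen = 0;
	}
}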
+22 -8
fs/nfs/file.c
··· 123 filp->f_path.dentry->d_parent->d_name.name, 124 filp->f_path.dentry->d_name.name); 125 126 res = nfs_check_flags(filp->f_flags); 127 if (res) 128 return res; 129 130 - nfs_inc_stats(inode, NFSIOS_VFSOPEN); 131 res = nfs_open(inode, filp); 132 return res; 133 } ··· 237 dentry->d_parent->d_name.name, 238 dentry->d_name.name); 239 240 if ((file->f_mode & FMODE_WRITE) == 0) 241 return 0; 242 - nfs_inc_stats(inode, NFSIOS_VFSFLUSH); 243 244 /* Flush writes to the server and return any errors */ 245 return nfs_do_fsync(ctx, inode); ··· 262 (unsigned long) count, (unsigned long) pos); 263 264 result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); 265 - nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count); 266 - if (!result) 267 result = generic_file_aio_read(iocb, iov, nr_segs, pos); 268 return result; 269 } 270 ··· 284 (unsigned long) count, (unsigned long long) *ppos); 285 286 res = nfs_revalidate_mapping(inode, filp->f_mapping); 287 - if (!res) 288 res = generic_file_splice_read(filp, ppos, pipe, count, flags); 289 return res; 290 } 291 ··· 601 { 602 struct dentry * dentry = iocb->ki_filp->f_path.dentry; 603 struct inode * inode = dentry->d_inode; 604 ssize_t result; 605 size_t count = iov_length(iov, nr_segs); 606 ··· 628 if (!count) 629 goto out; 630 631 - nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count); 632 result = generic_file_aio_write(iocb, iov, nr_segs, pos); 633 /* Return error values for O_DSYNC and IS_SYNC() */ 634 if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) { 635 int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode); 636 if (err < 0) 637 result = err; 638 } 639 out: 640 return result; 641 ··· 654 { 655 struct dentry *dentry = filp->f_path.dentry; 656 struct inode *inode = dentry->d_inode; 657 ssize_t ret; 658 659 dprintk("NFS splice_write(%s/%s, %lu@%llu)\n", ··· 665 * The combination of splice and an O_APPEND destination is disallowed. 666 */ 667 668 - nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count); 669 - 670 ret = generic_file_splice_write(pipe, filp, ppos, count, flags); 671 if (ret >= 0 && nfs_need_sync_write(filp, inode)) { 672 int err = nfs_do_fsync(nfs_file_open_context(filp), inode); 673 if (err < 0) 674 ret = err; 675 } 676 return ret; 677 } 678
··· 123 filp->f_path.dentry->d_parent->d_name.name, 124 filp->f_path.dentry->d_name.name); 125 126 + nfs_inc_stats(inode, NFSIOS_VFSOPEN); 127 res = nfs_check_flags(filp->f_flags); 128 if (res) 129 return res; 130 131 res = nfs_open(inode, filp); 132 return res; 133 } ··· 237 dentry->d_parent->d_name.name, 238 dentry->d_name.name); 239 240 + nfs_inc_stats(inode, NFSIOS_VFSFLUSH); 241 if ((file->f_mode & FMODE_WRITE) == 0) 242 return 0; 243 244 /* Flush writes to the server and return any errors */ 245 return nfs_do_fsync(ctx, inode); ··· 262 (unsigned long) count, (unsigned long) pos); 263 264 result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); 265 + if (!result) { 266 result = generic_file_aio_read(iocb, iov, nr_segs, pos); 267 + if (result > 0) 268 + nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result); 269 + } 270 return result; 271 } 272 ··· 282 (unsigned long) count, (unsigned long long) *ppos); 283 284 res = nfs_revalidate_mapping(inode, filp->f_mapping); 285 + if (!res) { 286 res = generic_file_splice_read(filp, ppos, pipe, count, flags); 287 + if (res > 0) 288 + nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res); 289 + } 290 return res; 291 } 292 ··· 596 { 597 struct dentry * dentry = iocb->ki_filp->f_path.dentry; 598 struct inode * inode = dentry->d_inode; 599 + unsigned long written = 0; 600 ssize_t result; 601 size_t count = iov_length(iov, nr_segs); 602 ··· 622 if (!count) 623 goto out; 624 625 result = generic_file_aio_write(iocb, iov, nr_segs, pos); 626 + if (result > 0) 627 + written = result; 628 + 629 /* Return error values for O_DSYNC and IS_SYNC() */ 630 if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) { 631 int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode); 632 if (err < 0) 633 result = err; 634 } 635 + if (result > 0) 636 + nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written); 637 out: 638 return result; 639 ··· 644 { 645 struct dentry *dentry = filp->f_path.dentry; 646 struct inode *inode = dentry->d_inode; 647 + unsigned long written = 0; 648 ssize_t ret; 649 650 dprintk("NFS splice_write(%s/%s, %lu@%llu)\n", ··· 654 * The combination of splice and an O_APPEND destination is disallowed. 655 */ 656 657 ret = generic_file_splice_write(pipe, filp, ppos, count, flags); 658 + if (ret > 0) 659 + written = ret; 660 + 661 if (ret >= 0 && nfs_need_sync_write(filp, inode)) { 662 int err = nfs_do_fsync(nfs_file_open_context(filp), inode); 663 if (err < 0) 664 ret = err; 665 } 666 + if (ret > 0) 667 + nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written); 668 return ret; 669 } 670
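The iostat change above boils down to counting bytes only after the generic path reports how many were actually transferred, instead of adding the requested length up front. A trivial userspace illustration of the pattern; counted_read and read_bytes_counted are made-up names standing in for the NFSIOS_NORMALREADBYTES accounting.

#include <unistd.h>

static unsigned long read_bytes_counted;

static ssize_t counted_read(int fd, void *buf, size_t count)
{
	ssize_t n = read(fd, buf, count);

	if (n > 0)		/* short reads and errors never inflate the counter */
		read_bytes_counted += n;
	return n;
}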
+13 -77
fs/nfs/inode.c
··· 97 return ino; 98 } 99 100 - int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 101 - { 102 - int ret; 103 - 104 - ret = nfs_commit_inode(inode, 105 - wbc->sync_mode == WB_SYNC_ALL ? FLUSH_SYNC : 0); 106 - if (ret >= 0) 107 - return 0; 108 - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 109 - return ret; 110 - } 111 - 112 void nfs_clear_inode(struct inode *inode) 113 { 114 /* ··· 114 */ 115 int nfs_sync_mapping(struct address_space *mapping) 116 { 117 - int ret; 118 119 - if (mapping->nrpages == 0) 120 - return 0; 121 - unmap_mapping_range(mapping, 0, 0, 0); 122 - ret = filemap_write_and_wait(mapping); 123 - if (ret != 0) 124 - goto out; 125 - ret = nfs_wb_all(mapping->host); 126 - out: 127 return ret; 128 } 129 ··· 491 int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; 492 int err; 493 494 - /* 495 - * Flush out writes to the server in order to update c/mtime. 496 - * 497 - * Hold the i_mutex to suspend application writes temporarily; 498 - * this prevents long-running writing applications from blocking 499 - * nfs_wb_nocommit. 500 - */ 501 if (S_ISREG(inode->i_mode)) { 502 - mutex_lock(&inode->i_mutex); 503 - nfs_wb_nocommit(inode); 504 - mutex_unlock(&inode->i_mutex); 505 } 506 507 /* ··· 519 generic_fillattr(inode, stat); 520 stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode)); 521 } 522 return err; 523 } 524 ··· 595 __put_nfs_open_context(ctx, 0); 596 } 597 598 - static void put_nfs_open_context_sync(struct nfs_open_context *ctx) 599 - { 600 - __put_nfs_open_context(ctx, 1); 601 - } 602 - 603 /* 604 * Ensure that mmap has a recent RPC credential for use when writing out 605 * shared pages ··· 641 spin_lock(&inode->i_lock); 642 list_move_tail(&ctx->list, &NFS_I(inode)->open_files); 643 spin_unlock(&inode->i_lock); 644 - put_nfs_open_context_sync(ctx); 645 } 646 } 647 ··· 749 return __nfs_revalidate_inode(server, inode); 750 } 751 752 - static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_space *mapping) 753 { 754 struct nfs_inode *nfsi = NFS_I(inode); 755 ··· 770 return 0; 771 } 772 773 - static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) 774 - { 775 - int ret = 0; 776 - 777 - mutex_lock(&inode->i_mutex); 778 - if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_DATA) { 779 - ret = nfs_sync_mapping(mapping); 780 - if (ret == 0) 781 - ret = nfs_invalidate_mapping_nolock(inode, mapping); 782 - } 783 - mutex_unlock(&inode->i_mutex); 784 - return ret; 785 - } 786 - 787 - /** 788 - * nfs_revalidate_mapping_nolock - Revalidate the pagecache 789 - * @inode - pointer to host inode 790 - * @mapping - pointer to mapping 791 - */ 792 - int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping) 793 - { 794 - struct nfs_inode *nfsi = NFS_I(inode); 795 - int ret = 0; 796 - 797 - if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) 798 - || nfs_attribute_timeout(inode) || NFS_STALE(inode)) { 799 - ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode); 800 - if (ret < 0) 801 - goto out; 802 - } 803 - if (nfsi->cache_validity & NFS_INO_INVALID_DATA) 804 - ret = nfs_invalidate_mapping_nolock(inode, mapping); 805 - out: 806 - return ret; 807 - } 808 - 809 /** 810 * nfs_revalidate_mapping - Revalidate the pagecache 811 * @inode - pointer to host inode 812 * @mapping - pointer to mapping 813 - * 814 - * This version of the function will take the inode->i_mutex and attempt to 815 - * flush out all dirty data if it needs to invalidate the page cache. 
816 */ 817 int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) 818 { ··· 1351 INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1352 INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1353 nfsi->npages = 0; 1354 atomic_set(&nfsi->silly_count, 1); 1355 INIT_HLIST_HEAD(&nfsi->silly_list); 1356 init_waitqueue_head(&nfsi->waitqueue);
··· 97 return ino; 98 } 99 100 void nfs_clear_inode(struct inode *inode) 101 { 102 /* ··· 126 */ 127 int nfs_sync_mapping(struct address_space *mapping) 128 { 129 + int ret = 0; 130 131 + if (mapping->nrpages != 0) { 132 + unmap_mapping_range(mapping, 0, 0, 0); 133 + ret = nfs_wb_all(mapping->host); 134 + } 135 return ret; 136 } 137 ··· 507 int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; 508 int err; 509 510 + /* Flush out writes to the server in order to update c/mtime. */ 511 if (S_ISREG(inode->i_mode)) { 512 + err = filemap_write_and_wait(inode->i_mapping); 513 + if (err) 514 + goto out; 515 } 516 517 /* ··· 541 generic_fillattr(inode, stat); 542 stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode)); 543 } 544 + out: 545 return err; 546 } 547 ··· 616 __put_nfs_open_context(ctx, 0); 617 } 618 619 /* 620 * Ensure that mmap has a recent RPC credential for use when writing out 621 * shared pages ··· 667 spin_lock(&inode->i_lock); 668 list_move_tail(&ctx->list, &NFS_I(inode)->open_files); 669 spin_unlock(&inode->i_lock); 670 + __put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1); 671 } 672 } 673 ··· 775 return __nfs_revalidate_inode(server, inode); 776 } 777 778 + static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) 779 { 780 struct nfs_inode *nfsi = NFS_I(inode); 781 ··· 796 return 0; 797 } 798 799 /** 800 * nfs_revalidate_mapping - Revalidate the pagecache 801 * @inode - pointer to host inode 802 * @mapping - pointer to mapping 803 */ 804 int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) 805 { ··· 1416 INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1417 INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1418 nfsi->npages = 0; 1419 + nfsi->ncommit = 0; 1420 atomic_set(&nfsi->silly_count, 1); 1421 INIT_HLIST_HEAD(&nfsi->silly_list); 1422 init_waitqueue_head(&nfsi->waitqueue);
+5 -4
fs/nfs/nfs3proc.c
···
22
23 #define NFSDBG_FACILITY NFSDBG_PROC
24
25 - /* A wrapper to handle the EJUKEBOX error message */
26 static int
27 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
28 {
29 	int res;
30 	do {
31 		res = rpc_call_sync(clnt, msg, flags);
32 - 		if (res != -EJUKEBOX)
33 			break;
34 		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
35 		res = -ERESTARTSYS;
···
42 static int
43 nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
44 {
45 - 	if (task->tk_status != -EJUKEBOX)
46 		return 0;
47 - 	nfs_inc_stats(inode, NFSIOS_DELAY);
48 	task->tk_status = 0;
49 	rpc_restart_call(task);
50 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
···
22
23 #define NFSDBG_FACILITY NFSDBG_PROC
24
25 + /* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error messages */
26 static int
27 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
28 {
29 	int res;
30 	do {
31 		res = rpc_call_sync(clnt, msg, flags);
32 + 		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
33 			break;
34 		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
35 		res = -ERESTARTSYS;
···
42 static int
43 nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
44 {
45 + 	if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED)
46 		return 0;
47 + 	if (task->tk_status == -EJUKEBOX)
48 + 		nfs_inc_stats(inode, NFSIOS_DELAY);
49 	task->tk_status = 0;
50 	rpc_restart_call(task);
51 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
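Both hunks apply the same idea: EKEYEXPIRED is now treated like EJUKEBOX, i.e. a transient "try again later" condition rather than a fatal error. A generic userspace sketch of that synchronous retry loop; op, arg and delay_secs are placeholders, EAGAIN stands in for EJUKEBOX/EKEYEXPIRED, and the kernel version sleeps with schedule_timeout_killable() instead of sleep().

#include <errno.h>
#include <unistd.h>

static int call_with_retry(int (*op)(void *arg), void *arg, unsigned int delay_secs)
{
	int res;

	for (;;) {
		res = op(arg);
		if (res != -EAGAIN)	/* only the transient error is retried */
			break;
		sleep(delay_secs);
	}
	return res;
}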
+2
fs/nfs/nfs4_fs.h
···
46 	NFS4CLNT_DELEGRETURN,
47 	NFS4CLNT_SESSION_RESET,
48 	NFS4CLNT_SESSION_DRAINING,
49 };
50
51 /*
···
281 extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
282 extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
283 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
284 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
285 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
286 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
···
46 	NFS4CLNT_DELEGRETURN,
47 	NFS4CLNT_SESSION_RESET,
48 	NFS4CLNT_SESSION_DRAINING,
49 + 	NFS4CLNT_RECALL_SLOT,
50 };
51
52 /*
···
280 extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
281 extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
282 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
283 + extern void nfs41_handle_recall_slot(struct nfs_client *clp);
284 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
285 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
286 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
+68 -35
fs/nfs/nfs4proc.c
··· 281 } 282 case -NFS4ERR_GRACE: 283 case -NFS4ERR_DELAY: 284 ret = nfs4_delay(server->client, &exception->timeout); 285 if (ret != 0) 286 break; ··· 419 clp->cl_last_renewal = timestamp; 420 spin_unlock(&clp->cl_lock); 421 /* Check sequence flags */ 422 - nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 423 } 424 out: 425 /* The session may be reset by one of the error handlers. */ ··· 1165 int err; 1166 do { 1167 err = _nfs4_do_open_reclaim(ctx, state); 1168 - if (err != -NFS4ERR_DELAY) 1169 break; 1170 nfs4_handle_exception(server, err, &exception); 1171 } while (exception.retry); ··· 1584 goto out; 1585 case -NFS4ERR_GRACE: 1586 case -NFS4ERR_DELAY: 1587 nfs4_handle_exception(server, err, &exception); 1588 err = 0; 1589 } ··· 3148 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3149 * standalone procedure for queueing an asynchronous RENEW. 3150 */ 3151 static void nfs4_renew_done(struct rpc_task *task, void *data) 3152 { 3153 - struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp; 3154 - unsigned long timestamp = (unsigned long)data; 3155 3156 if (task->tk_status < 0) { 3157 /* Unless we're shutting down, schedule state recovery! */ ··· 3176 3177 static const struct rpc_call_ops nfs4_renew_ops = { 3178 .rpc_call_done = nfs4_renew_done, 3179 }; 3180 3181 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) ··· 3187 .rpc_cred = cred, 3188 }; 3189 3190 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 3191 - &nfs4_renew_ops, (void *)jiffies); 3192 } 3193 3194 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) ··· 3467 if (server) 3468 nfs_inc_server_stats(server, NFSIOS_DELAY); 3469 case -NFS4ERR_GRACE: 3470 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3471 task->tk_status = 0; 3472 return -EAGAIN; ··· 3580 case -NFS4ERR_RESOURCE: 3581 /* The IBM lawyers misread another document! */ 3582 case -NFS4ERR_DELAY: 3583 err = nfs4_delay(clp->cl_rpcclient, &timeout); 3584 } 3585 } while (err == 0); ··· 4196 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4197 return 0; 4198 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 4199 - if (err != -NFS4ERR_DELAY) 4200 break; 4201 nfs4_handle_exception(server, err, &exception); 4202 } while (exception.retry); ··· 4221 goto out; 4222 case -NFS4ERR_GRACE: 4223 case -NFS4ERR_DELAY: 4224 nfs4_handle_exception(server, err, &exception); 4225 err = 0; 4226 } ··· 4373 err = 0; 4374 goto out; 4375 case -NFS4ERR_DELAY: 4376 break; 4377 } 4378 err = nfs4_handle_exception(server, err, &exception); ··· 4519 4520 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 4521 4522 - if (status != NFS4ERR_CLID_INUSE) 4523 break; 4524 4525 if (signalled()) ··· 4573 switch (task->tk_status) { 4574 case -NFS4ERR_DELAY: 4575 case -NFS4ERR_GRACE: 4576 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 4577 rpc_delay(task, NFS4_POLL_RETRY_MIN); 4578 task->tk_status = 0; ··· 4631 /* 4632 * Reset a slot table 4633 */ 4634 - static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots, 4635 - int old_max_slots, int ivalue) 4636 { 4637 int i; 4638 int ret = 0; 4639 4640 - dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl); 4641 4642 - /* 4643 - * Until we have dynamic slot table adjustment, insist 4644 - * upon the same slot table size 4645 - */ 4646 - if (max_slots != old_max_slots) { 4647 - dprintk("%s reset slot table does't match old\n", 4648 - __func__); 4649 - ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? 
*/ 4650 - goto out; 4651 } 4652 spin_lock(&tbl->slot_tbl_lock); 4653 - for (i = 0; i < max_slots; ++i) 4654 tbl->slots[i].seq_nr = ivalue; 4655 spin_unlock(&tbl->slot_tbl_lock); 4656 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, ··· 4674 int status; 4675 4676 status = nfs4_reset_slot_table(&session->fc_slot_table, 4677 - session->fc_attrs.max_reqs, 4678 - session->fc_slot_table.max_slots, 4679 - 1); 4680 if (status) 4681 return status; 4682 4683 status = nfs4_reset_slot_table(&session->bc_slot_table, 4684 - session->bc_attrs.max_reqs, 4685 - session->bc_slot_table.max_slots, 4686 - 0); 4687 return status; 4688 } 4689 ··· 4820 args->fc_attrs.headerpadsz = 0; 4821 args->fc_attrs.max_rqst_sz = mxrqst_sz; 4822 args->fc_attrs.max_resp_sz = mxresp_sz; 4823 - args->fc_attrs.max_resp_sz_cached = mxresp_sz; 4824 args->fc_attrs.max_ops = NFS4_MAX_OPS; 4825 args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs; 4826 4827 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 4828 - "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 4829 __func__, 4830 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 4831 - args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops, 4832 - args->fc_attrs.max_reqs); 4833 4834 /* Back channel attributes */ 4835 args->bc_attrs.headerpadsz = 0; ··· 5036 &res, args.sa_cache_this, 1); 5037 } 5038 5039 - void nfs41_sequence_call_done(struct rpc_task *task, void *data) 5040 { 5041 struct nfs_client *clp = (struct nfs_client *)data; 5042 ··· 5053 5054 if (task->tk_status < 0) { 5055 dprintk("%s ERROR %d\n", __func__, task->tk_status); 5056 5057 if (_nfs4_async_handle_error(task, NULL, clp, NULL) 5058 == -EAGAIN) { ··· 5063 } 5064 } 5065 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 5066 - 5067 kfree(task->tk_msg.rpc_argp); 5068 kfree(task->tk_msg.rpc_resp); 5069 ··· 5088 static const struct rpc_call_ops nfs41_sequence_ops = { 5089 .rpc_call_done = nfs41_sequence_call_done, 5090 .rpc_call_prepare = nfs41_sequence_prepare, 5091 }; 5092 5093 static int nfs41_proc_async_sequence(struct nfs_client *clp, ··· 5101 .rpc_cred = cred, 5102 }; 5103 5104 args = kzalloc(sizeof(*args), GFP_KERNEL); 5105 - if (!args) 5106 - return -ENOMEM; 5107 res = kzalloc(sizeof(*res), GFP_KERNEL); 5108 - if (!res) { 5109 kfree(args); 5110 return -ENOMEM; 5111 } 5112 res->sr_slotid = NFS4_MAX_SLOT_TABLE;
··· 281 } 282 case -NFS4ERR_GRACE: 283 case -NFS4ERR_DELAY: 284 + case -EKEYEXPIRED: 285 ret = nfs4_delay(server->client, &exception->timeout); 286 if (ret != 0) 287 break; ··· 418 clp->cl_last_renewal = timestamp; 419 spin_unlock(&clp->cl_lock); 420 /* Check sequence flags */ 421 + if (atomic_read(&clp->cl_count) > 1) 422 + nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 423 } 424 out: 425 /* The session may be reset by one of the error handlers. */ ··· 1163 int err; 1164 do { 1165 err = _nfs4_do_open_reclaim(ctx, state); 1166 + if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED) 1167 break; 1168 nfs4_handle_exception(server, err, &exception); 1169 } while (exception.retry); ··· 1582 goto out; 1583 case -NFS4ERR_GRACE: 1584 case -NFS4ERR_DELAY: 1585 + case -EKEYEXPIRED: 1586 nfs4_handle_exception(server, err, &exception); 1587 err = 0; 1588 } ··· 3145 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3146 * standalone procedure for queueing an asynchronous RENEW. 3147 */ 3148 + static void nfs4_renew_release(void *data) 3149 + { 3150 + struct nfs_client *clp = data; 3151 + 3152 + if (atomic_read(&clp->cl_count) > 1) 3153 + nfs4_schedule_state_renewal(clp); 3154 + nfs_put_client(clp); 3155 + } 3156 + 3157 static void nfs4_renew_done(struct rpc_task *task, void *data) 3158 { 3159 + struct nfs_client *clp = data; 3160 + unsigned long timestamp = task->tk_start; 3161 3162 if (task->tk_status < 0) { 3163 /* Unless we're shutting down, schedule state recovery! */ ··· 3164 3165 static const struct rpc_call_ops nfs4_renew_ops = { 3166 .rpc_call_done = nfs4_renew_done, 3167 + .rpc_release = nfs4_renew_release, 3168 }; 3169 3170 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) ··· 3174 .rpc_cred = cred, 3175 }; 3176 3177 + if (!atomic_inc_not_zero(&clp->cl_count)) 3178 + return -EIO; 3179 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 3180 + &nfs4_renew_ops, clp); 3181 } 3182 3183 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) ··· 3452 if (server) 3453 nfs_inc_server_stats(server, NFSIOS_DELAY); 3454 case -NFS4ERR_GRACE: 3455 + case -EKEYEXPIRED: 3456 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3457 task->tk_status = 0; 3458 return -EAGAIN; ··· 3564 case -NFS4ERR_RESOURCE: 3565 /* The IBM lawyers misread another document! 
*/ 3566 case -NFS4ERR_DELAY: 3567 + case -EKEYEXPIRED: 3568 err = nfs4_delay(clp->cl_rpcclient, &timeout); 3569 } 3570 } while (err == 0); ··· 4179 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4180 return 0; 4181 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 4182 + if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED) 4183 break; 4184 nfs4_handle_exception(server, err, &exception); 4185 } while (exception.retry); ··· 4204 goto out; 4205 case -NFS4ERR_GRACE: 4206 case -NFS4ERR_DELAY: 4207 + case -EKEYEXPIRED: 4208 nfs4_handle_exception(server, err, &exception); 4209 err = 0; 4210 } ··· 4355 err = 0; 4356 goto out; 4357 case -NFS4ERR_DELAY: 4358 + case -EKEYEXPIRED: 4359 break; 4360 } 4361 err = nfs4_handle_exception(server, err, &exception); ··· 4500 4501 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 4502 4503 + if (status != -NFS4ERR_CLID_INUSE) 4504 break; 4505 4506 if (signalled()) ··· 4554 switch (task->tk_status) { 4555 case -NFS4ERR_DELAY: 4556 case -NFS4ERR_GRACE: 4557 + case -EKEYEXPIRED: 4558 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 4559 rpc_delay(task, NFS4_POLL_RETRY_MIN); 4560 task->tk_status = 0; ··· 4611 /* 4612 * Reset a slot table 4613 */ 4614 + static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, 4615 + int ivalue) 4616 { 4617 + struct nfs4_slot *new = NULL; 4618 int i; 4619 int ret = 0; 4620 4621 + dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, 4622 + max_reqs, tbl->max_slots); 4623 4624 + /* Does the newly negotiated max_reqs match the existing slot table? */ 4625 + if (max_reqs != tbl->max_slots) { 4626 + ret = -ENOMEM; 4627 + new = kmalloc(max_reqs * sizeof(struct nfs4_slot), 4628 + GFP_KERNEL); 4629 + if (!new) 4630 + goto out; 4631 + ret = 0; 4632 + kfree(tbl->slots); 4633 } 4634 spin_lock(&tbl->slot_tbl_lock); 4635 + if (new) { 4636 + tbl->slots = new; 4637 + tbl->max_slots = max_reqs; 4638 + } 4639 + for (i = 0; i < tbl->max_slots; ++i) 4640 tbl->slots[i].seq_nr = ivalue; 4641 spin_unlock(&tbl->slot_tbl_lock); 4642 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, ··· 4648 int status; 4649 4650 status = nfs4_reset_slot_table(&session->fc_slot_table, 4651 + session->fc_attrs.max_reqs, 1); 4652 if (status) 4653 return status; 4654 4655 status = nfs4_reset_slot_table(&session->bc_slot_table, 4656 + session->bc_attrs.max_reqs, 0); 4657 return status; 4658 } 4659 ··· 4798 args->fc_attrs.headerpadsz = 0; 4799 args->fc_attrs.max_rqst_sz = mxrqst_sz; 4800 args->fc_attrs.max_resp_sz = mxresp_sz; 4801 args->fc_attrs.max_ops = NFS4_MAX_OPS; 4802 args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs; 4803 4804 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 4805 + "max_ops=%u max_reqs=%u\n", 4806 __func__, 4807 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 4808 + args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 4809 4810 /* Back channel attributes */ 4811 args->bc_attrs.headerpadsz = 0; ··· 5016 &res, args.sa_cache_this, 1); 5017 } 5018 5019 + static void nfs41_sequence_release(void *data) 5020 + { 5021 + struct nfs_client *clp = (struct nfs_client *)data; 5022 + 5023 + if (atomic_read(&clp->cl_count) > 1) 5024 + nfs4_schedule_state_renewal(clp); 5025 + nfs_put_client(clp); 5026 + } 5027 + 5028 + static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 5029 { 5030 struct nfs_client *clp = (struct nfs_client *)data; 5031 ··· 5024 5025 if (task->tk_status < 0) { 5026 dprintk("%s ERROR %d\n", __func__, task->tk_status); 5027 + if 
(atomic_read(&clp->cl_count) == 1) 5028 + goto out; 5029 5030 if (_nfs4_async_handle_error(task, NULL, clp, NULL) 5031 == -EAGAIN) { ··· 5032 } 5033 } 5034 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 5035 + out: 5036 kfree(task->tk_msg.rpc_argp); 5037 kfree(task->tk_msg.rpc_resp); 5038 ··· 5057 static const struct rpc_call_ops nfs41_sequence_ops = { 5058 .rpc_call_done = nfs41_sequence_call_done, 5059 .rpc_call_prepare = nfs41_sequence_prepare, 5060 + .rpc_release = nfs41_sequence_release, 5061 }; 5062 5063 static int nfs41_proc_async_sequence(struct nfs_client *clp, ··· 5069 .rpc_cred = cred, 5070 }; 5071 5072 + if (!atomic_inc_not_zero(&clp->cl_count)) 5073 + return -EIO; 5074 args = kzalloc(sizeof(*args), GFP_KERNEL); 5075 res = kzalloc(sizeof(*res), GFP_KERNEL); 5076 + if (!args || !res) { 5077 kfree(args); 5078 + nfs_put_client(clp); 5079 return -ENOMEM; 5080 } 5081 res->sr_slotid = NFS4_MAX_SLOT_TABLE;
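A recurring pattern in this hunk is pinning the nfs_client before firing an asynchronous RENEW or SEQUENCE call and dropping the reference from the rpc_release callback, using atomic_inc_not_zero() so a client that is already being torn down is never resurrected. A small userspace sketch of that primitive using C11 atomics; struct client and the function names are hypothetical, and the real code frees the object when the count reaches zero.

#include <stdatomic.h>
#include <stdbool.h>

struct client {
	atomic_int refcount;	/* mirrors clp->cl_count */
};

/* Take a reference only if the object is still live (atomic_inc_not_zero()). */
static bool client_get_not_zero(struct client *clp)
{
	int old = atomic_load(&clp->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&clp->refcount, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; retry */
	}
	return false;
}

/* Dropped from the async call's release callback once the RPC is done. */
static void client_put(struct client *clp)
{
	atomic_fetch_sub(&clp->refcount, 1);
}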
+7 -17
fs/nfs/nfs4renewd.c
··· 36 * as an rpc_task, not a real kernel thread, so it always runs in rpciod's 37 * context. There is one renewd per nfs_server. 38 * 39 - * TODO: If the send queue gets backlogged (e.g., if the server goes down), 40 - * we will keep filling the queue with periodic RENEW requests. We need a 41 - * mechanism for ensuring that if renewd successfully sends off a request, 42 - * then it only wakes up when the request is finished. Maybe use the 43 - * child task framework of the RPC layer? 44 */ 45 46 #include <linux/mm.h> ··· 58 struct nfs_client *clp = 59 container_of(work, struct nfs_client, cl_renewd.work); 60 struct rpc_cred *cred; 61 - long lease, timeout; 62 unsigned long last, now; 63 64 ops = nfs4_state_renewal_ops[clp->cl_minorversion]; ··· 70 lease = clp->cl_lease_time; 71 last = clp->cl_last_renewal; 72 now = jiffies; 73 - timeout = (2 * lease) / 3 + (long)last - (long)now; 74 /* Are we close to a lease timeout? */ 75 if (time_after(now, last + lease/3)) { 76 cred = ops->get_state_renewal_cred_locked(clp); ··· 84 /* Queue an asynchronous RENEW. */ 85 ops->sched_state_renewal(clp, cred); 86 put_rpccred(cred); 87 } 88 - timeout = (2 * lease) / 3; 89 - spin_lock(&clp->cl_lock); 90 - } else 91 dprintk("%s: failed to call renewd. Reason: lease not expired \n", 92 __func__); 93 - if (timeout < 5 * HZ) /* safeguard */ 94 - timeout = 5 * HZ; 95 - dprintk("%s: requeueing work. Lease period = %ld\n", 96 - __func__, (timeout + HZ - 1) / HZ); 97 - cancel_delayed_work(&clp->cl_renewd); 98 - schedule_delayed_work(&clp->cl_renewd, timeout); 99 - spin_unlock(&clp->cl_lock); 100 nfs_expire_unreferenced_delegations(clp); 101 out: 102 dprintk("%s: done\n", __func__);
··· 36 * as an rpc_task, not a real kernel thread, so it always runs in rpciod's 37 * context. There is one renewd per nfs_server. 38 * 39 */ 40 41 #include <linux/mm.h> ··· 63 struct nfs_client *clp = 64 container_of(work, struct nfs_client, cl_renewd.work); 65 struct rpc_cred *cred; 66 + long lease; 67 unsigned long last, now; 68 69 ops = nfs4_state_renewal_ops[clp->cl_minorversion]; ··· 75 lease = clp->cl_lease_time; 76 last = clp->cl_last_renewal; 77 now = jiffies; 78 /* Are we close to a lease timeout? */ 79 if (time_after(now, last + lease/3)) { 80 cred = ops->get_state_renewal_cred_locked(clp); ··· 90 /* Queue an asynchronous RENEW. */ 91 ops->sched_state_renewal(clp, cred); 92 put_rpccred(cred); 93 + goto out_exp; 94 } 95 + } else { 96 dprintk("%s: failed to call renewd. Reason: lease not expired \n", 97 __func__); 98 + spin_unlock(&clp->cl_lock); 99 + } 100 + nfs4_schedule_state_renewal(clp); 101 + out_exp: 102 nfs_expire_unreferenced_delegations(clp); 103 out: 104 dprintk("%s: done\n", __func__);
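After this change renewd no longer re-arms its own timer unconditionally; it either queues an asynchronous RENEW and lets that call's release callback reschedule, or calls nfs4_schedule_state_renewal() itself, which prevents the backlog of renew requests mentioned in the shortlog. The trigger condition is unchanged: a third of the lease has elapsed since the last successful renewal. A one-line sketch of that check in seconds; the kernel compares jiffies with time_after().

#include <stdbool.h>
#include <time.h>

static bool lease_needs_renewal(time_t last_renewal, long lease_secs, time_t now)
{
	/* Renew once a third of the lease period has gone by. */
	return now > last_renewal + lease_secs / 3;
}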
+98 -20
fs/nfs/nfs4state.c
··· 1249 } 1250 1251 #ifdef CONFIG_NFS_V4_1 1252 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) 1253 { 1254 if (!flags) 1255 return; 1256 - else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) { 1257 - set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1258 - nfs4_state_start_reclaim_reboot(clp); 1259 - nfs4_schedule_state_recovery(clp); 1260 - } else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 1261 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | 1262 SEQ4_STATUS_ADMIN_STATE_REVOKED | 1263 - SEQ4_STATUS_RECALLABLE_STATE_REVOKED | 1264 - SEQ4_STATUS_LEASE_MOVED)) { 1265 - set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1266 - nfs4_state_start_reclaim_nograce(clp); 1267 - nfs4_schedule_state_recovery(clp); 1268 - } else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | 1269 SEQ4_STATUS_BACKCHANNEL_FAULT | 1270 SEQ4_STATUS_CB_PATH_DOWN_SESSION)) 1271 - nfs_expire_all_delegations(clp); 1272 } 1273 1274 static int nfs4_reset_session(struct nfs_client *clp) ··· 1324 1325 memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); 1326 status = nfs4_proc_create_session(clp); 1327 - if (status) 1328 status = nfs4_recovery_handle_error(clp, status); 1329 1330 - out: 1331 - /* 1332 - * Let the state manager reestablish state 1333 - */ 1334 - if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && 1335 - status == 0) 1336 nfs41_setup_state_renewal(clp); 1337 - 1338 return status; 1339 } 1340 1341 #else /* CONFIG_NFS_V4_1 */ 1342 static int nfs4_reset_session(struct nfs_client *clp) { return 0; } 1343 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; } 1344 #endif /* CONFIG_NFS_V4_1 */ 1345 1346 /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors ··· 1382 case -NFS4ERR_DELAY: 1383 case -NFS4ERR_CLID_INUSE: 1384 case -EAGAIN: 1385 break; 1386 1387 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery ··· 1466 nfs_client_return_marked_delegations(clp); 1467 continue; 1468 } 1469 1470 nfs4_clear_state_manager_bit(clp); 1471 /* Did we race with an attempt to give us more work? */
··· 1249 } 1250 1251 #ifdef CONFIG_NFS_V4_1 1252 + void nfs41_handle_recall_slot(struct nfs_client *clp) 1253 + { 1254 + set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1255 + nfs4_schedule_state_recovery(clp); 1256 + } 1257 + 1258 + static void nfs4_reset_all_state(struct nfs_client *clp) 1259 + { 1260 + if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1261 + clp->cl_boot_time = CURRENT_TIME; 1262 + nfs4_state_start_reclaim_nograce(clp); 1263 + nfs4_schedule_state_recovery(clp); 1264 + } 1265 + } 1266 + 1267 + static void nfs41_handle_server_reboot(struct nfs_client *clp) 1268 + { 1269 + if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1270 + nfs4_state_start_reclaim_reboot(clp); 1271 + nfs4_schedule_state_recovery(clp); 1272 + } 1273 + } 1274 + 1275 + static void nfs41_handle_state_revoked(struct nfs_client *clp) 1276 + { 1277 + /* Temporary */ 1278 + nfs4_reset_all_state(clp); 1279 + } 1280 + 1281 + static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) 1282 + { 1283 + /* This will need to handle layouts too */ 1284 + nfs_expire_all_delegations(clp); 1285 + } 1286 + 1287 + static void nfs41_handle_cb_path_down(struct nfs_client *clp) 1288 + { 1289 + nfs_expire_all_delegations(clp); 1290 + if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 1291 + nfs4_schedule_state_recovery(clp); 1292 + } 1293 + 1294 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) 1295 { 1296 if (!flags) 1297 return; 1298 + else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 1299 + nfs41_handle_server_reboot(clp); 1300 + else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 1301 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | 1302 SEQ4_STATUS_ADMIN_STATE_REVOKED | 1303 + SEQ4_STATUS_LEASE_MOVED)) 1304 + nfs41_handle_state_revoked(clp); 1305 + else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 1306 + nfs41_handle_recallable_state_revoked(clp); 1307 + else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | 1308 SEQ4_STATUS_BACKCHANNEL_FAULT | 1309 SEQ4_STATUS_CB_PATH_DOWN_SESSION)) 1310 + nfs41_handle_cb_path_down(clp); 1311 } 1312 1313 static int nfs4_reset_session(struct nfs_client *clp) ··· 1285 1286 memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); 1287 status = nfs4_proc_create_session(clp); 1288 + if (status) { 1289 status = nfs4_recovery_handle_error(clp, status); 1290 + goto out; 1291 + } 1292 + /* create_session negotiated new slot table */ 1293 + clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1294 1295 + /* Let the state manager reestablish state */ 1296 + if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 1297 nfs41_setup_state_renewal(clp); 1298 + out: 1299 return status; 1300 + } 1301 + 1302 + static int nfs4_recall_slot(struct nfs_client *clp) 1303 + { 1304 + struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table; 1305 + struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs; 1306 + struct nfs4_slot *new, *old; 1307 + int i; 1308 + 1309 + nfs4_begin_drain_session(clp); 1310 + new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot), 1311 + GFP_KERNEL); 1312 + if (!new) 1313 + return -ENOMEM; 1314 + 1315 + spin_lock(&fc_tbl->slot_tbl_lock); 1316 + for (i = 0; i < fc_tbl->target_max_slots; i++) 1317 + new[i].seq_nr = fc_tbl->slots[i].seq_nr; 1318 + old = fc_tbl->slots; 1319 + fc_tbl->slots = new; 1320 + fc_tbl->max_slots = fc_tbl->target_max_slots; 1321 + fc_tbl->target_max_slots = 0; 1322 + fc_attrs->max_reqs = fc_tbl->max_slots; 1323 + 
spin_unlock(&fc_tbl->slot_tbl_lock); 1324 + 1325 + kfree(old); 1326 + nfs4_end_drain_session(clp); 1327 + return 0; 1328 } 1329 1330 #else /* CONFIG_NFS_V4_1 */ 1331 static int nfs4_reset_session(struct nfs_client *clp) { return 0; } 1332 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; } 1333 + static int nfs4_recall_slot(struct nfs_client *clp) { return 0; } 1334 #endif /* CONFIG_NFS_V4_1 */ 1335 1336 /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors ··· 1314 case -NFS4ERR_DELAY: 1315 case -NFS4ERR_CLID_INUSE: 1316 case -EAGAIN: 1317 + case -EKEYEXPIRED: 1318 break; 1319 1320 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery ··· 1397 nfs_client_return_marked_delegations(clp); 1398 continue; 1399 } 1400 + /* Recall session slots */ 1401 + if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state) 1402 + && nfs4_has_session(clp)) { 1403 + status = nfs4_recall_slot(clp); 1404 + if (status < 0) 1405 + goto out_error; 1406 + continue; 1407 + } 1408 + 1409 1410 nfs4_clear_state_manager_bit(clp); 1411 /* Did we race with an attempt to give us more work? */
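nfs4_recall_slot() above is the new state-manager step for a slot recall: drain the session, allocate a smaller forechannel slot array, copy each surviving slot's sequence number across under fc_tbl->slot_tbl_lock, swap the arrays, and keep fc_attrs->max_reqs in sync with the new size. Below is a minimal user-space sketch of the resize-while-preserving-seq_nr part; the struct and function names are invented for illustration and are not the kernel's.

/* Minimal sketch of shrinking a slot table while keeping each slot's
 * sequence number, loosely modelled on nfs4_recall_slot() above.  The
 * struct and function names are invented for this example. */
#include <stdio.h>
#include <stdlib.h>

struct slot { unsigned int seq_nr; };

struct slot_table {
    struct slot *slots;
    int max_slots;
    int target_max_slots;   /* what the (hypothetical) recall asked for */
};

static int resize_slot_table(struct slot_table *tbl)
{
    struct slot *new_slots, *old;
    int i;

    new_slots = calloc(tbl->target_max_slots, sizeof(*new_slots));
    if (!new_slots)
        return -1;
    /* keep the sequence number of every surviving slot */
    for (i = 0; i < tbl->target_max_slots; i++)
        new_slots[i].seq_nr = tbl->slots[i].seq_nr;
    old = tbl->slots;
    tbl->slots = new_slots;
    tbl->max_slots = tbl->target_max_slots;
    tbl->target_max_slots = 0;
    free(old);
    return 0;
}

int main(void)
{
    struct slot_table tbl = { .max_slots = 4, .target_max_slots = 2 };
    int i;

    tbl.slots = calloc(tbl.max_slots, sizeof(*tbl.slots));
    for (i = 0; i < tbl.max_slots; i++)
        tbl.slots[i].seq_nr = 10 + i;

    if (resize_slot_table(&tbl) == 0)
        printf("resized to %d slots, slot 1 seq_nr=%u\n",
               tbl.max_slots, tbl.slots[1].seq_nr);
    free(tbl.slots);
    return 0;
}

In the kernel version the old array is freed only after the table lock is dropped, and the session drain guarantees no request is using a slot while the swap happens.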
+9 -1
fs/nfs/nfs4xdr.c
··· 1578 char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; 1579 uint32_t len; 1580 struct nfs_client *clp = args->client; 1581 1582 len = scnprintf(machine_name, sizeof(machine_name), "%s", 1583 clp->cl_ipaddr); ··· 1600 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ 1601 *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ 1602 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ 1603 - *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */ 1604 *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */ 1605 *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */ 1606 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
··· 1578 char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; 1579 uint32_t len; 1580 struct nfs_client *clp = args->client; 1581 + u32 max_resp_sz_cached; 1582 + 1583 + /* 1584 + * Assumes OPEN is the biggest non-idempotent compound. 1585 + * 2 is the verifier. 1586 + */ 1587 + max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 1588 + RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; 1589 1590 len = scnprintf(machine_name, sizeof(machine_name), "%s", 1591 clp->cl_ipaddr); ··· 1592 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ 1593 *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ 1594 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ 1595 + *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ 1596 *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */ 1597 *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */ 1598 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
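The create_session encoder above stops trusting the caller-supplied max_resp_sz_cached and instead derives it from the decoded size of OPEN (assumed to be the largest non-idempotent compound), plus the RPC reply header, the maximum auth blob and two XDR words of verifier, all scaled to bytes by XDR_UNIT. A toy version of that arithmetic is sketched below; the numbers are placeholders, not the real values of NFS4_dec_open_sz, RPC_REPHDRSIZE or RPC_MAX_AUTH_SIZE.

/* Sketch of the max_resp_sz_cached computation above.  The sizes below
 * are placeholders, not the real kernel constants. */
#include <stdio.h>

int main(void)
{
    const unsigned int xdr_unit = 4;        /* bytes per XDR word */
    unsigned int dec_open_sz = 100;         /* hypothetical NFS4_dec_open_sz */
    unsigned int rep_hdr_sz = 8;            /* hypothetical RPC_REPHDRSIZE */
    unsigned int max_auth_sz = 100;         /* hypothetical RPC_MAX_AUTH_SIZE */
    unsigned int verifier = 2;              /* two XDR words of verifier */
    unsigned int max_resp_sz_cached;

    max_resp_sz_cached =
        (dec_open_sz + rep_hdr_sz + max_auth_sz + verifier) * xdr_unit;
    printf("max_resp_sz_cached = %u bytes\n", max_resp_sz_cached);
    return 0;
}

The effect is that the cached-reply ceiling advertised to the server is sized for replaying an OPEN reply rather than being whatever happened to be passed in through fc_attrs.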
+41
fs/nfs/proc.c
··· 47 #define NFSDBG_FACILITY NFSDBG_PROC 48 49 /* 50 * Bare-bones access to getattr: this is for nfs_read_super. 51 */ 52 static int ··· 340 341 static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) 342 { 343 nfs_mark_for_revalidate(dir); 344 return 1; 345 } ··· 595 596 static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) 597 { 598 nfs_invalidate_atime(data->inode); 599 if (task->tk_status >= 0) { 600 nfs_refresh_inode(data->inode, data->res.fattr); ··· 617 618 static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data) 619 { 620 if (task->tk_status >= 0) 621 nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr); 622 return 0;
··· 47 #define NFSDBG_FACILITY NFSDBG_PROC 48 49 /* 50 + * wrapper to handle the -EKEYEXPIRED error message. This should generally 51 + * only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't 52 + * support the NFSERR_JUKEBOX error code, but we handle this situation in the 53 + * same way that we handle that error with NFSv3. 54 + */ 55 + static int 56 + nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) 57 + { 58 + int res; 59 + do { 60 + res = rpc_call_sync(clnt, msg, flags); 61 + if (res != -EKEYEXPIRED) 62 + break; 63 + schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME); 64 + res = -ERESTARTSYS; 65 + } while (!fatal_signal_pending(current)); 66 + return res; 67 + } 68 + 69 + #define rpc_call_sync(clnt, msg, flags) nfs_rpc_wrapper(clnt, msg, flags) 70 + 71 + static int 72 + nfs_async_handle_expired_key(struct rpc_task *task) 73 + { 74 + if (task->tk_status != -EKEYEXPIRED) 75 + return 0; 76 + task->tk_status = 0; 77 + rpc_restart_call(task); 78 + rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); 79 + return 1; 80 + } 81 + 82 + /* 83 * Bare-bones access to getattr: this is for nfs_read_super. 84 */ 85 static int ··· 307 308 static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) 309 { 310 + if (nfs_async_handle_expired_key(task)) 311 + return 0; 312 nfs_mark_for_revalidate(dir); 313 return 1; 314 } ··· 560 561 static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) 562 { 563 + if (nfs_async_handle_expired_key(task)) 564 + return -EAGAIN; 565 + 566 nfs_invalidate_atime(data->inode); 567 if (task->tk_status >= 0) { 568 nfs_refresh_inode(data->inode, data->res.fattr); ··· 579 580 static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data) 581 { 582 + if (nfs_async_handle_expired_key(task)) 583 + return -EAGAIN; 584 + 585 if (task->tk_status >= 0) 586 nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr); 587 return 0;
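The proc.c hunk wraps every synchronous NFSv2 call so that -EKEYEXPIRED (typically a krb5 TGT that has expired) is retried after a delay instead of being returned, mirroring how a v3 JUKEBOX error is handled; the loop only gives up when a fatal signal is pending, returning -ERESTARTSYS. A user-space analogue of that retry shape is sketched below; do_call() is a stand-in for rpc_call_sync() and the whole program is illustrative only.

/* Sketch of the "retry on EKEYEXPIRED, give up on a fatal signal" loop
 * used by nfs_rpc_wrapper() above.  do_call() stands in for
 * rpc_call_sync() and is purely illustrative. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t interrupted;

static void on_sigint(int sig) { (void)sig; interrupted = 1; }

/* Pretend RPC: fails twice with EKEYEXPIRED, then succeeds. */
static int do_call(void)
{
    static int attempts;

    return ++attempts < 3 ? -EKEYEXPIRED : 0;
}

static int call_with_retry(void)
{
    int res;

    do {
        res = do_call();
        if (res != -EKEYEXPIRED)
            break;
        fprintf(stderr, "credentials expired, retrying...\n");
        sleep(1);       /* the kernel sleeps killably here */
        res = -EINTR;   /* what we return if we give up */
    } while (!interrupted);
    return res;
}

int main(void)
{
    signal(SIGINT, on_sigint);
    printf("result = %d\n", call_with_retry());
    return 0;
}

The asynchronous variant in the same hunk, nfs_async_handle_expired_key(), does the equivalent for rpc_tasks: clear tk_status, restart the call and insert an rpc_delay().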
+1 -1
fs/nfs/symlink.c
··· 50 struct page *page; 51 void *err; 52 53 - err = ERR_PTR(nfs_revalidate_mapping_nolock(inode, inode->i_mapping)); 54 if (err) 55 goto read_failed; 56 page = read_cache_page(&inode->i_data, 0,
··· 50 struct page *page; 51 void *err; 52 53 + err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping)); 54 if (err) 55 goto read_failed; 56 page = read_cache_page(&inode->i_data, 0,
+96 -191
fs/nfs/write.c
··· 438 radix_tree_tag_set(&nfsi->nfs_page_tree, 439 req->wb_index, 440 NFS_PAGE_TAG_COMMIT); 441 spin_unlock(&inode->i_lock); 442 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 443 inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); ··· 502 } 503 #endif 504 505 - /* 506 - * Wait for a request to complete. 507 - * 508 - * Interruptible by fatal signals only. 509 - */ 510 - static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages) 511 - { 512 - struct nfs_inode *nfsi = NFS_I(inode); 513 - struct nfs_page *req; 514 - pgoff_t idx_end, next; 515 - unsigned int res = 0; 516 - int error; 517 - 518 - if (npages == 0) 519 - idx_end = ~0; 520 - else 521 - idx_end = idx_start + npages - 1; 522 - 523 - next = idx_start; 524 - while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) { 525 - if (req->wb_index > idx_end) 526 - break; 527 - 528 - next = req->wb_index + 1; 529 - BUG_ON(!NFS_WBACK_BUSY(req)); 530 - 531 - kref_get(&req->wb_kref); 532 - spin_unlock(&inode->i_lock); 533 - error = nfs_wait_on_request(req); 534 - nfs_release_request(req); 535 - spin_lock(&inode->i_lock); 536 - if (error < 0) 537 - return error; 538 - res++; 539 - } 540 - return res; 541 - } 542 - 543 - static void nfs_cancel_commit_list(struct list_head *head) 544 - { 545 - struct nfs_page *req; 546 - 547 - while(!list_empty(head)) { 548 - req = nfs_list_entry(head->next); 549 - nfs_list_remove_request(req); 550 - nfs_clear_request_commit(req); 551 - nfs_inode_remove_request(req); 552 - nfs_unlock_request(req); 553 - } 554 - } 555 - 556 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 557 static int 558 nfs_need_commit(struct nfs_inode *nfsi) ··· 523 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) 524 { 525 struct nfs_inode *nfsi = NFS_I(inode); 526 527 if (!nfs_need_commit(nfsi)) 528 return 0; 529 530 - return nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); 531 } 532 #else 533 static inline int nfs_need_commit(struct nfs_inode *nfsi) ··· 598 spin_lock(&inode->i_lock); 599 } 600 601 - if (nfs_clear_request_commit(req)) 602 - radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, 603 - req->wb_index, NFS_PAGE_TAG_COMMIT); 604 605 /* Okay, the request matches. 
Update the region */ 606 if (offset < req->wb_offset) { ··· 1348 .rpc_release = nfs_commit_release, 1349 }; 1350 1351 - int nfs_commit_inode(struct inode *inode, int how) 1352 { 1353 LIST_HEAD(head); 1354 int res; ··· 1363 } 1364 return res; 1365 } 1366 #else 1367 - static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) 1368 { 1369 return 0; 1370 } 1371 #endif 1372 1373 - long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) 1374 { 1375 - struct inode *inode = mapping->host; 1376 - pgoff_t idx_start, idx_end; 1377 - unsigned int npages = 0; 1378 - LIST_HEAD(head); 1379 - int nocommit = how & FLUSH_NOCOMMIT; 1380 - long pages, ret; 1381 - 1382 - /* FIXME */ 1383 - if (wbc->range_cyclic) 1384 - idx_start = 0; 1385 - else { 1386 - idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; 1387 - idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; 1388 - if (idx_end > idx_start) { 1389 - pgoff_t l_npages = 1 + idx_end - idx_start; 1390 - npages = l_npages; 1391 - if (sizeof(npages) != sizeof(l_npages) && 1392 - (pgoff_t)npages != l_npages) 1393 - npages = 0; 1394 - } 1395 - } 1396 - how &= ~FLUSH_NOCOMMIT; 1397 - spin_lock(&inode->i_lock); 1398 - do { 1399 - ret = nfs_wait_on_requests_locked(inode, idx_start, npages); 1400 - if (ret != 0) 1401 - continue; 1402 - if (nocommit) 1403 - break; 1404 - pages = nfs_scan_commit(inode, &head, idx_start, npages); 1405 - if (pages == 0) 1406 - break; 1407 - if (how & FLUSH_INVALIDATE) { 1408 - spin_unlock(&inode->i_lock); 1409 - nfs_cancel_commit_list(&head); 1410 - ret = pages; 1411 - spin_lock(&inode->i_lock); 1412 - continue; 1413 - } 1414 - pages += nfs_scan_commit(inode, &head, 0, 0); 1415 - spin_unlock(&inode->i_lock); 1416 - ret = nfs_commit_list(inode, &head, how); 1417 - spin_lock(&inode->i_lock); 1418 - 1419 - } while (ret >= 0); 1420 - spin_unlock(&inode->i_lock); 1421 - return ret; 1422 - } 1423 - 1424 - static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) 1425 - { 1426 - int ret; 1427 - 1428 - ret = nfs_writepages(mapping, wbc); 1429 - if (ret < 0) 1430 - goto out; 1431 - ret = nfs_sync_mapping_wait(mapping, wbc, how); 1432 - if (ret < 0) 1433 - goto out; 1434 - return 0; 1435 - out: 1436 - __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1437 - return ret; 1438 - } 1439 - 1440 - /* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */ 1441 - static int nfs_write_mapping(struct address_space *mapping, int how) 1442 - { 1443 - struct writeback_control wbc = { 1444 - .bdi = mapping->backing_dev_info, 1445 - .sync_mode = WB_SYNC_ALL, 1446 - .nr_to_write = LONG_MAX, 1447 - .range_start = 0, 1448 - .range_end = LLONG_MAX, 1449 - }; 1450 - 1451 - return __nfs_write_mapping(mapping, &wbc, how); 1452 } 1453 1454 /* ··· 1415 */ 1416 int nfs_wb_all(struct inode *inode) 1417 { 1418 - return nfs_write_mapping(inode->i_mapping, 0); 1419 - } 1420 1421 - int nfs_wb_nocommit(struct inode *inode) 1422 - { 1423 - return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT); 1424 } 1425 1426 int nfs_wb_page_cancel(struct inode *inode, struct page *page) 1427 { 1428 struct nfs_page *req; 1429 - loff_t range_start = page_offset(page); 1430 - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 1431 - struct writeback_control wbc = { 1432 - .bdi = page->mapping->backing_dev_info, 1433 - .sync_mode = WB_SYNC_ALL, 1434 - .nr_to_write = LONG_MAX, 1435 - .range_start = range_start, 1436 - .range_end = range_end, 1437 - }; 1438 int ret = 0; 
1439 1440 BUG_ON(!PageLocked(page)); 1441 for (;;) { 1442 req = nfs_page_find_request(page); 1443 if (req == NULL) 1444 - goto out; 1445 - if (test_bit(PG_CLEAN, &req->wb_flags)) { 1446 - nfs_release_request(req); 1447 break; 1448 - } 1449 if (nfs_lock_request_dontget(req)) { 1450 nfs_inode_remove_request(req); 1451 /* ··· 1448 ret = nfs_wait_on_request(req); 1449 nfs_release_request(req); 1450 if (ret < 0) 1451 - goto out; 1452 - } 1453 - if (!PagePrivate(page)) 1454 - return 0; 1455 - ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE); 1456 - out: 1457 - return ret; 1458 - } 1459 - 1460 - static int nfs_wb_page_priority(struct inode *inode, struct page *page, 1461 - int how) 1462 - { 1463 - loff_t range_start = page_offset(page); 1464 - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 1465 - struct writeback_control wbc = { 1466 - .bdi = page->mapping->backing_dev_info, 1467 - .sync_mode = WB_SYNC_ALL, 1468 - .nr_to_write = LONG_MAX, 1469 - .range_start = range_start, 1470 - .range_end = range_end, 1471 - }; 1472 - int ret; 1473 - 1474 - do { 1475 - if (clear_page_dirty_for_io(page)) { 1476 - ret = nfs_writepage_locked(page, &wbc); 1477 - if (ret < 0) 1478 - goto out_error; 1479 - } else if (!PagePrivate(page)) 1480 break; 1481 - ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); 1482 - if (ret < 0) 1483 - goto out_error; 1484 - } while (PagePrivate(page)); 1485 - return 0; 1486 - out_error: 1487 - __mark_inode_dirty(inode, I_DIRTY_PAGES); 1488 return ret; 1489 } 1490 1491 /* 1492 * Write back all requests on one page - we do this before reading it. 1493 */ 1494 - int nfs_wb_page(struct inode *inode, struct page* page) 1495 { 1496 - return nfs_wb_page_priority(inode, page, FLUSH_STABLE); 1497 } 1498 1499 #ifdef CONFIG_MIGRATION
··· 438 radix_tree_tag_set(&nfsi->nfs_page_tree, 439 req->wb_index, 440 NFS_PAGE_TAG_COMMIT); 441 + nfsi->ncommit++; 442 spin_unlock(&inode->i_lock); 443 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 444 inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); ··· 501 } 502 #endif 503 504 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 505 static int 506 nfs_need_commit(struct nfs_inode *nfsi) ··· 573 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) 574 { 575 struct nfs_inode *nfsi = NFS_I(inode); 576 + int ret; 577 578 if (!nfs_need_commit(nfsi)) 579 return 0; 580 581 + ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); 582 + if (ret > 0) 583 + nfsi->ncommit -= ret; 584 + if (nfs_need_commit(NFS_I(inode))) 585 + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 586 + return ret; 587 } 588 #else 589 static inline int nfs_need_commit(struct nfs_inode *nfsi) ··· 642 spin_lock(&inode->i_lock); 643 } 644 645 + if (nfs_clear_request_commit(req) && 646 + radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, 647 + req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) 648 + NFS_I(inode)->ncommit--; 649 650 /* Okay, the request matches. Update the region */ 651 if (offset < req->wb_offset) { ··· 1391 .rpc_release = nfs_commit_release, 1392 }; 1393 1394 + static int nfs_commit_inode(struct inode *inode, int how) 1395 { 1396 LIST_HEAD(head); 1397 int res; ··· 1406 } 1407 return res; 1408 } 1409 + 1410 + static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) 1411 + { 1412 + struct nfs_inode *nfsi = NFS_I(inode); 1413 + int flags = FLUSH_SYNC; 1414 + int ret = 0; 1415 + 1416 + /* Don't commit yet if this is a non-blocking flush and there are 1417 + * lots of outstanding writes for this mapping. 
1418 + */ 1419 + if (wbc->sync_mode == WB_SYNC_NONE && 1420 + nfsi->ncommit <= (nfsi->npages >> 1)) 1421 + goto out_mark_dirty; 1422 + 1423 + if (wbc->nonblocking || wbc->for_background) 1424 + flags = 0; 1425 + ret = nfs_commit_inode(inode, flags); 1426 + if (ret >= 0) { 1427 + if (wbc->sync_mode == WB_SYNC_NONE) { 1428 + if (ret < wbc->nr_to_write) 1429 + wbc->nr_to_write -= ret; 1430 + else 1431 + wbc->nr_to_write = 0; 1432 + } 1433 + return 0; 1434 + } 1435 + out_mark_dirty: 1436 + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1437 + return ret; 1438 + } 1439 #else 1440 + static int nfs_commit_inode(struct inode *inode, int how) 1441 + { 1442 + return 0; 1443 + } 1444 + 1445 + static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) 1446 { 1447 return 0; 1448 } 1449 #endif 1450 1451 + int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1452 { 1453 + return nfs_commit_unstable_pages(inode, wbc); 1454 } 1455 1456 /* ··· 1499 */ 1500 int nfs_wb_all(struct inode *inode) 1501 { 1502 + struct writeback_control wbc = { 1503 + .sync_mode = WB_SYNC_ALL, 1504 + .nr_to_write = LONG_MAX, 1505 + .range_start = 0, 1506 + .range_end = LLONG_MAX, 1507 + }; 1508 1509 + return sync_inode(inode, &wbc); 1510 } 1511 1512 int nfs_wb_page_cancel(struct inode *inode, struct page *page) 1513 { 1514 struct nfs_page *req; 1515 int ret = 0; 1516 1517 BUG_ON(!PageLocked(page)); 1518 for (;;) { 1519 req = nfs_page_find_request(page); 1520 if (req == NULL) 1521 break; 1522 if (nfs_lock_request_dontget(req)) { 1523 nfs_inode_remove_request(req); 1524 /* ··· 1543 ret = nfs_wait_on_request(req); 1544 nfs_release_request(req); 1545 if (ret < 0) 1546 break; 1547 + } 1548 return ret; 1549 } 1550 1551 /* 1552 * Write back all requests on one page - we do this before reading it. 1553 */ 1554 + int nfs_wb_page(struct inode *inode, struct page *page) 1555 { 1556 + loff_t range_start = page_offset(page); 1557 + loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 1558 + struct writeback_control wbc = { 1559 + .sync_mode = WB_SYNC_ALL, 1560 + .nr_to_write = 0, 1561 + .range_start = range_start, 1562 + .range_end = range_end, 1563 + }; 1564 + struct nfs_page *req; 1565 + int need_commit; 1566 + int ret; 1567 + 1568 + while(PagePrivate(page)) { 1569 + if (clear_page_dirty_for_io(page)) { 1570 + ret = nfs_writepage_locked(page, &wbc); 1571 + if (ret < 0) 1572 + goto out_error; 1573 + } 1574 + req = nfs_find_and_lock_request(page); 1575 + if (!req) 1576 + break; 1577 + if (IS_ERR(req)) { 1578 + ret = PTR_ERR(req); 1579 + goto out_error; 1580 + } 1581 + need_commit = test_bit(PG_CLEAN, &req->wb_flags); 1582 + nfs_clear_page_tag_locked(req); 1583 + if (need_commit) { 1584 + ret = nfs_commit_inode(inode, FLUSH_SYNC); 1585 + if (ret < 0) 1586 + goto out_error; 1587 + } 1588 + } 1589 + return 0; 1590 + out_error: 1591 + return ret; 1592 } 1593 1594 #ifdef CONFIG_MIGRATION
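nfs_commit_unstable_pages() above decides whether a writeback pass should bother sending a COMMIT: for a non-blocking flush it skips the commit unless unstable pages make up more than half of the inode's outstanding pages (nfsi->ncommit > npages/2), and whenever the commit is skipped or fails the inode is re-marked I_DIRTY_DATASYNC so it is retried later. A small sketch of just that decision follows, with invented names.

/* Sketch of the "is it worth sending a COMMIT yet?" heuristic from
 * nfs_commit_unstable_pages() above.  Purely illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct inode_stats {
    unsigned long npages;    /* pages with outstanding NFS requests */
    unsigned long ncommit;   /* of those, written but not yet committed */
};

static bool should_commit(const struct inode_stats *st, bool blocking)
{
    if (blocking)
        return true;    /* WB_SYNC_ALL: always commit */
    /* non-blocking flush: only commit once unstable pages dominate */
    return st->ncommit > (st->npages >> 1);
}

int main(void)
{
    struct inode_stats st = { .npages = 100, .ncommit = 30 };

    printf("30/100 unstable, non-blocking: %s\n",
           should_commit(&st, false) ? "commit" : "skip, re-mark dirty");
    st.ncommit = 80;
    printf("80/100 unstable, non-blocking: %s\n",
           should_commit(&st, false) ? "commit" : "skip, re-mark dirty");
    return 0;
}

For background or nonblocking writeback the COMMIT is also issued without FLUSH_SYNC, so the flusher thread is not held up waiting for the server's reply.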
+1 -13
include/linux/nfs_fs.h
··· 33 #define FLUSH_STABLE 4 /* commit to stable storage */ 34 #define FLUSH_LOWPRI 8 /* low priority background flush */ 35 #define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */ 36 - #define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */ 37 - #define FLUSH_INVALIDATE 64 /* Invalidate the page cache */ 38 - #define FLUSH_NOWRITEPAGE 128 /* Don't call writepage() */ 39 40 #ifdef __KERNEL__ 41 ··· 163 struct radix_tree_root nfs_page_tree; 164 165 unsigned long npages; 166 167 /* Open contexts for shared mmap writes */ 168 struct list_head open_files; ··· 347 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); 348 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); 349 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); 350 - extern int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping); 351 extern int nfs_setattr(struct dentry *, struct iattr *); 352 extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); 353 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); ··· 474 * Try to write back everything synchronously (but check the 475 * return value!) 476 */ 477 - extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int); 478 extern int nfs_wb_all(struct inode *inode); 479 - extern int nfs_wb_nocommit(struct inode *inode); 480 extern int nfs_wb_page(struct inode *inode, struct page* page); 481 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); 482 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 483 - extern int nfs_commit_inode(struct inode *, int); 484 extern struct nfs_write_data *nfs_commitdata_alloc(void); 485 extern void nfs_commit_free(struct nfs_write_data *wdata); 486 - #else 487 - static inline int 488 - nfs_commit_inode(struct inode *inode, int how) 489 - { 490 - return 0; 491 - } 492 #endif 493 494 static inline int
··· 33 #define FLUSH_STABLE 4 /* commit to stable storage */ 34 #define FLUSH_LOWPRI 8 /* low priority background flush */ 35 #define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */ 36 37 #ifdef __KERNEL__ 38 ··· 166 struct radix_tree_root nfs_page_tree; 167 168 unsigned long npages; 169 + unsigned long ncommit; 170 171 /* Open contexts for shared mmap writes */ 172 struct list_head open_files; ··· 349 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); 350 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); 351 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); 352 extern int nfs_setattr(struct dentry *, struct iattr *); 353 extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); 354 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); ··· 477 * Try to write back everything synchronously (but check the 478 * return value!) 479 */ 480 extern int nfs_wb_all(struct inode *inode); 481 extern int nfs_wb_page(struct inode *inode, struct page* page); 482 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); 483 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 484 extern struct nfs_write_data *nfs_commitdata_alloc(void); 485 extern void nfs_commit_free(struct nfs_write_data *wdata); 486 #endif 487 488 static inline int
+2
include/linux/nfs_fs_sb.h
··· 193 int max_slots; /* # slots in table */ 194 int highest_used_slotid; /* sent to server on each SEQ. 195 * op for dynamic resizing */ 196 }; 197 198 static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
··· 193 int max_slots; /* # slots in table */ 194 int highest_used_slotid; /* sent to server on each SEQ. 195 * op for dynamic resizing */ 196 + int target_max_slots; /* Set by CB_RECALL_SLOT as 197 + * the new max_slots */ 198 }; 199 200 static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
+15
include/linux/sunrpc/bc_xprt.h
··· 38 void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); 39 void bc_release_request(struct rpc_task *); 40 int bc_send(struct rpc_rqst *req); 41 #else /* CONFIG_NFS_V4_1 */ 42 static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, 43 unsigned int min_reqs) 44 { 45 return 0; 46 }
··· 38 void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); 39 void bc_release_request(struct rpc_task *); 40 int bc_send(struct rpc_rqst *req); 41 + 42 + /* 43 + * Determine if a shared backchannel is in use 44 + */ 45 + static inline int svc_is_backchannel(const struct svc_rqst *rqstp) 46 + { 47 + if (rqstp->rq_server->bc_xprt) 48 + return 1; 49 + return 0; 50 + } 51 #else /* CONFIG_NFS_V4_1 */ 52 static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, 53 unsigned int min_reqs) 54 + { 55 + return 0; 56 + } 57 + 58 + static inline int svc_is_backchannel(const struct svc_rqst *rqstp) 59 { 60 return 0; 61 }
+4 -4
net/sunrpc/addr.c
··· 71 if (unlikely(len == 0)) 72 return len; 73 74 - if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 75 - !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) 76 return len; 77 78 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", ··· 166 if (*delim != IPV6_SCOPE_DELIMITER) 167 return 0; 168 169 - if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 170 - !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) 171 return 0; 172 173 len = (buf + buflen) - delim - 1;
··· 71 if (unlikely(len == 0)) 72 return len; 73 74 + if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 75 + return len; 76 + if (sin6->sin6_scope_id == 0) 77 return len; 78 79 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", ··· 165 if (*delim != IPV6_SCOPE_DELIMITER) 166 return 0; 167 168 + if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 169 return 0; 170 171 len = (buf + buflen) - delim - 1;
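The addr.c change tightens when a scope suffix is added to a presentation-format IPv6 address: only link-local addresses get one (the site-local case is gone), and only when a non-zero sin6_scope_id is actually set. A user-space sketch of the same formatting rule:

/* Sketch of printing an IPv6 address with a scope suffix only for
 * link-local addresses that carry a non-zero sin6_scope_id, mirroring
 * the rule in the addr.c hunk above. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void print_addr(const struct sockaddr_in6 *sin6)
{
    char buf[INET6_ADDRSTRLEN];

    inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf));
    if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) && sin6->sin6_scope_id != 0)
        printf("%s%%%u\n", buf, sin6->sin6_scope_id);
    else
        printf("%s\n", buf);
}

int main(void)
{
    struct sockaddr_in6 sin6;

    memset(&sin6, 0, sizeof(sin6));
    sin6.sin6_family = AF_INET6;
    inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
    sin6.sin6_scope_id = 2;          /* e.g. an interface index */
    print_addr(&sin6);               /* "fe80::1%2" */

    inet_pton(AF_INET6, "2001:db8::1", &sin6.sin6_addr);
    sin6.sin6_scope_id = 0;
    print_addr(&sin6);               /* "2001:db8::1" */
    return 0;
}

The parsing side is adjusted symmetrically: a "%scope" suffix is only accepted when the address being parsed is link-local.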
+9 -2
net/sunrpc/auth_gss/auth_gss.c
··· 206 ctx->gc_win = window_size; 207 /* gssd signals an error by passing ctx->gc_win = 0: */ 208 if (ctx->gc_win == 0) { 209 - /* in which case, p points to an error code which we ignore */ 210 - p = ERR_PTR(-EACCES); 211 goto err; 212 } 213 /* copy the opaque wire context */ ··· 652 err = PTR_ERR(p); 653 switch (err) { 654 case -EACCES: 655 gss_msg->msg.errno = err; 656 err = mlen; 657 break;
··· 206 ctx->gc_win = window_size; 207 /* gssd signals an error by passing ctx->gc_win = 0: */ 208 if (ctx->gc_win == 0) { 209 + /* 210 + * in which case, p points to an error code. Anything other 211 + * than -EKEYEXPIRED gets converted to -EACCES. 212 + */ 213 + p = simple_get_bytes(p, end, &ret, sizeof(ret)); 214 + if (!IS_ERR(p)) 215 + p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) : 216 + ERR_PTR(-EACCES); 217 goto err; 218 } 219 /* copy the opaque wire context */ ··· 646 err = PTR_ERR(p); 647 switch (err) { 648 case -EACCES: 649 + case -EKEYEXPIRED: 650 gss_msg->msg.errno = err; 651 err = mlen; 652 break;
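The gss upcall change preserves -EKEYEXPIRED from gssd in the two places it can show up: when a context with gc_win == 0 carries an error code (anything other than -EKEYEXPIRED is still flattened to -EACCES), and in the downcall error switch. A trivial sketch of that mapping, illustrative only:

/* Trivial sketch of the error mapping: keep -EKEYEXPIRED, flatten
 * anything else to -EACCES, as in the gss context parsing above. */
#include <errno.h>
#include <stdio.h>

static int map_gssd_error(int err)
{
    return err == -EKEYEXPIRED ? -EKEYEXPIRED : -EACCES;
}

int main(void)
{
    printf("EKEYEXPIRED -> %d, ETIMEDOUT -> %d\n",
           map_gssd_error(-EKEYEXPIRED), map_gssd_error(-ETIMEDOUT));
    return 0;
}

Letting -EKEYEXPIRED through is what allows the retry handling added elsewhere in this series (for example in fs/nfs/proc.c) to treat an expired TGT as a transient condition rather than a hard permission failure.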
+4
net/sunrpc/svc.c
··· 506 { 507 unsigned int pages, arghi; 508 509 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 510 * We assume one is at most one page 511 */
··· 506 { 507 unsigned int pages, arghi; 508 509 + /* bc_xprt uses fore channel allocated buffers */ 510 + if (svc_is_backchannel(rqstp)) 511 + return 1; 512 + 513 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 514 * We assume one is at most one page 515 */
+7 -2
net/sunrpc/xprtsock.c
··· 1912 case -EALREADY: 1913 xprt_clear_connecting(xprt); 1914 return; 1915 } 1916 out_eagain: 1917 status = -EAGAIN; ··· 2105 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2106 * to use the server side send routines. 2107 */ 2108 - void *bc_malloc(struct rpc_task *task, size_t size) 2109 { 2110 struct page *page; 2111 struct rpc_buffer *buf; ··· 2125 /* 2126 * Free the space allocated in the bc_alloc routine 2127 */ 2128 - void bc_free(void *buffer) 2129 { 2130 struct rpc_buffer *buf; 2131
··· 1912 case -EALREADY: 1913 xprt_clear_connecting(xprt); 1914 return; 1915 + case -EINVAL: 1916 + /* Happens, for instance, if the user specified a link 1917 + * local IPv6 address without a scope-id. 1918 + */ 1919 + goto out; 1920 } 1921 out_eagain: 1922 status = -EAGAIN; ··· 2100 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2101 * to use the server side send routines. 2102 */ 2103 + static void *bc_malloc(struct rpc_task *task, size_t size) 2104 { 2105 struct page *page; 2106 struct rpc_buffer *buf; ··· 2120 /* 2121 * Free the space allocated in the bc_alloc routine 2122 */ 2123 + static void bc_free(void *buffer) 2124 { 2125 struct rpc_buffer *buf; 2126
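Finally, the xprtsock.c hunk treats -EINVAL from the TCP connect attempt as a hard error rather than something to retry; the in-code comment gives the typical cause, a link-local IPv6 address supplied without a scope id. The small user-space program below reproduces that failure mode under the assumption that it runs on Linux, where such a connect typically fails immediately with EINVAL; the address and port are arbitrary.

/* Sketch: connecting a TCP socket to a link-local IPv6 address without
 * a scope id typically fails immediately with EINVAL on Linux, which is
 * the case the xprtsock.c hunk above now treats as a hard error. */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_in6 sin6;
    int fd = socket(AF_INET6, SOCK_STREAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&sin6, 0, sizeof(sin6));
    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = htons(2049);               /* arbitrary (NFS) port */
    inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
    sin6.sin6_scope_id = 0;                     /* no scope id supplied */

    if (connect(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0)
        printf("connect failed: %s (errno %d)\n", strerror(errno), errno);
    close(fd);
    return 0;
}

Retrying such a connect can never succeed, which is presumably why the error goes down the out path instead of out_eagain; making bc_malloc() and bc_free() static in the same hunk is just a visibility cleanup.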